filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence |
---|---|---|---|---|---|---|---|---|---|---|
public-engines/sample-engine/tests/conftest.py | #!/usr/bin/env python
# coding=utf-8
# Copyright [2019] [Apache Software Foundation]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
os.environ['TESTING'] = 'True'
@pytest.fixture
def mocked_params():
    return {'params': 1}
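# Illustrative usage (not part of the original file): a test that consumes
# the fixture above; the test name and assertion are hypothetical.
def test_uses_mocked_params(mocked_params):
    assert mocked_params['params'] == 1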
| []
| []
| [
"TESTING"
]
| [] | ["TESTING"] | python | 1 | 0 | |
pkg/reconciler/proxy/proxy.go | /*
Copyright 2021 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proxy
import (
"bytes"
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/markbates/inflect"
"go.uber.org/zap"
"gomodules.xyz/jsonpatch/v2"
admissionv1 "k8s.io/api/admission/v1"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
admissionlisters "k8s.io/client-go/listers/admissionregistration/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"knative.dev/pkg/apis"
"knative.dev/pkg/apis/duck"
"knative.dev/pkg/controller"
"knative.dev/pkg/kmp"
"knative.dev/pkg/logging"
"knative.dev/pkg/ptr"
pkgreconciler "knative.dev/pkg/reconciler"
"knative.dev/pkg/system"
"knative.dev/pkg/webhook"
certresources "knative.dev/pkg/webhook/certificates/resources"
)
const (
// user-provided and system CA certificates
trustedCAConfigMapName = "config-trusted-cabundle"
trustedCAConfigMapVolume = "config-trusted-cabundle-volume"
trustedCAKey = "ca-bundle.crt"
// service serving certificates (required to talk to the internal registry)
serviceCAConfigMapName = "config-service-cabundle"
serviceCAConfigMapVolume = "config-service-cabundle-volume"
serviceCAKey = "service-ca.crt"
)
// reconciler implements the AdmissionController for resources
type reconciler struct {
webhook.StatelessAdmissionImpl
pkgreconciler.LeaderAwareFuncs
key types.NamespacedName
path string
withContext func(context.Context) context.Context
client kubernetes.Interface
mwhlister admissionlisters.MutatingWebhookConfigurationLister
secretlister corelisters.SecretLister
disallowUnknownFields bool
secretName string
}
var _ controller.Reconciler = (*reconciler)(nil)
var _ pkgreconciler.LeaderAware = (*reconciler)(nil)
var _ webhook.AdmissionController = (*reconciler)(nil)
var _ webhook.StatelessAdmissionController = (*reconciler)(nil)
// Reconcile implements controller.Reconciler
func (ac *reconciler) Reconcile(ctx context.Context, key string) error {
logger := logging.FromContext(ctx)
if !ac.IsLeaderFor(ac.key) {
logger.Debugf("Skipping key %q, not the leader.", ac.key)
return nil
}
// Look up the webhook secret, and fetch the CA cert bundle.
secret, err := ac.secretlister.Secrets(system.Namespace()).Get(ac.secretName)
if err != nil {
logger.Errorw("Error fetching secret", zap.Error(err))
return err
}
caCert, ok := secret.Data[certresources.CACert]
if !ok {
return fmt.Errorf("secret %q is missing %q key", ac.secretName, certresources.CACert)
}
// Reconcile the webhook configuration.
return ac.reconcileMutatingWebhook(ctx, caCert)
}
// Path implements AdmissionController
func (ac *reconciler) Path() string {
return ac.path
}
// Admit implements AdmissionController
func (ac *reconciler) Admit(ctx context.Context, request *admissionv1.AdmissionRequest) *admissionv1.AdmissionResponse {
if ac.withContext != nil {
ctx = ac.withContext(ctx)
}
logger := logging.FromContext(ctx)
switch request.Operation {
case admissionv1.Create:
default:
logger.Info("Unhandled webhook operation, letting it through ", request.Operation)
return &admissionv1.AdmissionResponse{Allowed: true}
}
patchBytes, err := ac.mutate(ctx, request)
if err != nil {
return webhook.MakeErrorStatus("mutation failed: %v", err)
}
logger.Infof("Kind: %q PatchBytes: %v", request.Kind, string(patchBytes))
return &admissionv1.AdmissionResponse{
Patch: patchBytes,
Allowed: true,
PatchType: func() *admissionv1.PatchType {
pt := admissionv1.PatchTypeJSONPatch
return &pt
}(),
}
}
func (ac *reconciler) reconcileMutatingWebhook(ctx context.Context, caCert []byte) error {
logger := logging.FromContext(ctx)
plural := strings.ToLower(inflect.Pluralize("Pod"))
rules := []admissionregistrationv1.RuleWithOperations{
{
Operations: []admissionregistrationv1.OperationType{
admissionregistrationv1.Create,
},
Rule: admissionregistrationv1.Rule{
APIGroups: []string{""},
APIVersions: []string{"v1"},
Resources: []string{plural, plural + "/status"},
},
},
}
configuredWebhook, err := ac.mwhlister.Get(ac.key.Name)
if err != nil {
return fmt.Errorf("error retrieving webhook: %w", err)
}
webhook := configuredWebhook.DeepCopy()
// Clear out any previous (bad) OwnerReferences.
// See: https://github.com/knative/serving/issues/5845
webhook.OwnerReferences = nil
for i, wh := range webhook.Webhooks {
if wh.Name != webhook.Name {
continue
}
webhook.Webhooks[i].Rules = rules
webhook.Webhooks[i].NamespaceSelector = &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{{
Key: "operator.tekton.dev/disable-proxy",
Operator: metav1.LabelSelectorOpDoesNotExist,
}, {
// "control-plane" is added to support Azure's AKS, otherwise the controllers fight.
// See knative/pkg#1590 for details.
Key: "control-plane",
Operator: metav1.LabelSelectorOpDoesNotExist,
}},
}
webhook.Webhooks[i].ObjectSelector = &metav1.LabelSelector{
MatchLabels: map[string]string{
"app.kubernetes.io/managed-by": "tekton-pipelines",
},
}
webhook.Webhooks[i].ClientConfig.CABundle = caCert
if webhook.Webhooks[i].ClientConfig.Service == nil {
return fmt.Errorf("missing service reference for webhook: %s", wh.Name)
}
webhook.Webhooks[i].ClientConfig.Service.Path = ptr.String(ac.Path())
}
if ok, err := kmp.SafeEqual(configuredWebhook, webhook); err != nil {
return fmt.Errorf("error diffing webhooks: %w", err)
} else if !ok {
logger.Info("Updating webhook")
mwhclient := ac.client.AdmissionregistrationV1().MutatingWebhookConfigurations()
if _, err := mwhclient.Update(ctx, webhook, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("failed to update webhook: %w", err)
}
} else {
logger.Info("Webhook is valid")
}
return nil
}
func (ac *reconciler) mutate(ctx context.Context, req *admissionv1.AdmissionRequest) ([]byte, error) {
kind := req.Kind
newBytes := req.Object.Raw
oldBytes := req.OldObject.Raw
// Why, oh why are these different types...
gvk := schema.GroupVersionKind{
Group: kind.Group,
Version: kind.Version,
Kind: kind.Kind,
}
logger := logging.FromContext(ctx)
if gvk.Group != "" || gvk.Version != "v1" || gvk.Kind != "Pod" {
logger.Error("Unhandled kind: ", gvk)
return nil, fmt.Errorf("unhandled kind: %v", gvk)
}
// nil values denote absence of `old` (create) or `new` (delete) objects.
var oldObj, newObj corev1.Pod
if len(newBytes) != 0 {
newDecoder := json.NewDecoder(bytes.NewBuffer(newBytes))
if ac.disallowUnknownFields {
newDecoder.DisallowUnknownFields()
}
if err := newDecoder.Decode(&newObj); err != nil {
return nil, fmt.Errorf("cannot decode incoming new object: %w", err)
}
}
if len(oldBytes) != 0 {
oldDecoder := json.NewDecoder(bytes.NewBuffer(oldBytes))
if ac.disallowUnknownFields {
oldDecoder.DisallowUnknownFields()
}
if err := oldDecoder.Decode(&oldObj); err != nil {
return nil, fmt.Errorf("cannot decode incoming old object: %w", err)
}
}
var patches duck.JSONPatch
var err error
// Skip this step if the type we're dealing with is a duck type, since it is inherently
// incomplete and this will patch away all of the unspecified fields.
// Add these before defaulting fields, otherwise defaulting may cause an illegal patch
// because it expects the round tripped through Golang fields to be present already.
rtp, err := roundTripPatch(newBytes, newObj)
if err != nil {
return nil, fmt.Errorf("cannot create patch for round tripped newBytes: %w", err)
}
patches = append(patches, rtp...)
ctx = apis.WithinCreate(ctx)
ctx = apis.WithUserInfo(ctx, &req.UserInfo)
// Default the new object.
if patches, err = setDefaults(ac.client, ctx, patches, newObj); err != nil {
logger.Errorw("Failed the resource specific defaulter", zap.Error(err))
// Return the error message as-is to give the defaulter callback
// discretion over (our portion of) the message that the user sees.
return nil, err
}
return json.Marshal(patches)
}
// roundTripPatch generates the JSONPatch that corresponds to round tripping the given bytes through
// the Golang type (JSON -> Golang type -> JSON). Because it is not always true that
// bytes == json.Marshal(json.Unmarshal(bytes)).
//
// For example, if bytes did not contain a 'spec' field and the Golang type specifies its 'spec'
// field without omitempty, then by round tripping through the Golang type, we would have added
// `'spec': {}`.
func roundTripPatch(bytes []byte, unmarshalled interface{}) (duck.JSONPatch, error) {
if unmarshalled == nil {
return duck.JSONPatch{}, nil
}
marshaledBytes, err := json.Marshal(unmarshalled)
if err != nil {
return nil, fmt.Errorf("cannot marshal interface: %w", err)
}
return jsonpatch.CreatePatch(bytes, marshaledBytes)
}
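// Illustrative sketch (an assumption, not part of the original source): round
// tripping an empty document through corev1.Pod reintroduces struct fields
// that lack omitempty, which is exactly what the patch captures:
//
//	var pod corev1.Pod
//	_ = json.Unmarshal([]byte(`{}`), &pod)
//	rtp, _ := roundTripPatch([]byte(`{}`), pod)
//	// rtp now holds "add" operations for fields such as /metadata, /spec
//	// and /status that marshalling through the Go type added back.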
// setDefaults simply leverages apis.Defaultable to set defaults.
func setDefaults(client kubernetes.Interface, ctx context.Context, patches duck.JSONPatch, pod corev1.Pod) (duck.JSONPatch, error) {
before, after := pod.DeepCopyObject(), pod
var proxyEnv = []corev1.EnvVar{{
Name: "HTTPS_PROXY",
Value: os.Getenv("HTTPS_PROXY"),
}, {
Name: "HTTP_PROXY",
Value: os.Getenv("HTTP_PROXY"),
}, {
Name: "NO_PROXY",
Value: os.Getenv("NO_PROXY"),
}}
if after.Spec.Containers != nil {
for i, container := range after.Spec.Containers {
newEnvs := updateAndMergeEnv(container.Env, proxyEnv)
after.Spec.Containers[i].Env = newEnvs
}
}
exist, err := checkConfigMapExist(client, ctx, after.Namespace, trustedCAConfigMapName)
if err != nil {
return nil, err
}
if exist {
after = updateVolume(after, trustedCAConfigMapVolume, trustedCAConfigMapName, trustedCAKey)
}
exist, err = checkConfigMapExist(client, ctx, after.Namespace, serviceCAConfigMapName)
if err != nil {
return nil, err
}
if exist {
after = updateVolume(after, serviceCAConfigMapVolume, serviceCAConfigMapName, serviceCAKey)
}
patch, err := duck.CreatePatch(before, after)
if err != nil {
return nil, err
}
return append(patches, patch...), nil
}
// checkConfigMapExist reports whether the given ConfigMap exists in the namespace
func checkConfigMapExist(client kubernetes.Interface, ctx context.Context, ns string, name string) (bool, error) {
logger := logging.FromContext(ctx)
logger.Info("finding configmap: %s/%s", ns, name)
_, err := client.CoreV1().ConfigMaps(ns).Get(ctx, name, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return false, nil
}
if err != nil && !errors.IsNotFound(err) {
return false, err
}
return true, nil
}
// updateVolume updates volumes and volume mounts to mount the certs ConfigMap
func updateVolume(pod corev1.Pod, volumeName, configmapName, key string) corev1.Pod {
volumes := pod.Spec.Volumes
for i, v := range volumes {
if v.Name == volumeName {
volumes = append(volumes[:i], volumes[i+1:]...)
break
}
}
// Let's add the trusted and service CA bundle ConfigMaps as a volume in
// the PodSpec which will later be mounted to add certs in the pod.
volumes = append(volumes,
// Add trusted CA bundle
corev1.Volume{
Name: volumeName,
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{Name: configmapName},
Items: []corev1.KeyToPath{
{
Key: key,
Path: key,
},
},
},
},
},
)
pod.Spec.Volumes = volumes
// Now that the injected certificates have been added as a volume, let's
// mount them via volumeMounts in the containers
for i, c := range pod.Spec.Containers {
volumeMounts := c.VolumeMounts
// If volume mounts for injected certificates already exist then remove them
for i, vm := range volumeMounts {
if vm.Name == volumeName {
volumeMounts = append(volumeMounts[:i], volumeMounts[i+1:]...)
break
}
}
// /etc/ssl/certs is the default place where CA certs reside in *nix
// however this can be overridden using SSL_CERT_DIR, let's check for
// that here.
sslCertDir := "/etc/ssl/certs"
for _, env := range c.Env {
if env.Name == "SSL_CERT_DIR" {
sslCertDir = env.Value
}
}
// Let's mount the certificates now.
volumeMounts = append(volumeMounts,
corev1.VolumeMount{
Name: volumeName,
MountPath: filepath.Join(sslCertDir, key),
SubPath: key,
ReadOnly: true,
},
)
c.VolumeMounts = volumeMounts
pod.Spec.Containers[i] = c
}
return pod
}
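// Illustrative note (an assumption, not in the original source): for a
// container that sets SSL_CERT_DIR=/custom/certs, updateVolume mounts the
// trusted CA bundle read-only at /custom/certs/ca-bundle.crt via SubPath,
// replacing any earlier mount that used the same volume name.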
// updateAndMergeEnv merges two slices of env vars;
// precedence is given to the second input when a variable with the same name exists
func updateAndMergeEnv(containerenvs []corev1.EnvVar, proxyEnv []corev1.EnvVar) []corev1.EnvVar {
for _, env := range proxyEnv {
if env.Value == "" {
// If value is empty then remove that key from container
containerenvs = remove(containerenvs, env.Name)
} else {
var updated bool
for i := range containerenvs {
if env.Name == containerenvs[i].Name {
containerenvs[i].Value = env.Value
updated = true
}
}
if !updated {
containerenvs = append(containerenvs, corev1.EnvVar{
Name: env.Name,
Value: env.Value,
})
}
}
}
return containerenvs
}
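// Illustrative example (not part of the original source): proxy values win,
// and an empty proxy value removes that variable from the container:
//
//	merged := updateAndMergeEnv(
//		[]corev1.EnvVar{{Name: "HTTP_PROXY", Value: "old"}, {Name: "NO_PROXY", Value: "x"}},
//		[]corev1.EnvVar{{Name: "HTTP_PROXY", Value: "http://proxy:3128"}, {Name: "NO_PROXY", Value: ""}},
//	)
//	// merged == [{Name: "HTTP_PROXY", Value: "http://proxy:3128"}]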
func remove(env []corev1.EnvVar, key string) []corev1.EnvVar {
for i := range env {
if env[i].Name == key {
return append(env[:i], env[i+1:]...)
}
}
return env
}
| [
"\"HTTPS_PROXY\"",
"\"HTTP_PROXY\"",
"\"NO_PROXY\""
]
| []
| [
"HTTP_PROXY",
"HTTPS_PROXY",
"NO_PROXY"
]
| [] | ["HTTP_PROXY", "HTTPS_PROXY", "NO_PROXY"] | go | 3 | 0 | |
resources/windowskiosk/config.go | package main
import (
"encoding/json"
"io/ioutil"
"os"
log "github.com/sirupsen/logrus"
)
// Config - values of our config
type Config struct {
ServiceURL string `json:"serviceURL"`
ServiceClientID string `json:"serviceClientID"`
ServiceClientSecret string `json:"serviceClientSecret"`
UserName string `json:"userName"`
Password string `json:"password"`
CredFilePath string `json:"credFilePath"`
ResourceName string `json:"resourceName"`
}
// loadConfig loads the config file into memory.
// You can create a config file or pass in environment variables;
// the config file takes priority.
func loadConfig() Config {
c := Config{}
// if config file isn't passed in, don't try to look at it
if len(os.Getenv("KIOSK_CONFIG_FILE")) == 0 {
return c
}
file, err := ioutil.ReadFile(os.Getenv("KIOSK_CONFIG_FILE"))
if err != nil {
log.Debugf("error reading in the config file: %s", err)
}
_ = json.Unmarshal([]byte(file), &c)
return c
}
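// Illustrative usage (hypothetical): with KIOSK_CONFIG_FILE=/etc/kiosk.json
// exported, loadConfig unmarshals that JSON into Config; with the variable
// unset, it returns the zero-value Config and callers must rely on other
// sources such as environment variables.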
| [
"\"KIOSK_CONFIG_FILE\"",
"\"KIOSK_CONFIG_FILE\""
]
| []
| [
"KIOSK_CONFIG_FILE"
]
| [] | ["KIOSK_CONFIG_FILE"] | go | 1 | 0 | |
staging/src/k8s.io/apiserver/pkg/storage/etcd/testing/utils.go | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"fmt"
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
"os"
"path"
"testing"
"time"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/storage/etcd/etcdtest"
"k8s.io/apiserver/pkg/storage/etcd/testing/testingcert"
"k8s.io/apiserver/pkg/storage/storagebackend"
etcd "github.com/coreos/etcd/client"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api/etcdhttp"
"github.com/coreos/etcd/etcdserver/api/v2http"
"github.com/coreos/etcd/integration"
"github.com/coreos/etcd/pkg/testutil"
"github.com/coreos/etcd/pkg/transport"
"github.com/coreos/etcd/pkg/types"
"github.com/golang/glog"
"golang.org/x/net/context"
)
// EtcdTestServer encapsulates the data structures needed to start a local instance for testing
type EtcdTestServer struct {
// The following are lumped etcd2 test server params
// TODO: Deprecate in a post 1.5 release
etcdserver.ServerConfig
PeerListeners, ClientListeners []net.Listener
Client etcd.Client
CertificatesDir string
CertFile string
KeyFile string
CAFile string
raftHandler http.Handler
s *etcdserver.EtcdServer
hss []*httptest.Server
// The following are lumped etcd3 test server params
v3Cluster *integration.ClusterV3
V3Client *clientv3.Client
}
// newLocalListener opens a listener on localhost on any free port
func newLocalListener(t *testing.T) net.Listener {
l, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
return l
}
// newSecuredLocalListener opens a listener on localhost on any free port
// with TLS enabled
func newSecuredLocalListener(t *testing.T, certFile, keyFile, caFile string) net.Listener {
var l net.Listener
l, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
tlsInfo := transport.TLSInfo{
CertFile: certFile,
KeyFile: keyFile,
CAFile: caFile,
}
tlscfg, err := tlsInfo.ServerConfig()
if err != nil {
t.Fatalf("unexpected serverConfig error: %v", err)
}
l, err = transport.NewKeepAliveListener(l, "https", tlscfg)
if err != nil {
t.Fatal(err)
}
return l
}
func newHttpTransport(t *testing.T, certFile, keyFile, caFile string) etcd.CancelableTransport {
tlsInfo := transport.TLSInfo{
CertFile: certFile,
KeyFile: keyFile,
CAFile: caFile,
}
tr, err := transport.NewTransport(tlsInfo, time.Second)
if err != nil {
t.Fatal(err)
}
return tr
}
// configureTestCluster will set the params to start an etcd server
func configureTestCluster(t *testing.T, name string, https bool) *EtcdTestServer {
var err error
m := &EtcdTestServer{}
pln := newLocalListener(t)
m.PeerListeners = []net.Listener{pln}
m.PeerURLs, err = types.NewURLs([]string{"http://" + pln.Addr().String()})
if err != nil {
t.Fatal(err)
}
// Allow test launches to control where etcd data goes, for space or performance reasons
baseDir := os.Getenv("TEST_ETCD_DIR")
if len(baseDir) == 0 {
baseDir = os.TempDir()
}
if https {
m.CertificatesDir, err = ioutil.TempDir(baseDir, "etcd_certificates")
if err != nil {
t.Fatal(err)
}
m.CertFile = path.Join(m.CertificatesDir, "etcdcert.pem")
if err = ioutil.WriteFile(m.CertFile, []byte(testingcert.CertFileContent), 0644); err != nil {
t.Fatal(err)
}
m.KeyFile = path.Join(m.CertificatesDir, "etcdkey.pem")
if err = ioutil.WriteFile(m.KeyFile, []byte(testingcert.KeyFileContent), 0644); err != nil {
t.Fatal(err)
}
m.CAFile = path.Join(m.CertificatesDir, "ca.pem")
if err = ioutil.WriteFile(m.CAFile, []byte(testingcert.CAFileContent), 0644); err != nil {
t.Fatal(err)
}
cln := newSecuredLocalListener(t, m.CertFile, m.KeyFile, m.CAFile)
m.ClientListeners = []net.Listener{cln}
m.ClientURLs, err = types.NewURLs([]string{"https://" + cln.Addr().String()})
if err != nil {
t.Fatal(err)
}
} else {
cln := newLocalListener(t)
m.ClientListeners = []net.Listener{cln}
m.ClientURLs, err = types.NewURLs([]string{"http://" + cln.Addr().String()})
if err != nil {
t.Fatal(err)
}
}
m.AuthToken = "simple"
m.Name = name
m.DataDir, err = ioutil.TempDir(baseDir, "etcd")
if err != nil {
t.Fatal(err)
}
clusterStr := fmt.Sprintf("%s=http://%s", name, pln.Addr().String())
m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr)
if err != nil {
t.Fatal(err)
}
m.InitialClusterToken = "TestEtcd"
m.NewCluster = true
m.ForceNewCluster = false
m.ElectionTicks = 10
m.TickMs = uint(10)
return m
}
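// Illustrative note (an assumption, not in the original source): running
// tests as `TEST_ETCD_DIR=/mnt/ramdisk go test ./...` places the server's
// certificates and data directories on the ramdisk instead of os.TempDir().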
// launch will attempt to start the etcd server
func (m *EtcdTestServer) launch(t *testing.T) error {
var err error
if m.s, err = etcdserver.NewServer(&m.ServerConfig); err != nil {
return fmt.Errorf("failed to initialize the etcd server: %v", err)
}
m.s.SyncTicker = time.NewTicker(500 * time.Millisecond)
m.s.Start()
m.raftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.s)}
for _, ln := range m.PeerListeners {
hs := &httptest.Server{
Listener: ln,
Config: &http.Server{Handler: m.raftHandler},
}
hs.Start()
m.hss = append(m.hss, hs)
}
for _, ln := range m.ClientListeners {
hs := &httptest.Server{
Listener: ln,
Config: &http.Server{Handler: v2http.NewClientHandler(m.s, m.ServerConfig.ReqTimeout())},
}
hs.Start()
m.hss = append(m.hss, hs)
}
return nil
}
// waitUntilUp waits until the etcd cluster is up and its member list has propagated
func (m *EtcdTestServer) waitUntilUp() error {
membersAPI := etcd.NewMembersAPI(m.Client)
for start := time.Now(); time.Since(start) < wait.ForeverTestTimeout; time.Sleep(10 * time.Millisecond) {
members, err := membersAPI.List(context.TODO())
if err != nil {
glog.Errorf("Error when getting etcd cluster members")
continue
}
if len(members) == 1 && len(members[0].ClientURLs) > 0 {
return nil
}
}
return fmt.Errorf("timeout on waiting for etcd cluster")
}
// Terminate will shutdown the running etcd server
func (m *EtcdTestServer) Terminate(t *testing.T) {
if m.v3Cluster != nil {
m.v3Cluster.Terminate(t)
} else {
m.Client = nil
m.s.Stop()
// TODO: This is a pretty ugly hack to workaround races during closing
// in-memory etcd server in unit tests - see #18928 for more details.
// We should get rid of it as soon as we have a proper fix - etcd clients
// have overwritten transport counting opened connections (probably by
// overwriting Dial function) and termination function waiting for all
// connections to be closed and stopping accepting new ones.
time.Sleep(250 * time.Millisecond)
for _, hs := range m.hss {
hs.CloseClientConnections()
hs.Close()
}
if err := os.RemoveAll(m.ServerConfig.DataDir); err != nil {
t.Fatal(err)
}
if len(m.CertificatesDir) > 0 {
if err := os.RemoveAll(m.CertificatesDir); err != nil {
t.Fatal(err)
}
}
}
}
// NewEtcdTestClientServer DEPRECATED creates a new client and server for testing
func NewEtcdTestClientServer(t *testing.T) *EtcdTestServer {
server := configureTestCluster(t, "foo", true)
err := server.launch(t)
if err != nil {
t.Fatalf("Failed to start etcd server error=%v", err)
return nil
}
cfg := etcd.Config{
Endpoints: server.ClientURLs.StringSlice(),
Transport: newHttpTransport(t, server.CertFile, server.KeyFile, server.CAFile),
}
server.Client, err = etcd.New(cfg)
if err != nil {
server.Terminate(t)
t.Fatalf("Unexpected error in NewEtcdTestClientServer (%v)", err)
return nil
}
if err := server.waitUntilUp(); err != nil {
server.Terminate(t)
t.Fatalf("Unexpected error in waitUntilUp (%v)", err)
return nil
}
return server
}
// NewUnsecuredEtcdTestClientServer DEPRECATED creates a new client and server for testing
func NewUnsecuredEtcdTestClientServer(t *testing.T) *EtcdTestServer {
server := configureTestCluster(t, "foo", false)
err := server.launch(t)
if err != nil {
t.Fatalf("Failed to start etcd server error=%v", err)
return nil
}
cfg := etcd.Config{
Endpoints: server.ClientURLs.StringSlice(),
Transport: newHttpTransport(t, server.CertFile, server.KeyFile, server.CAFile),
}
server.Client, err = etcd.New(cfg)
if err != nil {
t.Errorf("Unexpected error in NewUnsecuredEtcdTestClientServer (%v)", err)
server.Terminate(t)
return nil
}
if err := server.waitUntilUp(); err != nil {
t.Errorf("Unexpected error in waitUntilUp (%v)", err)
server.Terminate(t)
return nil
}
return server
}
// NewUnsecuredEtcd3TestClientServer creates a new client and server for testing
func NewUnsecuredEtcd3TestClientServer(t *testing.T) (*EtcdTestServer, *storagebackend.Config) {
server := &EtcdTestServer{
v3Cluster: integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1}),
}
server.V3Client = server.v3Cluster.RandClient()
config := &storagebackend.Config{
Type: "etcd3",
Prefix: etcdtest.PathPrefix(),
ServerList: server.V3Client.Endpoints(),
DeserializationCacheSize: etcdtest.DeserializationCacheSize,
Paging: true,
}
return server, config
}
| [
"\"TEST_ETCD_DIR\""
]
| []
| [
"TEST_ETCD_DIR"
]
| [] | ["TEST_ETCD_DIR"] | go | 1 | 0 | |
examples/worker/worker.go | package main
import (
"context"
"fmt"
"log"
"os"
"os/signal"
"github.com/thoas/bokchoy"
"github.com/thoas/bokchoy/logging"
"github.com/thoas/bokchoy/middleware"
)
func main() {
var (
err error
logger logging.Logger
ctx = context.Background()
loggerLevel = os.Getenv("LOGGER_LEVEL")
)
if loggerLevel == "development" {
logger, err = logging.NewDevelopmentLogger()
if err != nil {
log.Fatal(err)
}
defer logger.Sync()
}
engine, err := bokchoy.New(ctx, bokchoy.Config{
Broker: bokchoy.BrokerConfig{
Type: "redis",
Redis: bokchoy.RedisConfig{
Type: "client",
Client: bokchoy.RedisClientConfig{
Addr: "localhost:6379",
},
},
},
}, bokchoy.WithLogger(logger))
if err != nil {
log.Fatal(err)
}
engine.Use(middleware.Recoverer)
engine.Use(middleware.RequestID)
engine.Use(middleware.DefaultLogger)
engine.Queue("tasks.message").HandleFunc(func(r *bokchoy.Request) error {
fmt.Println("Receive request:", r)
fmt.Println("Request context:", r.Context())
fmt.Println("Payload:", r.Task.Payload)
r.Task.Result = "You can store your result here"
return nil
})
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
go func() {
for range c {
log.Print("Received signal, gracefully stopping")
engine.Stop(ctx)
}
}()
engine.Run(ctx)
}
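// A minimal producer sketch under stated assumptions (not part of the original
// file): bokchoy queues expose a Publish method taking a context and a payload;
// the queue name and payload here are illustrative.
//
//	func publishExample(ctx context.Context, engine *bokchoy.Bokchoy) error {
//		_, err := engine.Queue("tasks.message").Publish(ctx, "payload to process")
//		return err
//	}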
| [
"\"LOGGER_LEVEL\""
]
| []
| [
"LOGGER_LEVEL"
]
| [] | ["LOGGER_LEVEL"] | go | 1 | 0 | |
env_bluetooth-API/lib/python2.7/site-packages/kombu/__init__.py | """Messaging library for Python."""
from __future__ import absolute_import, unicode_literals
import os
import re
import sys
if sys.version_info < (2, 7):  # pragma: no cover
    raise Exception('Kombu 4.0 requires Python versions 2.7 or later.')
from collections import namedtuple # noqa
__version__ = '4.1.0'
__author__ = 'Ask Solem'
__contact__ = '[email protected]'
__homepage__ = 'https://kombu.readthedocs.io'
__docformat__ = 'restructuredtext en'
# -eof meta-
version_info_t = namedtuple('version_info_t', (
'major', 'minor', 'micro', 'releaselevel', 'serial',
))
# bumpversion can only search for {current_version}
# so we have to parse the version here.
_temp = re.match(
r'(\d+)\.(\d+).(\d+)(.+)?', __version__).groups()
VERSION = version_info = version_info_t(
int(_temp[0]), int(_temp[1]), int(_temp[2]), _temp[3] or '', '')
del(_temp)
del(re)
STATICA_HACK = True
globals()['kcah_acitats'[::-1].upper()] = False
if STATICA_HACK:  # pragma: no cover
    # This is never executed, but tricks static analyzers (PyDev, PyCharm,
    # pylint, etc.) into knowing the types of these symbols, and what
    # they contain.
    from kombu.connection import Connection, BrokerConnection  # noqa
    from kombu.entity import Exchange, Queue, binding  # noqa
    from kombu.message import Message  # noqa
    from kombu.messaging import Consumer, Producer  # noqa
    from kombu.pools import connections, producers  # noqa
    from kombu.utils.url import parse_url  # noqa
    from kombu.common import eventloop, uuid  # noqa
    from kombu.serialization import (  # noqa
        enable_insecure_serializers,
        disable_insecure_serializers,
    )
# Lazy loading.
# - See werkzeug/__init__.py for the rationale behind this.
from types import ModuleType # noqa
all_by_module = {
'kombu.connection': ['Connection', 'BrokerConnection'],
'kombu.entity': ['Exchange', 'Queue', 'binding'],
'kombu.message': ['Message'],
'kombu.messaging': ['Consumer', 'Producer'],
'kombu.pools': ['connections', 'producers'],
'kombu.utils.url': ['parse_url'],
'kombu.common': ['eventloop', 'uuid'],
'kombu.serialization': [
'enable_insecure_serializers',
'disable_insecure_serializers',
],
}
object_origins = {}
for module, items in all_by_module.items():
    for item in items:
        object_origins[item] = module
class module(ModuleType):
    """Customized Python module."""

    def __getattr__(self, name):
        if name in object_origins:
            module = __import__(object_origins[name], None, None, [name])
            for extra_name in all_by_module[module.__name__]:
                setattr(self, extra_name, getattr(module, extra_name))
            return getattr(module, name)
        return ModuleType.__getattribute__(self, name)

    def __dir__(self):
        result = list(new_module.__all__)
        result.extend(('__file__', '__path__', '__doc__', '__all__',
                       '__docformat__', '__name__', '__path__', 'VERSION',
                       '__package__', '__version__', '__author__',
                       '__contact__', '__homepage__', '__docformat__'))
        return result
# 2.5 does not define __package__
try:
    package = __package__
except NameError:  # pragma: no cover
    package = 'kombu'
# keep a reference to this module so that it's not garbage collected
old_module = sys.modules[__name__]
new_module = sys.modules[__name__] = module(__name__)
new_module.__dict__.update({
'__file__': __file__,
'__path__': __path__,
'__doc__': __doc__,
'__all__': tuple(object_origins),
'__version__': __version__,
'__author__': __author__,
'__contact__': __contact__,
'__homepage__': __homepage__,
'__docformat__': __docformat__,
'__package__': package,
'version_info_t': version_info_t,
'version_info': version_info,
'VERSION': VERSION,
'absolute_import': absolute_import,
'unicode_literals': unicode_literals,
})
if os.environ.get('KOMBU_LOG_DEBUG'):  # pragma: no cover
    os.environ.update(KOMBU_LOG_CHANNEL='1', KOMBU_LOG_CONNECTION='1')
    from .utils import debug
    debug.setup_logging()
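# Illustrative usage (not part of the original module): because of the lazy
# module above, the first attribute access triggers the real import:
#
#   import kombu
#   conn = kombu.Connection('redis://localhost:6379/0')  # imports kombu.connection on demand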
| []
| []
| [
"KOMBU_LOG_DEBUG"
]
| [] | ["KOMBU_LOG_DEBUG"] | python | 1 | 0 | |
plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This is the default algorithm provider for the scheduler.
package defaults
import (
"os"
"strconv"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/plugin/pkg/scheduler"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities"
"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
"github.com/golang/glog"
)
const (
// GCE instances can have up to 16 PD volumes attached.
DefaultMaxGCEPDVolumes = 16
ClusterAutoscalerProvider = "ClusterAutoscalerProvider"
StatefulSetKind = "StatefulSet"
)
func init() {
// Register functions that extract metadata used by predicates and priorities computations.
factory.RegisterPredicateMetadataProducerFactory(
func(args factory.PluginFactoryArgs) algorithm.MetadataProducer {
return predicates.NewPredicateMetadataFactory(args.PodLister)
})
factory.RegisterPriorityMetadataProducerFactory(
func(args factory.PluginFactoryArgs) algorithm.MetadataProducer {
return priorities.PriorityMetadata
})
// Registers algorithm providers. By default we use 'DefaultProvider', but the user can specify
// a different one via a flag.
factory.RegisterAlgorithmProvider(factory.DefaultProvider, defaultPredicates(), defaultPriorities())
// Cluster autoscaler friendly scheduling algorithm.
factory.RegisterAlgorithmProvider(ClusterAutoscalerProvider, defaultPredicates(),
copyAndReplace(defaultPriorities(), "LeastRequestedPriority", "MostRequestedPriority"))
// Registers predicates and priorities that are not enabled by default, but that the user can pick
// when creating their own set of priorities/predicates.
// PodFitsPorts has been replaced by PodFitsHostPorts for better user understanding.
// For backwards compatibility with 1.0, PodFitsPorts is registered as well.
factory.RegisterFitPredicate("PodFitsPorts", predicates.PodFitsHostPorts)
// Fit is defined based on the absence of port conflicts.
// This predicate is actually a default predicate, because it is invoked from
// predicates.GeneralPredicates()
factory.RegisterFitPredicate("PodFitsHostPorts", predicates.PodFitsHostPorts)
// Fit is determined by resource availability.
// This predicate is actually a default predicate, because it is invoked from
// predicates.GeneralPredicates()
factory.RegisterFitPredicate("PodFitsResources", predicates.PodFitsResources)
// Fit is determined by the presence of the Host parameter and a string match
// This predicate is actually a default predicate, because it is invoked from
// predicates.GeneralPredicates()
factory.RegisterFitPredicate("HostName", predicates.PodFitsHost)
// Fit is determined by node selector query.
factory.RegisterFitPredicate("MatchNodeSelector", predicates.PodSelectorMatches)
// Use equivalence class to speed up predicates & priorities
factory.RegisterGetEquivalencePodFunction(GetEquivalencePod)
// ServiceSpreadingPriority is a priority config factory that spreads pods by minimizing
// the number of pods (belonging to the same service) on the same node.
// Register the factory so that it's available, but do not include it as part of the default priorities
// Largely replaced by "SelectorSpreadPriority", but registered for backward compatibility with 1.0
factory.RegisterPriorityConfigFactory(
"ServiceSpreadingPriority",
factory.PriorityConfigFactory{
Function: func(args factory.PluginFactoryArgs) algorithm.PriorityFunction {
return priorities.NewSelectorSpreadPriority(args.ServiceLister, algorithm.EmptyControllerLister{}, algorithm.EmptyReplicaSetLister{})
},
Weight: 1,
},
)
// EqualPriority is a prioritizer function that gives an equal weight of one to all nodes
// Register the priority function so that its available
// but do not include it as part of the default priorities
factory.RegisterPriorityFunction2("EqualPriority", scheduler.EqualPriorityMap, nil, 1)
// ImageLocalityPriority prioritizes nodes based on locality of images requested by a pod. Nodes with larger size
// of already-installed packages required by the pod will be preferred over nodes with no already-installed
// packages required by the pod or a small total size of already-installed packages required by the pod.
factory.RegisterPriorityFunction2("ImageLocalityPriority", priorities.ImageLocalityPriorityMap, nil, 1)
// Optional, cluster-autoscaler friendly priority function - give used nodes higher priority.
factory.RegisterPriorityFunction2("MostRequestedPriority", priorities.MostRequestedPriorityMap, nil, 1)
}
func defaultPredicates() sets.String {
return sets.NewString(
// Fit is determined by volume zone requirements.
factory.RegisterFitPredicateFactory(
"NoVolumeZoneConflict",
func(args factory.PluginFactoryArgs) algorithm.FitPredicate {
return predicates.NewVolumeZonePredicate(args.PVInfo, args.PVCInfo)
},
),
// Fit is determined by whether or not there would be too many AWS EBS volumes attached to the node
factory.RegisterFitPredicateFactory(
"MaxEBSVolumeCount",
func(args factory.PluginFactoryArgs) algorithm.FitPredicate {
// TODO: allow for generically parameterized scheduler predicates, because this is a bit ugly
maxVols := getMaxVols(aws.DefaultMaxEBSVolumes)
return predicates.NewMaxPDVolumeCountPredicate(predicates.EBSVolumeFilter, maxVols, args.PVInfo, args.PVCInfo)
},
),
// Fit is determined by whether or not there would be too many GCE PD volumes attached to the node
factory.RegisterFitPredicateFactory(
"MaxGCEPDVolumeCount",
func(args factory.PluginFactoryArgs) algorithm.FitPredicate {
// TODO: allow for generically parameterized scheduler predicates, because this is a bit ugly
maxVols := getMaxVols(DefaultMaxGCEPDVolumes)
return predicates.NewMaxPDVolumeCountPredicate(predicates.GCEPDVolumeFilter, maxVols, args.PVInfo, args.PVCInfo)
},
),
// Fit is determined by inter-pod affinity.
factory.RegisterFitPredicateFactory(
"MatchInterPodAffinity",
func(args factory.PluginFactoryArgs) algorithm.FitPredicate {
return predicates.NewPodAffinityPredicate(args.NodeInfo, args.PodLister, args.FailureDomains)
},
),
// Fit is determined by non-conflicting disk volumes.
factory.RegisterFitPredicate("NoDiskConflict", predicates.NoDiskConflict),
// GeneralPredicates are the predicates that are enforced by all Kubernetes components
// (e.g. kubelet and all schedulers)
factory.RegisterFitPredicate("GeneralPredicates", predicates.GeneralPredicates),
// Fit is determined based on whether a pod can tolerate all of the node's taints
factory.RegisterFitPredicate("PodToleratesNodeTaints", predicates.PodToleratesNodeTaints),
// Fit is determined by node memory pressure condition.
factory.RegisterFitPredicate("CheckNodeMemoryPressure", predicates.CheckNodeMemoryPressurePredicate),
// Fit is determined by node disk pressure condition.
factory.RegisterFitPredicate("CheckNodeDiskPressure", predicates.CheckNodeDiskPressurePredicate),
)
}
func defaultPriorities() sets.String {
return sets.NewString(
// spreads pods by minimizing the number of pods (belonging to the same service or replication controller) on the same node.
factory.RegisterPriorityConfigFactory(
"SelectorSpreadPriority",
factory.PriorityConfigFactory{
Function: func(args factory.PluginFactoryArgs) algorithm.PriorityFunction {
return priorities.NewSelectorSpreadPriority(args.ServiceLister, args.ControllerLister, args.ReplicaSetLister)
},
Weight: 1,
},
),
// pods should be placed in the same topological domain (e.g. same node, same rack, same zone, same power domain, etc.)
// as some other pods, or, conversely, should not be placed in the same topological domain as some other pods.
factory.RegisterPriorityConfigFactory(
"InterPodAffinityPriority",
factory.PriorityConfigFactory{
Function: func(args factory.PluginFactoryArgs) algorithm.PriorityFunction {
return priorities.NewInterPodAffinityPriority(args.NodeInfo, args.NodeLister, args.PodLister, args.HardPodAffinitySymmetricWeight, args.FailureDomains)
},
Weight: 1,
},
),
// Prioritize nodes by least requested utilization.
factory.RegisterPriorityFunction2("LeastRequestedPriority", priorities.LeastRequestedPriorityMap, nil, 1),
// Prioritizes nodes to help achieve balanced resource usage
factory.RegisterPriorityFunction2("BalancedResourceAllocation", priorities.BalancedResourceAllocationMap, nil, 1),
// Set this weight large enough to override all other priority functions.
// TODO: Figure out a better way to do this, maybe at same time as fixing #24720.
factory.RegisterPriorityFunction2("NodePreferAvoidPodsPriority", priorities.CalculateNodePreferAvoidPodsPriorityMap, nil, 10000),
// Prioritizes nodes that have labels matching NodeAffinity
factory.RegisterPriorityFunction2("NodeAffinityPriority", priorities.CalculateNodeAffinityPriorityMap, priorities.CalculateNodeAffinityPriorityReduce, 1),
// TODO: explain what it does.
factory.RegisterPriorityFunction2("TaintTolerationPriority", priorities.ComputeTaintTolerationPriorityMap, priorities.ComputeTaintTolerationPriorityReduce, 1),
)
}
// getMaxVols checks the max PD volumes environment variable, otherwise returning a default value
func getMaxVols(defaultVal int) int {
if rawMaxVols := os.Getenv("KUBE_MAX_PD_VOLS"); rawMaxVols != "" {
if parsedMaxVols, err := strconv.Atoi(rawMaxVols); err != nil {
glog.Errorf("Unable to parse maxiumum PD volumes value, using default of %v: %v", defaultVal, err)
} else if parsedMaxVols <= 0 {
glog.Errorf("Maximum PD volumes must be a positive value, using default of %v", defaultVal)
} else {
return parsedMaxVols
}
}
return defaultVal
}
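// Illustrative example (an assumption, not in the original source): with
// KUBE_MAX_PD_VOLS=39 exported, getMaxVols(DefaultMaxGCEPDVolumes) returns 39;
// an unset, non-numeric, or non-positive value falls back to the default of 16.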
func copyAndReplace(set sets.String, replaceWhat, replaceWith string) sets.String {
result := sets.NewString(set.List()...)
if result.Has(replaceWhat) {
result.Delete(replaceWhat)
result.Insert(replaceWith)
}
return result
}
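// For example (illustrative): copyAndReplace(sets.NewString("LeastRequestedPriority", "X"),
// "LeastRequestedPriority", "MostRequestedPriority") yields {"MostRequestedPriority", "X"}.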
// GetEquivalencePod returns a EquivalencePod which contains a group of pod attributes which can be reused.
func GetEquivalencePod(pod *v1.Pod) interface{} {
equivalencePod := EquivalencePod{}
// For now we only consider pods:
// 1. OwnerReferences is Controller
// 2. OwnerReferences kind is in valid controller kinds
// 3. with same OwnerReferences
// to be equivalent
if len(pod.OwnerReferences) != 0 {
for _, ref := range pod.OwnerReferences {
if *ref.Controller && isValidControllerKind(ref.Kind) {
equivalencePod.ControllerRef = ref
// a pod can only belong to one controller
break
}
}
}
return &equivalencePod
}
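// Illustrative example (an assumption, not in the original source): a pod
// whose controller OwnerReference points at a ReplicaSet yields an
// EquivalencePod carrying that reference, while a pod controlled by a
// StatefulSet yields an empty EquivalencePod, since isValidControllerKind
// below excludes StatefulSet.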
// isValidControllerKind checks if a given controller's kind can be applied to equivalence pod algorithm.
func isValidControllerKind(kind string) bool {
switch kind {
// list of kinds that we cannot handle
case StatefulSetKind:
return false
default:
return true
}
}
// EquivalencePod is a group of pod attributes which can be reused as equivalence to schedule other pods.
type EquivalencePod struct {
ControllerRef v1.OwnerReference
}
| [
"\"KUBE_MAX_PD_VOLS\""
]
| []
| [
"KUBE_MAX_PD_VOLS"
]
| [] | ["KUBE_MAX_PD_VOLS"] | go | 1 | 0 | |
src/net/http/fs_test.go | // Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http_test
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"mime/multipart"
"net"
. "net/http"
"net/http/httptest"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strings"
"testing"
"time"
)
const (
testFile = "testdata/file"
testFileLen = 11
)
type wantRange struct {
start, end int64 // range [start,end)
}
var ServeFileRangeTests = []struct {
r string
code int
ranges []wantRange
}{
{r: "", code: StatusOK},
{r: "bytes=0-4", code: StatusPartialContent, ranges: []wantRange{{0, 5}}},
{r: "bytes=2-", code: StatusPartialContent, ranges: []wantRange{{2, testFileLen}}},
{r: "bytes=-5", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 5, testFileLen}}},
{r: "bytes=3-7", code: StatusPartialContent, ranges: []wantRange{{3, 8}}},
{r: "bytes=0-0,-2", code: StatusPartialContent, ranges: []wantRange{{0, 1}, {testFileLen - 2, testFileLen}}},
{r: "bytes=0-1,5-8", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, 9}}},
{r: "bytes=0-1,5-", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, testFileLen}}},
{r: "bytes=5-1000", code: StatusPartialContent, ranges: []wantRange{{5, testFileLen}}},
{r: "bytes=0-,1-,2-,3-,4-", code: StatusOK}, // ignore wasteful range request
{r: "bytes=0-9", code: StatusPartialContent, ranges: []wantRange{{0, testFileLen - 1}}},
{r: "bytes=0-10", code: StatusPartialContent, ranges: []wantRange{{0, testFileLen}}},
{r: "bytes=0-11", code: StatusPartialContent, ranges: []wantRange{{0, testFileLen}}},
{r: "bytes=10-11", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 1, testFileLen}}},
{r: "bytes=10-", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 1, testFileLen}}},
{r: "bytes=11-", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=11-12", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=12-12", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=11-100", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=12-100", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=100-", code: StatusRequestedRangeNotSatisfiable},
{r: "bytes=100-1000", code: StatusRequestedRangeNotSatisfiable},
}
func TestServeFile(t *testing.T) {
setParallel(t)
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, "testdata/file")
}))
defer ts.Close()
var err error
file, err := ioutil.ReadFile(testFile)
if err != nil {
t.Fatal("reading file:", err)
}
// set up the Request (re-used for all tests)
var req Request
req.Header = make(Header)
if req.URL, err = url.Parse(ts.URL); err != nil {
t.Fatal("ParseURL:", err)
}
req.Method = "GET"
// straight GET
_, body := getBody(t, "straight get", req)
if !bytes.Equal(body, file) {
t.Fatalf("body mismatch: got %q, want %q", body, file)
}
// Range tests
Cases:
for _, rt := range ServeFileRangeTests {
if rt.r != "" {
req.Header.Set("Range", rt.r)
}
resp, body := getBody(t, fmt.Sprintf("range test %q", rt.r), req)
if resp.StatusCode != rt.code {
t.Errorf("range=%q: StatusCode=%d, want %d", rt.r, resp.StatusCode, rt.code)
}
if rt.code == StatusRequestedRangeNotSatisfiable {
continue
}
wantContentRange := ""
if len(rt.ranges) == 1 {
rng := rt.ranges[0]
wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen)
}
cr := resp.Header.Get("Content-Range")
if cr != wantContentRange {
t.Errorf("range=%q: Content-Range = %q, want %q", rt.r, cr, wantContentRange)
}
ct := resp.Header.Get("Content-Type")
if len(rt.ranges) == 1 {
rng := rt.ranges[0]
wantBody := file[rng.start:rng.end]
if !bytes.Equal(body, wantBody) {
t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody)
}
if strings.HasPrefix(ct, "multipart/byteranges") {
t.Errorf("range=%q content-type = %q; unexpected multipart/byteranges", rt.r, ct)
}
}
if len(rt.ranges) > 1 {
typ, params, err := mime.ParseMediaType(ct)
if err != nil {
t.Errorf("range=%q content-type = %q; %v", rt.r, ct, err)
continue
}
if typ != "multipart/byteranges" {
t.Errorf("range=%q content-type = %q; want multipart/byteranges", rt.r, typ)
continue
}
if params["boundary"] == "" {
t.Errorf("range=%q content-type = %q; lacks boundary", rt.r, ct)
continue
}
if g, w := resp.ContentLength, int64(len(body)); g != w {
t.Errorf("range=%q Content-Length = %d; want %d", rt.r, g, w)
continue
}
mr := multipart.NewReader(bytes.NewReader(body), params["boundary"])
for ri, rng := range rt.ranges {
part, err := mr.NextPart()
if err != nil {
t.Errorf("range=%q, reading part index %d: %v", rt.r, ri, err)
continue Cases
}
wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen)
if g, w := part.Header.Get("Content-Range"), wantContentRange; g != w {
t.Errorf("range=%q: part Content-Range = %q; want %q", rt.r, g, w)
}
body, err := ioutil.ReadAll(part)
if err != nil {
t.Errorf("range=%q, reading part index %d body: %v", rt.r, ri, err)
continue Cases
}
wantBody := file[rng.start:rng.end]
if !bytes.Equal(body, wantBody) {
t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody)
}
}
_, err = mr.NextPart()
if err != io.EOF {
t.Errorf("range=%q; expected final error io.EOF; got %v", rt.r, err)
}
}
}
}
func TestServeFile_DotDot(t *testing.T) {
tests := []struct {
req string
wantStatus int
}{
{"/testdata/file", 200},
{"/../file", 400},
{"/..", 400},
{"/../", 400},
{"/../foo", 400},
{"/..\\foo", 400},
{"/file/a", 200},
{"/file/a..", 200},
{"/file/a/..", 400},
{"/file/a\\..", 400},
}
for _, tt := range tests {
req, err := ReadRequest(bufio.NewReader(strings.NewReader("GET " + tt.req + " HTTP/1.1\r\nHost: foo\r\n\r\n")))
if err != nil {
t.Errorf("bad request %q: %v", tt.req, err)
continue
}
rec := httptest.NewRecorder()
ServeFile(rec, req, "testdata/file")
if rec.Code != tt.wantStatus {
t.Errorf("for request %q, status = %d; want %d", tt.req, rec.Code, tt.wantStatus)
}
}
}
var fsRedirectTestData = []struct {
original, redirect string
}{
{"/test/index.html", "/test/"},
{"/test/testdata", "/test/testdata/"},
{"/test/testdata/file/", "/test/testdata/file"},
}
func TestFSRedirect(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(StripPrefix("/test", FileServer(Dir("."))))
defer ts.Close()
for _, data := range fsRedirectTestData {
res, err := Get(ts.URL + data.original)
if err != nil {
t.Fatal(err)
}
res.Body.Close()
if g, e := res.Request.URL.Path, data.redirect; g != e {
t.Errorf("redirect from %s: got %s, want %s", data.original, g, e)
}
}
}
type testFileSystem struct {
open func(name string) (File, error)
}
func (fs *testFileSystem) Open(name string) (File, error) {
return fs.open(name)
}
func TestFileServerCleans(t *testing.T) {
defer afterTest(t)
ch := make(chan string, 1)
fs := FileServer(&testFileSystem{func(name string) (File, error) {
ch <- name
return nil, errors.New("file does not exist")
}})
tests := []struct {
reqPath, openArg string
}{
{"/foo.txt", "/foo.txt"},
{"//foo.txt", "/foo.txt"},
{"/../foo.txt", "/foo.txt"},
}
req, _ := NewRequest("GET", "http://example.com", nil)
for n, test := range tests {
rec := httptest.NewRecorder()
req.URL.Path = test.reqPath
fs.ServeHTTP(rec, req)
if got := <-ch; got != test.openArg {
t.Errorf("test %d: got %q, want %q", n, got, test.openArg)
}
}
}
func TestFileServerEscapesNames(t *testing.T) {
defer afterTest(t)
const dirListPrefix = "<pre>\n"
const dirListSuffix = "\n</pre>\n"
tests := []struct {
name, escaped string
}{
{`simple_name`, `<a href="simple_name">simple_name</a>`},
{`"'<>&`, `<a href="%22%27%3C%3E&">"'<>&</a>`},
{`?foo=bar#baz`, `<a href="%3Ffoo=bar%23baz">?foo=bar#baz</a>`},
{`<combo>?foo`, `<a href="%3Ccombo%3E%3Ffoo"><combo>?foo</a>`},
{`foo:bar`, `<a href="./foo:bar">foo:bar</a>`},
}
// We put each test file in its own directory in the fakeFS so we can look at it in isolation.
fs := make(fakeFS)
for i, test := range tests {
testFile := &fakeFileInfo{basename: test.name}
fs[fmt.Sprintf("/%d", i)] = &fakeFileInfo{
dir: true,
modtime: time.Unix(1000000000, 0).UTC(),
ents: []*fakeFileInfo{testFile},
}
fs[fmt.Sprintf("/%d/%s", i, test.name)] = testFile
}
ts := httptest.NewServer(FileServer(&fs))
defer ts.Close()
for i, test := range tests {
url := fmt.Sprintf("%s/%d", ts.URL, i)
res, err := Get(url)
if err != nil {
t.Fatalf("test %q: Get: %v", test.name, err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatalf("test %q: read Body: %v", test.name, err)
}
s := string(b)
if !strings.HasPrefix(s, dirListPrefix) || !strings.HasSuffix(s, dirListSuffix) {
t.Errorf("test %q: listing dir, full output is %q, want prefix %q and suffix %q", test.name, s, dirListPrefix, dirListSuffix)
}
if trimmed := strings.TrimSuffix(strings.TrimPrefix(s, dirListPrefix), dirListSuffix); trimmed != test.escaped {
t.Errorf("test %q: listing dir, filename escaped to %q, want %q", test.name, trimmed, test.escaped)
}
res.Body.Close()
}
}
func TestFileServerSortsNames(t *testing.T) {
defer afterTest(t)
const contents = "I am a fake file"
dirMod := time.Unix(123, 0).UTC()
fileMod := time.Unix(1000000000, 0).UTC()
fs := fakeFS{
"/": &fakeFileInfo{
dir: true,
modtime: dirMod,
ents: []*fakeFileInfo{
{
basename: "b",
modtime: fileMod,
contents: contents,
},
{
basename: "a",
modtime: fileMod,
contents: contents,
},
},
},
}
ts := httptest.NewServer(FileServer(&fs))
defer ts.Close()
res, err := Get(ts.URL)
if err != nil {
t.Fatalf("Get: %v", err)
}
defer res.Body.Close()
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatalf("read Body: %v", err)
}
s := string(b)
if !strings.Contains(s, "<a href=\"a\">a</a>\n<a href=\"b\">b</a>") {
t.Errorf("output appears to be unsorted:\n%s", s)
}
}
func mustRemoveAll(dir string) {
err := os.RemoveAll(dir)
if err != nil {
panic(err)
}
}
func TestFileServerImplicitLeadingSlash(t *testing.T) {
defer afterTest(t)
tempDir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatalf("TempDir: %v", err)
}
defer mustRemoveAll(tempDir)
if err := ioutil.WriteFile(filepath.Join(tempDir, "foo.txt"), []byte("Hello world"), 0644); err != nil {
t.Fatalf("WriteFile: %v", err)
}
ts := httptest.NewServer(StripPrefix("/bar/", FileServer(Dir(tempDir))))
defer ts.Close()
get := func(suffix string) string {
res, err := Get(ts.URL + suffix)
if err != nil {
t.Fatalf("Get %s: %v", suffix, err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatalf("ReadAll %s: %v", suffix, err)
}
res.Body.Close()
return string(b)
}
if s := get("/bar/"); !strings.Contains(s, ">foo.txt<") {
t.Logf("expected a directory listing with foo.txt, got %q", s)
}
if s := get("/bar/foo.txt"); s != "Hello world" {
t.Logf("expected %q, got %q", "Hello world", s)
}
}
func TestDirJoin(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("skipping test on windows")
}
wfi, err := os.Stat("/etc/hosts")
if err != nil {
t.Skip("skipping test; no /etc/hosts file")
}
test := func(d Dir, name string) {
f, err := d.Open(name)
if err != nil {
t.Fatalf("open of %s: %v", name, err)
}
defer f.Close()
gfi, err := f.Stat()
if err != nil {
t.Fatalf("stat of %s: %v", name, err)
}
if !os.SameFile(gfi, wfi) {
t.Errorf("%s got different file", name)
}
}
test(Dir("/etc/"), "/hosts")
test(Dir("/etc/"), "hosts")
test(Dir("/etc/"), "../../../../hosts")
test(Dir("/etc"), "/hosts")
test(Dir("/etc"), "hosts")
test(Dir("/etc"), "../../../../hosts")
// Not really directories, but since we use this trick in
// ServeFile, test it:
test(Dir("/etc/hosts"), "")
test(Dir("/etc/hosts"), "/")
test(Dir("/etc/hosts"), "../")
}
func TestEmptyDirOpenCWD(t *testing.T) {
test := func(d Dir) {
name := "fs_test.go"
f, err := d.Open(name)
if err != nil {
t.Fatalf("open of %s: %v", name, err)
}
defer f.Close()
}
test(Dir(""))
test(Dir("."))
test(Dir("./"))
}
func TestServeFileContentType(t *testing.T) {
defer afterTest(t)
const ctype = "icecream/chocolate"
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
switch r.FormValue("override") {
case "1":
w.Header().Set("Content-Type", ctype)
case "2":
// Explicitly inhibit sniffing.
w.Header()["Content-Type"] = []string{}
}
ServeFile(w, r, "testdata/file")
}))
defer ts.Close()
get := func(override string, want []string) {
resp, err := Get(ts.URL + "?override=" + override)
if err != nil {
t.Fatal(err)
}
if h := resp.Header["Content-Type"]; !reflect.DeepEqual(h, want) {
t.Errorf("Content-Type mismatch: got %v, want %v", h, want)
}
resp.Body.Close()
}
get("0", []string{"text/plain; charset=utf-8"})
get("1", []string{ctype})
get("2", nil)
}
func TestServeFileMimeType(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, "testdata/style.css")
}))
defer ts.Close()
resp, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
want := "text/css; charset=utf-8"
if h := resp.Header.Get("Content-Type"); h != want {
t.Errorf("Content-Type mismatch: got %q, want %q", h, want)
}
}
func TestServeFileFromCWD(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, "fs_test.go")
}))
defer ts.Close()
r, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
r.Body.Close()
if r.StatusCode != 200 {
t.Fatalf("expected 200 OK, got %s", r.Status)
}
}
// Issue 13996
func TestServeDirWithoutTrailingSlash(t *testing.T) {
e := "/testdata/"
defer afterTest(t)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
ServeFile(w, r, ".")
}))
defer ts.Close()
r, err := Get(ts.URL + "/testdata")
if err != nil {
t.Fatal(err)
}
r.Body.Close()
if g := r.Request.URL.Path; g != e {
t.Errorf("got %s, want %s", g, e)
}
}
// Tests that ServeFile doesn't add a Content-Length if a Content-Encoding is
// specified.
func TestServeFileWithContentEncoding_h1(t *testing.T) { testServeFileWithContentEncoding(t, h1Mode) }
func TestServeFileWithContentEncoding_h2(t *testing.T) { testServeFileWithContentEncoding(t, h2Mode) }
func testServeFileWithContentEncoding(t *testing.T, h2 bool) {
defer afterTest(t)
cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
w.Header().Set("Content-Encoding", "foo")
ServeFile(w, r, "testdata/file")
// Because the testdata is so small, it would fit in
// both the h1 and h2 Server's write buffers. For h1,
// sendfile is used, though, forcing a header flush at
// the io.Copy. http2 doesn't do a header flush so
// buffers all 11 bytes and then adds its own
// Content-Length. To prevent the Server's
// Content-Length and test ServeFile only, flush here.
w.(Flusher).Flush()
}))
defer cst.close()
resp, err := cst.c.Get(cst.ts.URL)
if err != nil {
t.Fatal(err)
}
resp.Body.Close()
if g, e := resp.ContentLength, int64(-1); g != e {
t.Errorf("Content-Length mismatch: got %d, want %d", g, e)
}
}
func TestServeIndexHtml(t *testing.T) {
defer afterTest(t)
const want = "index.html says hello\n"
ts := httptest.NewServer(FileServer(Dir(".")))
defer ts.Close()
for _, path := range []string{"/testdata/", "/testdata/index.html"} {
res, err := Get(ts.URL + path)
if err != nil {
t.Fatal(err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal("reading Body:", err)
}
if s := string(b); s != want {
t.Errorf("for path %q got %q, want %q", path, s, want)
}
res.Body.Close()
}
}
func TestFileServerZeroByte(t *testing.T) {
defer afterTest(t)
ts := httptest.NewServer(FileServer(Dir(".")))
defer ts.Close()
res, err := Get(ts.URL + "/..\x00")
if err != nil {
t.Fatal(err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal("reading Body:", err)
}
if res.StatusCode == 200 {
t.Errorf("got status 200; want an error. Body is:\n%s", string(b))
}
}
type fakeFileInfo struct {
dir bool
basename string
modtime time.Time
ents []*fakeFileInfo
contents string
err error
}
func (f *fakeFileInfo) Name() string { return f.basename }
func (f *fakeFileInfo) Sys() interface{} { return nil }
func (f *fakeFileInfo) ModTime() time.Time { return f.modtime }
func (f *fakeFileInfo) IsDir() bool { return f.dir }
func (f *fakeFileInfo) Size() int64 { return int64(len(f.contents)) }
func (f *fakeFileInfo) Mode() os.FileMode {
if f.dir {
return 0755 | os.ModeDir
}
return 0644
}
type fakeFile struct {
io.ReadSeeker
fi *fakeFileInfo
path string // as opened
entpos int
}
func (f *fakeFile) Close() error { return nil }
func (f *fakeFile) Stat() (os.FileInfo, error) { return f.fi, nil }
func (f *fakeFile) Readdir(count int) ([]os.FileInfo, error) {
if !f.fi.dir {
return nil, os.ErrInvalid
}
var fis []os.FileInfo
limit := f.entpos + count
if count <= 0 || limit > len(f.fi.ents) {
limit = len(f.fi.ents)
}
for ; f.entpos < limit; f.entpos++ {
fis = append(fis, f.fi.ents[f.entpos])
}
if len(fis) == 0 && count > 0 {
return fis, io.EOF
} else {
return fis, nil
}
}
type fakeFS map[string]*fakeFileInfo
func (fs fakeFS) Open(name string) (File, error) {
name = path.Clean(name)
f, ok := fs[name]
if !ok {
return nil, os.ErrNotExist
}
if f.err != nil {
return nil, f.err
}
return &fakeFile{ReadSeeker: strings.NewReader(f.contents), fi: f, path: name}, nil
}
func TestDirectoryIfNotModified(t *testing.T) {
defer afterTest(t)
const indexContents = "I am a fake index.html file"
fileMod := time.Unix(1000000000, 0).UTC()
fileModStr := fileMod.Format(TimeFormat)
dirMod := time.Unix(123, 0).UTC()
indexFile := &fakeFileInfo{
basename: "index.html",
modtime: fileMod,
contents: indexContents,
}
fs := fakeFS{
"/": &fakeFileInfo{
dir: true,
modtime: dirMod,
ents: []*fakeFileInfo{indexFile},
},
"/index.html": indexFile,
}
ts := httptest.NewServer(FileServer(fs))
defer ts.Close()
res, err := Get(ts.URL)
if err != nil {
t.Fatal(err)
}
b, err := ioutil.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
if string(b) != indexContents {
t.Fatalf("Got body %q; want %q", b, indexContents)
}
res.Body.Close()
lastMod := res.Header.Get("Last-Modified")
if lastMod != fileModStr {
t.Fatalf("initial Last-Modified = %q; want %q", lastMod, fileModStr)
}
req, _ := NewRequest("GET", ts.URL, nil)
req.Header.Set("If-Modified-Since", lastMod)
res, err = DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 304 {
t.Fatalf("Code after If-Modified-Since request = %v; want 304", res.StatusCode)
}
res.Body.Close()
// Advance the index.html file's modtime, but not the directory's.
indexFile.modtime = indexFile.modtime.Add(1 * time.Hour)
res, err = DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
if res.StatusCode != 200 {
t.Fatalf("Code after second If-Modified-Since request = %v; want 200; res is %#v", res.StatusCode, res)
}
res.Body.Close()
}
func mustStat(t *testing.T, fileName string) os.FileInfo {
fi, err := os.Stat(fileName)
if err != nil {
t.Fatal(err)
}
return fi
}
func TestServeContent(t *testing.T) {
defer afterTest(t)
type serveParam struct {
name string
modtime time.Time
content io.ReadSeeker
contentType string
etag string
}
servec := make(chan serveParam, 1)
ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
p := <-servec
if p.etag != "" {
w.Header().Set("ETag", p.etag)
}
if p.contentType != "" {
w.Header().Set("Content-Type", p.contentType)
}
ServeContent(w, r, p.name, p.modtime, p.content)
}))
defer ts.Close()
type testCase struct {
// One of file or content must be set:
file string
content io.ReadSeeker
modtime time.Time
serveETag string // optional
serveContentType string // optional
reqHeader map[string]string
wantLastMod string
wantContentType string
wantContentRange string
wantStatus int
}
htmlModTime := mustStat(t, "testdata/index.html").ModTime()
tests := map[string]testCase{
"no_last_modified": {
file: "testdata/style.css",
wantContentType: "text/css; charset=utf-8",
wantStatus: 200,
},
"with_last_modified": {
file: "testdata/index.html",
wantContentType: "text/html; charset=utf-8",
modtime: htmlModTime,
wantLastMod: htmlModTime.UTC().Format(TimeFormat),
wantStatus: 200,
},
"not_modified_modtime": {
file: "testdata/style.css",
serveETag: `"foo"`, // Last-Modified sent only when no ETag
modtime: htmlModTime,
reqHeader: map[string]string{
"If-Modified-Since": htmlModTime.UTC().Format(TimeFormat),
},
wantStatus: 304,
},
"not_modified_modtime_with_contenttype": {
file: "testdata/style.css",
serveContentType: "text/css", // explicit content type
serveETag: `"foo"`, // Last-Modified sent only when no ETag
modtime: htmlModTime,
reqHeader: map[string]string{
"If-Modified-Since": htmlModTime.UTC().Format(TimeFormat),
},
wantStatus: 304,
},
"not_modified_etag": {
file: "testdata/style.css",
serveETag: `"foo"`,
reqHeader: map[string]string{
"If-None-Match": `"foo"`,
},
wantStatus: 304,
},
"not_modified_etag_no_seek": {
content: panicOnSeek{nil}, // should never be called
serveETag: `W/"foo"`, // If-None-Match uses weak ETag comparison
reqHeader: map[string]string{
"If-None-Match": `"baz", W/"foo"`,
},
wantStatus: 304,
},
"if_none_match_mismatch": {
file: "testdata/style.css",
serveETag: `"foo"`,
reqHeader: map[string]string{
"If-None-Match": `"Foo"`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"range_good": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"Range": "bytes=0-4",
},
wantStatus: StatusPartialContent,
wantContentType: "text/css; charset=utf-8",
wantContentRange: "bytes 0-4/8",
},
"range_match": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": `"A"`,
},
wantStatus: StatusPartialContent,
wantContentType: "text/css; charset=utf-8",
wantContentRange: "bytes 0-4/8",
},
"range_match_weak_etag": {
file: "testdata/style.css",
serveETag: `W/"A"`,
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": `W/"A"`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"range_no_overlap": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"Range": "bytes=10-20",
},
wantStatus: StatusRequestedRangeNotSatisfiable,
wantContentType: "text/plain; charset=utf-8",
wantContentRange: "bytes */8",
},
// An If-Range resource for entity "A", but entity "B" is now current.
// The Range request should be ignored.
"range_no_match": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": `"B"`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"range_with_modtime": {
file: "testdata/style.css",
modtime: time.Date(2014, 6, 25, 17, 12, 18, 0 /* nanos */, time.UTC),
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": "Wed, 25 Jun 2014 17:12:18 GMT",
},
wantStatus: StatusPartialContent,
wantContentType: "text/css; charset=utf-8",
wantContentRange: "bytes 0-4/8",
wantLastMod: "Wed, 25 Jun 2014 17:12:18 GMT",
},
"range_with_modtime_nanos": {
file: "testdata/style.css",
modtime: time.Date(2014, 6, 25, 17, 12, 18, 123 /* nanos */, time.UTC),
reqHeader: map[string]string{
"Range": "bytes=0-4",
"If-Range": "Wed, 25 Jun 2014 17:12:18 GMT",
},
wantStatus: StatusPartialContent,
wantContentType: "text/css; charset=utf-8",
wantContentRange: "bytes 0-4/8",
wantLastMod: "Wed, 25 Jun 2014 17:12:18 GMT",
},
"unix_zero_modtime": {
content: strings.NewReader("<html>foo"),
modtime: time.Unix(0, 0),
wantStatus: StatusOK,
wantContentType: "text/html; charset=utf-8",
},
"ifmatch_matches": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"If-Match": `"Z", "A"`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"ifmatch_star": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"If-Match": `*`,
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
},
"ifmatch_failed": {
file: "testdata/style.css",
serveETag: `"A"`,
reqHeader: map[string]string{
"If-Match": `"B"`,
},
wantStatus: 412,
wantContentType: "text/plain; charset=utf-8",
},
"ifmatch_fails_on_weak_etag": {
file: "testdata/style.css",
serveETag: `W/"A"`,
reqHeader: map[string]string{
"If-Match": `W/"A"`,
},
wantStatus: 412,
wantContentType: "text/plain; charset=utf-8",
},
"if_unmodified_since_true": {
file: "testdata/style.css",
modtime: htmlModTime,
reqHeader: map[string]string{
"If-Unmodified-Since": htmlModTime.UTC().Format(TimeFormat),
},
wantStatus: 200,
wantContentType: "text/css; charset=utf-8",
wantLastMod: htmlModTime.UTC().Format(TimeFormat),
},
"if_unmodified_since_false": {
file: "testdata/style.css",
modtime: htmlModTime,
reqHeader: map[string]string{
"If-Unmodified-Since": htmlModTime.Add(-2 * time.Second).UTC().Format(TimeFormat),
},
wantStatus: 412,
wantContentType: "text/plain; charset=utf-8",
wantLastMod: htmlModTime.UTC().Format(TimeFormat),
},
}
for testName, tt := range tests {
var content io.ReadSeeker
if tt.file != "" {
f, err := os.Open(tt.file)
if err != nil {
t.Fatalf("test %q: %v", testName, err)
}
defer f.Close()
content = f
} else {
content = tt.content
}
servec <- serveParam{
name: filepath.Base(tt.file),
content: content,
modtime: tt.modtime,
etag: tt.serveETag,
contentType: tt.serveContentType,
}
req, err := NewRequest("GET", ts.URL, nil)
if err != nil {
t.Fatal(err)
}
for k, v := range tt.reqHeader {
req.Header.Set(k, v)
}
res, err := DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
io.Copy(ioutil.Discard, res.Body)
res.Body.Close()
if res.StatusCode != tt.wantStatus {
t.Errorf("test %q: status = %d; want %d", testName, res.StatusCode, tt.wantStatus)
}
if g, e := res.Header.Get("Content-Type"), tt.wantContentType; g != e {
t.Errorf("test %q: content-type = %q, want %q", testName, g, e)
}
if g, e := res.Header.Get("Content-Range"), tt.wantContentRange; g != e {
t.Errorf("test %q: content-range = %q, want %q", testName, g, e)
}
if g, e := res.Header.Get("Last-Modified"), tt.wantLastMod; g != e {
t.Errorf("test %q: last-modified = %q, want %q", testName, g, e)
}
}
}
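// The servec channel above is a handy pattern for table-driven handler tests:
// the test goroutine parks the next case's parameters in a buffered channel
// and the shared handler pops them once per request. A stripped-down sketch
// of the same idea (hypothetical names, not part of the real tests):
//
//	params := make(chan string, 1)
//	ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
//		io.WriteString(w, <-params)
//	}))
//	params <- "hello"
//	res, _ := Get(ts.URL) // body is "hello"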
// Issue 12991
func TestServerFileStatError(t *testing.T) {
rec := httptest.NewRecorder()
r, _ := NewRequest("GET", "http://foo/", nil)
redirect := false
name := "file.txt"
fs := issue12991FS{}
ExportServeFile(rec, r, fs, name, redirect)
if body := rec.Body.String(); !strings.Contains(body, "403") || !strings.Contains(body, "Forbidden") {
t.Errorf("wanted 403 forbidden message; got: %s", body)
}
}
type issue12991FS struct{}
func (issue12991FS) Open(string) (File, error) { return issue12991File{}, nil }
type issue12991File struct{ File }
func (issue12991File) Stat() (os.FileInfo, error) { return nil, os.ErrPermission }
func (issue12991File) Close() error { return nil }
func TestServeContentErrorMessages(t *testing.T) {
defer afterTest(t)
fs := fakeFS{
"/500": &fakeFileInfo{
err: errors.New("random error"),
},
"/403": &fakeFileInfo{
err: &os.PathError{Err: os.ErrPermission},
},
}
ts := httptest.NewServer(FileServer(fs))
defer ts.Close()
for _, code := range []int{403, 404, 500} {
res, err := DefaultClient.Get(fmt.Sprintf("%s/%d", ts.URL, code))
if err != nil {
t.Errorf("Error fetching /%d: %v", code, err)
continue
}
if res.StatusCode != code {
t.Errorf("For /%d, status code = %d; want %d", code, res.StatusCode, code)
}
res.Body.Close()
}
}
// verifies that sendfile is being used on Linux
func TestLinuxSendfile(t *testing.T) {
setParallel(t)
defer afterTest(t)
if runtime.GOOS != "linux" {
t.Skip("skipping; linux-only test")
}
if _, err := exec.LookPath("strace"); err != nil {
t.Skip("skipping; strace not found in path")
}
ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatal(err)
}
lnf, err := ln.(*net.TCPListener).File()
if err != nil {
t.Fatal(err)
}
defer ln.Close()
syscalls := "sendfile,sendfile64"
switch runtime.GOARCH {
case "mips64", "mips64le", "s390x":
// strace on the above platforms doesn't support sendfile64
		// and will error out if we specify it with `-e trace=...`.
syscalls = "sendfile"
}
var buf bytes.Buffer
child := exec.Command("strace", "-f", "-q", "-e", "trace="+syscalls, os.Args[0], "-test.run=TestLinuxSendfileChild")
child.ExtraFiles = append(child.ExtraFiles, lnf)
child.Env = append([]string{"GO_WANT_HELPER_PROCESS=1"}, os.Environ()...)
child.Stdout = &buf
child.Stderr = &buf
if err := child.Start(); err != nil {
t.Skipf("skipping; failed to start straced child: %v", err)
}
res, err := Get(fmt.Sprintf("http://%s/", ln.Addr()))
if err != nil {
t.Fatalf("http client error: %v", err)
}
_, err = io.Copy(ioutil.Discard, res.Body)
if err != nil {
t.Fatalf("client body read error: %v", err)
}
res.Body.Close()
// Force child to exit cleanly.
Post(fmt.Sprintf("http://%s/quit", ln.Addr()), "", nil)
child.Wait()
rx := regexp.MustCompile(`sendfile(64)?\(\d+,\s*\d+,\s*NULL,\s*\d+`)
out := buf.String()
if !rx.MatchString(out) {
t.Errorf("no sendfile system call found in:\n%s", out)
}
}
func getBody(t *testing.T, testName string, req Request) (*Response, []byte) {
r, err := DefaultClient.Do(&req)
if err != nil {
t.Fatalf("%s: for URL %q, send error: %v", testName, req.URL.String(), err)
}
b, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Fatalf("%s: for URL %q, reading body: %v", testName, req.URL.String(), err)
}
return r, b
}
// TestLinuxSendfileChild isn't a real test. It's used as a helper process
// for TestLinuxSendfile.
func TestLinuxSendfileChild(*testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
defer os.Exit(0)
fd3 := os.NewFile(3, "ephemeral-port-listener")
ln, err := net.FileListener(fd3)
if err != nil {
panic(err)
}
mux := NewServeMux()
mux.Handle("/", FileServer(Dir("testdata")))
mux.HandleFunc("/quit", func(ResponseWriter, *Request) {
os.Exit(0)
})
s := &Server{Handler: mux}
err = s.Serve(ln)
if err != nil {
panic(err)
}
}
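// The GO_WANT_HELPER_PROCESS guard above is the standard pattern for reusing
// the test binary as a child process: the parent re-executes os.Args[0] with
// -test.run pinned to the helper and sets the env var so that only then does
// the helper body run. A condensed sketch of the parent side (the helper
// name is hypothetical):
func startHelperProcess() *exec.Cmd {
	cmd := exec.Command(os.Args[0], "-test.run=TestMyHelper")
	cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1")
	return cmd
}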
func TestFileServerCleanPath(t *testing.T) {
tests := []struct {
path string
wantCode int
wantOpen []string
}{
{"/", 200, []string{"/", "/index.html"}},
{"/dir", 301, []string{"/dir"}},
{"/dir/", 200, []string{"/dir", "/dir/index.html"}},
}
for _, tt := range tests {
var log []string
rr := httptest.NewRecorder()
req, _ := NewRequest("GET", "http://foo.localhost"+tt.path, nil)
FileServer(fileServerCleanPathDir{&log}).ServeHTTP(rr, req)
if !reflect.DeepEqual(log, tt.wantOpen) {
t.Logf("For %s: Opens = %q; want %q", tt.path, log, tt.wantOpen)
}
if rr.Code != tt.wantCode {
t.Logf("For %s: Response code = %d; want %d", tt.path, rr.Code, tt.wantCode)
}
}
}
type fileServerCleanPathDir struct {
log *[]string
}
func (d fileServerCleanPathDir) Open(path string) (File, error) {
*(d.log) = append(*(d.log), path)
if path == "/" || path == "/dir" || path == "/dir/" {
// Just return back something that's a directory.
return Dir(".").Open(".")
}
return nil, os.ErrNotExist
}
type panicOnSeek struct{ io.ReadSeeker }
func Test_scanETag(t *testing.T) {
tests := []struct {
in string
wantETag string
wantRemain string
}{
{`W/"etag-1"`, `W/"etag-1"`, ""},
{`"etag-2"`, `"etag-2"`, ""},
{`"etag-1", "etag-2"`, `"etag-1"`, `, "etag-2"`},
{"", "", ""},
{"W/", "", ""},
{`W/"truc`, "", ""},
{`w/"case-sensitive"`, "", ""},
}
for _, test := range tests {
etag, remain := ExportScanETag(test.in)
if etag != test.wantETag || remain != test.wantRemain {
t.Errorf("scanETag(%q)=%q %q, want %q %q", test.in, etag, remain, test.wantETag, test.wantRemain)
}
}
}
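// The weak-vs-strong cases above follow RFC 7232's two comparison rules:
// If-None-Match uses weak comparison (a W/ prefix is ignored on both sides),
// while If-Match and If-Range require a strong match. A sketch of those rules
// (hypothetical helpers mirroring net/http's unexported ones):
func exampleETagStrongMatch(a, b string) bool {
	return a == b && a != "" && a[0] == '"'
}
func exampleETagWeakMatch(a, b string) bool {
	return strings.TrimPrefix(a, "W/") == strings.TrimPrefix(b, "W/")
}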
| [
"\"GO_WANT_HELPER_PROCESS\""
]
| []
| [
"GO_WANT_HELPER_PROCESS"
]
| [] | ["GO_WANT_HELPER_PROCESS"] | go | 1 | 0 | |
cmd/gomobile/bind_test.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"testing"
"text/template"
)
func TestBindAndroid(t *testing.T) {
androidHome := os.Getenv("ANDROID_HOME")
if androidHome == "" {
t.Skip("ANDROID_HOME not found, skipping bind")
}
platform, err := androidAPIPath()
if err != nil {
t.Skip("No android API platform found in $ANDROID_HOME, skipping bind")
}
platform = strings.Replace(platform, androidHome, "$ANDROID_HOME", -1)
defer func() {
xout = os.Stderr
buildN = false
buildX = false
buildO = ""
buildTarget = ""
bindJavaPkg = ""
}()
buildN = true
buildX = true
buildO = "asset.aar"
buildTarget = "android/arm"
tests := []struct {
javaPkg string
}{
{
// Empty javaPkg
},
{
javaPkg: "com.example.foo",
},
}
for _, tc := range tests {
bindJavaPkg = tc.javaPkg
buf := new(bytes.Buffer)
xout = buf
gopath = filepath.SplitList(goEnv("GOPATH"))[0]
if goos == "windows" {
os.Setenv("HOMEDRIVE", "C:")
}
cmdBind.flag.Parse([]string{"github.com/danbrough/mobile/asset"})
err := runBind(cmdBind)
if err != nil {
t.Log(buf.String())
t.Fatal(err)
}
got := filepath.ToSlash(buf.String())
output, err := defaultOutputData("")
if err != nil {
t.Fatal(err)
}
data := struct {
outputData
AndroidPlatform string
JavaPkg string
}{
outputData: output,
AndroidPlatform: platform,
JavaPkg: tc.javaPkg,
}
wantBuf := new(bytes.Buffer)
if err := bindAndroidTmpl.Execute(wantBuf, data); err != nil {
t.Errorf("%+v: computing diff failed: %v", tc, err)
continue
}
diff, err := diff(got, wantBuf.String())
if err != nil {
t.Errorf("%+v: computing diff failed: %v", tc, err)
continue
}
if diff != "" {
t.Errorf("%+v: unexpected output:\n%s", tc, diff)
}
}
}
func TestBindApple(t *testing.T) {
if !xcodeAvailable() {
t.Skip("Xcode is missing")
}
defer func() {
xout = os.Stderr
buildN = false
buildX = false
buildO = ""
buildTarget = ""
bindPrefix = ""
}()
buildN = true
buildX = true
buildO = "Asset.xcframework"
buildTarget = "ios/arm64"
tests := []struct {
prefix string
out string
}{
{
// empty prefix
},
{
prefix: "Foo",
},
{
out: "Abcde.xcframework",
},
}
for _, tc := range tests {
bindPrefix = tc.prefix
if tc.out != "" {
buildO = tc.out
}
buf := new(bytes.Buffer)
xout = buf
gopath = filepath.SplitList(goEnv("GOPATH"))[0]
if goos == "windows" {
os.Setenv("HOMEDRIVE", "C:")
}
cmdBind.flag.Parse([]string{"github.com/danbrough/mobile/asset"})
if err := runBind(cmdBind); err != nil {
t.Log(buf.String())
t.Fatal(err)
}
got := filepath.ToSlash(buf.String())
output, err := defaultOutputData("")
if err != nil {
t.Fatal(err)
}
data := struct {
outputData
Output string
Prefix string
}{
outputData: output,
Output: buildO[:len(buildO)-len(".xcframework")],
Prefix: tc.prefix,
}
wantBuf := new(bytes.Buffer)
if err := bindAppleTmpl.Execute(wantBuf, data); err != nil {
t.Errorf("%+v: computing diff failed: %v", tc, err)
continue
}
diff, err := diff(got, wantBuf.String())
if err != nil {
t.Errorf("%+v: computing diff failed: %v", tc, err)
continue
}
if diff != "" {
t.Errorf("%+v: unexpected output:\n%s", tc, diff)
}
}
}
var bindAndroidTmpl = template.Must(template.New("output").Parse(`GOMOBILE={{.GOPATH}}/pkg/gomobile
WORK=$WORK
GOOS=android CGO_ENABLED=1 gobind -lang=go,java -outdir=$WORK{{if .JavaPkg}} -javapkg={{.JavaPkg}}{{end}} github.com/danbrough/mobile/asset
mkdir -p $WORK/src
PWD=$WORK/src GOOS=android GOARCH=arm CC=$NDK_PATH/toolchains/llvm/prebuilt/{{.NDKARCH}}/bin/armv7a-linux-androideabi16-clang CXX=$NDK_PATH/toolchains/llvm/prebuilt/{{.NDKARCH}}/bin/armv7a-linux-androideabi16-clang++ CGO_ENABLED=1 GOARM=7 GOPATH=$WORK:$GOPATH go mod tidy
PWD=$WORK/src GOOS=android GOARCH=arm CC=$NDK_PATH/toolchains/llvm/prebuilt/{{.NDKARCH}}/bin/armv7a-linux-androideabi16-clang CXX=$NDK_PATH/toolchains/llvm/prebuilt/{{.NDKARCH}}/bin/armv7a-linux-androideabi16-clang++ CGO_ENABLED=1 GOARM=7 GOPATH=$WORK:$GOPATH go build -x -buildmode=c-shared -o=$WORK/android/src/main/jniLibs/armeabi-v7a/libgojni.so ./gobind
PWD=$WORK/java javac -d $WORK/javac-output -source 1.7 -target 1.7 -bootclasspath {{.AndroidPlatform}}/android.jar *.java
jar c -C $WORK/javac-output .
`))
var bindAppleTmpl = template.Must(template.New("output").Parse(`GOMOBILE={{.GOPATH}}/pkg/gomobile
WORK=$WORK
rm -r -f "{{.Output}}.xcframework"
GOOS=ios CGO_ENABLED=1 gobind -lang=go,objc -outdir=$WORK/ios -tags=ios{{if .Prefix}} -prefix={{.Prefix}}{{end}} github.com/danbrough/mobile/asset
mkdir -p $WORK/ios/src
PWD=$WORK/ios/src GOOS=ios GOARCH=arm64 GOFLAGS=-tags=ios CC=iphoneos-clang CXX=iphoneos-clang++ CGO_CFLAGS=-isysroot iphoneos -miphoneos-version-min=13.0 -fembed-bitcode -arch arm64 CGO_CXXFLAGS=-isysroot iphoneos -miphoneos-version-min=13.0 -fembed-bitcode -arch arm64 CGO_LDFLAGS=-isysroot iphoneos -miphoneos-version-min=13.0 -fembed-bitcode -arch arm64 CGO_ENABLED=1 DARWIN_SDK=iphoneos GOPATH=$WORK/ios:$GOPATH go mod tidy
PWD=$WORK/ios/src GOOS=ios GOARCH=arm64 GOFLAGS=-tags=ios CC=iphoneos-clang CXX=iphoneos-clang++ CGO_CFLAGS=-isysroot iphoneos -miphoneos-version-min=13.0 -fembed-bitcode -arch arm64 CGO_CXXFLAGS=-isysroot iphoneos -miphoneos-version-min=13.0 -fembed-bitcode -arch arm64 CGO_LDFLAGS=-isysroot iphoneos -miphoneos-version-min=13.0 -fembed-bitcode -arch arm64 CGO_ENABLED=1 DARWIN_SDK=iphoneos GOPATH=$WORK/ios:$GOPATH go build -x -buildmode=c-archive -o $WORK/{{.Output}}-ios-arm64.a ./gobind
mkdir -p $WORK/ios/iphoneos/{{.Output}}.framework/Versions/A/Headers
ln -s A $WORK/ios/iphoneos/{{.Output}}.framework/Versions/Current
ln -s Versions/Current/Headers $WORK/ios/iphoneos/{{.Output}}.framework/Headers
ln -s Versions/Current/{{.Output}} $WORK/ios/iphoneos/{{.Output}}.framework/{{.Output}}
xcrun lipo $WORK/{{.Output}}-ios-arm64.a -create -o $WORK/ios/iphoneos/{{.Output}}.framework/Versions/A/{{.Output}}
cp $WORK/ios/src/gobind/{{.Prefix}}Asset.objc.h $WORK/ios/iphoneos/{{.Output}}.framework/Versions/A/Headers/{{.Prefix}}Asset.objc.h
mkdir -p $WORK/ios/iphoneos/{{.Output}}.framework/Versions/A/Headers
cp $WORK/ios/src/gobind/Universe.objc.h $WORK/ios/iphoneos/{{.Output}}.framework/Versions/A/Headers/Universe.objc.h
mkdir -p $WORK/ios/iphoneos/{{.Output}}.framework/Versions/A/Headers
cp $WORK/ios/src/gobind/ref.h $WORK/ios/iphoneos/{{.Output}}.framework/Versions/A/Headers/ref.h
mkdir -p $WORK/ios/iphoneos/{{.Output}}.framework/Versions/A/Headers
mkdir -p $WORK/ios/iphoneos/{{.Output}}.framework/Versions/A/Headers
mkdir -p $WORK/ios/iphoneos/{{.Output}}.framework/Versions/A/Resources
ln -s Versions/Current/Resources $WORK/ios/iphoneos/{{.Output}}.framework/Resources
mkdir -p $WORK/ios/iphoneos/{{.Output}}.framework/Resources
mkdir -p $WORK/ios/iphoneos/{{.Output}}.framework/Versions/A/Modules
ln -s Versions/Current/Modules $WORK/ios/iphoneos/{{.Output}}.framework/Modules
xcodebuild -create-xcframework -framework $WORK/ios/iphoneos/{{.Output}}.framework -output {{.Output}}.xcframework
`))
func TestBindAppleAll(t *testing.T) {
if !xcodeAvailable() {
t.Skip("Xcode is missing")
}
defer func() {
xout = os.Stderr
buildN = false
buildX = false
buildO = ""
buildTarget = ""
bindPrefix = ""
}()
buildN = true
buildX = true
buildO = "Asset.xcframework"
buildTarget = "ios"
buf := new(bytes.Buffer)
xout = buf
gopath = filepath.SplitList(goEnv("GOPATH"))[0]
if goos == "windows" {
os.Setenv("HOMEDRIVE", "C:")
}
cmdBind.flag.Parse([]string{"github.com/danbrough/mobile/asset"})
if err := runBind(cmdBind); err != nil {
t.Log(buf.String())
t.Fatal(err)
}
}
func TestBindWithGoModules(t *testing.T) {
if runtime.GOOS == "android" || runtime.GOOS == "ios" {
t.Skipf("gomobile and gobind are not available on %s", runtime.GOOS)
}
dir, err := ioutil.TempDir("", "gomobile-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
if out, err := exec.Command("go", "build", "-o="+dir, "github.com/danbrough/mobile/cmd/gobind").CombinedOutput(); err != nil {
t.Fatalf("%v: %s", err, string(out))
}
if out, err := exec.Command("go", "build", "-o="+dir, "github.com/danbrough/mobile/cmd/gomobile").CombinedOutput(); err != nil {
t.Fatalf("%v: %s", err, string(out))
}
path := dir
if p := os.Getenv("PATH"); p != "" {
path += string(filepath.ListSeparator) + p
}
for _, target := range []string{"android", "ios"} {
t.Run(target, func(t *testing.T) {
switch target {
case "android":
androidHome := os.Getenv("ANDROID_HOME")
if androidHome == "" {
t.Skip("ANDROID_HOME not found, skipping bind")
}
if _, err := androidAPIPath(); err != nil {
t.Skip("No android API platform found in $ANDROID_HOME, skipping bind")
}
case "ios":
if !xcodeAvailable() {
t.Skip("Xcode is missing")
}
}
var out string
switch target {
case "android":
out = filepath.Join(dir, "cgopkg.aar")
case "ios":
out = filepath.Join(dir, "Cgopkg.xcframework")
}
tests := []struct {
Name string
Path string
Dir string
}{
{
Name: "Absolute Path",
Path: "github.com/danbrough/mobile/bind/testdata/cgopkg",
},
{
Name: "Relative Path",
Path: "./bind/testdata/cgopkg",
Dir: filepath.Join("..", ".."),
},
}
for _, tc := range tests {
tc := tc
t.Run(tc.Name, func(t *testing.T) {
cmd := exec.Command(filepath.Join(dir, "gomobile"), "bind", "-target="+target, "-o="+out, tc.Path)
cmd.Env = append(os.Environ(), "PATH="+path, "GO111MODULE=on")
cmd.Dir = tc.Dir
if out, err := cmd.CombinedOutput(); err != nil {
t.Errorf("gomobile bind failed: %v\n%s", err, string(out))
}
})
}
})
}
}
| [
"\"ANDROID_HOME\"",
"\"PATH\"",
"\"ANDROID_HOME\""
]
| []
| [
"ANDROID_HOME",
"PATH"
]
| [] | ["ANDROID_HOME", "PATH"] | go | 2 | 0 | |
src/garage/experiment/experiment.py | # flake8: noqa
import base64
import collections
import datetime
import inspect
import os
import os.path as osp
import pickle
import re
import subprocess
import sys
import dateutil.tz
import numpy as np
from garage.core import Serializable
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def flatten(l):
return [item for sublist in l for item in sublist]
class BinaryOp(Serializable):
def __init__(self):
Serializable.quick_init(self, locals())
def rdiv(self, a, b):
return b / a
# def __init__(self, opname, a, b):
# self.opname = opname
# self.a = a
# self.b = b
class VariantDict(AttrDict):
def __init__(self, d, hidden_keys):
super(VariantDict, self).__init__(d)
self._hidden_keys = hidden_keys
def dump(self):
return {k: v for k, v in self.items() if k not in self._hidden_keys}
class VariantGenerator:
"""
Usage:
vg = VariantGenerator()
vg.add("param1", [1, 2, 3])
vg.add("param2", ['x', 'y'])
vg.variants() => # all combinations of [1,2,3] x ['x','y']
Supports noncyclic dependency among parameters:
vg = VariantGenerator()
vg.add("param1", [1, 2, 3])
vg.add("param2", lambda param1: [param1+1, param1+2])
    vg.variants() => # all combinations, with param2 derived from param1
"""
def __init__(self):
self._variants = []
self._populate_variants()
self._hidden_keys = []
for k, vs, cfg in self._variants:
if cfg.get('hide', False):
self._hidden_keys.append(k)
def add(self, key, vals, **kwargs):
self._variants.append((key, vals, kwargs))
def _populate_variants(self):
methods = inspect.getmembers(
self.__class__,
predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x))
methods = [
x[1].__get__(self, self.__class__) for x in methods
if getattr(x[1], '__is_variant', False)
]
for m in methods:
self.add(m.__name__, m, **getattr(m, '__variant_config', dict()))
def variants(self, randomized=False):
ret = list(self.ivariants())
if randomized:
np.random.shuffle(ret)
return list(map(self.variant_dict, ret))
def variant_dict(self, variant):
return VariantDict(variant, self._hidden_keys)
def to_name_suffix(self, variant):
suffix = []
for k, vs, cfg in self._variants:
if not cfg.get('hide', False):
suffix.append(k + '_' + str(variant[k]))
return '_'.join(suffix)
def ivariants(self):
dependencies = list()
for key, vals, _ in self._variants:
if hasattr(vals, '__call__'):
args = inspect.getfullargspec(vals).args
if hasattr(vals, 'im_self') or hasattr(vals, '__self__'):
# remove the first 'self' parameter
args = args[1:]
dependencies.append((key, set(args)))
else:
dependencies.append((key, set()))
sorted_keys = []
# topo sort all nodes
while len(sorted_keys) < len(self._variants):
# get all nodes with zero in-degree
free_nodes = [k for k, v in dependencies if not v]
if not free_nodes:
error_msg = 'Invalid parameter dependency: \n'
for k, v in dependencies:
if v:
error_msg += k + ' depends on ' + ' & '.join(v) + '\n'
raise ValueError(error_msg)
dependencies = [(k, v) for k, v in dependencies
if k not in free_nodes]
# remove the free nodes from the remaining dependencies
for _, v in dependencies:
v.difference_update(free_nodes)
sorted_keys += free_nodes
return self._ivariants_sorted(sorted_keys)
def _ivariants_sorted(self, sorted_keys):
if not sorted_keys:
yield dict()
else:
first_keys = sorted_keys[:-1]
first_variants = self._ivariants_sorted(first_keys)
last_key = sorted_keys[-1]
last_vals = [v for k, v, _ in self._variants if k == last_key][0]
if hasattr(last_vals, '__call__'):
last_val_keys = inspect.getfullargspec(last_vals).args
if hasattr(last_vals, 'im_self') or hasattr(
last_vals, '__self__'):
last_val_keys = last_val_keys[1:]
else:
last_val_keys = None
for variant in first_variants:
if hasattr(last_vals, '__call__'):
last_variants = last_vals(
**{k: variant[k]
for k in last_val_keys})
for last_choice in last_variants:
yield AttrDict(variant, **{last_key: last_choice})
else:
for last_choice in last_vals:
yield AttrDict(variant, **{last_key: last_choice})
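def _example_variant_usage():
    # Illustrative sketch only (hypothetical parameters, not garage API):
    # 'batch' depends on 'lr', so ivariants() topologically sorts 'lr' first
    # and calls the lambda once per chosen lr value.
    vg = VariantGenerator()
    vg.add('lr', [0.1, 0.01])
    vg.add('batch', lambda lr: [32] if lr > 0.05 else [32, 64])
    return vg.variants()  # 1 + 2 = 3 variant dicts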
def variant(*args, **kwargs):
def _variant(fn):
fn.__is_variant = True
fn.__variant_config = kwargs
return fn
    if len(args) == 1 and callable(args[0]):
return _variant(args[0])
return _variant
def query_yes_no(question, default='yes'):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {'yes': True, 'y': True, 'ye': True, 'no': False, 'n': False}
if default is None:
prompt = ' [y/n] '
elif default == 'yes':
prompt = ' [Y/n] '
elif default == 'no':
prompt = ' [y/N] '
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
exp_count = 0
now = datetime.datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
def run_experiment(method_call=None,
batch_tasks=None,
exp_prefix='experiment',
exp_name=None,
log_dir=None,
script='garage.experiment.experiment_wrapper',
python_command='python',
dry=False,
env=None,
variant=None,
use_tf=False,
use_gpu=False,
use_cloudpickle=None,
pre_commands=None,
**kwargs):
"""Serialize the method call and run the experiment using the
specified mode.
Args:
method_call (callable): A method call.
batch_tasks (list[dict]): A batch of method calls.
exp_prefix (str): Name prefix for the experiment.
exp_name (str): Name of the experiment.
log_dir (str): Log directory for the experiment.
script (str): The name of the entrance point python script.
python_command (str): Python command to run the experiment.
dry (bool): Whether to do a dry-run, which only prints the
commands without executing them.
env (dict): Extra environment variables.
variant (dict): If provided, should be a dictionary of parameters.
        use_tf (bool): Whether the launched task uses TensorFlow; this
            controls the CUDA_VISIBLE_DEVICES handling below.
use_gpu (bool): Whether the launched task is running on GPU.
This triggers a few configuration changes including certain
environment flags.
use_cloudpickle (bool): Whether to use cloudpickle or not.
pre_commands (str): Pre commands to run the experiment.
"""
assert method_call is not None or batch_tasks is not None, (
'Must provide at least either method_call or batch_tasks')
if use_cloudpickle is None:
for task in (batch_tasks or [method_call]):
assert hasattr(task, '__call__')
use_cloudpickle = True
# ensure variant exists
if variant is None:
variant = dict()
if batch_tasks is None:
batch_tasks = [
dict(
kwargs,
pre_commands=pre_commands,
method_call=method_call,
exp_name=exp_name,
log_dir=log_dir,
env=env,
variant=variant,
use_cloudpickle=use_cloudpickle)
]
global exp_count
if use_tf:
if not use_gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = ''
else:
            # os.unsetenv does not update os.environ, and the child's env is
            # built from os.environ below, so pop the key instead.
            os.environ.pop('CUDA_VISIBLE_DEVICES', None)
for task in batch_tasks:
call = task.pop('method_call')
if use_cloudpickle:
import cloudpickle
data = base64.b64encode(cloudpickle.dumps(call)).decode('utf-8')
else:
data = base64.b64encode(pickle.dumps(call)).decode('utf-8')
task['args_data'] = data
exp_count += 1
if task.get('exp_name', None) is None:
task['exp_name'] = '{}_{}_{:04n}'.format(exp_prefix, timestamp,
exp_count)
if task.get('log_dir', None) is None:
task['log_dir'] = (
'{log_dir}/local/{exp_prefix}/{exp_name}'.format(
log_dir=osp.join(os.getcwd(), 'data'),
exp_prefix=exp_prefix.replace('_', '-'),
exp_name=task['exp_name']))
if task.get('variant', None) is not None:
variant = task.pop('variant')
if 'exp_name' not in variant:
variant['exp_name'] = task['exp_name']
task['variant_data'] = base64.b64encode(
pickle.dumps(variant)).decode('utf-8')
elif 'variant' in task:
del task['variant']
task['env'] = task.get('env', dict()) or dict()
task['env']['GARAGE_USE_GPU'] = str(use_gpu)
task['env']['GARAGE_USE_TF'] = str(use_tf)
for task in batch_tasks:
env = task.pop('env', None)
command = to_local_command(
task, python_command=python_command, script=script)
print(command)
if dry:
return
try:
if env is None:
env = dict()
subprocess.call(command, shell=True, env=dict(os.environ, **env))
except Exception as e:
print(e)
if isinstance(e, KeyboardInterrupt):
raise
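# Sketch of a typical invocation (an assumed example; the callable is
# serialized with cloudpickle and replayed by the experiment_wrapper script):
#
#   def train(variant):
#       ...  # build env/algo from variant and train
#
#   run_experiment(train, exp_prefix='demo', variant={'seed': 1}, dry=True)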
_find_unsafe = re.compile(r'[a-zA-Z0-9_^@%+=:,./-]').search
def _shellquote(s):
"""Return a shell-escaped version of the string *s*."""
if not s:
return "''"
if _find_unsafe(s) is None:
return s
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
return "'" + s.replace("'", "'\"'\"'") + "'"
def _to_param_val(v):
if v is None:
return ''
elif isinstance(v, list):
return ' '.join(map(_shellquote, list(map(str, v))))
else:
return _shellquote(str(v))
def to_local_command(params,
python_command='python',
script='garage.experiment.experiment_wrapper'):
command = python_command + ' -m ' + script
    # GARAGE_ENV is eval'd as a Python dict literal, so it must come from a
    # trusted source.
    garage_env = eval(os.environ.get('GARAGE_ENV', '{}'))
for k, v in garage_env.items():
command = '{}={} '.format(k, v) + command
pre_commands = params.pop('pre_commands', None)
post_commands = params.pop('post_commands', None)
if pre_commands is not None or post_commands is not None:
print('Not executing the pre_commands: ', pre_commands,
', nor post_commands: ', post_commands)
for k, v in params.items():
if isinstance(v, dict):
for nk, nv in v.items():
if str(nk) == '_name':
command += ' --{} {}'.format(k, _to_param_val(nv))
else:
command += \
' --{}_{} {}'.format(k, nk, _to_param_val(nv))
else:
command += ' --{} {}'.format(k, _to_param_val(v))
return command
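# For illustration (hypothetical params): nested dicts are flattened into
# --key_subkey flags and every value is shell-quoted, e.g.
#
#   to_local_command({'exp_name': 'demo', 'n_itr': 5})
#   # => 'python -m garage.experiment.experiment_wrapper --exp_name demo --n_itr 5'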
def concretize(obj):
if isinstance(obj, dict):
# make sure that there's no hidden caveat
ret = dict()
for k, v in obj.items():
ret[concretize(k)] = concretize(v)
return ret
elif isinstance(obj, (list, tuple)):
return obj.__class__(list(map(concretize, obj)))
else:
return obj
| []
| []
| [
"CUDA_VISIBLE_DEVICES",
"GARAGE_ENV"
]
| [] | ["CUDA_VISIBLE_DEVICES", "GARAGE_ENV"] | python | 2 | 0 | |
storage/backward_compatible_s3.go | package storage
import (
"io"
"io/ioutil"
"net/http"
"os"
"strings"
"github.com/sirupsen/logrus"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
awsSession "github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/tokubai/kinu/logger"
)
type BackwardCompatibleS3Storage struct {
Storage
client *s3.S3
region string
bucket string
bucketBasePath string
}
type BackwardCompatibleS3StorageItem struct {
StorageItem
Object *s3.Object
}
func openBackwardCompatibleS3Storage() (Storage, error) {
s := &BackwardCompatibleS3Storage{}
err := s.Open()
if err != nil {
return nil, logger.ErrorDebug(err)
}
return s, nil
}
func (s *BackwardCompatibleS3Storage) Open() error {
s.region = os.Getenv("KINU_S3_REGION")
if len(s.region) == 0 {
return &ErrInvalidStorageOption{Message: "KINU_S3_REGION system env is required"}
}
s.bucket = os.Getenv("KINU_S3_BUCKET")
if len(s.bucket) == 0 {
return &ErrInvalidStorageOption{Message: "KINU_S3_BUCKET system env is required"}
}
s.bucketBasePath = os.Getenv("KINU_S3_BUCKET_BASE_PATH")
s.client = s3.New(awsSession.New(), &aws.Config{Region: aws.String(s.region)})
logger.WithFields(logrus.Fields{
"bucket": s.bucket,
"base_path": s.bucketBasePath,
"region": s.region,
}).Debug("open s3 storage")
return nil
}
func (s *BackwardCompatibleS3Storage) BuildKey(key string) string {
if len(s.bucketBasePath) == 0 {
return key
} else if strings.HasSuffix(s.bucketBasePath, "/") {
return s.bucketBasePath + key
} else {
return s.bucketBasePath + "/" + key
}
}
func (s *BackwardCompatibleS3Storage) Fetch(key string) (*Object, error) {
key = s.BuildKey(key)
params := &s3.GetObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(key),
}
logger.WithFields(logrus.Fields{
"bucket": s.bucket,
"key": key,
}).Debug("start get object from s3")
resp, err := s.client.GetObject(params)
if reqerr, ok := err.(awserr.RequestFailure); ok && reqerr.StatusCode() == http.StatusNotFound {
return nil, ErrImageNotFound
} else if err != nil {
return nil, logger.ErrorDebug(err)
}
logger.WithFields(logrus.Fields{
"bucket": s.bucket,
"key": key,
}).Debug("found object from s3")
defer resp.Body.Close()
object := &Object{
Metadata: make(map[string]string, 0),
}
for k, v := range resp.Metadata {
object.Metadata[k] = *v
}
object.Body, err = ioutil.ReadAll(resp.Body)
if err != nil {
return nil, logger.ErrorDebug(err)
}
return object, nil
}
func (s *BackwardCompatibleS3Storage) PutFromBlob(key string, image []byte, contentType string, metadata map[string]string) error {
tmpfile, err := ioutil.TempFile("", "kinu-upload")
if err != nil {
return logger.ErrorDebug(err)
}
_, err = tmpfile.Write(image)
if err != nil {
return logger.ErrorDebug(err)
}
defer func() {
tmpfile.Close()
os.Remove(tmpfile.Name())
}()
return s.Put(key, tmpfile, contentType, metadata)
}
func (s *BackwardCompatibleS3Storage) Put(key string, imageFile io.ReadSeeker, contentType string, metadata map[string]string) error {
putMetadata := make(map[string]*string, 0)
for k, v := range metadata {
putMetadata[k] = aws.String(v)
}
_, err := imageFile.Seek(0, 0)
if err != nil {
return logger.ErrorDebug(err)
}
_, err = s.client.PutObject(&s3.PutObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(s.BuildKey(key)),
ContentType: aws.String(contentType),
Body: imageFile,
Metadata: putMetadata,
})
logger.WithFields(logrus.Fields{
"bucket": s.bucket,
"key": s.BuildKey(key),
}).Debug("put to s3")
if err != nil {
return logger.ErrorDebug(err)
}
return nil
}
func (s *BackwardCompatibleS3Storage) List(key string) ([]StorageItem, error) {
resp, err := s.client.ListObjects(&s3.ListObjectsInput{
Bucket: aws.String(s.bucket),
Prefix: aws.String(s.BuildKey(key)),
})
if err != nil {
return nil, logger.ErrorDebug(err)
}
logger.WithFields(logrus.Fields{
"bucket": s.bucket,
"key": s.BuildKey(key),
}).Debug("start list object from s3")
items := make([]StorageItem, 0)
for _, object := range resp.Contents {
logger.WithFields(logrus.Fields{
"key": &object.Key,
}).Debug("found object")
item := BackwardCompatibleS3StorageItem{Object: object}
items = append(items, &item)
}
return items, nil
}
func (s *BackwardCompatibleS3Storage) Move(from string, to string) error {
fromKey := s.bucket + "/" + from
toKey := s.bucketBasePath + "/" + to
_, err := s.client.CopyObject(&s3.CopyObjectInput{
Bucket: aws.String(s.bucket),
CopySource: aws.String(fromKey),
Key: aws.String(toKey),
})
logger.WithFields(logrus.Fields{
"from": fromKey,
"to": toKey,
}).Debug("move s3 object start")
if reqerr, ok := err.(awserr.RequestFailure); ok && reqerr.StatusCode() == http.StatusNotFound {
return ErrImageNotFound
} else if err != nil {
return logger.ErrorDebug(err)
}
_, err = s.client.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(from),
})
if reqerr, ok := err.(awserr.RequestFailure); ok && reqerr.StatusCode() == http.StatusNotFound {
return ErrImageNotFound
} else if err != nil {
return logger.ErrorDebug(err)
}
return nil
}
func (s *BackwardCompatibleS3StorageItem) IsValid() bool {
if len(s.Extension()) == 0 {
return false
}
if len(s.ImageSize()) == 0 {
return false
}
return true
}
func (s *BackwardCompatibleS3StorageItem) Key() string {
return *s.Object.Key
}
func (s *BackwardCompatibleS3StorageItem) Filename() string {
path := strings.Split(s.Key(), "/")
return path[len(path)-1]
}
func (s *BackwardCompatibleS3StorageItem) Extension() string {
path := strings.Split(*s.Object.Key, ".")
return path[len(path)-1]
}
// KeyFormat: :image_type/:id/:id.original.:date.:format or :image_type/:id/:id.:format
func (s *BackwardCompatibleS3StorageItem) ImageSize() string {
if strings.Contains(s.Key(), "original") {
return "original"
} else {
return "1000"
}
}
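// A minimal end-to-end sketch (assumed usage; the KINU_S3_* env vars
// validated in Open must be set, and the key is hypothetical):
func exampleFetch() ([]byte, error) {
	s := &BackwardCompatibleS3Storage{}
	if err := s.Open(); err != nil {
		return nil, err
	}
	obj, err := s.Fetch("image/abc/abc.original.20190101.jpg")
	if err != nil {
		return nil, err
	}
	return obj.Body, nil
}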
| [
"\"KINU_S3_REGION\"",
"\"KINU_S3_BUCKET\"",
"\"KINU_S3_BUCKET_BASE_PATH\""
]
| []
| [
"KINU_S3_BUCKET_BASE_PATH",
"KINU_S3_REGION",
"KINU_S3_BUCKET"
]
| [] | ["KINU_S3_BUCKET_BASE_PATH", "KINU_S3_REGION", "KINU_S3_BUCKET"] | go | 3 | 0 | |
detect.go | package phpdist
import (
"os"
"github.com/paketo-buildpacks/packit/v2"
)
//go:generate faux --interface VersionParser --output fakes/version_parser.go
// BuildPlanMetadata is the buildpack specific data included in build plan
// requirements.
type BuildPlanMetadata struct {
Version string `toml:"version"`
VersionSource string `toml:"version-source"`
}
// Detect will return a packit.DetectFunc that will be invoked during the
// detect phase of the buildpack lifecycle.
//
// Detect always passes, and will contribute a Build Plan that provides php.
func Detect() packit.DetectFunc {
return func(context packit.DetectContext) (packit.DetectResult, error) {
var requirements []packit.BuildPlanRequirement
version := os.Getenv("BP_PHP_VERSION")
if version != "" {
requirements = append(requirements, packit.BuildPlanRequirement{
Name: "php",
Metadata: BuildPlanMetadata{
Version: version,
VersionSource: "BP_PHP_VERSION",
},
})
}
return packit.DetectResult{
Plan: packit.BuildPlan{
Provides: []packit.BuildPlanProvision{
{Name: "php"},
},
Requires: requirements,
},
}, nil
}
}
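// Illustrative behavior (a sketch of how packit detect functions are
// typically exercised; not taken from this repo's tests):
//
//	os.Setenv("BP_PHP_VERSION", "8.1.*")
//	result, err := Detect()(packit.DetectContext{})
//	// err == nil; result.Plan provides "php" and, because BP_PHP_VERSION is
//	// set, requires it with Version "8.1.*" and VersionSource "BP_PHP_VERSION".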
| [
"\"BP_PHP_VERSION\""
]
| []
| [
"BP_PHP_VERSION"
]
| [] | ["BP_PHP_VERSION"] | go | 1 | 0 | |
docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'IBM Spectrum Scale CSI'
copyright = '2019, IBM'
author = 'John Dunham'
master_doc = 'index'
# The full version, including alpha/beta/rc tags
release = '1.0.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
html_theme = 'default'
else:
html_theme = 'sphinx_rtd_theme'
html_theme_options = {
'collapse_navigation': True,
'sticky_navigation': True
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add md to suffix.
source_suffix = ['.md', '.rst']
# Markdown support.
source_parsers = { '.md' : 'recommonmark.parser.CommonMarkParser' }
# collection of substitutions.
rst_epilog="""
.. |driver-repo| replace:: GitHubDriver_
.. |operator-repo| replace:: GitHubOperator_
.. _GitHubOperator: https://github.com/IBM/
"""
| []
| []
| [
"READTHEDOCS"
]
| [] | ["READTHEDOCS"] | python | 1 | 0 | |
zgrab2/lib/http2/h2c/h2c.go | // Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package h2c implements the unencrypted "h2c" form of HTTP/2.
//
// The h2c protocol is the non-TLS version of HTTP/2 which is not available from
// net/http or golang.org/x/net/http2.
package h2c
import (
"bufio"
"bytes"
"encoding/base64"
"encoding/binary"
"errors"
"fmt"
"io"
"log"
"net"
"github.com/zmap/zgrab2/lib/modern_http"
"net/textproto"
"os"
"strings"
"golang.org/x/net/http/httpguts"
"github.com/zmap/zgrab2/lib/http2"
"github.com/zmap/zgrab2/lib/http2/hpack"
)
var (
http2VerboseLogs bool
)
func init() {
e := os.Getenv("GODEBUG")
if strings.Contains(e, "http2debug=1") || strings.Contains(e, "http2debug=2") {
http2VerboseLogs = true
}
}
// h2cHandler is a Handler which implements h2c by hijacking the HTTP/1 traffic
// that should be h2c traffic. There are two ways to begin a h2c connection
// (RFC 7540 Section 3.2 and 3.4): (1) Starting with Prior Knowledge - this
// works by starting an h2c connection with a string of bytes that is valid
// HTTP/1, but unlikely to occur in practice and (2) Upgrading from HTTP/1 to
// h2c - this works by using the HTTP/1 Upgrade header to request an upgrade to
// h2c. When either of those situations occurs we hijack the HTTP/1 connection,
// convert it to an HTTP/2 connection and pass the net.Conn to http2.ServeConn.
type h2cHandler struct {
Handler http.Handler
s *http2.Server
}
// NewHandler returns an http.Handler that wraps h, intercepting any h2c
// traffic. If a request is an h2c connection, it's hijacked and redirected to
// s.ServeConn. Otherwise the returned Handler just forwards requests to h. This
// works because h2c is designed to be parseable as valid HTTP/1, but ignored by
// any HTTP server that does not handle h2c. Therefore we leverage the HTTP/1
// compatible parts of the Go http library to parse and recognize h2c requests.
// Once a request is recognized as h2c, we hijack the connection and convert it
// to an HTTP/2 connection which is understandable to s.ServeConn. (s.ServeConn
// understands HTTP/2 except for the h2c part of it.)
func NewHandler(h http.Handler, s *http2.Server) http.Handler {
return &h2cHandler{
Handler: h,
s: s,
}
}
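// A minimal wiring sketch (assumed usage; mux and the address are
// hypothetical): the same cleartext listener serves both HTTP/1.1 and h2c,
// with s.ServeConn taking over any hijacked HTTP/2 traffic.
func exampleServe(mux http.Handler) error {
	h2s := &http2.Server{}
	return http.ListenAndServe(":8080", NewHandler(mux, h2s))
}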
// ServeHTTP implement the h2c support that is enabled by h2c.GetH2CHandler.
func (s h2cHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Handle h2c with prior knowledge (RFC 7540 Section 3.4)
if r.Method == "PRI" && len(r.Header) == 0 && r.URL.Path == "*" && r.Proto == "HTTP/2.0" {
if http2VerboseLogs {
log.Print("h2c: attempting h2c with prior knowledge.")
}
conn, err := initH2CWithPriorKnowledge(w)
if err != nil {
if http2VerboseLogs {
log.Printf("h2c: error h2c with prior knowledge: %v", err)
}
return
}
defer conn.Close()
s.s.ServeConn(conn, &http2.ServeConnOpts{Handler: s.Handler})
return
}
// Handle Upgrade to h2c (RFC 7540 Section 3.2)
if conn, err := h2cUpgrade(w, r); err == nil {
defer conn.Close()
s.s.ServeConn(conn, &http2.ServeConnOpts{Handler: s.Handler})
return
}
s.Handler.ServeHTTP(w, r)
return
}
// initH2CWithPriorKnowledge implements creating a h2c connection with prior
// knowledge (Section 3.4) and creates a net.Conn suitable for http2.ServeConn.
// All we have to do is look for the client preface that is supposed to be part
// of the body, and reforward the client preface on the net.Conn this function
// creates.
func initH2CWithPriorKnowledge(w http.ResponseWriter) (net.Conn, error) {
hijacker, ok := w.(http.Hijacker)
if !ok {
panic("Hijack not supported.")
}
conn, rw, err := hijacker.Hijack()
if err != nil {
panic(fmt.Sprintf("Hijack failed: %v", err))
}
const expectedBody = "SM\r\n\r\n"
buf := make([]byte, len(expectedBody))
n, err := io.ReadFull(rw, buf)
if err != nil {
return nil, fmt.Errorf("could not read from the buffer: %s", err)
}
if string(buf[:n]) == expectedBody {
c := &rwConn{
Conn: conn,
Reader: io.MultiReader(strings.NewReader(http2.ClientPreface), rw),
BufWriter: rw.Writer,
}
return c, nil
}
conn.Close()
if http2VerboseLogs {
log.Printf(
"h2c: missing the request body portion of the client preface. Wanted: %v Got: %v",
[]byte(expectedBody),
buf[0:n],
)
}
return nil, errors.New("invalid client preface")
}
// drainClientPreface reads a single instance of the HTTP/2 client preface from
// the supplied reader.
func drainClientPreface(r io.Reader) error {
var buf bytes.Buffer
prefaceLen := int64(len(http2.ClientPreface))
n, err := io.CopyN(&buf, r, prefaceLen)
if err != nil {
return err
}
if n != prefaceLen || buf.String() != http2.ClientPreface {
return fmt.Errorf("Client never sent: %s", http2.ClientPreface)
}
return nil
}
// h2cUpgrade establishes a h2c connection using the HTTP/1 upgrade (Section 3.2).
func h2cUpgrade(w http.ResponseWriter, r *http.Request) (net.Conn, error) {
if !isH2CUpgrade(r.Header) {
return nil, errors.New("non-conforming h2c headers")
}
// Initial bytes we put into conn to fool http2 server
initBytes, _, err := convertH1ReqToH2(r)
if err != nil {
return nil, err
}
hijacker, ok := w.(http.Hijacker)
if !ok {
return nil, errors.New("hijack not supported.")
}
conn, rw, err := hijacker.Hijack()
if err != nil {
return nil, fmt.Errorf("hijack failed: %v", err)
}
rw.Write([]byte("HTTP/1.1 101 Switching Protocols\r\n" +
"Connection: Upgrade\r\n" +
"Upgrade: h2c\r\n\r\n"))
rw.Flush()
	// A conforming client will now send an H2 client preface, which we need
	// to drain since a preface is already included in the initial bytes above.
if err := drainClientPreface(rw); err != nil {
return nil, err
}
c := &rwConn{
Conn: conn,
Reader: io.MultiReader(initBytes, rw),
BufWriter: newSettingsAckSwallowWriter(rw.Writer),
}
return c, nil
}
// convertH1ReqToH2 converts the data contained in the HTTP/1 upgrade request
// into its HTTP/2 equivalent in byte form.
func convertH1ReqToH2(r *http.Request) (*bytes.Buffer, []http2.Setting, error) {
h2Bytes := bytes.NewBuffer([]byte((http2.ClientPreface)))
framer := http2.NewFramer(h2Bytes, nil)
settings, err := getH2Settings(r.Header)
if err != nil {
return nil, nil, err
}
if err := framer.WriteSettings(settings...); err != nil {
return nil, nil, err
}
headerBytes, err := getH2HeaderBytes(r, getMaxHeaderTableSize(settings))
if err != nil {
return nil, nil, err
}
maxFrameSize := int(getMaxFrameSize(settings))
needOneHeader := len(headerBytes) < maxFrameSize
err = framer.WriteHeaders(http2.HeadersFrameParam{
StreamID: 1,
BlockFragment: headerBytes,
EndHeaders: needOneHeader,
})
if err != nil {
return nil, nil, err
}
for i := maxFrameSize; i < len(headerBytes); i += maxFrameSize {
if len(headerBytes)-i > maxFrameSize {
if err := framer.WriteContinuation(1,
false, // endHeaders
				headerBytes[i:i+maxFrameSize]); err != nil {
return nil, nil, err
}
} else {
if err := framer.WriteContinuation(1,
true, // endHeaders
headerBytes[i:]); err != nil {
return nil, nil, err
}
}
}
return h2Bytes, settings, nil
}
// getMaxFrameSize returns the SETTINGS_MAX_FRAME_SIZE. If not present default
// value is 16384 as specified by RFC 7540 Section 6.5.2.
func getMaxFrameSize(settings []http2.Setting) uint32 {
for _, setting := range settings {
if setting.ID == http2.SettingMaxFrameSize {
return setting.Val
}
}
return 16384
}
// getMaxHeaderTableSize returns the SETTINGS_HEADER_TABLE_SIZE. If not present
// default value is 4096 as specified by RFC 7540 Section 6.5.2.
func getMaxHeaderTableSize(settings []http2.Setting) uint32 {
for _, setting := range settings {
if setting.ID == http2.SettingHeaderTableSize {
return setting.Val
}
}
return 4096
}
// bufWriter is a Writer interface that also has a Flush method.
type bufWriter interface {
io.Writer
Flush() error
}
// rwConn implements net.Conn but overrides Read and Write so that reads and
// writes are forwarded to the provided io.Reader and bufWriter.
type rwConn struct {
net.Conn
io.Reader
BufWriter bufWriter
}
// Read forwards reads to the underlying Reader.
func (c *rwConn) Read(p []byte) (int, error) {
return c.Reader.Read(p)
}
// Write forwards writes to the underlying bufWriter and immediately flushes.
func (c *rwConn) Write(p []byte) (int, error) {
n, err := c.BufWriter.Write(p)
if err := c.BufWriter.Flush(); err != nil {
return 0, err
}
return n, err
}
// settingsAckSwallowWriter is a writer that normally forwards bytes to its
// underlying Writer, but swallows the first SettingsAck frame that it sees.
type settingsAckSwallowWriter struct {
Writer *bufio.Writer
buf []byte
didSwallow bool
}
// newSettingsAckSwallowWriter returns a new settingsAckSwallowWriter.
func newSettingsAckSwallowWriter(w *bufio.Writer) *settingsAckSwallowWriter {
return &settingsAckSwallowWriter{
Writer: w,
buf: make([]byte, 0),
didSwallow: false,
}
}
// Write implements io.Writer interface. Normally forwards bytes to w.Writer,
// except for the first Settings ACK frame that it sees.
func (w *settingsAckSwallowWriter) Write(p []byte) (int, error) {
if !w.didSwallow {
w.buf = append(w.buf, p...)
// Process all the frames we have collected into w.buf
for {
// Append until we get full frame header which is 9 bytes
if len(w.buf) < 9 {
break
}
// Check if we have collected a whole frame.
fh, err := http2.ReadFrameHeader(bytes.NewBuffer(w.buf))
if err != nil {
// Corrupted frame, fail current Write
return 0, err
}
fSize := fh.Length + 9
if uint32(len(w.buf)) < fSize {
				// Have not collected whole frame. Stop processing buf, and withhold
				// forwarding bytes to w.Writer until we get the full frame.
break
}
// We have now collected a whole frame.
if fh.Type == http2.FrameSettings && fh.Flags.Has(http2.FlagSettingsAck) {
// If Settings ACK frame, do not forward to underlying writer, remove
// bytes from w.buf, and record that we have swallowed Settings Ack
// frame.
w.didSwallow = true
w.buf = w.buf[fSize:]
continue
}
// Not settings ack frame. Forward bytes to w.Writer.
if _, err := w.Writer.Write(w.buf[:fSize]); err != nil {
// Couldn't forward bytes. Fail current Write.
return 0, err
}
w.buf = w.buf[fSize:]
}
return len(p), nil
}
return w.Writer.Write(p)
}
// Flush calls w.Writer.Flush.
func (w *settingsAckSwallowWriter) Flush() error {
return w.Writer.Flush()
}
// isH2CUpgrade returns true if the header properly request an upgrade to h2c
// as specified by Section 3.2.
func isH2CUpgrade(h http.Header) bool {
return httpguts.HeaderValuesContainsToken(h[textproto.CanonicalMIMEHeaderKey("Upgrade")], "h2c") &&
httpguts.HeaderValuesContainsToken(h[textproto.CanonicalMIMEHeaderKey("Connection")], "HTTP2-Settings")
}
// getH2Settings returns the []http2.Setting that are encoded in the
// HTTP2-Settings header.
func getH2Settings(h http.Header) ([]http2.Setting, error) {
vals, ok := h[textproto.CanonicalMIMEHeaderKey("HTTP2-Settings")]
if !ok {
return nil, errors.New("missing HTTP2-Settings header")
}
if len(vals) != 1 {
return nil, fmt.Errorf("expected 1 HTTP2-Settings. Got: %v", vals)
}
settings, err := decodeSettings(vals[0])
if err != nil {
return nil, fmt.Errorf("Invalid HTTP2-Settings: %q", vals[0])
}
return settings, nil
}
// decodeSettings decodes the base64url header value of the HTTP2-Settings
// header. RFC 7540 Section 3.2.1.
func decodeSettings(headerVal string) ([]http2.Setting, error) {
b, err := base64.RawURLEncoding.DecodeString(headerVal)
if err != nil {
return nil, err
}
	if len(b)%6 != 0 {
		// err is nil here; return an explicit error for a malformed payload.
		return nil, fmt.Errorf("invalid HTTP2-Settings payload length: %d", len(b))
	}
settings := make([]http2.Setting, 0)
for i := 0; i < len(b)/6; i++ {
settings = append(settings, http2.Setting{
ID: http2.SettingID(binary.BigEndian.Uint16(b[i*6 : i*6+2])),
Val: binary.BigEndian.Uint32(b[i*6+2 : i*6+6]),
})
}
return settings, nil
}
// getH2HeaderBytes returns the headers in r as a []byte encoded by HPACK.
func getH2HeaderBytes(r *http.Request, maxHeaderTableSize uint32) ([]byte, error) {
headerBytes := bytes.NewBuffer(nil)
hpackEnc := hpack.NewEncoder(headerBytes)
hpackEnc.SetMaxDynamicTableSize(maxHeaderTableSize)
// Section 8.1.2.3
err := hpackEnc.WriteField(hpack.HeaderField{
Name: ":method",
Value: r.Method,
})
if err != nil {
return nil, err
}
err = hpackEnc.WriteField(hpack.HeaderField{
Name: ":scheme",
Value: "http",
})
if err != nil {
return nil, err
}
err = hpackEnc.WriteField(hpack.HeaderField{
Name: ":authority",
Value: r.Host,
})
if err != nil {
return nil, err
}
path := r.URL.Path
if r.URL.RawQuery != "" {
path = strings.Join([]string{path, r.URL.RawQuery}, "?")
}
err = hpackEnc.WriteField(hpack.HeaderField{
Name: ":path",
Value: path,
})
if err != nil {
return nil, err
}
// TODO Implement Section 8.3
for header, values := range r.Header {
// Skip non h2 headers
if isNonH2Header(header) {
continue
}
for _, v := range values {
err := hpackEnc.WriteField(hpack.HeaderField{
Name: strings.ToLower(header),
Value: v,
})
if err != nil {
return nil, err
}
}
}
return headerBytes.Bytes(), nil
}
// Connection-specific headers listed in RFC 7540 Section 8.1.2.2 that are not
// supposed to be transferred to HTTP/2. The Http2-Settings header is skipped
// since it is already used to create the HTTP/2 SETTINGS frame.
var nonH2Headers = []string{
"Connection",
"Keep-Alive",
"Proxy-Connection",
"Transfer-Encoding",
"Upgrade",
"Http2-Settings",
}
// isNonH2Header returns true if header should not be transferred to HTTP/2.
func isNonH2Header(header string) bool {
for _, nonH2h := range nonH2Headers {
if header == nonH2h {
return true
}
}
return false
}
| [
"\"GODEBUG\""
]
| []
| [
"GODEBUG"
]
| [] | ["GODEBUG"] | go | 1 | 0 | |
selfdrive/debug/dump.py | #!/usr/bin/env python3
import os
import sys
import argparse
import json
from hexdump import hexdump
from cereal import log
import cereal.messaging as messaging
from cereal.services import service_list
if __name__ == "__main__":
  parser = argparse.ArgumentParser(description='Sniff a communication socket')
parser.add_argument('--pipe', action='store_true')
parser.add_argument('--raw', action='store_true')
parser.add_argument('--json', action='store_true')
parser.add_argument('--dump-json', action='store_true')
parser.add_argument('--no-print', action='store_true')
parser.add_argument('--addr', default='127.0.0.1')
parser.add_argument('--values', help='values to monitor (instead of entire event)')
parser.add_argument("socket", type=str, nargs='*', help="socket name")
args = parser.parse_args()
if args.addr != "127.0.0.1":
os.environ["ZMQ"] = "1"
messaging.context = messaging.Context()
poller = messaging.Poller()
for m in args.socket if len(args.socket) > 0 else service_list:
sock = messaging.sub_sock(m, poller, addr=args.addr)
values = None
if args.values:
values = [s.strip().split(".") for s in args.values.split(",")]
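    # For example (illustrative): --values carState.vEgo,carState.steeringAngleDeg
    # becomes [["carState", "vEgo"], ["carState", "steeringAngleDeg"]].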
while 1:
polld = poller.poll(1000)
for sock in polld:
msg = sock.receive()
evt = log.Event.from_bytes(msg)
if not args.no_print:
if args.pipe:
          sys.stdout.buffer.write(msg)
sys.stdout.flush()
elif args.raw:
hexdump(msg)
elif args.json:
print(json.loads(msg))
elif args.dump_json:
print(json.dumps(evt.to_dict()))
elif values:
print("logMonotime_bhcho = {}".format(evt.logMonoTime))
for value in values:
if hasattr(evt, value[0]):
item = evt
for key in value:
item = getattr(item, key)
#print("{} = {}".format(".".join(value), item))
#print("")
else:
print(evt)
| []
| []
| [
"ZMQ"
]
| [] | ["ZMQ"] | python | 1 | 0 | |
docs/source/conf.py | # -*- coding: utf-8 -*-
#
# botocore documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 2 07:26:23 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
from botocore.session import get_session
from botocore.docs import generate_docs
generate_docs(os.path.dirname(os.path.abspath(__file__)), get_session())
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'botocore'
copyright = u'2013, Mitch Garnaat'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.13.'
# The full version, including alpha/beta/rc tags.
release = '1.13.25'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_show_sourcelink = False
html_sidebars = {
'**': ['logo-text.html',
'globaltoc.html',
'localtoc.html',
'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'botocoredoc'
import guzzle_sphinx_theme
extensions.append("guzzle_sphinx_theme")
html_translator_class = 'guzzle_sphinx_theme.HTMLTranslator'
html_theme_path = guzzle_sphinx_theme.html_theme_path()
html_theme = 'guzzle_sphinx_theme'
# Guzzle theme options (see theme.conf for more information)
html_theme_options = {
# hack to add tracking
"google_analytics_account": os.getenv('TRACKING', False),
"base_url": "http://docs.aws.amazon.com/aws-sdk-php/guide/latest/"
}
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'botocore.tex', u'botocore Documentation',
u'Mitch Garnaat', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'botocore', u'botocore Documentation',
[u'Mitch Garnaat'], 3)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'botocore', u'botocore Documentation',
u'Mitch Garnaat', 'botocore', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| []
| []
| [
"TRACKING"
]
| [] | ["TRACKING"] | python | 1 | 0 | |
cmd/authelia-scripts/cmd_bootstrap.go | package main
import (
"fmt"
"os"
"os/exec"
"strings"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/authelia/authelia/v4/internal/utils"
)
// HostEntry represents an entry in /etc/hosts.
type HostEntry struct {
Domain string
IP string
}
var hostEntries = []HostEntry{
// For unit tests.
{Domain: "local.example.com", IP: "127.0.0.1"},
// For authelia backend.
{Domain: "authelia.example.com", IP: "192.168.240.50"},
// For common tests.
{Domain: "login.example.com", IP: "192.168.240.100"},
{Domain: "admin.example.com", IP: "192.168.240.100"},
{Domain: "singlefactor.example.com", IP: "192.168.240.100"},
{Domain: "dev.example.com", IP: "192.168.240.100"},
{Domain: "home.example.com", IP: "192.168.240.100"},
{Domain: "mx1.mail.example.com", IP: "192.168.240.100"},
{Domain: "mx2.mail.example.com", IP: "192.168.240.100"},
{Domain: "public.example.com", IP: "192.168.240.100"},
{Domain: "secure.example.com", IP: "192.168.240.100"},
{Domain: "mail.example.com", IP: "192.168.240.100"},
{Domain: "duo.example.com", IP: "192.168.240.100"},
// For Traefik suite.
{Domain: "traefik.example.com", IP: "192.168.240.100"},
// For HAProxy suite.
{Domain: "haproxy.example.com", IP: "192.168.240.100"},
// For testing network ACLs.
{Domain: "proxy-client1.example.com", IP: "192.168.240.201"},
{Domain: "proxy-client2.example.com", IP: "192.168.240.202"},
{Domain: "proxy-client3.example.com", IP: "192.168.240.203"},
// Redis Replicas.
{Domain: "redis-node-0.example.com", IP: "192.168.240.110"},
{Domain: "redis-node-1.example.com", IP: "192.168.240.111"},
{Domain: "redis-node-2.example.com", IP: "192.168.240.112"},
// Redis Sentinel Replicas.
{Domain: "redis-sentinel-0.example.com", IP: "192.168.240.120"},
{Domain: "redis-sentinel-1.example.com", IP: "192.168.240.121"},
{Domain: "redis-sentinel-2.example.com", IP: "192.168.240.122"},
// Kubernetes dashboard.
{Domain: "kubernetes.example.com", IP: "192.168.240.110"},
// OIDC tester app.
{Domain: "oidc.example.com", IP: "192.168.240.100"},
{Domain: "oidc-public.example.com", IP: "192.168.240.100"},
}
func runCommand(cmd string, args ...string) {
command := utils.CommandWithStdout(cmd, args...)
err := command.Run()
if err != nil {
panic(err)
}
}
func checkCommandExist(cmd string, resolutionHint string) {
fmt.Print("Checking if '" + cmd + "' command is installed...")
command := exec.Command("bash", "-c", "command -v "+cmd) //nolint:gosec // Used only in development.
err := command.Run()
if err != nil {
msg := "[ERROR] You must install " + cmd + " on your machine."
if resolutionHint != "" {
msg += fmt.Sprintf(" %s", resolutionHint)
}
log.Fatal(msg)
}
fmt.Println(" OK")
}
func createTemporaryDirectory() {
err := os.MkdirAll("/tmp/authelia", 0755)
if err != nil {
panic(err)
}
}
func createPNPMDirectory() {
home := os.Getenv("HOME")
if home != "" {
bootstrapPrintln("Creating ", home+"/.pnpm-store")
err := os.MkdirAll(home+"/.pnpm-store", 0755)
if err != nil {
panic(err)
}
}
}
func bootstrapPrintln(args ...interface{}) {
a := make([]interface{}, 0)
a = append(a, "[BOOTSTRAP]")
a = append(a, args...)
fmt.Println(a...)
}
func shell(cmd string) {
runCommand("bash", "-c", cmd)
}
func prepareHostsFile() {
contentBytes, err := readHostsFile()
if err != nil {
panic(err)
}
lines := strings.Split(string(contentBytes), "\n")
toBeAddedLine := make([]string, 0)
modified := false
for _, entry := range hostEntries {
domainInHostFile := false
for i, line := range lines {
domainFound := strings.Contains(line, entry.Domain)
ipFound := strings.Contains(line, entry.IP)
			if domainFound {
				domainInHostFile = true
				if ipFound {
					break
				}
				// The IP entry is stale; rewrite it with the expected address.
				lines[i] = entry.IP + " " + entry.Domain
				modified = true
				break
			}
}
if !domainInHostFile {
toBeAddedLine = append(toBeAddedLine, entry.IP+" "+entry.Domain)
}
}
if len(toBeAddedLine) > 0 {
lines = append(lines, toBeAddedLine...)
modified = true
}
fd, err := os.CreateTemp("/tmp/authelia/", "hosts")
if err != nil {
panic(err)
}
_, err = fd.Write([]byte(strings.Join(lines, "\n")))
if err != nil {
panic(err)
}
if modified {
bootstrapPrintln("/etc/hosts needs to be updated")
shell(fmt.Sprintf("cat %s | sudo tee /etc/hosts > /dev/null", fd.Name()))
}
err = fd.Close()
if err != nil {
panic(err)
}
}
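// An illustrative line produced for /etc/hosts by the logic above:
//
//	192.168.240.100 login.example.com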
// ReadHostsFile reads the hosts file.
func readHostsFile() ([]byte, error) {
bs, err := os.ReadFile("/etc/hosts")
if err != nil {
return nil, err
}
return bs, nil
}
func readVersion(cmd string, args ...string) {
command := exec.Command(cmd, args...)
b, err := command.Output()
if err != nil {
panic(err)
}
fmt.Print(cmd + " => " + string(b))
}
func readVersions() {
readVersion("go", "version")
readVersion("node", "--version")
readVersion("pnpm", "--version")
readVersion("docker", "--version")
readVersion("docker-compose", "version")
}
// Bootstrap bootstrap authelia dev environment.
func Bootstrap(cobraCmd *cobra.Command, args []string) {
bootstrapPrintln("Checking command installation...")
checkCommandExist("node", "Follow installation guidelines from https://nodejs.org/en/download/package-manager/ or download installer from https://nodejs.org/en/download/")
checkCommandExist("pnpm", "Follow installation guidelines from https://pnpm.io/installation")
checkCommandExist("docker", "Follow installation guidelines from https://docs.docker.com/get-docker/")
checkCommandExist("docker-compose", "Follow installation guidelines from https://docs.docker.com/compose/install/")
bootstrapPrintln("Getting versions of tools")
readVersions()
bootstrapPrintln("Checking if GOPATH is set")
goPathFound := false
for _, v := range os.Environ() {
if strings.HasPrefix(v, "GOPATH=") {
goPathFound = true
break
}
}
if !goPathFound {
log.Fatal("GOPATH is not set")
}
createTemporaryDirectory()
createPNPMDirectory()
bootstrapPrintln("Preparing /etc/hosts to serve subdomains of example.com...")
prepareHostsFile()
fmt.Println()
bootstrapPrintln("Run 'authelia-scripts suites setup Standalone' to start Authelia and visit https://home.example.com:8080.")
bootstrapPrintln("More details at https://github.com/authelia/authelia/blob/master/docs/getting-started.md")
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from sphinx.builders.html import StandaloneHTMLBuilder
import subprocess, os
# Run doxygen first
# read_the_docs_build = os.environ.get('READTHEDOCS', None) == 'True'
# if read_the_docs_build:
subprocess.call('doxygen doxyfile.doxy', shell=True)
# -- Project information -----------------------------------------------------
project = 'LwMEM'
copyright = '2022, Tilen MAJERLE'
author = 'Tilen MAJERLE'
# Try to detect the git branch this build is running on
# and determine which version string to display in Sphinx.
# The version uses the git tag on master/main, or "latest-develop" on the develop branch.
version = ''
git_branch = ''
def cmd_exec_print(t):
print("cmd > ", t, "\n", os.popen(t).read().strip(), "\n")
# Print some git information for debugging purposes
cmd_exec_print('git branch')
cmd_exec_print('git describe')
cmd_exec_print('git describe --tags')
cmd_exec_print('git describe --tags --abbrev=0')
cmd_exec_print('git describe --tags --abbrev=1')
# Get current branch
res = os.popen('git branch').read().strip()
for line in res.split("\n"):
if line[0] == '*':
git_branch = line[1:].strip()
# Decision for display version
git_branch = git_branch.replace('(HEAD detached at ', '').replace(')', '')
if git_branch.find('master') >= 0 or git_branch.find('main') >= 0:
#version = os.popen('git describe --tags --abbrev=0').read().strip()
version = 'latest-stable'
elif git_branch.find('develop-') >= 0 or git_branch.find('develop/') >= 0:
version = 'branch-' + git_branch
elif git_branch == 'develop' or git_branch == 'origin/develop':
version = 'latest-develop'
else:
version = os.popen('git describe --tags --abbrev=0').read().strip()
# For debugging purpose only
print("GIT BRANCH: " + git_branch)
print("PROJ VERSION: " + version)
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.autosectionlabel',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx_sitemap',
'breathe',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
highlight_language = 'c'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_options = {
'canonical_url': '',
'analytics_id': '', # Provided by Google in your dashboard
'display_version': True,
'prev_next_buttons_location': 'bottom',
'style_external_links': False,
'logo_only': False,
# Toc options
'collapse_navigation': True,
'sticky_navigation': True,
'navigation_depth': 4,
'includehidden': True,
'titles_only': False
}
html_logo = 'static/images/logo.svg'
github_url = 'https://github.com/MaJerle/lwmem'
html_baseurl = 'https://docs.majerle.eu/projects/lwmem/'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
html_css_files = [
'css/common.css',
'css/custom.css',
'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.15.1/css/all.min.css',
]
html_js_files = [
''
]
# Master index file
master_doc = 'index'
# --- Breathe configuration -----------------------------------------------------
breathe_projects = {
"lwmem": "_build/xml/"
}
breathe_default_project = "lwmem"
breathe_default_members = ('members', 'undoc-members')
breathe_show_enumvalue_initializer = True | []
| []
| [
"READTHEDOCS"
]
| [] | ["READTHEDOCS"] | python | 1 | 0 | |
raven/contrib/flask.py | """
raven.contrib.flask
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
try:
from flask_login import current_user
except ImportError:
has_flask_login = False
else:
has_flask_login = True
import sys
import os
import logging
from flask import request, current_app, g
from flask.signals import got_request_exception, request_finished
from raven.conf import setup_logging
from raven.base import Client
from raven.middleware import Sentry as SentryMiddleware
from raven.handlers.logging import SentryHandler
from raven.utils.compat import _urlparse
from raven.utils.wsgi import get_headers, get_environ
from werkzeug.exceptions import ClientDisconnected
def make_client(client_cls, app, dsn=None):
return client_cls(
dsn=dsn or app.config.get('SENTRY_DSN') or os.environ.get('SENTRY_DSN'),
include_paths=set(app.config.get('SENTRY_INCLUDE_PATHS', [])) | set([app.import_name]),
exclude_paths=app.config.get('SENTRY_EXCLUDE_PATHS'),
servers=app.config.get('SENTRY_SERVERS'),
name=app.config.get('SENTRY_NAME'),
public_key=app.config.get('SENTRY_PUBLIC_KEY'),
secret_key=app.config.get('SENTRY_SECRET_KEY'),
project=app.config.get('SENTRY_PROJECT'),
site=app.config.get('SENTRY_SITE_NAME'),
processors=app.config.get('SENTRY_PROCESSORS'),
string_max_length=app.config.get('SENTRY_MAX_LENGTH_STRING'),
list_max_length=app.config.get('SENTRY_MAX_LENGTH_LIST'),
auto_log_stacks=app.config.get('SENTRY_AUTO_LOG_STACKS'),
tags=app.config.get('SENTRY_TAGS'),
extra={
'app': app,
},
)
class Sentry(object):
"""
Flask application for Sentry.
Look up configuration from ``os.environ['SENTRY_DSN']``::
>>> sentry = Sentry(app)
Pass an arbitrary DSN::
>>> sentry = Sentry(app, dsn='http://public:[email protected]/1')
Pass an explicit client::
>>> sentry = Sentry(app, client=client)
Automatically configure logging::
>>> sentry = Sentry(app, logging=True, level=logging.ERROR)
Capture an exception::
>>> try:
>>> 1 / 0
>>> except ZeroDivisionError:
>>> sentry.captureException()
Capture a message::
>>> sentry.captureMessage('hello, world!')
By default, the Flask integration will do the following:
- Hook into the `got_request_exception` signal. This can be disabled by
passing `register_signal=False`.
- Wrap the WSGI application. This can be disabled by passing
`wrap_wsgi=False`.
- Capture information from Flask-Login (if available).
"""
# TODO(dcramer): the client isn't using local context and therefore
# gets shared by every app that does init on it
def __init__(self, app=None, client=None, client_cls=Client, dsn=None,
logging=False, level=logging.NOTSET, wrap_wsgi=None,
register_signal=True):
self.dsn = dsn
self.logging = logging
self.client_cls = client_cls
self.client = client
self.level = level
self.wrap_wsgi = wrap_wsgi
self.register_signal = register_signal
if app:
self.init_app(app)
@property
def last_event_id(self):
return getattr(self, '_last_event_id', None)
@last_event_id.setter
def last_event_id(self, value):
self._last_event_id = value
try:
g.sentry_event_id = value
except Exception:
pass
def handle_exception(self, *args, **kwargs):
if not self.client:
return
ignored_exc_type_list = current_app.config.get('RAVEN_IGNORE_EXCEPTIONS', [])
exc = sys.exc_info()[1]
if any((isinstance(exc, ignored_exc_type) for ignored_exc_type in ignored_exc_type_list)):
return
self.captureException(exc_info=kwargs.get('exc_info'))
def get_user_info(self, request):
"""
Requires Flask-Login (https://pypi.python.org/pypi/Flask-Login/) to be installed
and setup
"""
if not has_flask_login:
return
if not hasattr(current_app, 'login_manager'):
return
try:
is_authenticated = current_user.is_authenticated()
except AttributeError:
            # HACK: catch the attribute error thrown when flask-login is not attached
# > current_user = LocalProxy(lambda: _request_ctx_stack.top.user)
# E AttributeError: 'RequestContext' object has no attribute 'user'
return {}
if is_authenticated:
user_info = {
'is_authenticated': True,
'is_anonymous': current_user.is_anonymous(),
'id': current_user.get_id(),
}
if 'SENTRY_USER_ATTRS' in current_app.config:
for attr in current_app.config['SENTRY_USER_ATTRS']:
if hasattr(current_user, attr):
user_info[attr] = getattr(current_user, attr)
else:
user_info = {
'is_authenticated': False,
'is_anonymous': current_user.is_anonymous(),
}
return user_info
def get_http_info(self, request):
urlparts = _urlparse.urlsplit(request.url)
try:
formdata = request.form
except ClientDisconnected:
formdata = {}
return {
'url': '%s://%s%s' % (urlparts.scheme, urlparts.netloc, urlparts.path),
'query_string': urlparts.query,
'method': request.method,
'data': formdata,
'headers': dict(get_headers(request.environ)),
'env': dict(get_environ(request.environ)),
}
def before_request(self, *args, **kwargs):
self.last_event_id = None
self.client.http_context(self.get_http_info(request))
self.client.user_context(self.get_user_info(request))
def after_request(self, sender, response, *args, **kwargs):
response.headers['X-Sentry-ID'] = self.last_event_id
self.client.context.clear()
return response
def init_app(self, app, dsn=None, logging=None, level=None, wrap_wsgi=None,
register_signal=None):
if dsn is not None:
self.dsn = dsn
if level is not None:
self.level = level
if wrap_wsgi is not None:
self.wrap_wsgi = wrap_wsgi
else:
# Fix https://github.com/getsentry/raven-python/issues/412
# the gist is that we get errors twice in debug mode if we don't do this
if app and app.debug:
self.wrap_wsgi = False
else:
self.wrap_wsgi = True
if register_signal is not None:
self.register_signal = register_signal
if logging is not None:
self.logging = logging
if not self.client:
self.client = make_client(self.client_cls, app, self.dsn)
if self.logging:
setup_logging(SentryHandler(self.client, level=self.level))
if self.wrap_wsgi:
app.wsgi_app = SentryMiddleware(app.wsgi_app, self.client)
app.before_request(self.before_request)
if self.register_signal:
got_request_exception.connect(self.handle_exception, sender=app)
request_finished.connect(self.after_request, sender=app)
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['sentry'] = self
def captureException(self, *args, **kwargs):
assert self.client, 'captureException called before application configured'
result = self.client.captureException(*args, **kwargs)
if result:
self.last_event_id = self.client.get_ident(result)
else:
self.last_event_id = None
return result
def captureMessage(self, *args, **kwargs):
assert self.client, 'captureMessage called before application configured'
result = self.client.captureMessage(*args, **kwargs)
if result:
self.last_event_id = self.client.get_ident(result)
else:
self.last_event_id = None
return result
def user_context(self, *args, **kwargs):
assert self.client, 'user_context called before application configured'
return self.client.user_context(*args, **kwargs)
def tags_context(self, *args, **kwargs):
assert self.client, 'tags_context called before application configured'
return self.client.tags_context(*args, **kwargs)
def extra_context(self, *args, **kwargs):
assert self.client, 'extra_context called before application configured'
return self.client.extra_context(*args, **kwargs)
| []
| []
| [
"SENTRY_DSN"
]
| [] | ["SENTRY_DSN"] | python | 1 | 0 | |
src/cmd/go/get.go | // Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"fmt"
"go/build"
"os"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
)
var cmdGet = &Command{
UsageLine: "get [-d] [-f] [-fix] [-insecure] [-t] [-u] [build flags] [packages]",
Short: "download and install packages and dependencies",
Long: `
Get downloads the packages named by the import paths, along with their
dependencies. It then installs the named packages, like 'go install'.
The -d flag instructs get to stop after downloading the packages; that is,
it instructs get not to install the packages.
The -f flag, valid only when -u is set, forces get -u not to verify that
each package has been checked out from the source control repository
implied by its import path. This can be useful if the source is a local fork
of the original.
The -fix flag instructs get to run the fix tool on the downloaded packages
before resolving dependencies or building the code.
The -insecure flag permits fetching from repositories and resolving
custom domains using insecure schemes such as HTTP. Use with caution.
The -t flag instructs get to also download the packages required to build
the tests for the specified packages.
The -u flag instructs get to use the network to update the named packages
and their dependencies. By default, get uses the network to check out
missing packages but does not use it to look for updates to existing packages.
The -v flag enables verbose progress and debug output.
Get also accepts build flags to control the installation. See 'go help build'.
When checking out a new package, get creates the target directory
GOPATH/src/<import-path>. If the GOPATH contains multiple entries,
get uses the first one. For more details see: 'go help gopath'.
When checking out or updating a package, get looks for a branch or tag
that matches the locally installed version of Go. The most important
rule is that if the local installation is running version "go1", get
searches for a branch or tag named "go1". If no such version exists it
retrieves the most recent version of the package.
When go get checks out or updates a Git repository,
it also updates any git submodules referenced by the repository.
Get never checks out or updates code stored in vendor directories.
For more about specifying packages, see 'go help packages'.
For more about how 'go get' finds source code to
download, see 'go help importpath'.
See also: go build, go install, go clean.
`,
}
var getD = cmdGet.Flag.Bool("d", false, "")
var getF = cmdGet.Flag.Bool("f", false, "")
var getT = cmdGet.Flag.Bool("t", false, "")
var getU = cmdGet.Flag.Bool("u", false, "")
var getFix = cmdGet.Flag.Bool("fix", false, "")
var getInsecure = cmdGet.Flag.Bool("insecure", false, "")
func init() {
addBuildFlags(cmdGet)
cmdGet.Run = runGet // break init loop
}
func runGet(cmd *Command, args []string) {
if *getF && !*getU {
fatalf("go get: cannot use -f flag without -u")
}
// Disable any prompting for passwords by Git.
// Only has an effect for 2.3.0 or later, but avoiding
// the prompt in earlier versions is just too hard.
// If user has explicitly set GIT_TERMINAL_PROMPT=1, keep
// prompting.
// See golang.org/issue/9341 and golang.org/issue/12706.
if os.Getenv("GIT_TERMINAL_PROMPT") == "" {
os.Setenv("GIT_TERMINAL_PROMPT", "0")
}
// Disable any ssh connection pooling by Git.
// If a Git subprocess forks a child into the background to cache a new connection,
// that child keeps stdout/stderr open. After the Git subprocess exits,
	// os/exec expects to be able to read from the stdout/stderr pipe
// until EOF to get all the data that the Git subprocess wrote before exiting.
// The EOF doesn't come until the child exits too, because the child
// is holding the write end of the pipe.
// This is unfortunate, but it has come up at least twice
// (see golang.org/issue/13453 and golang.org/issue/16104)
// and confuses users when it does.
// If the user has explicitly set GIT_SSH or GIT_SSH_COMMAND,
// assume they know what they are doing and don't step on it.
// But default to turning off ControlMaster.
if os.Getenv("GIT_SSH") == "" && os.Getenv("GIT_SSH_COMMAND") == "" {
os.Setenv("GIT_SSH_COMMAND", "ssh -o ControlMaster=no")
}
// Phase 1. Download/update.
var stk importStack
mode := 0
if *getT {
mode |= getTestDeps
}
args = downloadPaths(args)
for _, arg := range args {
download(arg, nil, &stk, mode)
}
exitIfErrors()
// Phase 2. Rescan packages and re-evaluate args list.
// Code we downloaded and all code that depends on it
// needs to be evicted from the package cache so that
// the information will be recomputed. Instead of keeping
// track of the reverse dependency information, evict
// everything.
for name := range packageCache {
delete(packageCache, name)
}
// In order to rebuild packages information completely,
// we need to clear commands cache. Command packages are
// referring to evicted packages from the package cache.
// This leads to duplicated loads of the standard packages.
for name := range cmdCache {
delete(cmdCache, name)
}
args = importPaths(args)
packagesForBuild(args)
// Phase 3. Install.
if *getD {
// Download only.
// Check delayed until now so that importPaths
// and packagesForBuild have a chance to print errors.
return
}
installPackages(args, true)
}
// downloadPaths prepares the list of paths to pass to download.
// It expands ... patterns that can be expanded. If there is no match
// for a particular pattern, downloadPaths leaves it in the result list,
// in the hope that we can figure out the repository from the
// initial ...-free prefix.
func downloadPaths(args []string) []string {
args = importPathsNoDotExpansion(args)
var out []string
for _, a := range args {
if strings.Contains(a, "...") {
var expand []string
// Use matchPackagesInFS to avoid printing
// warnings. They will be printed by the
// eventual call to importPaths instead.
if build.IsLocalImport(a) {
expand = matchPackagesInFS(a)
} else {
expand = matchPackages(a)
}
if len(expand) > 0 {
out = append(out, expand...)
continue
}
}
out = append(out, a)
}
return out
}
// downloadCache records the import paths we have already
// considered during the download, to avoid duplicate work when
// there is more than one dependency sequence leading to
// a particular package.
var downloadCache = map[string]bool{}
// downloadRootCache records the version control repository
// root directories we have already considered during the download.
// For example, all the packages in the github.com/google/codesearch repo
// share the same root (the directory for that path), and we only need
// to run the hg commands to consider each repository once.
var downloadRootCache = map[string]bool{}
// download runs the download half of the get command
// for the package named by the argument.
func download(arg string, parent *Package, stk *importStack, mode int) {
if mode&useVendor != 0 {
// Caller is responsible for expanding vendor paths.
panic("internal error: download mode has useVendor set")
}
load := func(path string, mode int) *Package {
if parent == nil {
return loadPackage(path, stk)
}
return loadImport(path, parent.Dir, parent, stk, nil, mode)
}
p := load(arg, mode)
if p.Error != nil && p.Error.hard {
errorf("%s", p.Error)
return
}
// loadPackage inferred the canonical ImportPath from arg.
// Use that in the following to prevent hysteresis effects
// in e.g. downloadCache and packageCache.
// This allows invocations such as:
// mkdir -p $GOPATH/src/github.com/user
// cd $GOPATH/src/github.com/user
// go get ./foo
// see: golang.org/issue/9767
arg = p.ImportPath
// There's nothing to do if this is a package in the standard library.
if p.Standard {
return
}
// Only process each package once.
// (Unless we're fetching test dependencies for this package,
// in which case we want to process it again.)
if downloadCache[arg] && mode&getTestDeps == 0 {
return
}
downloadCache[arg] = true
pkgs := []*Package{p}
wildcardOkay := len(*stk) == 0
isWildcard := false
// Download if the package is missing, or update if we're using -u.
if p.Dir == "" || *getU {
// The actual download.
stk.push(arg)
err := downloadPackage(p)
if err != nil {
errorf("%s", &PackageError{ImportStack: stk.copy(), Err: err.Error()})
stk.pop()
return
}
stk.pop()
args := []string{arg}
// If the argument has a wildcard in it, re-evaluate the wildcard.
// We delay this until after reloadPackage so that the old entry
// for p has been replaced in the package cache.
if wildcardOkay && strings.Contains(arg, "...") {
if build.IsLocalImport(arg) {
args = matchPackagesInFS(arg)
} else {
args = matchPackages(arg)
}
isWildcard = true
}
// Clear all relevant package cache entries before
// doing any new loads.
for _, arg := range args {
p := packageCache[arg]
if p != nil {
delete(packageCache, p.Dir)
delete(packageCache, p.ImportPath)
}
}
pkgs = pkgs[:0]
for _, arg := range args {
// Note: load calls loadPackage or loadImport,
// which push arg onto stk already.
// Do not push here too, or else stk will say arg imports arg.
p := load(arg, mode)
if p.Error != nil {
errorf("%s", p.Error)
continue
}
pkgs = append(pkgs, p)
}
}
// Process package, which might now be multiple packages
// due to wildcard expansion.
for _, p := range pkgs {
if *getFix {
run(buildToolExec, stringList(tool("fix"), relPaths(p.allgofiles)))
// The imports might have changed, so reload again.
p = reloadPackage(arg, stk)
if p.Error != nil {
errorf("%s", p.Error)
return
}
}
if isWildcard {
// Report both the real package and the
// wildcard in any error message.
stk.push(p.ImportPath)
}
// Process dependencies, now that we know what they are.
imports := p.Imports
if mode&getTestDeps != 0 {
// Process test dependencies when -t is specified.
// (But don't get test dependencies for test dependencies:
// we always pass mode 0 to the recursive calls below.)
imports = stringList(imports, p.TestImports, p.XTestImports)
}
for i, path := range imports {
if path == "C" {
continue
}
// Fail fast on import naming full vendor path.
// Otherwise expand path as needed for test imports.
// Note that p.Imports can have additional entries beyond p.build.Imports.
orig := path
if i < len(p.build.Imports) {
orig = p.build.Imports[i]
}
if j, ok := findVendor(orig); ok {
stk.push(path)
err := &PackageError{
ImportStack: stk.copy(),
Err: "must be imported as " + path[j+len("vendor/"):],
}
stk.pop()
errorf("%s", err)
continue
}
// If this is a test import, apply vendor lookup now.
// We cannot pass useVendor to download, because
// download does caching based on the value of path,
// so it must be the fully qualified path already.
if i >= len(p.Imports) {
path = vendoredImportPath(p, path)
}
download(path, p, stk, 0)
}
if isWildcard {
stk.pop()
}
}
}
// downloadPackage runs the create or download command
// to make the first copy of or update a copy of the given package.
func downloadPackage(p *Package) error {
var (
vcs *vcsCmd
repo, rootPath string
err error
)
security := secure
if *getInsecure {
security = insecure
}
if p.build.SrcRoot != "" {
// Directory exists. Look for checkout along path to src.
vcs, rootPath, err = vcsFromDir(p.Dir, p.build.SrcRoot)
if err != nil {
return err
}
repo = "<local>" // should be unused; make distinctive
// Double-check where it came from.
if *getU && vcs.remoteRepo != nil {
dir := filepath.Join(p.build.SrcRoot, filepath.FromSlash(rootPath))
remote, err := vcs.remoteRepo(vcs, dir)
if err != nil {
return err
}
repo = remote
if !*getF {
if rr, err := repoRootForImportPath(p.ImportPath, security); err == nil {
repo := rr.repo
if rr.vcs.resolveRepo != nil {
resolved, err := rr.vcs.resolveRepo(rr.vcs, dir, repo)
if err == nil {
repo = resolved
}
}
if remote != repo && rr.isCustom {
return fmt.Errorf("%s is a custom import path for %s, but %s is checked out from %s", rr.root, repo, dir, remote)
}
}
}
}
} else {
// Analyze the import path to determine the version control system,
// repository, and the import path for the root of the repository.
rr, err := repoRootForImportPath(p.ImportPath, security)
if err != nil {
return err
}
vcs, repo, rootPath = rr.vcs, rr.repo, rr.root
}
if !vcs.isSecure(repo) && !*getInsecure {
return fmt.Errorf("cannot download, %v uses insecure protocol", repo)
}
if p.build.SrcRoot == "" {
// Package not found. Put in first directory of $GOPATH.
list := filepath.SplitList(buildContext.GOPATH)
if len(list) == 0 {
return fmt.Errorf("cannot download, $GOPATH not set. For more details see: 'go help gopath'")
}
// Guard against people setting GOPATH=$GOROOT.
if list[0] == goroot {
return fmt.Errorf("cannot download, $GOPATH must not be set to $GOROOT. For more details see: 'go help gopath'")
}
if _, err := os.Stat(filepath.Join(list[0], "src/cmd/go/alldocs.go")); err == nil {
return fmt.Errorf("cannot download, %s is a GOROOT, not a GOPATH. For more details see: 'go help gopath'", list[0])
}
p.build.Root = list[0]
p.build.SrcRoot = filepath.Join(list[0], "src")
p.build.PkgRoot = filepath.Join(list[0], "pkg")
}
root := filepath.Join(p.build.SrcRoot, filepath.FromSlash(rootPath))
// If we've considered this repository already, don't do it again.
if downloadRootCache[root] {
return nil
}
downloadRootCache[root] = true
if buildV {
fmt.Fprintf(os.Stderr, "%s (download)\n", rootPath)
}
// Check that this is an appropriate place for the repo to be checked out.
// The target directory must either not exist or have a repo checked out already.
meta := filepath.Join(root, "."+vcs.cmd)
st, err := os.Stat(meta)
if err == nil && !st.IsDir() {
return fmt.Errorf("%s exists but is not a directory", meta)
}
if err != nil {
// Metadata directory does not exist. Prepare to checkout new copy.
// Some version control tools require the target directory not to exist.
// We require that too, just to avoid stepping on existing work.
if _, err := os.Stat(root); err == nil {
return fmt.Errorf("%s exists but %s does not - stale checkout?", root, meta)
}
_, err := os.Stat(p.build.Root)
gopathExisted := err == nil
// Some version control tools require the parent of the target to exist.
parent, _ := filepath.Split(root)
if err = os.MkdirAll(parent, 0777); err != nil {
return err
}
if buildV && !gopathExisted && p.build.Root == buildContext.GOPATH {
fmt.Fprintf(os.Stderr, "created GOPATH=%s; see 'go help gopath'\n", p.build.Root)
}
if err = vcs.create(root, repo); err != nil {
return err
}
} else {
// Metadata directory does exist; download incremental updates.
if err = vcs.download(root); err != nil {
return err
}
}
if buildN {
// Do not show tag sync in -n; it's noise more than anything,
// and since we're not running commands, no tag will be found.
// But avoid printing nothing.
fmt.Fprintf(os.Stderr, "# cd %s; %s sync/update\n", root, vcs.cmd)
return nil
}
// Select and sync to appropriate version of the repository.
tags, err := vcs.tags(root)
if err != nil {
return err
}
vers := runtime.Version()
if i := strings.Index(vers, " "); i >= 0 {
vers = vers[:i]
}
if err := vcs.tagSync(root, selectTag(vers, tags)); err != nil {
return err
}
return nil
}
// goTag matches go release tags such as go1 and go1.2.3.
// The numbers involved must be small (at most 4 digits),
// have no unnecessary leading zeros, and the version cannot
// end in .0 - it is go1, not go1.0 or go1.0.0.
var goTag = regexp.MustCompile(
`^go((0|[1-9][0-9]{0,3})\.)*([1-9][0-9]{0,3})$`,
)
// selectTag returns the closest matching tag for a given version.
// Closest means the latest one that is not after the current release.
// Version "goX" (or "goX.Y" or "goX.Y.Z") matches tags of the same form.
// Version "release.rN" matches tags of the form "go.rN" (N being a floating-point number).
// Version "weekly.YYYY-MM-DD" matches tags like "go.weekly.YYYY-MM-DD".
//
// NOTE(rsc): Eventually we will need to decide on some logic here.
// For now, there is only "go1". This matches the docs in go help get.
func selectTag(goVersion string, tags []string) (match string) {
for _, t := range tags {
if t == "go1" {
return "go1"
}
}
return ""
/*
if goTag.MatchString(goVersion) {
v := goVersion
for _, t := range tags {
if !goTag.MatchString(t) {
continue
}
if cmpGoVersion(match, t) < 0 && cmpGoVersion(t, v) <= 0 {
match = t
}
}
}
return match
*/
}
// cmpGoVersion returns -1, 0, +1 reporting whether
// x < y, x == y, or x > y.
func cmpGoVersion(x, y string) int {
// Malformed strings compare less than well-formed strings.
if !goTag.MatchString(x) {
return -1
}
if !goTag.MatchString(y) {
return +1
}
// Compare numbers in sequence.
xx := strings.Split(x[len("go"):], ".")
yy := strings.Split(y[len("go"):], ".")
for i := 0; i < len(xx) && i < len(yy); i++ {
// The Atoi are guaranteed to succeed
// because the versions match goTag.
xi, _ := strconv.Atoi(xx[i])
yi, _ := strconv.Atoi(yy[i])
if xi < yi {
return -1
} else if xi > yi {
return +1
}
}
if len(xx) < len(yy) {
return -1
}
if len(xx) > len(yy) {
return +1
}
return 0
}
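// For example (illustrative): cmpGoVersion("go1.2", "go1.10") returns -1,
// since the dot-separated components are compared numerically (2 < 10).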
| [
"\"GIT_TERMINAL_PROMPT\"",
"\"GIT_SSH\"",
"\"GIT_SSH_COMMAND\""
]
| []
| [
"GIT_SSH",
"GIT_SSH_COMMAND",
"GIT_TERMINAL_PROMPT"
]
| [] | ["GIT_SSH", "GIT_SSH_COMMAND", "GIT_TERMINAL_PROMPT"] | go | 3 | 0 | |
transform.go | package main
import (
"log"
"time"
)
var utc, seattle *time.Location
func init() {
utc, _ = time.LoadLocation("UTC")
seattle, _ = time.LoadLocation("America/Los_Angeles")
}
func Event2Record(de *DeviceEvent) []*SensorRecord {
t, err := time.ParseInLocation(Props.DateLayout, de.Timestamp, utc)
if err != nil {
log.Printf("ERROR: timestamp not valid <%v>\n", err)
return nil
}
if t.Year() == 2000 {
// timestamp initial value is 2000-2-1 00:00:00
log.Println("WARN: GPS signal is disconnected")
// return nil
}
t = t.In(seattle) // the timestamp from the device (unstable)
now := time.Now().In(seattle) // post-generated
rs := make([]*SensorRecord, len(de.Packets))
for idx, sp := range de.Packets {
rs[idx] = &SensorRecord{
ReportTime: now,
SensorName: sp.Name,
SensorValue: sp.Value,
}
}
return rs
}
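// Filter parses an RFC3339 timestamp supplied as a query parameter. Note that
// the RFC3339 layout carries an explicit UTC offset, so the seattle location
// argument has no effect on the parsed result.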
func Filter(param string) (time.Time, error) {
return time.ParseInLocation(time.RFC3339, param, seattle)
}
| []
| []
| []
| [] | [] | go | null | null | null |
test/conftest.py | # coding=utf-8
"""
Pytest config file
"""
import os
import sys
from pathlib import Path
import pytest
# from esst import core
def pytest_configure(config):
"""
Runs at tests startup
Args:
config: pytest config args
"""
print('pytest args: ', config.args)
os.environ['DCS_PATH'] = 'test'
os.environ['DCS_SERVER_NAME'] = 'test'
os.environ['DCS_SERVER_PASSWORD'] = 'test'
os.environ['DISCORD_BOT_NAME'] = 'test'
os.environ['DISCORD_CHANNEL'] = 'test'
os.environ['DISCORD_TOKEN'] = 'test'
sys._called_from_test = True
# noinspection SpellCheckingInspection
def pytest_unconfigure(config):
"""Tear down"""
print('pytest args: ', config.args)
# noinspection PyUnresolvedReferences,PyProtectedMember
del sys._called_from_test
# @pytest.fixture(autouse=True)
# def _reset_fs():
# core.FS._reset()
# yield
# core.FS._reset()
@pytest.fixture(autouse=True)
def _dummy_config():
Path('./esst_test.yml').write_text("""
dcs_path: './DCS'
dcs_server_name: 'server_name'
dcs_server_password: 'server_pwd'
discord_bot_name: 'bot_name'
discord_channel: 'channel'
discord_token: 'token'
""")
yield
@pytest.fixture(autouse=True)
def cleandir(request, tmpdir):
"""
Creates a clean directory and cd into it for the duration of the test
Args:
request: Pytest request object
tmpdir: Pytest tmpdir fixture
"""
# from esst.core import FS
# FS.saved_games_path = Path(str(tmpdir), 'Saved Games').absolute()
# FS.ur_install_path = Path(str(tmpdir), 'UniversRadio').absolute()
if 'nocleandir' in request.keywords:
yield
else:
current_dir = os.getcwd()
os.chdir(str(tmpdir))
yield os.getcwd()
os.chdir(current_dir)
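# Tests can opt out of the clean working directory via the marker checked
# above; illustrative usage: decorate a test with @pytest.mark.nocleandir.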
def pytest_addoption(parser):
"""Adds options to Pytest command line"""
parser.addoption("--long", action="store_true",
help="run long tests")
def pytest_runtest_setup(item):
"""Test suite setup"""
# Skip tests that are marked with the "long" marker
long_marker = item.get_marker("long")
if long_marker is not None and not item.config.getoption('long'):
pytest.skip('skipping long tests')
| []
| []
| [
"DCS_PATH",
"DISCORD_TOKEN",
"DISCORD_CHANNEL",
"DCS_SERVER_PASSWORD",
"DISCORD_BOT_NAME",
"DCS_SERVER_NAME"
]
| [] | ["DCS_PATH", "DISCORD_TOKEN", "DISCORD_CHANNEL", "DCS_SERVER_PASSWORD", "DISCORD_BOT_NAME", "DCS_SERVER_NAME"] | python | 6 | 0 | |
phnorm/main.go | package main
import (
"fmt"
"os"
"regexp"
"github.com/joho/godotenv"
_ "github.com/lib/pq"
"github.com/santosh/gophercises/phnorm/db"
)
func init() {
must(godotenv.Load())
}
const (
host = "localhost"
port = 5432
username = "postgres"
dbname = "gophercises_phone"
)
func main() {
password := os.Getenv("PASSWORD")
psqlInfo := fmt.Sprintf("host=%s port=%d user=%s password=%s sslmode=disable", host, port, username, password)
must(db.Reset("postgres", psqlInfo, dbname))
psqlInfo = fmt.Sprintf("%s dbname=%s", psqlInfo, dbname)
must(db.Migrate("postgres", psqlInfo))
db, err := db.Open("postgres", psqlInfo)
must(err)
defer db.Close()
err = db.Seed()
must(err)
phones, err := db.AllPhones()
must(err)
for _, p := range phones {
fmt.Printf("Working on... %+v\n", p)
number := normalize(p.Number)
if number != p.Number {
fmt.Println("Updating or removing...", number)
existing, err := db.FindPhone(number)
must(err)
if existing != nil {
must(db.DeletePhone(p.ID))
} else {
p.Number = number
must(db.UpdatePhone(&p))
}
} else {
fmt.Println("No changes required")
}
}
}
func must(err error) {
if err != nil {
panic(err)
}
}
func normalize(phone string) string {
re := regexp.MustCompile("\\D")
return re.ReplaceAllString(phone, "")
}
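// For example (illustrative): normalize("(123) 456-7890") returns "1234567890".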
// func normalize(phone string) string {
// var buf bytes.Buffer
// for _, ch := range phone {
// if ch >= '0' && ch <= '9' {
// buf.WriteRune(ch)
// }
// }
// return buf.String()
// }
| [
"\"PASSWORD\""
]
| []
| [
"PASSWORD"
]
| [] | ["PASSWORD"] | go | 1 | 0 | |
python/swaps/single_swap.py | from web3 import Web3
import eth_abi
import os
import json
from decimal import *
import webbrowser
# Load private key and connect to RPC endpoint
rpc_endpoint = os.environ.get("RPC_ENDPOINT")
private_key = os.environ.get("KEY_PRIVATE")
if rpc_endpoint is None or private_key is None or private_key == "":
print("\n[ERROR] You must set environment variables for RPC_ENDPOINT and KEY_PRIVATE\n")
quit()
web3 = Web3(Web3.HTTPProvider(rpc_endpoint))
account = web3.eth.account.privateKeyToAccount(private_key)
address = account.address
# Define network settings
network = "kovan"
block_explorer_url = "https://kovan.etherscan.io/"
chain_id = 42
gas_price = 2
# Load contract for Balancer Vault
address_vault = "0xBA12222222228d8Ba445958a75a0704d566BF2C8"
path_abi_vault = '../../abis/Vault.json'
with open(path_abi_vault) as f:
abi_vault = json.load(f)
contract_vault = web3.eth.contract(
address=web3.toChecksumAddress(address_vault),
abi=abi_vault
)
# Where are the tokens coming from/going to?
fund_settings = {
"sender": address,
"recipient": address,
"fromInternalBalance": False,
"toInternalBalance": False
}
# When should the transaction timeout?
deadline = 999999999999999999
# Pool IDs
pool_BAL_WETH = "0x61d5dc44849c9c87b0856a2a311536205c96c7fd000200000000000000000000"
# Token addresses
token_BAL = "0x41286Bb1D3E870f3F750eB7E1C25d7E48c8A1Ac7".lower()
token_WETH = "0xdFCeA9088c8A88A76FF74892C1457C17dfeef9C1".lower()
# Token data
token_data = {
token_BAL:{
"symbol":"BAL",
"decimals":"18",
"limit":"0"
},
token_WETH:{
"symbol":"WETH",
"decimals":"18",
"limit":"1"
}
}
swap = {
"poolId":pool_BAL_WETH,
"assetIn":token_WETH,
"assetOut":token_BAL,
"amount":"1"
}
# SwapKind is an Enum. This example handles a GIVEN_IN swap.
# https://github.com/balancer-labs/balancer-v2-monorepo/blob/0328ed575c1b36fb0ad61ab8ce848083543070b9/pkg/vault/contracts/interfaces/IVault.sol#L497
swap_kind = 0 #0 = GIVEN_IN, 1 = GIVEN_OUT
user_data_encoded = eth_abi.encode_abi(['uint256'], [0])
swap_struct = (
swap["poolId"],
swap_kind,
web3.toChecksumAddress(swap["assetIn"]),
web3.toChecksumAddress(swap["assetOut"]),
int(Decimal(swap["amount"]) * 10 ** Decimal((token_data[swap["assetIn"]]["decimals"]))),
user_data_encoded
)
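# The tuple above mirrors the Vault's SingleSwap struct field order:
# (poolId, kind, assetIn, assetOut, amount, userData); userData is unused
# for a plain swap, hence the ABI-encoded zero.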
fund_struct = (
web3.toChecksumAddress(fund_settings["sender"]),
fund_settings["fromInternalBalance"],
web3.toChecksumAddress(fund_settings["recipient"]),
fund_settings["toInternalBalance"]
)
token_limit = int(Decimal(token_data[swap["assetIn"]]["limit"]) * 10 ** Decimal(token_data[swap["assetIn"]]["decimals"]))
single_swap_function = contract_vault.functions.swap(
swap_struct,
fund_struct,
token_limit,
deadline
)
try:
gas_estimate = single_swap_function.estimateGas()
except Exception:
gas_estimate = 100000
print("Failed to estimate gas, attempting to send with", gas_estimate, "gas limit...")
data = single_swap_function.buildTransaction(
{
'chainId': chain_id,
'gas': gas_estimate,
'gasPrice': web3.toWei(gas_price, 'gwei'),
'nonce': web3.eth.get_transaction_count(address),
}
)
signed_tx = web3.eth.account.sign_transaction(data, private_key)
tx_hash = web3.eth.send_raw_transaction(signed_tx.rawTransaction).hex()
print("Sending transaction...")
url = block_explorer_url + "tx/" + tx_hash
webbrowser.open_new_tab(url)
| []
| []
| [
"KEY_PRIVATE",
"RPC_ENDPOINT"
]
| [] | ["KEY_PRIVATE", "RPC_ENDPOINT"] | python | 2 | 0 | |
main.py | import os
from flask import Flask, jsonify, request
app = Flask(__name__)
from EntityParser import EntityParser
nlp = EntityParser()
@app.route('/')
def homepage():
return 'Entity Extraction Service'
@app.route('/private/entityExtraction')
def extract_entities():
text = request.args.get('text')
lang = request.args.get('lang')
entities = nlp.extract_entities(text, lang)
return jsonify(entities)
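# Illustrative request:
#   GET /private/entityExtraction?text=Barack%20Obama%20visited%20Paris&lang=en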
def main():
app.run(host='0.0.0.0', port=int(os.getenv('PORT', default=3001)))
# app.run(debug=True)
if __name__ == '__main__':
main()
| []
| []
| [
"PORT"
]
| [] | ["PORT"] | python | 1 | 0 | |
examples/getOrder.go | package main
import (
"context"
"github.com/JustinGaoF/go-tdameritrade/tdameritrade"
"golang.org/x/oauth2"
"log"
"os"
"time"
)
func main() {
	// Pass an HTTP client with OAuth2 authentication.
token := os.Getenv("TDAMERITRADE_CLIENT_ID")
if token == "" {
log.Fatal("Unauthorized: No token present")
}
refreshToken := os.Getenv("TDAMERITRADE_REFRESH_TOKEN")
if refreshToken == "" {
log.Fatal("Unauthorized: No refresh token present")
}
conf := oauth2.Config{
ClientID: clientID,
Endpoint: oauth2.Endpoint{
TokenURL: "https://api.tdameritrade.com/v1/oauth2/token",
},
RedirectURL: "https://localhost",
}
tkn := &oauth2.Token{
RefreshToken: refreshToken,
}
ctx := context.Background()
tc := conf.Client(ctx, tkn)
c, err := tdameritrade.NewClient(tc)
if err != nil {
log.Fatal(err)
}
accounts, _, err := c.Account.GetAccounts(ctx, nil)
if err != nil {
log.Fatal(err)
}
accountId := (*accounts)[0].AccountID
orderParam := tdameritrade.OrderParams{
MaxResults: 5,
From: time.Now().Add(-24 * time.Hour),
To: time.Time{},
Status: "",
}
orders, _, err := c.Account.GetOrderByPath(ctx, string(accountId), &orderParam)
if err != nil {
log.Fatal(err)
}
size := len(*orders)
log.Println("get orders count", size)
}
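// Hypothetical invocation sketch for this example (the credential values are
// placeholders, not real tokens):
//
// TDAMERITRADE_CLIENT_ID=<client-id> TDAMERITRADE_REFRESH_TOKEN=<refresh-token> go run examples/getOrder.go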
| [
"\"TDAMERITRADE_CLIENT_ID\"",
"\"TDAMERITRADE_REFRESH_TOKEN\""
]
| []
| [
"TDAMERITRADE_REFRESH_TOKEN",
"TDAMERITRADE_CLIENT_ID"
]
| [] | ["TDAMERITRADE_REFRESH_TOKEN", "TDAMERITRADE_CLIENT_ID"] | go | 2 | 0 | |
cli/command/cli.go | package command
import (
"context"
"io"
"net"
"net/http"
"os"
"path/filepath"
"runtime"
"time"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/config"
cliconfig "github.com/docker/cli/cli/config"
"github.com/docker/cli/cli/config/configfile"
cliflags "github.com/docker/cli/cli/flags"
manifeststore "github.com/docker/cli/cli/manifest/store"
registryclient "github.com/docker/cli/cli/registry/client"
"github.com/docker/cli/cli/trust"
dopts "github.com/docker/cli/opts"
"github.com/docker/docker/api"
"github.com/docker/docker/api/types"
registrytypes "github.com/docker/docker/api/types/registry"
"github.com/docker/docker/client"
"github.com/docker/go-connections/tlsconfig"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/theupdateframework/notary"
notaryclient "github.com/theupdateframework/notary/client"
"github.com/theupdateframework/notary/passphrase"
)
// Streams is an interface which exposes the standard input and output streams
type Streams interface {
In() *InStream
Out() *OutStream
Err() io.Writer
}
// Cli represents the docker command line client.
type Cli interface {
Client() client.APIClient
Out() *OutStream
Err() io.Writer
In() *InStream
SetIn(in *InStream)
ConfigFile() *configfile.ConfigFile
ServerInfo() ServerInfo
ClientInfo() ClientInfo
NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error)
DefaultVersion() string
ManifestStore() manifeststore.Store
RegistryClient(bool) registryclient.RegistryClient
ContentTrustEnabled() bool
}
// DockerCli is an instance of the docker command line client.
// Instances of the client can be returned from NewDockerCli.
type DockerCli struct {
configFile *configfile.ConfigFile
in *InStream
out *OutStream
err io.Writer
client client.APIClient
serverInfo ServerInfo
clientInfo ClientInfo
contentTrust bool
}
// DefaultVersion returns api.defaultVersion or DOCKER_API_VERSION if specified.
func (cli *DockerCli) DefaultVersion() string {
return cli.clientInfo.DefaultVersion
}
// Client returns the APIClient
func (cli *DockerCli) Client() client.APIClient {
return cli.client
}
// Out returns the writer used for stdout
func (cli *DockerCli) Out() *OutStream {
return cli.out
}
// Err returns the writer used for stderr
func (cli *DockerCli) Err() io.Writer {
return cli.err
}
// SetIn sets the reader used for stdin
func (cli *DockerCli) SetIn(in *InStream) {
cli.in = in
}
// In returns the reader used for stdin
func (cli *DockerCli) In() *InStream {
return cli.in
}
// ShowHelp shows the command help.
func ShowHelp(err io.Writer) func(*cobra.Command, []string) error {
return func(cmd *cobra.Command, args []string) error {
cmd.SetOutput(err)
cmd.HelpFunc()(cmd, args)
return nil
}
}
// ConfigFile returns the ConfigFile
func (cli *DockerCli) ConfigFile() *configfile.ConfigFile {
return cli.configFile
}
// ServerInfo returns the server version details for the host this client is
// connected to
func (cli *DockerCli) ServerInfo() ServerInfo {
return cli.serverInfo
}
// ClientInfo returns the client details for the cli
func (cli *DockerCli) ClientInfo() ClientInfo {
return cli.clientInfo
}
// ContentTrustEnabled returns whether content trust has been enabled by an
// environment variable.
func (cli *DockerCli) ContentTrustEnabled() bool {
return cli.contentTrust
}
// ManifestStore returns a store for local manifests
func (cli *DockerCli) ManifestStore() manifeststore.Store {
// TODO: support override default location from config file
return manifeststore.NewStore(filepath.Join(config.Dir(), "manifests"))
}
// RegistryClient returns a client for communicating with a Docker distribution
// registry
func (cli *DockerCli) RegistryClient(allowInsecure bool) registryclient.RegistryClient {
resolver := func(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig {
return ResolveAuthConfig(ctx, cli, index)
}
return registryclient.NewRegistryClient(resolver, UserAgent(), allowInsecure)
}
// Initialize runs the DockerCli initialization that must happen after command
// line flags are parsed.
func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions) error {
cli.configFile = cliconfig.LoadDefaultConfigFile(cli.err)
var err error
cli.client, err = NewAPIClientFromFlags(opts.Common, cli.configFile)
if tlsconfig.IsErrEncryptedKey(err) {
passRetriever := passphrase.PromptRetrieverWithInOut(cli.In(), cli.Out(), nil)
newClient := func(password string) (client.APIClient, error) {
opts.Common.TLSOptions.Passphrase = password
return NewAPIClientFromFlags(opts.Common, cli.configFile)
}
cli.client, err = getClientWithPassword(passRetriever, newClient)
}
if err != nil {
return err
}
hasExperimental, err := isEnabled(cli.configFile.Experimental)
if err != nil {
return errors.Wrap(err, "Experimental field")
}
orchestrator, err := GetOrchestrator(opts.Common.Orchestrator, cli.configFile.Orchestrator)
if err != nil {
return err
}
cli.clientInfo = ClientInfo{
DefaultVersion: cli.client.ClientVersion(),
HasExperimental: hasExperimental,
Orchestrator: orchestrator,
}
cli.initializeFromClient()
return nil
}
func isEnabled(value string) (bool, error) {
switch value {
case "enabled":
return true, nil
case "", "disabled":
return false, nil
default:
return false, errors.Errorf("%q is not valid, should be either enabled or disabled", value)
}
}
func (cli *DockerCli) initializeFromClient() {
ping, err := cli.client.Ping(context.Background())
if err != nil {
// Default to true if we fail to connect to daemon
cli.serverInfo = ServerInfo{HasExperimental: true}
if ping.APIVersion != "" {
cli.client.NegotiateAPIVersionPing(ping)
}
return
}
cli.serverInfo = ServerInfo{
HasExperimental: ping.Experimental,
OSType: ping.OSType,
}
cli.client.NegotiateAPIVersionPing(ping)
}
func getClientWithPassword(passRetriever notary.PassRetriever, newClient func(password string) (client.APIClient, error)) (client.APIClient, error) {
for attempts := 0; ; attempts++ {
passwd, giveup, err := passRetriever("private", "encrypted TLS private", false, attempts)
if giveup || err != nil {
return nil, errors.Wrap(err, "private key is encrypted, but could not get passphrase")
}
apiclient, err := newClient(passwd)
if !tlsconfig.IsErrEncryptedKey(err) {
return apiclient, err
}
}
}
// NotaryClient provides a Notary Repository to interact with signed metadata for an image
func (cli *DockerCli) NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error) {
return trust.GetNotaryRepository(cli.In(), cli.Out(), UserAgent(), imgRefAndAuth.RepoInfo(), imgRefAndAuth.AuthConfig(), actions...)
}
// ServerInfo stores details about the supported features and platform of the
// server
type ServerInfo struct {
HasExperimental bool
OSType string
}
// ClientInfo stores details about the supported features of the client
type ClientInfo struct {
HasExperimental bool
DefaultVersion string
Orchestrator Orchestrator
}
// HasKubernetes checks if kubernetes orchestrator is enabled
func (c ClientInfo) HasKubernetes() bool {
return c.Orchestrator == OrchestratorKubernetes || c.Orchestrator == OrchestratorAll
}
// HasSwarm checks if swarm orchestrator is enabled
func (c ClientInfo) HasSwarm() bool {
return c.Orchestrator == OrchestratorSwarm || c.Orchestrator == OrchestratorAll
}
// HasAll checks if all orchestrator is enabled
func (c ClientInfo) HasAll() bool {
return c.Orchestrator == OrchestratorAll
}
// NewDockerCli returns a DockerCli instance with IO output and error streams set by in, out and err.
func NewDockerCli(in io.ReadCloser, out, err io.Writer, isTrusted bool) *DockerCli {
return &DockerCli{in: NewInStream(in), out: NewOutStream(out), err: err, contentTrust: isTrusted}
}
// NewAPIClientFromFlags creates a new APIClient from command line flags
func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.ConfigFile) (client.APIClient, error) {
host, err := getServerHost(opts.Hosts, opts.TLSOptions)
if err != nil {
return &client.Client{}, err
}
customHeaders := configFile.HTTPHeaders
if customHeaders == nil {
customHeaders = map[string]string{}
}
customHeaders["User-Agent"] = UserAgent()
verStr := api.DefaultVersion
if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" {
verStr = tmpStr
}
return client.NewClientWithOpts(
withHTTPClient(opts.TLSOptions),
client.WithHTTPHeaders(customHeaders),
client.WithVersion(verStr),
client.WithHost(host),
)
}
func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (string, error) {
var host string
switch len(hosts) {
case 0:
host = os.Getenv("DOCKER_HOST")
case 1:
host = hosts[0]
default:
return "", errors.New("Please specify only one -H")
}
return dopts.ParseHost(tlsOptions != nil, host)
}
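// Illustrative note (the host strings below are hypothetical examples): when no
// -H flag is given, getServerHost falls back to DOCKER_HOST, so values such as
// "unix:///var/run/docker.sock" or "tcp://192.168.99.100:2376" are normalized
// by dopts.ParseHost before the API client is built.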
func withHTTPClient(tlsOpts *tlsconfig.Options) func(*client.Client) error {
return func(c *client.Client) error {
if tlsOpts == nil {
// Use the default HTTPClient
return nil
}
opts := *tlsOpts
opts.ExclusiveRootPools = true
tlsConfig, err := tlsconfig.Client(opts)
if err != nil {
return err
}
httpClient := &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsConfig,
DialContext: (&net.Dialer{
KeepAlive: 30 * time.Second,
Timeout: 30 * time.Second,
}).DialContext,
},
CheckRedirect: client.CheckRedirect,
}
return client.WithHTTPClient(httpClient)(c)
}
}
// UserAgent returns the user agent string used for making API requests
func UserAgent() string {
return "Docker-Client/" + cli.Version + " (" + runtime.GOOS + ")"
}
| [
"\"DOCKER_API_VERSION\"",
"\"DOCKER_HOST\""
]
| []
| [
"DOCKER_HOST",
"DOCKER_API_VERSION"
]
| [] | ["DOCKER_HOST", "DOCKER_API_VERSION"] | go | 2 | 0 | |
config.py | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config():
SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
MONGO_URI = os.environ.get('MONGO_URI', os.environ.get('PRITUNL_MONGODB_URI'))
| []
| []
| [
"MONGO_URI",
"SECRET_KEY",
"PRITUNL_MONGODB_URI"
]
| [] | ["MONGO_URI", "SECRET_KEY", "PRITUNL_MONGODB_URI"] | python | 3 | 0 | |
sosw/components/test/unit/test_sns.py | import boto3
import json
import logging
import shutil
import unittest
import uuid
import os
import csv
from collections import defaultdict
from unittest.mock import MagicMock
from sosw.components.sns import *
logging.getLogger('botocore').setLevel(logging.WARNING)
os.environ["STAGE"] = "test"
os.environ["autotest"] = "True"
class sns_TestCase(unittest.TestCase):
def clean_queue(self):
setattr(self.sns, 'queue', [])
def setUp(self):
self.sns = SnsManager(test=True, subject='Autotest SNS Subject')
self.sns.commit = MagicMock(side_effect=self.clean_queue)
def tearDown(self):
pass
def test_init__reads_config(self):
sns = SnsManager(config={'subject': 'subj', 'recipient': 'arn::some_topic'})
self.assertEqual(sns.recipient, 'arn:aws:sns:us-west-2:000000000000:autotest_topic',
"The Topic must be automatically reset for test")
self.assertEqual(sns.subject, 'subj', "Subject was not set during __init__ from config.")
def test_queue_message(self):
self.sns.send_message("test message")
self.assertEqual(len(self.sns.queue), 1, "Default send_message() did not queue the message.")
def test_queue_message_with_subject(self):
self.sns.send_message("test message", subject="New Subject")
self.assertEqual(len(self.sns.queue), 1, "send_message() with custom subject did not queue.")
def test_commit_queue(self):
self.sns.send_message("test message")
self.sns.commit()
self.assertEqual(len(self.sns.queue), 0, f"Commit did not clean queue")
self.sns.commit.assert_called_once()
def test_commit_on_change_subject(self):
self.sns.send_message("test message")
self.sns.set_subject("New Subject")
self.assertEqual(len(self.sns.queue), 0, "On change subject the queue should be committed.")
def test_no_commit_on_change_subject_if_subject_is_same(self):
self.sns.send_message("test message")
self.sns.set_subject("Autotest SNS Subject")
self.assertEqual(len(self.sns.queue), 1, "On change subject the queue should be committed.")
def test_no_commit_on_same_subject(self):
self.sns.send_message("test message")
self.sns.send_message("test message", subject="Autotest SNS Subject")
self.assertEqual(len(self.sns.queue), 2, "On sending message with exactly same subject, it should be queued.")
def test_commit_and_queue_on_change_subject(self):
self.sns.send_message("test message")
self.assertEqual(len(self.sns.queue), 1)
self.sns.send_message("test message", subject="New Subject")
self.assertEqual(len(self.sns.queue), 1, "On change subject, old message should be committed, new one queued.")
def test_commit_auto_on_change_recipient(self):
self.sns.send_message("test message")
self.assertEqual(len(self.sns.queue), 1, f"Initial send_message() did not queue the message")
self.sns.set_recipient('arn:aws:sns:new_recipient')
self.assertEqual(len(self.sns.queue), 0)
def test_no_commit_on_change_recipient_if_recipient_is_same(self):
self.sns.send_message("test message")
self.assertEqual(len(self.sns.queue), 1, f"Initial send_message() did not queue the message")
self.sns.set_recipient('arn:aws:sns:us-west-2:000000000000:autotest_topic')
self.assertEqual(len(self.sns.queue), 1)
def test_validate_recipient(self):
"""
Must be a string with ARN of SNS Topic. Validator just checks that string starts with 'arn:aws:'
"""
self.assertRaises(AssertionError, self.sns.set_recipient, 'just_new_recipient_not_full_arn')
def test_create_topic_invalid_name(self):
with self.assertRaises(RuntimeError) as exc:
self.sns.create_topic('')
self.assertEqual(str(exc.exception), "You passed invalid topic name")
def test_create_topic_return_value(self):
self.sns.client = MagicMock()
self.sns.client.create_topic = MagicMock(return_value={'TopicArn': 'test_arn'})
self.assertEqual(self.sns.create_topic('topic_name'), 'test_arn')
def test_create_subscription_invalid_params(self):
with self.assertRaises(RuntimeError) as exc:
self.sns.create_subscription('', 'protocol', 'endpoint')
self.assertEqual(str(exc.exception), "You must send valid topic ARN, Protocol and Endpoint to add a subscription")
def test_get_message_attribute_validate_output(self):
self.assertEqual(self.sns.get_message_attribute(10), {'DataType': 'Number', 'StringValue': '10'})
self.assertEqual(self.sns.get_message_attribute(10.99), {'DataType': 'Number', 'StringValue': '10.99'})
self.assertEqual(self.sns.get_message_attribute('Test'), {'DataType': 'String', 'StringValue': 'Test'})
self.assertEqual(
self.sns.get_message_attribute(['Test1', 'Test2', 'Test3']),
{'DataType': 'String.Array', 'StringValue': json.dumps(['Test1', 'Test2', 'Test3'])}
)
def test_commit_on_change_message_attributes(self):
self.sns.send_message("test message")
self.assertEqual(len(self.sns.queue), 1, "There is 1 message in the queue.")
self.sns.send_message("test message", message_attributes={'price': 100})
self.assertEqual(len(self.sns.queue), 1, "On change message_attributes, old message should be committed, "
"new one queued.")
self.sns.send_message("test message", message_attributes={'price': 100})
self.assertEqual(len(self.sns.queue), 2, "On sending message with exactly same message_attributes, it should "
"be queued.")
self.sns.send_message("test message", message_attributes={'price': 100, 'cancellation': True})
self.assertEqual(len(self.sns.queue), 1, "On sending message with different message_attributes, old messages "
"should be committed. New one should be queued.")
if __name__ == '__main__':
unittest.main()
| []
| []
| [
"autotest",
"STAGE"
]
| [] | ["autotest", "STAGE"] | python | 2 | 0 | |
go/src/github.com/hashicorp/terraform/builtin/providers/github/resource_github_team_membership_test.go | package github
import (
"fmt"
"testing"
"github.com/google/go-github/github"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"os"
)
func TestAccGithubTeamMembership_basic(t *testing.T) {
var membership github.Membership
testUser := os.Getenv("GITHUB_TEST_USER")
testAccGithubTeamMembershipConfig := fmt.Sprintf(`
resource "github_membership" "test_org_membership" {
username = "%s"
role = "member"
}
resource "github_team" "test_team" {
name = "foo"
description = "Terraform acc test group"
}
resource "github_team_membership" "test_team_membership" {
team_id = "${github_team.test_team.id}"
username = "%s"
role = "member"
}
`, testUser, testUser)
testAccGithubTeamMembershipUpdateConfig := fmt.Sprintf(`
resource "github_membership" "test_org_membership" {
username = "%s"
role = "member"
}
resource "github_team" "test_team" {
name = "foo"
description = "Terraform acc test group"
}
resource "github_team_membership" "test_team_membership" {
team_id = "${github_team.test_team.id}"
username = "%s"
role = "maintainer"
}
`, testUser, testUser)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckGithubTeamMembershipDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccGithubTeamMembershipConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckGithubTeamMembershipExists("github_team_membership.test_team_membership", &membership),
testAccCheckGithubTeamMembershipRoleState("github_team_membership.test_team_membership", "member", &membership),
),
},
resource.TestStep{
Config: testAccGithubTeamMembershipUpdateConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckGithubTeamMembershipExists("github_team_membership.test_team_membership", &membership),
testAccCheckGithubTeamMembershipRoleState("github_team_membership.test_team_membership", "maintainer", &membership),
),
},
},
})
}
func testAccCheckGithubTeamMembershipDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*Organization).client
for _, rs := range s.RootModule().Resources {
if rs.Type != "github_team_membership" {
continue
}
t, u := parseTwoPartID(rs.Primary.ID)
membership, resp, err := conn.Organizations.GetTeamMembership(toGithubID(t), u)
if err == nil {
if membership != nil {
return fmt.Errorf("Team membership still exists")
}
}
if resp.StatusCode != 404 {
return err
}
return nil
}
return nil
}
func testAccCheckGithubTeamMembershipExists(n string, membership *github.Membership) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not Found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No team membership ID is set")
}
conn := testAccProvider.Meta().(*Organization).client
t, u := parseTwoPartID(rs.Primary.ID)
teamMembership, _, err := conn.Organizations.GetTeamMembership(toGithubID(t), u)
if err != nil {
return err
}
*membership = *teamMembership
return nil
}
}
func testAccCheckGithubTeamMembershipRoleState(n, expected string, membership *github.Membership) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not Found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No team membership ID is set")
}
conn := testAccProvider.Meta().(*Organization).client
t, u := parseTwoPartID(rs.Primary.ID)
teamMembership, _, err := conn.Organizations.GetTeamMembership(toGithubID(t), u)
if err != nil {
return err
}
resourceRole := membership.Role
actualRole := teamMembership.Role
if *resourceRole != expected {
return fmt.Errorf("Team membership role %v in resource does match expected state of %v", *resourceRole, expected)
}
if *resourceRole != *actualRole {
return fmt.Errorf("Team membership role %v in resource does match actual state of %v", *resourceRole, *actualRole)
}
return nil
}
}
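// Hedged sketch of running this acceptance test (the user value is a
// placeholder; the usual Terraform acceptance gate TF_ACC is assumed):
//
// GITHUB_TEST_USER=some-user TF_ACC=1 go test -run TestAccGithubTeamMembership_basic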
| [
"\"GITHUB_TEST_USER\""
]
| []
| [
"GITHUB_TEST_USER"
]
| [] | ["GITHUB_TEST_USER"] | go | 1 | 0 | |
src/pal/automation/util.py | import sys
import getopt
import os
import subprocess
import shutil
import logging as log
def Initialize(platform):
print "Initializing Workspace"
global workspace
workspace = os.environ['WORKSPACE']
if platform == "windows":
# Jenkins puts quotes in the path, which is wrong. Remove quotes.
os.environ['PATH'] = os.environ['PATH'].replace('"','')
return workspace
def ParseArgs(argv):
print "Parsing arguments for compile"
try:
opts, args = getopt.getopt(argv, "t:p:a:v", ["target=", "platform=", "arch=", "verbose","noclean"])
except getopt.GetoptError:
print "ERROR: \n\t usage: python compile.py --target <target> --platform <windows|linux> --arch <arch> [--verbose] [--noclean]"
return 2,"","","",True
# Initialize defaults so missing options are reported below instead of raising NameError
target = ""
platform = ""
arch = ""
verbose = False
cleanUp = True
acceptedPlatforms = ['windows', 'linux']
for opt, arg in opts:
if opt in ("-t", "--target"):
target = arg
elif opt in ("-p", "--platform"):
if arg.lower() not in acceptedPlatforms:
print "ERROR: " + arg + "not an accepted platform. Use windows or linux."
sys.exit(2)
platform = arg.lower()
elif opt in ("-a", "--arch"):
arch = arg
elif opt in ("-v", "--verbose"):
verbose = True
elif opt in ("-c", "--noclean"):
cleanUp = False
if verbose:
log.basicConfig(format="%(levelname)s: %(message)s", level=log.DEBUG)
log.info("In verbose mode.")
else:
log.basicConfig(format="%(levelname)s: %(message)s")
if target == "" or platform == "" or arch == "":
# must specify target, platform and arch
log.error("Must specify target, platform and arch")
return 2,"","","",True
return 0,target,platform,arch,cleanUp
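# Example invocation for the parser above (a sketch; the target/arch values are
# hypothetical):
# python compile.py --target release --platform linux --arch x64 --verbose
# -> returns (0, "release", "linux", "x64", True)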
def SetupDirectories(target, arch, platform):
log.info("Setting up directories")
global rootdir
global builddir
global fullBuildDirPath
rootdir = "build"
if not os.path.isdir(rootdir):
os.mkdir(rootdir)
os.chdir(rootdir)
builddir = "build-" + platform
if platform == "windows":
builddir = builddir + "-" + arch + "-" + target
if os.path.isdir(builddir):
shutil.rmtree(builddir)
os.mkdir(builddir)
os.chdir(builddir)
fullBuildDirPath = workspace + "/" + rootdir + "/" + builddir
return fullBuildDirPath
def Cleanup(cleanUp,workspace):
print "\n==================================================\n"
print "Cleaning Up."
print "\n==================================================\n"
if cleanUp:
os.chdir(workspace + "/" + rootdir)
shutil.rmtree(builddir)
os.chdir("..")
shutil.rmtree(rootdir)
log.shutdown()
return 0
| []
| []
| [
"WORKSPACE",
"PATH"
]
| [] | ["WORKSPACE", "PATH"] | python | 2 | 0 | |
server/config.py | from dotenv import load_dotenv
import os
import redis
load_dotenv()
class ApplicationConfig:
SECRET_KEY = os.environ["SECRET_KEY"]
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_ECHO = True
SQLALCHEMY_DATABASE_URI = r"sqlite:///./db.sqlite"
SESSION_TYPE = "redis"
SESSION_PERMANENT = False
SESSION_USE_SIGNER = True
SESSION_REDIS = redis.from_url("redis://127.0.0.1:6379") | []
| []
| [
"SECRET_KEY"
]
| [] | ["SECRET_KEY"] | python | 1 | 0 | |
traffic_ops_ort/atstccfg/config/config.go | package config
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import (
"errors"
"fmt"
"net/url"
"os"
"strings"
"time"
"github.com/apache/trafficcontrol/lib/go-atscfg"
"github.com/apache/trafficcontrol/lib/go-log"
"github.com/apache/trafficcontrol/lib/go-tc"
"github.com/apache/trafficcontrol/traffic_ops_ort/atstccfg/toreq"
"github.com/apache/trafficcontrol/traffic_ops_ort/atstccfg/toreqnew"
flag "github.com/ogier/pflag"
)
const AppName = "atstccfg"
const Version = "0.2"
const UserAgent = AppName + "/" + Version
const ExitCodeSuccess = 0
const ExitCodeErrGeneric = 1
const ExitCodeNotFound = 104
const ExitCodeBadRequest = 100
var ErrNotFound = errors.New("not found")
var ErrBadRequest = errors.New("bad request")
type Cfg struct {
CacheHostName string
DisableProxy bool
GetData string
ListPlugins bool
LogLocationErr string
LogLocationInfo string
LogLocationWarn string
NumRetries int
RevalOnly bool
SetQueueStatus string
SetRevalStatus string
TOInsecure bool
TOPass string
TOTimeout time.Duration
TOURL *url.URL
TOUser string
Dir string
}
type TCCfg struct {
Cfg
TOClient *toreq.TOClient
TOClientNew *toreqnew.TOClient
}
func (cfg Cfg) ErrorLog() log.LogLocation { return log.LogLocation(cfg.LogLocationErr) }
func (cfg Cfg) WarningLog() log.LogLocation { return log.LogLocation(cfg.LogLocationWarn) }
func (cfg Cfg) InfoLog() log.LogLocation { return log.LogLocation(cfg.LogLocationInfo) }
func (cfg Cfg) DebugLog() log.LogLocation { return log.LogLocation(log.LogLocationNull) } // atstccfg doesn't use the debug logger, use Info instead.
func (cfg Cfg) EventLog() log.LogLocation { return log.LogLocation(log.LogLocationNull) } // atstccfg doesn't use the event logger.
// GetCfg gets the application configuration, from arguments and environment variables.
func GetCfg() (Cfg, error) {
toURLPtr := flag.StringP("traffic-ops-url", "u", "", "Traffic Ops URL. Must be the full URL, including the scheme. Required. May also be set with the environment variable TO_URL.")
toUserPtr := flag.StringP("traffic-ops-user", "U", "", "Traffic Ops username. Required. May also be set with the environment variable TO_USER.")
toPassPtr := flag.StringP("traffic-ops-password", "P", "", "Traffic Ops password. Required. May also be set with the environment variable TO_PASS.")
numRetriesPtr := flag.IntP("num-retries", "r", 5, "The number of times to retry getting a file if it fails.")
logLocationErrPtr := flag.StringP("log-location-error", "e", "stderr", "Where to log errors. May be a file path, stdout, stderr, or null.")
logLocationWarnPtr := flag.StringP("log-location-warning", "w", "stderr", "Where to log warnings. May be a file path, stdout, stderr, or null.")
logLocationInfoPtr := flag.StringP("log-location-info", "i", "stderr", "Where to log information messages. May be a file path, stdout, stderr, or null.")
toInsecurePtr := flag.BoolP("traffic-ops-insecure", "s", false, "Whether to ignore HTTPS certificate errors from Traffic Ops. It is HIGHLY RECOMMENDED to never use this in a production environment, but only for debugging.")
toTimeoutMSPtr := flag.IntP("traffic-ops-timeout-milliseconds", "t", 30000, "Timeout in milliseconds for Traffic Ops requests.")
versionPtr := flag.BoolP("version", "v", false, "Print version information and exit.")
listPluginsPtr := flag.BoolP("list-plugins", "l", false, "Print the list of plugins.")
helpPtr := flag.BoolP("help", "h", false, "Print usage information and exit")
cacheHostNamePtr := flag.StringP("cache-host-name", "n", "", "Host name of the cache to generate config for. Must be the server host name in Traffic Ops, not a URL, and not the FQDN")
getDataPtr := flag.StringP("get-data", "d", "", "non-config-file Traffic Ops Data to get. Valid values are update-status, packages, chkconfig, system-info, and statuses")
setQueueStatusPtr := flag.StringP("set-queue-status", "q", "", "POSTs to Traffic Ops setting the queue status of the server. Must be 'true' or 'false'. Requires --set-reval-status also be set")
setRevalStatusPtr := flag.StringP("set-reval-status", "a", "", "POSTs to Traffic Ops setting the revalidate status of the server. Must be 'true' or 'false'. Requires --set-queue-status also be set")
revalOnlyPtr := flag.BoolP("revalidate-only", "y", false, "Whether to exclude files not named 'regex_revalidate.config'")
disableProxyPtr := flag.BoolP("traffic-ops-disable-proxy", "p", false, "Whether to not use the Traffic Ops proxy specified in the GLOBAL Parameter tm.rev_proxy.url")
dirPtr := flag.StringP("dir", "D", "", "ATS config directory, used for config files without location parameters or with relative paths. May be blank. If blank and any required config file location parameter is missing or relative, will error.")
flag.Parse()
if *versionPtr {
fmt.Println(AppName + " v" + Version)
os.Exit(0)
} else if *helpPtr {
flag.PrintDefaults()
os.Exit(0)
} else if *listPluginsPtr {
return Cfg{ListPlugins: true}, nil
}
toURL := *toURLPtr
toUser := *toUserPtr
toPass := *toPassPtr
numRetries := *numRetriesPtr
logLocationErr := *logLocationErrPtr
logLocationWarn := *logLocationWarnPtr
logLocationInfo := *logLocationInfoPtr
toInsecure := *toInsecurePtr
toTimeout := time.Millisecond * time.Duration(*toTimeoutMSPtr)
listPlugins := *listPluginsPtr
cacheHostName := *cacheHostNamePtr
getData := *getDataPtr
setQueueStatus := *setQueueStatusPtr
setRevalStatus := *setRevalStatusPtr
revalOnly := *revalOnlyPtr
disableProxy := *disableProxyPtr
dir := *dirPtr
urlSourceStr := "argument" // for error messages
if toURL == "" {
urlSourceStr = "environment variable"
toURL = os.Getenv("TO_URL")
}
if toUser == "" {
toUser = os.Getenv("TO_USER")
}
// TO_PASSWORD is preferred over TO_PASS, as it's the one commonly used by
// Traffic Control tools. Hopefully, we'll be able to get rid of TO_PASS
// entirely in the near future, to make this less confusing.
if toPass == "" {
toPass = os.Getenv("TO_PASS")
}
if toPass == "" {
toPass = os.Getenv("TO_PASSWORD")
}
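// Hedged shell sketch for the fallbacks above (all values are hypothetical).
// Note the lookup order in this code: a non-empty TO_PASS is consulted before
// TO_PASSWORD.
//
// export TO_URL=https://trafficops.example.com
// export TO_USER=admin
// export TO_PASSWORD=mypass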
usageStr := "Usage: ./" + AppName + " --traffic-ops-url=myurl --traffic-ops-user=myuser --traffic-ops-password=mypass --cache-host-name=my-cache"
if strings.TrimSpace(toURL) == "" {
return Cfg{}, errors.New("Missing required argument --traffic-ops-url or TO_URL environment variable. " + usageStr)
}
if strings.TrimSpace(toUser) == "" {
return Cfg{}, errors.New("Missing required argument --traffic-ops-user or TO_USER environment variable. " + usageStr)
}
if strings.TrimSpace(toPass) == "" {
return Cfg{}, errors.New("Missing required argument --traffic-ops-password or TO_PASS environment variable. " + usageStr)
}
if strings.TrimSpace(cacheHostName) == "" {
return Cfg{}, errors.New("Missing required argument --cache-host-name. " + usageStr)
}
toURLParsed, err := url.Parse(toURL)
if err != nil {
return Cfg{}, errors.New("parsing Traffic Ops URL from " + urlSourceStr + " '" + toURL + "': " + err.Error())
} else if err := ValidateURL(toURLParsed); err != nil {
return Cfg{}, errors.New("invalid Traffic Ops URL from " + urlSourceStr + " '" + toURL + "': " + err.Error())
}
cfg := Cfg{
LogLocationErr: logLocationErr,
LogLocationWarn: logLocationWarn,
LogLocationInfo: logLocationInfo,
NumRetries: numRetries,
TOInsecure: toInsecure,
TOPass: toPass,
TOTimeout: toTimeout,
TOURL: toURLParsed,
TOUser: toUser,
ListPlugins: listPlugins,
CacheHostName: cacheHostName,
GetData: getData,
SetRevalStatus: setRevalStatus,
SetQueueStatus: setQueueStatus,
RevalOnly: revalOnly,
DisableProxy: disableProxy,
Dir: dir,
}
if err := log.InitCfg(cfg); err != nil {
return Cfg{}, errors.New("Initializing loggers: " + err.Error() + "\n")
}
return cfg, nil
}
func ValidateURL(u *url.URL) error {
if u == nil {
return errors.New("nil url")
}
if u.Scheme != "http" && u.Scheme != "https" {
return errors.New("scheme expected 'http' or 'https', actual '" + u.Scheme + "'")
}
if strings.TrimSpace(u.Host) == "" {
return errors.New("no host")
}
return nil
}
type ATSConfigFile struct {
tc.ATSConfigMetaDataConfigFile
Text string
ContentType string
LineComment string
}
// ATSConfigFiles implements sort.Interface and sorts by the Location and then FileNameOnDisk, i.e. the full file path.
type ATSConfigFiles []ATSConfigFile
func (fs ATSConfigFiles) Len() int { return len(fs) }
func (fs ATSConfigFiles) Less(i, j int) bool {
if fs[i].Location != fs[j].Location {
return fs[i].Location < fs[j].Location
}
return fs[i].FileNameOnDisk < fs[j].FileNameOnDisk
}
func (fs ATSConfigFiles) Swap(i, j int) { fs[i], fs[j] = fs[j], fs[i] }
// TOData is the Traffic Ops data needed to generate configs.
// See each field for details on the data required.
// - If a field says 'must', the creation of TOData is guaranteed to do so, and users of the struct may rely on that.
// - If it says 'may', the creation may or may not do so, and therefore users of the struct must filter if they
// require the potential fields to be omitted to generate correctly.
type TOData struct {
// Servers must be all the servers from Traffic Ops. May include servers not on the current cdn.
Servers []tc.ServerNullable
// CacheGroups must be all cachegroups in Traffic Ops with Servers on the current server's cdn. May also include CacheGroups without servers on the current cdn.
CacheGroups []tc.CacheGroupNullable
// GlobalParams must be all Parameters in Traffic Ops on the tc.GlobalProfileName Profile. Must not include other parameters.
GlobalParams []tc.Parameter
// ScopeParams must be all Parameters in Traffic Ops with the name "scope". Must not include other Parameters.
ScopeParams []tc.Parameter
// ServerParams must be all Parameters on the Profile of the current server. Must not include other Parameters.
ServerParams []tc.Parameter
// CacheKeyParams must be all Parameters with the ConfigFile atscfg.CacheKeyParameterConfigFile.
CacheKeyParams []tc.Parameter
// ParentConfigParams must be all Parameters with the ConfigFile "parent.config.
ParentConfigParams []tc.Parameter
// DeliveryServices must include all Delivery Services on the current server's cdn, including those not assigned to the server. Must not include delivery services on other cdns.
DeliveryServices []tc.DeliveryServiceNullableV30
// DeliveryServiceServers must include all delivery service servers in Traffic Ops for all delivery services on the current cdn, including those not assigned to the current server.
DeliveryServiceServers []tc.DeliveryServiceServer
// Server must be the server we're fetching configs from
Server *tc.ServerNullable
// TOToolName must be the Parameter named 'tm.toolname' on the tc.GlobalConfigFileName Profile.
TOToolName string
// TOURL must be the Parameter named 'tm.url' on the tc.GlobalConfigFileName Profile.
TOURL string
// Jobs must be all Jobs on the server's CDN. May include jobs on other CDNs.
Jobs []tc.Job
// CDN must be the CDN of the server.
CDN tc.CDN
// DeliveryServiceRegexes must be all regexes on all delivery services on this server's cdn.
DeliveryServiceRegexes []tc.DeliveryServiceRegexes
// Profile must be the Profile of the server being requested.
Profile tc.Profile
// URISigningKeys must be a map of every delivery service which is URI Signed, to its keys.
URISigningKeys map[tc.DeliveryServiceName][]byte
// URLSigKeys must be a map of every delivery service which uses URL Sig, to its keys.
URLSigKeys map[tc.DeliveryServiceName]tc.URLSigKeys
// ServerCapabilities must be a map of all server IDs on this server's CDN, to a set of their capabilities. May also include servers from other cdns.
ServerCapabilities map[int]map[atscfg.ServerCapability]struct{}
// DSRequiredCapabilities must be a map of all delivery service IDs on this server's CDN, to a set of their required capabilities. Delivery Services with no required capabilities may not have an entry in the map.
DSRequiredCapabilities map[int]map[atscfg.ServerCapability]struct{}
// SSLKeys must be all the ssl keys for the server's cdn.
SSLKeys []tc.CDNSSLKeys
// Topologies must be all the topologies for the server's cdn.
// May include topologies of other cdns.
Topologies []tc.Topology
}
| [
"\"TO_URL\"",
"\"TO_USER\"",
"\"TO_PASS\"",
"\"TO_PASSWORD\""
]
| []
| [
"TO_URL",
"TO_PASSWORD",
"TO_PASS",
"TO_USER"
]
| [] | ["TO_URL", "TO_PASSWORD", "TO_PASS", "TO_USER"] | go | 4 | 0 | |
src/main/java/io/cloudreactor/javaquickstart/Main.java | package io.cloudreactor.javaquickstart;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.cloudreactor.tasksymphony.wrapperio.TaskStatusUpdater;
/** The main class of this application, which demonstrates the use of the
* task status updater.
*/
public final class Main {
private Main() { }
/** Entrypoint.
*
* @param args An array of command-line arguments.
*/
public static void main(final String[] args) {
var taskName = "main";
if (args.length > 0) {
taskName = args[0];
}
switch (taskName) {
case "main" -> runMainTask();
case "add" -> runAdderTask();
case "readsecret" -> readSecret();
default -> throw new RuntimeException("Unknown task name: " + taskName);
}
}
static void runMainTask() {
try (TaskStatusUpdater statusUpdater = new TaskStatusUpdater()) {
statusUpdater.sendUpdateAndIgnoreError(
0L, // successCount
null, // errorCount
null, // skippedCount
10L, // expectedCount
"Starting Java QuickStart app ...", // lastStatusMessage
null // extraProps
);
LOGGER.info("Hello!");
for (long i = 0; i < 10; i++) {
statusUpdater.sendUpdateAndIgnoreError(
i, // successCount
null, // errorCount
null, // skippedCount
null, // expectedCount
"Updating row " + i + " ...", // lastStatusMessage
null // extraProps
);
}
statusUpdater.sendUpdateAndIgnoreError(
null, // successCount
null, // errorCount
null, // skippedCount
null, // expectedCount
"Finished Java QuickStart app!", // lastStatusMessage
null // extraProps
);
}
}
static void runAdderTask() {
var result = add(5, 10);
try (TaskStatusUpdater statusUpdater = new TaskStatusUpdater()) {
statusUpdater.sendUpdateAndIgnoreError(
null, // successCount
null, // errorCount
null, // skippedCount
null, // expectedCount
"Result = " + result, // lastStatusMessage
null // extraProps
);
}
}
static int add(final int a, final int b) {
return a + b;
}
static void readSecret() {
try (TaskStatusUpdater statusUpdater = new TaskStatusUpdater()) {
var secret = System.getenv("SECRET_VALUE");
var message = "Secret is " + secret;
System.out.println(message);
statusUpdater.sendUpdateAndIgnoreError(
null, // successCount
null, // errorCount
null, // skippedCount
null, // expectedCount
message, // lastStatusMessage
null // extraProps
);
}
}
/** Logger for this class. */
private static final Logger LOGGER = LoggerFactory.getLogger(Main.class);
}
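// Hypothetical usage sketch (the jar name and secret value are assumptions):
// SECRET_VALUE=shh java -jar java-quickstart.jar readsecret -> prints "Secret is shh"
// java -jar java-quickstart.jar add -> reports "Result = 15"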
| [
"\"SECRET_VALUE\""
]
| []
| [
"SECRET_VALUE"
]
| [] | ["SECRET_VALUE"] | java | 1 | 0 | |
gago/accounts_test.go | package gago
import (
"os"
"testing"
)
// TestAccounts tests fetching the account summary; skipped when no auth file is configured.
func TestAccounts(t *testing.T) {
if os.Getenv("GAGO_AUTH") == "" {
t.Skip("Skip test, no auth")
}
authFile := os.Getenv("GAGO_AUTH")
_, analyticsService := Authenticate(authFile)
GetAccountSummary(analyticsService)
}
| [
"\"GAGO_AUTH\"",
"\"GAGO_AUTH\""
]
| []
| [
"GAGO_AUTH"
]
| [] | ["GAGO_AUTH"] | go | 1 | 0 | |
cmd/kat-client/client.go | package main
import (
"bytes"
"context"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/binary"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"syscall"
"time"
grpc_echo_pb "github.com/datawire/ambassador/pkg/api/kat"
"github.com/gogo/protobuf/proto"
"github.com/gorilla/websocket"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
// Should we output GRPCWeb debugging?
var debug_grpc_web bool // We set this value in main() XXX This is a hack
// Limit concurrency
// Semaphore is a counting semaphore that can be used to limit concurrency.
type Semaphore chan bool
// NewSemaphore returns a new Semaphore with the specified capacity.
func NewSemaphore(n int) Semaphore {
sem := make(Semaphore, n)
for i := 0; i < n; i++ {
sem.Release()
}
return sem
}
// Acquire blocks until a slot/token is available.
func (s Semaphore) Acquire() {
<-s
}
// Release returns a slot/token to the pool.
func (s Semaphore) Release() {
s <- true
}
// rlimit raises the process's RLIMIT_NOFILE soft and hard limits so the client
// can keep a large number of connections open at once.
func rlimit() {
var rLimit syscall.Rlimit
err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rLimit)
if err != nil {
log.Println("Error getting rlimit:", err)
} else {
log.Println("Initial rlimit:", rLimit)
}
rLimit.Max = 999999
rLimit.Cur = 999999
err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rLimit)
if err != nil {
log.Println("Error setting rlimit:", err)
}
err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rLimit)
if err != nil {
log.Println("Error getting rlimit:", err)
} else {
log.Println("Final rlimit", rLimit)
}
}
// Query and Result manipulation
// Query represents one kat query as read from the supplied input. It will be
// mutated to include results from that query.
type Query map[string]interface{}
// CACert returns the "ca_cert" field as a string or returns the empty string.
func (q Query) CACert() string {
val, ok := q["ca_cert"]
if ok {
return val.(string)
}
return ""
}
// ClientCert returns the "client_cert" field as a string or returns the empty string.
func (q Query) ClientCert() string {
val, ok := q["client_cert"]
if ok {
return val.(string)
}
return ""
}
// ClientKey returns the "client_key" field as a string or returns the empty string.
func (q Query) ClientKey() string {
val, ok := q["client_key"]
if ok {
return val.(string)
}
return ""
}
// Insecure returns whether the query has a field called "insecure" whose value is true.
func (q Query) Insecure() bool {
val, ok := q["insecure"]
return ok && val.(bool)
}
// SNI returns whether the query has a field called "sni" whose value is true.
func (q Query) SNI() bool {
val, ok := q["sni"]
return ok && val.(bool)
}
// IsWebsocket returns whether the query's URL starts with "ws:".
func (q Query) IsWebsocket() bool {
return strings.HasPrefix(q.URL(), "ws:")
}
// URL returns the query's URL.
func (q Query) URL() string {
return q["url"].(string)
}
// MinTLSVersion returns the minimum TLS protocol version.
func (q Query) MinTLSVersion() uint16 {
switch q["minTLSv"].(string) {
case "v1.0":
return tls.VersionTLS10
case "v1.1":
return tls.VersionTLS11
case "v1.2":
return tls.VersionTLS12
case "v1.3":
return tls.VersionTLS13
default:
return 0
}
}
// MaxTLSVersion returns the maximum TLS protocol version.
func (q Query) MaxTLSVersion() uint16 {
switch q["maxTLSv"].(string) {
case "v1.0":
return tls.VersionTLS10
case "v1.1":
return tls.VersionTLS11
case "v1.2":
return tls.VersionTLS12
case "v1.3":
return tls.VersionTLS13
default:
return 0
}
}
// CipherSuites returns the list of configured Cipher Suites
func (q Query) CipherSuites() []uint16 {
val, ok := q["cipherSuites"]
if !ok {
return []uint16{}
}
cs := []uint16{}
for _, s := range val.([]interface{}) {
switch s.(string) {
// TLS 1.0 - 1.2 cipher suites.
case "TLS_RSA_WITH_RC4_128_SHA":
cs = append(cs, tls.TLS_RSA_WITH_RC4_128_SHA)
case "TLS_RSA_WITH_3DES_EDE_CBC_SHA":
cs = append(cs, tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA)
case "TLS_RSA_WITH_AES_128_CBC_SHA":
cs = append(cs, tls.TLS_RSA_WITH_AES_128_CBC_SHA)
case "TLS_RSA_WITH_AES_256_CBC_SHA":
cs = append(cs, tls.TLS_RSA_WITH_AES_256_CBC_SHA)
case "TLS_RSA_WITH_AES_128_CBC_SHA256":
cs = append(cs, tls.TLS_RSA_WITH_AES_128_CBC_SHA256)
case "TLS_RSA_WITH_AES_128_GCM_SHA256":
cs = append(cs, tls.TLS_RSA_WITH_AES_128_GCM_SHA256)
case "TLS_RSA_WITH_AES_256_GCM_SHA384":
cs = append(cs, tls.TLS_RSA_WITH_AES_256_GCM_SHA384)
case "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA":
cs = append(cs, tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA)
case "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA":
cs = append(cs, tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA)
case "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA":
cs = append(cs, tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA)
case "TLS_ECDHE_RSA_WITH_RC4_128_SHA":
cs = append(cs, tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA)
case "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA":
cs = append(cs, tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA)
case "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA":
cs = append(cs, tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA)
case "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA":
cs = append(cs, tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA)
case "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256":
cs = append(cs, tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256)
case "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256":
cs = append(cs, tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256)
case "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256":
cs = append(cs, tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256)
case "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256":
cs = append(cs, tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256)
case "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384":
cs = append(cs, tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384)
case "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384":
cs = append(cs, tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384)
case "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305":
cs = append(cs, tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305)
case "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305":
cs = append(cs, tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305)
// TLS 1.3 cipher suites are not tunable
// TLS_RSA_WITH_RC4_128_SHA
// TLS_ECDHE_RSA_WITH_RC4_128_SHA
// TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
// TLS_FALLBACK_SCSV isn't a standard cipher suite but an indicator
// that the client is doing version fallback. See RFC 7507.
case "TLS_FALLBACK_SCSV":
cs = append(cs, tls.TLS_FALLBACK_SCSV)
default:
}
}
return cs
}
// ECDHCurves returns the list of configured ECDH CurveIDs
func (q Query) ECDHCurves() []tls.CurveID {
val, ok := q["ecdhCurves"]
if !ok {
return []tls.CurveID{}
}
cs := []tls.CurveID{}
for _, s := range val.([]interface{}) {
switch s.(string) {
// TLS 1.0 - 1.2 cipher suites.
case "CurveP256":
cs = append(cs, tls.CurveP256)
case "CurveP384":
cs = append(cs, tls.CurveP384)
case "CurveP521":
cs = append(cs, tls.CurveP521)
case "X25519":
cs = append(cs, tls.X25519)
default:
}
}
return cs
}
// Method returns the query's method or "GET" if unspecified.
func (q Query) Method() string {
val, ok := q["method"]
if ok {
return val.(string)
}
return "GET"
}
// Headers returns an http.Header object populated with any headers passed
// in as part of the query.
func (q Query) Headers() (result http.Header) {
result = make(http.Header)
headers, ok := q["headers"]
if ok {
for key, val := range headers.(map[string]interface{}) {
result.Add(key, val.(string))
}
}
return result
}
// Body returns an io.Reader for the base64 encoded body supplied in
// the query.
func (q Query) Body() io.Reader {
body, ok := q["body"]
if ok {
buf, err := base64.StdEncoding.DecodeString(body.(string))
if err != nil {
panic(err)
}
return bytes.NewReader(buf)
} else {
return nil
}
}
// GrpcType returns the query's grpc_type field or the empty string.
func (q Query) GrpcType() string {
val, ok := q["grpc_type"]
if ok {
return val.(string)
}
return ""
}
// Cookies returns a slice of http.Cookie objects populated with any cookies
// passed in as part of the query.
func (q Query) Cookies() (result []http.Cookie) {
result = []http.Cookie{}
cookies, ok := q["cookies"]
if ok {
for _, c := range cookies.([]interface{}) {
cookie := http.Cookie{
Name: c.(map[string]interface{})["name"].(string),
Value: c.(map[string]interface{})["value"].(string),
}
result = append(result, cookie)
}
}
return result
}
// Result represents the result of one kat query. Upon first access to a query's
// result field, the Result object will be created and added to the query.
type Result map[string]interface{}
// Result returns the query's result field as a Result object. If the field
// doesn't exist, a new Result object is created and placed in that field. If
// the field exists and contains something else, panic!
func (q Query) Result() Result {
val, ok := q["result"]
if !ok {
val = make(Result)
q["result"] = val
}
return val.(Result)
}
// CheckErr populates the query result with error information if an error is
// passed in (and logs the error).
func (q Query) CheckErr(err error) bool {
if err != nil {
log.Printf("%v: %v", q.URL(), err)
q.Result()["error"] = err.Error()
return true
}
return false
}
// DecodeGrpcWebTextBody treats the body as a series of base64-encoded chunks. It
// returns the decoded proto and trailers.
func DecodeGrpcWebTextBody(body []byte) ([]byte, http.Header, error) {
// First, decode all the base64 stuff coming in. An annoyance here
// is that while the data coming over the wire are encoded in
// multiple chunks, we can't rely on seeing that framing when
// decoding: a chunk that's the right length to not need any base-64
// padding will just run into the next chunk.
//
// So we loop to grab all the chunks, but we just serialize it into
// a single raw byte array.
var raw []byte
cycle := 0
for {
if debug_grpc_web {
log.Printf("%v: base64 body '%v'", cycle, body)
}
cycle++
if len(body) <= 0 {
break
}
chunk := make([]byte, base64.StdEncoding.DecodedLen(len(body)))
n, err := base64.StdEncoding.Decode(chunk, body)
if err != nil && n <= 0 {
log.Printf("Failed to process body: %v\n", err)
return nil, nil, err
}
raw = append(raw, chunk[:n]...)
consumed := base64.StdEncoding.EncodedLen(n)
body = body[consumed:]
}
// Next up, we need to split this into protobuf data and trailers. We
// do this using grpc-web framing information for this -- each frame
// consists of one byte of type, four bytes of length, then the data
// itself.
//
// For our use case here, a type of 0 is the protobuf frame, and a type
// of 0x80 is the trailers.
trailers := make(http.Header) // the trailers will get saved here
var proto []byte // this is what we hand off to protobuf decode
var frame_start, frame_len uint32
var frame_type byte
var frame []byte
frame_start = 0
if debug_grpc_web {
log.Printf("starting frame split, len %v: %v", len(raw), raw)
}
for (frame_start + 5) < uint32(len(raw)) {
frame_type = raw[frame_start]
frame_len = binary.BigEndian.Uint32(raw[frame_start+1 : frame_start+5])
frame = raw[frame_start+5 : frame_start+5+frame_len]
if (frame_type & 128) > 0 {
// Trailers frame
if debug_grpc_web {
log.Printf(" trailers @%v (len %v, type %v) %v - %v", frame_start, frame_len, frame_type, len(frame), frame)
}
lines := strings.Split(string(frame), "\n")
for _, line := range lines {
split := strings.SplitN(strings.TrimSpace(line), ":", 2)
if len(split) == 2 {
key := strings.TrimSpace(split[0])
value := strings.TrimSpace(split[1])
trailers.Add(key, value)
}
}
} else {
// Protobuf frame
if debug_grpc_web {
log.Printf(" protobuf @%v (len %v, type %v) %v - %v", frame_start, frame_len, frame_type, len(frame), frame)
}
proto = frame
}
frame_start += frame_len + 5
}
return proto, trailers, nil
}
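// Worked framing example for the decoder above (byte values are illustrative):
// after base64 decoding, a reply carrying a 3-byte protobuf message followed
// by trailers could look like
//
// 0x00 0x00 0x00 0x00 0x03 <3 proto bytes>                        (type 0x00: protobuf frame)
// 0x80 0x00 0x00 0x00 0x0F <15 trailer bytes, e.g. "grpc-status: 0\n">  (type 0x80: trailers)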
// AddResponse populates a query's result with data from the query's HTTP
// response object.
//
// This is not called for websockets or real GRPC. It _is_ called for
// GRPC-bridge, GRPC-web, and (of course) HTTP(s).
func (q Query) AddResponse(resp *http.Response) {
result := q.Result()
result["status"] = resp.StatusCode
result["headers"] = resp.Header
headers := result["headers"].(http.Header)
if headers != nil {
// Copy in the client's start date.
cstart := q["client-start-date"]
// We'll only have a client-start-date if we're doing plain old HTTP, at
// present -- so not for WebSockets or gRPC or the like. Don't try to
// save the start and end dates if we have no start date.
if cstart != nil {
headers.Add("Client-Start-Date", q["client-start-date"].(string))
// Add the client's end date.
headers.Add("Client-End-Date", time.Now().Format(time.RFC3339Nano))
}
}
if resp.TLS != nil {
result["tls_version"] = resp.TLS.Version
result["tls"] = resp.TLS.PeerCertificates
result["cipher_suite"] = resp.TLS.CipherSuite
}
body, err := ioutil.ReadAll(resp.Body)
if !q.CheckErr(err) {
log.Printf("%v: %v", q.URL(), resp.Status)
result["body"] = body
if q.GrpcType() != "" && len(body) > 5 {
if q.GrpcType() == "web" {
// This is the GRPC-web case. Go forth and decode the base64'd
// GRPC-web body madness.
decodedBody, trailers, err := DecodeGrpcWebTextBody(body)
if q.CheckErr(err) {
log.Printf("Failed to decode grpc-web-text body: %v", err)
return
}
body = decodedBody
if debug_grpc_web {
log.Printf("decodedBody '%v'", body)
}
for key, values := range trailers {
for _, value := range values {
headers.Add(key, value)
}
}
} else {
// This is the GRPC-bridge case -- throw away the five-byte type/length
// framing at the start, and just leave the protobuf.
body = body[5:]
}
response := &grpc_echo_pb.EchoResponse{}
err := proto.Unmarshal(body, response)
if q.CheckErr(err) {
log.Printf("Failed to unmarshal proto: %v", err)
return
}
result["text"] = response // q.r.json needs a different format
return
}
var jsonBody interface{}
err = json.Unmarshal(body, &jsonBody)
if err == nil {
result["json"] = jsonBody
} else {
result["text"] = string(body)
}
}
}
// Request processing
// ExecuteWebsocketQuery handles Websocket queries
func ExecuteWebsocketQuery(query Query) {
url := query.URL()
c, resp, err := websocket.DefaultDialer.Dial(url, query.Headers())
if query.CheckErr(err) {
return
}
defer c.Close()
query.AddResponse(resp)
messages := query["messages"].([]interface{})
for _, msg := range messages {
err = c.WriteMessage(websocket.TextMessage, []byte(msg.(string)))
if query.CheckErr(err) {
return
}
}
err = c.WriteMessage(websocket.CloseMessage,
websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
if query.CheckErr(err) {
return
}
answers := []string{}
result := query.Result()
defer func() {
result["messages"] = answers
}()
for {
_, message, err := c.ReadMessage()
if err != nil {
if websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure) {
query.CheckErr(err)
}
return
}
answers = append(answers, string(message))
}
}
// GetGRPCReqBody returns the body of the HTTP request using the
// HTTP/1.1-gRPC bridge format as described in the Envoy docs
// https://www.envoyproxy.io/docs/envoy/v1.9.0/configuration/http_filters/grpc_http1_bridge_filter
func GetGRPCReqBody() (*bytes.Buffer, error) {
// Protocol:
// . 1 byte of zero (not compressed).
// . network order (big-endian) of proto message length.
// . serialized proto message.
buf := &bytes.Buffer{}
if err := binary.Write(buf, binary.BigEndian, uint8(0)); err != nil {
log.Printf("error when packing first byte: %v", err)
return nil, err
}
m := &grpc_echo_pb.EchoRequest{}
m.Data = "foo"
pbuf := &proto.Buffer{}
if err := pbuf.Marshal(m); err != nil {
log.Printf("error when serializing the gRPC message: %v", err)
return nil, err
}
if err := binary.Write(buf, binary.BigEndian, uint32(len(pbuf.Bytes()))); err != nil {
log.Printf("error when packing message length: %v", err)
return nil, err
}
for i := 0; i < len(pbuf.Bytes()); i++ {
if err := binary.Write(buf, binary.BigEndian, uint8(pbuf.Bytes()[i])); err != nil {
log.Printf("error when packing message: %v", err)
return nil, err
}
}
return buf, nil
}
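// Byte-level sketch of the bridge framing built above (lengths are
// illustrative): an EchoRequest that marshals to 5 protobuf bytes goes out as
//
// 0x00 0x00 0x00 0x00 0x05 <5 proto bytes>
//
// i.e. one zero compression flag, a big-endian uint32 length, then the message.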
// CallRealGRPC handles real gRPC queries, i.e. queries that use the normal gRPC
// generated code and the normal HTTP/2-based transport.
func CallRealGRPC(query Query) {
qURL, err := url.Parse(query.URL())
if query.CheckErr(err) {
log.Printf("grpc url parse failed: %v", err)
return
}
const requiredPath = "/echo.EchoService/Echo"
if qURL.Path != requiredPath {
query.Result()["error"] = fmt.Sprintf("GRPC path %s is not %s", qURL.Path, requiredPath)
return
}
dialHost := qURL.Host
if !strings.Contains(dialHost, ":") {
// There is no port number in the URL, but grpc.Dial wants host:port.
if qURL.Scheme == "https" {
dialHost = dialHost + ":443"
} else {
dialHost = dialHost + ":80"
}
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
// Dial runs in the background and thus always appears to succeed. If you
// pass grpc.WithBlock() to make it wait for a connection, failures just hit
// the deadline rather than returning a useful error like "no such host" or
// "connection refused" or whatever. Perhaps they are considered "transient"
// and there's some retry logic we need to turn off. Anyhow, we don't pass
// grpc.WithBlock(), instead letting the error happen at the request below.
// This makes useful error messages visible in most cases.
var dialOptions []grpc.DialOption
if qURL.Scheme != "https" {
dialOptions = append(dialOptions, grpc.WithInsecure())
}
conn, err := grpc.DialContext(ctx, dialHost, dialOptions...)
if query.CheckErr(err) {
log.Printf("grpc dial failed: %v", err)
return
}
defer conn.Close()
client := grpc_echo_pb.NewEchoServiceClient(conn)
request := &grpc_echo_pb.EchoRequest{Data: "real gRPC"}
// Prepare outgoing headers, which are passed via Context
md := metadata.MD{}
headers, ok := query["headers"]
if ok {
for key, val := range headers.(map[string]interface{}) {
md.Set(key, val.(string))
}
}
ctx = metadata.NewOutgoingContext(ctx, md)
response, err := client.Echo(ctx, request)
stat, ok := status.FromError(err)
if !ok { // err is not nil and not a grpc Status
query.CheckErr(err)
log.Printf("grpc echo request failed: %v", err)
return
}
// It's hard to tell the difference between a failed connection and a
// successful connection that set an error code. We'll use the
// heuristic that DNS errors and Connection Refused both appear to
// return code 14 (Code.Unavailable).
grpcCode := int(stat.Code())
if grpcCode == 14 {
query.CheckErr(err)
log.Printf("grpc echo request connection failed: %v", err)
return
}
// Now process the response and synthesize the requisite result values.
// Note: Don't set result.body to anything that cannot be decoded as base64,
// or the kat harness will fail.
resHeader := make(http.Header)
resHeader.Add("Grpc-Status", fmt.Sprint(grpcCode))
resHeader.Add("Grpc-Message", stat.Message())
result := query.Result()
result["headers"] = resHeader
result["body"] = ""
result["status"] = 200
if err == nil {
result["text"] = response // q.r.json needs a different format
}
// Stuff that's not available:
// - query.result.status (the HTTP status -- synthesized as 200)
// - query.result.headers (the HTTP response headers -- we're just putting
// in grpc-status and grpc-message as the former is required by the
// tests and the latter can be handy)
// - query.result.body (the raw HTTP body)
// - query.result.json or query.result.text (the parsed HTTP body -- we're
// emitting the full EchoResponse object in the text field)
}
// ExecuteQuery constructs the appropriate request, executes it, and records the
// response and related information in query.result.
func ExecuteQuery(query Query) {
// Websocket stuff is handled elsewhere
if query.IsWebsocket() {
ExecuteWebsocketQuery(query)
return
}
// Real gRPC is handled elsewhere
if query.GrpcType() == "real" {
CallRealGRPC(query)
return
}
// Prepare an http.Transport with customized TLS settings.
transport := &http.Transport{
MaxIdleConns: 10,
IdleConnTimeout: 30 * time.Second,
TLSClientConfig: &tls.Config{},
}
if query.Insecure() {
transport.TLSClientConfig.InsecureSkipVerify = true
}
if caCert := query.CACert(); len(caCert) > 0 {
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM([]byte(caCert))
clientCert, err := tls.X509KeyPair([]byte(query.ClientCert()), []byte(query.ClientKey()))
if err != nil {
log.Fatal(err)
}
transport.TLSClientConfig.RootCAs = caCertPool
transport.TLSClientConfig.Certificates = []tls.Certificate{clientCert}
}
if query.MinTLSVersion() != 0 {
transport.TLSClientConfig.MinVersion = query.MinTLSVersion()
}
if query.MaxTLSVersion() != 0 {
transport.TLSClientConfig.MaxVersion = query.MaxTLSVersion()
}
if len(query.CipherSuites()) > 0 {
transport.TLSClientConfig.CipherSuites = query.CipherSuites()
}
if len(query.ECDHCurves()) > 0 {
transport.TLSClientConfig.CurvePreferences = query.ECDHCurves()
}
// Prepare the HTTP request
var body io.Reader
method := query.Method()
if query.GrpcType() != "" {
// Perform special handling for gRPC-bridge and gRPC-web
buf, err := GetGRPCReqBody()
if query.CheckErr(err) {
log.Printf("gRPC buffer error: %v", err)
return
}
if query.GrpcType() == "web" {
result := make([]byte, base64.StdEncoding.EncodedLen(buf.Len()))
base64.StdEncoding.Encode(result, buf.Bytes())
buf = bytes.NewBuffer(result)
}
body = buf
method = "POST"
} else {
body = query.Body()
}
req, err := http.NewRequest(method, query.URL(), body)
if query.CheckErr(err) {
log.Printf("request error: %v", err)
return
}
req.Header = query.Headers()
for _, cookie := range query.Cookies() {
req.AddCookie(&cookie)
}
// Save the client's start date.
query["client-start-date"] = time.Now().Format(time.RFC3339Nano)
// Handle host and SNI
host := req.Header.Get("Host")
if host != "" {
if query.SNI() {
transport.TLSClientConfig.ServerName = host
}
req.Host = host
}
// Perform the request and save the results.
client := &http.Client{
Transport: transport,
		Timeout: 10 * time.Second,
CheckRedirect: func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
},
}
resp, err := client.Do(req)
if query.CheckErr(err) {
return
}
query.AddResponse(resp)
}
func main() {
debug_grpc_web = false
rlimit()
var input, output string
flag.StringVar(&input, "input", "", "input filename")
flag.StringVar(&output, "output", "", "output filename")
flag.Parse()
var data []byte
var err error
// Read input file
if input == "" {
data, err = ioutil.ReadAll(os.Stdin)
} else {
data, err = ioutil.ReadFile(input)
}
if err != nil {
panic(err)
}
// Parse input file
var specs []Query
err = json.Unmarshal(data, &specs)
if err != nil {
panic(err)
}
// Prep semaphore to limit concurrency
limitStr := os.Getenv("KAT_QUERY_LIMIT")
limit, err := strconv.Atoi(limitStr)
if err != nil {
limit = 25
}
sem := NewSemaphore(limit)
// Launch queries concurrently
count := len(specs)
queries := make(chan bool)
for i := 0; i < count; i++ {
go func(idx int) {
sem.Acquire()
defer func() {
queries <- true
sem.Release()
}()
ExecuteQuery(specs[idx])
}(i)
}
// Wait for all the answers
for i := 0; i < count; i++ {
<-queries
}
// Generate the output file
	// Shadowing the imported "bytes" package with a local variable compiles but
	// is confusing, so use a distinct name for the marshalled output.
	out, err := json.MarshalIndent(specs, "", " ")
	if err != nil {
		log.Print(err)
	} else if output == "" {
		fmt.Print(string(out))
	} else {
		err = ioutil.WriteFile(output, out, 0644)
		if err != nil {
			log.Print(err)
		}
	}
}
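// Example input for the harness above (illustrative values only): a JSON array
// of query objects read from stdin or -input, e.g.
//
//	[{"url": "http://example.test/echo", "method": "GET",
//	  "headers": {"Host": "example.test"}}]
//
// Each query is annotated in place with a "result" object and the whole array
// is written back out via -output (or stdout).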
| [
"\"KAT_QUERY_LIMIT\""
]
| []
| [
"KAT_QUERY_LIMIT"
]
| [] | ["KAT_QUERY_LIMIT"] | go | 1 | 0 | |
run.py | # Many thanks to daya for modifying the code :)
# ==============================================================================
"""Main function to run the code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
import numpy as np
from src.data_provider import datasets_factory
from src.models.model_factory import Model
import src.trainer as trainer
#from src.utils import preprocess
import tensorflow as tf
import argparse
def add_arguments(parser):
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument("--train_data_paths", type=str, default="", help="train data paths")
parser.add_argument("--valid_data_paths", type=str, default="", help="validation data paths")
parser.add_argument("--test_data_paths", type=str, default="", help="train data paths")
parser.add_argument("--save_dir", type=str, default="", help="dir to store trained net")
parser.add_argument("--gen_frm_dir", type=str, default="", help="dir to store result.")
parser.add_argument("--is_training", type="bool", nargs="?", const=True,
default=False,
help="training or testing")
parser.add_argument("--dataset_name", type=str, default="milan", help="name of dataset")
parser.add_argument("--input_seq_length", type=int, default=10, help="number of input snapshots")
parser.add_argument("--output_seq_length", type=int, default=10, help="number of output snapshots")
parser.add_argument("--dimension_3D", type=int, default=2, help="dimension of input depth")
parser.add_argument("--img_width", type=int, default=100, help="input image width.")
parser.add_argument("--patch_size", type=int, default=1, help="patch size on one dimension")
parser.add_argument("--reverse_input", type="bool", nargs="?", const=True,
default=False,
help="reverse the input/outputs during training.")
parser.add_argument("--model_name", type=str, default="e3d_lstm", help="The name of the architecture")
parser.add_argument("--pretrained_model", type=str, default="", help=".ckpt file to initialize from")
parser.add_argument("--num_hidden", type=str, default="10,10,10,10", help="COMMA separated number of units of e3d lstms")
parser.add_argument("--filter_size", type=int, default=5, help="filter of a e3d lstm layer")
parser.add_argument("--layer_norm", type="bool", nargs="?", const=True,
default=True,
help="whether to apply tensor layer norm")
parser.add_argument("--scheduled_sampling", type="bool", nargs="?", const=True,
default=True,
help="for scheduled sampling")
parser.add_argument("--sampling_stop_iter", type=int, default=40, help="for scheduled sampling")
parser.add_argument("--sampling_start_value", type=float, default=1.0, help="for scheduled sampling")
parser.add_argument("--sampling_changing_rate", type=float, default=0.00002, help="for scheduled sampling")
parser.add_argument("--lr", type=float, default=0.001, help="learning rate")
parser.add_argument("--batch_size", type=int, default=50, help="batch size for training")
parser.add_argument("--max_iterations", type=int, default=50, help="max num of steps")
parser.add_argument("--display_interval", type=int, default=1, help="number of iters showing training loss")
parser.add_argument("--test_interval", type=int, default=1, help="number of iters for test")
parser.add_argument("--snapshot_interval", type=int, default=50, help="number of iters saving models")
# parser.add_argument("--num_save_samples", type=int, default=10, help="number of sequences to be saved")
parser.add_argument("--n_gpu", type=int, default=1, help="how many GPUs to distribute the training across")
parser.add_argument("--allow_gpu_growth", type="bool", nargs="?", const=True,
default=True,
help="allow gpu growth")
def main(unused_argv):
"""Main function."""
print(FLAGS)
# print(FLAGS.reverse_input)
if FLAGS.is_training:
if tf.gfile.Exists(FLAGS.save_dir):
tf.gfile.DeleteRecursively(FLAGS.save_dir)
tf.gfile.MakeDirs(FLAGS.save_dir)
if tf.gfile.Exists(FLAGS.gen_frm_dir):
tf.gfile.DeleteRecursively(FLAGS.gen_frm_dir)
tf.gfile.MakeDirs(FLAGS.gen_frm_dir)
gpu_list = np.asarray(
os.environ.get('CUDA_VISIBLE_DEVICES', '-1').split(','), dtype=np.int32)
FLAGS.n_gpu = len(gpu_list)
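    # Example (illustrative): CUDA_VISIBLE_DEVICES="0,1" yields gpu_list=[0, 1]
    # and n_gpu=2; when the variable is unset, the '-1' fallback gives n_gpu=1.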
print('Initializing models')
model = Model(FLAGS)
if FLAGS.is_training:
train_wrapper(model)
else:
test_wrapper(model)
def schedule_sampling(eta, itr):
"""Gets schedule sampling parameters for training."""
zeros = np.zeros((FLAGS.batch_size, FLAGS.output_seq_length // FLAGS.dimension_3D - 1, FLAGS.img_width, FLAGS.img_width, FLAGS.dimension_3D))
if not FLAGS.scheduled_sampling:
return 0.0, zeros
if itr < FLAGS.sampling_stop_iter:
eta -= FLAGS.sampling_changing_rate
else:
eta = 0.0
random_flip = np.random.random_sample(
(FLAGS.batch_size, FLAGS.output_seq_length // FLAGS.dimension_3D - 1))
true_token = (random_flip < eta)
ones = np.ones((FLAGS.img_width // FLAGS.patch_size, FLAGS.img_width // FLAGS.patch_size, FLAGS.patch_size**2*FLAGS.dimension_3D))
zeros = np.zeros((FLAGS.img_width // FLAGS.patch_size, FLAGS.img_width // FLAGS.patch_size, FLAGS.patch_size**2 * FLAGS.dimension_3D))
real_input_flag = []
for i in range(FLAGS.batch_size):
for j in range(FLAGS.output_seq_length // FLAGS.dimension_3D - 1):
if true_token[i, j]:
real_input_flag.append(ones)
else:
real_input_flag.append(zeros)
real_input_flag = np.array(real_input_flag)
real_input_flag = np.reshape(real_input_flag,(FLAGS.batch_size, FLAGS.output_seq_length // FLAGS.dimension_3D - 1,FLAGS.img_width // FLAGS.patch_size, FLAGS.img_width // FLAGS.patch_size,FLAGS.patch_size**2 * FLAGS.dimension_3D))
return eta, real_input_flag
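# Worked example of the schedule (illustrative numbers): with
# sampling_start_value=1.0, sampling_changing_rate=0.00002 and
# sampling_stop_iter=40, eta decreases by 0.00002 per call while itr < 40
# (so ground-truth frames are fed almost always early on) and is forced to
# 0.0 afterwards, after which only the model's own predictions are fed back.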
def train_wrapper(model):
"""Wrapping function to train the model."""
if FLAGS.pretrained_model:
model.load(FLAGS.pretrained_model)
# load data
train_input_handle, test_input_handle = datasets_factory.data_provider(
FLAGS.dataset_name,
FLAGS.train_data_paths,
FLAGS.valid_data_paths,
FLAGS.batch_size * FLAGS.n_gpu,
FLAGS.img_width,
FLAGS.input_seq_length,
FLAGS.output_seq_length,
FLAGS.dimension_3D,
is_training=True)
print('Data loaded.')
eta = FLAGS.sampling_start_value
tra_cost = 0.0
batch_id = 0
stopping = [10000000000000000]
    # NOTE: the original code hard-coded a resume offset of 2351 here, which
    # skips training entirely with the default max_iterations of 50.
    for itr in range(1, FLAGS.max_iterations + 1):
if itr == 2:
print('training process started.')
#if itr % 50 == 0:
# print('training timestep: ' + str(itr))
if train_input_handle.no_batch_left() or itr % 50 == 0:
model.save(itr)
print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),'itr: ' + str(itr))
print('training loss: ' + str(tra_cost / batch_id))
val_cost = trainer.test(model, test_input_handle,FLAGS, itr)
if val_cost < min(stopping):
stopping = [val_cost]
elif len(stopping) < 10:
stopping.append(val_cost)
if len(stopping) == 10:
break
train_input_handle.begin(do_shuffle=True)
tra_cost = 0
batch_id = 0
ims = train_input_handle.get_batch()
batch_id += 1
eta, real_input_flag = schedule_sampling(eta, itr)
tra_cost += trainer.train(model, ims, real_input_flag, FLAGS, itr)
#if itr % FLAGS.snapshot_interval == 0:
#model.save(itr)
#if itr % FLAGS.test_interval == 0:
#trainer.test(model, test_input_handle, FLAGS, itr)
train_input_handle.next_batch()
def test_wrapper(model):
model.load(FLAGS.pretrained_model)
test_input_handle = datasets_factory.data_provider(
FLAGS.dataset_name,
FLAGS.train_data_paths,
FLAGS.test_data_paths, # Should use test data rather than training or validation data.
FLAGS.batch_size * FLAGS.n_gpu,
FLAGS.img_width,
FLAGS.input_seq_length,
FLAGS.output_seq_length,
FLAGS.dimension_3D,
is_training=False)
trainer.test(model, test_input_handle, FLAGS, 'test_result')
if __name__ == '__main__':
nmt_parser = argparse.ArgumentParser()
add_arguments(nmt_parser)
FLAGS, unparsed = nmt_parser.parse_known_args()
tf.app.run(main=main)
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
pkg/pod/pod_test.go | /*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pod
import (
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/tektoncd/pipeline/pkg/apis/pipeline"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
"github.com/tektoncd/pipeline/pkg/system"
"github.com/tektoncd/pipeline/test/diff"
"github.com/tektoncd/pipeline/test/names"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
fakek8s "k8s.io/client-go/kubernetes/fake"
)
var (
images = pipeline.Images{
EntrypointImage: "entrypoint-image",
CredsImage: "override-with-creds:latest",
ShellImage: "busybox",
}
)
func TestMakePod(t *testing.T) {
names.TestingSeed()
implicitEnvVars := []corev1.EnvVar{{
Name: "HOME",
Value: pipeline.HomeDir,
}}
secretsVolumeMount := corev1.VolumeMount{
Name: "tekton-internal-secret-volume-multi-creds-9l9zj",
MountPath: "/tekton/creds-secrets/multi-creds",
}
secretsVolume := corev1.Volume{
Name: "tekton-internal-secret-volume-multi-creds-9l9zj",
VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: "multi-creds"}},
}
placeToolsInit := corev1.Container{
Name: "place-tools",
Image: images.EntrypointImage,
Command: []string{"cp", "/ko-app/entrypoint", "/tekton/tools/entrypoint"},
VolumeMounts: []corev1.VolumeMount{toolsMount},
}
runtimeClassName := "gvisor"
automountServiceAccountToken := false
dnsPolicy := corev1.DNSNone
enableServiceLinks := false
priorityClassName := "system-cluster-critical"
for _, c := range []struct {
desc string
trs v1beta1.TaskRunSpec
ts v1beta1.TaskSpec
want *corev1.PodSpec
wantAnnotations map[string]string
}{{
desc: "simple",
ts: v1beta1.TaskSpec{
Steps: []v1beta1.Step{{Container: corev1.Container{
Name: "name",
Image: "image",
Command: []string{"cmd"}, // avoid entrypoint lookup.
}}},
},
want: &corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
InitContainers: []corev1.Container{placeToolsInit},
Containers: []corev1.Container{{
Name: "step-name",
Image: "image",
Command: []string{"/tekton/tools/entrypoint"},
Args: []string{
"-wait_file",
"/tekton/downward/ready",
"-wait_file_content",
"-post_file",
"/tekton/tools/0",
"-termination_path",
"/tekton/termination",
"-entrypoint",
"cmd",
"--",
},
Env: implicitEnvVars,
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
WorkingDir: pipeline.WorkspaceDir,
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
TerminationMessagePath: "/tekton/termination",
}},
Volumes: append(implicitVolumes, toolsVolume, downwardVolume),
},
}, {
desc: "with service account",
ts: v1beta1.TaskSpec{
Steps: []v1beta1.Step{{Container: corev1.Container{
Name: "name",
Image: "image",
Command: []string{"cmd"}, // avoid entrypoint lookup.
}}},
},
trs: v1beta1.TaskRunSpec{
ServiceAccountName: "service-account",
},
want: &corev1.PodSpec{
ServiceAccountName: "service-account",
RestartPolicy: corev1.RestartPolicyNever,
InitContainers: []corev1.Container{{
Name: "credential-initializer",
Image: images.CredsImage,
Command: []string{"/ko-app/creds-init"},
Args: []string{
"-basic-docker=multi-creds=https://docker.io",
"-basic-docker=multi-creds=https://us.gcr.io",
"-basic-git=multi-creds=github.com",
"-basic-git=multi-creds=gitlab.com",
},
VolumeMounts: append(implicitVolumeMounts, secretsVolumeMount),
Env: implicitEnvVars,
},
placeToolsInit,
},
Containers: []corev1.Container{{
Name: "step-name",
Image: "image",
Command: []string{"/tekton/tools/entrypoint"},
Args: []string{
"-wait_file",
"/tekton/downward/ready",
"-wait_file_content",
"-post_file",
"/tekton/tools/0",
"-termination_path",
"/tekton/termination",
"-entrypoint",
"cmd",
"--",
},
Env: implicitEnvVars,
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
WorkingDir: pipeline.WorkspaceDir,
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
TerminationMessagePath: "/tekton/termination",
}},
Volumes: append(implicitVolumes, secretsVolume, toolsVolume, downwardVolume),
},
}, {
desc: "with-pod-template",
ts: v1beta1.TaskSpec{
Steps: []v1beta1.Step{{Container: corev1.Container{
Name: "name",
Image: "image",
Command: []string{"cmd"}, // avoid entrypoint lookup.
}}},
},
trs: v1beta1.TaskRunSpec{
PodTemplate: &v1beta1.PodTemplate{
SecurityContext: &corev1.PodSecurityContext{
Sysctls: []corev1.Sysctl{
{Name: "net.ipv4.tcp_syncookies", Value: "1"},
},
},
RuntimeClassName: &runtimeClassName,
AutomountServiceAccountToken: &automountServiceAccountToken,
DNSPolicy: &dnsPolicy,
DNSConfig: &corev1.PodDNSConfig{
Nameservers: []string{"8.8.8.8"},
Searches: []string{"tekton.local"},
},
EnableServiceLinks: &enableServiceLinks,
PriorityClassName: &priorityClassName,
},
},
want: &corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
InitContainers: []corev1.Container{placeToolsInit},
Containers: []corev1.Container{{
Name: "step-name",
Image: "image",
Command: []string{"/tekton/tools/entrypoint"},
Args: []string{
"-wait_file",
"/tekton/downward/ready",
"-wait_file_content",
"-post_file",
"/tekton/tools/0",
"-termination_path",
"/tekton/termination",
"-entrypoint",
"cmd",
"--",
},
Env: implicitEnvVars,
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
WorkingDir: pipeline.WorkspaceDir,
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
TerminationMessagePath: "/tekton/termination",
}},
Volumes: append(implicitVolumes, toolsVolume, downwardVolume),
SecurityContext: &corev1.PodSecurityContext{
Sysctls: []corev1.Sysctl{
{Name: "net.ipv4.tcp_syncookies", Value: "1"},
},
},
RuntimeClassName: &runtimeClassName,
AutomountServiceAccountToken: &automountServiceAccountToken,
DNSPolicy: dnsPolicy,
DNSConfig: &corev1.PodDNSConfig{
Nameservers: []string{"8.8.8.8"},
Searches: []string{"tekton.local"},
},
EnableServiceLinks: &enableServiceLinks,
PriorityClassName: priorityClassName,
},
}, {
desc: "very long step name",
ts: v1beta1.TaskSpec{
Steps: []v1beta1.Step{{Container: corev1.Container{
Name: "a-very-very-long-character-step-name-to-trigger-max-len----and-invalid-characters",
Image: "image",
Command: []string{"cmd"}, // avoid entrypoint lookup.
}}},
},
want: &corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
InitContainers: []corev1.Container{placeToolsInit},
Containers: []corev1.Container{{
Name: "step-a-very-very-long-character-step-name-to-trigger-max-len", // step name trimmed.
Image: "image",
Command: []string{"/tekton/tools/entrypoint"},
Args: []string{
"-wait_file",
"/tekton/downward/ready",
"-wait_file_content",
"-post_file",
"/tekton/tools/0",
"-termination_path",
"/tekton/termination",
"-entrypoint",
"cmd",
"--",
},
Env: implicitEnvVars,
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
WorkingDir: pipeline.WorkspaceDir,
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
TerminationMessagePath: "/tekton/termination",
}},
Volumes: append(implicitVolumes, toolsVolume, downwardVolume),
},
}, {
desc: "step name ends with non alphanumeric",
ts: v1beta1.TaskSpec{
Steps: []v1beta1.Step{{Container: corev1.Container{
Name: "ends-with-invalid-%%__$$",
Image: "image",
Command: []string{"cmd"}, // avoid entrypoint lookup.
}}},
},
want: &corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
InitContainers: []corev1.Container{placeToolsInit},
Containers: []corev1.Container{{
Name: "step-ends-with-invalid", // invalid suffix removed.
Image: "image",
Command: []string{"/tekton/tools/entrypoint"},
Args: []string{
"-wait_file",
"/tekton/downward/ready",
"-wait_file_content",
"-post_file",
"/tekton/tools/0",
"-termination_path",
"/tekton/termination",
"-entrypoint",
"cmd",
"--",
},
Env: implicitEnvVars,
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
WorkingDir: pipeline.WorkspaceDir,
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
TerminationMessagePath: "/tekton/termination",
}},
Volumes: append(implicitVolumes, toolsVolume, downwardVolume),
},
}, {
desc: "workingDir in workspace",
ts: v1beta1.TaskSpec{
Steps: []v1beta1.Step{{Container: corev1.Container{
Name: "name",
Image: "image",
Command: []string{"cmd"}, // avoid entrypoint lookup.
WorkingDir: filepath.Join(pipeline.WorkspaceDir, "test"),
}}},
},
want: &corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
InitContainers: []corev1.Container{
{
Name: "working-dir-initializer",
Image: images.ShellImage,
Command: []string{"sh"},
Args: []string{"-c", fmt.Sprintf("mkdir -p %s", filepath.Join(pipeline.WorkspaceDir, "test"))},
WorkingDir: pipeline.WorkspaceDir,
VolumeMounts: implicitVolumeMounts,
},
placeToolsInit,
},
Containers: []corev1.Container{{
Name: "step-name",
Image: "image",
Command: []string{"/tekton/tools/entrypoint"},
Args: []string{
"-wait_file",
"/tekton/downward/ready",
"-wait_file_content",
"-post_file",
"/tekton/tools/0",
"-termination_path",
"/tekton/termination",
"-entrypoint",
"cmd",
"--",
},
Env: implicitEnvVars,
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
WorkingDir: filepath.Join(pipeline.WorkspaceDir, "test"),
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
TerminationMessagePath: "/tekton/termination",
}},
Volumes: append(implicitVolumes, toolsVolume, downwardVolume),
},
}, {
desc: "sidecar container",
ts: v1beta1.TaskSpec{
Steps: []v1beta1.Step{{Container: corev1.Container{
Name: "primary-name",
Image: "primary-image",
Command: []string{"cmd"}, // avoid entrypoint lookup.
}}},
Sidecars: []v1beta1.Sidecar{{
Container: corev1.Container{
Name: "sc-name",
Image: "sidecar-image",
},
}},
},
wantAnnotations: map[string]string{},
want: &corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
InitContainers: []corev1.Container{placeToolsInit},
Containers: []corev1.Container{{
Name: "step-primary-name",
Image: "primary-image",
Command: []string{"/tekton/tools/entrypoint"},
Args: []string{
"-wait_file",
"/tekton/downward/ready",
"-wait_file_content",
"-post_file",
"/tekton/tools/0",
"-termination_path",
"/tekton/termination",
"-entrypoint",
"cmd",
"--",
},
Env: implicitEnvVars,
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
WorkingDir: pipeline.WorkspaceDir,
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
TerminationMessagePath: "/tekton/termination",
}, {
Name: "sidecar-sc-name",
Image: "sidecar-image",
Resources: corev1.ResourceRequirements{
Requests: nil,
},
}},
Volumes: append(implicitVolumes, toolsVolume, downwardVolume),
},
}, {
desc: "sidecar container with script",
ts: v1beta1.TaskSpec{
Steps: []v1beta1.Step{{Container: corev1.Container{
Name: "primary-name",
Image: "primary-image",
Command: []string{"cmd"}, // avoid entrypoint lookup.
}}},
Sidecars: []v1beta1.Sidecar{{
Container: corev1.Container{
Name: "sc-name",
Image: "sidecar-image",
},
Script: "#!/bin/sh\necho hello from sidecar",
}},
},
wantAnnotations: map[string]string{},
want: &corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
InitContainers: []corev1.Container{
{
Name: "place-scripts",
Image: "busybox",
Command: []string{"sh"},
TTY: true,
VolumeMounts: []corev1.VolumeMount{scriptsVolumeMount},
Args: []string{"-c", `tmpfile="/tekton/scripts/sidecar-script-0-9l9zj"
touch ${tmpfile} && chmod +x ${tmpfile}
cat > ${tmpfile} << 'sidecar-script-heredoc-randomly-generated-mz4c7'
#!/bin/sh
echo hello from sidecar
sidecar-script-heredoc-randomly-generated-mz4c7
`},
},
placeToolsInit,
},
Containers: []corev1.Container{{
Name: "step-primary-name",
Image: "primary-image",
Command: []string{"/tekton/tools/entrypoint"},
Args: []string{
"-wait_file",
"/tekton/downward/ready",
"-wait_file_content",
"-post_file",
"/tekton/tools/0",
"-termination_path",
"/tekton/termination",
"-entrypoint",
"cmd",
"--",
},
Env: implicitEnvVars,
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
WorkingDir: pipeline.WorkspaceDir,
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
TerminationMessagePath: "/tekton/termination",
}, {
Name: "sidecar-sc-name",
Image: "sidecar-image",
Resources: corev1.ResourceRequirements{
Requests: nil,
},
Command: []string{"/tekton/scripts/sidecar-script-0-9l9zj"},
VolumeMounts: []corev1.VolumeMount{scriptsVolumeMount},
}},
Volumes: append(implicitVolumes, scriptsVolume, toolsVolume, downwardVolume),
},
}, {
desc: "resource request",
ts: v1beta1.TaskSpec{
Steps: []v1beta1.Step{{Container: corev1.Container{
Image: "image",
Command: []string{"cmd"}, // avoid entrypoint lookup.
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("8"),
corev1.ResourceMemory: resource.MustParse("10Gi"),
},
},
}}, {Container: corev1.Container{
Image: "image",
Command: []string{"cmd"}, // avoid entrypoint lookup.
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("1"),
corev1.ResourceMemory: resource.MustParse("100Gi"),
},
},
}}},
},
want: &corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
InitContainers: []corev1.Container{placeToolsInit},
Containers: []corev1.Container{{
Name: "step-unnamed-0",
Image: "image",
Command: []string{"/tekton/tools/entrypoint"},
Args: []string{
"-wait_file",
"/tekton/downward/ready",
"-wait_file_content",
"-post_file",
"/tekton/tools/0",
"-termination_path",
"/tekton/termination",
"-entrypoint",
"cmd",
"--",
},
Env: implicitEnvVars,
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
WorkingDir: pipeline.WorkspaceDir,
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("8"),
corev1.ResourceMemory: zeroQty,
corev1.ResourceEphemeralStorage: zeroQty,
},
},
TerminationMessagePath: "/tekton/termination",
}, {
Name: "step-unnamed-1",
Image: "image",
Command: []string{"/tekton/tools/entrypoint"},
Args: []string{
"-wait_file",
"/tekton/tools/0",
"-post_file",
"/tekton/tools/1",
"-termination_path",
"/tekton/termination",
"-entrypoint",
"cmd",
"--",
},
Env: implicitEnvVars,
VolumeMounts: append([]corev1.VolumeMount{toolsMount}, implicitVolumeMounts...),
WorkingDir: pipeline.WorkspaceDir,
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: zeroQty,
corev1.ResourceMemory: resource.MustParse("100Gi"),
corev1.ResourceEphemeralStorage: zeroQty,
},
},
TerminationMessagePath: "/tekton/termination",
}},
Volumes: append(implicitVolumes, toolsVolume, downwardVolume),
},
}, {
desc: "step with script and stepTemplate",
ts: v1beta1.TaskSpec{
StepTemplate: &corev1.Container{
Env: []corev1.EnvVar{{Name: "FOO", Value: "bar"}},
Args: []string{"template", "args"},
},
Steps: []v1beta1.Step{{
Container: corev1.Container{
Name: "one",
Image: "image",
},
Script: "#!/bin/sh\necho hello from step one",
}, {
Container: corev1.Container{
Name: "two",
Image: "image",
VolumeMounts: []corev1.VolumeMount{{Name: "i-have-a-volume-mount"}},
},
Script: `#!/usr/bin/env python
print("Hello from Python")`,
}, {
Container: corev1.Container{
Name: "regular-step",
Image: "image",
Command: []string{"regular", "command"},
},
}},
},
want: &corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
InitContainers: []corev1.Container{
{
Name: "place-scripts",
Image: images.ShellImage,
Command: []string{"sh"},
TTY: true,
Args: []string{"-c", `tmpfile="/tekton/scripts/script-0-9l9zj"
touch ${tmpfile} && chmod +x ${tmpfile}
cat > ${tmpfile} << 'script-heredoc-randomly-generated-mz4c7'
#!/bin/sh
echo hello from step one
script-heredoc-randomly-generated-mz4c7
tmpfile="/tekton/scripts/script-1-mssqb"
touch ${tmpfile} && chmod +x ${tmpfile}
cat > ${tmpfile} << 'script-heredoc-randomly-generated-78c5n'
#!/usr/bin/env python
print("Hello from Python")
script-heredoc-randomly-generated-78c5n
`},
VolumeMounts: []corev1.VolumeMount{scriptsVolumeMount},
},
{
Name: "place-tools",
Image: images.EntrypointImage,
Command: []string{"cp", "/ko-app/entrypoint", "/tekton/tools/entrypoint"},
VolumeMounts: []corev1.VolumeMount{toolsMount},
}},
Containers: []corev1.Container{{
Name: "step-one",
Image: "image",
Command: []string{"/tekton/tools/entrypoint"},
Args: []string{
"-wait_file",
"/tekton/downward/ready",
"-wait_file_content",
"-post_file",
"/tekton/tools/0",
"-termination_path",
"/tekton/termination",
"-entrypoint",
"/tekton/scripts/script-0-9l9zj",
"--",
"template",
"args",
},
Env: append(implicitEnvVars, corev1.EnvVar{Name: "FOO", Value: "bar"}),
VolumeMounts: append([]corev1.VolumeMount{scriptsVolumeMount, toolsMount, downwardMount}, implicitVolumeMounts...),
WorkingDir: pipeline.WorkspaceDir,
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
TerminationMessagePath: "/tekton/termination",
}, {
Name: "step-two",
Image: "image",
Command: []string{"/tekton/tools/entrypoint"},
Args: []string{
"-wait_file",
"/tekton/tools/0",
"-post_file",
"/tekton/tools/1",
"-termination_path",
"/tekton/termination",
"-entrypoint",
"/tekton/scripts/script-1-mssqb",
"--",
"template",
"args",
},
Env: append(implicitEnvVars, corev1.EnvVar{Name: "FOO", Value: "bar"}),
VolumeMounts: append([]corev1.VolumeMount{{Name: "i-have-a-volume-mount"}, scriptsVolumeMount, toolsMount}, implicitVolumeMounts...),
WorkingDir: pipeline.WorkspaceDir,
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
TerminationMessagePath: "/tekton/termination",
}, {
Name: "step-regular-step",
Image: "image",
Command: []string{"/tekton/tools/entrypoint"},
Args: []string{
"-wait_file",
"/tekton/tools/1",
"-post_file",
"/tekton/tools/2",
"-termination_path",
"/tekton/termination",
"-entrypoint",
"regular",
"--",
"command",
"template",
"args",
},
Env: append(implicitEnvVars, corev1.EnvVar{Name: "FOO", Value: "bar"}),
VolumeMounts: append([]corev1.VolumeMount{toolsMount}, implicitVolumeMounts...),
WorkingDir: pipeline.WorkspaceDir,
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
TerminationMessagePath: "/tekton/termination",
}},
Volumes: append(implicitVolumes, scriptsVolume, toolsVolume, downwardVolume),
},
}, {
desc: "using another scheduler",
ts: v1beta1.TaskSpec{
Steps: []v1beta1.Step{
{
Container: corev1.Container{
Name: "schedule-me",
Image: "image",
Command: []string{"cmd"}, // avoid entrypoint lookup.
},
},
},
},
trs: v1beta1.TaskRunSpec{
PodTemplate: &v1beta1.PodTemplate{
SchedulerName: "there-scheduler",
},
},
want: &corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
InitContainers: []corev1.Container{placeToolsInit},
SchedulerName: "there-scheduler",
Volumes: append(implicitVolumes, toolsVolume, downwardVolume),
Containers: []corev1.Container{{
Name: "step-schedule-me",
Image: "image",
Command: []string{"/tekton/tools/entrypoint"},
Args: []string{
"-wait_file",
"/tekton/downward/ready",
"-wait_file_content",
"-post_file",
"/tekton/tools/0",
"-termination_path",
"/tekton/termination",
"-entrypoint",
"cmd",
"--",
},
Env: implicitEnvVars,
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
WorkingDir: pipeline.WorkspaceDir,
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
TerminationMessagePath: "/tekton/termination",
}},
},
}, {
desc: "using hostNetwork",
ts: v1beta1.TaskSpec{
Steps: []v1beta1.Step{
{
Container: corev1.Container{
Name: "use-my-hostNetwork",
Image: "image",
Command: []string{"cmd"}, // avoid entrypoint lookup.
},
},
},
},
trs: v1beta1.TaskRunSpec{
PodTemplate: &v1beta1.PodTemplate{
HostNetwork: true,
},
},
want: &corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyNever,
InitContainers: []corev1.Container{placeToolsInit},
HostNetwork: true,
Volumes: append(implicitVolumes, toolsVolume, downwardVolume),
Containers: []corev1.Container{{
Name: "step-use-my-hostNetwork",
Image: "image",
Command: []string{"/tekton/tools/entrypoint"},
Args: []string{
"-wait_file",
"/tekton/downward/ready",
"-wait_file_content",
"-post_file",
"/tekton/tools/0",
"-termination_path",
"/tekton/termination",
"-entrypoint",
"cmd",
"--",
},
Env: implicitEnvVars,
VolumeMounts: append([]corev1.VolumeMount{toolsMount, downwardMount}, implicitVolumeMounts...),
WorkingDir: pipeline.WorkspaceDir,
Resources: corev1.ResourceRequirements{Requests: allZeroQty()},
TerminationMessagePath: "/tekton/termination",
}},
},
}} {
t.Run(c.desc, func(t *testing.T) {
names.TestingSeed()
kubeclient := fakek8s.NewSimpleClientset(
&corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "default"}},
&corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "service-account", Namespace: "default"},
Secrets: []corev1.ObjectReference{{
Name: "multi-creds",
}},
},
&corev1.Secret{
ObjectMeta: metav1.ObjectMeta{Name: "multi-creds",
Annotations: map[string]string{
"tekton.dev/docker-0": "https://us.gcr.io",
"tekton.dev/docker-1": "https://docker.io",
"tekton.dev/git-0": "github.com",
"tekton.dev/git-1": "gitlab.com",
}},
Type: "kubernetes.io/basic-auth",
Data: map[string][]byte{
"username": []byte("foo"),
"password": []byte("BestEver"),
},
},
)
tr := &v1beta1.TaskRun{
ObjectMeta: metav1.ObjectMeta{
Name: "taskrun-name",
Annotations: map[string]string{
ReleaseAnnotation: ReleaseAnnotationValue,
},
},
Spec: c.trs,
}
// No entrypoints should be looked up.
entrypointCache := fakeCache{}
got, err := MakePod(images, tr, c.ts, kubeclient, entrypointCache, true)
if err != nil {
t.Fatalf("MakePod: %v", err)
}
if !strings.HasPrefix(got.Name, "taskrun-name-pod-") {
t.Errorf("Pod name %q should have prefix 'taskrun-name-pod-'", got.Name)
}
if d := cmp.Diff(c.want, &got.Spec, resourceQuantityCmp); d != "" {
t.Errorf("Diff %s", diff.PrintWantGot(d))
}
})
}
}
func TestMakeLabels(t *testing.T) {
taskRunName := "task-run-name"
want := map[string]string{
taskRunLabelKey: taskRunName,
"foo": "bar",
"hello": "world",
}
got := MakeLabels(&v1beta1.TaskRun{
ObjectMeta: metav1.ObjectMeta{
Name: taskRunName,
Labels: map[string]string{
"foo": "bar",
"hello": "world",
},
},
})
if d := cmp.Diff(got, want); d != "" {
t.Errorf("Diff labels %s", diff.PrintWantGot(d))
}
}
func TestShouldOverrideHomeEnv(t *testing.T) {
for _, tc := range []struct {
description string
configMap *corev1.ConfigMap
expected bool
}{{
description: "Default behaviour: A missing disable-home-env-overwrite flag should result in true",
configMap: &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: GetFeatureFlagsConfigName(), Namespace: system.GetNamespace()},
Data: map[string]string{},
},
expected: true,
}, {
description: "Setting disable-home-env-overwrite to false should result in true",
configMap: &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: GetFeatureFlagsConfigName(), Namespace: system.GetNamespace()},
Data: map[string]string{
featureFlagDisableHomeEnvKey: "false",
},
},
expected: true,
}, {
description: "Setting disable-home-env-overwrite to true should result in false",
configMap: &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: GetFeatureFlagsConfigName(), Namespace: system.GetNamespace()},
Data: map[string]string{
featureFlagDisableHomeEnvKey: "true",
},
},
expected: false,
}} {
t.Run(tc.description, func(t *testing.T) {
kubeclient := fakek8s.NewSimpleClientset(
tc.configMap,
)
if result := ShouldOverrideHomeEnv(kubeclient); result != tc.expected {
t.Errorf("Expected %t Received %t", tc.expected, result)
}
})
}
}
func TestGetFeatureFlagsConfigName(t *testing.T) {
for _, tc := range []struct {
description string
featureFlagEnvValue string
expected string
}{{
description: "Feature flags config value not set",
featureFlagEnvValue: "",
expected: "feature-flags",
}, {
description: "Feature flags config value set",
featureFlagEnvValue: "feature-flags-test",
expected: "feature-flags-test",
}} {
t.Run(tc.description, func(t *testing.T) {
			original := os.Getenv("CONFIG_FEATURE_FLAGS_NAME")
			// t.Cleanup already runs after the test; wrapping the registration
			// in defer only deferred the registration itself, so drop the defer.
			t.Cleanup(func() {
				os.Setenv("CONFIG_FEATURE_FLAGS_NAME", original)
			})
			if tc.featureFlagEnvValue != "" {
				os.Setenv("CONFIG_FEATURE_FLAGS_NAME", tc.featureFlagEnvValue)
			} else {
				// Make sure a value leaking in from the environment cannot
				// break the "value not set" case.
				os.Unsetenv("CONFIG_FEATURE_FLAGS_NAME")
			}
got := GetFeatureFlagsConfigName()
want := tc.expected
if got != want {
t.Errorf("GetFeatureFlagsConfigName() = %s, want %s", got, want)
}
})
}
}
func TestShouldOverrideWorkingDir(t *testing.T) {
for _, tc := range []struct {
description string
configMap *corev1.ConfigMap
expected bool
}{{
description: "Default behaviour: A missing disable-working-directory-overwrite flag should result in true",
configMap: &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: GetFeatureFlagsConfigName(), Namespace: system.GetNamespace()},
Data: map[string]string{},
},
expected: true,
}, {
description: "Setting disable-working-directory-overwrite to false should result in true",
configMap: &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: GetFeatureFlagsConfigName(), Namespace: system.GetNamespace()},
Data: map[string]string{
featureFlagDisableWorkingDirKey: "false",
},
},
expected: true,
}, {
description: "Setting disable-working-directory-overwrite to true should result in false",
configMap: &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: GetFeatureFlagsConfigName(), Namespace: system.GetNamespace()},
Data: map[string]string{
featureFlagDisableWorkingDirKey: "true",
},
},
expected: false,
}} {
t.Run(tc.description, func(t *testing.T) {
kubeclient := fakek8s.NewSimpleClientset(
tc.configMap,
)
if result := shouldOverrideWorkingDir(kubeclient); result != tc.expected {
t.Errorf("Expected %t Received %t", tc.expected, result)
}
})
}
}
| [
"\"CONFIG_FEATURE_FLAGS_NAME\""
]
| []
| [
"CONFIG_FEATURE_FLAGS_NAME"
]
| [] | ["CONFIG_FEATURE_FLAGS_NAME"] | go | 1 | 0 | |
rss_it/settings.py | """
Django settings for rss_it project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECRET_KEY = thekey
SECRET_KEY = os.environ.get('SECRET_KEY')
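# The key is read from the environment, e.g. (placeholder value):
#   export SECRET_KEY='replace-with-a-long-random-string'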
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# DEBUG = os.environ.get('DEBUG_VALUE', False)
ALLOWED_HOSTS = [
'rss-it.herokuapp.com',
'127.0.0.1']
# Application definition
INSTALLED_APPS = [
'gui.apps.GuiConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'rss_it.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'rss_it.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage' | []
| []
| [
"DEBUG_VALUE",
"SECRET_KEY"
]
| [] | ["DEBUG_VALUE", "SECRET_KEY"] | python | 2 | 0 | |
rates_test.go | package dinero
import (
"os"
"reflect"
"testing"
. "github.com/onsi/gomega"
)
// TestAllRates will test updating our local store of forex rates from the OXR API.
func TestAllRates(t *testing.T) {
// Register the test.
RegisterTestingT(t)
// Init dinero client.
client := NewClient(os.Getenv("OPEN_EXCHANGE_APP_ID"))
// Set a base currency to work with.
client.Rates.SetBaseCurrency("AUD")
// Get latest forex rates.
response, err := client.Rates.All()
if err != nil {
t.Fatalf("Unexpected error running client.Rates.All(): %s", err.Error())
}
	if response.Base != "AUD" {
		// err is nil at this point, so report the unexpected value itself
		// rather than dereferencing a nil error.
		t.Fatalf("Unexpected base oxr rate: %s. Expecting `AUD`.", response.Base)
	}
	if response.UpdatedAt.IsZero() {
		t.Fatalf("Unexpected zero response timestamp.")
	}
	if response.Rates == nil {
		t.Fatalf("Unexpected empty set of rates.")
	}
}
// TestSingleRate will test pulling a single rate.
func TestSingleRate(t *testing.T) {
// Register the test.
RegisterTestingT(t)
// Init dinero client.
client := NewClient(os.Getenv("OPEN_EXCHANGE_APP_ID"))
// Set a base currency to work with.
client.Rates.SetBaseCurrency("AUD")
// Get latest forex rates for NZD (using AUD as a base).
response, err := client.Rates.Single("NZD")
if err != nil {
t.Fatalf("Unexpected error running client.Rates.Single('NZD'): %s", err.Error())
}
// Did we get a *float64 back?
if reflect.TypeOf(response).String() != "*float64" {
t.Fatalf("Unexpected rate datatype, expected float64 got %T", response)
}
}
| [
"\"OPEN_EXCHANGE_APP_ID\"",
"\"OPEN_EXCHANGE_APP_ID\""
]
| []
| [
"OPEN_EXCHANGE_APP_ID"
]
| [] | ["OPEN_EXCHANGE_APP_ID"] | go | 1 | 0 | |
server/src/main/java/edu/escuelaing/arep/webservices/SimpleWebService.java | package edu.escuelaing.arep.webservices;
import edu.escuelaing.arep.security.AuthProvider;
import spark.Request;
import spark.Response;
import java.util.ArrayList;
import static spark.Spark.*;
import static spark.Spark.post;
public class SimpleWebService {
public static void main(String[] args) {
port(getPort());
secure("keystores/ecikeystore.p12", "123456", null, null);
staticFileLocation("/public");
post("/result", (req, resp) -> {
System.out.println(req.body());
if(!checkAuthentication(req, resp)){
resp.redirect("/login.html");
resp.status(403);
return "403";
} else {
resp.type("application/json");
String[] nums = req.body().split(",");
ArrayList<Integer> ans = new ArrayList<Integer>();
for(String a: nums){
ans.add(Integer.parseInt(a));
}
resp.status(200);
return "{\"result\": "+ calculateMean(ans)+"}";
}
});
}
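    // Example exchange against the endpoint above (illustrative values only):
    //   POST /result with body "1,2,3,4"  ->  {"result": 2.5}
    // Requests without valid authentication are redirected to /login.html
    // with status 403.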
    /**
     * Computes the mean of a list of integers.
     * @param num list of integers
     * @return mean of the list
     */
public static float calculateMean(ArrayList<Integer> num){
int ans = 0;
System.out.println(num);
for(int a: num){
ans+=a;
}
float re =(float)ans/(float)num.size();
return re;
}
    /**
     * Picks the port to listen on depending on the deployment environment.
     * @return port to use
     */
public static int getPort() {
if (System.getenv("PORT") != null) {
return Integer.parseInt(System.getenv("PORT"));
}
return 5000; //returns default port if heroku-port isn't set (i.e. on localhost)
}
    /**
     * Checks that the request is authenticated.
     * @param req user request
     * @param res user response
     * @return whether the user is authenticated
     */
public static boolean checkAuthentication(Request req, Response res){
boolean follow = AuthProvider.isAuthenticated(req);
String page = req.pathInfo();
System.out.println(follow);
return follow || page.equals("/login.html");
}
}
| [
"\"PORT\"",
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | java | 1 | 0 | |
aiven/resource_database_test.go | // Copyright (c) 2017 jelmersnoeck
// Copyright (c) 2018-2021 Aiven, Helsinki, Finland. https://aiven.io/
package aiven
import (
"fmt"
"os"
"testing"
"github.com/aiven/aiven-go-client"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
)
func init() {
resource.AddTestSweepers("aiven_database", &resource.Sweeper{
Name: "aiven_database",
F: sweepDatabases,
Dependencies: []string{
"aiven_connection_pool",
},
})
}
func sweepDatabases(region string) error {
client, err := sharedClient(region)
if err != nil {
return fmt.Errorf("error getting client: %s", err)
}
conn := client.(*aiven.Client)
projects, err := conn.Projects.List()
if err != nil {
return fmt.Errorf("error retrieving a list of projects : %s", err)
}
for _, project := range projects {
if project.Name == os.Getenv("AIVEN_PROJECT_NAME") {
services, err := conn.Services.List(project.Name)
if err != nil {
return fmt.Errorf("error retrieving a list of services for a project `%s`: %s", project.Name, err)
}
for _, service := range services {
dbs, err := conn.Databases.List(project.Name, service.Name)
if err != nil {
if err.(aiven.Error).Status == 403 || err.(aiven.Error).Status == 501 {
continue
}
return fmt.Errorf("error retrieving a list of databases for a service `%s`: %s", service.Name, err)
}
for _, db := range dbs {
if db.DatabaseName == "defaultdb" {
continue
}
err = conn.Databases.Delete(project.Name, service.Name, db.DatabaseName)
if err != nil {
return fmt.Errorf("error destroying database `%s` during sweep: %s", db.DatabaseName, err)
}
}
}
}
}
return nil
}
func TestAccAivenDatabase_basic(t *testing.T) {
resourceName := "aiven_database.foo"
rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
rName2 := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
ProviderFactories: testAccProviderFactories,
CheckDestroy: testAccCheckAivenDatabaseResourceDestroy,
Steps: []resource.TestStep{
{
Config: testAccDatabaseResource(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckAivenDatabaseAttributes("data.aiven_database.database"),
resource.TestCheckResourceAttr(resourceName, "project", os.Getenv("AIVEN_PROJECT_NAME")),
resource.TestCheckResourceAttr(resourceName, "service_name", fmt.Sprintf("test-acc-sr-%s", rName)),
resource.TestCheckResourceAttr(resourceName, "database_name", fmt.Sprintf("test-acc-db-%s", rName)),
resource.TestCheckResourceAttr(resourceName, "termination_protection", "false"),
),
},
{
Config: testAccDatabaseTerminationProtectionResource(rName2),
PreventPostDestroyRefresh: true,
ExpectNonEmptyPlan: true,
PlanOnly: true,
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(resourceName, "project", os.Getenv("AIVEN_PROJECT_NAME")),
resource.TestCheckResourceAttr(resourceName, "service_name", fmt.Sprintf("test-acc-sr-%s", rName2)),
resource.TestCheckResourceAttr(resourceName, "database_name", fmt.Sprintf("test-acc-db-%s", rName2)),
resource.TestCheckResourceAttr(resourceName, "termination_protection", "true"),
),
},
},
})
}
func testAccCheckAivenDatabaseResourceDestroy(s *terraform.State) error {
c := testAccProvider.Meta().(*aiven.Client)
// loop through the resources in state, verifying each database is destroyed
for _, rs := range s.RootModule().Resources {
if rs.Type != "aiven_database" {
continue
}
projectName, serviceName, databaseName := splitResourceID3(rs.Primary.ID)
db, err := c.Databases.Get(projectName, serviceName, databaseName)
if err != nil {
if err.(aiven.Error).Status != 404 {
return err
}
}
if db != nil {
return fmt.Errorf("databse (%s) still exists", rs.Primary.ID)
}
}
return nil
}
func testAccDatabaseResource(name string) string {
return fmt.Sprintf(`
data "aiven_project" "foo" {
project = "%s"
}
resource "aiven_pg" "bar" {
project = data.aiven_project.foo.project
cloud_name = "google-europe-west1"
plan = "startup-4"
service_name = "test-acc-sr-%s"
maintenance_window_dow = "monday"
maintenance_window_time = "10:00:00"
pg_user_config {
public_access {
pg = true
prometheus = false
}
pg {
idle_in_transaction_session_timeout = 900
}
}
}
resource "aiven_database" "foo" {
project = aiven_pg.bar.project
service_name = aiven_pg.bar.service_name
database_name = "test-acc-db-%s"
lc_ctype = "en_US.UTF-8"
lc_collate = "en_US.UTF-8"
}
data "aiven_database" "database" {
project = aiven_database.foo.project
service_name = aiven_database.foo.service_name
database_name = aiven_database.foo.database_name
depends_on = [aiven_database.foo]
}`,
os.Getenv("AIVEN_PROJECT_NAME"), name, name)
}
func testAccDatabaseTerminationProtectionResource(name string) string {
return fmt.Sprintf(`
data "aiven_project" "foo" {
project = "%s"
}
resource "aiven_pg" "bar" {
project = data.aiven_project.foo.project
cloud_name = "google-europe-west1"
plan = "startup-4"
service_name = "test-acc-sr-%s"
maintenance_window_dow = "monday"
maintenance_window_time = "10:00:00"
pg_user_config {
public_access {
pg = true
prometheus = false
}
pg {
idle_in_transaction_session_timeout = 900
}
}
}
resource "aiven_database" "foo" {
project = aiven_pg.bar.project
service_name = aiven_pg.bar.service_name
database_name = "test-acc-db-%s"
termination_protection = true
}
data "aiven_database" "database" {
project = aiven_database.foo.project
service_name = aiven_database.foo.service_name
database_name = aiven_database.foo.database_name
depends_on = [aiven_database.foo]
}`,
os.Getenv("AIVEN_PROJECT_NAME"), name, name)
}
func testAccCheckAivenDatabaseAttributes(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
r := s.RootModule().Resources[n]
a := r.Primary.Attributes
if a["project"] == "" {
return fmt.Errorf("expected to get a project name from Aiven")
}
if a["service_name"] == "" {
return fmt.Errorf("expected to get a service_name from Aiven")
}
if a["database_name"] == "" {
return fmt.Errorf("expected to get a database_name from Aiven")
}
if a["database_name"] == "" {
return fmt.Errorf("expected to get a database_name from Aiven")
}
return nil
}
}
| [
"\"AIVEN_PROJECT_NAME\"",
"\"AIVEN_PROJECT_NAME\"",
"\"AIVEN_PROJECT_NAME\"",
"\"AIVEN_PROJECT_NAME\"",
"\"AIVEN_PROJECT_NAME\""
]
| []
| [
"AIVEN_PROJECT_NAME"
]
| [] | ["AIVEN_PROJECT_NAME"] | go | 1 | 0 | |
cmd/rtail/cmd/tail.go | package cmd
import (
"context"
"fmt"
"io"
"log"
"os"
"strings"
"time"
"github.com/marcsauter/rtail/pkg/global"
"github.com/marcsauter/rtail/pkg/pb"
"github.com/spf13/cobra"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
// tailCmd represents the tail command
var tailCmd = &cobra.Command{
Use: "tail file@host",
Short: "tail",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
// validate argument(s)
if len(args) != 1 {
cmd.Usage()
return
}
filenode := strings.Split(args[0], "@")
if len(filenode) != 2 {
cmd.Usage()
return
}
// evaluate flags
proxy, _ := cmd.Flags().GetString("proxy")
timeout, _ := cmd.Flags().GetDuration("timeout")
last, _ := cmd.Flags().GetInt("last")
follow, _ := cmd.Flags().GetBool("follow")
// connect to proxy
conn, err := grpc.Dial(proxy, grpc.WithInsecure())
if err != nil {
log.Fatalf("did not connect: %v", err)
}
defer conn.Close()
// context
ctxt, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
// add token to metadata
token := os.Getenv("TOKEN")
ctxm := metadata.NewOutgoingContext(ctxt, metadata.Pairs(
"token", token,
))
c := pb.NewProxyClient(conn)
stream, err := c.Get(ctxm, &pb.FileRequest{
Provider: filenode[1],
Path: filenode[0],
Last: uint32(last),
Follow: follow,
})
if err != nil {
log.Fatalf("could not tail %s: %v", args[0], err)
}
for {
l, err := stream.Recv()
switch err {
case io.EOF:
return
case nil:
fmt.Println(l.Line)
default:
log.Fatalf("failed to receive a line from %s: %v", args[0], err)
}
}
},
}
func init() {
rootCmd.AddCommand(tailCmd)
tailCmd.Flags().StringP("proxy", "p", global.ProxyDefault, "proxy address")
tailCmd.Flags().DurationP("timeout", "t", 5*time.Second, "timeout")
tailCmd.Flags().IntP("last", "l", 0, "last lines (0 = whole file)")
tailCmd.Flags().BoolP("follow", "f", false, "follow")
}
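// Example invocation (proxy address, file and host are placeholders):
//
//	TOKEN=secret rtail tail /var/log/syslog@node1 --follow --last 100 --proxy localhost:50051
//
// The TOKEN environment variable is forwarded to the proxy as gRPC metadata.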
| [
"\"TOKEN\""
]
| []
| [
"TOKEN"
]
| [] | ["TOKEN"] | go | 1 | 0 | |
cmake/CMakeBuild/bin/pyhhi/build/cmksupp.py |
from __future__ import print_function
import logging
import os
import re
import sys
import pyhhi.build.common.bldtools as bldtools
import pyhhi.build.common.util as util
import pyhhi.build.common.ver as ver
from pyhhi.build.common.system import SystemInfo
from pyhhi.build.cmkfnd import CMakeFinder
class CMakeLauncherParams(object):
def __init__(self):
self.dry_run = False
self.cmk_build = False
self.clean_first = False
        # cmk_bin_dir overrides the default CMake search path.
self.cmk_bin_dir = None
self.cmk_build_jobs = 1
self.cmk_build_target = None
#msbuild: verbosity levels: q[uiet], m[inimal], n[ormal], d[etailed], and diag[nostic]
self.cmk_build_verbosity = 'minimal'
self.cmk_generator_alias = None
self.cmk_warning_flags = []
self.cmk_cache_entries = []
# gcc, gcc-4.9, clang, msvc, msvc-19.0
self.toolset_str = None
self.target_arch = None
# tuple/list of build configurations (debug, release)
self.build_configs = tuple(['debug'])
self.link_variants = tuple(['static'])
# Assigned if the script and its modules are to be installed.
self.install_dir = None
# Python directories to search for cache files/directories.
self.py_cache_dirs = []
class CMakeCompilerInfo(object):
def __init__(self):
# If a toolchain file is given, all other attributes are not used and a cross compiler configuration
# is assumed.
self.cmake_toolchain_file = None
# gcc, clang, msvc
self.compiler_family = None
self.version_major_minor = None
self.target_arch = None
self.cmake_cxx_compiler = None
self.cmake_c_compiler = None
self.mingw = False
def is_cross_compiler(self):
return self.cmake_toolchain_file is not None
def __str__(self):
if self.cmake_toolchain_file:
s = "toolchain file: %s\n" % self.cmake_toolchain_file
else:
s = "compiler family: %s\n" % self.compiler_family
s += "compiler version (major.minor): %s\n" % ver.version_tuple_to_str(self.version_major_minor)
if self.target_arch:
s += "target arch: %s\n" % self.target_arch
if self.cmake_cxx_compiler:
s += "cmake cxx compiler: %s\n" % self.cmake_cxx_compiler
s += "cmake c compiler: %s\n" % self.cmake_c_compiler
return s
class CMakeBuildTreeInfo(object):
def __init__(self, build_root, compiler_info, generator_alias):
self._logger = logging.getLogger(__name__)
self._sys_info = SystemInfo()
self._build_root = build_root
self._generator_alias = generator_alias
self._default_build_configs = ['debug', 'release', 'relwithdebinfo', 'minsizerel']
# key=bld_variant.lnk_variant value=build_dir
self._build_dir_dict = self._create_build_dir_dict(compiler_info, generator_alias, self._default_build_configs)
def get_build_dir(self, bld_config, lnk_variant):
return self._build_dir_dict[bld_config + '.' + lnk_variant]
def is_multi_configuration_generator(self):
return self._generator_alias.startswith('vs') or (self._generator_alias == 'xcode')
def _create_build_dir_dict(self, compiler_info, generator_alias, build_configs):
build_dir_dict = {}
if compiler_info.is_cross_compiler():
assert generator_alias == 'umake'
build_root = self._create_cross_compile_build_dir(self._build_root, compiler_info.cmake_toolchain_file, generator_alias)
print("cross compile build root: " + build_root)
if generator_alias == 'umake':
for cfg in build_configs:
build_dir_dict[cfg + '.' + 'static'] = os.path.join(build_root, cfg)
build_dir_dict[cfg + '.' + 'shared'] = os.path.join(build_root, cfg + '-shared')
else:
target_arch = compiler_info.target_arch
if compiler_info.mingw:
toolset_dir = "{}-mingw-{}".format(compiler_info.compiler_family, ver.version_tuple_to_str(compiler_info.version_major_minor))
else:
toolset_dir = "{}-{}".format(compiler_info.compiler_family, ver.version_tuple_to_str(compiler_info.version_major_minor))
if self.is_multi_configuration_generator():
for cfg in build_configs:
build_dir_dict[cfg + '.' + 'static'] = os.path.join(self._build_root, generator_alias, toolset_dir, target_arch)
build_dir_dict[cfg + '.' + 'shared'] = os.path.join(self._build_root, generator_alias, toolset_dir, target_arch + '-shared')
else:
for cfg in build_configs:
build_dir_dict[cfg + '.' + 'static'] = os.path.join(self._build_root, generator_alias, toolset_dir, target_arch, cfg)
build_dir_dict[cfg + '.' + 'shared'] = os.path.join(self._build_root, generator_alias, toolset_dir, target_arch, cfg + '-shared')
return build_dir_dict
def _create_cross_compile_build_dir(self, build_root, toolchain_file, generator_alias):
assert generator_alias == 'umake'
basenm = os.path.basename(toolchain_file)
re_match = re.match(r'(.+)\.cmake$', basenm)
if re_match:
basenm = re_match.group(1)
if self._sys_info.is_linux():
# Try to strip a distro related suffix.
distro_suffix = self._sys_info.get_os_distro_short()
re_match = re.match('(.+)[-]' + distro_suffix + '([-_.0-9]+)?$', basenm)
if re_match:
basenm = re_match.group(1)
build_dir = os.path.join(build_root, basenm)
return build_dir
class CMakeLauncher(object):
def __init__(self, verbosity=1):
self._logger = logging.getLogger(__name__)
self._sys_info = SystemInfo()
self._verbosity_level = verbosity
self._cmake_finder = CMakeFinder(self._sys_info)
self._top_dir = None
self._build_root = None
self._build_tree_create_if_not_exists = True
self._build_tree_info = None
self._deploy_dir = None
self._script_name = os.path.basename(sys.argv[0])
self._cmk_cache_file = 'CMakeCache.txt'
# cache entries the user cannot override via -Dxxx
self._cmk_reserved_cache_vars = ['BUILD_SHARED_LIBS',
'CMAKE_BUILD_TYPE',
'CMAKE_CONFIGURATION_TYPES',
'CMAKE_CXX_COMPILER',
'CMAKE_C_COMPILER',
'CMAKE_TOOLCHAIN_FILE']
self._dict_to_cmake_generator = {'umake': 'Unix Makefiles',
'mgwmake': 'MinGW Makefiles',
'ninja': 'Ninja',
'xcode': 'Xcode',
'vs15': 'Visual Studio 15 2017',
'vs14': 'Visual Studio 14 2015',
'vs12': 'Visual Studio 12 2013',
'vs11': 'Visual Studio 11 2012',
'vs10': 'Visual Studio 10 2010'}
        # List of default configuration types for multi-configuration generators.
self._default_config_types = ['debug', 'release']
# self._default_config_types = ['debug', 'release', 'relwithdebinfo']
self._dict_to_cmake_config = {'debug': 'Debug', 'release': 'Release', 'relwithdebinfo': 'RelWithDebInfo', 'minsizerel': 'MinSizeRel'}
self._dict_to_vs_platform_name = {'x86_64': 'x64', 'x86': 'Win32'}
self._prefer_vs_native_toolsets = True
if self._sys_info.is_windows():
self._msvc_registry = bldtools.MsvcRegistry()
self._dict_to_vs_platform_toolset = {'msvc-19.0': 'v140',
'msvc-18.0': 'v120',
'msvc-17.0': 'v110',
'msvc-16.0': 'v100'}
self._dict_generator_alias_to_msvc_toolsets = {'vs14': ['msvc-19.0', 'msvc-18.0', 'msvc-17.0', 'msvc-16.0'],
'vs12': ['msvc-18.0', 'msvc-17.0', 'msvc-16.0'],
'vs11': ['msvc-17.0', 'msvc-16.0'],
'vs10': ['msvc-16.0']}
            # vs15 does not have a fixed compiler version, so the mapping is generated dynamically.
if self._msvc_registry.is_version_installed((14, 1)):
cl_version = self._msvc_registry.get_compiler_version((14, 1))
msvc_str = 'msvc-' + ver.version_tuple_to_str(cl_version[:2])
if cl_version[1] < 20:
self._dict_to_vs_platform_toolset[msvc_str] = 'v141'
self._dict_generator_alias_to_msvc_toolsets['vs15'] = [msvc_str, 'msvc-19.0', 'msvc-18.0', 'msvc-17.0', 'msvc-16.0']
else:
assert False
def launch(self, params, cmake_argv):
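        # Entry point: locates CMake, lays out the build tree, then either runs a
        # "cmake --build" step or (re)configures the build trees, depending on params.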
if params.cmk_bin_dir:
self._cmake_finder.set_cmake_search_path([params.cmk_bin_dir])
# Is cmake installed?
if not self._cmake_finder.is_cmake_installed():
return
self._top_dir = os.getcwd()
self._build_root = os.path.join(self._top_dir, 'build')
self._deploy_dir = os.path.join(self._top_dir, 'deploy')
if not os.path.exists(self._build_root):
os.makedirs(self._build_root)
self._check_cmake_params(params)
compiler_info = self._create_compiler_info(params.toolset_str, params.target_arch)
#print(compiler_info)
#return
self._build_tree_info = CMakeBuildTreeInfo(self._build_root, compiler_info, params.cmk_generator_alias)
#print(self._build_tree_info.get_build_dir('release', 'static'))
#print(self._build_tree_info.get_build_dir('release', 'shared'))
#print(self._build_tree_info.get_build_dir('debug', 'static'))
#print(self._build_tree_info.get_build_dir('debug', 'shared'))
# return
if params.cmk_build:
# cmake build
if self._build_tree_create_if_not_exists and (not self._is_build_target_clean(params.cmk_build_target)):
                # Cleaning a non-existent build tree is a no-op and does not require a build tree at all.
cmake_argv_config = []
if params.cmk_warning_flags:
cmake_argv_config.extend(params.cmk_warning_flags)
if params.cmk_cache_entries:
cmake_argv_config.extend(params.cmk_cache_entries)
for lnk in params.link_variants:
self._create_default_build_tree(compiler_info, params.cmk_generator_alias, params.build_configs, lnk, cmake_argv_config)
for lnk in params.link_variants:
self.launch_build(params, lnk, cmake_argv)
else:
# cmake build tree create/update
for lnk in params.link_variants:
cmake_argv_config = []
if params.cmk_warning_flags:
cmake_argv_config.extend(params.cmk_warning_flags)
if params.cmk_cache_entries:
cmake_argv_config.extend(params.cmk_cache_entries)
if cmake_argv:
cmake_argv_config.extend(cmake_argv)
# print("warning flags: ", params.cmk_warning_flags)
# print("additional flags: ", cmake_argv_config)
self.launch_config(compiler_info, params.cmk_generator_alias, params.build_configs, lnk, cmake_argv_config)
def launch_config(self, compiler_info, generator_alias, build_configs, lnk_variant, cmake_argv_optional):
cur_dir = os.getcwd()
cmake_argv = []
if self._is_multi_configuration_generator():
tmp_build_configs = [build_configs[0]]
else:
tmp_build_configs = build_configs
for cfg in tmp_build_configs:
b_dir = self._build_tree_info.get_build_dir(cfg, lnk_variant)
if not os.path.exists(b_dir):
os.makedirs(b_dir)
if generator_alias.startswith('vs'):
if compiler_info.compiler_family == 'msvc':
if self._is_vs_64bit_native_toolset_supported(generator_alias):
vs_toolset = self._dict_to_vs_platform_toolset['msvc-' + ver.version_tuple_to_str(compiler_info.version_major_minor)] + ',host=x64'
else:
vs_toolset = self._dict_to_vs_platform_toolset['msvc-' + ver.version_tuple_to_str(compiler_info.version_major_minor)]
elif compiler_info.compiler_family == 'intel':
vs_toolset = "Intel C++ Compiler %d.%d" % (compiler_info.version_major_minor[0], compiler_info.version_major_minor[1])
else:
assert False
cmake_argv = ['-G', self._dict_to_cmake_generator[generator_alias],
'-T', vs_toolset,
'-A', self._dict_to_vs_platform_name[compiler_info.target_arch]]
elif generator_alias == 'xcode':
cmake_argv = ['-G', self._dict_to_cmake_generator[generator_alias]]
elif generator_alias in ['umake', 'mgwmake', 'ninja']:
cmake_argv = ['-G', self._dict_to_cmake_generator[generator_alias],
'-DCMAKE_BUILD_TYPE=' + self._dict_to_cmake_config[cfg]]
if compiler_info.is_cross_compiler():
cmake_argv.append('-DCMAKE_TOOLCHAIN_FILE=' + compiler_info.cmake_toolchain_file)
else:
if compiler_info.cmake_cxx_compiler:
cmake_argv.append('-DCMAKE_CXX_COMPILER=' + compiler_info.cmake_cxx_compiler)
if compiler_info.cmake_c_compiler:
cmake_argv.append('-DCMAKE_C_COMPILER=' + compiler_info.cmake_c_compiler)
if cmake_argv_optional:
# Add any additional arguments to the cmake command line.
cmake_argv.extend(cmake_argv_optional)
if lnk_variant == 'shared':
cmake_argv.append('-DBUILD_SHARED_LIBS=1')
if self._is_multi_configuration_generator():
cmake_config_types = [self._dict_to_cmake_config[x] for x in self._default_config_types]
for b_cfg in build_configs:
if b_cfg not in self._default_config_types:
cmake_config_types.append(self._dict_to_cmake_config[b_cfg])
cmake_argv.append('-DCMAKE_CONFIGURATION_TYPES=' + ';'.join(cmake_config_types))
# cmake_argv.append(self._top_dir)
# print("launch_config(): cmake_args", cmake_argv)
# print("build dir:", b_dir)
# print("top dir:", self._top_dir)
if (not self._sys_info.is_windows()) and (ver.version_compare(self._cmake_finder.get_cmake_version(), (3, 13, 0)) >= 0):
                # Not done for Windows yet, to avoid potential issues with command-line length limits.
cmake_argv.extend(['-S', self._top_dir, '-B', b_dir])
retv = self.launch_cmake(cmake_argv)
else:
os.chdir(b_dir)
cmake_argv.append(os.path.relpath(self._top_dir))
retv = self.launch_cmake(cmake_argv)
os.chdir(cur_dir)
if retv != 0:
sys.exit(1)
def launch_build(self, params, lnk_variant, cmake_argv_optional):
if self._is_multi_configuration_generator():
# multiple configurations / build directory
b_dir = self._build_tree_info.get_build_dir(params.build_configs[0], lnk_variant)
if self._is_build_target_clean(params.cmk_build_target) and (not os.path.exists(b_dir)):
return
for cfg in params.build_configs:
cmake_argv = ['--build', b_dir, '--config', self._dict_to_cmake_config[cfg]]
self._add_common_cmake_build_options(cmake_argv, params)
self._add_cmake_build_jobs_option(cmake_argv, params.cmk_generator_alias, params.cmk_build_jobs)
self._add_cmake_build_verbosity_option(cmake_argv, params.cmk_generator_alias, params.cmk_build_verbosity)
if cmake_argv_optional:
self._add_cmake_build_tool_options(cmake_argv, cmake_argv_optional)
retv = self.launch_cmake(cmake_argv)
if retv != 0:
sys.exit(1)
else:
# one build directory / configuration
for cfg in params.build_configs:
b_dir = self._build_tree_info.get_build_dir(cfg, lnk_variant)
if self._is_build_target_clean(params.cmk_build_target) and (not os.path.exists(b_dir)):
continue
cmake_argv = ['--build', b_dir]
self._add_common_cmake_build_options(cmake_argv, params)
self._add_cmake_build_jobs_option(cmake_argv, params.cmk_generator_alias, params.cmk_build_jobs)
self._add_cmake_build_verbosity_option(cmake_argv, params.cmk_generator_alias, params.cmk_build_verbosity)
if cmake_argv_optional:
self._add_cmake_build_tool_options(cmake_argv, cmake_argv_optional)
retv = self.launch_cmake(cmake_argv)
if retv != 0:
sys.exit(1)
def launch_cmake(self, cmake_argv):
argv = [self._cmake_finder.find_cmake()]
argv.extend(cmake_argv)
if self._verbosity_level > 0:
# assemble the cmake command line for logging purposes
joiner = ' '
cmd_line = joiner.join(argv)
print("Launching: " + cmd_line)
retv = util.subproc_call_flushed(argv)
if retv < 0:
self._logger.debug("child was terminated by signal: %d", -retv)
else:
self._logger.debug("child returned: %d", retv)
return retv
def _is_build_target_clean(self, target):
return (target is not None) and (target == 'clean')
def _check_cmake_params(self, params):
if params.cmk_generator_alias is None:
if self._sys_info.is_windows_msys() and (params.toolset_str is not None) and (params.toolset_str == 'gcc'):
params.cmk_generator_alias = 'umake'
else:
params.cmk_generator_alias = self._get_default_cmake_generator()
if params.toolset_str is None:
if self._sys_info.get_platform() == 'linux':
params.toolset_str = 'gcc'
elif self._sys_info.get_platform() == 'macosx':
params.toolset_str = 'clang'
elif self._sys_info.get_platform() == 'windows':
if params.cmk_generator_alias in ['mgwmake', 'umake']:
params.toolset_str = 'gcc'
else:
params.toolset_str = self._dict_generator_alias_to_msvc_toolsets[params.cmk_generator_alias][0]
else:
assert False
elif params.toolset_str == 'msvc':
# toolset=msvc means to select the latest msvc version the selected generator supports.
assert self._sys_info.get_platform() == 'windows'
params.toolset_str = self._dict_generator_alias_to_msvc_toolsets[params.cmk_generator_alias][0]
elif params.toolset_str.startswith('msvc-'):
if params.toolset_str not in self._dict_generator_alias_to_msvc_toolsets[params.cmk_generator_alias]:
raise Exception("The selected generator does not support " + params.toolset_str + ", check toolset and generator arguments.")
if params.target_arch is None:
params.target_arch = self._sys_info.get_os_arch()
if params.cmk_cache_entries:
self._check_cmake_user_cache_entries(params.cmk_cache_entries)
def _check_cmake_user_cache_entries(self, user_cache_entries):
# -D<var>:<type>=<value> or -D<var>=<value> provided by the user cannot override any reserved cache entries like BUILD_SHARED_LIBS, ...
re_cache_entry = re.compile(r'-D(\w+)[=:]')
for cache_opt in user_cache_entries:
re_match = re_cache_entry.match(cache_opt)
if re_match:
cache_var = re_match.group(1)
if cache_var in self._cmk_reserved_cache_vars:
raise Exception("CMake cache entry " + cache_var + " is reserved by "
+ self._script_name + " and may not be overridden via -D<expr>, please contact technical support.")
else:
# Unexpected -D expression, please investigate.
raise Exception("CMake cache entry expression " + cache_opt + " is unsupported, please contact technical support." )
def _get_default_cmake_generator(self):
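        # An explicit DEFAULT_CMAKE_GENERATOR environment variable takes precedence
        # over the platform-specific defaults chosen below.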
if 'DEFAULT_CMAKE_GENERATOR' in os.environ:
generator_alias = os.environ['DEFAULT_CMAKE_GENERATOR']
if generator_alias not in self._dict_to_cmake_generator:
raise Exception("CMake generator " + generator_alias + " defined by environment variable DEFAULT_CMAKE_GENERATOR is unsupported.")
return generator_alias
if self._sys_info.get_platform() == 'linux':
generator_alias = 'umake'
elif self._sys_info.get_platform() == 'macosx':
generator_alias = 'xcode'
elif self._sys_info.get_platform() == 'windows':
# e.g. 14.1, 14.0, 12.0 etc.
bb_vs_latest_version = self._msvc_registry.get_latest_version()
if ver.version_compare(bb_vs_latest_version, (14,1)) == 0:
generator_alias = 'vs15'
else:
generator_alias = 'vs' + str(bb_vs_latest_version[0])
else:
assert False
return generator_alias
def _is_multi_configuration_generator(self):
return self._build_tree_info.is_multi_configuration_generator()
def _is_vs_64bit_native_toolset_supported(self, generator_alias):
re_vs_generator = re.compile(r'vs(\d+)$')
re_match = re_vs_generator.match(generator_alias)
if not re_match:
return False
if not self._prefer_vs_native_toolsets:
# Visual Studio native 64bit toolchains are disabled
return False
if self._sys_info.get_os_arch() != 'x86_64':
return False
if int(re_match.group(1), 10) < 12:
            # Visual Studio 11 2012 and earlier do not have native 64-bit toolchains.
return False
if ver.version_compare(self._cmake_finder.get_cmake_version(), (3, 8, 0)) < 0:
# cmake too old to support vs native toolchains.
return False
return True
def _create_compiler_info(self, toolset_str, target_arch):
if self._sys_info.is_cray():
return self._create_cray_compiler_info(target_arch)
compiler_info = CMakeCompilerInfo()
if toolset_str and (toolset_str.endswith('.cmake')):
            # toolchain file specified -> no need to add any further compiler details
compiler_info.cmake_toolchain_file = toolset_str
return compiler_info
# native compiler selected or assumed, figure out details to create the build tree folder.
compiler_info.target_arch = target_arch
re_msvc_version = re.compile(r'msvc-(\d+\.\d+)$')
re_match = re_msvc_version.match(toolset_str)
if re_match:
compiler_info.compiler_family = 'msvc'
compiler_info.version_major_minor = ver.version_tuple_from_str(re_match.group(1))
return compiler_info
else:
assert not toolset_str.startswith('msvc')
bb_toolset_info = bldtools.Toolset(self._sys_info, toolset_str)
compiler_info.compiler_family = bb_toolset_info.get_toolset()
compiler_info.version_major_minor = bb_toolset_info.get_version()[:2]
# re_toolset_versioned = re.compile('([a-z]+)-(\d+\.\d+)$')
if self._sys_info.get_platform() == 'linux':
if toolset_str != 'gcc':
compiler_info.cmake_cxx_compiler = bb_toolset_info.get_compiler_command()
cxx_basename = os.path.basename(compiler_info.cmake_cxx_compiler)
# print("cxx_basename: ", cxx_basename)
if compiler_info.compiler_family == 'gcc':
gcc_basename = cxx_basename.replace('g++', 'gcc')
compiler_info.cmake_c_compiler = os.path.join(os.path.dirname(compiler_info.cmake_cxx_compiler), gcc_basename)
elif compiler_info.compiler_family == 'clang':
clang_basename = cxx_basename.replace('++', '')
compiler_info.cmake_c_compiler = os.path.join(os.path.dirname(compiler_info.cmake_cxx_compiler), clang_basename)
elif compiler_info.compiler_family == 'intel':
compiler_info.cmake_c_compiler = os.path.join(os.path.dirname(compiler_info.cmake_cxx_compiler), 'icc')
elif self._sys_info.get_platform() == 'macosx':
# assert compiler_info.compiler_family == 'clang'
if compiler_info.compiler_family == 'clang':
pass
elif compiler_info.compiler_family == 'intel':
compiler_info.cmake_cxx_compiler = bb_toolset_info.get_compiler_command()
compiler_info.cmake_c_compiler = os.path.join(os.path.dirname(compiler_info.cmake_cxx_compiler), 'icc')
else:
assert False
elif self._sys_info.get_platform() == 'windows':
if compiler_info.compiler_family == 'msvc':
pass
elif compiler_info.compiler_family == 'gcc':
# MinGW as native compiler: 64 bit and 32 bit default targets are possible.
compiler_info.mingw = bb_toolset_info.is_mingw()
compiler_info.target_arch = bb_toolset_info.get_platform_info(0).get_target_arch(0)
elif compiler_info.compiler_family == 'intel':
compiler_info.target_arch = bb_toolset_info.get_platform_info(0).get_target_arch(0)
else:
assert False
return compiler_info
def _create_cray_compiler_info(self, target):
compiler_info = CMakeCompilerInfo()
compiler_info.target_arch = target
version_str = None
if 'CRAY_PRGENVGNU' in os.environ:
compiler_info.compiler_family = 'gcc'
version_str = os.environ['GCC_VERSION']
elif 'CRAY_PRGENVCRAY' in os.environ:
compiler_info.compiler_family = 'cray'
version_str = os.environ['CRAY_CC_VERSION']
elif 'CRAY_PRGENVINTEL' in os.environ:
compiler_info.compiler_family = 'intel'
version_str = os.environ['INTEL_VERSION']
else:
assert False
version = ver.version_tuple_from_str(version_str)
assert len(version) >= 2
compiler_info.version_major_minor = version[:2]
return compiler_info
def _create_default_build_tree(self, compiler_info, generator_alias, build_configs, lnk_variant, cmake_argv_optional):
if self._is_multi_configuration_generator():
build_dir = self._build_tree_info.get_build_dir(build_configs[0], lnk_variant)
if not self._is_valid_build_tree(build_dir):
self.launch_config(compiler_info, generator_alias, build_configs, lnk_variant, cmake_argv_optional)
else:
for cfg in build_configs:
build_dir = self._build_tree_info.get_build_dir(cfg, lnk_variant)
if not self._is_valid_build_tree(build_dir):
self.launch_config(compiler_info, generator_alias, [cfg], lnk_variant, cmake_argv_optional)
def _is_valid_build_tree(self, build_dir):
if os.path.exists(os.path.join(build_dir, self._cmk_cache_file)):
return True
if os.path.exists(build_dir):
print(self._script_name + ": warning: build directory " + build_dir + " exists, but cmake cache file " + self._cmk_cache_file + " does not.")
return False
def _add_common_cmake_build_options(self, cmake_argv, params):
if params.cmk_build_target:
cmake_argv.extend(['--target', params.cmk_build_target])
if params.clean_first:
cmake_argv.append('--clean-first')
def _add_cmake_build_jobs_option(self, cmake_argv, generator_alias, build_jobs):
cmake_version = self._cmake_finder.get_cmake_version()
if ver.version_compare(cmake_version, (3, 12)) >= 0:
assert len(cmake_argv) >= 2
if build_jobs >= 2:
if generator_alias.startswith('vs'):
self._add_cmake_build_tool_options(cmake_argv, ['/maxcpucount:' + str(build_jobs)])
else:
cmake_argv.insert(2, str(build_jobs))
cmake_argv.insert(2, '--parallel')
elif build_jobs == 0:
# Use the build engine's native number of jobs.
cmake_argv.insert(2, '--parallel')
elif build_jobs >= 2:
if generator_alias in ['umake', 'ninja']:
self._add_cmake_build_tool_options(cmake_argv, ['-j' + str(build_jobs)])
elif generator_alias.startswith('vs'):
self._add_cmake_build_tool_options(cmake_argv, ['/maxcpucount:' + str(build_jobs)])
elif generator_alias == 'xcode':
self._add_cmake_build_tool_options(cmake_argv, ['-parallelizeTargets', '-jobs', str(build_jobs)])
def _add_cmake_build_verbosity_option(self, cmake_argv, generator_alias, verbosity_level):
if generator_alias.startswith('vs'):
self._add_cmake_build_tool_options(cmake_argv, ['/verbosity:' + verbosity_level])
def _add_cmake_build_tool_options(self, cmake_argv, build_tool_options):
if not build_tool_options:
# no options given -> return
return
assert '--' not in build_tool_options
if '--' not in cmake_argv:
cmake_argv.append('--')
cmake_argv.extend(build_tool_options)
| []
| []
| [
"DEFAULT_CMAKE_GENERATOR",
"INTEL_VERSION",
"GCC_VERSION",
"CRAY_CC_VERSION"
]
| [] | ["DEFAULT_CMAKE_GENERATOR", "INTEL_VERSION", "GCC_VERSION", "CRAY_CC_VERSION"] | python | 4 | 0 | |
runtime/bindings/python/tests/test_inference_engine/test_infer_request.py | # Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import os
import pytest
from openvino import Core, Blob, TensorDesc, StatusCode
def image_path():
path_to_repo = os.environ["DATA_PATH"]
path_to_img = os.path.join(path_to_repo, "validation_set", "224x224", "dog.bmp")
return path_to_img
def model_path(is_myriad=False):
path_to_repo = os.environ["MODELS_PATH"]
if not is_myriad:
test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.xml")
test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp32.bin")
else:
test_xml = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.xml")
test_bin = os.path.join(path_to_repo, "models", "test_model", "test_model_fp16.bin")
return (test_xml, test_bin)
def read_image():
import cv2
n, c, h, w = (1, 3, 32, 32)
image = cv2.imread(path_to_img)
if image is None:
raise FileNotFoundError("Input image not found")
image = cv2.resize(image, (h, w)) / 255
image = image.transpose((2, 0, 1)).astype(np.float32)
image = image.reshape((n, c, h, w))
return image
is_myriad = os.environ.get("TEST_DEVICE") == "MYRIAD"
test_net_xml, test_net_bin = model_path(is_myriad)
path_to_img = image_path()
def test_get_perf_counts(device):
ie_core = Core()
net = ie_core.read_network(test_net_xml, test_net_bin)
ie_core.set_config({"PERF_COUNT": "YES"}, device)
exec_net = ie_core.load_network(net, device)
img = read_image()
request = exec_net.create_infer_request()
td = TensorDesc("FP32", [1, 3, 32, 32], "NCHW")
input_blob = Blob(td, img)
request.set_input({"data": input_blob})
request.infer()
pc = request.get_perf_counts()
assert pc["29"]["status"] == "EXECUTED"
assert pc["29"]["layer_type"] == "FullyConnected"
del exec_net
del ie_core
del net
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU",
reason=f"Can't run test on device {os.environ.get('TEST_DEVICE', 'CPU')}, "
"Dynamic batch fully supported only on CPU")
@pytest.mark.skip(reason="Fix")
def test_set_batch_size(device):
ie_core = Core()
ie_core.set_config({"DYN_BATCH_ENABLED": "YES"}, device)
net = ie_core.read_network(test_net_xml, test_net_bin)
net.batch_size = 10
data = np.ones(shape=net.input_info["data"].input_data.shape)
exec_net = ie_core.load_network(net, device)
data[0] = read_image()[0]
request = exec_net.create_infer_request()
request.set_batch(1)
td = TensorDesc("FP32", [1, 3, 32, 32], "NCHW")
input_blob = Blob(td, data)
request.set_input({"data": input_blob})
request.infer()
assert np.allclose(int(round(request.output_blobs["fc_out"].buffer[0][2])), 1), \
"Incorrect data for 1st batch"
del exec_net
del ie_core
del net
@pytest.mark.skip(reason="Fix")
def test_set_zero_batch_size(device):
ie_core = Core()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device)
request = exec_net.create_infer_request()
with pytest.raises(ValueError) as e:
request.set_batch(0)
assert "Batch size should be positive integer number but 0 specified" in str(e.value)
del exec_net
del ie_core
del net
@pytest.mark.skip(reason="Fix")
def test_set_negative_batch_size(device):
ie_core = Core()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device)
request = exec_net.create_infer_request()
with pytest.raises(ValueError) as e:
request.set_batch(-1)
assert "Batch size should be positive integer number but -1 specified" in str(e.value)
del exec_net
del ie_core
del net
def test_blob_setter(device):
ie_core = Core()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net_1 = ie_core.load_network(network=net, device_name=device)
net.input_info["data"].layout = "NHWC"
exec_net_2 = ie_core.load_network(network=net, device_name=device)
img = read_image()
request1 = exec_net_1.create_infer_request()
tensor_desc = TensorDesc("FP32", [1, 3, img.shape[2], img.shape[3]], "NCHW")
img_blob1 = Blob(tensor_desc, img)
request1.set_input({"data": img_blob1})
request1.infer()
res_1 = np.sort(request1.get_blob("fc_out").buffer)
img = np.transpose(img, axes=(0, 2, 3, 1)).astype(np.float32)
tensor_desc = TensorDesc("FP32", [1, 3, 32, 32], "NHWC")
img_blob = Blob(tensor_desc, img)
request = exec_net_2.create_infer_request()
request.set_blob("data", img_blob)
request.infer()
res_2 = np.sort(request.get_blob("fc_out").buffer)
assert np.allclose(res_1, res_2, atol=1e-2, rtol=1e-2)
def test_cancel(device):
ie_core = Core()
net = ie_core.read_network(test_net_xml, test_net_bin)
exec_net = ie_core.load_network(net, device)
img = read_image()
td = TensorDesc("FP32", [1, 3, 32, 32], "NCHW")
input_blob = Blob(td, img)
request = exec_net.create_infer_request()
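    # The completion callback registered below is expected to run even for a
    # cancelled request; the assertions on `data` verify exactly that.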
def callback(req, code, array):
array.append(42)
data = []
request.set_completion_callback(callback, data)
request.set_input({"data": input_blob})
request.async_infer()
request.cancel()
with pytest.raises(RuntimeError) as e:
request.wait()
assert "[ INFER_CANCELLED ]" in str(e.value)
# check if callback has executed
assert data == [42]
request.async_infer()
status = request.wait()
assert status == StatusCode.OK
assert data == [42, 42]
| []
| []
| [
"DATA_PATH",
"TEST_DEVICE",
"MODELS_PATH"
]
| [] | ["DATA_PATH", "TEST_DEVICE", "MODELS_PATH"] | python | 3 | 0 | |
docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from os.path import abspath
from os.path import dirname
from os.path import join
import django
import django_afip
BASE_DIR = dirname(dirname(abspath(__file__)))
sys.path.insert(0, abspath(join(dirname(__file__), "_ext")))
sys.path.insert(0, abspath(join(BASE_DIR, "testapp")))
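# Django must be configured (settings module set, apps loaded) before autodoc
# can import the project's models while building the docs.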
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testapp.settings")
django.setup()
# -- Project information -----------------------------------------------------
project = "django-afip"
copyright = "2015-2020, Hugo Osvaldo Barrera"
author = "Hugo Osvaldo Barrera"
# The short X.Y version.
version = django_afip.__version__
# The full version, including alpha/beta/rc tags
release = django_afip.__version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
"django_models",
"sphinx.ext.intersphinx",
]
intersphinx_mapping = {
"django": (
"https://docs.djangoproject.com/en/stable/",
"https://docs.djangoproject.com/en/stable/_objects/",
),
"weasyprint": (
"https://doc.courtbouillon.org/weasyprint/stable/",
None,
),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The master toctree document.
master_doc = "index"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pkg/lib/config/config.go | package config
import (
"github.com/BurntSushi/toml"
"log"
"os"
"sync"
)
type Config struct {
App App
Database Database
Redis Redis
}
type App struct {
Appname string
Port string
}
type Database struct {
Host string
Port string
Database string
User string
Password string
SSL string
}
type Redis struct {
Host string
Port string
Password string
}
var cfg Config
var once sync.Once
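// GetConfig decodes the TOML configuration file exactly once (guarded by
// sync.Once) and returns the cached value on every subsequent call. The file is
// selected via the ENV environment variable ("PRODUCTION" vs. anything else).
//
// A minimal usage sketch (the import path is an assumption):
//
//	cfg := config.GetConfig()
//	log.Printf("starting %s on :%s", cfg.App.Appname, cfg.App.Port)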
func GetConfig() Config {
once.Do(func() {
var data string
switch os.Getenv("ENV") {
case "PRODUCTION":
data = "./config/forex.production.toml"
default:
data = "./config/forex.development.toml"
}
if _, err := toml.DecodeFile(data, &cfg); err != nil {
log.Println(err)
}
})
return cfg
}
| [
"\"ENV\""
]
| []
| [
"ENV"
]
| [] | ["ENV"] | go | 1 | 0 | |
artificial_neural_networks/architectures/feedforward_neural_networks/convolutional_neural_networks/cnn_dense_mnist.py | """
Model: Convolutional Neural Network (CNN) with dense (i.e. fully connected) layers
Method: Backpropagation
Architecture: Feedforward Neural Network
Dataset: MNIST
Task: Handwritten Digit Recognition (Multi-class Classification)
Author: Ioannis Kourouklides, www.kourouklides.com
License:
https://github.com/kourouklides/artificial_neural_networks/blob/master/LICENSE
"""
# %%
# IMPORTS
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# standard library imports
import argparse
import os
import random as rn
from timeit import default_timer as timer
# third-party imports
from keras import backend as K
from keras import optimizers
from keras.callbacks import ModelCheckpoint
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten
from keras.models import Model
from keras.utils import to_categorical
import numpy as np
from sklearn.metrics import confusion_matrix
import tensorflow as tf
# %%
def cnn_dense_mnist(args):
"""
Main function
"""
# %%
# IMPORTS
# code repository sub-package imports
from artificial_neural_networks.utils.download_mnist import download_mnist
from artificial_neural_networks.utils.generic_utils import save_classif_model
from artificial_neural_networks.utils.vis_utils import plot_confusion_matrix, epoch_plot
# %%
if args.verbose > 0:
print(args)
# For reproducibility
if args.reproducible:
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(args.seed)
rn.seed(args.seed)
tf.set_random_seed(args.seed)
sess = tf.Session(graph=tf.get_default_graph())
K.set_session(sess)
# print(hash("keras"))
# %%
# Load the MNIST dataset
mnist_path = download_mnist()
mnist = np.load(mnist_path)
train_x = mnist['x_train'].astype(np.float32)
train_y = mnist['y_train'].astype(np.int32)
test_x = mnist['x_test'].astype(np.float32)
test_y = mnist['y_test'].astype(np.int32)
mnist.close()
# %%
# PREPROCESSING STEP
scaling_factor = args.scaling_factor
translation = args.translation
img_width = train_x.shape[1]
img_height = train_x.shape[2]
n_train = train_x.shape[0] # number of training examples/samples
n_test = test_x.shape[0] # number of test examples/samples
n_in = img_width * img_height # number of features / dimensions
n_out = np.unique(train_y).shape[0] # number of classes/labels
# Reshape training and test sets
train_x = train_x.reshape(n_train, img_width, img_height, 1)
test_x = test_x.reshape(n_test, img_width, img_height, 1)
# Apply preprocessing
train_x = scaling_factor * (train_x - translation)
test_x = scaling_factor * (test_x - translation)
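    # With the default scaling_factor of 1/255 and translation of 0 (see the
    # argparse defaults below), pixel values end up normalized to [0, 1].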
one_hot = False # It works exactly the same for both True and False
# Convert class vectors to binary class matrices (i.e. One hot encoding)
if one_hot:
train_y = to_categorical(train_y, n_out)
test_y = to_categorical(test_y, n_out)
# %%
# Model hyperparameters and ANN Architecture
N = []
N.append(n_in) # input layer
if args.same_size:
n_layers = args.n_layers
for i in range(n_layers):
N.append(args.layer_size) # hidden layer i
else:
n_layers = len(args.explicit_layer_sizes)
for i in range(n_layers):
N.append(args.explicit_layer_sizes[i]) # hidden layer i
N.append(n_out) # output layer
L = len(N) - 1
x = Input(shape=(img_width, img_height, 1)) # input layer
h = x
h = Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(h)
h = MaxPooling2D(pool_size=(2, 2))(h)
h = Conv2D(filters=64, kernel_size=(3, 3), activation='relu')(h)
h = MaxPooling2D(pool_size=(2, 2))(h)
h = Flatten()(h)
for i in range(1, L):
h = Dense(units=N[i], activation='relu')(h) # hidden layer i
out = Dense(units=n_out, activation='softmax')(h) # output layer
model = Model(inputs=x, outputs=out)
if args.verbose > 0:
model.summary()
if one_hot:
loss_function = 'categorical_crossentropy'
else:
loss_function = 'sparse_categorical_crossentropy'
metrics = ['accuracy']
    lr = args.learning_rate
epsilon = args.epsilon
optimizer_selection = {
'Adadelta':
optimizers.Adadelta(lr=lr, rho=0.95, epsilon=epsilon, decay=0.0),
'Adagrad':
optimizers.Adagrad(lr=lr, epsilon=epsilon, decay=0.0),
'Adam':
optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=epsilon, decay=0.0, amsgrad=False),
'Adamax':
optimizers.Adamax(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=epsilon, decay=0.0),
'Nadam':
optimizers.Nadam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=epsilon, schedule_decay=0.004),
'RMSprop':
optimizers.RMSprop(lr=lr, rho=0.9, epsilon=epsilon, decay=0.0),
'SGD':
optimizers.SGD(lr=lr, momentum=0.0, decay=0.0, nesterov=False)
}
optimizer = optimizer_selection[args.optimizer]
model.compile(optimizer=optimizer, loss=loss_function, metrics=metrics)
# %%
# Save trained models for every epoch
models_path = r'artificial_neural_networks/trained_models/'
model_name = 'mnist_cnn_dense'
weights_path = models_path + model_name + '_weights'
model_path = models_path + model_name + '_model'
file_suffix = '_{epoch:04d}_{val_acc:.4f}_{val_loss:.4f}'
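    # Keras expands the {epoch:04d}, {val_acc:.4f} and {val_loss:.4f} placeholders
    # in this template whenever ModelCheckpoint saves a file.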
if args.save_weights_only:
file_path = weights_path
else:
file_path = model_path
file_path += file_suffix
# monitor = 'val_loss'
monitor = 'val_acc'
if args.save_models:
checkpoint = ModelCheckpoint(
file_path + '.h5',
monitor=monitor,
verbose=args.verbose,
save_best_only=args.save_best_only,
mode='auto',
save_weights_only=args.save_weights_only)
callbacks = [checkpoint]
else:
callbacks = []
# %%
# TRAINING PHASE
if args.time_training:
start = timer()
model_history = model.fit(
x=train_x,
y=train_y,
validation_data=(test_x, test_y),
batch_size=args.batch_size,
epochs=args.n_epochs,
verbose=args.verbose,
callbacks=callbacks)
if args.time_training:
end = timer()
duration = end - start
print('Total time for training (in seconds):')
print(duration)
# %%
# TESTING PHASE
train_y_pred = np.argmax(model.predict(train_x), axis=1)
test_y_pred = np.argmax(model.predict(test_x), axis=1)
train_score = model.evaluate(x=train_x, y=train_y, verbose=args.verbose)
train_dict = {'loss': train_score[0], 'acc': train_score[1]}
test_score = model.evaluate(x=test_x, y=test_y, verbose=args.verbose)
test_dict = {'val_loss': test_score[0], 'val_acc': test_score[1]}
if args.verbose > 0:
print('Train loss:', train_dict['loss'])
print('Train accuracy:', train_dict['acc'])
print('Test loss:', test_dict['val_loss'])
print('Test accuracy:', test_dict['val_acc'])
# %%
# Data Visualization
if args.plot:
# Confusion matrices
classes = list(range(n_out))
train_cm = confusion_matrix(train_y, train_y_pred)
plot_confusion_matrix(train_cm, classes=classes, title='Confusion matrix for training set')
test_cm = confusion_matrix(test_y, test_y_pred)
plot_confusion_matrix(test_cm, classes=classes, title='Confusion matrix for test set')
# Loss vs epoch
epoch_axis = range(1, args.n_epochs + 1)
train_loss = model_history.history['loss']
test_loss = model_history.history['val_loss']
epoch_plot(epoch_axis, train_loss, test_loss, 'Loss')
# Accuracy vs epoch
train_acc = model_history.history['acc']
test_acc = model_history.history['val_acc']
epoch_plot(epoch_axis, train_acc, test_acc, 'Accuracy')
# %%
# Save the architecture and the lastly trained model
save_classif_model(model, models_path, model_name, weights_path, model_path, file_suffix,
test_dict, args)
# %%
return model
# %%
if __name__ == '__main__':
# %%
# IMPORTS
os.chdir('../../../../')
# code repository sub-package imports
from artificial_neural_networks.utils.generic_utils import none_or_int, none_or_float
# %%
# SETTINGS
parser = argparse.ArgumentParser()
# General settings
parser.add_argument('--verbose', type=int, default=1)
parser.add_argument('--reproducible', type=bool, default=True)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--time_training', type=bool, default=True)
parser.add_argument('--plot', type=bool, default=False)
# Settings for preprocessing and hyperparameters
parser.add_argument('--scaling_factor', type=float, default=(1 / 255))
parser.add_argument('--translation', type=float, default=0)
parser.add_argument('--same_size', type=bool, default=True)
parser.add_argument('--n_layers', type=int, default=2)
parser.add_argument('--layer_size', type=int, default=128)
parser.add_argument('--explicit_layer_sizes', nargs='*', type=int, default=[512, 512])
parser.add_argument('--n_epochs', type=int, default=12)
parser.add_argument('--batch_size', type=none_or_int, default=None)
parser.add_argument('--optimizer', type=str, default='Adadelta')
    parser.add_argument('--learning_rate', type=float, default=1e0)
parser.add_argument('--epsilon', type=none_or_float, default=None)
# Settings for saving the model
parser.add_argument('--save_architecture', type=bool, default=True)
parser.add_argument('--save_last_weights', type=bool, default=True)
parser.add_argument('--save_last_model', type=bool, default=True)
parser.add_argument('--save_models', type=bool, default=False)
parser.add_argument('--save_weights_only', type=bool, default=False)
    parser.add_argument('--save_best_only', type=bool, default=True)
args = parser.parse_args()
# %%
# MODEL
model_cnn_dense = cnn_dense_mnist(args)
| []
| []
| [
"PYTHONHASHSEED"
]
| [] | ["PYTHONHASHSEED"] | python | 1 | 0 | |
test/org/jruby/embed/ScriptingContainerTest.java | /**
* **** BEGIN LICENSE BLOCK *****
* Version: CPL 1.0/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Common Public
* License Version 1.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.eclipse.org/legal/cpl-v10.html
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
* Copyright (C) 2009-2010 Yoko Harada <[email protected]>
*
* Alternatively, the contents of this file may be used under the terms of
* either of the GNU General Public License Version 2 or later (the "GPL"),
* or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the CPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the CPL, the GPL or the LGPL.
* **** END LICENSE BLOCK *****
*/
package org.jruby.embed;
import org.jruby.embed.internal.ConcurrentLocalContextProvider;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.PrintStream;
import java.io.Reader;
import java.io.StringWriter;
import java.io.Writer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.logging.ConsoleHandler;
import java.util.logging.Handler;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.logging.SimpleFormatter;
import java.util.logging.StreamHandler;
import org.jruby.CompatVersion;
import org.jruby.Profile;
import org.jruby.Ruby;
import org.jruby.RubyInstanceConfig.CompileMode;
import org.jruby.RubyInstanceConfig.LoadServiceCreator;
import org.jruby.ast.Node;
import org.jruby.embed.internal.BiVariableMap;
import org.jruby.embed.internal.LocalContextProvider;
import org.jruby.embed.internal.SingleThreadLocalContextProvider;
import org.jruby.embed.internal.SingletonLocalContextProvider;
import org.jruby.embed.internal.ThreadSafeLocalContextProvider;
import org.jruby.javasupport.JavaEmbedUtils;
import org.jruby.runtime.Constants;
import org.jruby.runtime.builtin.IRubyObject;
import org.jruby.util.ClassCache;
import org.jruby.util.KCode;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.*;
/**
*
* @author yoko
*/
public class ScriptingContainerTest {
static Logger logger0 = Logger.getLogger(MultipleScriptsRunner.class.getName());
static Logger logger1 = Logger.getLogger(MultipleScriptsRunner.class.getName());
static OutputStream outStream = null;
PrintStream pstream = null;
FileWriter writer = null;
String basedir = System.getProperty("user.dir");
public ScriptingContainerTest() {
}
@BeforeClass
public static void setUpClass() throws Exception {
}
@AfterClass
public static void tearDownClass() throws Exception {
outStream.close();
}
@Before
public void setUp() throws FileNotFoundException, IOException {
outStream = new FileOutputStream(System.getProperty("user.dir") + "/build/test-results/run-junit-embed.log", true);
Handler handler = new StreamHandler(outStream, new SimpleFormatter());
logger0.addHandler(handler);
logger0.setUseParentHandlers(false);
logger0.setLevel(Level.INFO);
logger1.setUseParentHandlers(false);
logger1.addHandler(new ConsoleHandler());
logger1.setLevel(Level.WARNING);
pstream = new PrintStream(outStream, true);
writer = new FileWriter(basedir + "/build/test-results/run-junit-embed.txt", true);
}
@After
public void tearDown() throws IOException {
pstream.close();
writer.close();
}
/**
* Test of getProperty method, of class ScriptingContainer.
*/
@Test
public void testGetProperty() {
logger1.info("getProperty");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
String key = "language.extension";
String[] extensions = {"rb"};
String[] result = instance.getProperty(key);
assertArrayEquals(key, extensions, result);
key = "language.name";
String[] names = {"ruby"};
result = instance.getProperty(key);
assertArrayEquals(key, names, result);
instance = null;
}
/**
* Test of getSupportedRubyVersion method, of class ScriptingContainer.
*/
@Test
public void testGetSupportedRubyVersion() {
logger1.info("getSupportedRubyVersion");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
String expResult = "jruby " + Constants.VERSION;
String result = instance.getSupportedRubyVersion();
assertTrue(result.startsWith(expResult));
instance = null;
}
/**
* Test of getProvider method, of class ScriptingContainer.
*/
@Test
public void testGetProvider() {
logger1.info("getProvider");
ScriptingContainer instance = new ScriptingContainer();
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
LocalContextProvider result = instance.getProvider();
assertTrue(result instanceof SingletonLocalContextProvider);
instance = null;
instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
result = instance.getProvider();
assertTrue(result instanceof ThreadSafeLocalContextProvider);
instance = null;
instance = new ScriptingContainer(LocalContextScope.SINGLETHREAD);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
result = instance.getProvider();
assertTrue(result instanceof SingleThreadLocalContextProvider);
instance = null;
instance = new ScriptingContainer(LocalContextScope.SINGLETON);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
result = instance.getProvider();
assertTrue(result instanceof SingletonLocalContextProvider);
instance = null;
instance = new ScriptingContainer(LocalContextScope.CONCURRENT);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
result = instance.getProvider();
assertTrue(result instanceof ConcurrentLocalContextProvider);
instance = null;
}
/**
* Test of getRuntime method, of class ScriptingContainer.
*/
@Test
public void testGetRuntime() {
logger1.info("getRuntime");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
Ruby runtime = JavaEmbedUtils.initialize(new ArrayList());
Ruby result = instance.getProvider().getRuntime();
Class expClazz = runtime.getClass();
Class resultClazz = result.getClass();
assertEquals(expClazz, resultClazz);
instance = null;
}
/**
* Test of getVarMap method, of class ScriptingContainer.
*/
@Test
public void testGetVarMap() {
logger1.info("getVarMap");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
BiVariableMap result = instance.getVarMap();
result.put("@name", "camellia");
assertEquals("camellia", instance.getVarMap().get("@name"));
result.put("COLOR", "red");
assertEquals("red", instance.getVarMap().get("COLOR"));
// class variable injection does not work
//result.put("@@season", "spring");
//assertEquals("spring", instance.getVarMap().get("@@season"));
result.put("$category", "flower");
assertEquals("flower", instance.getVarMap().get("$category"));
result.put("@name", "maple");
assertEquals("maple", instance.getVarMap().get("@name"));
result.put("COLOR", "orangered");
assertEquals("orangered", instance.getVarMap().get("COLOR"));
result.put("$category", "tree");
assertEquals("tree", instance.getVarMap().get("$category"));
result.put("parameter", 1.2345);
assertEquals(1.2345, instance.getVarMap().get("parameter"));
result.put("@coefficient", 4);
assertEquals(4, instance.getVarMap().get("@coefficient"));
result.clear();
instance = null;
}
/**
* Test of getAttributeMap method, of class ScriptingContainer.
*/
@Test
public void testGetAttributeMap() {
logger1.info("getAttributeMap");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
Map result = instance.getAttributeMap();
Object obj = result.get(AttributeName.READER);
assertEquals(obj.getClass(), java.io.InputStreamReader.class);
obj = result.get(AttributeName.WRITER);
assertEquals(obj.getClass(), java.io.PrintWriter.class);
obj = result.get(AttributeName.ERROR_WRITER);
assertEquals(obj.getClass(), java.io.PrintWriter.class);
result.put(AttributeName.BASE_DIR, "/usr/local/lib");
assertEquals("/usr/local/lib", result.get(AttributeName.BASE_DIR));
result.put(AttributeName.LINENUMBER, 5);
assertEquals(5, result.get(AttributeName.LINENUMBER));
result.put("むなしいきもち", "虚");
assertEquals("虚", result.get("むなしいきもち"));
result.clear();
instance = null;
}
/**
* Test of getAttribute method, of class ScriptingContainer.
*/
//@Test
public void testGetAttribute() {
logger1.info("getAttribute");
Object key = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
Object expResult = null;
Object result = instance.getAttribute(key);
assertEquals(expResult, result);
// TODO review the generated test code and remove the default call to fail.
fail("The test case is a prototype.");
}
/**
* Test of setAttribute method, of class ScriptingContainer.
*/
//@Test
public void testSetAttribute() {
logger1.info("setAttribute");
Object key = null;
Object value = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
Object expResult = null;
Object result = instance.setAttribute(key, value);
assertEquals(expResult, result);
// TODO review the generated test code and remove the default call to fail.
fail("The test case is a prototype.");
}
/**
* Test of get method, of class ScriptingContainer.
*/
@Test
public void testGet() {
logger1.info("get");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
String key = null;
try {
instance.get(key);
} catch (NullPointerException e) {
assertEquals("key is null", e.getMessage());
}
key = "";
try {
instance.get(key);
} catch (IllegalArgumentException e) {
assertEquals("key is empty", e.getMessage());
}
key = "a";
Object expResult = null;
Object result = instance.get(key);
assertEquals(expResult, result);
instance.put("@name", "camellia");
assertEquals("camellia", instance.get("@name"));
instance.put("COLOR", "red");
assertEquals("red", instance.get("COLOR"));
// class variables doesn't work
//varMap.put("@@season", "spring");
//assertEquals("spring", instance.get("@@season"));
instance.put("$category", "flower");
assertEquals("flower", instance.get("$category"));
// Bug. Can't retrieve instance variables from Ruby.
instance.runScriptlet("@eular = 2.718281828");
assertEquals(2.718281828, instance.get("@eular"));
instance.runScriptlet("@name = \"holly\"");
assertEquals("holly", instance.get("@name"));
instance.runScriptlet("$category = \"bush\"");
assertEquals("bush", instance.get("$category"));
instance.getVarMap().clear();
instance = null;
instance = new ScriptingContainer(LocalContextScope.THREADSAFE, LocalVariableBehavior.PERSISTENT);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.runScriptlet("ivalue = 200000");
assertEquals(200000L, instance.get("ivalue"));
instance.getVarMap().clear();
instance = null;
}
/**
* Test of put method, of class ScriptingContainer.
*/
@Test
public void testPut() {
logger1.info("put");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
String key = null;
try {
instance.get(key);
} catch (NullPointerException e) {
assertEquals("key is null", e.getMessage());
}
key = "";
try {
instance.get(key);
} catch (IllegalArgumentException e) {
assertEquals("key is empty", e.getMessage());
}
key = "a";
Object value = null;
Object expResult = null;
Object result = instance.put(key, value);
Object newValue = "xyz";
result = instance.put(key, newValue);
assertEquals(expResult, result);
expResult = "xyz";
assertEquals(expResult, instance.get(key));
StringWriter sw = new StringWriter();
instance.setWriter(sw);
instance.put("x", 144.0);
instance.runScriptlet("puts Math.sqrt(x)");
assertEquals("12.0", sw.toString().trim());
sw = new StringWriter();
instance.setWriter(sw);
instance.put("@x", 256.0);
instance.runScriptlet("puts Math.sqrt(@x)");
assertEquals("16.0", sw.toString().trim());
sw = new StringWriter();
instance.setWriter(sw);
instance.put("$x", 9.0);
instance.runScriptlet("puts Math.sqrt($x)");
assertEquals("3.0", sw.toString().trim());
sw = new StringWriter();
instance.setWriter(sw);
instance.put("KMTOMI", 0.621);
instance.runScriptlet("puts \"1 km is #{KMTOMI} miles.\"");
assertEquals("1 km is 0.621 miles.", sw.toString().trim());
instance.getVarMap().clear();
instance = null;
}
/**
* Test of parse method, of class ScriptingContainer.
*/
@Test
public void testParse_String_intArr() {
logger1.info("parse");
String script = null;
int[] lines = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
EmbedEvalUnit expResult = null;
EmbedEvalUnit result = instance.parse(script, lines);
assertEquals(expResult, result);
script = "";
Ruby runtime = JavaEmbedUtils.initialize(new ArrayList());
Node node = runtime.parseEval(script, "<script>", null, 0);
IRubyObject expRet = runtime.runInterpreter(node);
result = instance.parse(script);
IRubyObject ret = result.run();
assertEquals(expRet.toJava(String.class), ret.toJava(String.class));
// Maybe bug. This returns RubyNil, but it should be ""
//assertEquals("", ret.toJava(String.class));
script = "def say_something()" +
"\"はろ〜、わぁ〜るど!\"\n" +
"end\n" +
"say_something";
expRet = runtime.runInterpreter(runtime.parseEval(script, "<script>", null, 0));
ret = instance.parse(script).run();
assertEquals(expRet.toJava(String.class), ret.toJava(String.class));
//sharing variables
instance.put("what", "Trick or Treat.");
script = "\"Did you say, #{what}?\"";
result = instance.parse(script);
ret = result.run();
assertEquals("Did you say, Trick or Treat.?", ret.toJava(String.class));
// line number test
script = "puts \"Hello World!!!\"\nputs \"Have a nice day!";
StringWriter sw = new StringWriter();
instance.setErrorWriter(sw);
try {
instance.parse(script, 1);
} catch (Exception e) {
assertTrue(sw.toString().contains("<script>:3:"));
}
instance.getVarMap().clear();
instance = null;
}
/**
* Test of parse method, of class ScriptingContainer.
*/
@Test
public void testParse_3args_1() throws FileNotFoundException {
logger1.info("parse(reader, filename, lines)");
Reader reader = null;
String filename = "";
int[] lines = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
EmbedEvalUnit expResult = null;
EmbedEvalUnit result = instance.parse(reader, filename, lines);
assertEquals(expResult, result);
filename = basedir + "/test/org/jruby/embed/ruby/iteration.rb";
reader = new FileReader(filename);
instance.put("@t", 2);
result = instance.parse(reader, filename);
IRubyObject ret = result.run();
String expStringResult =
"Trick or Treat!\nTrick or Treat!\n\nHmmm...I'd like trick.";
assertEquals(expStringResult, ret.toJava(String.class));
// line number test
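// With a line offset of 2, the parse error's reported position in
// raises_parse_error.rb is shifted accordingly (here, line 7).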
filename = basedir + "/test/org/jruby/embed/ruby/raises_parse_error.rb";
reader = new FileReader(filename);
StringWriter sw = new StringWriter();
instance.setErrorWriter(sw);
try {
instance.parse(reader, filename, 2);
} catch (Exception e) {
logger1.info(sw.toString());
assertTrue(sw.toString().contains(filename + ":7:"));
}
instance.getVarMap().clear();
instance = null;
}
/**
* Test of parse method, of class ScriptingContainer.
*/
@Test
public void testParse_3args_2() {
logger1.info("parse(type, filename, lines)");
PathType type = null;
String filename = "";
int[] lines = null;
String[] paths = {basedir + "/lib", basedir + "/lib/ruby/1.8"};
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setLoadPaths(Arrays.asList(paths));
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
EmbedEvalUnit result;
try {
result = instance.parse(type, filename, lines);
} catch (Throwable t) {
assertTrue(t.getCause() instanceof FileNotFoundException);
t.printStackTrace(new PrintStream(outStream));
}
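// PathType controls how the filename is resolved; ABSOLUTE, RELATIVE, and
// CLASSPATH lookups are each exercised below.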
filename = basedir + "/test/org/jruby/embed/ruby/next_year.rb";
result = instance.parse(PathType.ABSOLUTE, filename);
IRubyObject ret = result.run();
assertEquals(getNextYear(), ret.toJava(Integer.class));
StringWriter sw = new StringWriter();
instance.setWriter(sw);
String[] planets = {"Mercury", "Venus", "Earth", "Mars", "Jupiter", "Saturn", "Uranus", "Neptune"};
instance.put("@list", Arrays.asList(planets));
filename = "/test/org/jruby/embed/ruby/list_printer.rb";
result = instance.parse(PathType.RELATIVE, filename);
ret = result.run();
String expResult = "Mercury >> Venus >> Earth >> Mars >> Jupiter >> Saturn >> Uranus >> Neptune: 8 in total";
assertEquals(expResult, sw.toString().trim());
sw = new StringWriter();
instance.setWriter(sw);
instance.setAttribute(AttributeName.UNICODE_ESCAPE, true);
planets = new String[]{"水星", "金星", "地球", "火星", "木星", "土星", "天王星", "海王星"};
instance.put("@list", Arrays.asList(planets));
filename = "org/jruby/embed/ruby/list_printer.rb";
result = instance.parse(PathType.CLASSPATH, filename);
ret = result.run();
expResult = "水星 >> 金星 >> 地球 >> 火星 >> 木星 >> 土星 >> 天王星 >> 海王星: 8 in total";
assertEquals(expResult, sw.toString().trim());
filename = "org/jruby/embed/ruby/raises_parse_error.rb";
sw = new StringWriter();
instance.setErrorWriter(sw);
try {
instance.parse(PathType.CLASSPATH, filename, 2);
} catch (Exception e) {
logger1.info(sw.toString());
assertTrue(sw.toString().contains(filename + ":7:"));
}
instance.getVarMap().clear();
instance = null;
}
private int getNextYear() {
Calendar calendar = Calendar.getInstance();
int this_year = calendar.get(Calendar.YEAR);
return this_year + 1;
}
/**
* Test of parse method, of class ScriptingContainer.
*/
@Test
public void testParse_3args_3() throws FileNotFoundException {
logger1.info("parse(istream, filename, lines)");
InputStream istream = null;
String filename = "";
int[] lines = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
EmbedEvalUnit expResult = null;
EmbedEvalUnit result = instance.parse(istream, filename, lines);
assertEquals(expResult, result);
filename = basedir + "/test/org/jruby/embed/ruby/law_of_cosines.rb";
istream = new FileInputStream(filename);
result = instance.parse(istream, filename);
instance.put("@a", 1);
instance.put("@b", 1);
instance.put("@c", 1);
IRubyObject ret = result.run();
List<Double> angles = (List) ret.toJava(List.class);
// The values come out as 60.00000000000001, 60.00000000000001, 59.99999999999999
// rather than exactly 60.0 because of floating-point rounding, so a tolerance is used.
for (double angle : angles) {
assertEquals(60.0, angle, 0.00001);
}
filename = basedir + "/test/org/jruby/embed/ruby/raises_parse_error.rb";
StringWriter sw = new StringWriter();
instance.setErrorWriter(sw);
istream = new FileInputStream(filename);
try {
instance.parse(istream, filename, 2);
} catch (Exception e) {
logger1.info(sw.toString());
assertTrue(sw.toString().contains(filename + ":7:"));
}
instance.getVarMap().clear();
instance = null;
}
/**
* Test of runScriptlet method, of class ScriptingContainer.
*/
@Test
public void testRunScriptlet_String() {
logger1.info("runScriptlet(script)");
String script = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
Object expResult = null;
Object result = instance.runScriptlet(script);
assertEquals(expResult, result);
script = "";
expResult = "";
result = instance.runScriptlet(script);
// Possible bug: this should return "", but returns RubyNil.
//assertEquals(expResult, result);
script = "def say_something()" +
"\"いけてるね! > JRuby\"\n" +
"end\n" +
"say_something";
result = instance.runScriptlet(script);
expResult = "いけてるね! > JRuby";
assertEquals(expResult, result);
// Unicode escapes in the Java source are decoded to the corresponding characters.
String str = "\u3053\u3093\u306b\u3061\u306f\u4e16\u754c";
result = instance.runScriptlet("given_str = \"" + str + "\"");
expResult = "こんにちは世界";
assertEquals(expResult, result);
instance.getVarMap().clear();
instance = null;
}
/**
* Test of runScriptlet method, of class ScriptingContainer.
*/
@Test
public void testRunScriptlet_Reader_String() throws FileNotFoundException {
logger1.info("runScriptlet(reader, filename)");
Reader reader = null;
String filename = "";
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
Object expResult = null;
Object result = instance.runScriptlet(reader, filename);
assertEquals(expResult, result);
filename = basedir + "/test/org/jruby/embed/ruby/iteration.rb";
reader = new FileReader(filename);
instance.put("@t", 3);
result = instance.runScriptlet(reader, filename);
expResult =
"Trick or Treat!\nTrick or Treat!\nTrick or Treat!\n\nHmmm...I'd like trick.";
assertEquals(expResult, result);
instance.getVarMap().clear();
instance = null;
}
/**
* Test of runScriptlet method, of class ScriptingContainer.
*/
@Test
public void testRunScriptlet_InputStream_String() throws FileNotFoundException {
logger1.info("runScriptlet(istream, filename)");
InputStream istream = null;
String filename = "";
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
Object expResult = null;
Object result = instance.runScriptlet(istream, filename);
assertEquals(expResult, result);
filename = "org/jruby/embed/ruby/law_of_cosines.rb";
istream = getClass().getClassLoader().getResourceAsStream(filename);
instance.put("@a", 2.0);
instance.put("@b", 2 * Math.sqrt(3.0));
instance.put("@c", 2.0);
List<Double> angles = (List<Double>) instance.runScriptlet(istream, filename);
// The values come out as 30.00000000000004, 30.00000000000004, 120.0 rather than
// exactly 30.0, 30.0, 120.0 because of floating-point rounding, so a tolerance is used.
logger1.info(angles.get(0) + ", " + angles.get(1) + ", " +angles.get(2));
assertEquals(30.0, angles.get(0), 0.00001);
assertEquals(30.0, angles.get(1), 0.00001);
assertEquals(120.0, angles.get(2), 0.00001);
instance.getVarMap().clear();
instance = null;
}
/**
* Test of runScriptlet method, of class ScriptingContainer.
*/
@Test
public void testRunScriptlet_PathType_String() {
logger1.info("runScriptlet(type, filename)");
PathType type = null;
String filename = "";
String[] paths = {basedir + "/lib/ruby/1.8"};
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setLoadPaths(Arrays.asList(paths));
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
Object expResult = null;
Object result;
try {
result = instance.parse(type, filename);
} catch (Throwable e) {
assertTrue(e.getCause() instanceof FileNotFoundException);
e.printStackTrace(new PrintStream(outStream));
}
// absolute path
filename = basedir + "/test/org/jruby/embed/ruby/next_year.rb";
result = instance.runScriptlet(PathType.ABSOLUTE, filename);
// Without an explicit return type argument, the implicit conversion
// of the Ruby integer result yields a Long.
expResult = Long.valueOf(getNextYear());
assertEquals(expResult, result);
instance.setAttribute(AttributeName.BASE_DIR, basedir + "/test/org/jruby/embed");
filename = "/ruby/next_year.rb";
result = instance.runScriptlet(PathType.RELATIVE, filename);
assertEquals(expResult, result);
instance.removeAttribute(AttributeName.BASE_DIR);
StringWriter sw = new StringWriter();
instance.setWriter(sw);
String[] radioactive_isotopes = {"Uranium", "Plutonium", "Carbon", "Radium", "Einstenium", "Nobelium"};
instance.put("@list", Arrays.asList(radioactive_isotopes));
filename = "/test/org/jruby/embed/ruby/list_printer.rb";
result = instance.runScriptlet(PathType.RELATIVE, filename);
expResult = "Uranium >> Plutonium >> Carbon >> Radium >> Einstenium >> Nobelium: 6 in total";
assertEquals(expResult, sw.toString().trim());
sw = new StringWriter();
instance.setWriter(sw);
radioactive_isotopes = new String[]{"ウラン", "プルトニウム", "炭素", "ラジウム", "アインスタイニウム", "ノーベリウム"};
instance.put("@list", Arrays.asList(radioactive_isotopes));
filename = "org/jruby/embed/ruby/list_printer.rb";
result = instance.runScriptlet(PathType.CLASSPATH, filename);
expResult = "ウラン >> プルトニウム >> 炭素 >> ラジウム >> アインスタイニウム >> ノーベリウム: 6 in total";
assertEquals(expResult, sw.toString().trim());
instance.getVarMap().clear();
instance = null;
}
/**
* Test of newRuntimeAdapter method, of class ScriptingContainer.
*/
@Test
public void testNewRuntimeAdapter() {
logger1.info("newRuntimeAdapter");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
EmbedRubyRuntimeAdapter result = instance.newRuntimeAdapter();
String script =
"def volume\n"+
" (Math::PI * (@r ** 2.0) * @h)/3.0\n" +
"end\n" +
"def surface_area\n" +
" Math::PI * @r * Math.sqrt((@r ** 2.0) + (@h ** 2.0)) + Math::PI * (@r ** 2.0)\n" +
"end\n" +
"return volume, surface_area";
instance.put("@r", 1.0);
instance.put("@h", Math.sqrt(3.0));
EmbedEvalUnit unit = result.parse(script);
IRubyObject ret = unit.run();
List<Double> rightCircularCone = (List<Double>) ret.toJava(List.class);
assertEquals(1.813799, rightCircularCone.get(0), 0.000001);
assertEquals(9.424778, rightCircularCone.get(1), 0.000001);
instance.getVarMap().clear();
instance = null;
}
/**
* Test of newObjectAdapter method, of class ScriptingContainer.
*/
@Test
public void testNewObjectAdapter() {
logger1.info("newObjectAdapter");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
EmbedRubyObjectAdapter result = instance.newObjectAdapter();
Class[] interfaces = result.getClass().getInterfaces();
assertEquals(org.jruby.embed.EmbedRubyObjectAdapter.class, interfaces[0]);
}
/**
* Test of callMethod method, of class ScriptingContainer.
*/
@Test
public void testCallMethod_3args() {
logger1.info("callMethod(receiver, methodName, returnType)");
Object receiver = null;
String methodName = "";
Class<Object> returnType = null;
String[] paths = {basedir + "/lib/ruby/1.8"};
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setLoadPaths(Arrays.asList(paths));
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
Object expResult = null;
Object result = instance.callMethod(receiver, methodName, returnType);
assertEquals(expResult, result);
String filename = "org/jruby/embed/ruby/next_year_1.rb";
receiver = instance.runScriptlet(PathType.CLASSPATH, filename);
int next_year = instance.callMethod(receiver, "get_year", Integer.class);
assertEquals(getNextYear(), next_year);
String script =
"def volume\n"+
" (Math::PI * (@r ** 2.0) * @h)/3.0\n" +
"end\n" +
"def surface_area\n" +
" Math::PI * @r * Math.sqrt((@r ** 2.0) + (@h ** 2.0)) + Math::PI * (@r ** 2.0)\n" +
"end";
receiver = instance.runScriptlet(script);
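// Methods defined at the top level of a scriptlet belong to Ruby's top-level
// self, so getTopSelf() is used as the receiver here.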
instance.put("@r", 1.0);
instance.put("@h", Math.sqrt(3.0));
double volume = instance.callMethod(instance.getTopSelf(), "volume", Double.class);
assertEquals(1.813799, volume, 0.000001);
double surface_area = instance.callMethod(instance.getTopSelf(), "surface_area", Double.class);
assertEquals(9.424778, surface_area, 0.000001);
instance.getVarMap().clear();
instance = null;
}
/**
* Test of callMethod method, of class ScriptingContainer.
*/
@Test
public void testCallMethod_4args_1() {
logger1.info("callMethod(receiver, methodName, singleArg, returnType)");
Object receiver = null;
String methodName = "";
Object singleArg = null;
Class<Object> returnType = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
Object expResult = null;
Object result = instance.callMethod(receiver, methodName, singleArg, returnType);
assertEquals(expResult, result);
String filename = "org/jruby/embed/ruby/list_printer_1.rb";
receiver = instance.runScriptlet(PathType.CLASSPATH, filename);
methodName = "print_list";
String[] hellos = {"你好", "こんにちは", "Hello", "Здравствуйте"};
singleArg = Arrays.asList(hellos);
StringWriter sw = new StringWriter();
instance.setWriter(sw);
instance.callMethod(receiver, methodName, singleArg, null);
expResult = "Hello >> Здравствуйте >> こんにちは >> 你好: 4 in total";
assertEquals(expResult, sw.toString().trim());
instance.getVarMap().clear();
instance = null;
}
/**
* Test of callMethod method, of class ScriptingContainer.
*/
@Test
public void testCallMethod_4args_2() {
logger1.info("callMethod(receiver, methodName, args, returnType)");
Object receiver = null;
String methodName = "";
Object[] args = null;
Class<Object> returnType = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
Object expResult = null;
Object result = instance.callMethod(receiver, methodName, args, returnType);
assertEquals(expResult, result);
String filename = "org/jruby/embed/ruby/quadratic_formula.rb";
receiver = instance.runScriptlet(PathType.CLASSPATH, filename);
methodName = "solve";
args = new Double[]{12.0, -21.0, -6.0};
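// 12x**2 - 21x - 6 = 0 factors as 3(4x + 1)(x - 2), giving roots -0.25 and 2.0.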
List<Double> solutions = instance.callMethod(receiver, methodName, args, List.class);
assertEquals(2, solutions.size());
assertEquals(Double.valueOf(-0.25), solutions.get(0));
assertEquals(Double.valueOf(2.0), solutions.get(1));
args = new Double[]{1.0, 1.0, 1.0};
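// x**2 + x + 1 = 0 has a negative discriminant; per the assertion below, the
// script signals this with a RangeError.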
try {
solutions = instance.callMethod(receiver, methodName, args, List.class);
} catch (RuntimeException e) {
Throwable t = e.getCause();
assertTrue(t.getMessage().contains("RangeError"));
}
instance.getVarMap().clear();
instance = null;
}
/**
* Test of callMethod method, of class ScriptingContainer.
*/
/*
@Test
public void testCallMethod_5args_1() {
logger1.info("callMethod");
Object receiver = null;
String methodName = "";
Object[] args = null;
Block block = null;
Class<T> returnType = null;
ScriptingContainer instance = new ScriptingContainer();
Object expResult = null;
Object result = instance.callMethod(receiver, methodName, args, block, returnType);
assertEquals(expResult, result);
// TODO review the generated test code and remove the default call to fail.
fail("The test case is a prototype.");
}*/
/**
* Test of callMethod method, of class ScriptingContainer.
*/
@Test
public void testCallMethod_4args_3() {
// Sharing local variables across a method call doesn't work.
// Should the overloads that take a unit argument be removed?
logger1.info("callMethod(receiver, methodName, returnType, unit)");
Class<Object> returnType = null;
EmbedEvalUnit unit = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE, LocalVariableBehavior.PERSISTENT);
instance.setHomeDirectory(basedir);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
// Verify that an empty method name returns null.
Object result = instance.callMethod(null, "", returnType, unit);
assertEquals(null, result);
String text =
"songs:\n"+
"- Hey Soul Sister\n" +
"- Who Says\n" +
"- Apologize\n" +
"podcasts:\n" +
"- Java Posse\n" +
"- Stack Overflow";
StringWriter sw = new StringWriter();
instance.setWriter(sw);
// A local variable doesn't work in this case, so an instance variable is used instead.
instance.put("@text", text);
unit = instance.parse(PathType.CLASSPATH, "org/jruby/embed/ruby/yaml_dump.rb");
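// The script only defines a method, so running the unit returns Ruby's nil;
// the dump method is then invoked explicitly on the top-level self.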
Object receiver = unit.run();
IRubyObject nil = instance.getProvider().getRuntime().getNil();
assertSame(nil, receiver);
IRubyObject topSelf = instance.getProvider().getRuntime().getTopSelf();
result = instance.callMethod(topSelf, "dump", null, unit);
Object expResult =
"songs: Hey Soul Sister, Who Says, Apologize\npodcasts: Java Posse, Stack Overflow\n";
assertEquals(expResult, sw.toString());
instance.getVarMap().clear();
instance = null;
}
/**
* Test of callMethod method, of class ScriptingContainer.
*/
@Test
public void testCallMethod_without_returnType() {
logger1.info("callMethod no returnType");
Object receiver = null;
String methodName = "";
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
Object expResult = null;
Object result = instance.callMethod(receiver, methodName);
assertEquals(expResult, result);
String script =
"def say_something\n" +
"return \"Oh, well. I'm stucked\"" +
"end";
receiver = instance.runScriptlet(script);
methodName = "say_something";
String something = (String)instance.callMethod(receiver, methodName);
assertEquals("Oh, well. I'm stucked", something);
script =
"def give_me_foo\n" +
"Java::org.jruby.embed.FooArU.new\n" +
"end";
receiver = instance.runScriptlet(script);
methodName = "give_me_foo";
FooArU foo = (FooArU)instance.callMethod(receiver, methodName);
assertEquals("May I have your name?", foo.askPolitely());
script =
"def give_me_array(*args)\n" +
"args\n" +
"end";
receiver = instance.runScriptlet(script);
methodName = "give_me_array";
List<Double> list =
(List<Double>)instance.callMethod(receiver, methodName, 3.1415, 2.7182, 1.4142);
Double[] a = {3.1415, 2.7182, 1.4142};
List expList = Arrays.asList(a);
assertEquals(expList, list);
}
@Test
public void test_CallMethod_with_non_ruby_receiver() {
logger1.info("callMethod no returnType");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
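// Non-Ruby receivers are converted first: null becomes Ruby's nil and plain
// Java objects are wrapped, so Ruby methods such as nil? work on them.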
assertEquals(true, instance.callMethod(null, "nil?"));
assertEquals(true, instance.callMethod(instance.getProvider().getRuntime().getNil(), "nil?"));
assertEquals(false, instance.callMethod("A Java String", "nil?"));
String script =
"ScriptingContainer = Java::org.jruby.embed.ScriptingContainer\n" +
"class ScriptingContainer\n" +
"def say_something\n" +
"'Something'\n" +
"end\n" +
"end\n";
instance.runScriptlet(script);
String something = (String)instance.callMethod(instance, "say_something");
assertEquals("Something", something);
}
/**
* Test of callMethod method, of class ScriptingContainer.
*/
/*
@Test
public void testCallMethod_5args_2() {
logger1.info("callMethod");
Object receiver = null;
String methodName = "";
Object[] args = null;
Class<T> returnType = null;
EmbedEvalUnit unit = null;
ScriptingContainer instance = new ScriptingContainer();
Object expResult = null;
Object result = instance.callMethod(receiver, methodName, args, returnType, unit);
assertEquals(expResult, result);
// TODO review the generated test code and remove the default call to fail.
fail("The test case is a prototype.");
}*/
/**
* Test of callMethod method, of class ScriptingContainer.
*/
/*
@Test
public void testCallMethod_6args() {
logger1.info("callMethod");
Object receiver = null;
String methodName = "";
Object[] args = null;
Block block = null;
Class<T> returnType = null;
EmbedEvalUnit unit = null;
ScriptingContainer instance = new ScriptingContainer();
Object expResult = null;
Object result = instance.callMethod(receiver, methodName, args, block, returnType, unit);
assertEquals(expResult, result);
// TODO review the generated test code and remove the default call to fail.
fail("The test case is a prototype.");
}*/
/**
* Test of callSuper method, of class ScriptingContainer.
*/
/*
@Test
public void testCallSuper_3args() {
logger1.info("callSuper");
Object receiver = null;
Object[] args = null;
Class<T> returnType = null;
ScriptingContainer instance = new ScriptingContainer();
Object expResult = null;
Object result = instance.callSuper(receiver, args, returnType);
assertEquals(expResult, result);
// TODO review the generated test code and remove the default call to fail.
fail("The test case is a prototype.");
}*/
/**
* Test of callSuper method, of class ScriptingContainer.
*/
/*
@Test
public void testCallSuper_4args() {
logger1.info("callSuper");
Object receiver = null;
Object[] args = null;
Block block = null;
Class<T> returnType = null;
ScriptingContainer instance = new ScriptingContainer();
Object expResult = null;
Object result = instance.callSuper(receiver, args, block, returnType);
assertEquals(expResult, result);
// TODO review the generated test code and remove the default call to fail.
fail("The test case is a prototype.");
}*/
/**
* Test of getInstance method, of class ScriptingContainer.
*/
@Test
public void testGetInstance() {
logger1.info("getInstance");
Object receiver = null;
Class<Object> clazz = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
Object expResult = null;
Object result = instance.getInstance(receiver, clazz);
assertEquals(expResult, result);
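// getInstance wraps the Ruby receiver in a Java object implementing the given
// interface, so the Ruby-defined methods can be called through typed Java calls.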
// calculates Plutonium decay
instance.put("$h", 24100.0); // half-life of Plutonium is 24100 years.
String filename = "org/jruby/embed/ruby/radioactive_decay.rb";
receiver = instance.runScriptlet(PathType.CLASSPATH, filename);
result = instance.getInstance(receiver, RadioActiveDecay.class);
double initial = 10.0; // 10.0 g
double years = 1000; // 1000 years
double amount_left = ((RadioActiveDecay)result).amountAfterYears(initial, years);
assertEquals(9.716483752784367, amount_left, 0.00000000001);
amount_left = 1.0;
years = ((RadioActiveDecay)result).yearsToAmount(initial, amount_left);
assertEquals(80058.46708678544, years, 0.00000000001);
// calculates position and velocity after some seconds have passed
instance.put("initial_velocity", 16.0);
instance.put("initial_height", 32.0);
instance.put("system", "english");
filename = "org/jruby/embed/ruby/position_function.rb";
receiver = instance.runScriptlet(PathType.CLASSPATH, filename);
result = instance.getInstance(receiver, PositionFunction.class);
double time = 2.0;
double position = ((PositionFunction)result).getPosition(time);
assertEquals(0.0, position, 0.01);
double velocity = ((PositionFunction)result).getVelocity(time);
assertEquals(-48.0, velocity, 0.01);
List<String> units = ((PositionFunction)result).getUnits();
assertEquals("ft./sec", units.get(0));
assertEquals("ft.", units.get(1));
}
/**
* Test of setReader method, of class ScriptingContainer.
*/
@Test
public void testSetReader() {
logger1.info("setReader");
Reader reader = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setReader(reader);
instance = null;
}
/**
* Test of getReader method, of class ScriptingContainer.
*/
@Test
public void testGetReader() {
logger1.info("getReader");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
Reader result = instance.getReader();
assertNotNull(result);
instance = null;
}
/**
* Test of getInput method, of class ScriptingContainer.
*/
@Test
public void testGetIn() {
logger1.info("getIn");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
InputStream result = instance.getInput();
assertNotNull(result);
instance = null;
}
/**
* Test of setWriter method, of class ScriptingContainer.
*/
@Test
public void testSetWriter() {
logger1.info("setWriter");
Writer sw = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setWriter(sw); // exercises the null-Writer case, matching the sibling setter tests
String filename = System.getProperty("user.dir") + "/test/quiet.rb";
sw = new StringWriter();
Writer esw = new StringWriter();
instance.setWriter(sw);
instance.setErrorWriter(esw);
Object result = instance.runScriptlet(PathType.ABSOLUTE, filename);
String expResult = "foo";
// This never successes.
//assertEquals(expResult, result);
instance = null;
}
/**
* Test of resetWriter method, of class ScriptingContainer.
*/
@Test
public void testResetWriter() {
logger1.info("resetWriter");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.resetWriter();
instance = null;
}
/**
* Test of getWriter method, of class ScriptingContainer.
*/
@Test
public void testGetWriter() {
logger1.info("getWriter");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
Writer result = instance.getWriter();
assertSame(writer, result);
instance = null;
}
/**
* Test of getOutput method, of class ScriptingContainer.
*/
@Test
public void testGetOut() {
logger1.info("getOut");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
PrintStream result = instance.getOutput();
assertSame(pstream, result);
instance = null;
}
/**
* Test of setErrorWriter method, of class ScriptingContainer.
*/
@Test
public void testSetErrorWriter() {
logger1.info("setErrorWriter");
Writer esw = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setErrorWriter(esw);
esw = new StringWriter();
instance.setErrorWriter(esw);
instance.runScriptlet("ABC=10;ABC=20");
String expResult = "<script>:1 warning: already initialized constant ABC";
assertEquals(expResult, esw.toString().trim());
instance.getVarMap().clear();
instance = null;
}
/**
* Test of resetErrorWriter method, of class ScriptingContainer.
*/
@Test
public void testResetErrorWriter() {
logger1.info("resetErrorWriter");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.resetErrorWriter();
instance = null;
}
/**
* Test of getErrorWriter method, of class ScriptingContainer.
*/
@Test
public void testGetErrorWriter() {
logger1.info("getErrorWriter");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
Writer result = instance.getErrorWriter();
assertSame(writer, result);
instance = null;
}
/**
* Test of getError method, of class ScriptingContainer.
*/
@Test
public void testGetErr() {
logger1.info("getErr");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
PrintStream result = instance.getError();
assertSame(pstream, result);
instance = null;
}
/**
* Test of methods Java object should have.
*
* Currently, __jtrap is missing and removed from expResult.
*/
// This test is really sensitive to internal API changes and needs frequent updates.
// For the time being, it is disabled.
//@Test
public void testMethods() {
logger1.info("");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
String[] expResult = {
"==", "===", "=~", "[]", "[]=", "__id__", "__jcreate!", "__jsend!",
"__send__", "all?", "any?", "class", "class__method",
"clear", "clear__method", "clone", "clone__method", "collect", "com",
"containsKey", "containsKey__method", "containsValue", "containsValue__method",
"contains_key", "contains_key?", "contains_key__method", "contains_key__method?",
"contains_value", "contains_value?", "contains_value__method",
"contains_value__method?", "count", "cycle", "detect", "display", "drop",
"drop_while", "dup", "each", "each_cons", "each_slice", "each_with_index",
"empty", "empty?", "empty__method", "empty__method?", "entries", "entrySet",
"entrySet__method", "entry_set", "entry_set__method", "enum_cons", "enum_for",
"enum_slice", "enum_with_index", "eql?", "equal?", "equals", "equals?",
"equals__method", "equals__method?", "extend", "finalize", "finalize__method",
"find", "find_all", "find_index", "first", "freeze", "frozen?", "get",
"getClass", "getClass__method", "get__method", "get_class", "get_class__method",
"grep", "group_by", "handle_different_imports", "hash", "hashCode",
"hashCode__method", "hash_code", "hash_code__method", "id", "include?",
"include_class", "initialize", "inject", "inspect", "instance_eval",
"instance_exec", "instance_of?", "instance_variable_defined?",
"instance_variable_get", "instance_variable_set", "instance_variables",
"isEmpty", "isEmpty__method", "is_a?", "is_empty", "is_empty?",
"is_empty__method", "is_empty__method?", "java", "java_class", "java_kind_of?",
"java_method", "java_object", "java_object=", "java_send", "javax", "keySet",
"keySet__method", "key_set", "key_set__method", "kind_of?", "map", "marshal_dump",
"marshal_load", "max", "max_by", "member?", "method", "methods", "min",
"min_by", "minmax", "minmax_by", "nil?", "none?", "notify", "notifyAll",
"notifyAll__method", "notify__method", "notify_all", "notify_all__method",
"object_id", "org", "partition", "private_methods", "protected_methods",
"public_methods", "put", "putAll", "putAll__method", "put__method", "put_all",
"put_all__method", "reduce", "reject", "remove", "remove__method", "respond_to?",
"reverse_each", "select", "send", "singleton_methods", "size", "size__method",
"sort", "sort_by", "synchronized", "taint", "tainted?", "take", "take_while",
"tap", "toString", "toString__method", "to_a", "to_enum", "to_java", "to_java_object",
"to_s", "to_string", "to_string__method", "type", "untaint", "values",
"values__method", "wait", "wait__method", "zip"
};
String script = "require 'java'\njava.util.HashMap.new.methods.sort";
List<String> ret = (List<String>)instance.runScriptlet(script);
assertEquals(expResult.length, ret.size());
String[] retMethods = ret.toArray(new String[ret.size()]);
assertArrayEquals(expResult, retMethods);
instance.clear();
instance = null;
}
/**
* Test of getLoadPaths method, of class ScriptingContainer.
*/
@Test
public void testGetLoadPaths() {
logger1.info("getLoadPaths");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
List result = instance.getLoadPaths();
assertNotNull(result);
assertTrue(result.size() > 0);
instance = null;
}
/**
* Test of setLoadPaths method, of class ScriptingContainer.
*/
@Test
public void testSetLoadPaths() {
logger1.info("setLoadPaths");
List<String> paths = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setLoadPaths(paths);
List<String> expResult = null;
assertEquals(expResult, instance.getLoadPaths());
paths = Arrays.asList(new String[]{"abc", "def"});
instance.setLoadPaths(paths);
assertArrayEquals(paths.toArray(), instance.getLoadPaths().toArray());
instance = null;
}
/**
* Test of getInput method, of class ScriptingContainer.
*/
@Test
public void testGetInput() {
logger1.info("getInput");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
InputStream expResult = System.in;
InputStream result = instance.getInput();
assertEquals(expResult, result);
}
/**
* Test of setInput method, of class ScriptingContainer.
*/
@Test
public void testSetInput_InputStream() {
logger1.info("setInput");
InputStream istream = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setInput(istream);
assertEquals(istream, instance.getInput());
istream = System.in;
instance.setInput(istream);
assertTrue(instance.getInput() instanceof InputStream);
instance = null;
}
/**
* Test of setInput method, of class ScriptingContainer.
*/
@Test
public void testSetInput_Reader() {
logger1.info("setInput");
Reader reader = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setInput(reader);
assertEquals(reader, instance.getInput());
instance = null;
}
/**
* Test of getOutput method, of class ScriptingContainer.
*/
@Test
public void testGetOutput() {
logger1.info("getOutput");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
PrintStream result = instance.getOutput();
assertEquals(pstream, result);
instance = null;
}
/**
* Test of setOutput method, of class ScriptingContainer.
*/
@Test
public void testSetOutput_PrintStream() {
logger1.info("setOutput");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
assertEquals(pstream, instance.getOutput());
instance = null;
}
/**
* Test of setOutput method, of class ScriptingContainer.
*/
@Test
public void testSetOutput_Writer() {
logger1.info("setOutput");
Writer ow = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setOutput(ow);
assertEquals(ow, instance.getOutput());
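// A Writer passed to setOutput is wrapped, so getOutput() returns a
// PrintStream adapter rather than the Writer itself.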
ow = new StringWriter();
instance.setOutput(ow);
assertTrue(instance.getOutput() instanceof PrintStream);
instance = null;
}
/**
* Test of getError method, of class ScriptingContainer.
*/
@Test
public void testGetError() {
logger1.info("getError");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
PrintStream result = instance.getError();
assertEquals(pstream, result);
instance = null;
}
/**
* Test of setError method, of class ScriptingContainer.
*/
@Test
public void testSetError_PrintStream() {
logger1.info("setError");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
assertEquals(pstream, instance.getError());
instance = null;
}
/**
* Test of setError method, of class ScriptingContainer.
*/
@Test
public void testSetError_Writer() {
logger1.info("setError");
Writer ew = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setError(ew);
assertEquals(ew, instance.getError());
ew = new StringWriter();
instance.setError(ew);
assertTrue(instance.getError() instanceof PrintStream);
instance = null;
}
/**
* Test of getCompileMode method, of class ScriptingContainer.
*/
@Test
public void testGetCompileMode() {
logger1.info("getCompileMode");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
CompileMode expResult = CompileMode.OFF;
CompileMode result = instance.getCompileMode();
assertEquals(expResult, result);
instance = null;
}
/**
* Test of setCompileMode method, of class ScriptingContainer.
*/
@Test
public void testSetCompileMode() {
logger1.info("setCompileMode");
CompileMode mode = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setCompileMode(mode);
assertEquals(mode, instance.getCompileMode());
mode = CompileMode.FORCE;
instance.setCompileMode(mode);
assertEquals(mode, instance.getCompileMode());
instance = null;
}
/**
* Test of isRunRubyInProcess method, of class ScriptingContainer.
*/
@Test
public void testIsRunRubyInProcess() {
logger1.info("isRunRubyInProcess");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
boolean expResult = true;
boolean result = instance.isRunRubyInProcess();
assertEquals(expResult, result);
instance = null;
}
/**
* Test of setRunRubyInProcess method, of class ScriptingContainer.
*/
@Test
public void testSetRunRubyInProcess() {
logger1.info("setRunRubyInProcess");
boolean inprocess = false;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setRunRubyInProcess(inprocess);
assertEquals(inprocess, instance.isRunRubyInProcess());
inprocess = true;
instance.setRunRubyInProcess(inprocess);
assertEquals(inprocess, instance.isRunRubyInProcess());
instance = null;
}
/**
* Test of getCompatVersion method, of class ScriptingContainer.
*/
@Test
public void testGetCompatVersion() {
logger1.info("getCompatVersion");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
CompatVersion expResult = CompatVersion.RUBY1_8;
CompatVersion result = instance.getCompatVersion();
assertEquals(expResult, result);
instance = null;
}
/**
* Test of setCompatVersion method, of class ScriptingContainer.
*/
@Test
public void testSetCompatVersion() {
logger1.info("setCompatVersion");
CompatVersion version = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.SINGLETHREAD);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setCompatVersion(version);
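// A null version falls back to the default, RUBY1_8.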
assertEquals(CompatVersion.RUBY1_8, instance.getCompatVersion());
// CompatVersion can't be changed after the Ruby runtime has been initialized,
// so a fresh runtime is needed for this test.
instance = new ScriptingContainer(LocalContextScope.SINGLETHREAD);
version = CompatVersion.RUBY1_9;
instance.setCompatVersion(version);
assertEquals(version, instance.getCompatVersion());
String result = (String)instance.runScriptlet(PathType.CLASSPATH, "org/jruby/embed/ruby/block-param-scope.rb");
String expResult = "bear";
assertEquals(expResult, result);
instance = null;
}
/**
* Test of isObjectSpaceEnabled method, of class ScriptingContainer.
*/
@Test
public void testIsObjectSpaceEnabled() {
logger1.info("isObjectSpaceEnabled");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
boolean expResult = false;
boolean result = instance.isObjectSpaceEnabled();
assertEquals(expResult, result);
instance = null;
}
/**
* Test of setObjectSpaceEnabled method, of class ScriptingContainer.
*/
@Test
public void testSetObjectSpaceEnabled() {
logger1.info("setObjectSpaceEnabled");
boolean enable = false;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setObjectSpaceEnabled(enable);
assertEquals(enable, instance.isObjectSpaceEnabled());
instance = null;
}
/**
* Test of getEnvironment method, of class ScriptingContainer.
*/
@Test
public void testGetEnvironment() {
logger1.info("getEnvironment");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
Map expResult = System.getenv();
Map result = instance.getEnvironment();
assertEquals(expResult, result);
instance = null;
}
/**
* Test of setEnvironment method, of class ScriptingContainer.
*/
@Test
public void testSetEnvironment() {
logger1.info("setEnvironment");
Map environment = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setEnvironment(environment);
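// A null environment is normalized to an empty map.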
assertEquals(new HashMap(), instance.getEnvironment());
environment = new HashMap();
environment.put("abc", "def");
instance.setEnvironment(environment);
assertEquals(environment, instance.getEnvironment());
instance = null;
}
/**
* Test of getCurrentDirectory method, of class ScriptingContainer.
*/
@Test
public void testGetCurrentDirectory() {
logger1.info("getCurrentDirectory");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
String expResult = System.getProperty("user.dir");
String result = instance.getCurrentDirectory();
assertEquals(expResult, result);
instance = null;
}
/**
* Test of setCurrentDirectory method, of class ScriptingContainer.
*/
@Test
public void testSetCurrentDirectory() {
logger1.info("setCurrentDirectory");
String directory = "";
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setCurrentDirectory(directory);
assertEquals(directory, instance.getCurrentDirectory());
directory = "abc";
instance.setCurrentDirectory(directory);
assertEquals(directory, instance.getCurrentDirectory());
directory = System.getProperty( "user.home" );
instance = new ScriptingContainer();
instance.setCurrentDirectory(directory);
assertEquals(directory, instance.getCurrentDirectory());
instance = new ScriptingContainer(LocalContextScope.CONCURRENT);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setCurrentDirectory(directory);
assertEquals(directory, instance.getCurrentDirectory());
instance = new ScriptingContainer(LocalContextScope.SINGLETHREAD);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setCurrentDirectory(directory);
assertEquals(directory, instance.getCurrentDirectory());
}
/**
* Test of getHomeDirectory method, of class ScriptingContainer.
*/
@Test
public void testGetHomeDirectory() {
logger1.info("getHomeDirectory");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
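// Home directory resolution falls back from the JRUBY_HOME environment
// variable to the jruby.home system property, then to java.io.tmpdir.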
String expResult = System.getenv("JRUBY_HOME");
if (expResult == null) {
expResult = System.getProperty("jruby.home");
}
if (expResult == null) {
expResult = System.getProperty("java.io.tmpdir");
}
String result = instance.getHomeDirectory();
assertEquals(expResult, result);
instance = null;
}
/**
* Test of setHomeDirectory method, of class ScriptingContainer.
*/
@Test
public void testSetHomeDirectory() {
logger1.info("setHomeDirectory");
String home = ".";
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
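// A relative home directory such as "." is resolved against the current
// working directory (user.dir).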
instance.setHomeDirectory(home);
assertEquals(System.getProperty("user.dir"), instance.getHomeDirectory());
instance = null;
}
/**
* Test of getClassCache method, of class ScriptingContainer.
*/
@Test
public void testGetClassCache() {
logger1.info("getClassCache");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
ClassCache result = instance.getClassCache();
assertEquals(instance.getJitMax(), result.getMax());
instance = null;
}
/**
* Test of setClassCache method, of class ScriptingContainer.
*/
@Test
public void testSetClassCache() {
logger1.info("setClassCache");
ClassCache cache = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setClassCache(cache);
assertEquals(cache, instance.getClassCache());
cache = new ClassCache(instance.getProvider().getRuntime().getJRubyClassLoader(), 30);
instance.setClassCache(cache);
assertEquals(cache, instance.getClassCache());
assertEquals(30, instance.getClassCache().getMax());
instance = null;
}
/**
* Test of getClassLoader method, of class ScriptingContainer.
*/
@Test
public void testGetClassLoader() {
logger1.info("getClassLoader");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
ClassLoader expResult = this.getClass().getClassLoader();
ClassLoader result = instance.getClassLoader();
assertEquals(expResult, result);
instance = null;
}
/**
* Test of setClassLoader method, of class ScriptingContainer.
*/
@Test
public void testSetClassLoader() {
logger1.info("setClassLoader");
ClassLoader loader = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setClassLoader(loader);
assertEquals(loader, instance.getClassLoader());
loader = instance.getProvider().getRuntime().getJRubyClassLoader();
instance.setClassLoader(loader);
assertEquals(loader, instance.getClassLoader());
instance = null;
}
/**
* Test of getProfile method, of class ScriptingContainer.
*/
@Test
public void testGetProfile() {
logger1.info("getProfile");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
Profile expResult = Profile.DEFAULT;
Profile result = instance.getProfile();
assertEquals(expResult, result);
instance = null;
}
/**
* Test of setProfile method, of class ScriptingContainer.
*/
@Test
public void testSetProfile() {
logger1.info("setProfile");
Profile profile = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setProfile(profile);
assertEquals(profile, instance.getProfile());
profile = Profile.ALL;
instance.setProfile(profile);
assertEquals(profile, instance.getProfile());
instance = null;
}
/**
* Test of getLoadServiceCreator method, of class ScriptingContainer.
*/
@Test
public void testGetLoadServiceCreator() {
logger1.info("getLoadServiceCreator");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
LoadServiceCreator expResult = LoadServiceCreator.DEFAULT;
LoadServiceCreator result = instance.getLoadServiceCreator();
assertEquals(expResult, result);
instance = null;
}
/**
* Test of setLoadServiceCreator method, of class ScriptingContainer.
*/
@Test
public void testSetLoadServiceCreator() {
logger1.info("setLoadServiceCreator");
LoadServiceCreator creator = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setLoadServiceCreator(creator);
instance = null;
}
/**
* Test of getArgv method, of class ScriptingContainer.
*/
@Test
public void testGetArgv() {
logger1.info("getArgv");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
String[] expResult = new String[]{};
String[] result = instance.getArgv();
assertArrayEquals(expResult, result);
instance = null;
}
/**
* Test of setArgv method, of class ScriptingContainer.
*/
@Test
public void testSetArgv() {
logger1.info("setArgv");
String[] argv = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.SINGLETHREAD);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setArgv(argv);
assertArrayEquals(argv, instance.getArgv());
instance = null;
instance = new ScriptingContainer(LocalContextScope.SINGLETHREAD);
//instance.setError(pstream);
//instance.setOutput(pstream);
//instance.setWriter(writer);
//instance.setErrorWriter(writer);
argv = new String[] {"tree", "woods", "forest"};
instance.setArgv(argv);
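// The argv array set on the container is exposed to Ruby as ARGV.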
String script =
"def print_argv\n" +
"all_of_them = \"\"\n" +
"ARGV.each { |item| all_of_them += item }\n" +
"return all_of_them\n" +
"end\n" +
"print_argv";
String ret = (String)instance.runScriptlet(script);
String expResult = "treewoodsforest";
assertEquals(expResult, ret);
List<String> list = (List<String>)instance.get("ARGV");
//Object[] params = (Object[])instance.get("ARGV");
//assertArrayEquals(argv, params);
instance = null;
}
/**
* Test of setArgv method, of class ScriptingContainer.
*/
@Test
public void testRubyArrayToJava() {
logger1.info("RubyArray to Java");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.SINGLETHREAD);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
String script =
"def get_array\n" +
"return [\"snow\", \"sleet\", \"drizzle\", \"freezing rain\"]\n" +
"end\n";
Object receiver = instance.runScriptlet(script);
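// A returned RubyArray converts to either a Java array or a List, depending
// on the requested return type.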
String[] params = instance.callMethod(receiver, "get_array", String[].class);
String[] expParams = {"snow", "sleet", "drizzle", "freezing rain"};
assertArrayEquals(expParams, params);
List<String> list = instance.callMethod(receiver, "get_array", List.class);
List<String> expList = Arrays.asList(expParams);
assertEquals(expList, list);
}
/**
* Test of getScriptFilename method, of class ScriptingContainer.
*/
@Test
public void testGetScriptFilename() {
logger1.info("getScriptFilename");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
String expResult = "<script>";
String result = instance.getScriptFilename();
assertEquals(expResult, result);
instance = null;
}
/**
* Test of setScriptFilename method, of class ScriptingContainer.
*/
@Test
public void testSetScriptFilename() {
logger1.info("setScriptFilename");
String filename = "";
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.SINGLETHREAD);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setScriptFilename(filename);
instance = null;
filename = "["+this.getClass().getCanonicalName()+"]";
instance = new ScriptingContainer(LocalContextScope.SINGLETHREAD);
instance.setScriptFilename(filename);
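// The configured script filename is used in error messages, as the parse
// error below demonstrates.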
StringWriter sw = new StringWriter();
instance.setErrorWriter(sw);
try {
instance.runScriptlet("puts \"Hello");
} catch (RuntimeException e) {
assertTrue(sw.toString().contains(filename));
}
instance = null;
}
/**
* Test of getRecordSeparator method, of class ScriptingContainer.
*/
@Test
public void testGetRecordSeparator() {
logger1.info("getRecordSeparator");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
String expResult = "\n";
String result = instance.getRecordSeparator();
assertEquals(expResult, result);
instance = null;
}
/**
* Test of setRecordSeparator method, of class ScriptingContainer.
*/
@Test
public void testSetRecordSeparator() {
logger1.info("setRecordSeparator");
String separator = "";
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setRecordSeparator(separator);
instance = null;
}
/**
* Test of getKCode method, of class ScriptingContainer.
*/
@Test
public void testGetKCode() {
logger1.info("getKCode");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
KCode expResult = KCode.NONE;
KCode result = instance.getKCode();
assertEquals(expResult, result);
instance = null;
}
/**
* Test of setKCode method, of class ScriptingContainer.
*/
@Test
public void testSetKCode() {
logger1.info("setKCode");
KCode kcode = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.SINGLETHREAD);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setKCode(kcode);
instance = null;
instance = new ScriptingContainer(LocalContextScope.SINGLETHREAD);
//instance.setError(pstream);
//instance.setOutput(pstream);
//instance.setWriter(writer);
//instance.setErrorWriter(writer);
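// With KCode.UTF8 the runtime treats strings as UTF-8, so the accented
// characters below survive the round trip intact.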
kcode = KCode.UTF8;
instance.setKCode(kcode);
StringWriter sw = new StringWriter();
instance.setWriter(sw);
instance.runScriptlet("p \"Résumé\"");
String expResult = "\"Résumé\"";
assertEquals(expResult, sw.toString().trim());
instance = null;
}
/**
* Test of getJitLogEvery method, of class ScriptingContainer.
*/
@Test
public void testGetJitLogEvery() {
logger1.info("getJitLogEvery");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
int expResult = 0;
int result = instance.getJitLogEvery();
assertEquals(expResult, result);
instance = null;
}
/**
* Test of setJitLogEvery method, of class ScriptingContainer.
*/
@Test
public void testSetJitLogEvery() {
logger1.info("setJitLogEvery");
int logEvery = 0;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setJitLogEvery(logEvery);
instance = null;
}
/**
* Test of getJitThreshold method, of class ScriptingContainer.
*/
@Test
public void testGetJitThreshold() {
logger1.info("getJitThreshold");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
int expResult = 50;
int result = instance.getJitThreshold();
assertEquals(expResult, result);
instance = null;
}
/**
* Test of setJitThreshold method, of class ScriptingContainer.
*/
@Test
public void testSetJitThreshold() {
logger1.info("setJitThreshold");
int threshold = 0;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setJitThreshold(threshold);
instance = null;
}
/**
* Test of getJitMax method, of class ScriptingContainer.
*/
@Test
public void testGetJitMax() {
logger1.info("getJitMax");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
int expResult = 4096;
int result = instance.getJitMax();
assertEquals(expResult, result);
instance = null;
}
/**
* Test of setJitMax method, of class ScriptingContainer.
*/
@Test
public void testSetJitMax() {
logger1.info("setJitMax");
int max = 0;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setJitMax(max);
instance = null;
}
/**
* Test of getJitMaxSize method, of class ScriptingContainer.
*/
@Test
public void testGetJitMaxSize() {
logger1.info("getJitMaxSize");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
int expResult = 30000;
int result = instance.getJitMaxSize();
assertEquals(expResult, result);
instance = null;
}
/**
* Test of setJitMaxSize method, of class ScriptingContainer.
*/
@Test
public void testSetJitMaxSize() {
logger1.info("setJitMaxSize");
int maxSize = 0;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.setJitMaxSize(maxSize);
instance = null;
}
/**
* Test of removeAttribute method, of class ScriptingContainer.
*/
@Test
public void testRemoveAttribute() {
logger1.info("removeAttribute");
Object key = null;
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
Object expResult = null;
Object result = instance.removeAttribute(key);
assertEquals(expResult, result);
instance = null;
}
/**
* Test of remove method, of class ScriptingContainer.
*/
@Test
public void testRemove() {
logger1.info("remove");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
String key = "abc";
String value = "def";
instance.put(key, value);
Object expResult = "def";
Object result = instance.remove(key);
assertEquals(expResult, result);
instance = null;
}
/**
* Test of clear method, of class ScriptingContainer.
*/
@Test
public void testClear() {
logger1.info("clear");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setWriter(writer);
instance.setErrorWriter(writer);
instance.clear();
instance.put("abc", "local_def");
instance.put("$abc", "global_def");
instance.put("@abc", "instance_def");
assertEquals(3, instance.getProvider().getVarMap().size());
instance.clear();
assertEquals(0, instance.getProvider().getVarMap().size());
instance = null;
}
/**
* Test of sharing local vars across compile modes, of class ScriptingContainer.
* Test for JRUBY-4695. Local variables should be shared in OFF, JIT and FORCE
* compile modes alike (all three are exercised below).
*/
@Test
public void testSharingVariableWithCompileMode() {
logger1.info("sharing vars over JIT mode");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
instance.setError(pstream);
instance.setOutput(pstream);
instance.setErrorWriter(writer);
instance.put("my_var", "Hullo!");
StringWriter sw = new StringWriter();
instance.setWriter(sw);
instance.setCompileMode(CompileMode.OFF);
instance.runScriptlet("puts my_var");
assertEquals("Hullo!", sw.toString().trim());
// need to put the lvar again since lvars vanish after eval on a transient setting
instance.put("my_var", "Hullo!");
sw = new StringWriter();
instance.setWriter(sw);
instance.setCompileMode(CompileMode.JIT);
instance.runScriptlet("puts my_var");
assertEquals("Hullo!", sw.toString().trim());
instance.put("my_var", "Hullo!");
sw = new StringWriter();
instance.setWriter(sw);
instance.setCompileMode(CompileMode.FORCE);
instance.runScriptlet("puts my_var");
assertEquals("Hullo!", sw.toString().trim());
instance = null;
}
@Test
public void testEmbedEvalUnitCompileModes() {
org.jruby.embed.ScriptingContainer container = new org.jruby.embed.ScriptingContainer();
container.setCompileMode(CompileMode.OFF);
EmbedEvalUnit evalUnit1 = container.parse("$one = \"script 1: success\"");
EmbedEvalUnit evalUnit2 = container.parse("def script2() ; $two = \"script 2: success\"; end; script2()");
evalUnit1.run();
evalUnit2.run();
assertEquals("script 1: success", container.get("$one").toString());
assertEquals("script 2: success", container.get("$two").toString());
container = new org.jruby.embed.ScriptingContainer();
container.setCompileMode(CompileMode.JIT);
evalUnit1 = container.parse("$one = \"script 1: success\"");
evalUnit2 = container.parse("def script2() ; $two = \"script 2: success\"; end; script2()");
evalUnit1.run();
evalUnit2.run();
assertEquals("script 1: success", container.get("$one").toString());
assertEquals("script 2: success", container.get("$two").toString());
container = new org.jruby.embed.ScriptingContainer();
container.setCompileMode(CompileMode.FORCE);
evalUnit1 = container.parse("$one = \"script 1: success\"");
evalUnit2 = container.parse("def script2() ; $two = \"script 2: success\"; end; script2()");
evalUnit1.run();
evalUnit2.run();
assertEquals("script 1: success", container.get("$one").toString());
assertEquals("script 2: success", container.get("$two").toString());
}
/**
* Test of Thread.currentThread().setContextClassLoader method
*/
@Test
public void testNullToContextClassLoader() {
logger1.info("Thread.currentThread().setContextClassLoader(null)");
ScriptingContainer instance = null;
try {
ClassLoader oldClassLoader = Thread.currentThread().getContextClassLoader();
Thread.currentThread().setContextClassLoader(null);
instance = new ScriptingContainer(LocalContextScope.THREADSAFE);
Thread.currentThread().setContextClassLoader(oldClassLoader);
} catch (NullPointerException e) {
fail(e.getMessage());
} finally {
instance = null;
}
}
/**
* Test of setClassLoader method, of SystemPropertyCatcher.
* This method is only used in JSR223 but is tested here, since JSR223
* makes it hard to inspect internal state.
*/
@Test
public void testSystemPropertyCatcherSetClassloader() {
logger1.info("SystemPropertyCatcher.setClassloader");
System.setProperty(PropertyName.CLASSLOADER.toString(), "container");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.SINGLETHREAD);
org.jruby.embed.util.SystemPropertyCatcher.setClassLoader(instance);
assertEquals(instance.getClass().getClassLoader(), instance.getClassLoader());
System.setProperty(PropertyName.CLASSLOADER.toString(), "context");
instance = new ScriptingContainer(LocalContextScope.SINGLETHREAD);
org.jruby.embed.util.SystemPropertyCatcher.setClassLoader(instance);
assertEquals(Thread.currentThread().getContextClassLoader(), instance.getClassLoader());
}
/**
* Test that the scope used in callMethod is not null, of class ScriptingContainer.
*/
@Test
public void testScopeInCallMethod() {
logger1.info("Scope in callMethod should not be null");
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.SINGLETHREAD);
Object someInstance = instance.runScriptlet("Object.new");
Object result = instance.callMethod(someInstance, "instance_eval", "self", "<eval>", 1);
assertNotNull(result);
}
@Test
public void testExitTerminatesScript() {
ScriptingContainer instance = new ScriptingContainer(LocalContextScope.SINGLETHREAD);
Object result = instance.runScriptlet("exit 1234");
assertEquals(1234L, result);
}
}
| [
"\"JRUBY_HOME\""
]
| []
| [
"JRUBY_HOME"
]
| [] | ["JRUBY_HOME"] | java | 1 | 0 | |
tools/generate.go | package main
import (
"crypto/sha1"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"text/template"
"github.com/russross/blackfriday"
)
// siteDir is the target directory into which the HTML gets generated. Its
// default is set here but can be changed by an argument passed into the
// program.
var siteDir = "./public"
var cacheDir = "/tmp/gobyexample-cache"
var pygmentizeBin = "./vendor/pygments/pygmentize"
func verbose() bool {
return len(os.Getenv("VERBOSE")) > 0
}
func check(err error) {
if err != nil {
panic(err)
}
}
func ensureDir(dir string) {
err := os.MkdirAll(dir, 0755)
check(err)
}
func copyFile(src, dst string) {
dat, err := ioutil.ReadFile(src)
check(err)
err = ioutil.WriteFile(dst, dat, 0644)
check(err)
}
func pipe(bin string, arg []string, src string) []byte {
cmd := exec.Command(bin, arg...)
in, err := cmd.StdinPipe()
check(err)
out, err := cmd.StdoutPipe()
check(err)
err = cmd.Start()
check(err)
_, err = in.Write([]byte(src))
check(err)
err = in.Close()
check(err)
bytes, err := ioutil.ReadAll(out)
check(err)
err = cmd.Wait()
check(err)
return bytes
}
func sha1Sum(s string) string {
h := sha1.New()
h.Write([]byte(s))
b := h.Sum(nil)
return fmt.Sprintf("%x", b)
}
func mustReadFile(path string) string {
bytes, err := ioutil.ReadFile(path)
check(err)
return string(bytes)
}
func cachedPygmentize(lex string, src string) string {
ensureDir(cacheDir)
arg := []string{"-l", lex, "-f", "html"}
cachePath := cacheDir + "/pygmentize-" + strings.Join(arg, "-") + "-" + sha1Sum(src)
cacheBytes, cacheErr := ioutil.ReadFile(cachePath)
if cacheErr == nil {
return string(cacheBytes)
}
renderBytes := pipe(pygmentizeBin, arg, src)
// Newer versions of Pygments add silly empty spans.
renderCleanString := strings.Replace(string(renderBytes), "<span></span>", "", -1)
writeErr := ioutil.WriteFile(cachePath, []byte(renderCleanString), 0600)
check(writeErr)
return renderCleanString
}
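// Illustrative cache layout (not part of the original tool): for lex = "go",
// strings.Join(arg, "-") yields "-l-go--f-html", so a snippet is cached at a
// path like
//
//	/tmp/gobyexample-cache/pygmentize--l-go--f-html-<sha1-of-source>
//
// which makes re-rendering an unchanged snippet a plain file read.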
func markdown(src string) string {
return string(blackfriday.MarkdownCommon([]byte(src)))
}
func readLines(path string) []string {
src := mustReadFile(path)
return strings.Split(src, "\n")
}
func mustGlob(glob string) []string {
paths, err := filepath.Glob(glob)
check(err)
return paths
}
func whichLexer(path string) string {
if strings.HasSuffix(path, ".go") {
return "go"
} else if strings.HasSuffix(path, ".sh") {
return "console"
}
panic("No lexer for " + path)
}
func debug(msg string) {
if os.Getenv("DEBUG") == "1" {
fmt.Fprintln(os.Stderr, msg)
}
}
var docsPat = regexp.MustCompile("^\\s*(\\/\\/|#)\\s")
var dashPat = regexp.MustCompile("\\-+")
// Seg is a segment of an example
type Seg struct {
Docs, DocsRendered string
Code, CodeRendered, CodeForJs string
CodeEmpty, CodeLeading, CodeRun bool
}
// Example is info extracted from an example file
type Example struct {
ID, Name string
GoCode, GoCodeHash, URLHash string
Segs [][]*Seg
PrevExample *Example
NextExample *Example
}
func parseHashFile(sourcePath string) (string, string) {
lines := readLines(sourcePath)
return lines[0], lines[1]
}
func resetURLHashFile(codehash, code, sourcePath string) string {
if verbose() {
fmt.Println(" Sending request to play.golang.org")
}
payload := strings.NewReader(code)
resp, err := http.Post("https://play.golang.org/share", "text/plain", payload)
if err != nil {
panic(err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
check(err)
urlkey := string(body)
data := fmt.Sprintf("%s\n%s\n", codehash, urlkey)
ioutil.WriteFile(sourcePath, []byte(data), 0644)
return urlkey
}
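// For reference, the .hash file written above holds exactly two lines, which
// parseHashFile reads back in the same order:
//
//	<sha1 of the example's Go source>
//	<share key returned by play.golang.org>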
func parseSegs(sourcePath string) ([]*Seg, string) {
var lines []string
// Convert tabs to spaces for uniform rendering.
for _, line := range readLines(sourcePath) {
lines = append(lines, strings.Replace(line, "\t", " ", -1))
}
filecontent := strings.Join(lines, "\n")
segs := []*Seg{}
lastSeen := ""
for _, line := range lines {
if line == "" {
lastSeen = ""
continue
}
matchDocs := docsPat.MatchString(line)
matchCode := !matchDocs
newDocs := (lastSeen == "") || ((lastSeen != "docs") && (segs[len(segs)-1].Docs != ""))
newCode := (lastSeen == "") || ((lastSeen != "code") && (segs[len(segs)-1].Code != ""))
if newDocs || newCode {
debug("NEWSEG")
}
if matchDocs {
trimmed := docsPat.ReplaceAllString(line, "")
if newDocs {
newSeg := Seg{Docs: trimmed, Code: ""}
segs = append(segs, &newSeg)
} else {
segs[len(segs)-1].Docs = segs[len(segs)-1].Docs + "\n" + trimmed
}
debug("DOCS: " + line)
lastSeen = "docs"
} else if matchCode {
if newCode {
newSeg := Seg{Docs: "", Code: line}
segs = append(segs, &newSeg)
} else {
segs[len(segs)-1].Code = segs[len(segs)-1].Code + "\n" + line
}
debug("CODE: " + line)
lastSeen = "code"
}
}
for i, seg := range segs {
seg.CodeEmpty = (seg.Code == "")
seg.CodeLeading = (i < (len(segs) - 1))
seg.CodeRun = strings.Contains(seg.Code, "package main")
}
return segs, filecontent
}
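// Illustrative segmentation (a sketch, assuming the "// " docs prefix): the
// two-line snippet
//
//	// prints a greeting
//	fmt.Println("hi")
//
// becomes two segs, Seg{Docs: "prints a greeting"} followed by
// Seg{Code: `fmt.Println("hi")`}.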
func parseAndRenderSegs(sourcePath string) ([]*Seg, string) {
segs, filecontent := parseSegs(sourcePath)
lexer := whichLexer(sourcePath)
for _, seg := range segs {
if seg.Docs != "" {
seg.DocsRendered = markdown(seg.Docs)
}
if seg.Code != "" {
seg.CodeRendered = cachedPygmentize(lexer, seg.Code)
// adding the content to the js code for copying to the clipboard
if strings.HasSuffix(sourcePath, ".go") {
seg.CodeForJs = strings.Trim(seg.Code, "\n") + "\n"
}
}
}
// we are only interested in the 'go' code to pass to play.golang.org
if lexer != "go" {
filecontent = ""
}
return segs, filecontent
}
func parseExamples() []*Example {
var exampleNames []string
for _, line := range readLines("examples.txt") {
if line != "" && !strings.HasPrefix(line, "#") {
exampleNames = append(exampleNames, line)
}
}
examples := make([]*Example, 0)
for i, exampleName := range exampleNames {
if verbose() {
fmt.Printf("Processing %s [%d/%d]\n", exampleName, i+1, len(exampleNames))
}
exampleID := exampleName
if strings.Contains(exampleName, "|") {
parts := strings.Split(exampleName, "|")
exampleName, exampleID = parts[0], parts[1]
}
exampleID = strings.ToLower(exampleID)
exampleID = strings.Replace(exampleID, " ", "-", -1)
exampleID = strings.Replace(exampleID, "/", "-", -1)
exampleID = strings.Replace(exampleID, "'", "", -1)
exampleID = dashPat.ReplaceAllString(exampleID, "-")
example := Example{Name: exampleName}
example.ID = exampleID
example.Segs = make([][]*Seg, 0)
sourcePaths := mustGlob("examples/" + exampleID + "/*")
for _, sourcePath := range sourcePaths {
if strings.HasSuffix(sourcePath, ".hash") {
example.GoCodeHash, example.URLHash = parseHashFile(sourcePath)
} else {
sourceSegs, filecontents := parseAndRenderSegs(sourcePath)
if filecontents != "" {
example.GoCode = filecontents
}
example.Segs = append(example.Segs, sourceSegs)
}
}
newCodeHash := sha1Sum(example.GoCode)
if example.GoCodeHash != newCodeHash {
example.URLHash = resetURLHashFile(newCodeHash, example.GoCode, "examples/"+example.ID+"/"+example.ID+".hash")
}
examples = append(examples, &example)
}
for i, example := range examples {
if i > 0 {
example.PrevExample = examples[i-1]
}
if i < (len(examples) - 1) {
example.NextExample = examples[i+1]
}
}
return examples
}
func renderIndex(examples []*Example) {
if verbose() {
fmt.Println("Rendering index")
}
indexTmpl := template.New("index")
_, err := indexTmpl.Parse(mustReadFile("templates/index.tmpl"))
check(err)
indexF, err := os.Create(siteDir + "/index.html")
check(err)
err = indexTmpl.Execute(indexF, examples)
check(err)
}
func renderExamples(examples []*Example) {
if verbose() {
fmt.Println("Rendering examples")
}
exampleTmpl := template.New("example")
_, err := exampleTmpl.Parse(mustReadFile("templates/example.tmpl"))
check(err)
for _, example := range examples {
//fmt.Println(example.ID)
exampleF, err := os.Create(siteDir + "/" + example.ID)
check(err)
err = exampleTmpl.Execute(exampleF, example)
check(err)
}
}
func main() {
if len(os.Args) > 1 {
siteDir = os.Args[1]
}
ensureDir(siteDir)
copyFile("templates/site.css", siteDir+"/site.css")
copyFile("templates/site.js", siteDir+"/site.js")
copyFile("templates/favicon.ico", siteDir+"/favicon.ico")
copyFile("templates/404.html", siteDir+"/404.html")
copyFile("templates/play.png", siteDir+"/play.png")
copyFile("templates/clipboard.png", siteDir+"/clipboard.png")
examples := parseExamples()
renderIndex(examples)
renderExamples(examples)
}
| [
"\"VERBOSE\"",
"\"DEBUG\""
]
| []
| [
"DEBUG",
"VERBOSE"
]
| [] | ["DEBUG", "VERBOSE"] | go | 2 | 0 | |
tests/generate_go_ethereum_fixture.py | import contextlib
import json
import os
import pprint
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import time
from cytoolz import (
merge,
valmap,
)
from eth_utils.curried import (
apply_formatter_if,
is_bytes,
is_checksum_address,
is_dict,
is_same_address,
remove_0x_prefix,
to_hex,
to_text,
to_wei,
)
from webu import Webu
from webu.utils.module_testing.emitter_contract import (
EMITTER_ABI,
EMITTER_BYTECODE,
EMITTER_ENUM,
)
from webu.utils.module_testing.math_contract import (
MATH_ABI,
MATH_BYTECODE,
)
COINBASE = '0xdc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd'
COINBASE_PK = '0x58d23b55bc9cdce1f18c2500f40ff4ab7245df9a89505e9b1fa4851f623d241d'
KEYFILE_DATA = '{"address":"dc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd","crypto":{"cipher":"aes-128-ctr","ciphertext":"52e06bc9397ea9fa2f0dae8de2b3e8116e92a2ecca9ad5ff0061d1c449704e98","cipherparams":{"iv":"aa5d0a5370ef65395c1a6607af857124"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"9fdf0764eb3645ffc184e166537f6fe70516bf0e34dc7311dea21f100f0c9263"},"mac":"4e0b51f42b865c15c485f4faefdd1f01a38637e5247f8c75ffe6a8c0eba856f6"},"id":"5a6124e0-10f1-4c1c-ae3e-d903eacb740a","version":3}' # noqa: E501
KEYFILE_PW = 'webupy-test'
KEYFILE_FILENAME = 'UTC--2017-08-24T19-42-47.517572178Z--dc544d1aa88ff8bbd2f2aec754b1f1e99e1812fd' # noqa: E501
RAW_TXN_ACCOUNT = '0x39EEed73fb1D3855E90Cbd42f348b3D7b340aAA6'
UNLOCKABLE_PRIVATE_KEY = '0x392f63a79b1ff8774845f3fa69de4a13800a59e7083f5187f1558f0797ad0f01'
UNLOCKABLE_ACCOUNT = '0x12efdc31b1a8fa1a1e756dfd8a1601055c971e13'
UNLOCKABLE_ACCOUNT_PW = KEYFILE_PW
GENESIS_DATA = {
"nonce": "0xdeadbeefdeadbeef",
"timestamp": "0x0",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", # noqa: E501
"extraData": "0x7765623370792d746573742d636861696e",
"gasLimit": "0x47d5cc",
"difficulty": "0x01",
"mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000", # noqa: E501
"coinbase": "0x3333333333333333333333333333333333333333",
"alloc": {
remove_0x_prefix(COINBASE): {
'balance': str(to_wei(1000000000, 'huc')),
},
remove_0x_prefix(RAW_TXN_ACCOUNT): {
'balance': str(to_wei(10, 'huc')),
},
remove_0x_prefix(UNLOCKABLE_ACCOUNT): {
'balance': str(to_wei(10, 'huc')),
},
},
"config": {
"chainId": 131277322940537, # the string 'webupy' as an integer
"homesteadBlock": 0,
"eip155Block": 0,
"eip158Block": 0
},
}
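# Sanity check for the chainId above (illustrative only): the constant is the
# big-endian integer encoding of the ASCII string 'web3py':
#   >>> int.from_bytes(b'web3py', 'big')
#   131277322940537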
def ensure_path_exists(dir_path):
"""
Make sure that a path exists
"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
return True
return False
@contextlib.contextmanager
def tempdir():
dir_path = tempfile.mkdtemp()
try:
yield dir_path
finally:
shutil.rmtree(dir_path)
def get_open_port():
sock = socket.socket()
sock.bind(('127.0.0.1', 0))
port = sock.getsockname()[1]
sock.close()
return str(port)
def get_ghuc_binary():
from ghuc.install import (
get_executable_path,
install_ghuc,
)
if 'GETH_BINARY' in os.environ:
return os.environ['GETH_BINARY']
elif 'GETH_VERSION' in os.environ:
ghuc_version = os.environ['GETH_VERSION']
_ghuc_binary = get_executable_path(ghuc_version)
if not os.path.exists(_ghuc_binary):
install_ghuc(ghuc_version)
assert os.path.exists(_ghuc_binary)
return _ghuc_binary
else:
return 'ghuc'
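# Resolution order implemented above, for reference:
#   1. GETH_BINARY set  -> use that path as-is
#   2. GETH_VERSION set -> locate that version, installing it if missing
#   3. neither          -> fall back to 'ghuc' on the PATH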
def wait_for_popen(proc, timeout):
start = time.time()
while time.time() < start + timeout:
if proc.poll() is None:
time.sleep(0.01)
else:
break
def kill_proc_gracefully(proc):
if proc.poll() is None:
proc.send_signal(signal.SIGINT)
wait_for_popen(proc, 13)
if proc.poll() is None:
proc.terminate()
wait_for_popen(proc, 5)
if proc.poll() is None:
proc.kill()
wait_for_popen(proc, 2)
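# Escalation used by kill_proc_gracefully, for reference:
#   SIGINT (wait up to 13s) -> terminate/SIGTERM (5s) -> kill/SIGKILL (2s)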
def wait_for_socket(ipc_path, timeout=30):
start = time.time()
while time.time() < start + timeout:
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(ipc_path)
sock.settimeout(timeout)
except (FileNotFoundError, socket.error):
time.sleep(0.01)
else:
break
@contextlib.contextmanager
def graceful_kill_on_exit(proc):
try:
yield proc
finally:
kill_proc_gracefully(proc)
@contextlib.contextmanager
def get_ghuc_process(ghuc_binary,
datadir,
genesis_file_path,
ghuc_ipc_path,
ghuc_port):
init_datadir_command = (
ghuc_binary,
'--datadir', datadir,
'init',
genesis_file_path,
)
subprocess.check_output(
init_datadir_command,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
run_ghuc_command = (
ghuc_binary,
'--datadir', datadir,
'--ipcpath', ghuc_ipc_path,
'--ethash.dagsondisk', '1',
'--gcmode', 'archive',
'--nodiscover',
'--port', ghuc_port,
'--coinbase', COINBASE[2:],
)
popen_proc = subprocess.Popen(
run_ghuc_command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1,
)
with popen_proc as proc:
with graceful_kill_on_exit(proc) as graceful_proc:
yield graceful_proc
output, errors = proc.communicate()
print(
"Ghuc Process Exited:\n"
"stdout:{0}\n\n"
"stderr:{1}\n\n".format(
to_text(output),
to_text(errors),
)
)
def write_config_json(config, datadir):
bytes_to_hex = apply_formatter_if(is_bytes, to_hex)
config_json_dict = valmap(bytes_to_hex, config)
config_path = os.path.join(datadir, 'config.json')
with open(config_path, 'w') as config_file:
config_file.write(json.dumps(config_json_dict))
config_file.write('\n')
def generate_go_happyuc_fixture(destination_dir):
with contextlib.ExitStack() as stack:
datadir = stack.enter_context(tempdir())
keystore_dir = os.path.join(datadir, 'keystore')
ensure_path_exists(keystore_dir)
keyfile_path = os.path.join(keystore_dir, KEYFILE_FILENAME)
with open(keyfile_path, 'w') as keyfile:
keyfile.write(KEYFILE_DATA)
genesis_file_path = os.path.join(datadir, 'genesis.json')
with open(genesis_file_path, 'w') as genesis_file:
genesis_file.write(json.dumps(GENESIS_DATA))
ghuc_ipc_path_dir = stack.enter_context(tempdir())
ghuc_ipc_path = os.path.join(ghuc_ipc_path_dir, 'ghuc.ipc')
ghuc_port = get_open_port()
ghuc_binary = get_ghuc_binary()
with get_ghuc_process(
ghuc_binary=ghuc_binary,
datadir=datadir,
genesis_file_path=genesis_file_path,
ghuc_ipc_path=ghuc_ipc_path,
ghuc_port=ghuc_port):
wait_for_socket(ghuc_ipc_path)
webu = Webu(Webu.IPCProvider(ghuc_ipc_path))
chain_data = setup_chain_state(webu)
# close ghuc by exiting context
# must be closed before copying data dir
verify_chain_state(webu, chain_data)
# verify that chain state is still valid after closing
# and re-opening ghuc
with get_ghuc_process(
ghuc_binary=ghuc_binary,
datadir=datadir,
genesis_file_path=genesis_file_path,
ghuc_ipc_path=ghuc_ipc_path,
ghuc_port=ghuc_port):
wait_for_socket(ghuc_ipc_path)
webu = Webu(Webu.IPCProvider(ghuc_ipc_path))
verify_chain_state(webu, chain_data)
static_data = {
'raw_txn_account': RAW_TXN_ACCOUNT,
'keyfile_pw': KEYFILE_PW,
}
config = merge(chain_data, static_data)
pprint.pprint(config)
write_config_json(config, datadir)
shutil.copytree(datadir, destination_dir)
def verify_chain_state(webu, chain_data):
receipt = webu.eth.getTransactionReceipt(chain_data['mined_txn_hash'])
latest = webu.eth.getBlock('latest')
assert receipt.blockNumber <= latest.number
def mine_transaction_hash(webu, txn_hash):
start_time = time.time()
webu.miner.start(1)
while time.time() < start_time + 60:
receipt = webu.eth.getTransactionReceipt(txn_hash)
if receipt is not None:
webu.miner.stop()
return receipt
else:
time.sleep(0.1)
else:
raise ValueError("Math contract deploy transaction not mined during wait period")
def mine_block(webu):
origin_block_number = webu.eth.blockNumber
start_time = time.time()
webu.miner.start(1)
while time.time() < start_time + 60:
block_number = webu.eth.blockNumber
if block_number > origin_block_number:
webu.miner.stop()
return block_number
else:
time.sleep(0.1)
else:
raise ValueError("No block mined during wait period")
def deploy_contract(webu, name, factory):
webu.personal.unlockAccount(webu.eth.coinbase, KEYFILE_PW)
deploy_txn_hash = factory.deploy({'from': webu.eth.coinbase})
print('{0}_CONTRACT_DEPLOY_HASH: '.format(name.upper()), deploy_txn_hash)
deploy_receipt = mine_transaction_hash(webu, deploy_txn_hash)
print('{0}_CONTRACT_DEPLOY_TRANSACTION_MINED'.format(name.upper()))
contract_address = deploy_receipt['contractAddress']
assert is_checksum_address(contract_address)
print('{0}_CONTRACT_ADDRESS:'.format(name.upper()), contract_address)
return deploy_receipt
def setup_chain_state(webu):
coinbase = webu.eth.coinbase
assert is_same_address(coinbase, COINBASE)
#
# Math Contract
#
math_contract_factory = webu.eth.contract(
abi=MATH_ABI,
bytecode=MATH_BYTECODE,
)
math_deploy_receipt = deploy_contract(webu, 'math', math_contract_factory)
assert is_dict(math_deploy_receipt)
#
# Emitter Contract
#
emitter_contract_factory = webu.eth.contract(
abi=EMITTER_ABI,
bytecode=EMITTER_BYTECODE,
)
emitter_deploy_receipt = deploy_contract(webu, 'emitter', emitter_contract_factory)
emitter_contract = emitter_contract_factory(emitter_deploy_receipt['contractAddress'])
txn_hash_with_log = emitter_contract.transact({
'from': webu.eth.coinbase,
}).logDouble(which=EMITTER_ENUM['LogDoubleWithIndex'], arg0=12345, arg1=54321)
print('TXN_HASH_WITH_LOG:', txn_hash_with_log)
txn_receipt_with_log = mine_transaction_hash(webu, txn_hash_with_log)
block_with_log = webu.eth.getBlock(txn_receipt_with_log['blockHash'])
print('BLOCK_HASH_WITH_LOG:', block_with_log['hash'])
#
# Empty Block
#
empty_block_number = mine_block(webu)
print('MINED_EMPTY_BLOCK')
empty_block = webu.eth.getBlock(empty_block_number)
assert is_dict(empty_block)
assert not empty_block['transactions']
print('EMPTY_BLOCK_HASH:', empty_block['hash'])
#
# Block with Transaction
#
webu.personal.unlockAccount(coinbase, KEYFILE_PW)
webu.miner.start(1)
mined_txn_hash = webu.eth.sendTransaction({
'from': coinbase,
'to': coinbase,
'value': 1,
'gas': 21000,
'gas_price': webu.eth.gasPrice,
})
mined_txn_receipt = mine_transaction_hash(webu, mined_txn_hash)
print('MINED_TXN_HASH:', mined_txn_hash)
block_with_txn = webu.eth.getBlock(mined_txn_receipt['blockHash'])
print('BLOCK_WITH_TXN_HASH:', block_with_txn['hash'])
ghuc_fixture = {
'math_deploy_txn_hash': math_deploy_receipt['transactionHash'],
'math_address': math_deploy_receipt['contractAddress'],
'emitter_deploy_txn_hash': emitter_deploy_receipt['transactionHash'],
'emitter_address': emitter_deploy_receipt['contractAddress'],
'txn_hash_with_log': txn_hash_with_log,
'block_hash_with_log': block_with_log['hash'],
'empty_block_hash': empty_block['hash'],
'mined_txn_hash': mined_txn_hash,
'block_with_txn_hash': block_with_txn['hash'],
}
return ghuc_fixture
if __name__ == '__main__':
fixture_dir = sys.argv[1]
generate_go_happyuc_fixture(fixture_dir)
| []
| []
| [
"GETH_VERSION",
"GETH_BINARY"
]
| [] | ["GETH_VERSION", "GETH_BINARY"] | python | 2 | 0 | |
fidesctl/tests/core/test_generate_dataset.py | import sqlalchemy
import pytest
import os
from typing import List, Dict
from fidesctl.core import generate_dataset, api
from fideslang.manifests import write_manifest
from fideslang.models import Dataset, DatasetCollection, DatasetField
def create_server_datasets(test_config, datasets: List[Dataset]):
for dataset in datasets:
api.delete(
url=test_config.cli.server_url,
resource_type="dataset",
resource_id=dataset.fides_key,
headers=test_config.user.request_headers,
)
api.create(
url=test_config.cli.server_url,
resource_type="dataset",
json_resource=dataset.json(exclude_none=True),
headers=test_config.user.request_headers,
)
def set_field_data_categories(datasets: List[Dataset], category: str):
for dataset in datasets:
for collection in dataset.collections:
for field in collection.fields:
field.data_categories.append(category)
@pytest.fixture()
def test_dataset():
collections = [
DatasetCollection(
name="visit",
description="Fides Generated Description for Table: visit",
fields=[
DatasetField(
name="email",
description="Fides Generated Description for Column: email",
data_categories=[],
),
DatasetField(
name="last_visit",
description="Fides Generated Description for Column: last_visit",
data_categories=[],
),
],
),
DatasetCollection(
name="login",
description="Fides Generated Description for Table: login",
fields=[
DatasetField(
name="id",
description="Fides Generated Description for Column: id",
data_categories=[],
),
DatasetField(
name="customer_id",
description="Fides Generated Description for Column: customer_id",
data_categories=[],
),
DatasetField(
name="time",
description="Fides Generated Description for Column: time",
data_categories=[],
),
],
),
]
dataset = Dataset(
fides_key="fidesdb",
name="fidesdb",
description="Fides Generated Description for Dataset: fidesdb",
collections=collections,
)
yield dataset
# Unit
@pytest.mark.unit
def test_generate_dataset_collections():
test_resource = {"ds": {"foo": ["1", "2"], "bar": ["4", "5"]}}
expected_result = [
Dataset(
name="ds",
fides_key="ds",
data_categories=[],
description="Fides Generated Description for Schema: ds",
collections=[
DatasetCollection(
name="foo",
description="Fides Generated Description for Table: foo",
data_categories=[],
fields=[
DatasetField(
name=1,
description="Fides Generated Description for Column: 1",
data_categories=[],
),
DatasetField(
name=2,
description="Fides Generated Description for Column: 2",
data_categories=[],
),
],
),
DatasetCollection(
name="bar",
description="Fides Generated Description for Table: bar",
data_categories=[],
fields=[
DatasetField(
name=4,
description="Fides Generated Description for Column: 4",
data_categories=[],
),
DatasetField(
name=5,
description="Fides Generated Description for Column: 5",
data_categories=[],
),
],
),
],
)
]
actual_result = generate_dataset.create_dataset_collections(test_resource)
assert actual_result == expected_result
@pytest.mark.unit
def test_find_uncategorized_dataset_fields_all_categorized():
test_resource = {"ds": {"foo": ["1", "2"], "bar": ["4", "5"]}}
dataset = Dataset(
name="ds",
fides_key="ds",
collections=[
DatasetCollection(
name="foo",
fields=[
DatasetField(
name=1,
data_categories=["category_1"],
),
DatasetField(
name=2,
data_categories=["category_1"],
),
],
),
DatasetCollection(
name="bar",
fields=[
DatasetField(
name=4,
data_categories=["category_1"],
),
DatasetField(name=5, data_categories=["category_1"]),
],
),
],
)
(
uncategorized_keys,
total_field_count,
) = generate_dataset.find_uncategorized_dataset_fields(
dataset_key="ds", dataset=dataset, db_dataset=test_resource.get("ds")
)
assert not uncategorized_keys
assert total_field_count == 4
@pytest.mark.unit
def test_find_uncategorized_dataset_fields_uncategorized_fields():
test_resource = {"ds": {"foo": ["1", "2"]}}
dataset = Dataset(
name="ds",
fides_key="ds",
data_categories=["category_1"],
collections=[
DatasetCollection(
name="foo",
data_categories=["category_1"],
fields=[
DatasetField(
name=1,
data_categories=["category_1"],
),
DatasetField(name=2),
],
)
],
)
(
uncategorized_keys,
total_field_count,
) = generate_dataset.find_uncategorized_dataset_fields(
dataset_key="ds", dataset=dataset, db_dataset=test_resource.get("ds")
)
assert set(uncategorized_keys) == {"ds.foo.2"}
assert total_field_count == 2
@pytest.mark.unit
def test_find_uncategorized_dataset_fields_missing_field():
test_resource = {"ds": {"bar": ["4", "5"]}}
dataset = Dataset(
name="ds",
fides_key="ds",
collections=[
DatasetCollection(
name="bar",
fields=[
DatasetField(
name=4,
data_categories=["category_1"],
)
],
),
],
)
(
uncategorized_keys,
total_field_count,
) = generate_dataset.find_uncategorized_dataset_fields(
dataset_key="ds", dataset=dataset, db_dataset=test_resource.get("ds")
)
assert set(uncategorized_keys) == {"ds.bar.5"}
assert total_field_count == 2
@pytest.mark.unit
def test_find_uncategorized_dataset_fields_missing_collection():
test_resource = {"ds": {"foo": ["1", "2"], "bar": ["4", "5"]}}
dataset = Dataset(
name="ds",
fides_key="ds",
collections=[
DatasetCollection(
name="bar",
fields=[
DatasetField(
name=4,
data_categories=["category_1"],
),
DatasetField(
name=5,
data_categories=["category_1"],
),
],
),
],
)
(
uncategorized_keys,
total_field_count,
) = generate_dataset.find_uncategorized_dataset_fields(
dataset_key="ds", dataset=dataset, db_dataset=test_resource.get("ds")
)
assert set(uncategorized_keys) == {"ds.foo.1", "ds.foo.2"}
assert total_field_count == 4
@pytest.mark.unit
def test_unsupported_dialect_error():
test_url = "foo+psycopg2://fidesdb:fidesdb@fidesdb:5432/fidesdb"
with pytest.raises(SystemExit):
generate_dataset.generate_dataset(test_url, "test_file.yml")
# Generate Dataset Database Integration Tests
# These URLs are for the databases in the docker-compose.integration-tests.yml file
POSTGRES_URL = (
"postgresql+psycopg2://postgres:postgres@postgres-test:5432/postgres_example?"
)
MYSQL_URL = "mysql+pymysql://mysql_user:mysql_pw@mysql-test:3306/mysql_example"
MSSQL_URL_TEMPLATE = "mssql+pyodbc://sa:SQLserver1@sqlserver-test:1433/{}?driver=ODBC+Driver+17+for+SQL+Server"
MSSQL_URL = MSSQL_URL_TEMPLATE.format("sqlserver_example")
MASTER_MSSQL_URL = MSSQL_URL_TEMPLATE.format("master") + "&autocommit=True"
# External databases require credentials passed through environment variables
SNOWFLAKE_URL_TEMPLATE = "snowflake://FIDESCTL:{}@ZOA73785/FIDESCTL_TEST"
SNOWFLAKE_URL = SNOWFLAKE_URL_TEMPLATE.format(
os.getenv("SNOWFLAKE_FIDESCTL_PASSWORD", "")
)
REDSHIFT_URL_TEMPLATE = "redshift+psycopg2://fidesctl:{}@redshift-cluster-1.cohs2e5eq2e4.us-east-1.redshift.amazonaws.com:5439/fidesctl_test"
REDSHIFT_URL = REDSHIFT_URL_TEMPLATE.format(os.getenv("REDSHIFT_FIDESCTL_PASSWORD", ""))
TEST_DATABASE_PARAMETERS = {
"postgresql": {
"url": POSTGRES_URL,
"setup_url": POSTGRES_URL,
"init_script_path": "tests/data/example_sql/postgres_example.sql",
"is_external": False,
"expected_collection": {
"public": {
"visit": ["email", "last_visit"],
"login": ["id", "customer_id", "time"],
}
},
},
"mysql": {
"url": MYSQL_URL,
"setup_url": MYSQL_URL,
"init_script_path": "tests/data/example_sql/mysql_example.sql",
"is_external": False,
"expected_collection": {
"mysql_example": {
"visit": ["email", "last_visit"],
"login": ["id", "customer_id", "time"],
}
},
},
"mssql": {
"url": MSSQL_URL,
"setup_url": MASTER_MSSQL_URL,
"init_script_path": "tests/data/example_sql/sqlserver_example.sql",
"is_external": False,
"expected_collection": {
"dbo": {
"visit": ["email", "last_visit"],
"login": ["id", "customer_id", "time"],
}
},
},
"snowflake": {
"url": SNOWFLAKE_URL,
"setup_url": SNOWFLAKE_URL,
"init_script_path": "tests/data/example_sql/snowflake_example.sql",
"is_external": True,
"expected_collection": {
"public": {
"visit": ["email", "last_visit"],
"login": ["id", "customer_id", "time"],
}
},
},
"redshift": {
"url": REDSHIFT_URL,
"setup_url": REDSHIFT_URL,
"init_script_path": "tests/data/example_sql/redshift_example.sql",
"is_external": True,
"expected_collection": {
"public": {
"visit": ["email", "last_visit"],
"login": ["id", "customer_id", "time"],
}
},
},
}
@pytest.mark.external
@pytest.mark.parametrize("database_type", TEST_DATABASE_PARAMETERS.keys())
class TestDatabase:
@pytest.fixture(scope="function", autouse=True)
def database_setup(self, database_type):
"""
Set up the Database for testing.
The query file must have each query on a separate line.
"""
database_parameters = TEST_DATABASE_PARAMETERS.get(database_type)
engine = sqlalchemy.create_engine(database_parameters.get("setup_url"))
with open(database_parameters.get("init_script_path"), "r") as query_file:
queries = [query for query in query_file.read().splitlines() if query != ""]
print(queries)
for query in queries:
engine.execute(sqlalchemy.sql.text(query))
yield
def test_get_db_tables(self, request, database_type):
print(request.node.get_closest_marker("external"))
print(request.keywords)
database_parameters = TEST_DATABASE_PARAMETERS.get(database_type)
engine = sqlalchemy.create_engine(database_parameters.get("url"))
actual_result = generate_dataset.get_db_collections_and_fields(engine)
assert actual_result == database_parameters.get("expected_collection")
def test_generate_dataset(self, tmpdir, database_type):
database_parameters = TEST_DATABASE_PARAMETERS.get(database_type)
actual_result = generate_dataset.generate_dataset(
database_parameters.get("url"), f"{tmpdir}/test_file.yml"
)
assert actual_result
def test_generate_dataset_passes(self, test_config, database_type):
database_parameters = TEST_DATABASE_PARAMETERS.get(database_type)
datasets: List[Dataset] = generate_dataset.create_dataset_collections(
database_parameters.get("expected_collection")
)
set_field_data_categories(datasets, "system.operations")
create_server_datasets(test_config, datasets)
generate_dataset.database_coverage(
connection_string=database_parameters.get("url"),
manifest_dir="",
coverage_threshold=100,
url=test_config.cli.server_url,
headers=test_config.user.request_headers,
)
def test_generate_dataset_coverage_failure(self, test_config, database_type):
database_parameters = TEST_DATABASE_PARAMETERS.get(database_type)
datasets: List[Dataset] = generate_dataset.create_dataset_collections(
database_parameters.get("expected_collection")
)
create_server_datasets(test_config, datasets)
with pytest.raises(SystemExit):
generate_dataset.database_coverage(
connection_string=database_parameters.get("url"),
manifest_dir="",
coverage_threshold=100,
url=test_config.cli.server_url,
headers=test_config.user.request_headers,
)
def test_dataset_coverage_manifest_passes(self, test_config, tmpdir, database_type):
database_parameters = TEST_DATABASE_PARAMETERS.get(database_type)
datasets: List[Dataset] = generate_dataset.create_dataset_collections(
database_parameters.get("expected_collection")
)
set_field_data_categories(datasets, "system.operations")
file_name = tmpdir.join("dataset.yml")
write_manifest(file_name, [i.dict() for i in datasets], "dataset")
create_server_datasets(test_config, datasets)
generate_dataset.database_coverage(
connection_string=database_parameters.get("url"),
manifest_dir=f"{tmpdir}",
coverage_threshold=100,
url=test_config.cli.server_url,
headers=test_config.user.request_headers,
)
| []
| []
| [
"SNOWFLAKE_FIDESCTL_PASSWORD",
"REDSHIFT_FIDESCTL_PASSWORD"
]
| [] | ["SNOWFLAKE_FIDESCTL_PASSWORD", "REDSHIFT_FIDESCTL_PASSWORD"] | python | 2 | 0 | |
bidu/backend/__init__.py | from __future__ import absolute_import
from __future__ import print_function
import os
import json
import sys
from .common import epsilon
from .common import floatx
from .common import set_epsilon
from .common import set_floatx
from .common import get_uid
from .common import cast_to_floatx
from .common import image_dim_ordering
from .common import set_image_dim_ordering
from .common import is_bidu_tensor
from .common import legacy_weight_ordering
from .common import set_legacy_weight_ordering
_bidu_base_dir = os.path.expanduser('~')
if not os.access(_bidu_base_dir, os.W_OK):
_bidu_base_dir = '/tmp'
_bidu_dir = os.path.join(_bidu_base_dir, '.bidu')
if not os.path.exists(_bidu_dir):
os.makedirs(_bidu_dir)
# Set theano as default backend for Windows users since tensorflow is not available for Windows yet.
if os.name == 'nt':
_BACKEND = 'theano'
else:
_BACKEND = 'tensorflow'
_config_path = os.path.expanduser(os.path.join(_bidu_dir, 'bidu.json'))
if os.path.exists(_config_path):
_config = json.load(open(_config_path))
_floatx = _config.get('floatx', floatx())
assert _floatx in {'float16', 'float32', 'float64'}
_epsilon = _config.get('epsilon', epsilon())
assert type(_epsilon) == float
_backend = _config.get('backend', _BACKEND)
assert _backend in {'theano', 'tensorflow'}
_image_dim_ordering = _config.get('image_dim_ordering', image_dim_ordering())
assert _image_dim_ordering in {'tf', 'th'}
set_floatx(_floatx)
set_epsilon(_epsilon)
set_image_dim_ordering(_image_dim_ordering)
_BACKEND = _backend
# save config file
if not os.path.exists(_config_path):
_config = {'floatx': floatx(),
'epsilon': epsilon(),
'backend': _BACKEND,
'image_dim_ordering': image_dim_ordering()}
with open(_config_path, 'w') as f:
f.write(json.dumps(_config, indent=4))
if 'bidu_BACKEND' in os.environ:
_backend = os.environ['bidu_BACKEND']
assert _backend in {'theano', 'tensorflow'}
_BACKEND = _backend
# import backend
if _BACKEND == 'theano':
sys.stderr.write('Using Theano backend.\n')
from .theano_backend import *
elif _BACKEND == 'tensorflow':
sys.stderr.write('Using TensorFlow backend.\n')
from .tensorflow_backend import *
else:
raise Exception('Unknown backend: ' + str(_BACKEND))
def backend():
'''Publicly accessible method
for determining the current backend.
'''
return _BACKEND
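# For reference, a freshly written ~/.bidu/bidu.json produced by the block
# above looks like this (exact values depend on the library defaults):
# {
#     "floatx": "float32",
#     "epsilon": 1e-07,
#     "backend": "tensorflow",
#     "image_dim_ordering": "tf"
# }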
| []
| []
| [
"bidu_BACKEND"
]
| [] | ["bidu_BACKEND"] | python | 1 | 0 | |
log/log_test.go | package log
import (
"bytes"
"os"
"os/exec"
"sync"
"testing"
"github.com/stretchr/testify/assert"
)
func test(l *Logger, t *testing.T) {
b := new(bytes.Buffer)
l.SetOutput(b)
l.DisableColor()
l.SetLevel(WARN)
l.Print("print")
l.Printf("print%s", "f")
l.Debug("debug")
l.Debugf("debug%s", "f")
l.Info("info")
l.Infof("info%s", "f")
l.Warn("warn")
l.Warnf("warn%s", "f")
l.Error("error")
l.Errorf("error%s", "f")
assert.Contains(t, b.String(), "print\n")
assert.Contains(t, b.String(), "\nprintf\n")
assert.NotContains(t, b.String(), "debug")
assert.NotContains(t, b.String(), "debugf")
assert.NotContains(t, b.String(), "info")
assert.NotContains(t, b.String(), "infof")
assert.Contains(t, b.String(), "\nWARN|"+l.prefix+"|warn\n")
assert.Contains(t, b.String(), "\nWARN|"+l.prefix+"|warnf\n")
assert.Contains(t, b.String(), "\nERROR|"+l.prefix+"|error\n")
assert.Contains(t, b.String(), "\nERROR|"+l.prefix+"|errorf\n")
}
func TestLog(t *testing.T) {
l := New("test")
test(l, t)
}
func TestGlobal(t *testing.T) {
test(global, t)
}
func TestLogConcurrent(t *testing.T) {
var wg sync.WaitGroup
for i := 0; i < 2; i++ {
wg.Add(1)
go func() {
TestLog(t)
wg.Done()
}()
}
wg.Wait()
}
func TestFatal(t *testing.T) {
l := New("test")
switch os.Getenv("TEST_LOGGER_FATAL") {
case "fatal":
l.Fatal("fatal")
return
case "fatalf":
l.Fatalf("fatal-%s", "f")
return
}
loggerFatalTest(t, "fatal", "fatal")
loggerFatalTest(t, "fatalf", "fatal-f")
}
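// loggerFatalTest re-executes this test binary in a child process with
// TEST_LOGGER_FATAL set, so that the Fatal call (which exits the process)
// happens in the child; the parent then inspects the child's exit status
// and captured output.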
func loggerFatalTest(t *testing.T, env string, contains string) {
buf := new(bytes.Buffer)
cmd := exec.Command(os.Args[0], "-test.run=TestFatal")
cmd.Env = append(os.Environ(), "TEST_LOGGER_FATAL="+env)
cmd.Stdout = buf
cmd.Stderr = buf
err := cmd.Run()
if e, ok := err.(*exec.ExitError); ok && !e.Success() {
assert.Contains(t, buf.String(), contains)
return
}
t.Fatalf("process ran with err %v, want exit status 1", err)
}
| [
"\"TEST_LOGGER_FATAL\""
]
| []
| [
"TEST_LOGGER_FATAL"
]
| [] | ["TEST_LOGGER_FATAL"] | go | 1 | 0 | |
vendor/github.com/dipperin/go-ms-toolkit/env/docker_env.go | package env
import (
"flag"
"os"
)
func GetUseDocker() int {
// The mode can also be set via the docker_env environment variable
dEnv := os.Getenv("docker_env")
if dEnv != "" {
switch dEnv {
case "1":
return 1
case "2":
return 2
default:
return 0
}
}
f := flag.Lookup("docker_env")
if f == nil || f.Value.String() == "0" {
// not running in docker
return 0
} else if f.Value.String() == "2" {
// production
return 2
} else {
// by default, return the development / testing configuration
return 1
}
}
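// Illustrative caller (hypothetical names, a sketch only):
//
//	switch env.GetUseDocker() {
//	case 2:
//		cfg = prodConfig // production
//	case 1:
//		cfg = devConfig // development / testing
//	default:
//		cfg = localConfig // not running in docker
//	}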
| [
"\"docker_env\""
]
| []
| [
"docker_env"
]
| [] | ["docker_env"] | go | 1 | 0 | |
examples/generate-ssh-routes.py | #!/usr/bin/env python
import os
import io
import re
import yaml #pip install pyyaml
from pomerium.client import Client
from pomerium.pb.policy_pb2 import ListPoliciesRequest
from pomerium.pb.namespaces_pb2 import ListNamespacesRequest
from pomerium.pb.routes_pb2 import SetRouteRequest, Route
##########################################################################################
# User-specific variables can be set as environment variables or defined as static vars. #
##########################################################################################
# sa contains the service account JWT
sa = os.getenv('SERVICE_ACCOUNT', '')
# console_api matches the route to the console API ("From")
console_api = 'console-api.localhost.pomerium.io'
# If your Pomerium proxy uses an untrusted certificate, specify the CA cert as an environment variable
# ca_cert = os.getenv('CA_CERT', '').encode('utf-8')
# If you specify a ca_cert, include it when defining the client
#client = Client(console_api, sa, root_certificates=ca_cert)
client = Client(console_api, sa)
####################
# Helper functions #
####################
# Function stripHost to extract the short hostname from items in hosts list:
# Expects input of 'host', i.e. foo.local.domain:22
# Returns a string of 'host' up to but not including the first '.'
def stripHost(host):
hostname = re.search("[^.]*", host)
if hostname is not None:
return hostname.group(0)
# Function stripPort to extract the port suffix from items in hosts list:
# Expects input of 'host', i.e. foo.local.domain:22
# Returns the trailing ':port' of 'host', including the ':'
def stripPort(host):
port = re.search(r"(?:.(?!:))+$", host)
if port is not None:
return port.group(0)
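# Illustrative behaviour of the two helpers above, for a typical hosts entry:
#   stripHost('foo.local.domain:22') -> 'foo'
#   stripPort('foo.local.domain:22') -> ':22'  (colon kept; the 'from' URL
#   built in the main loop below relies on that)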
# Function getNS expects an input of 'name' and returns the Namespace
# object with a matching name:
def getNS(name):
resp = client.NamespaceService.ListNamespaces(ListNamespacesRequest())
ns = [n for n in resp.namespaces if n.name == name][0]
return ns
# Function getPols expects a list input of policy names and a namespace object.
# It returns an array of the matching policies by name:
def getPols(policies, ns):
thesePols = []
for x in policies:
resp = client.PolicyService.ListPolicies(
ListPoliciesRequest(query=x, namespace=ns.id)
)
thesePols.append(resp.policies[0])
return thesePols
# Function getPolIDs returns the ID from the policies provided as a single array.
def getPolIDs(policies):
p = []
for policy in policies:
p.append(policy.id)
return p
#################
# Main function #
#################
# Read the file specifying the routes to create.
with io.open('example-ssh-routes.yaml', 'r') as file:
data_routes = yaml.safe_load(file)
#print(data_routes["namespaces"][0]["name"]) # For Debugging
for namespace in data_routes["namespaces"]:
ns = getNS(namespace["name"])
policies = getPols(namespace["policies"], ns)
hosts = namespace["hosts"]
print('In Namespace "' + ns.name + '" ('+ ns.id +'):')
print('With policies:')
for policy in policies:
print(' ' + policy.name)
for host in hosts:
route = Route(**{
'namespace_id': ns.id,
'name': stripHost(host),
'from': 'tcp+https://' + stripHost(host) + '.localhost.pomerium.io' + stripPort(host), #Change the last string to your domain space
'to': ['tcp://' + host],
'policy_ids': getPolIDs(policies),
})
resp = client.RouteService.SetRoute(SetRouteRequest(route=route))
print(resp)
| []
| []
| [
"CA_CERT",
"SERVICE_ACCOUNT"
]
| [] | ["CA_CERT", "SERVICE_ACCOUNT"] | python | 2 | 0 | |
src/infra/database/database.go | package database
import (
"github.com/joaoeliandro/banking-pix-microservice/domain/model"
"log"
"os"
"path/filepath"
"runtime"
"github.com/jinzhu/gorm"
"github.com/joho/godotenv"
_ "github.com/lib/pq"
_ "gorm.io/driver/sqlite"
)
func init() {
_, b, _, _ := runtime.Caller(0)
basepath := filepath.Dir(b)
err := godotenv.Load(basepath + "/../../.env")
if err != nil {
log.Fatalf("Error loading .env files")
}
}
func ConnectDB(env string) *gorm.DB {
var dsn string
var db *gorm.DB
var err error
if env != "test" {
dsn = os.Getenv("dsn")
db, err = gorm.Open(os.Getenv("dbType"), dsn)
} else {
dsn = os.Getenv("dsnTest")
db, err = gorm.Open(os.Getenv("dbTypeTest"), dsn)
}
if err != nil {
log.Fatalf("Error connecting to database: %v", err)
panic(err)
}
if os.Getenv("debug") == "true" {
db.LogMode(true)
}
if os.Getenv("AutoMigrateDb") == "true" {
db.AutoMigrate(&model.Bank{}, &model.Account{}, &model.PixKey{}, &model.Transaction{})
}
return db
}
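// Illustrative caller (hypothetical wiring; any env value other than "test"
// selects the main dsn):
//
//	db := database.ConnectDB("dev")
//	defer db.Close()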
| [
"\"dsn\"",
"\"dbType\"",
"\"dsnTest\"",
"\"dbTypeTest\"",
"\"debug\"",
"\"AutoMigrateDb\""
]
| []
| [
"dbTypeTest",
"debug",
"dsn",
"dsnTest",
"AutoMigrateDb",
"dbType"
]
| [] | ["dbTypeTest", "debug", "dsn", "dsnTest", "AutoMigrateDb", "dbType"] | go | 6 | 0 | |
internal/confluence/space.go | package confluence
import (
"fmt"
"net/url"
"os"
"path/filepath"
"strings"
"github.com/agilepathway/gauge-confluence/internal/confluence/api"
"github.com/agilepathway/gauge-confluence/internal/confluence/api/http"
"github.com/agilepathway/gauge-confluence/internal/confluence/time"
"github.com/agilepathway/gauge-confluence/internal/env"
"github.com/agilepathway/gauge-confluence/internal/git"
"github.com/agilepathway/gauge-confluence/internal/logger"
str "github.com/agilepathway/gauge-confluence/internal/strings"
)
type space struct {
key string
homepage homepage
publishedPages map[string]page // Pages published by current invocation of the plugin, keyed by filepath
lastPublished time.LastPublished
modifiedSinceLastPublished bool
apiClient api.Client
err error
}
// newSpace initialises a new space.
func newSpace(apiClient api.Client) space {
return space{publishedPages: make(map[string]page), apiClient: apiClient}
}
func (s *space) retrieveOrGenerateKey() {
retrievedKey := os.Getenv("CONFLUENCE_SPACE_KEY")
if retrievedKey == "" {
s.key = s.generateKey()
} else {
s.key = retrievedKey
}
}
func (s *space) generateKey() string {
gitWebURL, err := git.WebURL()
if err != nil {
s.err = err
return ""
}
return keyFmt(gitWebURL)
}
func keyFmt(u *url.URL) string {
hostAndPath := u.Host + u.Path
alphanumeric := str.StripNonAlphaNumeric(hostAndPath)
return strings.ToUpper(alphanumeric)
}
func (s *space) checkRequiredConfigVars() {
env.GetRequired("CONFLUENCE_BASE_URL")
env.GetRequired("CONFLUENCE_USERNAME")
env.GetRequired("CONFLUENCE_TOKEN")
}
func (s *space) setup() {
s.checkRequiredConfigVars()
s.retrieveOrGenerateKey()
s.createIfDoesNotAlreadyExist()
s.homepage, s.err = newHomepage(s)
s.checkUnmodifiedSinceLastPublish()
}
func (s *space) checkUnmodifiedSinceLastPublish() {
if s.err != nil {
return
}
s.lastPublished = time.NewLastPublished(s.apiClient, s.homepage.id)
if s.lastPublished.Version == 0 {
if s.isBlank() {
return
}
s.err = fmt.Errorf("the space must be empty when you publish for the first time. "+
"It can contain a homepage but no other pages. Space key: %s", s.key)
return
}
cqlTime := s.lastPublished.Time.CQLFormat(s.homepage.cqlTimeOffset())
s.modifiedSinceLastPublished, s.err = s.apiClient.IsSpaceModifiedSinceLastPublished(s.key, cqlTime)
if s.modifiedSinceLastPublished {
s.err = fmt.Errorf("the space has been modified since the last publish. Space key: %s", s.key)
}
}
func (s *space) createIfDoesNotAlreadyExist() {
if (s.err != nil) || (s.exists()) {
return
}
logger.Infof(true, "Space with key %s does not already exist, creating it ...", s.key)
s.createSpace()
}
func (s *space) createSpace() {
if s.err != nil {
return
}
s.err = s.apiClient.CreateSpace(s.key, s.name(), s.description())
if s.err != nil {
e, ok := s.err.(*http.RequestError)
if ok && e.StatusCode == 403 { //nolint:gomnd
s.err = fmt.Errorf("the Confluence user %s does not have permission to create the Confluence Space. "+
"Either rerun the plugin with a user who does have permissions to create the Space, "+
"or get someone to create the Space manually and then run the plugin again. "+
"Also check the password or token you supplied for the Confluence user is correct",
env.GetRequired("CONFLUENCE_USERNAME"))
}
}
}
func (s *space) name() string {
if s.err != nil {
return ""
}
var gitRemoteURLPath string
gitRemoteURLPath, s.err = git.RemoteURLPath()
return fmt.Sprintf("Gauge specs for %s", gitRemoteURLPath)
}
func (s *space) description() string {
if s.err != nil {
return ""
}
gitWebURL, err := git.WebURL()
if err != nil {
s.err = err
return ""
}
return fmt.Sprintf("Gauge (https://gauge.org) specifications from %s, "+
"published automatically by the Gauge Confluence plugin tool "+
"(https://github.com/agilepathway/gauge-confluence) as living documentation. "+
"Do not edit this Space manually. "+
"You can use Confluence's Include Macro (https://confluence.atlassian.com/doc/include-page-macro-139514.html) "+
"to include these specifications in as many of your existing Confluence Spaces as you wish.", gitWebURL)
}
func (s *space) exists() bool {
doesSpaceExist, err := s.apiClient.DoesSpaceExist(s.key)
s.err = err
return doesSpaceExist
}
func (s *space) isBlank() bool {
totalPagesInSpace, err := s.apiClient.TotalPagesInSpace(s.key)
logger.Debugf(false, "Total pages in Confluence space prior to publishing: %d", totalPagesInSpace)
s.err = err
return totalPagesInSpace <= 1
}
func (s *space) parentPageIDFor(path string) string {
parentDir := filepath.Dir(path)
parentPageID := s.publishedPages[parentDir].id
if parentPageID == "" {
return s.homepage.id
}
return parentPageID
}
// Value contains the LastPublished time
type Value struct {
LastPublished string `json:"lastPublished"`
}
// updateLastPublished stores the time of publishing as a Confluence content property,
// so that in the next run of the plugin it can check that the Confluence space has not
// been edited manually in the meantime.
//
// The content property is attached to the Space homepage rather than to the Space itself, as
// attaching the property to the Space requires admin permissions and we want to allow the
// plugin to be run by non-admin users too.
func (s *space) updateLastPublished() error {
value := Value{
LastPublished: time.Now().String(),
}
logger.Debugf(false, "Updating last published version to: %d", s.lastPublished.Version+1)
return s.apiClient.SetContentProperty(s.homepage.id, time.LastPublishedPropertyKey, value, s.lastPublished.Version+1)
}
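// Illustrative shape of the stored property (a sketch, assuming the JSON
// marshalling of Value above):
//
//	key:   time.LastPublishedPropertyKey
//	value: {"lastPublished": "<timestamp from time.Now().String()>"}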
func (s *space) deleteAllPagesExceptHomepage() (err error) {
return s.apiClient.DeleteAllPagesInSpaceExceptHomepage(s.key, s.homepage.id)
}
// deleteEmptyDirPages deletes any pages that the plugin has published to in this run
// that are empty directories
func (s *space) deleteEmptyDirPages() (err error) {
for s.hasEmptyDirPages() {
for key, page := range s.emptyDirPages() {
err = s.apiClient.DeletePage(page.id)
if err != nil {
return err
}
delete(s.publishedPages, key)
}
}
return nil
}
func (s *space) hasEmptyDirPages() bool {
return len(s.emptyDirPages()) > 0
}
func (s *space) emptyDirPages() map[string]page {
emptyDirPages := make(map[string]page)
for key, page := range s.publishedPages {
if s.isEmptyDir(page) {
emptyDirPages[key] = page
}
}
return emptyDirPages
}
func (s *space) isEmptyDir(p page) bool {
return p.isDir && s.isChildless(p)
}
func (s *space) isChildless(p page) bool {
return len(s.children(p)) == 0
}
func (s *space) children(page page) []string {
var children []string
for _, p := range s.publishedPages {
if page.id == p.parentID {
children = append(children, p.id)
}
}
return children
}
| [
"\"CONFLUENCE_SPACE_KEY\""
]
| []
| [
"CONFLUENCE_SPACE_KEY"
]
| [] | ["CONFLUENCE_SPACE_KEY"] | go | 1 | 0 | |
libcontainer/factory_linux.go | // +build linux
package libcontainer
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"regexp"
"runtime/debug"
"strconv"
securejoin "github.com/cyphar/filepath-securejoin"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fs"
"github.com/opencontainers/runc/libcontainer/cgroups/systemd"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/configs/validate"
"github.com/opencontainers/runc/libcontainer/intelrdt"
"github.com/opencontainers/runc/libcontainer/mount"
"github.com/opencontainers/runc/libcontainer/utils"
"github.com/opencontainers/runc/libsysbox/sysbox"
"golang.org/x/sys/unix"
)
const (
stateFilename = "state.json"
execFifoFilename = "exec.fifo"
)
var idRegex = regexp.MustCompile(`^[\w+-\.]+$`)
// InitArgs returns an options func to configure a LinuxFactory with the
// provided init binary path and arguments.
func InitArgs(args ...string) func(*LinuxFactory) error {
return func(l *LinuxFactory) (err error) {
if len(args) > 0 {
			// Resolve relative paths to ensure that it's available
// after directory changes.
if args[0], err = filepath.Abs(args[0]); err != nil {
return newGenericError(err, ConfigInvalid)
}
}
l.InitArgs = args
return nil
}
}
// SystemdCgroups is an options func to configure a LinuxFactory to return
// containers that use systemd to create and manage cgroups.
func SystemdCgroups(l *LinuxFactory) error {
l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager {
return &systemd.Manager{
Cgroups: config,
Paths: paths,
}
}
return nil
}
// Cgroupfs is an options func to configure a LinuxFactory to return containers
// that use the native cgroups filesystem implementation to create and manage
// cgroups.
func Cgroupfs(l *LinuxFactory) error {
l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager {
return &fs.Manager{
Cgroups: config,
Paths: paths,
}
}
return nil
}
// RootlessCgroupfs is an options func to configure a LinuxFactory to return
// containers that use the native cgroups filesystem implementation to create
// and manage cgroups. The difference between RootlessCgroupfs and Cgroupfs is
// that RootlessCgroupfs can transparently handle permission errors that occur
// during rootless container (including euid=0 in userns) setup (while still allowing cgroup usage if
// they've been set up properly).
func RootlessCgroupfs(l *LinuxFactory) error {
l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager {
return &fs.Manager{
Cgroups: config,
Rootless: true,
Paths: paths,
}
}
return nil
}
// IntelRdtFs is an options func to configure a LinuxFactory to return
// containers that use the Intel RDT "resource control" filesystem to
// create and manage Intel RDT resources (e.g., L3 cache, memory bandwidth).
func IntelRdtFs(l *LinuxFactory) error {
l.NewIntelRdtManager = func(config *configs.Config, id string, path string) intelrdt.Manager {
return &intelrdt.IntelRdtManager{
Config: config,
Id: id,
Path: path,
}
}
return nil
}
// TmpfsRoot is an option func to mount LinuxFactory.Root to tmpfs.
func TmpfsRoot(l *LinuxFactory) error {
mounted, err := mount.Mounted(l.Root)
if err != nil {
return err
}
if !mounted {
if err := unix.Mount("tmpfs", l.Root, "tmpfs", 0, ""); err != nil {
return err
}
}
return nil
}
// CriuPath returns an option func to configure a LinuxFactory with the
// provided criupath
func CriuPath(criupath string) func(*LinuxFactory) error {
return func(l *LinuxFactory) error {
l.CriuPath = criupath
return nil
}
}
// SysFs returns an option func that configures a LinuxFactory to return containers that
// use the given sysbox-fs for emulating parts of the container's rootfs.
func SysFs(sysFs *sysbox.Fs) func(*LinuxFactory) error {
return func(l *LinuxFactory) error {
l.SysFs = sysFs
return nil
}
}
// SysMgr returns an option func that configures a LinuxFactory to return containers that
// use the given sysbox-mgr services.
func SysMgr(sysMgr *sysbox.Mgr) func(*LinuxFactory) error {
return func(l *LinuxFactory) error {
l.SysMgr = sysMgr
return nil
}
}
// New returns a linux based container factory based in the root directory and
// configures the factory with the provided option funcs.
func New(root string, options ...func(*LinuxFactory) error) (Factory, error) {
if root != "" {
if err := os.MkdirAll(root, 0700); err != nil {
return nil, newGenericError(err, SystemError)
}
}
l := &LinuxFactory{
Root: root,
InitPath: "/proc/self/exe",
InitArgs: []string{os.Args[0], "init"},
Validator: validate.New(),
CriuPath: "criu",
}
Cgroupfs(l)
for _, opt := range options {
if opt == nil {
continue
}
if err := opt(l); err != nil {
return nil, err
}
}
if l.SysMgr == nil {
l.SysMgr = sysbox.NewMgr("", false)
}
if l.SysFs == nil {
l.SysFs = sysbox.NewFs("", false)
}
return l, nil
}
// LinuxFactory implements the default factory interface for linux based systems.
type LinuxFactory struct {
// Root directory for the factory to store state.
Root string
// InitPath is the path for calling the init responsibilities for spawning
// a container.
InitPath string
// InitArgs are arguments for calling the init responsibilities for spawning
// a container.
InitArgs []string
// CriuPath is the path to the criu binary used for checkpoint and restore of
// containers.
CriuPath string
	// New{u,g}idmapPath is the path to the binaries used for mapping with
// rootless containers.
NewuidmapPath string
NewgidmapPath string
// Validator provides validation to container configurations.
Validator validate.Validator
// NewCgroupsManager returns an initialized cgroups manager for a single container.
NewCgroupsManager func(config *configs.Cgroup, paths map[string]string) cgroups.Manager
// NewIntelRdtManager returns an initialized Intel RDT manager for a single container.
NewIntelRdtManager func(config *configs.Config, id string, path string) intelrdt.Manager
// SysFs is the object representing the sysbox-fs
SysFs *sysbox.Fs
// SysMgr is the object representing the sysbox-mgr
SysMgr *sysbox.Mgr
}
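// Create validates the given id and config, creates the container's state
// directory under the factory root, and returns the container in the
// stopped state.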
func (l *LinuxFactory) Create(id string, config *configs.Config) (Container, error) {
if l.Root == "" {
return nil, newGenericError(fmt.Errorf("invalid root"), ConfigInvalid)
}
if err := l.validateID(id); err != nil {
return nil, err
}
if err := l.Validator.Validate(config); err != nil {
return nil, newGenericError(err, ConfigInvalid)
}
containerRoot, err := securejoin.SecureJoin(l.Root, id)
if err != nil {
return nil, err
}
if _, err := os.Stat(containerRoot); err == nil {
return nil, newGenericError(fmt.Errorf("container with id exists: %v", id), IdInUse)
} else if !os.IsNotExist(err) {
return nil, newGenericError(err, SystemError)
}
if err := os.MkdirAll(containerRoot, 0711); err != nil {
return nil, newGenericError(err, SystemError)
}
if err := os.Chown(containerRoot, unix.Geteuid(), unix.Getegid()); err != nil {
return nil, newGenericError(err, SystemError)
}
c := &linuxContainer{
id: id,
root: containerRoot,
config: config,
initPath: l.InitPath,
initArgs: l.InitArgs,
criuPath: l.CriuPath,
newuidmapPath: l.NewuidmapPath,
newgidmapPath: l.NewgidmapPath,
cgroupManager: l.NewCgroupsManager(config.Cgroups, nil),
sysMgr: l.SysMgr,
sysFs: l.SysFs,
}
if intelrdt.IsCatEnabled() || intelrdt.IsMbaEnabled() {
c.intelRdtManager = l.NewIntelRdtManager(config, id, "")
}
c.state = &stoppedState{c: c}
return c, nil
}
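// Load reconstructs a container from the state previously persisted under
// the factory root for the given id.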
func (l *LinuxFactory) Load(id string) (Container, error) {
if l.Root == "" {
return nil, newGenericError(fmt.Errorf("invalid root"), ConfigInvalid)
}
	// When loading, we need to check whether the id is valid.
if err := l.validateID(id); err != nil {
return nil, err
}
containerRoot, err := securejoin.SecureJoin(l.Root, id)
if err != nil {
return nil, err
}
state, err := l.loadState(containerRoot, id)
if err != nil {
return nil, err
}
r := &nonChildProcess{
processPid: state.InitProcessPid,
processStartTime: state.InitProcessStartTime,
fds: state.ExternalDescriptors,
}
c := &linuxContainer{
initProcess: r,
initProcessStartTime: state.InitProcessStartTime,
id: id,
config: &state.Config,
initPath: l.InitPath,
initArgs: l.InitArgs,
criuPath: l.CriuPath,
newuidmapPath: l.NewuidmapPath,
newgidmapPath: l.NewgidmapPath,
cgroupManager: l.NewCgroupsManager(state.Config.Cgroups, state.CgroupPaths),
root: containerRoot,
created: state.Created,
sysFs: &state.SysFs,
sysMgr: &state.SysMgr,
}
c.state = &loadedState{c: c}
if err := c.refreshState(); err != nil {
return nil, err
}
if intelrdt.IsCatEnabled() || intelrdt.IsMbaEnabled() {
c.intelRdtManager = l.NewIntelRdtManager(&state.Config, id, state.IntelRdtPath)
}
return c, nil
}
func (l *LinuxFactory) Type() string {
return "libcontainer"
}
// StartInitialization loads a container by opening the pipe fd from the parent to read the configuration and state
// This is a low level implementation detail of the reexec and should not be consumed externally
func (l *LinuxFactory) StartInitialization() (err error) {
var (
pipefd, fifofd int
consoleSocket *os.File
envInitPipe = os.Getenv("_LIBCONTAINER_INITPIPE")
envFifoFd = os.Getenv("_LIBCONTAINER_FIFOFD")
envConsole = os.Getenv("_LIBCONTAINER_CONSOLE")
)
// Get the INITPIPE.
pipefd, err = strconv.Atoi(envInitPipe)
if err != nil {
return fmt.Errorf("unable to convert _LIBCONTAINER_INITPIPE=%s to int: %s", envInitPipe, err)
}
var (
pipe = os.NewFile(uintptr(pipefd), "pipe")
it = initType(os.Getenv("_LIBCONTAINER_INITTYPE"))
)
defer pipe.Close()
// Only init processes have FIFOFD.
fifofd = -1
if it == initStandard {
if fifofd, err = strconv.Atoi(envFifoFd); err != nil {
return fmt.Errorf("unable to convert _LIBCONTAINER_FIFOFD=%s to int: %s", envFifoFd, err)
}
}
if envConsole != "" {
console, err := strconv.Atoi(envConsole)
if err != nil {
return fmt.Errorf("unable to convert _LIBCONTAINER_CONSOLE=%s to int: %s", envConsole, err)
}
consoleSocket = os.NewFile(uintptr(console), "console-socket")
defer consoleSocket.Close()
}
// clear the current process's environment to clean any libcontainer
// specific env vars.
os.Clearenv()
defer func() {
		// If we encounter an error during the initialization of the container's
		// init, send it back to the parent process in the form of an initError.
if werr := utils.WriteJSON(pipe, syncT{procError}); werr != nil {
fmt.Fprintln(os.Stderr, err)
return
}
if werr := utils.WriteJSON(pipe, newSystemError(err)); werr != nil {
fmt.Fprintln(os.Stderr, err)
return
}
}()
defer func() {
if e := recover(); e != nil {
err = fmt.Errorf("panic from initialization: %v, %v", e, string(debug.Stack()))
}
}()
i, err := newContainerInit(it, pipe, consoleSocket, fifofd)
if err != nil {
return err
}
// If Init succeeds, syscall.Exec will not return, hence none of the defers will be called.
return i.Init()
}
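// loadState reads and decodes the container's state.json from its root
// directory.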
func (l *LinuxFactory) loadState(root, id string) (*State, error) {
stateFilePath, err := securejoin.SecureJoin(root, stateFilename)
if err != nil {
return nil, err
}
f, err := os.Open(stateFilePath)
if err != nil {
if os.IsNotExist(err) {
return nil, newGenericError(fmt.Errorf("container %q does not exist", id), ContainerNotExists)
}
return nil, newGenericError(err, SystemError)
}
defer f.Close()
var state *State
if err := json.NewDecoder(f).Decode(&state); err != nil {
return nil, newGenericError(err, SystemError)
}
return state, nil
}
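// validateID checks that the id matches the allowed character set and that
// it cannot escape the factory root via path traversal.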
func (l *LinuxFactory) validateID(id string) error {
if !idRegex.MatchString(id) || string(os.PathSeparator)+id != utils.CleanPath(string(os.PathSeparator)+id) {
return newGenericError(fmt.Errorf("invalid id format: %v", id), InvalidIdFormat)
}
return nil
}
// NewuidmapPath returns an option func to configure a LinuxFactory with the
// provided newuidmap binary path.
func NewuidmapPath(newuidmapPath string) func(*LinuxFactory) error {
return func(l *LinuxFactory) error {
l.NewuidmapPath = newuidmapPath
return nil
}
}
// NewgidmapPath returns an option func to configure a LinuxFactory with the
// provided newgidmap binary path.
func NewgidmapPath(newgidmapPath string) func(*LinuxFactory) error {
return func(l *LinuxFactory) error {
l.NewgidmapPath = newgidmapPath
return nil
}
}
| [
"\"_LIBCONTAINER_INITPIPE\"",
"\"_LIBCONTAINER_FIFOFD\"",
"\"_LIBCONTAINER_CONSOLE\"",
"\"_LIBCONTAINER_INITTYPE\""
]
| []
| [
"_LIBCONTAINER_FIFOFD",
"_LIBCONTAINER_INITTYPE",
"_LIBCONTAINER_CONSOLE",
"_LIBCONTAINER_INITPIPE"
]
| [] | ["_LIBCONTAINER_FIFOFD", "_LIBCONTAINER_INITTYPE", "_LIBCONTAINER_CONSOLE", "_LIBCONTAINER_INITPIPE"] | go | 4 | 0 | |
server.go | package main
import (
"context"
"io/ioutil"
"net/http"
"os"
"time"
"github.com/byuoitav/atlona-event-forwarder/connection"
"github.com/gorilla/websocket"
"github.com/labstack/echo"
"github.com/byuoitav/common/db/couch"
"github.com/byuoitav/common/log"
"github.com/byuoitav/common/structs"
)
var (
address = os.Getenv("DB_ADDRESS")
username = os.Getenv("DB_USERNAME")
password = os.Getenv("DB_PASSWORD")
loglevel = os.Getenv("LOG_LEVEL")
eventProcessorHost = os.Getenv("EVENT_PROCESSOR_HOST")
conns map[string]*websocket.Conn
)
func init() {
if len(address) == 0 || len(username) == 0 || len(password) == 0 || len(eventProcessorHost) == 0 {
log.L.Fatalf("One of DB_ADDRESS, DB_USERNAME, DB_PASSWORD, EVENT_PROCESSOR_HOST is not set. Failing...")
}
}
func main() {
e := echo.New()
e.GET("/healthz", func(c echo.Context) error {
return c.String(http.StatusOK, "healthy")
})
go e.Start(":9998")
log.SetLevel(loglevel)
db := couch.NewDB(address, username, password)
agwList, err := db.GetDevicesByType("AtlonaGateway")
if err != nil {
log.L.Fatalf("There was an error getting the AGWList: %v", err)
}
log.L.Debugf("Length of AGWlist: %d", len(agwList))
conns = make(map[string]*websocket.Conn)
for _, i := range agwList {
dialer := &websocket.Dialer{}
address := "ws://"
address += i.Address
address += "/ws"
ws, resp, err := dialer.DialContext(context.TODO(), address, nil)
if err != nil {
log.L.Fatalf("unable to open websocket: %s", err)
}
defer resp.Body.Close()
bytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.L.Fatalf("unable to read bytes: %s", bytes)
}
log.L.Debugf("response from opening websocket: %s", bytes)
conns[i.ID] = ws
}
	// Subscribe each gateway connection to device update events.
for i := range conns {
err = conns[i].WriteMessage(websocket.BinaryMessage, []byte(`{"callBackId":4,"data":{"action":"SetCurrentPage","state":"{\"Page\":\"roomModifyDevices\"}","controller":"App"}}`))
if err != nil {
log.L.Debugf("unable to read message: %s", err)
}
go connection.ReadMessage(conns[i], i)
go connection.SendKeepAlive(conns[i], i)
}
for {
		// wait some time between polls
log.L.Debugf("Waiting to check for list changes")
time.Sleep(1 * time.Minute)
log.L.Debugf("Checking AGWList for changes")
db := couch.NewDB(address, username, password)
newAGWList, err := db.GetDevicesByType("AtlonaGateway")
if err != nil {
log.L.Debugf("there was an issue getting the AGWList: %v", err)
}
//check to see if the length is different
if len(conns) < len(newAGWList) {
			newList := make([]structs.Device, len(newAGWList))
			copy(newList, newAGWList)
log.L.Debugf("comparing the list with the map to find the new one")
for i := 0; i < len(newList); i++ {
//for each object in newList check to see if it exists in the map of conns already
new := newList[i]
match := false
for j := range conns {
if new.ID == j {
match = true
break
}
}
//if it can't be found, create a websocket and add it to the map
if !match {
dialer := &websocket.Dialer{}
address := "ws://"
address += new.Address
address += "/ws"
ws, resp, err := dialer.DialContext(context.TODO(), address, nil)
if err != nil {
log.L.Fatalf("unable to open websocket: %s", err)
}
defer resp.Body.Close()
bytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.L.Fatalf("unable to read bytes: %s", bytes)
}
log.L.Debugf("response from opening websocket: %s", bytes)
conns[new.ID] = ws
//this is the message that tells the gateway to send device update events
err = ws.WriteMessage(websocket.BinaryMessage, []byte(`{"callBackId":4,"data":{"action":"SetCurrentPage","state":"{\"Page\":\"roomModifyDevices\"}","controller":"App"}}`))
if err != nil {
log.L.Debugf("unable to read message: %s", err)
}
go connection.ReadMessage(ws, new.ID)
go connection.SendKeepAlive(ws, new.ID)
}
}
} else if len(conns) > len(newAGWList) {
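			// One or more gateways were removed from the database;
			// drop their stale websocket connections from the map.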
var found bool
for i := range conns {
found = false
for _, x := range newAGWList {
if i == x.ID {
found = true
break
}
}
				if !found {
delete(conns, i)
}
}
}
}
}
| [
"\"DB_ADDRESS\"",
"\"DB_USERNAME\"",
"\"DB_PASSWORD\"",
"\"LOG_LEVEL\"",
"\"EVENT_PROCESSOR_HOST\""
]
| []
| [
"DB_PASSWORD",
"EVENT_PROCESSOR_HOST",
"LOG_LEVEL",
"DB_USERNAME",
"DB_ADDRESS"
]
| [] | ["DB_PASSWORD", "EVENT_PROCESSOR_HOST", "LOG_LEVEL", "DB_USERNAME", "DB_ADDRESS"] | go | 5 | 0 | |
main.go | package main
import (
"os"
log "github.com/Sirupsen/logrus"
"github.com/joho/godotenv"
"github.com/urfave/cli"
)
func main() {
	// Load the env-file first, if it exists
if env := os.Getenv("PLUGIN_ENV_FILE"); env != "" {
godotenv.Load(env)
}
app := cli.NewApp()
app.Name = "email plugin"
app.Usage = "email plugin"
app.Action = run
app.Version = "2.0.2"
app.Flags = []cli.Flag{
// Plugin environment
cli.StringFlag{
Name: "from",
Usage: "from address",
EnvVar: "PLUGIN_FROM",
},
cli.StringFlag{
Name: "host",
Usage: "smtp host",
EnvVar: "EMAIL_HOST,PLUGIN_HOST",
},
cli.IntFlag{
Name: "port",
Value: DefaultPort,
Usage: "smtp port",
EnvVar: "EMAIL_PORT,PLUGIN_PORT",
},
cli.StringFlag{
Name: "username",
Usage: "smtp server username",
EnvVar: "EMAIL_USERNAME,PLUGIN_USERNAME",
},
cli.StringFlag{
Name: "password",
Usage: "smtp server password",
EnvVar: "EMAIL_PASSWORD,PLUGIN_PASSWORD",
},
cli.BoolFlag{
Name: "skip.verify",
Usage: "skip tls verify",
EnvVar: "PLUGIN_SKIP_VERIFY",
},
cli.StringSliceFlag{
Name: "recipients",
Usage: "recipient addresses",
EnvVar: "EMAIL_RECIPIENTS,PLUGIN_RECIPIENTS",
},
cli.BoolFlag{
Name: "recipients.only",
Usage: "send to recipients only",
EnvVar: "PLUGIN_RECIPIENTS_ONLY",
},
cli.StringFlag{
Name: "template.subject",
Value: DefaultSubject,
Usage: "subject template",
EnvVar: "PLUGIN_SUBJECT",
},
cli.StringFlag{
Name: "template.body",
Value: DefaultTemplate,
Usage: "body template",
EnvVar: "PLUGIN_BODY",
},
cli.StringFlag{
Name: "attachment",
Usage: "attachment filename",
EnvVar: "PLUGIN_ATTACHMENT",
},
// Drone environment
// Repo
cli.StringFlag{
Name: "repo.fullName",
Usage: "repository full name",
EnvVar: "DRONE_REPO",
},
cli.StringFlag{
Name: "repo.owner",
Usage: "repository owner",
EnvVar: "DRONE_REPO_OWNER",
},
cli.StringFlag{
Name: "repo.name",
Usage: "repository name",
EnvVar: "DRONE_REPO_NAME",
},
cli.StringFlag{
Name: "repo.scm",
Value: "git",
Usage: "respository scm",
EnvVar: "DRONE_REPO_SCM",
},
cli.StringFlag{
Name: "repo.link",
Usage: "repository link",
EnvVar: "DRONE_REPO_LINK",
},
cli.StringFlag{
Name: "repo.avatar",
Usage: "repository avatar",
EnvVar: "DRONE_REPO_AVATAR",
},
cli.StringFlag{
Name: "repo.branch",
Value: "master",
Usage: "repository default branch",
EnvVar: "DRONE_REPO_BRANCH",
},
cli.BoolFlag{
Name: "repo.private",
Usage: "repository is private",
EnvVar: "DRONE_REPO_PRIVATE",
},
cli.BoolFlag{
Name: "repo.trusted",
Usage: "repository is trusted",
EnvVar: "DRONE_REPO_TRUSTED",
},
// Remote
cli.StringFlag{
Name: "remote.url",
Usage: "repository clone url",
EnvVar: "DRONE_REMOTE_URL",
},
// Commit
cli.StringFlag{
Name: "commit.sha",
Usage: "git commit sha",
EnvVar: "DRONE_COMMIT_SHA",
},
cli.StringFlag{
Name: "commit.ref",
Value: "refs/heads/master",
Usage: "git commit ref",
EnvVar: "DRONE_COMMIT_REF",
},
cli.StringFlag{
Name: "commit.branch",
Value: "master",
Usage: "git commit branch",
EnvVar: "DRONE_COMMIT_BRANCH",
},
cli.StringFlag{
Name: "commit.link",
Usage: "commit link",
EnvVar: "DRONE_COMMIT_LINK",
},
cli.StringFlag{
Name: "commit.message",
Usage: "git commit message",
EnvVar: "DRONE_COMMIT_MESSAGE",
},
cli.StringFlag{
Name: "commit.author.name",
Usage: "git author name",
EnvVar: "DRONE_COMMIT_AUTHOR",
},
cli.StringFlag{
Name: "commit.author.email",
Usage: "git author email",
EnvVar: "DRONE_COMMIT_AUTHOR_EMAIL",
},
cli.StringFlag{
Name: "commit.author.avatar",
Usage: "git author avatar",
EnvVar: "DRONE_COMMIT_AUTHOR_AVATAR",
},
// Build
cli.IntFlag{
Name: "build.number",
Usage: "build number",
EnvVar: "DRONE_BUILD_NUMBER",
},
cli.StringFlag{
Name: "build.event",
Value: "push",
Usage: "build event",
EnvVar: "DRONE_BUILD_EVENT",
},
cli.StringFlag{
Name: "build.status",
Usage: "build status",
Value: "success",
EnvVar: "DRONE_BUILD_STATUS",
},
cli.StringFlag{
Name: "build.link",
Usage: "build link",
EnvVar: "DRONE_BUILD_LINK",
},
cli.Int64Flag{
Name: "build.created",
Usage: "build created",
EnvVar: "DRONE_BUILD_CREATED",
},
cli.Int64Flag{
Name: "build.started",
Usage: "build started",
EnvVar: "DRONE_BUILD_STARTED",
},
cli.Int64Flag{
Name: "build.finished",
Usage: "build finished",
EnvVar: "DRONE_BUILD_FINISHED",
},
// Prev
cli.StringFlag{
Name: "prev.build.status",
Usage: "prior build status",
EnvVar: "DRONE_PREV_BUILD_STATUS",
},
cli.IntFlag{
Name: "prev.build.number",
Usage: "prior build number",
EnvVar: "DRONE_PREV_BUILD_NUMBER",
},
cli.StringFlag{
Name: "prev.commit.sha",
Usage: "prior commit sha",
EnvVar: "DRONE_PREV_COMMIT_SHA",
},
// Job
cli.IntFlag{
Name: "job.number",
Usage: "job number",
EnvVar: "DRONE_JOB_NUMBER",
},
cli.StringFlag{
Name: "job.status",
Usage: "job status",
EnvVar: "DRONE_JOB_STATUS",
},
cli.IntFlag{
Name: "job.exitCode",
Usage: "job exit code",
EnvVar: "DRONE_JOB_EXIT_CODE",
},
cli.Int64Flag{
Name: "job.started",
Usage: "job started",
EnvVar: "DRONE_JOB_STARTED",
},
cli.Int64Flag{
Name: "job.finished",
Usage: "job finished",
EnvVar: "DRONE_JOB_FINISHED",
},
// Yaml
cli.BoolFlag{
Name: "yaml.signed",
Usage: "yaml is signed",
EnvVar: "DRONE_YAML_SIGNED",
},
cli.BoolFlag{
Name: "yaml.verified",
Usage: "yaml is signed and verified",
EnvVar: "DRONE_YAML_VERIFIED",
},
// Tag
cli.StringFlag{
Name: "tag",
Usage: "git tag",
EnvVar: "DRONE_TAG",
},
// PullRequest
cli.IntFlag{
Name: "pullRequest",
Usage: "pull request number",
EnvVar: "DRONE_PULL_REQUEST",
},
// DeployTo
cli.StringFlag{
Name: "deployTo",
Usage: "deployment target",
EnvVar: "DRONE_DEPLOY_TO",
},
}
	if err := app.Run(os.Args); err != nil {
		// log.Fatal already exits with a non-zero status, so no explicit
		// os.Exit call is needed here.
		log.Fatal(err)
	}
}
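// run builds the plugin configuration from the parsed CLI context and
// executes it.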
func run(c *cli.Context) error {
plugin := Plugin{
Repo: Repo{
FullName: c.String("repo.fullName"),
Owner: c.String("repo.owner"),
Name: c.String("repo.name"),
SCM: c.String("repo.scm"),
Link: c.String("repo.link"),
Avatar: c.String("repo.avatar"),
Branch: c.String("repo.branch"),
Private: c.Bool("repo.private"),
Trusted: c.Bool("repo.trusted"),
},
Remote: Remote{
URL: c.String("remote.url"),
},
Commit: Commit{
Sha: c.String("commit.sha"),
Ref: c.String("commit.ref"),
Branch: c.String("commit.branch"),
Link: c.String("commit.link"),
Message: c.String("commit.message"),
Author: Author{
Name: c.String("commit.author.name"),
Email: c.String("commit.author.email"),
Avatar: c.String("commit.author.avatar"),
},
},
Build: Build{
Number: c.Int("build.number"),
Event: c.String("build.event"),
Status: c.String("build.status"),
Link: c.String("build.link"),
Created: c.Int64("build.created"),
Started: c.Int64("build.started"),
Finished: c.Int64("build.finished"),
},
Prev: Prev{
Build: PrevBuild{
Status: c.String("prev.build.status"),
Number: c.Int("prev.build.number"),
},
Commit: PrevCommit{
Sha: c.String("prev.commit.sha"),
},
},
Job: Job{
Status: c.String("job.status"),
ExitCode: c.Int("job.exitCode"),
Started: c.Int64("job.started"),
Finished: c.Int64("job.finished"),
},
Yaml: Yaml{
Signed: c.Bool("yaml.signed"),
Verified: c.Bool("yaml.verified"),
},
Tag: c.String("tag"),
PullRequest: c.Int("pullRequest"),
DeployTo: c.String("deployTo"),
Config: Config{
From: c.String("from"),
Host: c.String("host"),
Port: c.Int("port"),
Username: c.String("username"),
Password: c.String("password"),
SkipVerify: c.Bool("skip.verify"),
Recipients: c.StringSlice("recipients"),
RecipientsOnly: c.Bool("recipients.only"),
Subject: c.String("template.subject"),
Body: c.String("template.body"),
Attachment: c.String("attachment"),
},
}
return plugin.Exec()
}
| [
"\"PLUGIN_ENV_FILE\""
]
| []
| [
"PLUGIN_ENV_FILE"
]
| [] | ["PLUGIN_ENV_FILE"] | go | 1 | 0 | |
platform/osx/detect.py | import os
import sys
def is_active():
return True
def get_name():
return "OSX"
def can_build():
if (sys.platform == "darwin" or ("OSXCROSS_ROOT" in os.environ)):
return True
return False
def get_opts():
from SCons.Variables import EnumVariable
return [
('osxcross_sdk', 'OSXCross SDK version', 'darwin14'),
EnumVariable('debug_symbols', 'Add debug symbols to release version', 'yes', ('yes', 'no', 'full')),
]
def get_flags():
return [
]
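# configure sets compiler, linker and platform flags on the SCons environment,
# covering both native macOS builds and OSXCross cross-compilation.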
def configure(env):
## Build type
if (env["target"] == "release"):
env.Prepend(CCFLAGS=['-O3', '-ffast-math', '-fomit-frame-pointer', '-ftree-vectorize', '-msse2'])
if (env["debug_symbols"] == "yes"):
env.Prepend(CCFLAGS=['-g1'])
if (env["debug_symbols"] == "full"):
env.Prepend(CCFLAGS=['-g2'])
elif (env["target"] == "release_debug"):
env.Prepend(CCFLAGS=['-O2', '-DDEBUG_ENABLED'])
if (env["debug_symbols"] == "yes"):
env.Prepend(CCFLAGS=['-g1'])
if (env["debug_symbols"] == "full"):
env.Prepend(CCFLAGS=['-g2'])
elif (env["target"] == "debug"):
env.Prepend(CCFLAGS=['-g3', '-DDEBUG_ENABLED', '-DDEBUG_MEMORY_ENABLED'])
## Architecture
is64 = sys.maxsize > 2**32
if (env["bits"] == "default"):
env["bits"] = "64" if is64 else "32"
## Compiler configuration
if "OSXCROSS_ROOT" not in os.environ: # regular native build
if (env["bits"] == "fat"):
env.Append(CCFLAGS=['-arch', 'i386', '-arch', 'x86_64'])
env.Append(LINKFLAGS=['-arch', 'i386', '-arch', 'x86_64'])
elif (env["bits"] == "32"):
env.Append(CCFLAGS=['-arch', 'i386'])
env.Append(LINKFLAGS=['-arch', 'i386'])
else: # 64-bit, default
env.Append(CCFLAGS=['-arch', 'x86_64'])
env.Append(LINKFLAGS=['-arch', 'x86_64'])
else: # osxcross build
root = os.environ.get("OSXCROSS_ROOT", 0)
if env["bits"] == "fat":
basecmd = root + "/target/bin/x86_64-apple-" + env["osxcross_sdk"] + "-"
env.Append(CCFLAGS=['-arch', 'i386', '-arch', 'x86_64'])
env.Append(LINKFLAGS=['-arch', 'i386', '-arch', 'x86_64'])
elif env["bits"] == "32":
basecmd = root + "/target/bin/i386-apple-" + env["osxcross_sdk"] + "-"
else: # 64-bit, default
basecmd = root + "/target/bin/x86_64-apple-" + env["osxcross_sdk"] + "-"
ccache_path = os.environ.get("CCACHE")
        ccache_path = os.environ.get("CCACHE")
        if ccache_path is None:
env['CC'] = basecmd + "cc"
env['CXX'] = basecmd + "c++"
else:
            # There aren't any ccache wrappers available for OS X cross-compile;
            # to enable caching we need to prepend the path to the ccache binary.
env['CC'] = ccache_path + ' ' + basecmd + "cc"
env['CXX'] = ccache_path + ' ' + basecmd + "c++"
env['AR'] = basecmd + "ar"
env['RANLIB'] = basecmd + "ranlib"
env['AS'] = basecmd + "as"
if (env["CXX"] == "clang++"):
env.Append(CPPFLAGS=['-DTYPED_METHOD_BIND'])
env["CC"] = "clang"
env["LD"] = "clang++"
## Dependencies
if env['builtin_libtheora']:
env["x86_libtheora_opt_gcc"] = True
## Flags
env.Append(CPPPATH=['#platform/osx'])
env.Append(CPPFLAGS=['-DOSX_ENABLED', '-DUNIX_ENABLED', '-DGLES_ENABLED', '-DAPPLE_STYLE_KEYS', '-DCOREAUDIO_ENABLED'])
env.Append(LINKFLAGS=['-framework', 'Cocoa', '-framework', 'Carbon', '-framework', 'OpenGL', '-framework', 'AGL', '-framework', 'AudioUnit', '-framework', 'CoreAudio', '-lz', '-framework', 'IOKit', '-framework', 'ForceFeedback'])
env.Append(LIBS=['pthread'])
env.Append(CPPFLAGS=['-mmacosx-version-min=10.9'])
env.Append(LINKFLAGS=['-mmacosx-version-min=10.9'])
| []
| []
| [
"CCACHE",
"OSXCROSS_ROOT"
]
| [] | ["CCACHE", "OSXCROSS_ROOT"] | python | 2 | 0 | |
src/com/density/ezsbt/util/SbtPlugin.java | /* Copyright 2015 Density Technologies
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.density.ezsbt.util;
import org.eclipse.jface.preference.IPreferenceStore;
import org.eclipse.ui.plugin.AbstractUIPlugin;
public class SbtPlugin extends AbstractUIPlugin {
static private SbtPlugin instance;
static public SbtPlugin getInstance(){
return instance;
}
public SbtPlugin(){
super();
instance = this;
}
@Override
protected void initializeDefaultPreferences(IPreferenceStore store) {
store.setDefault(PluginConstants.COMMANDS_NAME_KEY,
CommandsConvertor.arrayToString(PluginConstants.DEFAULT_COMMANDS));
store.setDefault(PluginConstants.JAVA_HOME_KEY, getJavaHome());
store.setDefault(PluginConstants.JAVA_OPTIONS_KEY, PluginConstants.DEFAULT_JAVA_OPTIONS);
store.setDefault(PluginConstants.HIDE_RESOLVE_KEY, PluginConstants.DEFAULT_HIDE_RESOLVE);
}
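	/**
	 * Returns the JAVA_HOME environment variable if it is set and non-empty,
	 * otherwise falls back to the "java.home" system property.
	 */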
protected String getJavaHome() {
String java_home = null;
if (System.getenv("JAVA_HOME")!=null && !System.getenv("JAVA_HOME").isEmpty()) {
java_home = System.getenv("JAVA_HOME");
} else
java_home = System.getProperty("java.home");
return java_home;
}
}
| [
"\"JAVA_HOME\"",
"\"JAVA_HOME\"",
"\"JAVA_HOME\""
]
| []
| [
"JAVA_HOME"
]
| [] | ["JAVA_HOME"] | java | 1 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "code_classifier.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
janitor/finance.py | """
Finance-specific data cleaning functions.
"""
import json
from datetime import date
from functools import lru_cache
import pandas as pd
import pandas_flavor as pf
import requests
from janitor.errors import JanitorError
from .utils import check, deprecated_alias, is_connected
currency_set = {
"AUD",
"BGN",
"BRL",
"CAD",
"CHF",
"CNY",
"CZK",
"DKK",
"EUR",
"GBP",
"HKD",
"HRK",
"HUF",
"IDR",
"ILS",
"INR",
"ISK",
"JPY",
"KRW",
"MXN",
"MYR",
"NOK",
"NZD",
"PHP",
"PLN",
"RON",
"RUB",
"SEK",
"SGD",
"THB",
"TRY",
"USD",
"ZAR",
}
# Dictionary of recognized World Bank countries and their abbreviations
wb_country_dict = {
"Aruba": "ABW",
"Afghanistan": "AFG",
"Angola": "AGO",
"Albania": "ALB",
"Andorra": "AND",
"Arab World": "ARB",
"United Arab Emirates": "ARE",
"Argentina": "ARG",
"Armenia": "ARM",
"American Samoa": "ASM",
"Antigua and Barbuda": "ATG",
"Australia": "AUS",
"Austria": "AUT",
"Azerbaijan": "AZE",
"Burundi": "BDI",
"Belgium": "BEL",
"Benin": "BEN",
"Burkina Faso": "BFA",
"Bangladesh": "BGD",
"Bulgaria": "BGR",
"Bahrain": "BHR",
"Bahamas, The": "BHS",
"Bosnia and Herzegovina": "BIH",
"Belarus": "BLR",
"Belize": "BLZ",
"Bermuda": "BMU",
"Bolivia": "BOL",
"Brazil": "BRA",
"Barbados": "BRB",
"Brunei Darussalam": "BRN",
"Bhutan": "BTN",
"Botswana": "BWA",
"Central African Republic": "CAF",
"Canada": "CAN",
"Central Europe and the Baltics": "CEB",
"Switzerland": "CHE",
"Channel Islands": "CHI",
"Chile": "CHL",
"China": "CHN",
"Cote d'Ivoire": "CIV",
"Cameroon": "CMR",
"Congo, Dem. Rep.": "COD",
"Congo, Rep.": "COG",
"Colombia": "COL",
"Comoros": "COM",
"Cabo Verde": "CPV",
"Costa Rica": "CRI",
"Caribbean small states": "CSS",
"Cuba": "CUB",
"Curacao": "CUW",
"Cayman Islands": "CYM",
"Cyprus": "CYP",
"Czech Republic": "CZE",
"Germany": "DEU",
"Djibouti": "DJI",
"Dominica": "DMA",
"Denmark": "DNK",
"Dominican Republic": "DOM",
"Algeria": "DZA",
"East Asia & Pacific (excluding high income)": "EAP",
"Early-demographic dividend": "EAR",
"East Asia & Pacific": "EAS",
"Europe & Central Asia (excluding high income)": "ECA",
"Europe & Central Asia": "ECS",
"Ecuador": "ECU",
"Egypt, Arab Rep.": "EGY",
"Euro area": "EMU",
"Eritrea": "ERI",
"Spain": "ESP",
"Estonia": "EST",
"Ethiopia": "ETH",
"European Union": "EUU",
"Fragile and conflict affected situations": "FCS",
"Finland": "FIN",
"Fiji": "FJI",
"France": "FRA",
"Faroe Islands": "FRO",
"Micronesia, Fed. Sts.": "FSM",
"Gabon": "GAB",
"United Kingdom": "GBR",
"Georgia": "GEO",
"Ghana": "GHA",
"Gibraltar": "GIB",
"Guinea": "GIN",
"Gambia, The": "GMB",
"Guinea-Bissau": "GNB",
"Equatorial Guinea": "GNQ",
"Greece": "GRC",
"Grenada": "GRD",
"Greenland": "GRL",
"Guatemala": "GTM",
"Guam": "GUM",
"Guyana": "GUY",
"High income": "HIC",
"Hong Kong SAR, China": "HKG",
"Honduras": "HND",
"Heavily indebted poor countries (HIPC)": "HPC",
"Croatia": "HRV",
"Haiti": "HTI",
"Hungary": "HUN",
"IBRD only": "IBD",
"IDA & IBRD total": "IBT",
"IDA total": "IDA",
"IDA blend": "IDB",
"Indonesia": "IDN",
"IDA only": "IDX",
"Isle of Man": "IMN",
"India": "IND",
"Not classified": "INX",
"Ireland": "IRL",
"Iran, Islamic Rep.": "IRN",
"Iraq": "IRQ",
"Iceland": "ISL",
"Israel": "ISR",
"Italy": "ITA",
"Jamaica": "JAM",
"Jordan": "JOR",
"Japan": "JPN",
"Kazakhstan": "KAZ",
"Kenya": "KEN",
"Kyrgyz Republic": "KGZ",
"Cambodia": "KHM",
"Kiribati": "KIR",
"St. Kitts and Nevis": "KNA",
"Korea, Rep.": "KOR",
"Kuwait": "KWT",
"Latin America & Caribbean (excluding high income)": "LAC",
"Lao PDR": "LAO",
"Lebanon": "LBN",
"Liberia": "LBR",
"Libya": "LBY",
"St. Lucia": "LCA",
"Latin America & Caribbean": "LCN",
"Least developed countries: UN classification": "LDC",
"Low income": "LIC",
"Liechtenstein": "LIE",
"Sri Lanka": "LKA",
"Lower middle income": "LMC",
"Low & middle income": "LMY",
"Lesotho": "LSO",
"Late-demographic dividend": "LTE",
"Lithuania": "LTU",
"Luxembourg": "LUX",
"Latvia": "LVA",
"Macao SAR, China": "MAC",
"St. Martin (French part)": "MAF",
"Morocco": "MAR",
"Monaco": "MCO",
"Moldova": "MDA",
"Madagascar": "MDG",
"Maldives": "MDV",
"Middle East & North Africa": "MEA",
"Mexico": "MEX",
"Marshall Islands": "MHL",
"Middle income": "MIC",
"North Macedonia": "MKD",
"Mali": "MLI",
"Malta": "MLT",
"Myanmar": "MMR",
"Middle East & North Africa (excluding high income)": "MNA",
"Montenegro": "MNE",
"Mongolia": "MNG",
"Northern Mariana Islands": "MNP",
"Mozambique": "MOZ",
"Mauritania": "MRT",
"Mauritius": "MUS",
"Malawi": "MWI",
"Malaysia": "MYS",
"North America": "NAC",
"Namibia": "NAM",
"New Caledonia": "NCL",
"Niger": "NER",
"Nigeria": "NGA",
"Nicaragua": "NIC",
"Netherlands": "NLD",
"Norway": "NOR",
"Nepal": "NPL",
"Nauru": "NRU",
"New Zealand": "NZL",
"OECD members": "OED",
"Oman": "OMN",
"Other small states": "OSS",
"Pakistan": "PAK",
"Panama": "PAN",
"Peru": "PER",
"Philippines": "PHL",
"Palau": "PLW",
"Papua New Guinea": "PNG",
"Poland": "POL",
"Pre-demographic dividend": "PRE",
"Puerto Rico": "PRI",
"Korea, Dem. People's Rep.": "PRK",
"Portugal": "PRT",
"Paraguay": "PRY",
"West Bank and Gaza": "PSE",
"Pacific island small states": "PSS",
"Post-demographic dividend": "PST",
"French Polynesia": "PYF",
"Qatar": "QAT",
"Romania": "ROU",
"Russian Federation": "RUS",
"Rwanda": "RWA",
"South Asia": "SAS",
"Saudi Arabia": "SAU",
"Sudan": "SDN",
"Senegal": "SEN",
"Singapore": "SGP",
"Solomon Islands": "SLB",
"Sierra Leone": "SLE",
"El Salvador": "SLV",
"San Marino": "SMR",
"Somalia": "SOM",
"Serbia": "SRB",
"Sub-Saharan Africa (excluding high income)": "SSA",
"South Sudan": "SSD",
"Sub-Saharan Africa": "SSF",
"Small states": "SST",
"Sao Tome and Principe": "STP",
"Suriname": "SUR",
"Slovak Republic": "SVK",
"Slovenia": "SVN",
"Sweden": "SWE",
"Eswatini": "SWZ",
"Sint Maarten (Dutch part)": "SXM",
"Seychelles": "SYC",
"Syrian Arab Republic": "SYR",
"Turks and Caicos Islands": "TCA",
"Chad": "TCD",
"East Asia & Pacific (IDA & IBRD countries)": "TEA",
"Europe & Central Asia (IDA & IBRD countries)": "TEC",
"Togo": "TGO",
"Thailand": "THA",
"Tajikistan": "TJK",
"Turkmenistan": "TKM",
"Latin America & the Caribbean (IDA & IBRD countries)": "TLA",
"Timor-Leste": "TLS",
"Middle East & North Africa (IDA & IBRD countries)": "TMN",
"Tonga": "TON",
"South Asia (IDA & IBRD)": "TSA",
"Sub-Saharan Africa (IDA & IBRD countries)": "TSS",
"Trinidad and Tobago": "TTO",
"Tunisia": "TUN",
"Turkey": "TUR",
"Tuvalu": "TUV",
"Tanzania": "TZA",
"Uganda": "UGA",
"Ukraine": "UKR",
"Upper middle income": "UMC",
"Uruguay": "URY",
"United States": "USA",
"Uzbekistan": "UZB",
"St. Vincent and the Grenadines": "VCT",
"Venezuela, RB": "VEN",
"British Virgin Islands": "VGB",
"Virgin Islands (U.S.)": "VIR",
"Vietnam": "VNM",
"Vanuatu": "VUT",
"World": "WLD",
"Samoa": "WSM",
"Kosovo": "XKX",
"Yemen, Rep.": "YEM",
"South Africa": "ZAF",
"Zambia": "ZMB",
"Zimbabwe": "ZWE",
}
def _check_currency(currency: str):
"""Check that currency is in supported set."""
if currency not in currency_set:
raise ValueError(
f"currency {currency} not in supported currency set, "
f"{currency_set}"
)
def _check_wb_country(country: str):
"""Check that world bank country is in supported set."""
if (country not in wb_country_dict.keys()) & (
country not in wb_country_dict.values() # noqa: PD011
):
raise ValueError(
f"country {country} not in supported World Bank country dict, "
f"{wb_country_dict}"
)
def _check_wb_years(year: int):
"""Check that year is in world bank dataset years."""
if year < 1960:
raise ValueError("year value must be 1960 or later")
# @lru_cache(maxsize=32)
# def _convert_currency(
# api_key: str,
# from_currency: str = None,
# to_currency: str = None,
# historical_date: Optional[date] = None,
# ) -> float:
# """
# Currency conversion for Pandas DataFrame column.
# Helper function for `convert_currency` method.
# The API used is https://exchangeratesapi.io/.
# """
# url = "http://api.exchangeratesapi.io"
# if historical_date:
# check("historical_date", historical_date, [datetime, date])
# if isinstance(historical_date, datetime):
# if historical_date < datetime(1999, 1, 4):
# raise ValueError(
# "historical_date:datetime must be later than 1999-01-04!"
# )
# string_date = str(historical_date)[:10]
# else:
# if historical_date < date(1999, 1, 4):
# raise ValueError(
# "historical_date:date must be later than 1999-01-04!"
# )
# string_date = str(historical_date)
# url = url + "/%s" % string_date
# else:
# url = url + "/latest"
# _check_currency(from_currency)
# _check_currency(to_currency)
# payload = {
# # "base": from_currency,
# "symbols": to_currency,
# "access_key": api_key,
# }
# result = requests.get(url, params=payload)
# if result.status_code != 200:
# raise ConnectionError(
# "Exchange Rate API failed to receive a 200 "
# "response from the server. "
# "Please try again later."
# )
# currency_dict = json.loads(result.text)
# rate = currency_dict["rates"][to_currency]
# return rate
@pf.register_dataframe_method
@deprecated_alias(colname="column_name")
def convert_currency(
df: pd.DataFrame,
api_key: str,
column_name: str = None,
from_currency: str = None,
to_currency: str = None,
historical_date: date = None,
make_new_column: bool = False,
) -> pd.DataFrame:
"""Deprecated function."""
raise JanitorError(
"The `convert_currency` function has been temporarily disabled due to "
"exchangeratesapi.io disallowing free pinging of its API. "
"(Our tests started to fail due to this issue.) "
"There is no easy way around this problem "
"except to find a new API to call on."
"Please comment on issue #829 "
"(https://github.com/pyjanitor-devs/pyjanitor/issues/829) "
"if you know of an alternative API that we can call on, "
"otherwise the function will be removed in pyjanitor's 1.0 release."
)
# @pf.register_dataframe_method
# @deprecated_alias(colname="column_name")
# def convert_currency(
# df: pd.DataFrame,
# api_key: str,
# column_name: str = None,
# from_currency: str = None,
# to_currency: str = None,
# historical_date: date = None,
# make_new_column: bool = False,
# ) -> pd.DataFrame:
# """
# Converts a column from one currency to another, with an option to
# convert based on historical exchange values.
# On April 10 2021,
# we discovered that there was no more free API available.
# Thus, an API key is required to perform currency conversion.
# API keys should be set as an environment variable,
#     for example, ``EXCHANGE_RATE_APIKEY``,
#     and then passed into the function
#     by calling ``os.getenv("EXCHANGE_RATE_APIKEY")``.
# :param df: A pandas dataframe.
# :param api_key: exchangeratesapi.io API key.
# :param column_name: Name of the new column. Should be a string, in order
# for the column name to be compatible with the Feather binary
# format (this is a useful thing to have).
# :param from_currency: The base currency to convert from.
# May be any of: currency_set = {"AUD", "BGN", "BRL", "CAD", "CHF",
# "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HRK", "HUF", "IDR",
# "ILS", "INR", "ISK", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD",
# "PHP", "PLN", "RON", "RUB", "SEK", "SGD", "THB", "TRY", "USD",
# "ZAR"}
# :param to_currency: The target currency to convert to.
# May be any of: currency_set = {"AUD", "BGN", "BRL", "CAD", "CHF",
# "CNY", "CZK", "DKK", "EUR", "GBP", "HKD", "HRK", "HUF", "IDR",
# "ILS", "INR", "ISK", "JPY", "KRW", "MXN", "MYR", "NOK", "NZD",
# "PHP", "PLN", "RON", "RUB", "SEK", "SGD", "THB", "TRY", "USD",
# "ZAR"}
# :param historical_date: If supplied,
# get exchange rate on a certain date.
# If not supplied, get the latest exchange rate.
# The exchange rates go back to Jan. 4, 1999.
# :param make_new_column: Generates new column
# for converted currency if True,
# otherwise, converts currency in place.
# :returns: The dataframe with converted currency column.
# .. code-block:: python
# import pandas as pd
# import janitor
# from datetime import date
# data_dict = {
# "a": [1.23452345, 2.456234, 3.2346125] * 3,
# "Bell__Chart": [1/3, 2/7, 3/2] * 3,
# "decorated-elephant": [1/234, 2/13, 3/167] * 3,
# "animals": ["rabbit", "leopard", "lion"] * 3,
# "cities": ["Cambridge", "Shanghai", "Basel"] * 3,
# }
# example_dataframe = pd.DataFrame(data_dict)
# Example: Converting a column from one currency to another
# using rates from 01/01/2018.
# .. code-block:: python
# example_dataframe.convert_currency('a', from_currency='USD',
# to_currency='EUR', historical_date=date(2018,1,1))
# Output:
# .. code-block:: python
# a Bell__Chart decorated-elephant animals cities
# 0 1.029370 0.333333 0.004274 rabbit Cambridge
# 1 2.048056 0.285714 0.153846 leopard Shanghai
# 2 2.697084 1.500000 0.017964 lion Basel
# 3 1.029370 0.333333 0.004274 rabbit Cambridge
# 4 2.048056 0.285714 0.153846 leopard Shanghai
# 5 2.697084 1.500000 0.017964 lion Basel
# 6 1.029370 0.333333 0.004274 rabbit Cambridge
# 7 2.048056 0.285714 0.153846 leopard Shanghai
# 8 2.697084 1.500000 0.017964 lion Basel
# """
# rate = _convert_currency(
# api_key, from_currency, to_currency, historical_date
# )
# if make_new_column:
# # new_column_name = column_name + "_" + to_currency
# column_name = column_name + "_" + to_currency
# df = df.assign(column_name=df[column_name] * rate)
# return df
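# Results are memoized via lru_cache, so repeated inflations with the same
# (country, currency_year, to_year) arguments hit the World Bank API only
# once per process.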
@lru_cache(maxsize=32)
def _inflate_currency(
country: str = None, currency_year: int = None, to_year: int = None
) -> float:
"""
Currency inflation for Pandas DataFrame column.
Helper function for `inflate_currency` method.
The API used is the World Bank Indicator API:
https://datahelpdesk.worldbank.org/knowledgebase/articles/889392-about-the-indicators-api-documentation
"""
# Check all inputs are correct data type
check("country", country, [str])
check("currency_year", currency_year, [int])
check("to_year", to_year, [int])
# Get WB country abbreviation
_check_wb_country(country)
if country in wb_country_dict.keys():
country = wb_country_dict[country]
else:
# `country` is already a correct abbreviation; do nothing
pass
_check_wb_years(currency_year)
_check_wb_years(to_year)
url = (
"https://api.worldbank.org/v2/country/"
+ country
+ "/indicator/FP.CPI.TOTL?date="
+ str(min(currency_year, to_year))
+ ":"
+ str(max(currency_year, to_year))
+ "&format=json"
)
result = requests.get(url)
if result.status_code != 200:
raise ConnectionError(
"WB Indicator API failed to receive a 200 "
"response from the server. "
"Please try again later."
)
# The API returns a list of two items;
# the second item in the list is what we want
inflation_dict = json.loads(result.text)[1]
# Error checking
if inflation_dict is None:
raise ValueError(
"The WB Indicator API returned nothing. "
"This likely means the currency_year and "
"to_year are outside of the year range for "
"which the WB has inflation data for the "
"specified country."
)
# Create new dict with only the year and inflation values
inflation_dict_ready = {
int(inflation_dict[i]["date"]): float(inflation_dict[i]["value"])
for i in range(len(inflation_dict))
if inflation_dict[i]["value"] is not None
}
# Error catching
if currency_year not in inflation_dict_ready.keys():
raise ValueError(
f"The WB Indicator API does not have inflation "
f"data for {currency_year} for {country}."
)
if to_year not in inflation_dict_ready.keys():
raise ValueError(
f"The WB Indicator API does not have inflation "
f"data for {to_year} for {country}."
)
inflator = (
inflation_dict_ready[to_year] / inflation_dict_ready[currency_year]
)
return inflator
@pf.register_dataframe_method
def inflate_currency(
df: pd.DataFrame,
column_name: str = None,
country: str = None,
currency_year: int = None,
to_year: int = None,
make_new_column: bool = False,
) -> pd.DataFrame:
"""
Inflates a column of monetary values from one year to another, based on
the currency's country.
The provided country can be any economy name or code from the World Bank
[list of economies]
(https://databank.worldbank.org/data/download/site-content/CLASS.xls).
**Note**: This method mutates the original DataFrame.
Method chaining usage example:
>>> import pandas as pd
>>> import janitor.finance
>>> df = pd.DataFrame({"profit":[100.10, 200.20, 300.30, 400.40, 500.50]})
>>> df
profit
0 100.1
1 200.2
2 300.3
3 400.4
4 500.5
>>> df.inflate_currency(
... column_name='profit',
... country='USA',
... currency_year=2015,
... to_year=2018,
... make_new_column=True
... )
profit profit_2018
0 100.1 106.050596
1 200.2 212.101191
2 300.3 318.151787
3 400.4 424.202382
4 500.5 530.252978
:param df: A pandas DataFrame.
:param column_name: Name of the column containing monetary
values to inflate.
:param country: The country associated with the currency being inflated.
May be any economy or code from the World Bank [List of economies]
(https://databank.worldbank.org/data/download/site-content/CLASS.xls).
:param currency_year: The currency year to inflate from.
The year should be 1960 or later.
:param to_year: The currency year to inflate to.
The year should be 1960 or later.
:param make_new_column: Generates new column for inflated currency if
True, otherwise, inflates currency in place.
:returns: The dataframe with inflated currency column.
"""
inflator = _inflate_currency(country, currency_year, to_year)
if make_new_column:
new_column_name = column_name + "_" + str(to_year)
df[new_column_name] = df[column_name] * inflator
else:
df[column_name] = df[column_name] * inflator
return df
def convert_stock(stock_symbol: str) -> str:
"""
This function takes in a stock symbol as a parameter,
queries an API for the companies full name and returns
it
Functional usage example:
```python
import janitor.finance
janitor.finance.convert_stock("aapl")
```
:param stock_symbol: Stock ticker Symbol
:raises ConnectionError: Internet connection is not available
:returns: Full company name
"""
if is_connected("www.google.com"):
stock_symbol = stock_symbol.upper()
return get_symbol(stock_symbol)
else:
raise ConnectionError(
"Connection Error: Client Not Connected to Internet"
)
def get_symbol(symbol: str):
"""
    This is a helper function to get a company's full
    name based on the stock symbol.
Functional usage example:
```python
import janitor.finance
janitor.finance.get_symbol("aapl")
```
    :param symbol: The stock symbol used to query the API
        for the company's full name.
:return: Company full name
"""
result = requests.get(
"http://d.yimg.com/autoc."
+ "finance.yahoo.com/autoc?query={}®ion=1&lang=en".format(symbol)
).json()
for x in result["ResultSet"]["Result"]:
if x["symbol"] == symbol:
return x["name"]
else:
return None
| []
| []
| [
"EXCHANGE_RATE_APIKEY"
]
| [] | ["EXCHANGE_RATE_APIKEY"] | python | 1 | 0 | |
tools/vendor/github.com/minio/minio/cmd/common-main.go | // Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"crypto/tls"
"crypto/x509"
"encoding/gob"
"errors"
"fmt"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"time"
fcolor "github.com/fatih/color"
"github.com/go-openapi/loads"
dns2 "github.com/miekg/dns"
"github.com/minio/cli"
consoleCerts "github.com/minio/console/pkg/certs"
"github.com/minio/console/restapi"
"github.com/minio/console/restapi/operations"
"github.com/minio/kes"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/color"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/handlers"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/certs"
"github.com/minio/pkg/console"
"github.com/minio/pkg/ellipses"
"github.com/minio/pkg/env"
xnet "github.com/minio/pkg/net"
)
// serverDebugLog will enable debug printing
var serverDebugLog = env.Get("_MINIO_SERVER_DEBUG", config.EnableOff) == config.EnableOn
var defaultAWSCredProvider []credentials.Provider
func init() {
rand.Seed(time.Now().UTC().UnixNano())
logger.Init(GOPATH, GOROOT)
logger.RegisterError(config.FmtError)
if IsKubernetes() || IsDocker() || IsBOSH() || IsDCOS() || IsKubernetesReplicaSet() || IsPCFTile() {
		// 30 seconds matches the orchestrator DNS TTLs; use
		// a 5 second timeout for lookups from DNS servers.
globalDNSCache = xhttp.NewDNSCache(30*time.Second, 5*time.Second, logger.LogOnceIf)
} else {
		// On bare-metal, DNS does not change often, so it is
		// safe to assume a higher timeout of up to 10 minutes.
globalDNSCache = xhttp.NewDNSCache(10*time.Minute, 5*time.Second, logger.LogOnceIf)
}
initGlobalContext()
globalForwarder = handlers.NewForwarder(&handlers.Forwarder{
PassHost: true,
RoundTripper: newGatewayHTTPTransport(1 * time.Hour),
Logger: func(err error) {
if err != nil && !errors.Is(err, context.Canceled) {
logger.LogIf(GlobalContext, err)
}
},
})
globalTransitionState = newTransitionState()
console.SetColor("Debug", fcolor.New())
gob.Register(StorageErr(""))
defaultAWSCredProvider = []credentials.Provider{
&credentials.IAM{
Client: &http.Client{
Transport: NewGatewayHTTPTransport(),
},
},
}
}
const consolePrefix = "CONSOLE_"
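// minioConfigToConsoleFeatures exports the relevant MinIO configuration as
// CONSOLE_* environment variables so the embedded console can pick it up.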
func minioConfigToConsoleFeatures() {
os.Setenv("CONSOLE_PBKDF_PASSPHRASE", restapi.RandomCharString(16))
os.Setenv("CONSOLE_PBKDF_SALT", restapi.RandomCharString(8))
os.Setenv("CONSOLE_MINIO_SERVER", getAPIEndpoints()[0])
if value := os.Getenv("MINIO_LOG_QUERY_URL"); value != "" {
os.Setenv("CONSOLE_LOG_QUERY_URL", value)
}
if value := os.Getenv("MINIO_LOG_QUERY_AUTH_TOKEN"); value != "" {
os.Setenv("CONSOLE_LOG_QUERY_AUTH_TOKEN", value)
}
// Enable if prometheus URL is set.
if value := os.Getenv("MINIO_PROMETHEUS_URL"); value != "" {
os.Setenv("CONSOLE_PROMETHEUS_URL", value)
}
// Enable if LDAP is enabled.
if globalLDAPConfig.Enabled {
os.Setenv("CONSOLE_LDAP_ENABLED", config.EnableOn)
}
// if IDP is enabled, set IDP environment variables
if globalOpenIDConfig.URL != nil {
os.Setenv("CONSOLE_IDP_URL", globalOpenIDConfig.DiscoveryDoc.Issuer)
os.Setenv("CONSOLE_IDP_SCOPES", strings.Join(globalOpenIDConfig.DiscoveryDoc.ScopesSupported, ","))
os.Setenv("CONSOLE_IDP_CLIENT_ID", globalOpenIDConfig.ClientID)
os.Setenv("CONSOLE_IDP_SECRET", globalOpenIDConfig.ClientSecret)
}
os.Setenv("CONSOLE_MINIO_REGION", globalServerRegion)
os.Setenv("CONSOLE_CERT_PASSWD", os.Getenv("MINIO_CERT_PASSWD"))
os.Setenv("CONSOLE_IDP_CALLBACK", getConsoleEndpoints()[0]+"/oauth_callback")
}
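// initConsoleServer configures and returns the embedded console server,
// wiring it up to MinIO's TLS material, loggers and ports.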
func initConsoleServer() (*restapi.Server, error) {
	// Unset all CONSOLE_ environment variables.
for _, cenv := range env.List(consolePrefix) {
os.Unsetenv(cenv)
}
// enable all console environment variables
minioConfigToConsoleFeatures()
// set certs dir to minio directory
consoleCerts.GlobalCertsDir = &consoleCerts.ConfigDir{
Path: globalCertsDir.Get(),
}
consoleCerts.GlobalCertsCADir = &consoleCerts.ConfigDir{
Path: globalCertsCADir.Get(),
}
swaggerSpec, err := loads.Embedded(restapi.SwaggerJSON, restapi.FlatSwaggerJSON)
if err != nil {
return nil, err
}
// Initialize MinIO loggers
restapi.LogInfo = logger.Info
restapi.LogError = logger.Error
api := operations.NewConsoleAPI(swaggerSpec)
api.Logger = func(_ string, _ ...interface{}) {
// nothing to log.
}
server := restapi.NewServer(api)
// register all APIs
server.ConfigureAPI()
restapi.GlobalRootCAs, restapi.GlobalPublicCerts, restapi.GlobalTLSCertsManager = globalRootCAs, globalPublicCerts, globalTLSCerts
consolePort, _ := strconv.Atoi(globalMinioConsolePort)
server.Host = globalMinioConsoleHost
server.Port = consolePort
restapi.Port = globalMinioConsolePort
restapi.Hostname = globalMinioConsoleHost
if globalIsTLS {
// If TLS certificates are provided enforce the HTTPS.
server.EnabledListeners = []string{"https"}
server.TLSPort = consolePort
		// Need to store tls-port and tls-host in config variables so secure.middleware can read from there.
restapi.TLSPort = globalMinioConsolePort
restapi.Hostname = globalMinioConsoleHost
}
// subnet license refresh process
go func() {
		// start refreshing the subnet license after 5 seconds.
time.Sleep(time.Second * 5)
failedAttempts := 0
for {
if err := restapi.RefreshLicense(); err != nil {
failedAttempts++
// end license refresh after 3 consecutive failed attempts
if failedAttempts >= 3 {
return
}
				// wait 5 minutes and retry
time.Sleep(time.Minute * 5)
continue
}
			// if the license refreshed successfully, reset the counter
failedAttempts = 0
// try to refresh license every 24 hrs
time.Sleep(time.Hour * 24)
}
}()
return server, nil
}
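// verifyObjectLayerFeatures ensures the chosen object layer supports the
// features (encryption, gateway SSE, compression) that the configuration
// requests, and aborts startup otherwise.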
func verifyObjectLayerFeatures(name string, objAPI ObjectLayer) {
if (GlobalKMS != nil) && !objAPI.IsEncryptionSupported() {
logger.Fatal(errInvalidArgument,
"Encryption support is requested but '%s' does not support encryption", name)
}
if strings.HasPrefix(name, "gateway") {
if GlobalGatewaySSE.IsSet() && GlobalKMS == nil {
uiErr := config.ErrInvalidGWSSEEnvValue(nil).Msg("MINIO_GATEWAY_SSE set but KMS is not configured")
logger.Fatal(uiErr, "Unable to start gateway with SSE")
}
}
globalCompressConfigMu.Lock()
if globalCompressConfig.Enabled && !objAPI.IsCompressionSupported() {
logger.Fatal(errInvalidArgument,
"Compression support is requested but '%s' does not support compression", name)
}
globalCompressConfigMu.Unlock()
}
// Check for updates and print a notification message
func checkUpdate(mode string) {
updateURL := minioReleaseInfoURL
if runtime.GOOS == globalWindowsOSName {
updateURL = minioReleaseWindowsInfoURL
}
u, err := url.Parse(updateURL)
if err != nil {
return
}
	// It's OK to ignore any errors during doUpdate() here.
crTime, err := GetCurrentReleaseTime()
if err != nil {
return
}
_, lrTime, err := getLatestReleaseTime(u, 2*time.Second, mode)
if err != nil {
return
}
var older time.Duration
var downloadURL string
if lrTime.After(crTime) {
older = lrTime.Sub(crTime)
downloadURL = getDownloadURL(releaseTimeToReleaseTag(lrTime))
}
updateMsg := prepareUpdateMessage(downloadURL, older)
if updateMsg == "" {
return
}
logStartupMessage(prepareUpdateMessage("Run `mc admin update`", lrTime.Sub(crTime)))
}
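// newConfigDirFromCtx resolves a directory option from the local flag, the
// global flag, or the default, creates the directory, and reports whether it
// was explicitly set.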
func newConfigDirFromCtx(ctx *cli.Context, option string, getDefaultDir func() string) (*ConfigDir, bool) {
var dir string
var dirSet bool
switch {
case ctx.IsSet(option):
dir = ctx.String(option)
dirSet = true
case ctx.GlobalIsSet(option):
dir = ctx.GlobalString(option)
dirSet = true
		// The cli package does not expose the parent command's option, so the code below is a workaround.
if dir == "" || dir == getDefaultDir() {
dirSet = false // Unset to false since GlobalIsSet() true is a false positive.
if ctx.Parent().GlobalIsSet(option) {
dir = ctx.Parent().GlobalString(option)
dirSet = true
}
}
default:
// Neither local nor global option is provided. In this case, try to use
// default directory.
dir = getDefaultDir()
if dir == "" {
logger.FatalIf(errInvalidArgument, "%s option must be provided", option)
}
}
if dir == "" {
logger.FatalIf(errors.New("empty directory"), "%s directory cannot be empty", option)
}
// Disallow relative paths, figure out absolute paths.
dirAbs, err := filepath.Abs(dir)
logger.FatalIf(err, "Unable to fetch absolute path for %s=%s", option, dir)
logger.FatalIf(mkdirAllIgnorePerm(dirAbs), "Unable to create directory specified %s=%s", option, dir)
return &ConfigDir{path: dirAbs}, dirSet
}
func handleCommonCmdArgs(ctx *cli.Context) {
// Get "json" flag from command line argument and
	// enable json and quiet modes if json flag is turned on.
globalCLIContext.JSON = ctx.IsSet("json") || ctx.GlobalIsSet("json")
if globalCLIContext.JSON {
logger.EnableJSON()
}
// Get quiet flag from command line argument.
globalCLIContext.Quiet = ctx.IsSet("quiet") || ctx.GlobalIsSet("quiet")
if globalCLIContext.Quiet {
logger.EnableQuiet()
}
// Get anonymous flag from command line argument.
globalCLIContext.Anonymous = ctx.IsSet("anonymous") || ctx.GlobalIsSet("anonymous")
if globalCLIContext.Anonymous {
logger.EnableAnonymous()
}
// Fetch address option
addr := ctx.GlobalString("address")
if addr == "" || addr == ":"+GlobalMinioDefaultPort {
addr = ctx.String("address")
}
// Fetch console address option
consoleAddr := ctx.GlobalString("console-address")
if consoleAddr == "" {
consoleAddr = ctx.String("console-address")
}
if consoleAddr == "" {
p, err := xnet.GetFreePort()
if err != nil {
logger.FatalIf(err, "Unable to get free port for console on the host")
}
globalMinioConsolePortAuto = true
consoleAddr = net.JoinHostPort("", p.String())
}
if _, _, err := net.SplitHostPort(consoleAddr); err != nil {
logger.FatalIf(err, "Unable to start listening on console port")
}
if consoleAddr == addr {
logger.FatalIf(errors.New("--console-address cannot be same as --address"), "Unable to start the server")
}
globalMinioHost, globalMinioPort = mustSplitHostPort(addr)
globalMinioConsoleHost, globalMinioConsolePort = mustSplitHostPort(consoleAddr)
if globalMinioPort == globalMinioConsolePort {
logger.FatalIf(errors.New("--console-address port cannot be same as --address port"), "Unable to start the server")
}
globalMinioAddr = addr
// Check "no-compat" flag from command line argument.
globalCLIContext.StrictS3Compat = true
if ctx.IsSet("no-compat") || ctx.GlobalIsSet("no-compat") {
globalCLIContext.StrictS3Compat = false
}
// Set all config, certs and CAs directories.
var configSet, certsSet bool
globalConfigDir, configSet = newConfigDirFromCtx(ctx, "config-dir", defaultConfigDir.Get)
globalCertsDir, certsSet = newConfigDirFromCtx(ctx, "certs-dir", defaultCertsDir.Get)
// Remove this code when we deprecate and remove config-dir.
// This code is to make sure we inherit from the config-dir
// option if certs-dir is not provided.
if !certsSet && configSet {
globalCertsDir = &ConfigDir{path: filepath.Join(globalConfigDir.Get(), certsDir)}
}
globalCertsCADir = &ConfigDir{path: filepath.Join(globalCertsDir.Get(), certsCADir)}
logger.FatalIf(mkdirAllIgnorePerm(globalCertsCADir.Get()), "Unable to create certs CA directory at %s", globalCertsCADir.Get())
}
func handleCommonEnvVars() {
var err error
globalBrowserEnabled, err = config.ParseBool(env.Get(config.EnvBrowser, config.EnableOn))
if err != nil {
logger.Fatal(config.ErrInvalidBrowserValue(err), "Invalid MINIO_BROWSER value in environment variable")
}
globalBrowserRedirect, err = config.ParseBool(env.Get(config.EnvBrowserRedirect, config.EnableOn))
if err != nil {
logger.Fatal(config.ErrInvalidBrowserValue(err), "Invalid MINIO_BROWSER_REDIRECT value in environment variable")
}
globalFSOSync, err = config.ParseBool(env.Get(config.EnvFSOSync, config.EnableOff))
if err != nil {
logger.Fatal(config.ErrInvalidFSOSyncValue(err), "Invalid MINIO_FS_OSYNC value in environment variable")
}
domains := env.Get(config.EnvDomain, "")
if len(domains) != 0 {
for _, domainName := range strings.Split(domains, config.ValueSeparator) {
if _, ok := dns2.IsDomainName(domainName); !ok {
logger.Fatal(config.ErrInvalidDomainValue(nil).Msg("Unknown value `%s`", domainName),
"Invalid MINIO_DOMAIN value in environment variable")
}
globalDomainNames = append(globalDomainNames, domainName)
}
sort.Strings(globalDomainNames)
lcpSuf := lcpSuffix(globalDomainNames)
for _, domainName := range globalDomainNames {
if domainName == lcpSuf && len(globalDomainNames) > 1 {
logger.Fatal(config.ErrOverlappingDomainValue(nil).Msg("Overlapping domains `%s` not allowed", globalDomainNames),
"Invalid MINIO_DOMAIN value in environment variable")
}
}
}
publicIPs := env.Get(config.EnvPublicIPs, "")
if len(publicIPs) != 0 {
minioEndpoints := strings.Split(publicIPs, config.ValueSeparator)
var domainIPs = set.NewStringSet()
for _, endpoint := range minioEndpoints {
if net.ParseIP(endpoint) == nil {
// Checking if the IP is a DNS entry.
addrs, err := net.LookupHost(endpoint)
if err != nil {
logger.FatalIf(err, "Unable to initialize MinIO server with [%s] invalid entry found in MINIO_PUBLIC_IPS", endpoint)
}
for _, addr := range addrs {
domainIPs.Add(addr)
}
}
domainIPs.Add(endpoint)
}
updateDomainIPs(domainIPs)
} else {
// Add found interfaces IP address to global domain IPS,
// loopback addresses will be naturally dropped.
domainIPs := mustGetLocalIP4()
for _, host := range globalEndpoints.Hostnames() {
domainIPs.Add(host)
}
updateDomainIPs(domainIPs)
}
// In place update is true by default if the MINIO_UPDATE is not set
// or is not set to 'off', if MINIO_UPDATE is set to 'off' then
// in-place update is off.
globalInplaceUpdateDisabled = strings.EqualFold(env.Get(config.EnvUpdate, config.EnableOn), config.EnableOff)
// Check if the supported credential env vars, "MINIO_ROOT_USER" and
// "MINIO_ROOT_PASSWORD" are provided
// Warn user if deprecated environment variables,
// "MINIO_ACCESS_KEY" and "MINIO_SECRET_KEY", are defined
// Check all error conditions first
if !env.IsSet(config.EnvRootUser) && env.IsSet(config.EnvRootPassword) {
logger.Fatal(config.ErrMissingEnvCredentialRootUser(nil), "Unable to start MinIO")
} else if env.IsSet(config.EnvRootUser) && !env.IsSet(config.EnvRootPassword) {
logger.Fatal(config.ErrMissingEnvCredentialRootPassword(nil), "Unable to start MinIO")
} else if !env.IsSet(config.EnvRootUser) && !env.IsSet(config.EnvRootPassword) {
if !env.IsSet(config.EnvAccessKey) && env.IsSet(config.EnvSecretKey) {
logger.Fatal(config.ErrMissingEnvCredentialAccessKey(nil), "Unable to start MinIO")
} else if env.IsSet(config.EnvAccessKey) && !env.IsSet(config.EnvSecretKey) {
logger.Fatal(config.ErrMissingEnvCredentialSecretKey(nil), "Unable to start MinIO")
}
}
// At this point, either both environment variables
// are defined or both are not defined.
// Check both cases and authenticate them if correctly defined
var user, password string
haveRootCredentials := false
haveAccessCredentials := false
if env.IsSet(config.EnvRootUser) && env.IsSet(config.EnvRootPassword) {
user = env.Get(config.EnvRootUser, "")
password = env.Get(config.EnvRootPassword, "")
haveRootCredentials = true
} else if env.IsSet(config.EnvAccessKey) && env.IsSet(config.EnvSecretKey) {
user = env.Get(config.EnvAccessKey, "")
password = env.Get(config.EnvSecretKey, "")
haveAccessCredentials = true
}
if haveRootCredentials || haveAccessCredentials {
cred, err := auth.CreateCredentials(user, password)
if err != nil {
logger.Fatal(config.ErrInvalidCredentials(err),
"Unable to validate credentials inherited from the shell environment")
}
if haveAccessCredentials {
msg := fmt.Sprintf("WARNING: %s and %s are deprecated.\n"+
" Please use %s and %s",
config.EnvAccessKey, config.EnvSecretKey,
config.EnvRootUser, config.EnvRootPassword)
logStartupMessage(color.RedBold(msg))
}
globalActiveCred = cred
}
switch {
case env.IsSet(config.EnvKMSSecretKey) && env.IsSet(config.EnvKESEndpoint):
		logger.Fatal(errors.New("ambiguous KMS configuration"), fmt.Sprintf("The environment contains %q as well as %q", config.EnvKMSSecretKey, config.EnvKESEndpoint))
}
if env.IsSet(config.EnvKMSSecretKey) {
GlobalKMS, err = kms.Parse(env.Get(config.EnvKMSSecretKey, ""))
if err != nil {
logger.Fatal(err, "Unable to parse the KMS secret key inherited from the shell environment")
}
}
if env.IsSet(config.EnvKESEndpoint) {
var endpoints []string
for _, endpoint := range strings.Split(env.Get(config.EnvKESEndpoint, ""), ",") {
if strings.TrimSpace(endpoint) == "" {
continue
}
if !ellipses.HasEllipses(endpoint) {
endpoints = append(endpoints, endpoint)
continue
}
patterns, err := ellipses.FindEllipsesPatterns(endpoint)
if err != nil {
logger.Fatal(err, fmt.Sprintf("Invalid KES endpoint %q", endpoint))
}
for _, lbls := range patterns.Expand() {
endpoints = append(endpoints, strings.Join(lbls, ""))
}
}
certificate, err := tls.LoadX509KeyPair(env.Get(config.EnvKESClientCert, ""), env.Get(config.EnvKESClientKey, ""))
if err != nil {
logger.Fatal(err, "Unable to load KES client certificate as specified by the shell environment")
}
rootCAs, err := certs.GetRootCAs(env.Get(config.EnvKESServerCA, globalCertsCADir.Get()))
if err != nil {
logger.Fatal(err, fmt.Sprintf("Unable to load X.509 root CAs for KES from %q", env.Get(config.EnvKESServerCA, globalCertsCADir.Get())))
}
var defaultKeyID = env.Get(config.EnvKESKeyName, "")
KMS, err := kms.NewWithConfig(kms.Config{
Endpoints: endpoints,
DefaultKeyID: defaultKeyID,
Certificate: certificate,
RootCAs: rootCAs,
})
if err != nil {
logger.Fatal(err, "Unable to initialize a connection to KES as specified by the shell environment")
}
// We check that the default key ID exists or try to create it otherwise.
// This implicitly checks that we can communicate to KES. We don't treat
// a policy error as failure condition since MinIO may not have the permission
// to create keys - just to generate/decrypt data encryption keys.
if err = KMS.CreateKey(defaultKeyID); err != nil && !errors.Is(err, kes.ErrKeyExists) && !errors.Is(err, kes.ErrNotAllowed) {
logger.Fatal(err, "Unable to initialize a connection to KES as specified by the shell environment")
}
GlobalKMS = KMS
}
if tiers := env.Get("_MINIO_DEBUG_REMOTE_TIERS_IMMEDIATELY", ""); tiers != "" {
globalDebugRemoteTiersImmediately = strings.Split(tiers, ",")
}
}
func logStartupMessage(msg string) {
if globalConsoleSys != nil {
globalConsoleSys.Send(msg, string(logger.All))
}
logger.StartupMessage(msg)
}
func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secureConn bool, err error) {
if !(isFile(getPublicCertFile()) && isFile(getPrivateKeyFile())) {
return nil, nil, false, nil
}
if x509Certs, err = config.ParsePublicCertFile(getPublicCertFile()); err != nil {
return nil, nil, false, err
}
manager, err = certs.NewManager(GlobalContext, getPublicCertFile(), getPrivateKeyFile(), config.LoadX509KeyPair)
if err != nil {
return nil, nil, false, err
}
// MinIO has support for multiple certificates. It expects the following structure:
// certs/
// │
// ├─ public.crt
// ├─ private.key
// │
// ├─ example.com/
// │ │
// │ ├─ public.crt
// │ └─ private.key
// └─ foobar.org/
// │
// ├─ public.crt
// └─ private.key
// ...
//
// Therefore, we read all filenames in the cert directory and check
// for each directory whether it contains a public.crt and private.key.
// If so, we try to add it to certificate manager.
root, err := os.Open(globalCertsDir.Get())
if err != nil {
return nil, nil, false, err
}
defer root.Close()
files, err := root.Readdir(-1)
if err != nil {
return nil, nil, false, err
}
for _, file := range files {
// Ignore all
// - regular files
// - "CAs" directory
// - any directory which starts with ".."
if file.Mode().IsRegular() || file.Name() == "CAs" || strings.HasPrefix(file.Name(), "..") {
continue
}
if file.Mode()&os.ModeSymlink == os.ModeSymlink {
file, err = os.Stat(filepath.Join(root.Name(), file.Name()))
if err != nil {
				// not accessible, skip it
continue
}
if !file.IsDir() {
continue
}
}
var (
certFile = filepath.Join(root.Name(), file.Name(), publicCertFile)
keyFile = filepath.Join(root.Name(), file.Name(), privateKeyFile)
)
if !isFile(certFile) || !isFile(keyFile) {
continue
}
if err = manager.AddCertificate(certFile, keyFile); err != nil {
err = fmt.Errorf("Unable to load TLS certificate '%s,%s': %w", certFile, keyFile, err)
logger.LogIf(GlobalContext, err, logger.Minio)
}
}
secureConn = true
return x509Certs, manager, secureConn, nil
}
// contextCanceled returns whether a context is canceled.
func contextCanceled(ctx context.Context) bool {
select {
case <-ctx.Done():
return true
default:
return false
}
}
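// Illustrative usage of contextCanceled (hypothetical, not part of the original file):
//
// ctx, cancel := context.WithCancel(context.Background())
// cancel()
// _ = contextCanceled(ctx) // true: Done() is closed once cancel runs
//
// The default case keeps the select non-blocking, so this is a cheap poll.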
| [
"\"MINIO_LOG_QUERY_URL\"",
"\"MINIO_LOG_QUERY_AUTH_TOKEN\"",
"\"MINIO_PROMETHEUS_URL\"",
"\"MINIO_CERT_PASSWD\""
]
| []
| [
"MINIO_LOG_QUERY_URL",
"MINIO_CERT_PASSWD",
"MINIO_LOG_QUERY_AUTH_TOKEN",
"MINIO_PROMETHEUS_URL"
]
| [] | ["MINIO_LOG_QUERY_URL", "MINIO_CERT_PASSWD", "MINIO_LOG_QUERY_AUTH_TOKEN", "MINIO_PROMETHEUS_URL"] | go | 4 | 0 | |
httpstream/httpstream.go | package httpstream
import (
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"strconv"
"code.google.com/p/go.net/websocket"
"github.com/gorilla/mux"
"github.com/gliderlabs/logspout/router"
)
func init() {
router.HttpHandlers.Register(LogStreamer, "logs")
}
func debug(v ...interface{}) {
if os.Getenv("DEBUG") != "" {
log.Println(v...)
}
}
func LogStreamer() http.Handler {
logs := mux.NewRouter()
logsHandler := func(w http.ResponseWriter, req *http.Request) {
params := mux.Vars(req)
route := new(router.Route)
if params["value"] != "" {
switch params["predicate"] {
case "id":
route.FilterID = params["value"]
				if len(route.FilterID) > 12 {
route.FilterID = route.FilterID[:12]
}
case "name":
route.FilterName = params["value"]
}
}
if route.FilterID != "" && !router.Routes.RoutingFrom(route.FilterID) {
http.NotFound(w, req)
return
}
defer debug("http: logs streamer disconnected")
logstream := make(chan *router.Message)
defer close(logstream)
var closer <-chan bool
if req.Header.Get("Upgrade") == "websocket" {
debug("http: logs streamer connected [websocket]")
closerBi := make(chan bool)
defer websocketStreamer(w, req, logstream, closerBi)
closer = closerBi
} else {
debug("http: logs streamer connected [http]")
defer httpStreamer(w, req, logstream, route.MultiContainer())
closer = w.(http.CloseNotifier).CloseNotify()
}
route.OverrideCloser(closer)
router.Routes.Route(route, logstream)
}
logs.HandleFunc("/logs/{predicate:[a-zA-Z]+}:{value}", logsHandler).Methods("GET")
logs.HandleFunc("/logs", logsHandler).Methods("GET")
return logs
}
type Colorizer map[string]int
// returns up to 14 color escape codes (then repeats) for each unique key
func (c Colorizer) Get(key string) string {
i, exists := c[key]
if !exists {
c[key] = len(c)
i = c[key]
}
bright := "1;"
if i%14 > 6 {
bright = ""
}
return "\x1b[" + bright + "3" + strconv.Itoa(7-(i%7)) + "m"
}
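// Worked example for Colorizer.Get (illustrative, assuming a fresh c := make(Colorizer)):
//
// c.Get("web") // "\x1b[1;37m": i=0, bold ("1;" since 0%14 <= 6), color 37 (white)
// c.Get("db")  // "\x1b[1;36m": i=1, bold cyan
// c.Get("web") // "\x1b[1;37m": repeat key, same style
//
// Indices 7..13 reuse the same seven colors without the bold attribute,
// giving 14 distinct styles before the cycle repeats.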
func marshal(obj interface{}) []byte {
bytes, err := json.MarshalIndent(obj, "", " ")
if err != nil {
log.Println("marshal:", err)
}
return bytes
}
func normalName(name string) string {
return name[1:]
}
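// Docker prefixes container names with a leading slash, so normalName turns
// e.g. "/web_1" into "web_1" for display.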
func websocketStreamer(w http.ResponseWriter, req *http.Request, logstream chan *router.Message, closer chan bool) {
websocket.Handler(func(conn *websocket.Conn) {
for logline := range logstream {
if req.URL.Query().Get("source") != "" && logline.Source != req.URL.Query().Get("source") {
continue
}
_, err := conn.Write(append(marshal(logline), '\n'))
if err != nil {
closer <- true
return
}
}
}).ServeHTTP(w, req)
}
func httpStreamer(w http.ResponseWriter, req *http.Request, logstream chan *router.Message, multi bool) {
var colors Colorizer
var usecolor, usejson bool
nameWidth := 16
if req.URL.Query().Get("colors") != "off" {
colors = make(Colorizer)
usecolor = true
}
if req.Header.Get("Accept") == "application/json" {
w.Header().Add("Content-Type", "application/json")
usejson = true
} else {
w.Header().Add("Content-Type", "text/plain")
}
for logline := range logstream {
if req.URL.Query().Get("sources") != "" && logline.Source != req.URL.Query().Get("sources") {
continue
}
if usejson {
w.Write(append(marshal(logline), '\n'))
} else {
if multi {
name := normalName(logline.Container.Name)
if len(name) > nameWidth {
nameWidth = len(name)
}
if usecolor {
w.Write([]byte(fmt.Sprintf(
"%s%"+strconv.Itoa(nameWidth)+"s|%s\x1b[0m\n",
colors.Get(name), name, logline.Data,
)))
} else {
w.Write([]byte(fmt.Sprintf(
"%"+strconv.Itoa(nameWidth)+"s|%s\n", name, logline.Data,
)))
}
} else {
w.Write(append([]byte(logline.Data), '\n'))
}
}
w.(http.Flusher).Flush()
}
}
| [
"\"DEBUG\""
]
| []
| [
"DEBUG"
]
| [] | ["DEBUG"] | go | 1 | 0 | |
ch06/evaluating/01_calculate_centroid.go | package main
import (
"fmt"
"log"
"os"
"path/filepath"
"github.com/go-gota/gota/dataframe"
)
var (
fileName = "iris.csv"
filePath = filepath.Join(os.Getenv("MLGO"), "storage", "data", fileName)
)
type centroid []float64
func main() {
f, err := os.Open(filePath)
if err != nil {
log.Fatal(err)
}
defer f.Close()
df := dataframe.ReadCSV(f)
speciesNames := []string{
"Iris-setosa",
"Iris-versicolor",
"Iris-virginica",
}
centroids := make(map[string]centroid)
for _, species := range speciesNames {
filter := dataframe.F{
Colname: "species",
Comparator: "==",
Comparando: species,
}
filtered := df.Filter(filter)
summary := filtered.Describe()
var c centroid
for _, feature := range df.Names() {
if feature == "column" || feature == "species" {
continue
}
c = append(c, summary.Col(feature).Float()[0])
}
centroids[species] = c
}
for _, species := range speciesNames {
fmt.Printf("%s centroid: %v\n", species, centroids[species])
}
}
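// Illustrative run (actual values depend on the iris.csv copy under
// $MLGO/storage/data; the classic dataset gives approximately):
//
// Iris-setosa centroid: [5.006 3.418 1.464 0.244]
//
// The loop above relies on gota's Describe() listing the mean in its first
// row, so Float()[0] picks the per-feature mean for each species.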
| [
"\"MLGO\""
]
| []
| [
"MLGO"
]
| [] | ["MLGO"] | go | 1 | 0 | |
misc/cgo/test/issue7978.go | // Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Issue 7978. Stack tracing didn't work during cgo code after calling a Go
// callback. Make sure GC works and the stack trace is correct.
package cgotest
/*
#include <stdint.h>
void issue7978cb(void);
#if defined(__APPLE__) && defined(__arm__)
// on Darwin/ARM, libSystem doesn't provide implementation of the __sync_fetch_and_add
// primitive, and although gcc supports it, it doesn't inline its definition.
// Clang could inline its definition, so we require clang on Darwin/ARM.
#if defined(__clang__)
#define HAS_SYNC_FETCH_AND_ADD 1
#else
#define HAS_SYNC_FETCH_AND_ADD 0
#endif
#else
#define HAS_SYNC_FETCH_AND_ADD 1
#endif
// use ugly atomic variable sync since that doesn't require calling back into
// Go code or OS dependencies
static void issue7978c(uint32_t *sync) {
#if HAS_SYNC_FETCH_AND_ADD
while(__sync_fetch_and_add(sync, 0) != 0)
;
__sync_fetch_and_add(sync, 1);
while(__sync_fetch_and_add(sync, 0) != 2)
;
issue7978cb();
__sync_fetch_and_add(sync, 1);
while(__sync_fetch_and_add(sync, 0) != 6)
;
#endif
}
*/
import "C"
import (
"os"
"runtime"
"strings"
"sync/atomic"
"testing"
)
var issue7978sync uint32
func issue7978check(t *testing.T, wantFunc string, badFunc string, depth int) {
runtime.GC()
buf := make([]byte, 65536)
trace := string(buf[:runtime.Stack(buf, true)])
for _, goroutine := range strings.Split(trace, "\n\n") {
if strings.Contains(goroutine, "test.issue7978go") {
trace := strings.Split(goroutine, "\n")
// look for the expected function in the stack
for i := 0; i < depth; i++ {
if badFunc != "" && strings.Contains(trace[1+2*i], badFunc) {
t.Errorf("bad stack: found %s in the stack:\n%s", badFunc, goroutine)
return
}
if strings.Contains(trace[1+2*i], wantFunc) {
return
}
}
t.Errorf("bad stack: didn't find %s in the stack:\n%s", wantFunc, goroutine)
return
}
}
t.Errorf("bad stack: goroutine not found. Full stack dump:\n%s", trace)
}
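// The sync variable below implements a ping-pong handshake: the test
// goroutine stores even values (0, 2, 4, 6, 8) to release the C/callback
// side, which advances sync to the next odd value at each checkpoint, so the
// test can take a stack trace while execution is parked at a known location.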
func issue7978wait(store uint32, wait uint32) {
if store != 0 {
atomic.StoreUint32(&issue7978sync, store)
}
for atomic.LoadUint32(&issue7978sync) != wait {
runtime.Gosched()
}
}
//export issue7978cb
func issue7978cb() {
issue7978wait(3, 4)
}
func issue7978go() {
C.issue7978c((*C.uint32_t)(&issue7978sync))
issue7978wait(7, 8)
}
func test7978(t *testing.T) {
if runtime.Compiler == "gccgo" {
t.Skip("gccgo can not do stack traces of C code")
}
if C.HAS_SYNC_FETCH_AND_ADD == 0 {
t.Skip("clang required for __sync_fetch_and_add support on darwin/arm")
}
if os.Getenv("GOTRACEBACK") != "2" {
t.Fatalf("GOTRACEBACK must be 2")
}
issue7978sync = 0
go issue7978go()
// test in c code, before callback
issue7978wait(0, 1)
issue7978check(t, "_Cfunc_issue7978c(", "", 1)
// test in go code, during callback
issue7978wait(2, 3)
issue7978check(t, "test.issue7978cb(", "test.issue7978go", 3)
// test in c code, after callback
issue7978wait(4, 5)
issue7978check(t, "_Cfunc_issue7978c(", "_cgoexpwrap", 1)
// test in go code, after return from cgo
issue7978wait(6, 7)
issue7978check(t, "test.issue7978go(", "", 3)
atomic.StoreUint32(&issue7978sync, 8)
}
| [
"\"GOTRACEBACK\""
]
| []
| [
"GOTRACEBACK"
]
| [] | ["GOTRACEBACK"] | go | 1 | 0 | |
lucky/main.go | package main
import (
"errors"
"fmt"
"io"
"math/rand"
"net/http"
"os"
"strconv"
"time"
"github.com/gin-gonic/gin"
_ "github.com/go-sql-driver/mysql"
"github.com/jinzhu/gorm"
_ "github.com/mattn/go-sqlite3"
)
const (
// MyUser database user name
MyUser = "cloud"
// Password database user password
Password = "passwd"
// Host database host ip
Host = "192.168.1.61"
// MPort database host port
MPort = 3306
// DbName database db name
DbName = "mydb"
// SUCCESS return value
SUCCESS = "success"
// Port listen port
Port = "8080"
)
// Person model
type Person struct {
ID int64 `json:"id" gorm:"auto-increment"`
Name string `json:"name"`
Image string `json:"image"`
ThumbImage string `json:"thumb_image" gorm:"column:thumb_image"`
Lucky bool
Level int64
}
// PersonGroup all persons
type PersonGroup struct {
PersonList *[]Person
}
var personGroup PersonGroup
var (
	errInitPerson = errors.New("初始化错误") // "initialization error"
)
func getRandom(num int) int {
if num < 0 {
num = 1
}
return rand.Intn(num)
}
func (p *PersonGroup) setLuckyNumber(luckySize, luckylevel int64) ([]Person, error) {
rand.Seed(time.Now().UnixNano())
var ps []Person
personArr := p.PersonList
if personArr == nil {
return nil, errInitPerson
}
size := len(*personArr)
var i int64
for i < luckySize {
r1 := getRandom(size)
for (*personArr)[r1].Lucky {
r1 = getRandom(size)
}
(*personArr)[r1].Lucky = true
(*personArr)[r1].Level = luckylevel
i++
ps = append(ps, (*personArr)[r1])
}
return ps, nil
}
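// setLuckyNumber draws winners by rejection sampling: redraw a random index
// until one whose Lucky flag is still unset turns up. That is cheap while
// winners are a small fraction of the pool, so callers should keep luckySize
// well below getAvailableNumber().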
func (p *PersonGroup) getAvailableNumber() int {
var sum int
pl := p.PersonList
for _, p := range *pl {
if !p.Lucky {
sum++
}
}
return sum
}
func mysqlConnectString(driver, host string) (db *gorm.DB, err error) {
connArgs := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=utf8", MyUser, Password, host, MPort, DbName)
db, err = gorm.Open(driver, connArgs)
return
}
func sqliteConnectString(driver, host string) (db *gorm.DB, err error) {
db, err = gorm.Open(driver, host)
return
}
func dbConn(host string) (db *gorm.DB) {
db, err := mysqlConnectString("mysql", host)
//db, err := sqliteConnectString("sqlite3", "gorm.db")
if err != nil {
panic(err.Error())
}
db.Table("t_person").AutoMigrate(&Person{})
return db
}
// GetTotalNum returns the number of people who have not yet won
func GetTotalNum(c *gin.Context) {
count := personGroup.getAvailableNumber()
if count < 0 {
c.AbortWithStatus(http.StatusInternalServerError)
} else {
c.JSON(http.StatusOK, count)
}
}
// GetAll returns all persons
func GetAll(c *gin.Context) {
if personGroup.PersonList != nil {
c.JSON(http.StatusOK, personGroup.PersonList)
} else {
c.AbortWithStatus(http.StatusInternalServerError)
}
}
// GetAllPersonFromDB fetches all person records from the database
func GetAllPersonFromDB(db *gorm.DB) ([]Person, error) {
var personArr []Person
if err := db.Table("t_person").Find(&personArr).Error; err != nil {
return nil, err
}
return personArr, nil
}
// GetLuckyNum draws the requested number of winners for the given prize level
func GetLuckyNum(c *gin.Context) {
luckySize := c.DefaultQuery("luckyNum", "0")
luckyPrize := c.DefaultQuery("luckyPrize", "0")
size, err := strconv.ParseInt(luckySize, 10, 64)
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
level, err := strconv.ParseInt(luckyPrize, 10, 64)
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
t, err := personGroup.setLuckyNumber(size, level)
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
return
}
c.JSON(http.StatusOK, t)
}
func main() {
var host string
if len(os.Args) > 1 {
host = os.Args[1]
} else {
host = Host
}
db := dbConn(host)
defer db.Close()
gin.SetMode(gin.ReleaseMode)
f, _ := os.Create("gin.log")
gin.DefaultWriter = io.MultiWriter(f)
router := gin.Default()
//setResource(router)
router.LoadHTMLGlob("view/*")
router.Static("/vender/static", "./static")
router.GET("/", func(c *gin.Context) {
personList, _ := GetAllPersonFromDB(db)
personGroup = PersonGroup{&personList}
c.HTML(http.StatusOK,
"Index", gin.H{"count": len(personList)})
})
v1 := router.Group("/lucky")
{
v1.GET("/all", GetAll)
v1.GET("/totalNum", GetTotalNum)
v1.GET("/luckyNum", GetLuckyNum)
}
port := Port
if len(os.Getenv("PORT")) > 0 {
port = os.Getenv("PORT")
}
fmt.Println("Listening and serving HTTP on" + ":" + port)
router.Run(":" + port)
}
| [
"\"PORT\"",
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
pkg/util/ecs/detection.go | // Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-2020 Datadog, Inc.
// +build docker
package ecs
import (
"os"
"time"
"github.com/DataDog/datadog-agent/pkg/config"
"github.com/DataDog/datadog-agent/pkg/util/cache"
"github.com/DataDog/datadog-agent/pkg/util/ecs/common"
ecsmeta "github.com/DataDog/datadog-agent/pkg/util/ecs/metadata"
"github.com/DataDog/datadog-agent/pkg/util/log"
)
const (
isFargateInstanceCacheKey = "IsFargateInstanceCacheKey"
hasFargateResourceTagsCacheKey = "HasFargateResourceTagsCacheKey"
hasEC2ResourceTagsCacheKey = "HasEC2ResourceTagsCacheKey"
)
// IsECSInstance returns whether the agent is running in ECS.
func IsECSInstance() bool {
if !config.IsCloudProviderEnabled(common.CloudProviderName) {
return false
}
_, err := ecsmeta.V1()
return err == nil
}
// IsFargateInstance returns whether the agent is in an ECS fargate task.
// It detects it by getting and unmarshalling the metadata API response.
// This function identifies Fargate on ECS only. Make sure to use the Fargate pkg
// to identify Fargate instances in other orchestrators (e.g EKS Fargate)
func IsFargateInstance() bool {
if !config.IsCloudProviderEnabled(common.CloudProviderName) {
return false
}
return queryCacheBool(isFargateInstanceCacheKey, func() (bool, time.Duration) {
// This envvar is set to AWS_ECS_EC2 on classic EC2 instances
// Versions 1.0.0 to 1.3.0 (latest at the time) of the Fargate
// platform set this envvar.
// If Fargate detection were to fail, running a container with
// `env` as cmd will allow to check if it is still present.
if os.Getenv("AWS_EXECUTION_ENV") != "AWS_ECS_FARGATE" {
return newBoolEntry(false)
}
client, err := ecsmeta.V2()
if err != nil {
log.Debugf("error while initializing ECS metadata V2 client: %s", err)
return newBoolEntry(false)
}
_, err = client.GetTask()
if err != nil {
log.Debug(err)
return newBoolEntry(false)
}
return newBoolEntry(true)
})
}
// IsRunningOn returns true if the agent is running on ECS/Fargate
func IsRunningOn() bool {
return IsECSInstance() || IsFargateInstance()
}
// HasEC2ResourceTags returns whether the metadata endpoint in ECS exposes
// resource tags.
func HasEC2ResourceTags() bool {
if !config.IsCloudProviderEnabled(common.CloudProviderName) {
return false
}
return queryCacheBool(hasEC2ResourceTagsCacheKey, func() (bool, time.Duration) {
client, err := ecsmeta.V3FromCurrentTask()
if err != nil {
return newBoolEntry(false)
}
_, err = client.GetTaskWithTags()
return newBoolEntry(err == nil)
})
}
// HasFargateResourceTags returns whether the metadata endpoint in Fargate
// exposes resource tags.
func HasFargateResourceTags() bool {
return queryCacheBool(hasFargateResourceTagsCacheKey, func() (bool, time.Duration) {
client, err := ecsmeta.V2()
if err != nil {
log.Debugf("error while initializing ECS metadata V2 client: %s", err)
return newBoolEntry(false)
}
_, err = client.GetTaskWithTags()
return newBoolEntry(err == nil)
})
}
// GetNTPHosts returns the NTP hosts for ECS/Fargate if it is detected as the cloud provider, otherwise an empty array.
// Docs: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/set-time.html#configure_ntp
func GetNTPHosts() []string {
if IsRunningOn() {
return []string{"169.254.169.123"}
}
return nil
}
func queryCacheBool(cacheKey string, cacheMissEvalFunc func() (bool, time.Duration)) bool {
if !config.IsCloudProviderEnabled(common.CloudProviderName) {
return false
}
if cachedValue, found := cache.Cache.Get(cacheKey); found {
if v, ok := cachedValue.(bool); ok {
return v
}
log.Errorf("Invalid cache format for key %q: forcing a cache miss", cacheKey)
}
newValue, ttl := cacheMissEvalFunc()
cache.Cache.Set(cacheKey, newValue, ttl)
return newValue
}
func newBoolEntry(v bool) (bool, time.Duration) {
	if v {
return v, 5 * time.Minute
}
return v, cache.NoExpiration
}
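// Net effect of the TTLs above (illustrative): a positive detection is
// re-verified every 5 minutes, while a negative result is cached with
// NoExpiration for the lifetime of the agent process.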
| [
"\"AWS_EXECUTION_ENV\""
]
| []
| [
"AWS_EXECUTION_ENV"
]
| [] | ["AWS_EXECUTION_ENV"] | go | 1 | 0 | |
cluster/addons/fluentd-elasticsearch/es-image/elasticsearch_logging_discovery.go | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"fmt"
"os"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/restclient"
)
func flattenSubsets(subsets []api.EndpointSubset) []string {
ips := []string{}
for _, ss := range subsets {
for _, addr := range ss.Addresses {
ips = append(ips, fmt.Sprintf(`"%s"`, addr.IP))
}
}
return ips
}
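// For example, two ready endpoint addresses 10.244.1.5 and 10.244.2.7
// (hypothetical) become []string{`"10.244.1.5"`, `"10.244.2.7"`}, pre-quoted
// so main can join them straight into the discovery.zen.ping.unicast.hosts
// list it prints.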
func main() {
flag.Parse()
glog.Info("Kubernetes Elasticsearch logging discovery")
cc, err := restclient.InClusterConfig()
if err != nil {
glog.Fatalf("Failed to make client: %v", err)
}
client, err := clientset.NewForConfig(cc)
if err != nil {
glog.Fatalf("Failed to make client: %v", err)
}
namespace := api.NamespaceSystem
envNamespace := os.Getenv("NAMESPACE")
if envNamespace != "" {
if _, err := client.Core().Namespaces().Get(envNamespace); err != nil {
glog.Fatalf("%s namespace doesn't exist: %v", envNamespace, err)
}
namespace = envNamespace
}
var elasticsearch *api.Service
	// Look for endpoints associated with the Elasticsearch logging service.
// First wait for the service to become available.
for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
elasticsearch, err = client.Core().Services(namespace).Get("elasticsearch-logging")
if err == nil {
break
}
}
// If we did not find an elasticsearch logging service then log a warning
// and return without adding any unicast hosts.
if elasticsearch == nil {
glog.Warningf("Failed to find the elasticsearch-logging service: %v", err)
return
}
var endpoints *api.Endpoints
addrs := []string{}
// Wait for some endpoints.
count := 0
for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
endpoints, err = client.Core().Endpoints(namespace).Get("elasticsearch-logging")
if err != nil {
continue
}
addrs = flattenSubsets(endpoints.Subsets)
glog.Infof("Found %s", addrs)
if len(addrs) > 0 && len(addrs) == count {
break
}
count = len(addrs)
}
// If there was an error finding endpoints then log a warning and quit.
if err != nil {
glog.Warningf("Error finding endpoints: %v", err)
return
}
glog.Infof("Endpoints = %s", addrs)
fmt.Printf("discovery.zen.ping.unicast.hosts: [%s]\n", strings.Join(addrs, ", "))
}
| [
"\"NAMESPACE\""
]
| []
| [
"NAMESPACE"
]
| [] | ["NAMESPACE"] | go | 1 | 0 | |
debug/log/kubernetes/kubernetes_test.go | package kubernetes
import (
"bytes"
"encoding/json"
"io"
"os"
"testing"
"time"
"github.com/go-iot-platform/go-micro/debug/log"
"github.com/stretchr/testify/assert"
)
func TestKubernetes(t *testing.T) {
// TODO: fix local test running
return
if os.Getenv("IN_TRAVIS_CI") == "yes" {
t.Skip("In Travis CI")
}
k := NewLog(log.Name("micro-network"))
r, w, err := os.Pipe()
if err != nil {
t.Fatal(err)
}
s := os.Stderr
os.Stderr = w
meta := make(map[string]string)
write := log.Record{
Timestamp: time.Unix(0, 0).UTC(),
Message: "Test log entry",
Metadata: meta,
}
meta["foo"] = "bar"
k.Write(write)
b := &bytes.Buffer{}
w.Close()
io.Copy(b, r)
os.Stderr = s
var read log.Record
if err := json.Unmarshal(b.Bytes(), &read); err != nil {
t.Fatalf("json.Unmarshal failed: %s", err.Error())
}
assert.Equal(t, write, read, "Write was not equal")
records, err := k.Read()
assert.Nil(t, err, "Read should not error")
assert.NotNil(t, records, "Read should return records")
stream, err := k.Stream()
if err != nil {
t.Fatal(err)
}
records = nil
go stream.Stop()
for s := range stream.Chan() {
records = append(records, s)
}
assert.Equal(t, 0, len(records), "Stream should return nothing")
}
| [
"\"IN_TRAVIS_CI\""
]
| []
| [
"IN_TRAVIS_CI"
]
| [] | ["IN_TRAVIS_CI"] | go | 1 | 0 | |
geekcomputers/spotlight.py | """ Script To Copy Spotlight(Lockscreen) Images from Windows """
import os
import shutil
import errno
import hashlib
from PIL import Image
def md5(fname):
""" Function to return the MD5 Digest of a file """
hash_md5 = hashlib.md5()
with open(fname, "rb") as file_var:
for chunk in iter(lambda: file_var.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
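# Hashing in 4 KB chunks keeps memory flat even for large images; e.g.
# md5("asset_file") might return "9e107d9d372bb6826bd81d3542a419d6"
# (hypothetical digest), which get_spotlight_wallpapers uses as the copied
# file's name so duplicate wallpapers are skipped.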
def make_folder(folder_name):
    """Function to make the required folders"""
try:
os.makedirs(folder_name)
except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(folder_name):
            pass
        else:
            print("Error! Could not create a folder")
            raise
def get_spotlight_wallpapers(target_folder):
"""Fetches wallpapers from source folder inside AppData to the
newly created folders in C:\\Users\\['user.name']\\Pictures"""
#PATHS REQUIRED TO FETCH AND STORE WALLPAPERS
#Creating necessary folders
    source_folder = os.environ['HOME']+"\\AppData\\Local\\Packages\\"  # assumes HOME is set (e.g. Git Bash); stock Windows sets USERPROFILE instead
source_folder += "Microsoft.Windows.ContentDeliveryManager_cw5n1h2txyewy"
source_folder += "\\LocalState\\Assets"
spotlight_path_mobile = target_folder+"\\Mobile"
spotlight_path_desktop = target_folder+"\\Desktop"
make_folder(spotlight_path_mobile)
make_folder(spotlight_path_desktop)
#Fetching files from the source dir
for filename in os.listdir(source_folder):
filename = source_folder+"\\"+filename
#if size of file is less than 100 KB, ignore the file
if os.stat(filename).st_size > 100000:
#Check resolution and classify based upon the resolution of the images
#name the file equal to the MD5 of the file, so that no duplicate files are to be copied
img_file = Image.open(filename)
if img_file.size[0] >= 1080:
if img_file.size[0] > img_file.size[1]:
temp_path = spotlight_path_desktop+"\\"+md5(filename)
else:
temp_path = spotlight_path_mobile+"\\"+md5(filename)
#If file doesn't exist, copy the file to the new folders
if not os.path.exists(temp_path+".png"):
shutil.copy(filename, temp_path+".png")
if __name__ == '__main__':
    PATH = input("Enter directory path:")
    get_spotlight_wallpapers(PATH)
    print("Lockscreen images have been copied to \""+PATH+"\"")
| []
| []
| [
"HOME"
]
| [] | ["HOME"] | python | 1 | 0 | |
vmwaredriver/vmrun_linux.go | // +build linux
/*
* Copyright 2014 VMware, Inc. All rights reserved. Licensed under the Apache v2 License.
*/
/*
* Copyright 2016 Chris Baumbauer <[email protected]>
*/
package vmwaredriver
import (
	"bytes"
	"errors"
	"fmt"
	"io"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"syscall"
"github.com/docker/machine/libmachine/log"
)
var (
vmrunbin = setVmwareCmd("vmrun")
vdiskmanbin = setVmwareCmd("vmware-vdiskmanager")
)
var (
ErrMachineExist = errors.New("machine already exists")
ErrMachineNotExist = errors.New("machine does not exist")
ErrVMRUNNotFound = errors.New("VMRUN not found")
)
// detect the vmrun and vmware-vdiskmanager cmds' path if needed
func setVmwareCmd(cmd string) string {
var vmwareBase string
switch runtime.GOOS {
case "darwin":
vmwareBase = "/Applications/VMware Fusion.app/Contents/Library/"
case "linux":
vmwareBase = "/usr/bin/"
}
if path, err := exec.LookPath(cmd); err == nil {
return path
}
return filepath.Join(vmwareBase, cmd)
}
func vmrun(args ...string) (string, string, error) {
// vmrun with nogui on VMware Fusion through at least 8.0.1 doesn't work right
// if the umask is set to not allow world-readable permissions
_ = syscall.Umask(022)
	cmd := exec.Command(vmrunbin, args...)
	var stdout bytes.Buffer
	var stderr bytes.Buffer
	cmd.Stdout, cmd.Stderr = &stdout, &stderr
	if os.Getenv("MACHINE_DEBUG") != "" {
		// mirror output on the terminal while still capturing it for the caller
		cmd.Stdout = io.MultiWriter(&stdout, os.Stdout)
		cmd.Stderr = io.MultiWriter(&stderr, os.Stderr)
	}
log.Debugf("executing: %v %v", vmrunbin, strings.Join(args, " "))
err := cmd.Run()
if err != nil {
if ee, ok := err.(*exec.Error); ok && ee == exec.ErrNotFound {
err = ErrVMRUNNotFound
}
}
return stdout.String(), stderr.String(), err
}
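// Illustrative call of vmrun (the .vmx path here is hypothetical):
//
// stdout, stderr, err := vmrun("start", "/vms/boot2docker.vmx", "nogui")
//
// "start <path> nogui" is a standard vmrun invocation; capturing both output
// streams helps diagnose failures.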
// Make a vmdk disk image with the given size (in MB).
func vdiskmanager(dest string, size int) error {
cmd := exec.Command(vdiskmanbin, "-c", "-t", "0", "-s", fmt.Sprintf("%dMB", size), "-a", "lsilogic", dest)
if os.Getenv("MACHINE_DEBUG") != "" {
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
}
	if err := cmd.Run(); err != nil {
		if ee, ok := err.(*exec.Error); ok && ee == exec.ErrNotFound {
return ErrVMRUNNotFound
}
}
return nil
}
| [
"\"MACHINE_DEBUG\"",
"\"MACHINE_DEBUG\""
]
| []
| [
"MACHINE_DEBUG"
]
| [] | ["MACHINE_DEBUG"] | go | 1 | 0 | |
python/mxnet/model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=fixme, invalid-name, too-many-arguments, too-many-locals, too-many-lines
# pylint: disable=too-many-branches, too-many-statements
"""MXNet model module"""
from __future__ import absolute_import, print_function
import os
import time
import logging
import warnings
from collections import namedtuple
import numpy as np
from . import io
from . import ndarray as nd
from . import symbol as sym
from . import optimizer as opt
from . import metric
from . import kvstore as kvs
from .context import Context, cpu
from .initializer import Uniform
from .optimizer import get_updater
from .executor_manager import DataParallelExecutorManager, _check_arguments, _load_data
from .io import DataDesc
from .base import mx_real_t
BASE_ESTIMATOR = object
try:
from sklearn.base import BaseEstimator
BASE_ESTIMATOR = BaseEstimator
except ImportError:
SKLEARN_INSTALLED = False
# Parameter to pass to batch_end_callback
BatchEndParam = namedtuple('BatchEndParams',
['epoch',
'nbatch',
'eval_metric',
'locals'])
def _create_sparse_kvstore(kvstore):
"""Create kvstore assuming some parameters' storage types are row_sparse.
Parameters
----------
kvstore : KVStore or str
The kvstore.
Returns
-------
kvstore : KVStore
update_on_kvstore : bool. Always True.
"""
# always update on kvstore
update_on_kvstore = True
if isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
kv = kvs.create(kvstore)
else:
raise TypeError("Cannot create '%s' KVStore with row_sparse parameters. "
"The type must be KVStore or str." % kvstore)
return (kv, update_on_kvstore)
def _create_kvstore(kvstore, num_device, arg_params):
"""Create kvstore
This function select and create a proper kvstore if given the kvstore type.
Parameters
----------
kvstore : KVStore or str
The kvstore.
num_device : int
The number of devices
arg_params : dict of str to `NDArray`.
Model parameter, dict of name to `NDArray` of net's weights.
"""
update_on_kvstore = bool(int(os.getenv('MXNET_UPDATE_ON_KVSTORE', "1")))
if kvstore is None:
kv = None
elif isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
# create kvstore using the string type
if num_device == 1 and 'dist' not in kvstore:
# no need to use kv for single device and single machine
kv = None
else:
kv = kvs.create(kvstore)
if kvstore == 'local':
# automatically select a proper local
max_size = max(np.prod(param.shape) for param in
arg_params.values())
if max_size > 1024 * 1024 * 16:
update_on_kvstore = False
else:
raise TypeError('kvstore must be KVStore, str or None')
if kv is None:
update_on_kvstore = False
return (kv, update_on_kvstore)
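# Illustrative outcomes of _create_kvstore, assuming MXNET_UPDATE_ON_KVSTORE
# is unset (default "1"); small_params is a hypothetical dict whose largest
# parameter stays at or below 16M elements:
#   _create_kvstore('local', 1, arg_params)      # (None, False): single device
#   _create_kvstore('local', 4, small_params)    # (local kv, True)
#   _create_kvstore('dist_sync', 2, arg_params)  # (dist kv, True)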
def _initialize_kvstore(kvstore, param_arrays, arg_params, param_names, update_on_kvstore):
"""Initialize kvstore"""
for idx, param_on_devs in enumerate(param_arrays):
name = param_names[idx]
kvstore.init(name, arg_params[name])
if update_on_kvstore:
kvstore.pull(name, param_on_devs, priority=-idx)
def _update_params_on_kvstore_nccl(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on NCCL kvstore."""
valid_indices = [index for index, grad_list in
enumerate(grad_arrays) if grad_list[0] is not None]
valid_grad_arrays = [grad_arrays[i] for i in valid_indices]
valid_param_arrays = [param_arrays[i] for i in valid_indices]
valid_param_names = [param_names[i] for i in valid_indices]
size = len(valid_grad_arrays)
start = 0
# Use aggregation by default only with NCCL
default_batch = '16'
batch = int(os.getenv('MXNET_UPDATE_AGGREGATION_SIZE', default_batch))
while start < size:
end = start + batch if start + batch < size else size
# push gradient, priority is negative index
kvstore.push(valid_param_names[start:end], valid_grad_arrays[start:end], priority=-start)
# pull back the weights
kvstore.pull(valid_param_names[start:end], valid_param_arrays[start:end], priority=-start)
start = end
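# Example flow for the NCCL path above (hypothetical sizes): with 40
# parameters carrying gradients and the default aggregation batch of 16, the
# loop issues push/pull for index windows [0, 16), [16, 32), [32, 40);
# priority=-start gives earlier windows higher scheduling priority.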
def _update_params_on_kvstore(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on kvstore."""
for index, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the weights
kvstore.pull(name, arg_list, priority=-index)
def _update_params(param_arrays, grad_arrays, updater, num_device,
kvstore=None, param_names=None):
"""Perform update of param_arrays from grad_arrays not on kvstore."""
updates = [[] for _ in range(num_device)]
for i, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
index = i
if kvstore:
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the sum gradients, to the same locations.
kvstore.pull(name, grad_list, priority=-index)
for k, p in enumerate(zip(arg_list, grad_list)):
            # fake an index here so the optimizer creates distinct state for
            # the same parameter on different devices; TODO(mli): use a
            # better solution later
w, g = p
updates[k].append((index*num_device+k, g, w))
for dev_updates in updates:
# update params if param_arrays and grad_arrays are not empty
if dev_updates:
i, w, g = zip(*dev_updates)
updater(i, w, g)
def _multiple_callbacks(callbacks, *args, **kwargs):
"""Sends args and kwargs to any configured callbacks.
This handles the cases where the 'callbacks' variable
is ``None``, a single function, or a list.
"""
if isinstance(callbacks, list):
for cb in callbacks:
cb(*args, **kwargs)
return
if callbacks:
callbacks(*args, **kwargs)
def _train_multi_device(symbol, ctx, arg_names, param_names, aux_names,
arg_params, aux_params,
begin_epoch, end_epoch, epoch_size, optimizer,
kvstore, update_on_kvstore,
train_data, eval_data=None, eval_metric=None,
epoch_end_callback=None, batch_end_callback=None,
logger=None, work_load_list=None, monitor=None,
eval_end_callback=None,
eval_batch_end_callback=None, sym_gen=None):
"""Internal training function on multiple devices.
This function will also work for single device as well.
Parameters
----------
symbol : Symbol
The network configuration.
ctx : list of Context
The training devices.
arg_names: list of str
Name of all arguments of the network.
param_names: list of str
Name of all trainable parameters of the network.
aux_names: list of str
Name of all auxiliary states of the network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
begin_epoch : int
        The beginning training epoch.
end_epoch : int
The end training epoch.
epoch_size : int, optional
        Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : Optimizer
The optimization algorithm
train_data : DataIter
Training data iterator.
eval_data : DataIter
Validation data iterator.
eval_metric : EvalMetric
An evaluation function or a list of evaluation functions.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback : callable(BatchEndParams)
A callback that is invoked at end of each batch.
This can be used to measure speed, get result from evaluation metric. etc.
kvstore : KVStore
The KVStore.
update_on_kvstore : bool
Whether or not perform weight updating on kvstore.
logger : logging logger
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as ``ctx``.
monitor : Monitor, optional
Monitor installed to executor,
for monitoring outputs, weights, and gradients for debugging.
Notes
-----
- This function will inplace update the NDArrays in `arg_params` and `aux_states`.
"""
if logger is None:
logger = logging
executor_manager = DataParallelExecutorManager(symbol=symbol,
sym_gen=sym_gen,
ctx=ctx,
train_data=train_data,
param_names=param_names,
arg_names=arg_names,
aux_names=aux_names,
work_load_list=work_load_list,
logger=logger)
if monitor:
executor_manager.install_monitor(monitor)
executor_manager.set_params(arg_params, aux_params)
if not update_on_kvstore:
updater = get_updater(optimizer)
else:
kvstore.set_optimizer(optimizer)
if kvstore:
_initialize_kvstore(kvstore=kvstore,
param_arrays=executor_manager.param_arrays,
arg_params=arg_params,
param_names=executor_manager.param_names,
update_on_kvstore=update_on_kvstore)
# Now start training
train_data.reset()
for epoch in range(begin_epoch, end_epoch):
# Training phase
tic = time.time()
eval_metric.reset()
nbatch = 0
# Iterate over training data.
while True:
do_reset = True
for data_batch in train_data:
executor_manager.load_data_batch(data_batch)
if monitor is not None:
monitor.tic()
executor_manager.forward(is_train=True)
executor_manager.backward()
if update_on_kvstore:
if 'nccl' in kvstore.type:
_update_params_on_kvstore_nccl(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore, executor_manager.param_names)
else:
_update_params_on_kvstore(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore, executor_manager.param_names)
else:
_update_params(executor_manager.param_arrays,
executor_manager.grad_arrays,
updater=updater,
num_device=len(ctx),
kvstore=kvstore,
param_names=executor_manager.param_names)
if monitor is not None:
monitor.toc_print()
# evaluate at end, so we can lazy copy
executor_manager.update_metric(eval_metric, data_batch.label)
nbatch += 1
# batch callback (for print purpose)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
# this epoch is done possibly earlier
if epoch_size is not None and nbatch >= epoch_size:
do_reset = False
break
if do_reset:
logger.info('Epoch[%d] Resetting Data Iterator', epoch)
train_data.reset()
# this epoch is done
if epoch_size is None or nbatch >= epoch_size:
break
toc = time.time()
logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))
if epoch_end_callback or epoch + 1 == end_epoch:
executor_manager.copy_to(arg_params, aux_params)
_multiple_callbacks(epoch_end_callback, epoch, symbol, arg_params, aux_params)
# evaluation
if eval_data:
eval_metric.reset()
eval_data.reset()
total_num_batch = 0
for i, eval_batch in enumerate(eval_data):
executor_manager.load_data_batch(eval_batch)
executor_manager.forward(is_train=False)
executor_manager.update_metric(eval_metric, eval_batch.label)
if eval_batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_batch_end_callback, batch_end_params)
total_num_batch += 1
if eval_end_callback is not None:
eval_end_params = BatchEndParam(epoch=epoch,
nbatch=total_num_batch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_end_callback, eval_end_params)
eval_data.reset()
# end of all epochs
def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params, remove_amp_cast=True):
"""Checkpoint the model data into file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
The epoch number of the model.
symbol : Symbol
The input Symbol.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
remove_amp_cast : bool, optional
Whether to remove the amp_cast and amp_multicast operators, before saving the model.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if symbol is not None:
symbol.save('%s-symbol.json' % prefix, remove_amp_cast=remove_amp_cast)
save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})
param_name = '%s-%04d.params' % (prefix, epoch)
nd.save(param_name, save_dict)
logging.info('Saved checkpoint to \"%s\"', param_name)
def load_params(prefix, epoch):
"""Load params from a file
"""
save_dict = nd.load("%s-%04d.params" % (prefix, epoch))
arg_params = {}
aux_params = {}
if not save_dict:
logging.warning("Params file '%s' is empty", '%s-%04d.params' % (prefix, epoch))
return (arg_params, aux_params)
for k, v in save_dict.items():
tp, name = k.split(":", 1)
if tp == "arg":
arg_params[name] = v
if tp == "aux":
aux_params[name] = v
return (arg_params, aux_params)
def load_checkpoint(prefix, epoch):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
Epoch number of model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- Symbol will be loaded from ``prefix-symbol.json``.
- Parameters will be loaded from ``prefix-epoch.params``.
"""
symbol = sym.load('%s-symbol.json' % prefix)
arg_params, aux_params = load_params(prefix, epoch)
return (symbol, arg_params, aux_params)
from .callback import LogValidationMetricsCallback # pylint: disable=wrong-import-position
class FeedForward(BASE_ESTIMATOR):
"""Model class of MXNet for training and predicting feedforward nets.
This class is designed for a single-data single output supervised network.
Parameters
----------
symbol : Symbol
The symbol configuration of computation network.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi GPU training, pass in a list of gpu contexts.
num_epoch : int, optional
        Training parameter, number of training epochs.
epoch_size : int, optional
        Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
Training parameter, name or optimizer object for training.
initializer : initializer function, optional
Training parameter, the initialization scheme used.
numpy_batch_size : int, optional
The batch size of training data.
Only needed when input array is numpy.
arg_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's auxiliary states.
allow_extra_params : boolean, optional
Whether allow extra parameters that are not needed by symbol
to be passed by aux_params and ``arg_params``.
If this is True, no error will be thrown when ``aux_params`` and ``arg_params``
contain more parameters than needed.
begin_epoch : int, optional
        The beginning training epoch.
kwargs : dict
The additional keyword arguments passed to optimizer.
"""
def __init__(self, symbol, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd',
initializer=Uniform(0.01),
numpy_batch_size=128,
arg_params=None, aux_params=None,
allow_extra_params=False,
begin_epoch=0,
**kwargs):
warnings.warn(
'\033[91mmxnet.model.FeedForward has been deprecated. ' + \
'Please use mxnet.mod.Module instead.\033[0m',
DeprecationWarning, stacklevel=2)
if isinstance(symbol, sym.Symbol):
self.symbol = symbol
self.sym_gen = None
else:
assert(callable(symbol))
self.symbol = None
self.sym_gen = symbol
# model parameters
self.arg_params = arg_params
self.aux_params = aux_params
self.allow_extra_params = allow_extra_params
self.argument_checked = False
if self.sym_gen is None:
self._check_arguments()
# basic configuration
if ctx is None:
ctx = [cpu()]
elif isinstance(ctx, Context):
ctx = [ctx]
self.ctx = ctx
# training parameters
self.num_epoch = num_epoch
self.epoch_size = epoch_size
self.kwargs = kwargs.copy()
self.optimizer = optimizer
self.initializer = initializer
self.numpy_batch_size = numpy_batch_size
# internal helper state
self._pred_exec = None
self.begin_epoch = begin_epoch
def _check_arguments(self):
"""verify the argument of the default symbol and user provided parameters"""
if self.argument_checked:
return
assert(self.symbol is not None)
self.argument_checked = True
# check if symbol contain duplicated names.
_check_arguments(self.symbol)
# rematch parameters to delete useless ones
if self.allow_extra_params:
if self.arg_params:
arg_names = set(self.symbol.list_arguments())
self.arg_params = {k : v for k, v in self.arg_params.items()
if k in arg_names}
if self.aux_params:
aux_names = set(self.symbol.list_auxiliary_states())
self.aux_params = {k : v for k, v in self.aux_params.items()
if k in aux_names}
@staticmethod
def _is_data_arg(name):
"""Check if name is a data argument."""
return name.endswith('data') or name.endswith('label')
def _init_params(self, inputs, overwrite=False):
"""Initialize weight parameters and auxiliary states."""
inputs = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in inputs]
input_shapes = {item.name: item.shape for item in inputs}
arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
assert arg_shapes is not None
input_dtypes = {item.name: item.dtype for item in inputs}
arg_dtypes, _, aux_dtypes = self.symbol.infer_type(**input_dtypes)
assert arg_dtypes is not None
arg_names = self.symbol.list_arguments()
input_names = input_shapes.keys()
param_names = [key for key in arg_names if key not in input_names]
aux_names = self.symbol.list_auxiliary_states()
param_name_attrs = [x for x in zip(arg_names, arg_shapes, arg_dtypes)
if x[0] in param_names]
arg_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in param_name_attrs}
aux_name_attrs = [x for x in zip(aux_names, aux_shapes, aux_dtypes)
if x[0] in aux_names]
aux_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in aux_name_attrs}
for k, v in arg_params.items():
if self.arg_params and k in self.arg_params and (not overwrite):
arg_params[k][:] = self.arg_params[k][:]
else:
self.initializer(k, v)
for k, v in aux_params.items():
if self.aux_params and k in self.aux_params and (not overwrite):
aux_params[k][:] = self.aux_params[k][:]
else:
self.initializer(k, v)
self.arg_params = arg_params
self.aux_params = aux_params
return (arg_names, list(param_names), aux_names)
def __getstate__(self):
this = self.__dict__.copy()
this['_pred_exec'] = None
return this
def __setstate__(self, state):
self.__dict__.update(state)
def _init_predictor(self, input_shapes, type_dict=None):
"""Initialize the predictor module for running prediction."""
shapes = {name: self.arg_params[name].shape for name in self.arg_params}
shapes.update(dict(input_shapes))
if self._pred_exec is not None:
arg_shapes, _, _ = self.symbol.infer_shape(**shapes)
assert arg_shapes is not None, "Incomplete input shapes"
pred_shapes = [x.shape for x in self._pred_exec.arg_arrays]
if arg_shapes == pred_shapes:
return
# for now only use the first device
pred_exec = self.symbol.simple_bind(
self.ctx[0], grad_req='null', type_dict=type_dict, **shapes)
pred_exec.copy_params_from(self.arg_params, self.aux_params)
_check_arguments(self.symbol)
self._pred_exec = pred_exec
def _init_iter(self, X, y, is_train):
"""Initialize the iterator given input."""
if isinstance(X, (np.ndarray, nd.NDArray)):
if y is None:
if is_train:
raise ValueError('y must be specified when X is numpy.ndarray')
y = np.zeros(X.shape[0])
if not isinstance(y, (np.ndarray, nd.NDArray)):
raise TypeError('y must be ndarray when X is numpy.ndarray')
if X.shape[0] != y.shape[0]:
raise ValueError("The numbers of data points and labels not equal")
if y.ndim == 2 and y.shape[1] == 1:
y = y.flatten()
if y.ndim != 1:
raise ValueError("Label must be 1D or 2D (with 2nd dimension being 1)")
if is_train:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),
shuffle=is_train, last_batch_handle='roll_over')
else:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size), shuffle=False)
if not isinstance(X, io.DataIter):
raise TypeError('X must be DataIter, NDArray or numpy.ndarray')
return X
def _init_eval_iter(self, eval_data):
"""Initialize the iterator given eval_data."""
if eval_data is None:
return eval_data
if isinstance(eval_data, (tuple, list)) and len(eval_data) == 2:
if eval_data[0] is not None:
if eval_data[1] is None and isinstance(eval_data[0], io.DataIter):
return eval_data[0]
input_data = (np.array(eval_data[0]) if isinstance(eval_data[0], list)
else eval_data[0])
input_label = (np.array(eval_data[1]) if isinstance(eval_data[1], list)
else eval_data[1])
return self._init_iter(input_data, input_label, is_train=True)
else:
raise ValueError("Eval data is NONE")
if not isinstance(eval_data, io.DataIter):
raise TypeError('Eval data must be DataIter, or ' \
'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)')
return eval_data
def predict(self, X, num_batch=None, return_data=False, reset=True):
"""Run the prediction, always only use one device.
Parameters
----------
        X : mxnet.DataIter
            The data to run prediction on.
        num_batch : int or None
            The number of batches to run. Goes through all batches if ``None``.
        return_data : bool, optional
            Whether to also return the input data and labels along with the outputs.
        reset : bool, optional
            Whether to reset the data iterator before running prediction.
Returns
-------
y : numpy.ndarray or a list of numpy.ndarray if the network has multiple outputs.
The predicted value of the output.
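
        Examples
        --------
        A minimal sketch (``model`` and ``test_iter`` are hypothetical names
        for a trained FeedForward and an existing DataIter):

        >>> outputs = model.predict(test_iter)
        >>> outputs, data, label = model.predict(test_iter, return_data=True)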
"""
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
batch_size = X.batch_size
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
output_list = [[] for _ in range(len(self._pred_exec.outputs))]
if return_data:
data_list = [[] for _ in X.provide_data]
label_list = [[] for _ in X.provide_label]
i = 0
for batch in X:
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
padded = batch.pad
real_size = batch_size - padded
for o_list, o_nd in zip(output_list, self._pred_exec.outputs):
o_list.append(o_nd[0:real_size].asnumpy())
if return_data:
for j, x in enumerate(batch.data):
data_list[j].append(x[0:real_size].asnumpy())
for j, x in enumerate(batch.label):
label_list[j].append(x[0:real_size].asnumpy())
i += 1
if num_batch is not None and i == num_batch:
break
outputs = [np.concatenate(x) for x in output_list]
if len(outputs) == 1:
outputs = outputs[0]
if return_data:
data = [np.concatenate(x) for x in data_list]
label = [np.concatenate(x) for x in label_list]
if len(data) == 1:
data = data[0]
if len(label) == 1:
label = label[0]
return outputs, data, label
else:
return outputs
def score(self, X, eval_metric='acc', num_batch=None, batch_end_callback=None, reset=True):
"""Run the model given an input and calculate the score
as assessed by an evaluation metric.
Parameters
----------
X : mxnet.DataIter
        eval_metric : metric.EvalMetric or str
            The metric used for calculating the score.
        num_batch : int or None
            The number of batches to run. Goes through all batches if ``None``.
Returns
-------
s : float
The final score.
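
        Examples
        --------
        A minimal sketch (``model`` and ``val_iter`` are hypothetical names
        for a trained FeedForward and an existing DataIter):

        >>> accuracy = model.score(val_iter, eval_metric='acc')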
"""
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
for i, batch in enumerate(X):
if num_batch is not None and i == num_batch:
break
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
eval_metric.update(batch.label, self._pred_exec.outputs)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=0,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
return eval_metric.get()[1]
def fit(self, X, y=None, eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None, kvstore='local', logger=None,
work_load_list=None, monitor=None, eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None):
"""Fit the model.
Parameters
----------
X : DataIter, or numpy.ndarray/NDArray
Training data. If `X` is a `DataIter`, the name or (if name not available)
the position of its outputs should match the corresponding variable
names defined in the symbolic graph.
y : numpy.ndarray/NDArray, optional
Training set label.
If X is ``numpy.ndarray`` or `NDArray`, `y` is required to be set.
While y can be 1D or 2D (with 2nd dimension as 1), its first dimension must be
the same as `X`, i.e. the number of data points and labels should be equal.
eval_data : DataIter or numpy.ndarray/list/NDArray pair
If eval_data is numpy.ndarray/list/NDArray pair,
it should be ``(valid_data, valid_label)``.
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. This could be the name of evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for purposes of printing.
kvstore: KVStore or str, optional
The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'
            Defaults to 'local'; often no need to change for a single machine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Note
----
KVStore behavior
- 'local', multi-devices on a single machine, will automatically choose best type.
- 'dist_sync', multiple machines communicating via BSP.
- 'dist_async', multiple machines with asynchronous communication.
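
        Examples
        --------
        A minimal sketch (``model``, ``train_iter`` and ``val_iter`` are
        hypothetical names for an existing FeedForward and two DataIters):

        >>> model.fit(X=train_iter, eval_data=val_iter, eval_metric='acc')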
"""
data = self._init_iter(X, y, is_train=True)
eval_data = self._init_eval_iter(eval_data)
if self.sym_gen:
self.symbol = self.sym_gen(data.default_bucket_key) # pylint: disable=no-member
self._check_arguments()
self.kwargs["sym"] = self.symbol
arg_names, param_names, aux_names = \
self._init_params(data.provide_data+data.provide_label)
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
# create kvstore
(kvstore, update_on_kvstore) = _create_kvstore(
kvstore, len(self.ctx), self.arg_params)
param_idx2name = {}
if update_on_kvstore:
param_idx2name.update(enumerate(param_names))
else:
for i, n in enumerate(param_names):
for k in range(len(self.ctx)):
param_idx2name[i*len(self.ctx)+k] = n
self.kwargs["param_idx2name"] = param_idx2name
        # initialize the optimizer
if isinstance(self.optimizer, str):
batch_size = data.batch_size
if kvstore and 'dist' in kvstore.type and '_async' not in kvstore.type:
batch_size *= kvstore.num_workers
optimizer = opt.create(self.optimizer,
rescale_grad=(1.0/batch_size),
**(self.kwargs))
        elif isinstance(self.optimizer, opt.Optimizer):
            optimizer = self.optimizer
            if not optimizer.idx2name:
                optimizer.idx2name = param_idx2name.copy()
# do training
_train_multi_device(self.symbol, self.ctx, arg_names, param_names, aux_names,
self.arg_params, self.aux_params,
begin_epoch=self.begin_epoch, end_epoch=self.num_epoch,
epoch_size=self.epoch_size,
optimizer=optimizer,
train_data=data, eval_data=eval_data,
eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore, update_on_kvstore=update_on_kvstore,
logger=logger, work_load_list=work_load_list, monitor=monitor,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback,
sym_gen=self.sym_gen)
def save(self, prefix, epoch=None, remove_amp_cast=True):
"""Checkpoint the model checkpoint into file.
You can also use `pickle` to do the job if you only work on Python.
The advantage of `load` and `save` (as compared to `pickle`) is that
the resulting file can be loaded from other MXNet language bindings.
        One can also directly `load`/`save` from/to cloud storage (S3, HDFS).
Parameters
----------
        prefix : str
            Prefix of model name.
        epoch : int, optional
            The epoch number to tag the checkpoint with. Defaults to ``self.num_epoch``.
        remove_amp_cast : bool, optional
            Whether to remove the amp_cast and amp_multicast operators before saving the model.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
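
        Examples
        --------
        A minimal sketch (``model`` is a hypothetical trained FeedForward);
        this writes ``mymodel-symbol.json`` and ``mymodel-0010.params``:

        >>> model.save('mymodel', epoch=10)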
"""
if epoch is None:
epoch = self.num_epoch
assert epoch is not None
save_checkpoint(prefix, epoch, self.symbol, self.arg_params, self.aux_params, remove_amp_cast=remove_amp_cast)
@staticmethod
def load(prefix, epoch, ctx=None, **kwargs):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
            Epoch number of the model to load.
ctx : Context or list of Context, optional
The device context of training and prediction.
kwargs : dict
Other parameters for model, including `num_epoch`, optimizer and `numpy_batch_size`.
Returns
-------
model : FeedForward
The loaded model that can be used for prediction.
Notes
-----
        - ``prefix-symbol.json`` will be loaded for the symbol.
        - ``prefix-epoch.params`` will be loaded for the parameters.
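
        Examples
        --------
        A minimal sketch; this reads the ``mymodel-symbol.json`` and
        ``mymodel-0010.params`` files written by a hypothetical earlier
        ``save`` call (``mx`` is the usual ``import mxnet as mx`` alias):

        >>> model = FeedForward.load('mymodel', epoch=10, ctx=mx.cpu())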
"""
symbol, arg_params, aux_params = load_checkpoint(prefix, epoch)
return FeedForward(symbol, ctx=ctx,
arg_params=arg_params, aux_params=aux_params,
begin_epoch=epoch,
**kwargs)
@staticmethod
def create(symbol, X, y=None, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd', initializer=Uniform(0.01),
eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None,
kvstore='local', logger=None, work_load_list=None,
eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None, **kwargs):
"""Functional style to create a model.
        This function is more consistent with a functional style of
        programming, in which the model is created and trained in a single
        call rather than mutated in place.
Parameters
----------
symbol : Symbol
The symbol configuration of a computation network.
X : DataIter
Training data.
y : numpy.ndarray, optional
If `X` is a ``numpy.ndarray``, `y` must be set.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi-GPU training, pass in a list of GPU contexts.
num_epoch : int, optional
            The number of training epochs.
epoch_size : int, optional
            Number of batches in an epoch. By default, it is set to
            ``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
The name of the chosen optimizer, or an optimizer object, used for training.
initializer : initializer function, optional
The initialization scheme used.
eval_data : DataIter or numpy.ndarray pair
            If `eval_data` is a ``numpy.ndarray`` pair, it should
be (`valid_data`, `valid_label`).
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. Can be the name of an evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for print purposes.
kvstore: KVStore or str, optional
            The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'.
            Defaults to 'local'; often no need to change for a single machine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
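
        Examples
        --------
        A minimal sketch (``net`` and ``train_iter`` are hypothetical names
        for a previously built Symbol and an existing DataIter):

        >>> model = FeedForward.create(net, X=train_iter, num_epoch=10,
        ...                            optimizer='sgd', learning_rate=0.01)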
"""
model = FeedForward(symbol, ctx=ctx, num_epoch=num_epoch,
epoch_size=epoch_size,
optimizer=optimizer, initializer=initializer, **kwargs)
model.fit(X, y, eval_data=eval_data, eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore,
logger=logger,
work_load_list=work_load_list,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback)
return model
| []
| []
| [
"MXNET_UPDATE_AGGREGATION_SIZE",
"MXNET_UPDATE_ON_KVSTORE"
]
| [] | ["MXNET_UPDATE_AGGREGATION_SIZE", "MXNET_UPDATE_ON_KVSTORE"] | python | 2 | 0 | |
cmd/commands/run/run.go | // Copyright 2013 bee authors
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package run
import (
"io/ioutil"
"os"
path "path/filepath"
"runtime"
"strings"
"github.com/beego/bee/v2/cmd/commands"
"github.com/beego/bee/v2/cmd/commands/version"
"github.com/beego/bee/v2/config"
beeLogger "github.com/beego/bee/v2/logger"
"github.com/beego/bee/v2/utils"
)
var CmdRun = &commands.Command{
UsageLine: "run [appname] [watchall] [-main=*.go] [-downdoc=true] [-gendoc=true] [-vendor=true] [-e=folderToExclude] [-ex=extraPackageToWatch] [-tags=goBuildTags] [-runmode=BEEGO_RUNMODE]",
Short: "Run the application by starting a local development server",
Long: `
The run command watches the application's filesystem for any changes, and recompiles/restarts it.
`,
PreRun: func(cmd *commands.Command, args []string) { version.ShowShortVersionBanner() },
Run: RunApp,
}
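// Example invocations (the app name and paths are hypothetical; the flags are
// the ones registered in init below):
//
//	bee run
//	bee run myapp -e=vendor -e=node_modules -runmode=dev
//	bee run -main=main.go -tags=jsoniter
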
var (
mainFiles utils.ListOpts
downdoc utils.DocValue
gendoc utils.DocValue
// The flags list of the paths excluded from watching
excludedPaths utils.StrFlags
// Pass through to -tags arg of "go build"
buildTags string
// Pass through to -ldflags arg of "go build"
buildLDFlags string
// Application path
currpath string
// Application name
appname string
// Channel to signal an Exit
exit chan bool
// Flag to watch the vendor folder
vendorWatch bool
// Current user workspace
currentGoPath string
// Current runmode
runmode string
// Extra args to run application
runargs string
// Extra directories
extraPackages utils.StrFlags
)
var started = make(chan bool)
func init() {
CmdRun.Flag.Var(&mainFiles, "main", "Specify main go files.")
	CmdRun.Flag.Var(&gendoc, "gendoc", "Enable auto-generation of the docs.")
CmdRun.Flag.Var(&downdoc, "downdoc", "Enable auto-download of the swagger file if it does not exist.")
CmdRun.Flag.Var(&excludedPaths, "e", "List of paths to exclude.")
	CmdRun.Flag.BoolVar(&vendorWatch, "vendor", false, "Enable watching the vendor folder.")
CmdRun.Flag.StringVar(&buildTags, "tags", "", "Set the build tags. See: https://golang.org/pkg/go/build/")
CmdRun.Flag.StringVar(&buildLDFlags, "ldflags", "", "Set the build ldflags. See: https://golang.org/pkg/go/build/")
CmdRun.Flag.StringVar(&runmode, "runmode", "", "Set the Beego run mode.")
CmdRun.Flag.StringVar(&runargs, "runargs", "", "Extra args to run application")
CmdRun.Flag.Var(&extraPackages, "ex", "List of extra package to watch.")
exit = make(chan bool)
commands.AvailableCommands = append(commands.AvailableCommands, CmdRun)
}
// RunApp locates files to watch, and starts the beego application
func RunApp(cmd *commands.Command, args []string) int {
// The default app path is the current working directory
appPath, _ := os.Getwd()
// If an argument is presented, we use it as the app path
if len(args) != 0 && args[0] != "watchall" {
if path.IsAbs(args[0]) {
appPath = args[0]
} else {
appPath = path.Join(appPath, args[0])
}
}
if utils.IsInGOPATH(appPath) {
if found, _gopath, _path := utils.SearchGOPATHs(appPath); found {
appPath = _path
appname = path.Base(appPath)
currentGoPath = _gopath
} else {
beeLogger.Log.Fatalf("No application '%s' found in your GOPATH", appPath)
}
if strings.HasSuffix(appname, ".go") && utils.IsExist(appPath) {
beeLogger.Log.Warnf("The appname is in conflict with file's current path. Do you want to build appname as '%s'", appname)
beeLogger.Log.Info("Do you want to overwrite it? [yes|no] ")
if !utils.AskForConfirmation() {
return 0
}
}
} else {
beeLogger.Log.Warn("Running application outside of GOPATH")
appname = path.Base(appPath)
currentGoPath = appPath
}
beeLogger.Log.Infof("Using '%s' as 'appname'", appname)
beeLogger.Log.Debugf("Current path: %s", utils.FILE(), utils.LINE(), appPath)
if runmode == "prod" || runmode == "dev" {
os.Setenv("BEEGO_RUNMODE", runmode)
beeLogger.Log.Infof("Using '%s' as 'runmode'", os.Getenv("BEEGO_RUNMODE"))
} else if runmode != "" {
os.Setenv("BEEGO_RUNMODE", runmode)
beeLogger.Log.Warnf("Using '%s' as 'runmode'", os.Getenv("BEEGO_RUNMODE"))
} else if os.Getenv("BEEGO_RUNMODE") != "" {
beeLogger.Log.Warnf("Using '%s' as 'runmode'", os.Getenv("BEEGO_RUNMODE"))
}
var paths []string
readAppDirectories(appPath, &paths)
	// Because monitoring individual files has some issues, we watch the
	// current directory and ignore non-Go files.
for _, p := range config.Conf.DirStruct.Others {
paths = append(paths, strings.Replace(p, "$GOPATH", currentGoPath, -1))
}
if len(extraPackages) > 0 {
// get the full path
for _, packagePath := range extraPackages {
if found, _, _fullPath := utils.SearchGOPATHs(packagePath); found {
readAppDirectories(_fullPath, &paths)
} else {
beeLogger.Log.Warnf("No extra package '%s' found in your GOPATH", packagePath)
}
}
		// de-duplicate the watched paths
strSet := make(map[string]struct{})
for _, p := range paths {
strSet[p] = struct{}{}
}
paths = make([]string, len(strSet))
index := 0
for i := range strSet {
paths[index] = i
index++
}
}
files := []string{}
for _, arg := range mainFiles {
if len(arg) > 0 {
files = append(files, arg)
}
}
if downdoc == "true" {
if _, err := os.Stat(path.Join(appPath, "swagger", "index.html")); err != nil {
if os.IsNotExist(err) {
downloadFromURL(swaggerlink, "swagger.zip")
unzipAndDelete("swagger.zip")
}
}
}
// Start the Reload server (if enabled)
if config.Conf.EnableReload {
startReloadServer()
}
if gendoc == "true" {
NewWatcher(paths, files, true)
AutoBuild(files, true)
} else {
NewWatcher(paths, files, false)
AutoBuild(files, false)
}
for {
<-exit
runtime.Goexit()
}
}
func readAppDirectories(directory string, paths *[]string) {
fileInfos, err := ioutil.ReadDir(directory)
if err != nil {
return
}
useDirectory := false
for _, fileInfo := range fileInfos {
if strings.HasSuffix(fileInfo.Name(), "docs") {
continue
}
if strings.HasSuffix(fileInfo.Name(), "swagger") {
continue
}
if !vendorWatch && strings.HasSuffix(fileInfo.Name(), "vendor") {
continue
}
if isExcluded(path.Join(directory, fileInfo.Name())) {
continue
}
if fileInfo.IsDir() && fileInfo.Name()[0] != '.' {
readAppDirectories(directory+"/"+fileInfo.Name(), paths)
continue
}
if useDirectory {
continue
}
if path.Ext(fileInfo.Name()) == ".go" || (ifStaticFile(fileInfo.Name()) && config.Conf.EnableReload) {
*paths = append(*paths, directory)
useDirectory = true
}
}
}
// isExcluded reports whether the given file path is excluded from watching.
func isExcluded(filePath string) bool {
for _, p := range excludedPaths {
absP, err := path.Abs(p)
if err != nil {
beeLogger.Log.Errorf("Cannot get absolute path of '%s'", p)
continue
}
absFilePath, err := path.Abs(filePath)
if err != nil {
beeLogger.Log.Errorf("Cannot get absolute path of '%s'", filePath)
break
}
if strings.HasPrefix(absFilePath, absP) {
beeLogger.Log.Infof("'%s' is not being watched", filePath)
return true
}
}
return false
}
| [
"\"BEEGO_RUNMODE\"",
"\"BEEGO_RUNMODE\"",
"\"BEEGO_RUNMODE\"",
"\"BEEGO_RUNMODE\""
]
| []
| [
"BEEGO_RUNMODE"
]
| [] | ["BEEGO_RUNMODE"] | go | 1 | 0 | |
config/wsgi.py | """
WSGI config for aggregator project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Use Whitenoise to serve static files
# See: https://whitenoise.readthedocs.org/
application = DjangoWhiteNoise(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
v3/integrations/nrgraphgophers/nrgraphgophers_example_test.go | // Copyright 2020 New Relic Corporation. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package nrgraphgophers_test
import (
"log"
"net/http"
"os"
graphql "github.com/graph-gophers/graphql-go"
"github.com/graph-gophers/graphql-go/relay"
"github.com/divyanshgaba/go-agent/v3/integrations/nrgraphgophers"
"github.com/divyanshgaba/go-agent/v3/newrelic"
)
type query struct{}
func (*query) Hello() string { return "hello world" }
func Example() {
// First create your New Relic Application:
app, err := newrelic.NewApplication(
newrelic.ConfigAppName("GraphQL App"),
newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")),
newrelic.ConfigDebugLogger(os.Stdout),
)
if nil != err {
panic(err)
}
querySchema := `type Query { hello: String! }`
// Then add a graphql.Tracer(nrgraphgophers.NewTracer()) option to your
// schema parsing to get field and query segment instrumentation:
opt := graphql.Tracer(nrgraphgophers.NewTracer())
schema := graphql.MustParseSchema(querySchema, &query{}, opt)
// Finally, instrument your request handler using newrelic.WrapHandle
// to create transactions for requests:
http.Handle(newrelic.WrapHandle(app, "/", &relay.Handler{Schema: schema}))
log.Fatal(http.ListenAndServe(":8000", nil))
}
| [
"\"NEW_RELIC_LICENSE_KEY\""
]
| []
| [
"NEW_RELIC_LICENSE_KEY"
]
| [] | ["NEW_RELIC_LICENSE_KEY"] | go | 1 | 0 | |
upload_oss.go | package main
import (
"path/filepath"
"fmt"
"os"
"flag"
"reflect"
"./pkg"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
)
// OssProgressListener implements a progress listener for OSS transfers.
type OssProgressListener struct {
}
// ProgressChanged handles progress-change events and prints the transfer status.
func (listener *OssProgressListener) ProgressChanged(event *oss.ProgressEvent) {
switch event.EventType {
case oss.TransferStartedEvent:
fmt.Printf("Transfer Started, ConsumedBytes: %d, TotalBytes %d.\n",
event.ConsumedBytes, event.TotalBytes)
case oss.TransferDataEvent:
fmt.Printf("\rTransfer Data, ConsumedBytes: %d, TotalBytes %d, %d%%.",
event.ConsumedBytes, event.TotalBytes, event.ConsumedBytes*100/event.TotalBytes)
case oss.TransferCompletedEvent:
fmt.Printf("\nTransfer Completed, ConsumedBytes: %d, TotalBytes %d.\n",
event.ConsumedBytes, event.TotalBytes)
case oss.TransferFailedEvent:
fmt.Printf("\nTransfer Failed, ConsumedBytes: %d, TotalBytes %d.\n",
event.ConsumedBytes, event.TotalBytes)
default:
}
}
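// A minimal oss.config sketch inferred from the Read calls in main below
// (section and key names come from this file, assuming the INI-style layout
// used by the conf package; all values are placeholders):
//
//	[default]
//	readVariable = 0
//
//	[oss]
//	Endpoint = oss-cn-hangzhou.aliyuncs.com
//	AccessKeyId = <your-access-key-id>
//	AccessKeySecret = <your-access-key-secret>
//
//	[upload]
//	bucketName = my-bucket
//	remoteFolder = backups/
//	localFile = ./data
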
func main() {
	// Read the configuration file
myConfig := new(conf.Config)
myConfig.InitConfig("oss.config")
readVariable := myConfig.Read("default", "readVariable")
var Endpoint, AccessKeyId, AccessKeySecret, bucketName, localFile, remoteFolder string
if readVariable == "1" {
		// Read settings from environment variables
Endpoint = os.Getenv("Endpoint")
AccessKeyId = os.Getenv("AccessKeyId")
AccessKeySecret = os.Getenv("AccessKeySecret")
bucketName = os.Getenv("bucketName")
remoteFolder = os.Getenv("remoteFolder")
localFile = os.Getenv("localFile")
	} else { // read settings from the config file
Endpoint = myConfig.Read("oss", "Endpoint")
AccessKeyId = myConfig.Read("oss", "AccessKeyId")
AccessKeySecret = myConfig.Read("oss", "AccessKeySecret")
bucketName = myConfig.Read("upload", "bucketName")
remoteFolder = myConfig.Read("upload", "remoteFolder")
localFile = myConfig.Read("upload", "localFile")
}
	// Uploading to the bucket root
if remoteFolder == "/" {
remoteFolder = ""
}
	// Accept command-line arguments
flag.Parse()
para1 := flag.Arg(0)
if !isEmpty(para1) {
fmt.Printf("Input parameters: %s\n", para1);
localFile = para1
}
if isEmpty(localFile) {
fmt.Println("Please take parameters. Local file/folder path not define.")
os.Exit(-1)
}
fmt.Println("OSS Go SDK Version: ", oss.Version)
	// Create the OSS client
client, err := oss.New(Endpoint, AccessKeyId, AccessKeySecret)
if err != nil {
fmt.Println("Error:", err)
os.Exit(-1)
}
	// Get the bucket.
bucket, err := client.Bucket(bucketName)
if err != nil {
fmt.Println("Error:", err)
os.Exit(-1)
}
	// Collect the list of files to upload
fmt.Println("Need to upload file lists:")
fileList, err := getFilelist(localFile)
if err != nil {
fmt.Println(err)
os.Exit(-1)
}
for _, v := range fileList {
fmt.Println(v)
}
fmt.Println("")
	// Upload
fmt.Println("Uploading...")
for _, v := range fileList {
objectName := remoteFolder+v
localFile = v
err = bucket.PutObjectFromFile(objectName, localFile, oss.Progress(&OssProgressListener{}))
if err != nil {
fmt.Println("Error:", err)
os.Exit(-1)
}
}
}
// getFilelist walks the given path and returns all regular files under it.
func getFilelist(path string) ([]string, error) {
var fileList []string
err := filepath.Walk(path, func(path string, f os.FileInfo, err error) error {
		if f == nil {
return err
}
if f.IsDir() {
return nil
}
		fileList = append(fileList, path)
return nil
})
return fileList, err
}
// isEmpty reports whether the value is the zero value of its type.
func isEmpty(a interface{}) bool {
v := reflect.ValueOf(a)
if v.Kind() == reflect.Ptr {
		v = v.Elem()
}
return v.Interface() == reflect.Zero(v.Type()).Interface()
}
| [
"\"Endpoint\"",
"\"AccessKeyId\"",
"\"AccessKeySecret\"",
"\"bucketName\"",
"\"remoteFolder\"",
"\"localFile\""
]
| []
| [
"remoteFolder",
"bucketName",
"AccessKeyId",
"Endpoint",
"localFile",
"AccessKeySecret"
]
| [] | ["remoteFolder", "bucketName", "AccessKeyId", "Endpoint", "localFile", "AccessKeySecret"] | go | 6 | 0 | |
serve.go | // Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package main
import (
//"container/list"
"fmt"
"os"
"os/exec"
"path"
"strconv"
"strings"
"github.com/codegangsta/cli"
qlog "github.com/qiniu/log"
//"github.com/gogits/git"
"github.com/gogits/gogs/models"
"github.com/gogits/gogs/modules/base"
)
var (
COMMANDS_READONLY = map[string]int{
"git-upload-pack": models.AU_WRITABLE,
"git upload-pack": models.AU_WRITABLE,
"git-upload-archive": models.AU_WRITABLE,
}
COMMANDS_WRITE = map[string]int{
"git-receive-pack": models.AU_READABLE,
"git receive-pack": models.AU_READABLE,
}
)
var CmdServ = cli.Command{
Name: "serv",
Usage: "This command just should be called by ssh shell",
Description: `
gogs serv provides access auth for repositories`,
Action: runServ,
Flags: []cli.Flag{},
}
func newLogger(execDir string) {
logPath := execDir + "/log/serv.log"
os.MkdirAll(path.Dir(logPath), os.ModePerm)
f, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, os.ModePerm)
if err != nil {
qlog.Fatal(err)
}
qlog.SetOutput(f)
qlog.Info("Start logging serv...")
}
func parseCmd(cmd string) (string, string) {
ss := strings.SplitN(cmd, " ", 2)
if len(ss) != 2 {
return "", ""
}
verb, args := ss[0], ss[1]
if verb == "git" {
ss = strings.SplitN(args, " ", 2)
args = ss[1]
verb = fmt.Sprintf("%s %s", verb, ss[0])
}
return verb, args
}
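// For example (hypothetical input), parseCmd("git-upload-pack 'user/repo.git'")
// returns ("git-upload-pack", "'user/repo.git'"), while the two-word form
// parseCmd("git upload-pack 'user/repo.git'") is normalized to the verb
// "git upload-pack" with the same argument.
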
func In(b string, sl map[string]int) bool {
_, e := sl[b]
return e
}
func runServ(k *cli.Context) {
execDir, _ := base.ExecDir()
newLogger(execDir)
base.NewConfigContext()
models.LoadModelsConfig()
if models.UseSQLite3 {
os.Chdir(execDir)
}
models.SetEngine()
keys := strings.Split(os.Args[2], "-")
if len(keys) != 2 {
println("auth file format error")
qlog.Fatal("auth file format error")
}
keyId, err := strconv.ParseInt(keys[1], 10, 64)
if err != nil {
println("auth file format error")
qlog.Fatal("auth file format error", err)
}
user, err := models.GetUserByKeyId(keyId)
if err != nil {
println("You have no right to access")
qlog.Fatalf("SSH visit error: %v", err)
}
cmd := os.Getenv("SSH_ORIGINAL_COMMAND")
if cmd == "" {
println("Hi", user.Name, "! You've successfully authenticated, but Gogs does not provide shell access.")
return
}
verb, args := parseCmd(cmd)
repoPath := strings.Trim(args, "'")
rr := strings.SplitN(repoPath, "/", 2)
if len(rr) != 2 {
println("Unavilable repository", args)
qlog.Fatalf("Unavilable repository %v", args)
}
repoUserName := rr[0]
repoName := rr[1]
if strings.HasSuffix(repoName, ".git") {
repoName = repoName[:len(repoName)-4]
}
isWrite := In(verb, COMMANDS_WRITE)
isRead := In(verb, COMMANDS_READONLY)
repoUser, err := models.GetUserByName(repoUserName)
if err != nil {
println("You have no right to access")
qlog.Fatal("Get user failed", err)
}
// access check
switch {
case isWrite:
has, err := models.HasAccess(user.LowerName, path.Join(repoUserName, repoName), models.AU_WRITABLE)
if err != nil {
println("Inernel error:", err)
qlog.Fatal(err)
} else if !has {
println("You have no right to write this repository")
qlog.Fatalf("User %s has no right to write repository %s", user.Name, repoPath)
}
case isRead:
repo, err := models.GetRepositoryByName(repoUser.Id, repoName)
if err != nil {
println("Get repository error:", err)
qlog.Fatal("Get repository error: " + err.Error())
}
if !repo.IsPrivate {
break
}
has, err := models.HasAccess(user.Name, repoPath, models.AU_READABLE)
if err != nil {
println("Inernel error")
qlog.Fatal(err)
}
if !has {
has, err = models.HasAccess(user.Name, repoPath, models.AU_WRITABLE)
if err != nil {
println("Inernel error")
qlog.Fatal(err)
}
}
if !has {
println("You have no right to access this repository")
qlog.Fatal("You have no right to access this repository")
}
default:
println("Unknown command")
qlog.Fatal("Unknown command")
}
models.SetRepoEnvs(user.Id, user.Name, repoName)
gitcmd := exec.Command(verb, repoPath)
gitcmd.Dir = base.RepoRootPath
gitcmd.Stdout = os.Stdout
gitcmd.Stdin = os.Stdin
gitcmd.Stderr = os.Stderr
if err = gitcmd.Run(); err != nil {
println("execute command error:", err.Error())
qlog.Fatal("execute command error: " + err.Error())
}
}
| [
"\"SSH_ORIGINAL_COMMAND\""
]
| []
| [
"SSH_ORIGINAL_COMMAND"
]
| [] | ["SSH_ORIGINAL_COMMAND"] | go | 1 | 0 | |
go/test/endtoend/reparent/utils_test.go | /*
Copyright 2019 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reparent
import (
"context"
"encoding/json"
"fmt"
"os"
"os/exec"
"path"
"reflect"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"vitess.io/vitess/go/json2"
"vitess.io/vitess/go/vt/log"
querypb "vitess.io/vitess/go/vt/proto/query"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/test/endtoend/cluster"
tmc "vitess.io/vitess/go/vt/vttablet/grpctmclient"
)
var (
// ClusterInstance instance to be used for test with different params
clusterInstance *cluster.LocalProcessCluster
tmClient *tmc.Client
keyspaceName = "ks"
dbName = "vt_" + keyspaceName
username = "vt_dba"
hostname = "localhost"
insertSQL = "insert into vt_insert_test(id, msg) values (%d, 'test %d')"
sqlSchema = `
create table vt_insert_test (
id bigint,
msg varchar(64),
primary key (id)
) Engine=InnoDB
`
)
//region cluster setup/teardown
func setupRangeBasedCluster(ctx context.Context, t *testing.T) {
tablets := setupCluster(ctx, t, shardName, []string{cell1}, []int{2})
masterTablet, replicaTablet = tablets[0], tablets[1]
}
func setupReparentCluster(t *testing.T) {
tablets := setupCluster(context.Background(), t, shardName, []string{cell1, cell2}, []int{3, 1})
tab1, tab2, tab3, tab4 = tablets[0], tablets[1], tablets[2], tablets[3]
}
func teardownCluster() {
clusterInstance.Teardown()
}
func setupCluster(ctx context.Context, t *testing.T, shardName string, cells []string, numTablets []int) []*cluster.Vttablet {
var tablets []*cluster.Vttablet
clusterInstance = cluster.NewCluster(cells[0], hostname)
keyspace := &cluster.Keyspace{Name: keyspaceName}
// Start topo server
err := clusterInstance.StartTopo()
if err != nil {
t.Fatalf("Error starting topo: %s", err.Error())
}
err = clusterInstance.TopoProcess.ManageTopoDir("mkdir", "/vitess/"+cells[0])
if err != nil {
t.Fatalf("Error managing topo: %s", err.Error())
}
numCell := 1
for numCell < len(cells) {
err = clusterInstance.VtctlProcess.AddCellInfo(cells[numCell])
if err != nil {
t.Fatalf("Error managing topo: %s", err.Error())
}
numCell++
}
// Adding another cell in the same cluster
numCell = 0
for numCell < len(cells) {
i := 0
for i < numTablets[numCell] {
i++
tablet := clusterInstance.NewVttabletInstance("replica", 100*(numCell+1)+i, cells[numCell])
tablets = append(tablets, tablet)
}
numCell++
}
shard := &cluster.Shard{Name: shardName}
shard.Vttablets = tablets
clusterInstance.VtTabletExtraArgs = []string{
"-lock_tables_timeout", "5s",
"-enable_semi_sync",
"-track_schema_versions=true",
}
// Initialize Cluster
err = clusterInstance.SetupCluster(keyspace, []cluster.Shard{*shard})
if err != nil {
t.Fatalf("Cannot launch cluster: %s", err.Error())
}
//Start MySql
var mysqlCtlProcessList []*exec.Cmd
for _, shard := range clusterInstance.Keyspaces[0].Shards {
for _, tablet := range shard.Vttablets {
log.Infof("Starting MySql for tablet %v", tablet.Alias)
proc, err := tablet.MysqlctlProcess.StartProcess()
if err != nil {
t.Fatalf("Error starting start mysql: %s", err.Error())
}
mysqlCtlProcessList = append(mysqlCtlProcessList, proc)
}
}
// Wait for mysql processes to start
for _, proc := range mysqlCtlProcessList {
if err := proc.Wait(); err != nil {
t.Fatalf("Error starting mysql: %s", err.Error())
}
}
// create tablet manager client
tmClient = tmc.NewClient()
setupShard(ctx, t, shardName, tablets)
return tablets
}
func setupShard(ctx context.Context, t *testing.T, shardName string, tablets []*cluster.Vttablet) {
for _, tablet := range tablets {
// create database
err := tablet.VttabletProcess.CreateDB(keyspaceName)
require.NoError(t, err)
// Start the tablet
err = tablet.VttabletProcess.Setup()
require.NoError(t, err)
}
for _, tablet := range tablets {
err := tablet.VttabletProcess.WaitForTabletTypes([]string{"SERVING", "NOT_SERVING"})
require.NoError(t, err)
}
// Force the replica to reparent assuming that all the datasets are identical.
err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardMaster",
"-force", fmt.Sprintf("%s/%s", keyspaceName, shardName), tablets[0].Alias)
require.NoError(t, err)
validateTopology(t, true)
// create Tables
runSQL(ctx, t, sqlSchema, tablets[0])
checkMasterTablet(t, tablets[0])
validateTopology(t, false)
time.Sleep(100 * time.Millisecond) // wait for replication to catchup
strArray := getShardReplicationPositions(t, keyspaceName, shardName, true)
assert.Equal(t, len(tablets), len(strArray))
assert.Contains(t, strArray[0], "master") // master first
}
//endregion
//region database queries
func getMysqlConnParam(tablet *cluster.Vttablet) mysql.ConnParams {
connParams := mysql.ConnParams{
Uname: username,
DbName: dbName,
UnixSocket: path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d/mysql.sock", tablet.TabletUID)),
}
return connParams
}
func runSQL(ctx context.Context, t *testing.T, sql string, tablet *cluster.Vttablet) *sqltypes.Result {
tabletParams := getMysqlConnParam(tablet)
conn, err := mysql.Connect(ctx, &tabletParams)
require.Nil(t, err)
defer conn.Close()
return execute(t, conn, sql)
}
func execute(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result {
t.Helper()
qr, err := conn.ExecuteFetch(query, 1000, true)
require.Nil(t, err)
return qr
}
//endregion
// region prs/ers
func prs(t *testing.T, tab *cluster.Vttablet) (string, error) {
return prsWithTimeout(t, tab, false, "", "")
}
func prsAvoid(t *testing.T, tab *cluster.Vttablet) (string, error) {
return prsWithTimeout(t, tab, true, "", "")
}
func prsWithTimeout(t *testing.T, tab *cluster.Vttablet, avoid bool, actionTimeout, waitTimeout string) (string, error) {
args := []string{
"PlannedReparentShard",
"-keyspace_shard", fmt.Sprintf("%s/%s", keyspaceName, shardName)}
if actionTimeout != "" {
args = append(args, "-action_timeout", actionTimeout)
}
if waitTimeout != "" {
args = append(args, "-wait_replicas_timeout", waitTimeout)
}
if avoid {
args = append(args, "-avoid_master")
} else {
args = append(args, "-new_master")
}
args = append(args, tab.Alias)
out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(args...)
return out, err
}
func ers(t *testing.T, tab *cluster.Vttablet, timeout string) (string, error) {
return ersIgnoreTablet(t, tab, timeout, nil)
}
func ersIgnoreTablet(t *testing.T, tab *cluster.Vttablet, timeout string, tabToIgnore *cluster.Vttablet) (string, error) {
args := []string{"EmergencyReparentShard", "-keyspace_shard", fmt.Sprintf("%s/%s", keyspaceName, shardName)}
if tab != nil {
args = append(args, "-new_master", tab.Alias)
}
if timeout != "" {
args = append(args, "-wait_replicas_timeout", "30s")
}
if tabToIgnore != nil {
args = append(args, "-ignore_replicas", tabToIgnore.Alias)
}
return clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(args...)
}
func checkReparentFromOutside(t *testing.T, tablet *cluster.Vttablet, downMaster bool, baseTime int64) {
result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetShardReplication", cell1, keyspaceShard)
require.Nil(t, err, "error should be Nil")
if !downMaster {
assertNodeCount(t, result, int(3))
} else {
assertNodeCount(t, result, int(2))
}
// make sure the master status page says it's the master
status := tablet.VttabletProcess.GetStatus()
assert.Contains(t, status, "Tablet Type: MASTER")
// make sure the master health stream says it's the master too
// (health check is disabled on these servers, force it first)
err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", tablet.Alias)
require.NoError(t, err)
streamHealth, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
"VtTabletStreamHealth",
"-count", "1", tablet.Alias)
require.NoError(t, err)
var streamHealthResponse querypb.StreamHealthResponse
err = json.Unmarshal([]byte(streamHealth), &streamHealthResponse)
require.NoError(t, err)
assert.Equal(t, streamHealthResponse.Target.TabletType, topodatapb.TabletType_MASTER)
assert.True(t, streamHealthResponse.TabletExternallyReparentedTimestamp >= baseTime)
}
// endregion
// region validations
func validateTopology(t *testing.T, pingTablets bool) {
args := []string{"Validate"}
if pingTablets {
args = append(args, "-ping-tablets=true")
}
out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(args...)
require.Empty(t, out)
require.NoError(t, err)
}
func confirmReplication(t *testing.T, master *cluster.Vttablet, replicas []*cluster.Vttablet) {
ctx := context.Background()
n := 2 // random value ...
// insert data into the new master, check the connected replica work
insertSQL := fmt.Sprintf(insertSQL, n, n)
runSQL(ctx, t, insertSQL, master)
time.Sleep(100 * time.Millisecond)
for _, tab := range replicas {
err := checkInsertedValues(ctx, t, tab, n)
require.NoError(t, err)
}
}
func confirmOldMasterIsHangingAround(t *testing.T) {
out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("Validate")
require.Error(t, err)
require.Contains(t, out, "already has master")
}
// Waits for tablet B to catch up to the replication position of tablet A.
func waitForReplicationPosition(t *testing.T, tabletA *cluster.Vttablet, tabletB *cluster.Vttablet) error {
posA, _ := cluster.GetMasterPosition(t, *tabletA, hostname)
timeout := time.Now().Add(5 * time.Second)
for time.Now().Before(timeout) {
posB, _ := cluster.GetMasterPosition(t, *tabletB, hostname)
if positionAtLeast(t, tabletB, posA, posB) {
return nil
}
time.Sleep(100 * time.Millisecond)
}
return fmt.Errorf("failed to catch up on replication position")
}
func positionAtLeast(t *testing.T, tablet *cluster.Vttablet, a string, b string) bool {
isAtleast := false
val, err := tablet.MysqlctlProcess.ExecuteCommandWithOutput("position", "at_least", a, b)
require.NoError(t, err)
if strings.Contains(val, "true") {
isAtleast = true
}
return isAtleast
}
func assertNodeCount(t *testing.T, result string, want int) {
resultMap := make(map[string]interface{})
err := json.Unmarshal([]byte(result), &resultMap)
require.NoError(t, err)
nodes := reflect.ValueOf(resultMap["nodes"])
got := nodes.Len()
assert.Equal(t, want, got)
}
func checkDBvar(ctx context.Context, t *testing.T, tablet *cluster.Vttablet, variable string, status string) {
tabletParams := getMysqlConnParam(tablet)
conn, err := mysql.Connect(ctx, &tabletParams)
require.NoError(t, err)
defer conn.Close()
qr := execute(t, conn, fmt.Sprintf("show variables like '%s'", variable))
got := fmt.Sprintf("%v", qr.Rows)
want := fmt.Sprintf("[[VARCHAR(\"%s\") VARCHAR(\"%s\")]]", variable, status)
assert.Equal(t, want, got)
}
func checkDBstatus(ctx context.Context, t *testing.T, tablet *cluster.Vttablet, variable string, status string) {
tabletParams := getMysqlConnParam(tablet)
conn, err := mysql.Connect(ctx, &tabletParams)
require.NoError(t, err)
defer conn.Close()
qr := execute(t, conn, fmt.Sprintf("show status like '%s'", variable))
got := fmt.Sprintf("%v", qr.Rows)
want := fmt.Sprintf("[[VARCHAR(\"%s\") VARCHAR(\"%s\")]]", variable, status)
assert.Equal(t, want, got)
}
func checkReplicaStatus(ctx context.Context, t *testing.T, tablet *cluster.Vttablet) {
qr := runSQL(ctx, t, "show slave status", tablet)
	IOThreadRunning := fmt.Sprintf("%v", qr.Rows[0][10])  // Slave_IO_Running
	SQLThreadRunning := fmt.Sprintf("%v", qr.Rows[0][11]) // Slave_SQL_Running
assert.Equal(t, IOThreadRunning, "VARCHAR(\"No\")")
assert.Equal(t, SQLThreadRunning, "VARCHAR(\"No\")")
}
// Makes sure the tablet type is master, and its health check agrees.
func checkMasterTablet(t *testing.T, tablet *cluster.Vttablet) {
result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", tablet.Alias)
require.NoError(t, err)
var tabletInfo topodatapb.Tablet
err = json2.Unmarshal([]byte(result), &tabletInfo)
require.NoError(t, err)
assert.Equal(t, topodatapb.TabletType_MASTER, tabletInfo.GetType())
// make sure the health stream is updated
result, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("VtTabletStreamHealth", "-count", "1", tablet.Alias)
require.NoError(t, err)
var streamHealthResponse querypb.StreamHealthResponse
err = json2.Unmarshal([]byte(result), &streamHealthResponse)
require.NoError(t, err)
assert.True(t, streamHealthResponse.GetServing())
tabletType := streamHealthResponse.GetTarget().GetTabletType()
assert.Equal(t, topodatapb.TabletType_MASTER, tabletType)
}
// isHealthyMasterTablet will return if tablet is master AND healthy.
func isHealthyMasterTablet(t *testing.T, tablet *cluster.Vttablet) bool {
result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", tablet.Alias)
require.Nil(t, err)
var tabletInfo topodatapb.Tablet
err = json2.Unmarshal([]byte(result), &tabletInfo)
require.Nil(t, err)
if tabletInfo.GetType() != topodatapb.TabletType_MASTER {
return false
}
// make sure the health stream is updated
result, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("VtTabletStreamHealth", "-count", "1", tablet.Alias)
require.Nil(t, err)
var streamHealthResponse querypb.StreamHealthResponse
err = json2.Unmarshal([]byte(result), &streamHealthResponse)
require.Nil(t, err)
assert.True(t, streamHealthResponse.GetServing())
tabletType := streamHealthResponse.GetTarget().GetTabletType()
return tabletType == topodatapb.TabletType_MASTER
}
func checkInsertedValues(ctx context.Context, t *testing.T, tablet *cluster.Vttablet, index int) error {
// wait until it gets the data
timeout := time.Now().Add(5 * time.Second)
i := 0
for time.Now().Before(timeout) {
selectSQL := fmt.Sprintf("select msg from vt_insert_test where id=%d", index)
qr := runSQL(ctx, t, selectSQL, tablet)
if len(qr.Rows) == 1 {
return nil
}
		backoff := time.Duration(300*i) * time.Millisecond
		time.Sleep(backoff)
i++
}
return fmt.Errorf("data is not yet replicated on tablet %s", tablet.Alias)
}
// endregion
// region tablet operations
func stopTablet(t *testing.T, tab *cluster.Vttablet, stopDatabase bool) {
err := tab.VttabletProcess.TearDown()
require.NoError(t, err)
if stopDatabase {
err = tab.MysqlctlProcess.Stop()
require.NoError(t, err)
}
}
func restartTablet(t *testing.T, tab *cluster.Vttablet) {
tab.MysqlctlProcess.InitMysql = false
err := tab.MysqlctlProcess.Start()
require.NoError(t, err)
err = clusterInstance.VtctlclientProcess.InitTablet(tab, tab.Cell, keyspaceName, hostname, shardName)
require.NoError(t, err)
}
func resurrectTablet(ctx context.Context, t *testing.T, tab *cluster.Vttablet) {
tab.MysqlctlProcess.InitMysql = false
err := tab.MysqlctlProcess.Start()
require.NoError(t, err)
err = clusterInstance.VtctlclientProcess.InitTablet(tab, tab.Cell, keyspaceName, hostname, shardName)
require.NoError(t, err)
	// Since there is already a master, the new replica will come up directly in SERVING state.
	tab.VttabletProcess.ServingStatus = "SERVING"
// Start the tablet
err = tab.VttabletProcess.Setup()
require.NoError(t, err)
err = checkInsertedValues(ctx, t, tab, 2)
require.NoError(t, err)
}
func deleteTablet(t *testing.T, tab *cluster.Vttablet) {
err := clusterInstance.VtctlclientProcess.ExecuteCommand(
"DeleteTablet",
"-allow_master",
tab.Alias)
require.NoError(t, err)
}
// endregion
// region get info
func getNewMaster(t *testing.T) *cluster.Vttablet {
var newMaster *cluster.Vttablet
for _, tablet := range []*cluster.Vttablet{tab2, tab3, tab4} {
if isHealthyMasterTablet(t, tablet) {
newMaster = tablet
break
}
}
require.NotNil(t, newMaster)
return newMaster
}
func getShardReplicationPositions(t *testing.T, keyspaceName, shardName string, doPrint bool) []string {
output, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(
"ShardReplicationPositions", fmt.Sprintf("%s/%s", keyspaceName, shardName))
require.NoError(t, err)
strArray := strings.Split(output, "\n")
if strArray[len(strArray)-1] == "" {
strArray = strArray[:len(strArray)-1] // Truncate slice, remove empty line
}
if doPrint {
log.Infof("Positions:")
for _, pos := range strArray {
log.Infof("\t%s", pos)
}
}
return strArray
}
// endregion
| [
"\"VTDATAROOT\""
]
| []
| [
"VTDATAROOT"
]
| [] | ["VTDATAROOT"] | go | 1 | 0 | |
src/ga4gh/core/_internal/identifiers.py | """serializes, digests, and identifies GA4GH objects
In GA4GH schemas with nested objects, serialize, digest, and identify
are entangled.
For example, here is a call path for ga4gh_identify called on an Allele:
ga4gh_identify(allele)
+ ga4gh_digest(allele)
++ ga4gh_serialize(allele)
+++ ga4gh_digest(allele.location)
++++ ga4gh_serialize(allele.location)
+++ ga4gh_serialize(allele.state)
For that reason, they are implemented here in one file.
"""
import logging
import os
import re
from canonicaljson import encode_canonical_json
import pkg_resources
import yaml
from .digests import sha512t24u
from .jsonschema import is_array, is_pjs_instance, is_curie_type, is_identifiable, is_literal
__all__ = "ga4gh_digest ga4gh_identify ga4gh_serialize is_ga4gh_identifier parse_ga4gh_identifier".split()
_logger = logging.getLogger(__name__)
# Assume that ga4gh.yaml and vrs.yaml files are in the same directory for now
schema_dir = os.environ.get("VRS_SCHEMA_DIR", pkg_resources.resource_filename(__name__, "data/schema"))
cfg = yaml.safe_load(open(schema_dir + "/ga4gh.yaml"))
type_prefix_map_default = cfg["identifiers"]["type_prefix_map"]
namespace = cfg["identifiers"]["namespace"]
curie_sep = cfg["identifiers"]["curie_sep"]
ref_sep = cfg["identifiers"]["ref_sep"]
ga4gh_ir_regexp = re.compile(cfg["identifiers"]["regexp"])
ns_w_sep = namespace + curie_sep
def is_ga4gh_identifier(ir):
"""
>>> is_ga4gh_identifier("ga4gh:SQ.0123abcd")
True
>>> is_ga4gh_identifier("refseq:NM_01234.5")
False
>>> is_ga4gh_identifier(None)
False
"""
return str(ir).startswith(ns_w_sep)
def parse_ga4gh_identifier(ir):
"""
Parses a GA4GH identifier, returning a dict with type and digest components
>>> parse_ga4gh_identifier("ga4gh:SQ.0123abcd")
{'type': 'SQ', 'digest': '0123abcd'}
>>> parse_ga4gh_identifier("notga4gh:SQ.0123abcd")
Traceback (most recent call last):
...
ValueError: notga4gh:SQ.0123abcd
"""
try:
return ga4gh_ir_regexp.match(str(ir)).groupdict()
except AttributeError as e:
raise ValueError(ir) from e
def ga4gh_identify(vro, type_prefix_map=None):
"""return the GA4GH digest-based id for the object, as a CURIE
(string). Returns None if object is not identifiable.
>>> import ga4gh.vrs
>>> ival = ga4gh.vrs.models.SimpleInterval(start=44908821, end=44908822)
>>> location = ga4gh.vrs.models.Location(sequence_id="ga4gh:SQ.IIB53T8CNeJJdUqzn9V_JnRtQadwWCbl", interval=ival)
>>> ga4gh_identify(location)
'ga4gh:VSL.u5fspwVbQ79QkX6GHLF8tXPCAXFJqRPx'
"""
if type_prefix_map is None:
type_prefix_map = type_prefix_map_default
try:
pfx = type_prefix_map[vro.type]
except KeyError:
_logger.debug("No identifier prefix is defined for %s; check ga4gh.yaml", vro.type)
return None
digest = ga4gh_digest(vro)
ir = f"{namespace}{curie_sep}{pfx}{ref_sep}{digest}"
return ir
def ga4gh_digest(vro):
"""return the GA4GH digest for the object
>>> import ga4gh.vrs
>>> ival = ga4gh.vrs.models.SimpleInterval(start=44908821, end=44908822)
>>> location = ga4gh.vrs.models.Location(sequence_id="ga4gh:SQ.IIB53T8CNeJJdUqzn9V_JnRtQadwWCbl", interval=ival)
>>> ga4gh_digest(location)
'u5fspwVbQ79QkX6GHLF8tXPCAXFJqRPx'
"""
assert is_identifiable(vro), "ga4gh_digest called with non-identifiable object"
return sha512t24u(ga4gh_serialize(vro))
def ga4gh_serialize(vro):
"""serialize object into a canonical format
Briefly:
* format is json
* keys sorted in unicode order (=ascii order for our use)
* no "insignificant" whitespace, as defined in rfc7159§2
* MUST use two-char escapes when available, as defined in rfc7159§7
* UTF-8 encoded
* nested identifiable objects are replaced by their identifiers
    * arrays of identifiers are sorted lexicographically
These requirements are a distillation of several proposals which
have not yet been ratified.
>>> import ga4gh.vrs
>>> ival = ga4gh.vrs.models.SimpleInterval(start=44908821, end=44908822)
>>> location = ga4gh.vrs.models.Location(sequence_id="ga4gh:SQ.IIB53T8CNeJJdUqzn9V_JnRtQadwWCbl", interval=ival)
>>> ga4gh_serialize(location)
b'{"interval":{"end":44908822,...,"type":"SequenceLocation"}'
"""
def dictify(vro, enref=True):
"""recursively converts (any) object to dictionary prior to
serialization
enref: if True, replace nested identifiable objects with
digests ("enref" is opposite of "de-ref")
"""
if vro is None: # pragma: no cover
return None
if is_literal(vro):
v = vro._value
if is_curie_type(vro):
if is_ga4gh_identifier(v):
# CURIEs are stripped to just the digest so that digests are independent of type prefixes
v = v.split(ref_sep, 1)[1]
return v
if isinstance(vro, str):
v = vro
if is_ga4gh_identifier(v):
v = v.split(ref_sep, 1)[1]
return v
if is_pjs_instance(vro):
if is_identifiable(vro) and enref:
return ga4gh_digest(vro)
d = {k: dictify(vro[k], enref=True)
for k in vro
if not (k.startswith("_") or vro[k] is None)}
return d
if is_array(vro):
if is_curie_type(vro[0]):
return sorted(dictify(o) for o in vro.data)
return [dictify(o) for o in vro.typed_elems]
raise ValueError(f"Don't know how to serialize {vro}") # pragma: no cover
# The canonicaljson package does everything we want. Use that with
# the hope that it will be upward compatible with a future
# ratified proposal for json canonicalization.
#
# The following alternative does the same thing for our use case.
# It's included here as an outline for anyone implementing in
# another language. (canonicaljson escapes unicode characters, as
# required by VRS, but this doesn't apply to any known uses so
# these are equivalent.)
# >> import json
# >> def cjdump(a):
# >> return json.dumps(a, sort_keys=True, separators=(',',':'),
# indent=None).encode("utf-8")
vro_dict = dictify(vro, enref=False)
return encode_canonical_json(vro_dict)
| []
| []
| [
"VRS_SCHEMA_DIR"
]
| [] | ["VRS_SCHEMA_DIR"] | python | 1 | 0 | |
examples/id/generate/generateAUniqueId/main.go | package main
import (
"fmt"
"os"
"go.m3o.com"
"go.m3o.com/id"
)
func main() {
client := m3o.New(os.Getenv("M3O_API_TOKEN"))
rsp, err := client.Id.Generate(&id.GenerateRequest{
Type: "uuid",
})
fmt.Println(rsp, err)
}
| [
"\"M3O_API_TOKEN\""
]
| []
| [
"M3O_API_TOKEN"
]
| [] | ["M3O_API_TOKEN"] | go | 1 | 0 | |
src/test/java/com/appslandia/common/utils/StringFormatTest.java | // The MIT License (MIT)
// Copyright © 2015 AppsLandia. All rights reserved.
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package com.appslandia.common.utils;
import org.junit.Assert;
import org.junit.Test;
import com.appslandia.common.base.Params;
/**
*
* @author <a href="mailto:[email protected]">Loc Ha</a>
*
*/
public class StringFormatTest {
@Test
public void test_Map() {
String msg = StringFormat.format("this is ${p1} and ${p2}", new Params().set("p1", "v1"));
Assert.assertEquals(msg, "this is v1 and ${}");
msg = StringFormat.format("this is ${p1} and ${p2}", new Params().set("p1", "v1").set("p2", "v2"));
Assert.assertEquals(msg, "this is v1 and v2");
msg = StringFormat.format("this is ${p1} and ${p2}", new Params().set("p1", "v1").set("p2", null));
Assert.assertEquals(msg, "this is v1 and null");
}
@Test
public void test_Array() {
String msg = StringFormat.format("this is ${0} and ${1}", "v1");
Assert.assertEquals(msg, "this is v1 and ${}");
msg = StringFormat.format("this is ${0} and ${1}", "v1", "v2");
Assert.assertEquals(msg, "this is v1 and v2");
msg = StringFormat.format("this is ${0} and ${1}", "v1", null);
Assert.assertEquals(msg, "this is v1 and null");
}
@Test
public void test_ENV() {
if (System.getenv("TEMP") == null) {
return;
}
String dir = StringFormat.format("Temp Dir=${env.TEMP}");
Assert.assertNotNull(dir);
Assert.assertFalse(dir.contains("${env.TEMP}"));
}
@Test
public void test_missingENV() {
String dir = StringFormat.format("Temp Dir=${env.UPLOAD_TEMP}");
Assert.assertNotNull(dir);
Assert.assertEquals(dir, "Temp Dir=${}");
}
@Test
public void test_fmt() {
String msg = StringFormat.fmt("this is {} and {}", "v1");
Assert.assertEquals(msg, "this is v1 and {}");
msg = StringFormat.fmt("this is {} and {}", "v1", "v2");
Assert.assertEquals(msg, "this is v1 and v2");
msg = StringFormat.fmt("this is {} and {}", "v1", null);
Assert.assertEquals(msg, "this is v1 and null");
}
}
| [
"\"TEMP\""
]
| []
| [
"TEMP"
]
| [] | ["TEMP"] | java | 1 | 0 | |
client/grpc/grpc.go | // Package grpc provides a gRPC client
package grpc
import (
"bytes"
"context"
"crypto/tls"
"fmt"
"os"
"sync"
"time"
"github.com/micro/go-micro/broker"
"github.com/micro/go-micro/client"
"github.com/micro/go-micro/codec"
"github.com/micro/go-micro/errors"
"github.com/micro/go-micro/metadata"
"github.com/micro/go-micro/registry"
"github.com/micro/go-micro/selector"
"github.com/micro/go-micro/transport"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/encoding"
gmetadata "google.golang.org/grpc/metadata"
)
type grpcClient struct {
once sync.Once
opts client.Options
pool *pool
}
func init() {
	encoding.RegisterCodec(wrapCodec{jsonCodec{}})
	encoding.RegisterCodec(wrapCodec{bytesCodec{}})
}
// secure returns the dial option for whether it's a secure or insecure connection
func (g *grpcClient) secure() grpc.DialOption {
if g.opts.Context != nil {
if v := g.opts.Context.Value(tlsAuth{}); v != nil {
tls := v.(*tls.Config)
creds := credentials.NewTLS(tls)
return grpc.WithTransportCredentials(creds)
}
}
return grpc.WithInsecure()
}
func (g *grpcClient) next(request client.Request, opts client.CallOptions) (selector.Next, error) {
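	// Resolution order: MICRO_PROXY (a service name) and MICRO_PROXY_ADDRESS
	// (a fixed address) take precedence over selector-based discovery.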
service := request.Service()
// get proxy
if prx := os.Getenv("MICRO_PROXY"); len(prx) > 0 {
service = prx
}
// get proxy address
if prx := os.Getenv("MICRO_PROXY_ADDRESS"); len(prx) > 0 {
opts.Address = prx
}
// return remote address
if len(opts.Address) > 0 {
return func() (*registry.Node, error) {
return ®istry.Node{
Address: opts.Address,
}, nil
}, nil
}
// get next nodes from the selector
next, err := g.opts.Selector.Select(service, opts.SelectOptions...)
if err != nil && err == selector.ErrNotFound {
return nil, errors.NotFound("go.micro.client", err.Error())
} else if err != nil {
return nil, errors.InternalServerError("go.micro.client", err.Error())
}
return next, nil
}
func (g *grpcClient) call(ctx context.Context, node *registry.Node, req client.Request, rsp interface{}, opts client.CallOptions) error {
address := node.Address
if node.Port > 0 {
address = fmt.Sprintf("%s:%d", address, node.Port)
}
header := make(map[string]string)
if md, ok := metadata.FromContext(ctx); ok {
for k, v := range md {
header[k] = v
}
}
// set timeout in nanoseconds
header["timeout"] = fmt.Sprintf("%d", opts.RequestTimeout)
// set the content type for the request
header["x-content-type"] = req.ContentType()
md := gmetadata.New(header)
ctx = gmetadata.NewOutgoingContext(ctx, md)
cf, err := g.newGRPCCodec(req.ContentType())
if err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
}
maxRecvMsgSize := g.maxRecvMsgSizeValue()
maxSendMsgSize := g.maxSendMsgSizeValue()
var grr error
cc, err := g.pool.getConn(address, grpc.WithDefaultCallOptions(grpc.ForceCodec(cf)),
grpc.WithTimeout(opts.DialTimeout), g.secure(),
grpc.WithDefaultCallOptions(
grpc.MaxCallRecvMsgSize(maxRecvMsgSize),
grpc.MaxCallSendMsgSize(maxSendMsgSize),
))
if err != nil {
return errors.InternalServerError("go.micro.client", fmt.Sprintf("Error sending request: %v", err))
}
defer func() {
// defer execution of release
g.pool.release(address, cc, grr)
}()
ch := make(chan error, 1)
go func() {
err := cc.Invoke(ctx, methodToGRPC(req.Service(), req.Endpoint()), req.Body(), rsp, grpc.ForceCodec(cf))
ch <- microError(err)
}()
select {
case err := <-ch:
grr = err
case <-ctx.Done():
grr = ctx.Err()
}
return grr
}
func (g *grpcClient) stream(ctx context.Context, node *registry.Node, req client.Request, opts client.CallOptions) (client.Stream, error) {
address := node.Address
if node.Port > 0 {
address = fmt.Sprintf("%s:%d", address, node.Port)
}
header := make(map[string]string)
if md, ok := metadata.FromContext(ctx); ok {
for k, v := range md {
header[k] = v
}
}
// set timeout in nanoseconds
header["timeout"] = fmt.Sprintf("%d", opts.RequestTimeout)
// set the content type for the request
header["x-content-type"] = req.ContentType()
md := gmetadata.New(header)
ctx = gmetadata.NewOutgoingContext(ctx, md)
cf, err := g.newGRPCCodec(req.ContentType())
if err != nil {
return nil, errors.InternalServerError("go.micro.client", err.Error())
}
var dialCtx context.Context
var cancel context.CancelFunc
if opts.DialTimeout >= 0 {
dialCtx, cancel = context.WithTimeout(ctx, opts.DialTimeout)
} else {
dialCtx, cancel = context.WithCancel(ctx)
}
defer cancel()
wc := wrapCodec{cf}
cc, err := grpc.DialContext(dialCtx, address, grpc.WithDefaultCallOptions(grpc.ForceCodec(wc)), g.secure())
if err != nil {
return nil, errors.InternalServerError("go.micro.client", fmt.Sprintf("Error sending request: %v", err))
}
desc := &grpc.StreamDesc{
StreamName: req.Service() + req.Endpoint(),
ClientStreams: true,
ServerStreams: true,
}
st, err := cc.NewStream(ctx, desc, methodToGRPC(req.Service(), req.Endpoint()))
if err != nil {
return nil, errors.InternalServerError("go.micro.client", fmt.Sprintf("Error creating stream: %v", err))
}
codec := &grpcCodec{
s: st,
c: wc,
}
// set request codec
if r, ok := req.(*grpcRequest); ok {
r.codec = codec
}
rsp := &response{
conn: cc,
stream: st,
codec: cf,
gcodec: codec,
}
return &grpcStream{
context: ctx,
request: req,
response: rsp,
stream: st,
conn: cc,
}, nil
}
func (g *grpcClient) maxRecvMsgSizeValue() int {
if g.opts.Context == nil {
return DefaultMaxRecvMsgSize
}
v := g.opts.Context.Value(maxRecvMsgSizeKey{})
if v == nil {
return DefaultMaxRecvMsgSize
}
return v.(int)
}
func (g *grpcClient) maxSendMsgSizeValue() int {
if g.opts.Context == nil {
return DefaultMaxSendMsgSize
}
v := g.opts.Context.Value(maxSendMsgSizeKey{})
if v == nil {
return DefaultMaxSendMsgSize
}
return v.(int)
}
func (g *grpcClient) newGRPCCodec(contentType string) (encoding.Codec, error) {
codecs := make(map[string]encoding.Codec)
if g.opts.Context != nil {
if v := g.opts.Context.Value(codecsKey{}); v != nil {
codecs = v.(map[string]encoding.Codec)
}
}
if c, ok := codecs[contentType]; ok {
return wrapCodec{c}, nil
}
if c, ok := defaultGRPCCodecs[contentType]; ok {
return wrapCodec{c}, nil
}
return nil, fmt.Errorf("Unsupported Content-Type: %s", contentType)
}
func (g *grpcClient) newCodec(contentType string) (codec.NewCodec, error) {
if c, ok := g.opts.Codecs[contentType]; ok {
return c, nil
}
if cf, ok := defaultRPCCodecs[contentType]; ok {
return cf, nil
}
return nil, fmt.Errorf("Unsupported Content-Type: %s", contentType)
}
func (g *grpcClient) Init(opts ...client.Option) error {
size := g.opts.PoolSize
ttl := g.opts.PoolTTL
for _, o := range opts {
o(&g.opts)
}
// update pool configuration if the options changed
if size != g.opts.PoolSize || ttl != g.opts.PoolTTL {
g.pool.Lock()
g.pool.size = g.opts.PoolSize
g.pool.ttl = int64(g.opts.PoolTTL.Seconds())
g.pool.Unlock()
}
return nil
}
func (g *grpcClient) Options() client.Options {
return g.opts
}
func (g *grpcClient) NewMessage(topic string, msg interface{}, opts ...client.MessageOption) client.Message {
return newGRPCPublication(topic, msg, g.opts.ContentType, opts...)
}
func (g *grpcClient) NewRequest(service, method string, req interface{}, reqOpts ...client.RequestOption) client.Request {
return newGRPCRequest(service, method, req, g.opts.ContentType, reqOpts...)
}
func (g *grpcClient) Call(ctx context.Context, req client.Request, rsp interface{}, opts ...client.CallOption) error {
// make a copy of call opts
callOpts := g.opts.CallOptions
for _, opt := range opts {
opt(&callOpts)
}
next, err := g.next(req, callOpts)
if err != nil {
return err
}
// check if we already have a deadline
d, ok := ctx.Deadline()
if !ok {
// no deadline so we create a new one
ctx, _ = context.WithTimeout(ctx, callOpts.RequestTimeout)
} else {
// got a deadline so no need to setup context
// but we need to set the timeout we pass along
opt := client.WithRequestTimeout(time.Until(d))
opt(&callOpts)
}
// should we noop right here?
select {
case <-ctx.Done():
return errors.New("go.micro.client", fmt.Sprintf("%v", ctx.Err()), 408)
default:
}
// make copy of call method
gcall := g.call
// wrap the call in reverse
for i := len(callOpts.CallWrappers); i > 0; i-- {
gcall = callOpts.CallWrappers[i-1](gcall)
}
// return errors.New("go.micro.client", "request timeout", 408)
call := func(i int) error {
// call backoff first. Someone may want an initial start delay
t, err := callOpts.Backoff(ctx, req, i)
if err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
}
// only sleep if greater than 0
if t.Seconds() > 0 {
time.Sleep(t)
}
// select next node
node, err := next()
if err != nil && err == selector.ErrNotFound {
return errors.NotFound("go.micro.client", err.Error())
} else if err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
}
// make the call
err = gcall(ctx, node, req, rsp, callOpts)
g.opts.Selector.Mark(req.Service(), node, err)
return err
}
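	// Retry loop: each attempt re-runs backoff and node selection; the
	// buffered channel lets abandoned attempts finish without blocking.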
ch := make(chan error, callOpts.Retries+1)
var gerr error
for i := 0; i <= callOpts.Retries; i++ {
go func() {
ch <- call(i)
}()
select {
case <-ctx.Done():
return errors.New("go.micro.client", fmt.Sprintf("%v", ctx.Err()), 408)
case err := <-ch:
// if the call succeeded lets bail early
if err == nil {
return nil
}
retry, rerr := callOpts.Retry(ctx, req, i, err)
if rerr != nil {
return rerr
}
if !retry {
return err
}
gerr = err
}
}
return gerr
}
func (g *grpcClient) Stream(ctx context.Context, req client.Request, opts ...client.CallOption) (client.Stream, error) {
// make a copy of call opts
callOpts := g.opts.CallOptions
for _, opt := range opts {
opt(&callOpts)
}
next, err := g.next(req, callOpts)
if err != nil {
return nil, err
}
// #200 - streams shouldn't have a request timeout set on the context
// should we noop right here?
select {
case <-ctx.Done():
return nil, errors.New("go.micro.client", fmt.Sprintf("%v", ctx.Err()), 408)
default:
}
call := func(i int) (client.Stream, error) {
// call backoff first. Someone may want an initial start delay
t, err := callOpts.Backoff(ctx, req, i)
if err != nil {
return nil, errors.InternalServerError("go.micro.client", err.Error())
}
// only sleep if greater than 0
if t.Seconds() > 0 {
time.Sleep(t)
}
node, err := next()
if err != nil && err == selector.ErrNotFound {
return nil, errors.NotFound("go.micro.client", err.Error())
} else if err != nil {
return nil, errors.InternalServerError("go.micro.client", err.Error())
}
stream, err := g.stream(ctx, node, req, callOpts)
g.opts.Selector.Mark(req.Service(), node, err)
return stream, err
}
type response struct {
stream client.Stream
err error
}
ch := make(chan response, callOpts.Retries+1)
var grr error
for i := 0; i <= callOpts.Retries; i++ {
go func() {
s, err := call(i)
ch <- response{s, err}
}()
select {
case <-ctx.Done():
return nil, errors.New("go.micro.client", fmt.Sprintf("%v", ctx.Err()), 408)
case rsp := <-ch:
// if the call succeeded lets bail early
if rsp.err == nil {
return rsp.stream, nil
}
			retry, rerr := callOpts.Retry(ctx, req, i, rsp.err)
if rerr != nil {
return nil, rerr
}
if !retry {
return nil, rsp.err
}
grr = rsp.err
}
}
return nil, grr
}
func (g *grpcClient) Publish(ctx context.Context, p client.Message, opts ...client.PublishOption) error {
md, ok := metadata.FromContext(ctx)
if !ok {
md = make(map[string]string)
}
md["Content-Type"] = p.ContentType()
cf, err := g.newCodec(p.ContentType())
if err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
}
b := &buffer{bytes.NewBuffer(nil)}
if err := cf(b).Write(&codec.Message{Type: codec.Publication}, p.Payload()); err != nil {
return errors.InternalServerError("go.micro.client", err.Error())
}
g.once.Do(func() {
g.opts.Broker.Connect()
})
return g.opts.Broker.Publish(p.Topic(), &broker.Message{
Header: md,
Body: b.Bytes(),
})
}
func (g *grpcClient) String() string {
return "grpc"
}
func newClient(opts ...client.Option) client.Client {
options := client.Options{
Codecs: make(map[string]codec.NewCodec),
CallOptions: client.CallOptions{
Backoff: client.DefaultBackoff,
Retry: client.DefaultRetry,
Retries: client.DefaultRetries,
RequestTimeout: client.DefaultRequestTimeout,
DialTimeout: transport.DefaultDialTimeout,
},
PoolSize: client.DefaultPoolSize,
PoolTTL: client.DefaultPoolTTL,
}
for _, o := range opts {
o(&options)
}
if len(options.ContentType) == 0 {
options.ContentType = "application/grpc+proto"
}
if options.Broker == nil {
options.Broker = broker.DefaultBroker
}
if options.Registry == nil {
options.Registry = registry.DefaultRegistry
}
if options.Selector == nil {
options.Selector = selector.NewSelector(
selector.Registry(options.Registry),
)
}
rc := &grpcClient{
once: sync.Once{},
opts: options,
pool: newPool(options.PoolSize, options.PoolTTL),
}
c := client.Client(rc)
// wrap in reverse
for i := len(options.Wrappers); i > 0; i-- {
c = options.Wrappers[i-1](c)
}
return c
}
func NewClient(opts ...client.Option) client.Client {
return newClient(opts...)
}
| [
"\"MICRO_PROXY\"",
"\"MICRO_PROXY_ADDRESS\""
]
| []
| [
"MICRO_PROXY",
"MICRO_PROXY_ADDRESS"
]
| [] | ["MICRO_PROXY", "MICRO_PROXY_ADDRESS"] | go | 2 | 0 | |
setup.py | try:
from pip.req import parse_requirements
except ImportError:
# The req module has been moved to pip._internal in the 10 release.
from pip._internal.req import parse_requirements
import lnt
import os
from sys import platform as _platform
import sys
from setuptools import setup, find_packages, Extension
if sys.version_info < (2, 7):
raise RuntimeError("Python 2.7 or higher required.")
cflags = []
if _platform == "darwin":
os.environ["CC"] = "xcrun --sdk macosx clang"
os.environ["CXX"] = "xcrun --sdk macosx clang"
cflags += ['-stdlib=libc++', '-mmacosx-version-min=10.7']
# setuptools expects to be invoked from within the directory of setup.py, but
# it is nice to allow:
# python path/to/setup.py install
# to work (for scripts, etc.)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
cPerf = Extension('lnt.testing.profile.cPerf',
sources=['lnt/testing/profile/cPerf.cpp'],
extra_compile_args=['-std=c++11'] + cflags)
if "--server" in sys.argv:
sys.argv.remove("--server")
req_file = "requirements.server.txt"
else:
req_file = "requirements.client.txt"
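# Usage note: `python setup.py install --server` selects the server-side
# requirement set; the default is the lighter client set.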
try:
install_reqs = parse_requirements(req_file, session=False)
except TypeError:
# In old PIP the session flag cannot be passed.
install_reqs = parse_requirements(req_file)
reqs = [str(ir.req) for ir in install_reqs]
setup(
name="LNT",
version=lnt.__version__,
author=lnt.__author__,
author_email=lnt.__email__,
url='http://llvm.org',
    license='Apache-2.0 with LLVM exception',
description="LLVM Nightly Test Infrastructure",
keywords='web testing performance development llvm',
long_description="""\
*LNT*
+++++
About
=====
*LNT* is an infrastructure for performance testing. The software itself
consists of two main parts, a web application for accessing and visualizing
performance data, and command line utilities to allow users to generate and
submit test results to the server.
The package was originally written for use in testing LLVM compiler
technologies, but is designed to be usable for the performance testing of any
software.
Documentation
=============
The official *LNT* documentation is available online at:
http://llvm.org/docs/lnt
Source
======
The *LNT* source is available in the LLVM SVN repository:
http://llvm.org/svn/llvm-project/lnt/trunk
""",
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache-2.0 with LLVM exception',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
],
zip_safe=False,
# Additional resource extensions we use.
package_data={'lnt.server.ui': ['static/*.ico',
'static/*.js',
'static/*.css',
'static/*.svg',
'static/bootstrap/css/*.css',
'static/bootstrap/js/*.js',
'static/bootstrap/img/*.png',
'static/flot/*.min.js',
'static/d3/*.min.js',
'static/jquery/**/*.min.js',
'templates/*.html',
'templates/reporting/*.html',
'templates/reporting/*.txt'],
'lnt.server.db': ['migrations/*.py'],
},
packages=find_packages(),
test_suite='tests.test_all',
entry_points={
'console_scripts': [
'lnt = lnt.lnttool:main',
],
},
install_requires=reqs,
ext_modules=[cPerf],
python_requires='>=2.7',
)
| []
| []
| [
"CXX",
"CC"
]
| [] | ["CXX", "CC"] | python | 2 | 0 | |
ckan_cloud_operator/logs.py | from logging import CRITICAL, ERROR, WARNING, INFO, DEBUG, getLevelName
import datetime
from distutils.util import strtobool
import os
from ruamel import yaml
from ruamel.yaml.serializer import Serializer as ruamelSerializer
from ruamel.yaml.emitter import Emitter as ruamelEmitter
import sys
def info(*args, **kwargs):
log(INFO, *args, **kwargs)
def debug(*args, **kwargs):
log(DEBUG, *args, **kwargs)
def debug_verbose(*args, **kwargs):
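    # Opt-in: set CKAN_CLOUD_OPERATOR_DEBUG_VERBOSE=y to dump full args/kwargs
    # at debug level (which itself requires CKAN_CLOUD_OPERATOR_DEBUG=y).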
if strtobool(os.environ.get('CKAN_CLOUD_OPERATOR_DEBUG_VERBOSE', 'n')):
debug(yaml.dump([args, kwargs], default_flow_style=False))
def warning(*args, **kwargs):
log(WARNING, *args, **kwargs)
def error(*args, **kwargs):
log(ERROR, *args, **kwargs)
def critical(*args, **kwargs):
log(CRITICAL, *args, **kwargs)
def log(level, *args, **kwargs):
if level == DEBUG and not strtobool(os.environ.get('CKAN_CLOUD_OPERATOR_DEBUG', 'n')): return
msg = datetime.datetime.now().strftime('%Y-%m-%d %H:%M') + ' ' + getLevelName(level) + ' '
if len(kwargs) > 0:
msg += '(' + ','.join([f'{k}="{v}"' for k, v in kwargs.items()]) + ') '
msg += ' '.join(args)
print(msg)
def important_log(level, *args, **kwargs):
if level == DEBUG and not strtobool(os.environ.get('CKAN_CLOUD_OPERATOR_DEBUG', 'n')): return
header = datetime.datetime.now().strftime('%Y-%m-%d %H:%M') + ' ' + getLevelName(level)
print(f'\n{header}')
if len(kwargs) > 0:
metadata = '(' + ','.join([f'{k}="{v}"' for k, v in kwargs.items()]) + ')'
print(metadata)
print('\n')
if len(args) > 0:
title = args[0]
print(f'== {title}')
if len(args) > 1:
msg = ' '.join(args[1:])
print(msg)
def exit_great_success(quiet=False):
if not quiet:
info('Great Success!')
exit(0)
def exit_catastrophic_failure(exitcode=1, quiet=False):
if not quiet:
critical('Catastrophic Failure!')
exit(exitcode)
def debug_yaml_dump(*args, **kwargs):
if len(args) == 1:
debug(yaml.dump(args[0], Dumper=YamlSafeDumper, default_flow_style=False), **kwargs)
else:
debug(yaml.dump(args, Dumper=YamlSafeDumper, default_flow_style=False), **kwargs)
def print_yaml_dump(data, exit_success=False):
yaml.dump(data, sys.stdout, Dumper=YamlSafeDumper, default_flow_style=False)
if exit_success:
exit_great_success(quiet=True)
def yaml_dump(data, *args, **kwargs):
return yaml.dump(data, *args, Dumper=YamlSafeDumper, default_flow_style=False, **kwargs)
class YamlSafeDumper(yaml.SafeDumper):
def ignore_aliases(self, data):
return True
def represent_undefined(self, data):
return None
| []
| []
| [
"CKAN_CLOUD_OPERATOR_DEBUG_VERBOSE",
"CKAN_CLOUD_OPERATOR_DEBUG"
]
| [] | ["CKAN_CLOUD_OPERATOR_DEBUG_VERBOSE", "CKAN_CLOUD_OPERATOR_DEBUG"] | python | 2 | 0 | |
commands/v2/stop_command.go | package v2
import (
"os"
"code.cloudfoundry.org/cli/cf/cmd"
"code.cloudfoundry.org/cli/commands"
"code.cloudfoundry.org/cli/commands/flags"
)
type StopCommand struct {
RequiredArgs flags.AppName `positional-args:"yes"`
usage interface{} `usage:"CF_NAME stop APP_NAME"`
relatedCommands interface{} `related_commands:"restart, scale, start"`
}
func (_ StopCommand) Setup(config commands.Config, ui commands.UI) error {
return nil
}
func (_ StopCommand) Execute(args []string) error {
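	// CF_TRACE enables verbose API tracing in the legacy cf codebase; it
	// accepts "true"/"false" or a path to a trace log file.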
cmd.Main(os.Getenv("CF_TRACE"), os.Args)
return nil
}
| [
"\"CF_TRACE\""
]
| []
| [
"CF_TRACE"
]
| [] | ["CF_TRACE"] | go | 1 | 0 | |
rbtools/clients/mercurial.py | """A client for Mercurial."""
from __future__ import unicode_literals
import logging
import os
import re
import uuid
import six
from six.moves.urllib.parse import urlsplit, urlunparse
from rbtools.clients import PatchResult, SCMClient, RepositoryInfo
from rbtools.clients.errors import (CreateCommitError,
InvalidRevisionSpecError,
MergeError,
SCMError,
TooManyRevisionsError)
from rbtools.clients.svn import SVNClient
from rbtools.utils.checks import check_install
from rbtools.utils.console import edit_file
from rbtools.utils.errors import EditorError
from rbtools.utils.filesystem import make_empty_files, make_tempfile
from rbtools.utils.process import execute
class MercurialRefType(object):
"""Types of references in Mercurial."""
#: Revision hashes.
REVISION = 'revision'
#: Branch names.
BRANCH = 'branch'
#: Bookmark names.
BOOKMARK = 'bookmark'
#: Tag names.
TAG = 'tag'
#: Unknown references.
UNKNOWN = 'unknown'
class MercurialClient(SCMClient):
"""A client for Mercurial.
This is a wrapper around the hg executable that fetches repository
information and generates compatible diffs.
"""
name = 'Mercurial'
server_tool_names = 'Mercurial,Subversion'
supports_commit_history = True
supports_diff_exclude_patterns = True
supports_parent_diffs = True
can_bookmark = True
can_branch = True
can_merge = True
PRE_CREATION = '/dev/null'
PRE_CREATION_DATE = 'Thu Jan 01 00:00:00 1970 +0000'
NO_PARENT = '0' * 40
# The ASCII field seperator.
_FIELD_SEP = '\x1f'
# The ASCII field separator as an escape sequence.
#
# This is passed to Mercurial, where it is interpreted and transformed into
# the actual character.
_FIELD_SEP_ESC = r'\x1f'
# The ASCII record separator.
_RECORD_SEP = '\x1e'
# The ASCII record separator as an escape sequence.
#
# This is passed to Mercurial, where it is interpreted and transformed into
# the actual character.
_RECORD_SEP_ESC = r'\x1e'
def __init__(self, executable='hg', **kwargs):
"""Initialize the client.
Args:
**kwargs (dict):
Keyword arguments to pass through to the superclass.
"""
super(MercurialClient, self).__init__(**kwargs)
self.hgrc = {}
self._exe = executable
self._type = 'hg'
self._remote_path = ()
self._initted = False
self._hg_env = {
'HGPLAIN': '1',
}
self._hgext_path = os.path.normpath(os.path.join(
os.path.dirname(__file__),
'..', 'helpers', 'hgext.py'))
# `self._remote_path_candidates` is an ordered set of hgrc
# paths that are checked if `tracking` option is not given
# explicitly. The first candidate found to exist will be used,
# falling back to `default` (the last member.)
self._remote_path_candidates = ['reviewboard', 'origin', 'parent',
'default']
@property
def hidden_changesets_supported(self):
"""Return whether the repository supports hidden changesets.
Mercurial 1.9 and above support hidden changesets. These are changesets
that have been hidden from regular repository view. They still exist
and are accessible, but only if the --hidden command argument is
specified.
        Since we may encounter hidden changesets (e.g. the user specifies
        hidden changesets as part of the revision spec), we need to be aware
        of them.
"""
if not hasattr(self, '_hidden_changesets_supported'):
# The choice of command is arbitrary. parents for the initial
# revision should be fast.
result = execute([self._exe, 'parents', '--hidden', '-r', '0'],
ignore_errors=True,
with_errors=False,
none_on_ignored_error=True)
self._hidden_changesets_supported = result is not None
return self._hidden_changesets_supported
@property
def hg_root(self):
"""Return the root of the working directory.
This will return the root directory of the current repository. If the
current working directory is not inside a mercurial repository, this
returns None.
"""
if not hasattr(self, '_hg_root'):
self._load_hgrc()
key = 'bundle.mainreporoot'
if key in self.hgrc:
self._hg_root = self.hgrc[key]
else:
self._hg_root = None
return self._hg_root
def _init(self):
"""Initialize the client."""
if self._initted or not self.hg_root:
return
if 'extensions.hgsubversion' in self.hgrc:
svn_info = execute([self._exe, 'svn', 'info'], ignore_errors=True)
else:
svn_info = None
if (svn_info and not svn_info.startswith('abort:') and
not svn_info.startswith('hg: unknown command') and
not svn_info.lower().startswith('not a child of')):
self._type = 'svn'
self._svn_info = svn_info
else:
self._type = 'hg'
for candidate in self._remote_path_candidates:
rc_key = 'paths.%s' % candidate
if rc_key in self.hgrc:
self._remote_path = (candidate, self.hgrc[rc_key])
logging.debug('Using candidate path %r: %r',
self._remote_path[0], self._remote_path[1])
break
self._initted = True
def get_commit_history(self, revisions):
"""Return the commit history specified by the revisions.
Args:
revisions (dict):
A dictionary of revisions to generate history for, as returned
by :py:meth:`parse_revision_spec`.
Returns:
list of dict:
This list of history entries, in order.
Raises:
rbtools.clients.errors.SCMError:
The history is non-linear or there is a commit with no parents.
"""
log_fields = {
'commit_id': '{node}',
'parent_id': '{p1node}',
'author_name': '{author|person}',
'author_email': '{author|email}',
'author_date': '{date|rfc3339date}',
'parent2': '{p2node}',
'commit_message': '{desc}',
}
log_format = self._FIELD_SEP_ESC.join(six.itervalues(log_fields))
log_entries = execute(
[
self._exe,
'log',
'--template',
'%s%s' % (log_format, self._RECORD_SEP_ESC),
'-r',
'%(base)s::%(tip)s and not %(base)s' % revisions,
],
ignore_errors=True,
none_on_ignored_error=True,
results_unicode=True)
if not log_entries:
return None
history = []
field_names = six.viewkeys(log_fields)
# The ASCII record separator will be appended to every record, so if we
# attempt to split the entire output by the record separator, we will
# end up with an empty ``log_entry`` at the end, which will cause
# errors.
for log_entry in log_entries[:-1].split(self._RECORD_SEP):
fields = log_entry.split(self._FIELD_SEP)
entry = dict(zip(field_names, fields))
# We do not want `parent2` to be included in the entry because
# the entry's items are used as the keyword arguments to the
# method that uploads a commit and it would be unexpected.
if entry.pop('parent2') != self.NO_PARENT:
raise SCMError(
'The Mercurial SCMClient only supports posting commit '
'histories that are entirely linear.'
)
elif entry['parent_id'] == self.NO_PARENT:
raise SCMError(
'The Mercurial SCMClient only supports posting commits '
'that have exactly one parent.'
)
history.append(entry)
return history
def get_local_path(self):
"""Return the local path to the working tree.
Returns:
unicode:
The filesystem path of the repository on the client system.
"""
if not check_install([self._exe, '--help']):
logging.debug('Unable to execute "hg --help": skipping Mercurial')
return None
return self.hg_root
def get_repository_info(self):
"""Return repository information for the current working tree.
Returns:
rbtools.clients.RepositoryInfo:
The repository info structure.
"""
if not check_install([self._exe, '--help']):
logging.debug('Unable to execute "hg --help": skipping Mercurial')
return None
self._init()
if not self.hg_root:
# hg aborted => no mercurial repository here.
return None
if self._type == 'svn':
return self._calculate_hgsubversion_repository_info(self._svn_info)
else:
path = self.hg_root
base_path = '/'
if self._remote_path:
path = self._remote_path[1]
base_path = ''
return RepositoryInfo(path=path,
base_path=base_path,
local_path=self.hg_root)
def parse_revision_spec(self, revisions=[]):
"""Parse the given revision spec.
Args:
revisions (list of unicode, optional):
A list of revisions as specified by the user. Items in the list
do not necessarily represent a single revision, since the user
can use SCM-native syntaxes such as ``r1..r2`` or ``r1:r2``.
SCMTool-specific overrides of this method are expected to deal
with such syntaxes.
Raises:
rbtools.clients.errors.InvalidRevisionSpecError:
The given revisions could not be parsed.
rbtools.clients.errors.TooManyRevisionsError:
The specified revisions list contained too many revisions.
Returns:
dict:
A dictionary with the following keys:
``base`` (:py:class:`unicode`):
A revision to use as the base of the resulting diff.
``tip`` (:py:class:`unicode`):
A revision to use as the tip of the resulting diff.
``parent_base`` (:py:class:`unicode`, optional):
The revision to use as the base of a parent diff.
``commit_id`` (:py:class:`unicode`, optional):
The ID of the single commit being posted, if not using a range.
These will be used to generate the diffs to upload to Review Board (or
print). The diff for review will include the changes in (base, tip],
and the parent diff (if necessary) will include (parent, base].
If zero revisions are passed in, this will return the outgoing changes
from the parent of the working directory.
If a single revision is passed in, this will return the parent of that
revision for "base" and the passed-in revision for "tip". This will
result in generating a diff for the changeset specified.
If two revisions are passed in, they will be used for the "base"
and "tip" revisions, respectively.
In all cases, a parent base will be calculated automatically from
changesets not present on the remote.
"""
self._init()
n_revisions = len(revisions)
if n_revisions == 1:
# If there's a single revision, try splitting it based on hg's
# revision range syntax (either :: or ..). If this splits, then
# it's handled as two revisions below.
revisions = re.split(r'\.\.|::', revisions[0])
n_revisions = len(revisions)
result = {}
if n_revisions == 0:
# No revisions: Find the outgoing changes. Only consider the
# working copy revision and ancestors because that makes sense.
# If a user wishes to include other changesets, they can run
# `hg up` or specify explicit revisions as command arguments.
if self._type == 'svn':
result['base'] = self._get_parent_for_hgsubversion()
result['tip'] = '.'
else:
# Ideally, generating a diff for outgoing changes would be as
# simple as just running `hg outgoing --patch <remote>`, but
# there are a couple problems with this. For one, the
# server-side diff parser isn't equipped to filter out diff
# headers such as "comparing with..." and
# "changeset: <rev>:<hash>". Another problem is that the output
# of `hg outgoing` potentially includes changesets across
# multiple branches.
#
# In order to provide the most accurate comparison between
# one's local clone and a given remote (something akin to git's
# diff command syntax `git diff <treeish>..<treeish>`), we have
# to do the following:
#
# - Get the name of the current branch
# - Get a list of outgoing changesets, specifying a custom
# format
# - Filter outgoing changesets by the current branch name
# - Get the "top" and "bottom" outgoing changesets
#
# These changesets are then used as arguments to
# `hg diff -r <rev> -r <rev>`.
#
# Future modifications may need to be made to account for odd
# cases like having multiple diverged branches which share
# partial history--or we can just punish developers for doing
# such nonsense :)
outgoing = \
self._get_bottom_and_top_outgoing_revs_for_remote(rev='.')
if outgoing[0] is None or outgoing[1] is None:
raise InvalidRevisionSpecError(
'There are no outgoing changes')
result['base'] = self._identify_revision(outgoing[0])
result['tip'] = self._identify_revision(outgoing[1])
result['commit_id'] = result['tip']
# Since the user asked us to operate on tip, warn them about a
# dirty working directory.
if (self.has_pending_changes() and
not self.config.get('SUPPRESS_CLIENT_WARNINGS', False)):
logging.warning('Your working directory is not clean. Any '
'changes which have not been committed '
'to a branch will not be included in your '
'review request.')
if self.options.parent_branch:
result['parent_base'] = result['base']
result['base'] = self._identify_revision(
self.options.parent_branch)
elif n_revisions == 1:
# One revision: Use the given revision for tip, and find its parent
# for base.
result['tip'] = self._identify_revision(revisions[0])
result['commit_id'] = result['tip']
result['base'] = self._execute(
[self._exe, 'parents', '--hidden', '-r', result['tip'],
'--template', '{node|short}']).split()[0]
if len(result['base']) != 12:
raise InvalidRevisionSpecError(
"Can't determine parent revision"
)
elif n_revisions == 2:
# Two revisions: Just use the given revisions
result['base'] = self._identify_revision(revisions[0])
result['tip'] = self._identify_revision(revisions[1])
else:
raise TooManyRevisionsError
if 'base' not in result or 'tip' not in result:
raise InvalidRevisionSpecError(
'"%s" does not appear to be a valid revision spec' % revisions)
if self._type == 'hg' and 'parent_base' not in result:
# If there are missing changesets between base and the remote, we
# need to generate a parent diff.
outgoing = self._get_outgoing_changesets(self._get_remote_branch(),
rev=result['base'])
logging.debug('%d outgoing changesets between remote and base.',
len(outgoing))
if not outgoing:
return result
parent_base = self._execute(
[self._exe, 'parents', '--hidden', '-r', outgoing[0][1],
'--template', '{node|short}']).split()
if len(parent_base) == 0:
raise Exception(
'Could not find parent base revision. Ensure upstream '
'repository is not empty.')
result['parent_base'] = parent_base[0]
logging.debug('Identified %s as parent base',
result['parent_base'])
return result
def _identify_revision(self, revision):
"""Identify the given revision.
Args:
revision (unicode):
The revision.
Raises:
rbtools.clients.errors.InvalidRevisionSpecError:
The specified revision could not be identified.
Returns:
unicode:
The global revision ID of the commit.
"""
identify = self._execute(
[self._exe, 'identify', '-i', '--hidden', '-r', str(revision)],
ignore_errors=True, none_on_ignored_error=True)
if identify is None:
raise InvalidRevisionSpecError(
'"%s" does not appear to be a valid revision' % revision)
else:
return identify.split()[0]
def _calculate_hgsubversion_repository_info(self, svn_info):
"""Return repository info for an hgsubversion checkout.
Args:
svn_info (unicode):
The SVN info output.
Returns:
rbtools.clients.RepositoryInfo:
The repository info structure, if available.
"""
def _info(r):
m = re.search(r, svn_info, re.M)
if m:
return urlsplit(m.group(1))
else:
return None
self._type = 'svn'
root = _info(r'^Repository Root: (.+)$')
url = _info(r'^URL: (.+)$')
if not (root and url):
return None
scheme, netloc, path, _, _ = root
root = urlunparse([scheme, root.netloc.split('@')[-1], path,
'', '', ''])
base_path = url.path[len(path):]
return RepositoryInfo(path=root,
base_path=base_path,
local_path=self.hg_root)
def _load_hgrc(self):
"""Load the hgrc file."""
for line in execute([self._exe, 'showconfig'],
env=self._hg_env, split_lines=True):
line = line.split('=', 1)
if len(line) == 2:
key, value = line
else:
key = line[0]
value = ''
self.hgrc[key] = value.strip()
def get_hg_ref_type(self, ref):
"""Return the type of a reference in Mercurial.
This can be used to determine if something is a bookmark, branch,
tag, or revision.
Args:
ref (unicode):
The reference to return the type for.
Returns:
unicode:
The reference type. This will be a value in
:py:class:`MercurialRefType`.
"""
# Check for any bookmarks matching ref.
rc, output = self._execute([self._exe, 'log', '-ql1', '-r',
'bookmark(%s)' % ref],
ignore_errors=True,
return_error_code=True)
if rc == 0:
return MercurialRefType.BOOKMARK
        # Check for any branches matching ref.
#
# Ideally, we'd use the same sort of log call we'd use for bookmarks
# and tags, but it works differently for branches, and will
# incorrectly match tags.
branches = self._execute([self._exe, 'branches', '-q']).split()
if ref in branches:
return MercurialRefType.BRANCH
# Check for any tags matching ref.
rc, output = self._execute([self._exe, 'log', '-ql1', '-r',
'tag(%s)' % ref],
ignore_errors=True,
return_error_code=True)
if rc == 0:
return MercurialRefType.TAG
# Now just check that it exists at all. We'll assume it's a revision.
rc, output = self._execute([self._exe, 'identify', '-r', ref],
ignore_errors=True,
return_error_code=True)
if rc == 0:
return MercurialRefType.REVISION
return MercurialRefType.UNKNOWN
def get_raw_commit_message(self, revisions):
"""Return the raw commit message.
This extracts all descriptions in the given revision range and
concatenates them, most recent ones going first.
Args:
revisions (dict):
A dictionary containing ``base`` and ``tip`` keys.
Returns:
unicode:
The commit messages of all commits between (base, tip].
"""
rev1 = revisions['base']
rev2 = revisions['tip']
delim = str(uuid.uuid1())
descs = self._execute(
[self._exe, 'log', '--hidden', '-r', '%s::%s' % (rev1, rev2),
'--template', '{desc}%s' % delim],
env=self._hg_env)
        # The initial element is the description of the base changeset, which
        # we don't care about. The last element is always empty due to the
        # string ending with <delim>.
descs = descs.split(delim)[1:-1]
return '\n\n'.join(desc.strip() for desc in descs)
def diff(self, revisions, include_files=[], exclude_patterns=[],
extra_args=[], with_parent_diff=True, **kwargs):
"""Perform a diff using the given revisions.
This will generate a Git-style diff and parent diff (if needed) for
the provided revisions. The diff will contain additional metadata
headers used by Review Board to locate the appropriate revisions from
the repository.
Args:
revisions (dict):
A dictionary of revisions, as returned by
:py:meth:`parse_revision_spec`.
include_files (list of unicode, optional):
A list of files to whitelist during the diff generation.
exclude_patterns (list of unicode, optional):
A list of shell-style glob patterns to blacklist during diff
generation.
extra_args (list, unused):
Additional arguments to be passed to the diff generation.
Unused for mercurial.
with_parent_diff (bool, optional):
Whether or not to include the parent diff in the result.
**kwargs (dict, unused):
Unused keyword arguments.
Returns:
dict:
A dictionary containing the following keys:
``diff`` (:py:class:`bytes`):
The contents of the diff to upload.
``parent_diff`` (:py:class:`bytes`, optional):
The contents of the parent diff, if available.
``commit_id`` (:py:class:`unicode`, optional):
The commit ID to include when posting, if available.
            ``base_commit_id`` (:py:class:`unicode`, optional):
The ID of the commit that the change is based on, if available.
This is necessary for some hosting services that don't provide
individual file access.
"""
self._init()
diff_args = ['--hidden', '--nodates', '-g']
if self._type == 'svn':
diff_args.append('--svn')
diff_args += include_files
for pattern in exclude_patterns:
diff_args += ['-X', pattern]
node_base_id = revisions['base']
diff = self._run_diff(diff_args,
parent_id=node_base_id,
node_id=revisions['tip'])
if with_parent_diff and 'parent_base' in revisions:
base_commit_id = revisions['parent_base']
parent_diff = self._run_diff(diff_args,
parent_id=base_commit_id,
node_id=node_base_id)
else:
base_commit_id = node_base_id
parent_diff = None
# If reviewboard requests a relative revision via hgweb it will fail
# since hgweb does not support the relative revision syntax (^1, -1).
# Rewrite this relative node id to an absolute node id.
        match = re.match(r'^[A-Za-z0-9]*$', base_commit_id)
if not match:
base_commit_id = self._execute(
[self._exe, 'log', '-r', base_commit_id,
'--template', '{node}'],
env=self._hg_env, results_unicode=False)
return {
'diff': diff,
'parent_diff': parent_diff,
'commit_id': revisions.get('commit_id'),
'base_commit_id': base_commit_id,
}
def _run_diff(self, diff_args, parent_id, node_id):
"""Run a diff command and normalize its results.
This will run :command:`hg diff` with the provided arguments for the
provided revision range, performing some normalization on the diff to
prepare it for use in Review Board.
Args:
diff_args (list of unicode):
The arguments to pass to :command:`hg diff` (except for any
revision ranges).
parent_id (unicode):
The ID of the parent commit for the range.
node_id (unicode):
The ID of the latest commit for the range.
Returns:
bytes:
The normalized diff content.
"""
diff = self._execute(
[self._exe, 'diff'] + diff_args + ['-r', parent_id, '-r', node_id],
env=self._hg_env,
log_output_on_error=False,
results_unicode=False)
return self._normalize_diff(diff,
node_id=node_id,
parent_id=parent_id)
def _normalize_diff(self, diff, node_id, parent_id):
"""Normalize a diff, adding any headers that may be needed.
For Git-style diffs, this will ensure the diff starts with information
required for Review Board to identify the commit and its parent. These
are based on headers normally generated by :command:`hg export`.
Args:
diff (bytes):
The generated diff content to prepend to.
node_id (unicode):
The revision of this change.
parent_id (unicode):
The revision of the parent change.
Returns:
bytes:
The normalized diff content.
"""
assert isinstance(diff, bytes)
if diff.lstrip().startswith(b'diff --git'):
diff = (
b'# HG changeset patch\n'
b'# Node ID %(node_id)s\n'
b'# Parent %(parent_id)s\n'
b'%(diff)s'
% {
b'node_id': node_id.encode('utf-8'),
b'parent_id': parent_id.encode('utf-8'),
b'diff': diff,
}
)
return diff
def _get_files_in_changeset(self, rev):
"""Return a set of all files in the specified changeset.
Args:
rev (unicode):
A changeset identifier.
Returns:
set:
A set of filenames in the changeset.
"""
cmd = [self._exe, 'locate', '-r', rev]
files = execute(cmd, env=self._hg_env, ignore_errors=True,
none_on_ignored_error=True)
if files:
files = files.replace('\\', '/') # workaround for issue 3894
return set(files.splitlines())
return set()
def _get_parent_for_hgsubversion(self):
"""Return the parent Subversion branch.
Returns the parent branch defined in the command options if it exists,
otherwise returns the parent Subversion branch of the current
repository.
Returns:
unicode:
                The parent branch for the hgsubversion checkout.
"""
return (getattr(self.options, 'tracking', None) or
execute([self._exe, 'parent', '--svn', '--template',
'{node}\n']).strip())
def _get_remote_branch(self):
"""Return the remote branch assoicated with this repository.
If the remote branch is not defined, the parent branch of the
repository is returned.
Returns:
unicode:
The name of the tracking branch.
"""
remote = getattr(self.options, 'tracking', None)
if not remote:
try:
remote = self._remote_path[0]
except IndexError:
remote = None
if not remote:
raise SCMError('Could not determine remote branch to use for '
'diff creation. Specify --tracking-branch to '
'continue.')
return remote
def create_commit(self, message, author, run_editor,
files=[], all_files=False):
"""Commit the given modified files.
This is expected to be called after applying a patch. This commits the
patch using information from the review request, opening the commit
message in $EDITOR to allow the user to update it.
Args:
message (unicode):
The commit message to use.
author (object):
The author of the commit. This is expected to have ``fullname``
and ``email`` attributes.
run_editor (bool):
                Whether to run the user's editor on the commit message before
committing.
files (list of unicode, optional):
The list of filenames to commit.
all_files (bool, optional):
Whether to commit all changed files, ignoring the ``files``
argument.
Raises:
rbtools.clients.errors.CreateCommitError:
The commit message could not be created. It may have been
aborted by the user.
"""
if run_editor:
filename = make_tempfile(message.encode('utf-8'),
prefix='hg-editor-',
suffix='.txt')
try:
modified_message = edit_file(filename)
except EditorError as e:
raise CreateCommitError(six.text_type(e))
finally:
try:
os.unlink(filename)
except OSError:
pass
else:
modified_message = message
if not modified_message.strip():
raise CreateCommitError(
"A commit message wasn't provided. The patched files are in "
"your tree but haven't been committed.")
hg_command = [self._exe, 'commit', '-m', modified_message]
try:
hg_command += ['-u', '%s <%s>' % (author.fullname, author.email)]
except AttributeError:
# Users who have marked their profile as private won't include the
# fullname or email fields in the API payload. Just commit as the
# user running RBTools.
logging.warning('The author has marked their Review Board profile '
'information as private. Committing without '
'author attribution.')
if all_files:
hg_command.append('-A')
else:
hg_command += files
try:
self._execute(hg_command)
except Exception as e:
raise CreateCommitError(six.text_type(e))
def merge(self, target, destination, message, author, squash=False,
run_editor=False, close_branch=False, **kwargs):
"""Merge the target branch with destination branch.
Args:
target (unicode):
The name of the branch to merge.
destination (unicode):
The name of the branch to merge into.
message (unicode):
The commit message to use.
author (object):
The author of the commit. This is expected to have ``fullname``
and ``email`` attributes.
squash (bool, optional):
Whether to squash the commits or do a plain merge. This is not
used for Mercurial.
run_editor (bool, optional):
                Whether to run the user's editor on the commit message before
committing.
close_branch (bool, optional):
Whether to delete the branch after merging.
**kwargs (dict, unused):
Additional keyword arguments passed, for future expansion.
Raises:
rbtools.clients.errors.MergeError:
An error occurred while merging the branch.
"""
ref_type = self.get_hg_ref_type(target)
if ref_type == MercurialRefType.UNKNOWN:
raise MergeError('Could not find a valid branch, tag, bookmark, '
'or revision called "%s".'
% target)
if close_branch and ref_type == MercurialRefType.BRANCH:
try:
self._execute([self._exe, 'update', target])
except Exception as e:
raise MergeError('Could not switch to branch "%s".\n\n%s'
% (target, e))
try:
self._execute([self._exe, 'commit', '-m', message,
'--close-branch'])
except Exception as e:
raise MergeError('Could not close branch "%s".\n\n%s'
% (target, e))
try:
self._execute([self._exe, 'update', destination])
except Exception as e:
raise MergeError('Could not switch to branch "%s".\n\n%s'
% (destination, e))
try:
self._execute([self._exe, 'merge', target])
except Exception as e:
raise MergeError('Could not merge %s "%s" into "%s".\n\n%s'
% (ref_type, target, destination, e))
self.create_commit(message=message,
author=author,
run_editor=run_editor)
if close_branch and ref_type == MercurialRefType.BOOKMARK:
try:
self._execute([self._exe, 'bookmark', '-d', target])
except Exception as e:
raise MergeError('Could not delete bookmark "%s".\n\n%s'
% (target, e))
def _get_current_branch(self):
"""Return the current branch of this repository.
Returns:
unicode:
The name of the currently checked-out branch.
"""
return execute([self._exe, 'branch'], env=self._hg_env).strip()
def _get_bottom_and_top_outgoing_revs_for_remote(self, rev=None):
"""Return the bottom and top outgoing revisions.
Args:
rev (unicode, optional):
An optional revision to limit the results. If specified, only
outgoing changesets which are ancestors of this revision will
be included.
Returns:
tuple:
A 2-tuple containing the bottom and top outgoing revisions for the
changesets between the current branch and the remote branch.
"""
remote = self._get_remote_branch()
current_branch = self._get_current_branch()
outgoing = [o for o in self._get_outgoing_changesets(remote, rev=rev)
if current_branch == o[2]]
if outgoing:
top_rev, bottom_rev = \
self._get_top_and_bottom_outgoing_revs(outgoing)
else:
top_rev = None
bottom_rev = None
return bottom_rev, top_rev
def _get_outgoing_changesets(self, remote, rev=None):
"""Return the outgoing changesets between us and a remote.
Args:
remote (unicode):
The name of the remote.
rev (unicode, optional):
An optional revision to limit the results. If specified, only
outgoing changesets which are ancestors of this revision will
be included.
Returns:
list:
                A list of ``(rev, node, branch)`` tuples, one per outgoing
                changeset, sorted in revision order.
"""
outgoing_changesets = []
args = [self._exe, '-q', 'outgoing', '--template',
'{rev}\\t{node|short}\\t{branch}\\n',
remote]
if rev:
args.extend(['-r', rev])
# We must handle the special case where there are no outgoing commits
# as mercurial has a non-zero return value in this case.
raw_outgoing = execute(args,
env=self._hg_env,
extra_ignore_errors=(1,))
for line in raw_outgoing.splitlines():
if not line:
continue
# Ignore warning messages that hg might put in, such as
# "warning: certificate for foo can't be verified (Python too old)"
if line.startswith('warning: '):
continue
rev, node, branch = [f.strip() for f in line.split('\t')]
branch = branch or 'default'
if not rev.isdigit():
raise Exception('Unexpected output from hg: %s' % line)
logging.debug('Found outgoing changeset %s:%s', rev, node)
outgoing_changesets.append((int(rev), node, branch))
return outgoing_changesets
def _get_top_and_bottom_outgoing_revs(self, outgoing_changesets):
"""Return top and bottom outgoing revisions for the given changesets.
Args:
outgoing_changesets (list):
A list of outgoing changesets.
Returns:
tuple:
A 2-tuple containing the top and bottom revisions for the given
outgoing changesets.
"""
revs = set(t[0] for t in outgoing_changesets)
top_rev = max(revs)
bottom_rev = min(revs)
for rev, node, branch in reversed(outgoing_changesets):
parents = execute(
[self._exe, 'log', '-r', str(rev), '--template', '{parents}'],
env=self._hg_env)
            parents = re.split(r':[^\s]+\s*', parents)
            parents = [int(p) for p in parents if p != '']
            parents = [p for p in parents if p not in revs]
if len(parents) > 0:
bottom_rev = parents[0]
break
else:
bottom_rev = rev - 1
bottom_rev = max(0, bottom_rev)
return top_rev, bottom_rev
def scan_for_server(self, repository_info):
"""Find the Review Board server matching this repository.
Args:
repository_info (rbtools.clients.RepositoryInfo):
The repository information structure.
Returns:
unicode:
The Review Board server URL, if available.
"""
server_url = self.hgrc.get('reviewboard.url', '').strip()
if server_url:
return server_url
elif self._type == 'svn':
# Try using the reviewboard:url property on the SVN repo, if it
# exists.
return SVNClient().scan_for_server_property(repository_info)
return None
def _execute(self, cmd, *args, **kwargs):
"""Execute an hg command.
Args:
cmd (list of unicode):
A command line to execute.
*args (list):
                Additional arguments to pass to
:py:func:`rbtools.utils.process.execute`.
**kwargs (dict):
                Additional keyword arguments to pass to
:py:func:`rbtools.utils.process.execute`.
Returns:
tuple:
The result of the execute call.
"""
# Don't modify the original arguments passed in. This interferes
# with testing and could mess up callers.
cmd = list(cmd)
if not self.hidden_changesets_supported and '--hidden' in cmd:
cmd = [p for p in cmd if p != '--hidden']
# Add our extension which normalizes settings. This is the easiest
# way to normalize settings since it doesn't require us to chase
# a tail of diff-related config options.
cmd += [
'--config',
'extensions.rbtoolsnormalize=%s' % self._hgext_path
]
return execute(cmd, *args, **kwargs)
def has_pending_changes(self):
"""Check if there are changes waiting to be committed.
Returns:
bool:
``True`` if the working directory has been modified, otherwise
returns ``False``.
"""
status = execute([self._exe, 'status', '--modified', '--added',
'--removed', '--deleted'])
return status != ''
def apply_patch(self, patch_file, base_path=None, base_dir=None, p=None,
revert=False):
"""Apply the given patch.
This will take the given patch file and apply it to the working
directory.
Args:
patch_file (unicode):
The name of the patch file to apply.
base_path (unicode, unused):
The base path that the diff was generated in. All hg diffs are
absolute to the repository root, so this is unused.
base_dir (unicode, unused):
The path of the current working directory relative to the root
of the repository. All hg diffs are absolute to the repository
root, so this is unused.
p (unicode, optional):
The prefix level of the diff.
revert (bool, optional):
Whether the patch should be reverted rather than applied.
Returns:
rbtools.clients.PatchResult:
The result of the patch operation.
"""
cmd = [self._exe, 'patch', '--no-commit']
if p:
cmd += ['-p', p]
cmd.append(patch_file)
rc, data = self._execute(cmd, with_errors=True, return_error_code=True,
results_unicode=False)
return PatchResult(applied=(rc == 0), patch_output=data)
def apply_patch_for_empty_files(self, patch, p_num, revert=False):
"""Return whether any empty files in the patch are applied.
Args:
patch (bytes):
The contents of the patch.
p_num (unicode):
The prefix level of the diff.
revert (bool, optional):
Whether the patch should be reverted rather than applied.
Returns:
``True`` if there are empty files in the patch. ``False`` if there
were no empty files, or if an error occurred while applying the
patch.
"""
patched_empty_files = False
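        # Added empty files appear as git-style entries whose '---' side is
        # /dev/null with the epoch timestamp and which are followed by no
        # '@@' hunk lines; deleted empty files are the mirror image.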
added_files = re.findall(r'--- %s\t%s\n'
r'\+\+\+ b/(\S+)\t[^\r\n\t\f]+\n'
r'(?:[^@]|$)'
% (self.PRE_CREATION,
re.escape(self.PRE_CREATION_DATE)), patch)
deleted_files = re.findall(r'--- a/(\S+)\t[^\r\n\t\f]+\n'
r'\+\+\+ %s\t%s\n'
r'(?:[^@]|$)'
% (self.PRE_CREATION,
re.escape(self.PRE_CREATION_DATE)),
patch)
if added_files:
added_files = self._strip_p_num_slashes(added_files, int(p_num))
make_empty_files(added_files)
result = execute([self._exe, 'add'] + added_files,
ignore_errors=True, none_on_ignored_error=True)
if result is None:
logging.error('Unable to execute "hg add" on: %s',
', '.join(added_files))
else:
patched_empty_files = True
if deleted_files:
deleted_files = self._strip_p_num_slashes(deleted_files,
int(p_num))
result = execute([self._exe, 'remove'] + deleted_files,
ignore_errors=True, none_on_ignored_error=True)
if result is None:
logging.error('Unable to execute "hg remove" on: %s',
', '.join(deleted_files))
else:
patched_empty_files = True
return patched_empty_files
def supports_empty_files(self):
"""Return whether the RB server supports added/deleted empty files.
Returns:
bool:
``True`` if the Review Board server supports showing empty files.
"""
return (self.capabilities and
self.capabilities.has_capability('scmtools', 'mercurial',
'empty_files'))
def get_current_bookmark(self):
"""Return the name of the current bookmark.
Returns:
unicode:
A string with the name of the current bookmark.
"""
return execute([self._exe, 'id', '-B'], ignore_errors=True).strip()
| []
| []
| []
| [] | [] | python | null | null | null |
airflow/cli/commands/task_command.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Task sub-commands"""
import importlib
import json
import logging
import os
import textwrap
from contextlib import contextmanager, redirect_stderr, redirect_stdout, suppress
from typing import List, Optional
from pendulum.parsing.exceptions import ParserError
from sqlalchemy.orm.exc import NoResultFound
from airflow import settings
from airflow.cli.simple_table import AirflowConsole
from airflow.configuration import conf
from airflow.exceptions import AirflowException, DagRunNotFound
from airflow.executors.executor_loader import ExecutorLoader
from airflow.jobs.local_task_job import LocalTaskJob
from airflow.models import DagPickle, TaskInstance
from airflow.models.dag import DAG
from airflow.models.dagrun import DagRun
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.dependencies_deps import SCHEDULER_QUEUED_DEPS
from airflow.utils import cli as cli_utils
from airflow.utils.cli import (
get_dag,
get_dag_by_file_location,
get_dag_by_pickle,
get_dags,
suppress_logs_and_warning,
)
from airflow.utils.dates import timezone
from airflow.utils.log.logging_mixin import StreamLogWriter
from airflow.utils.net import get_hostname
from airflow.utils.session import create_session, provide_session
def _get_dag_run(dag, exec_date_or_run_id, create_if_necessary, session):
dag_run = dag.get_dagrun(run_id=exec_date_or_run_id, session=session)
if dag_run:
return dag_run
execution_date = None
with suppress(ParserError, TypeError):
execution_date = timezone.parse(exec_date_or_run_id)
    if create_if_necessary and not execution_date:
return DagRun(dag_id=dag.dag_id, run_id=exec_date_or_run_id)
try:
return (
session.query(DagRun)
.filter(
DagRun.dag_id == dag.dag_id,
DagRun.execution_date == execution_date,
)
.one()
)
except NoResultFound:
        if create_if_necessary:
return DagRun(dag.dag_id, execution_date=execution_date)
raise DagRunNotFound(
f"DagRun for {dag.dag_id} with run_id or execution_date of {exec_date_or_run_id!r} not found"
) from None
@provide_session
def _get_ti(task, exec_date_or_run_id, create_if_necessary=False, session=None):
    """Get the task instance through DagRun.run_id, if that fails, get the TI the old way"""
    dag_run = _get_dag_run(task.dag, exec_date_or_run_id, create_if_necessary, session)
    ti = dag_run.get_task_instance(task.task_id)
    if not ti and create_if_necessary:
ti = TaskInstance(task, run_id=None)
ti.dag_run = dag_run
ti.refresh_from_task(task)
return ti
def _run_task_by_selected_method(args, dag: DAG, ti: TaskInstance) -> None:
"""
Runs the task in one of 3 modes
- using LocalTaskJob
- as raw task
- by executor
"""
if args.local:
_run_task_by_local_task_job(args, ti)
elif args.raw:
_run_raw_task(args, ti)
else:
_run_task_by_executor(args, dag, ti)
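# Example invocation (hypothetical dag/task identifiers): the mode is chosen
# from the CLI flags, e.g.
#
#   airflow tasks run example_dag example_task 2021-01-01 --local
#
# runs under LocalTaskJob, adding --raw runs the task in-process, and with
# neither flag the task instance is queued on the default executor.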
def _run_task_by_executor(args, dag, ti):
"""
Sends the task to the executor for execution. This can result in the task being started by another host
    if the executor implementation supports it.
"""
pickle_id = None
if args.ship_dag:
try:
# Running remotely, so pickling the DAG
with create_session() as session:
pickle = DagPickle(dag)
session.add(pickle)
pickle_id = pickle.id
# TODO: This should be written to a log
print(f'Pickled dag {dag} as pickle_id: {pickle_id}')
except Exception as e:
print('Could not pickle the DAG')
print(e)
raise e
executor = ExecutorLoader.get_default_executor()
executor.job_id = "manual"
executor.start()
print("Sending to executor.")
executor.queue_task_instance(
ti,
mark_success=args.mark_success,
pickle_id=pickle_id,
ignore_all_deps=args.ignore_all_dependencies,
ignore_depends_on_past=args.ignore_depends_on_past,
ignore_task_deps=args.ignore_dependencies,
ignore_ti_state=args.force,
pool=args.pool,
)
executor.heartbeat()
executor.end()
def _run_task_by_local_task_job(args, ti):
"""Run LocalTaskJob, which monitors the raw task execution process"""
run_job = LocalTaskJob(
task_instance=ti,
mark_success=args.mark_success,
pickle_id=args.pickle,
ignore_all_deps=args.ignore_all_dependencies,
ignore_depends_on_past=args.ignore_depends_on_past,
ignore_task_deps=args.ignore_dependencies,
ignore_ti_state=args.force,
pool=args.pool,
external_executor_id=_extract_external_executor_id(args),
)
try:
run_job.run()
finally:
if args.shut_down_logging:
logging.shutdown()
RAW_TASK_UNSUPPORTED_OPTION = [
"ignore_all_dependencies",
"ignore_depends_on_past",
"ignore_dependencies",
"force",
]
def _run_raw_task(args, ti: TaskInstance) -> None:
"""Runs the main task handling code"""
ti._run_raw_task(
mark_success=args.mark_success,
job_id=args.job_id,
pool=args.pool,
error_file=args.error_file,
)
def _extract_external_executor_id(args) -> Optional[str]:
if hasattr(args, "external_executor_id"):
return getattr(args, "external_executor_id")
return os.environ.get("external_executor_id", None)
@contextmanager
def _capture_task_logs(ti):
"""Manage logging context for a task run
- Replace the root logger configuration with the airflow.task configuration
so we can capture logs from any custom loggers used in the task.
- Redirect stdout and stderr to the task instance log, as INFO and WARNING
level messages, respectively.
"""
modify = not settings.DONOT_MODIFY_HANDLERS
if modify:
root_logger, task_logger = logging.getLogger(), logging.getLogger('airflow.task')
orig_level = root_logger.level
root_logger.setLevel(task_logger.level)
orig_handlers = root_logger.handlers.copy()
root_logger.handlers[:] = task_logger.handlers
try:
info_writer = StreamLogWriter(ti.log, logging.INFO)
warning_writer = StreamLogWriter(ti.log, logging.WARNING)
with redirect_stdout(info_writer), redirect_stderr(warning_writer):
yield
finally:
if modify:
# Restore the root logger to its original state.
root_logger.setLevel(orig_level)
root_logger.handlers[:] = orig_handlers
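# Minimal usage sketch (``ti`` stands for a TaskInstance, as in task_run
# below); stdout inside the block lands in the task log at INFO level and
# stderr at WARNING level:
#
#   with _capture_task_logs(ti):
#       print("recorded in the task instance log")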
@cli_utils.action_logging
def task_run(args, dag=None):
"""Runs a single task instance"""
# Load custom airflow config
if args.local and args.raw:
raise AirflowException(
"Option --raw and --local are mutually exclusive. "
"Please remove one option to execute the command."
)
if args.raw:
unsupported_options = [o for o in RAW_TASK_UNSUPPORTED_OPTION if getattr(args, o)]
if unsupported_options:
raise AirflowException(
"Option --raw does not work with some of the other options on this command. You "
"can't use --raw option and the following options: {}. You provided the option {}. "
"Delete it to execute the command".format(
", ".join(f"--{o}" for o in RAW_TASK_UNSUPPORTED_OPTION),
", ".join(f"--{o}" for o in unsupported_options),
)
)
if dag and args.pickle:
raise AirflowException("You cannot use the --pickle option when using DAG.cli() method.")
if args.cfg_path:
with open(args.cfg_path) as conf_file:
conf_dict = json.load(conf_file)
if os.path.exists(args.cfg_path):
os.remove(args.cfg_path)
conf.read_dict(conf_dict, source=args.cfg_path)
settings.configure_vars()
settings.MASK_SECRETS_IN_LOGS = True
# IMPORTANT, have to use the NullPool, otherwise, each "run" command may leave
# behind multiple open sleeping connections while heartbeating, which could
# easily exceed the database connection limit when
# processing hundreds of simultaneous tasks.
settings.configure_orm(disable_connection_pool=True)
if args.pickle:
print(f'Loading pickle id: {args.pickle}')
dag = get_dag_by_pickle(args.pickle)
elif not dag:
dag = get_dag(args.subdir, args.dag_id)
else:
# Use DAG from parameter
pass
task = dag.get_task(task_id=args.task_id)
ti = _get_ti(task, args.execution_date_or_run_id)
ti.init_run_context(raw=args.raw)
hostname = get_hostname()
print(f"Running {ti} on host {hostname}")
if args.interactive:
_run_task_by_selected_method(args, dag, ti)
else:
with _capture_task_logs(ti):
_run_task_by_selected_method(args, dag, ti)
@cli_utils.action_logging
def task_failed_deps(args):
"""
Returns the unmet dependencies for a task instance from the perspective of the
scheduler (i.e. why a task instance doesn't get scheduled and then queued by the
scheduler, and then run by an executor).
>>> airflow tasks failed-deps tutorial sleep 2015-01-01
Task instance dependencies not met:
Dagrun Running: Task instance's dagrun did not exist: Unknown reason
Trigger Rule: Task's trigger rule 'all_success' requires all upstream tasks
to have succeeded, but found 1 non-success(es).
"""
dag = get_dag(args.subdir, args.dag_id)
task = dag.get_task(task_id=args.task_id)
ti = _get_ti(task, args.execution_date_or_run_id)
dep_context = DepContext(deps=SCHEDULER_QUEUED_DEPS)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
# TODO, Do we want to print or log this
if failed_deps:
print("Task instance dependencies not met:")
for dep in failed_deps:
print(f"{dep.dep_name}: {dep.reason}")
else:
print("Task instance dependencies are all met.")
@cli_utils.action_logging
@suppress_logs_and_warning
def task_state(args):
"""
Returns the state of a TaskInstance at the command line.
>>> airflow tasks state tutorial sleep 2015-01-01
success
"""
dag = get_dag(args.subdir, args.dag_id)
task = dag.get_task(task_id=args.task_id)
ti = _get_ti(task, args.execution_date_or_run_id)
print(ti.current_state())
@cli_utils.action_logging
@suppress_logs_and_warning
def task_list(args, dag=None):
"""Lists the tasks within a DAG at the command line"""
dag = dag or get_dag(args.subdir, args.dag_id)
if args.tree:
dag.tree_view()
else:
tasks = sorted(t.task_id for t in dag.tasks)
print("\n".join(tasks))
SUPPORTED_DEBUGGER_MODULES: List[str] = [
"pudb",
"web_pdb",
"ipdb",
"pdb",
]
def _guess_debugger():
"""
    Try to guess the debugger preferred by the user. When no user-installed
    debugger is found, fall back to ``pdb``.
List of supported debuggers:
* `pudb <https://github.com/inducer/pudb>`__
* `web_pdb <https://github.com/romanvm/python-web-pdb>`__
* `ipdb <https://github.com/gotcha/ipdb>`__
* `pdb <https://docs.python.org/3/library/pdb.html>`__
"""
for mod in SUPPORTED_DEBUGGER_MODULES:
try:
return importlib.import_module(mod)
except ImportError:
continue
return importlib.import_module("pdb")
@cli_utils.action_logging
@suppress_logs_and_warning
@provide_session
def task_states_for_dag_run(args, session=None):
"""Get the status of all task instances in a DagRun"""
dag_run = (
session.query(DagRun)
.filter(DagRun.run_id == args.execution_date_or_run_id, DagRun.dag_id == args.dag_id)
.one_or_none()
)
if not dag_run:
try:
execution_date = timezone.parse(args.execution_date_or_run_id)
dag_run = (
session.query(DagRun)
.filter(DagRun.execution_date == execution_date, DagRun.dag_id == args.dag_id)
.one_or_none()
)
except (ParserError, TypeError) as err:
raise AirflowException(f"Error parsing the supplied execution_date. Error: {str(err)}")
if dag_run is None:
raise DagRunNotFound(
f"DagRun for {args.dag_id} with run_id or execution_date of {args.execution_date_or_run_id!r} "
"not found"
)
AirflowConsole().print_as(
data=dag_run.task_instances,
output=args.output,
mapper=lambda ti: {
"dag_id": ti.dag_id,
"execution_date": dag_run.execution_date.isoformat(),
"task_id": ti.task_id,
"state": ti.state,
"start_date": ti.start_date.isoformat() if ti.start_date else "",
"end_date": ti.end_date.isoformat() if ti.end_date else "",
},
)
@cli_utils.action_logging
def task_test(args, dag=None):
"""Tests task for a given dag_id"""
# We want to log output from operators etc to show up here. Normally
# airflow.task would redirect to a file, but here we want it to propagate
# up to the normal airflow handler.
settings.MASK_SECRETS_IN_LOGS = True
handlers = logging.getLogger('airflow.task').handlers
already_has_stream_handler = False
for handler in handlers:
already_has_stream_handler = isinstance(handler, logging.StreamHandler)
if already_has_stream_handler:
break
if not already_has_stream_handler:
logging.getLogger('airflow.task').propagate = True
env_vars = {'AIRFLOW_TEST_MODE': 'True'}
if args.env_vars:
env_vars.update(args.env_vars)
os.environ.update(env_vars)
dag = dag or get_dag(args.subdir, args.dag_id)
task = dag.get_task(task_id=args.task_id)
# Add CLI provided task_params to task.params
if args.task_params:
passed_in_params = json.loads(args.task_params)
task.params.update(passed_in_params)
if task.params:
task.params.validate()
    ti = _get_ti(task, args.execution_date_or_run_id, create_if_necessary=True)
try:
if args.dry_run:
ti.dry_run()
else:
ti.run(ignore_task_deps=True, ignore_ti_state=True, test_mode=True)
except Exception:
if args.post_mortem:
debugger = _guess_debugger()
debugger.post_mortem()
else:
raise
finally:
if not already_has_stream_handler:
# Make sure to reset back to normal. When run for CLI this doesn't
# matter, but it does for test suite
logging.getLogger('airflow.task').propagate = False
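# Example invocation (hypothetical identifiers): run one task without
# recording state in the database, overriding its params from the CLI:
#
#   airflow tasks test example_dag example_task 2021-01-01 \
#       --task-params '{"threshold": 10}'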
@cli_utils.action_logging
@suppress_logs_and_warning
def task_render(args):
"""Renders and displays templated fields for a given task"""
dag = get_dag(args.subdir, args.dag_id)
task = dag.get_task(task_id=args.task_id)
    ti = _get_ti(task, args.execution_date_or_run_id, create_if_necessary=True)
ti.render_templates()
for attr in task.__class__.template_fields:
print(
textwrap.dedent(
f""" # ----------------------------------------------------------
# property: {attr}
# ----------------------------------------------------------
{getattr(task, attr)}
"""
)
)
@cli_utils.action_logging
def task_clear(args):
"""Clears all task instances or only those matched by regex for a DAG(s)"""
logging.basicConfig(level=settings.LOGGING_LEVEL, format=settings.SIMPLE_LOG_FORMAT)
if args.dag_id and not args.subdir and not args.dag_regex and not args.task_regex:
dags = [get_dag_by_file_location(args.dag_id)]
else:
# todo clear command only accepts a single dag_id. no reason for get_dags with 's' except regex?
dags = get_dags(args.subdir, args.dag_id, use_regex=args.dag_regex)
if args.task_regex:
for idx, dag in enumerate(dags):
dags[idx] = dag.partial_subset(
task_ids_or_regex=args.task_regex,
include_downstream=args.downstream,
include_upstream=args.upstream,
)
DAG.clear_dags(
dags,
start_date=args.start_date,
end_date=args.end_date,
only_failed=args.only_failed,
only_running=args.only_running,
confirm_prompt=not args.yes,
include_subdags=not args.exclude_subdags,
include_parentdag=not args.exclude_parentdag,
)
| []
| []
| [
"external_executor_id"
]
| [] | ["external_executor_id"] | python | 1 | 0 | |
src/com.mentor.nucleus.bp.core/src/com/mentor/nucleus/bp/core/MessageInSequence_c.java | package com.mentor.nucleus.bp.core;
//====================================================================
//
// File: com.mentor.nucleus.bp.core.MessageInSequence_c.java
//
// WARNING: Do not edit this generated file
// Generated by ../MC-Java/java.arc, $Revision: 1.111 $
//
// (c) Copyright 2005-2014 by Mentor Graphics Corp. All rights reserved.
//
//====================================================================
// No special imports
import java.util.*;
import java.lang.reflect.*;
import org.eclipse.core.resources.IFile;
import org.eclipse.core.resources.IResource;
import org.eclipse.core.runtime.IAdaptable;
import org.eclipse.core.runtime.IPath;
import org.eclipse.core.runtime.Path;
import com.mentor.nucleus.bp.core.util.PersistenceUtil;
import org.eclipse.core.runtime.NullProgressMonitor;
import com.mentor.nucleus.bp.core.ui.marker.UmlProblem;
import com.mentor.nucleus.bp.core.common.*;
abstract class EV_MESSAGE_IN_SEQUENCE extends genericEvent_c {
public abstract int getEvtcode();
}
public class MessageInSequence_c extends NonRootModelElement
implements
IAdaptable,
Cloneable {
// Public Constructors
public MessageInSequence_c(ModelRoot modelRoot, java.util.UUID p_m_msg_id,
java.util.UUID p_m_package_id) {
super(modelRoot);
//pre-process the uuid so that we re-use null uuid instance rather then creating a new one.
m_msg_id = IdAssigner.preprocessUUID(p_m_msg_id);
//pre-process the uuid so that we re-use null uuid instance rather then creating a new one.
m_package_id = IdAssigner.preprocessUUID(p_m_package_id);
Object[] key = {m_msg_id, m_package_id};
addInstanceToMap(key);
}
static public MessageInSequence_c createProxy(ModelRoot modelRoot,
java.util.UUID p_m_msg_id, java.util.UUID p_m_package_id,
String p_contentPath, IPath p_localPath) {
ModelRoot resolvedModelRoot = ModelRoot.findModelRoot(modelRoot,
p_contentPath, p_localPath);
// if a model root was not resolved it is most likely
		// due to a missing file of the proxy, default back to
// the original model root
if (resolvedModelRoot != null)
modelRoot = resolvedModelRoot;
InstanceList instances = modelRoot
.getInstanceList(MessageInSequence_c.class);
MessageInSequence_c new_inst = null;
synchronized (instances) {
Object[] key = {p_m_msg_id, p_m_package_id};
new_inst = (MessageInSequence_c) instances.get(key);
}
String contentPath = PersistenceUtil.resolveRelativePath(p_localPath,
new Path(p_contentPath));
if (modelRoot.isNewCompareRoot()) {
// for comparisons we do not want to change
// the content path
contentPath = p_contentPath;
}
if (new_inst != null && !modelRoot.isCompareRoot()) {
PersistableModelComponent pmc = new_inst.getPersistableComponent();
if (pmc == null) {
// dangling reference, redo this instance
new_inst.batchUnrelate();
//pre-process the uuid so that we re-use null uuid instance rather then creating a new one.
new_inst.m_msg_id = IdAssigner.preprocessUUID(p_m_msg_id);
//pre-process the uuid so that we re-use null uuid instance rather then creating a new one.
new_inst.m_package_id = IdAssigner
.preprocessUUID(p_m_package_id);
}
}
if (new_inst == null) {
// there is no instance matching the id, create a proxy
// if the resource doesn't exist then this will be a dangling reference
new_inst = new MessageInSequence_c(modelRoot, p_m_msg_id,
p_m_package_id);
new_inst.m_contentPath = contentPath;
}
return new_inst;
}
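	// Illustrative call (hypothetical ids and paths, not from any real
	// model): resolve an existing instance keyed by (msg_id, package_id) or
	// create a proxy for it; an unresolvable content path yields a dangling
	// reference.
	//
	//   MessageInSequence_c mis = MessageInSequence_c.createProxy(
	//       modelRoot, msgId, pkgId, "models/seq/part.xtuml", localPath);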
static public MessageInSequence_c resolveInstance(ModelRoot modelRoot,
java.util.UUID p_m_msg_id, java.util.UUID p_m_package_id) {
InstanceList instances = modelRoot
.getInstanceList(MessageInSequence_c.class);
MessageInSequence_c source = null;
synchronized (instances) {
Object[] key = {p_m_msg_id, p_m_package_id};
source = (MessageInSequence_c) instances.get(key);
if (source != null && !modelRoot.isCompareRoot()) {
source.convertFromProxy();
source.batchUnrelate();
//pre-process the uuid so that we re-use null uuid instance rather then creating a new one.
source.m_msg_id = IdAssigner.preprocessUUID(p_m_msg_id);
//pre-process the uuid so that we re-use null uuid instance rather then creating a new one.
source.m_package_id = IdAssigner.preprocessUUID(p_m_package_id);
return source;
}
}
// there is no instance matching the id
MessageInSequence_c new_inst = new MessageInSequence_c(modelRoot,
p_m_msg_id, p_m_package_id);
return new_inst;
}
public MessageInSequence_c(ModelRoot modelRoot) {
super(modelRoot);
m_msg_id = IdAssigner.NULL_UUID;
m_package_id = IdAssigner.NULL_UUID;
Object[] key = {m_msg_id, m_package_id};
addInstanceToMap(key);
}
public Object getInstanceKey() {
Object[] key = {m_msg_id, m_package_id};
return key;
}
public boolean setInstanceKey(UUID p_newKey) {
boolean changed = false;
// round p1
// round p2
// round p3
// round p4
// round p5
if (m_msg_id != p_newKey) {
m_msg_id = p_newKey;
changed = true;
}
return changed;
}
public boolean equals(Object elem) {
if (!(elem instanceof MessageInSequence_c)) {
return false;
}
// check that the model-roots are the same
if (((NonRootModelElement) elem).getModelRoot() != getModelRoot()) {
return false;
}
return identityEquals(elem);
}
public boolean identityEquals(Object elem) {
if (!(elem instanceof MessageInSequence_c)) {
return false;
}
MessageInSequence_c me = (MessageInSequence_c) elem;
// don't allow an empty id-value to produce a false positive result;
// in this case, use whether the two instances are actually the same
// one in memory, instead
if ((IdAssigner.NULL_UUID.equals(getMsg_id()) || IdAssigner.NULL_UUID
.equals(((MessageInSequence_c) elem).getMsg_id()))
&& this != elem) {
return false;
}
if (!getMsg_id().equals(((MessageInSequence_c) elem).getMsg_id()))
return false;
// don't allow an empty id-value to produce a false positive result;
// in this case, use whether the two instances are actually the same
// one in memory, instead
if ((IdAssigner.NULL_UUID.equals(getPackage_id()) || IdAssigner.NULL_UUID
.equals(((MessageInSequence_c) elem).getPackage_id()))
&& this != elem) {
return false;
}
if (!getPackage_id().equals(
((MessageInSequence_c) elem).getPackage_id()))
return false;
return true;
}
public boolean cachedIdentityEquals(Object elem) {
if (!(elem instanceof MessageInSequence_c)) {
return false;
}
MessageInSequence_c me = (MessageInSequence_c) elem;
if (!getMsg_idCachedValue().equals(
((MessageInSequence_c) elem).getMsg_idCachedValue()))
return false;
if (!getPackage_idCachedValue().equals(
((MessageInSequence_c) elem).getPackage_idCachedValue()))
return false;
return true;
}
// Attributes
private java.util.UUID m_msg_id;
private java.util.UUID m_package_id;
// declare association references from this class
// referring navigation
Sequence_c ContainedInSequence;
public void relateAcrossR953To(Sequence_c target) {
relateAcrossR953To(target, true);
}
public void relateAcrossR953To(Sequence_c target, boolean notifyChanges) {
if (target == null)
return;
if (target == ContainedInSequence)
return; // already related
if (ContainedInSequence != target) {
Object oldKey = getInstanceKey();
if (ContainedInSequence != null) {
ContainedInSequence.clearBackPointerR953To(this);
if (Boolean.valueOf(System.getenv("PTC_MCC_ENABLED")) == true) { //$NON-NLS-1$
Ooaofooa.log
.println(
ILogger.CONSISTENCY,
"MessageInSequence_c.relateAcrossR953To(Sequence_c target)",
"Relate performed across R953 from Message In Sequence to Sequence without unrelate of prior instance.");
}
}
ContainedInSequence = target;
if (IdAssigner.NULL_UUID.equals(target.getPackage_id())) {
// do not update cached value
} else {
// update cached value
m_package_id = target.getPackage_idCachedValue();
}
updateInstanceKey(oldKey, getInstanceKey());
target.setBackPointerR953To(this);
target.addRef();
if (notifyChanges) {
RelationshipChangeModelDelta change = new RelationshipChangeModelDelta(
Modeleventnotification_c.DELTA_ELEMENT_RELATED, this,
target, "953", "");
Ooaofooa.getDefaultInstance().fireModelElementRelationChanged(
change);
}
}
}
public void unrelateAcrossR953From(Sequence_c target) {
unrelateAcrossR953From(target, true);
}
public void unrelateAcrossR953From(Sequence_c target, boolean notifyChanges) {
if (target == null)
return;
if (ContainedInSequence == null)
return; // already unrelated
if (target != ContainedInSequence) {
Exception e = new Exception();
e.fillInStackTrace();
CorePlugin.logError(
"Tried to unrelate from non-related instance across R953",
e);
return;
}
if (target != null) {
target.clearBackPointerR953To(this);
}
if (ContainedInSequence != null) {
m_package_id = ContainedInSequence.getPackage_id();
if (IdAssigner.NULL_UUID.equals(m_package_id)) {
m_package_id = ContainedInSequence.getPackage_idCachedValue();
}
ContainedInSequence = null;
target.removeRef();
if (notifyChanges) {
RelationshipChangeModelDelta change = new RelationshipChangeModelDelta(
Modeleventnotification_c.DELTA_ELEMENT_UNRELATED, this,
target, "953", "");
Ooaofooa.getDefaultInstance().fireModelElementRelationChanged(
change);
}
}
}
public static MessageInSequence_c getOneSQ_MISOnR953(Sequence_c[] targets) {
return getOneSQ_MISOnR953(targets, null);
}
public static MessageInSequence_c getOneSQ_MISOnR953(Sequence_c[] targets,
ClassQueryInterface_c test) {
MessageInSequence_c ret_val = null;
if (targets != null) {
for (int i = 0; i < targets.length && ret_val == null; ++i) {
ret_val = getOneSQ_MISOnR953(targets[i], test);
}
}
return ret_val;
}
public static MessageInSequence_c getOneSQ_MISOnR953(Sequence_c target) {
return getOneSQ_MISOnR953(target, null);
}
public static MessageInSequence_c getOneSQ_MISOnR953(Sequence_c target,
boolean loadComponent) {
return getOneSQ_MISOnR953(target.getModelRoot(), target, null,
loadComponent);
}
public static MessageInSequence_c getOneSQ_MISOnR953(Sequence_c target,
ClassQueryInterface_c test) {
if (target != null) {
return getOneSQ_MISOnR953(target.getModelRoot(), target, test);
}
return null;
}
public static MessageInSequence_c getOneSQ_MISOnR953(ModelRoot modelRoot,
Sequence_c target, ClassQueryInterface_c test) {
return getOneSQ_MISOnR953(modelRoot, target, test, true);
}
public static MessageInSequence_c getOneSQ_MISOnR953(ModelRoot modelRoot,
Sequence_c target, ClassQueryInterface_c test, boolean loadComponent) {
return find_getOneSQ_MISOnR953(modelRoot, target, test);
}
private static MessageInSequence_c find_getOneSQ_MISOnR953(
ModelRoot modelRoot, Sequence_c target, ClassQueryInterface_c test) {
if (target != null) {
synchronized (target.backPointer_ContainsMessageInSequenceContains_R953) {
for (int i = 0; i < target.backPointer_ContainsMessageInSequenceContains_R953
.size(); ++i) {
MessageInSequence_c source = (MessageInSequence_c) target.backPointer_ContainsMessageInSequenceContains_R953
.get(i);
if (source != null
&& (test == null || test.evaluate(source))) {
return source;
}
}
}
}
// not found
return null;
}
public static MessageInSequence_c[] getManySQ_MISsOnR953(
Sequence_c[] targets) {
return getManySQ_MISsOnR953(targets, null);
}
public static MessageInSequence_c[] getManySQ_MISsOnR953(
Sequence_c[] targets, boolean loadComponent) {
return getManySQ_MISsOnR953(targets, null, loadComponent);
}
public static MessageInSequence_c[] getManySQ_MISsOnR953(
Sequence_c[] targets, ClassQueryInterface_c test) {
return getManySQ_MISsOnR953(targets, test, true);
}
public static MessageInSequence_c[] getManySQ_MISsOnR953(
Sequence_c[] targets, ClassQueryInterface_c test,
boolean loadComponent) {
if (targets == null || targets.length == 0 || targets[0] == null)
return new MessageInSequence_c[0];
ModelRoot modelRoot = targets[0].getModelRoot();
InstanceList instances = modelRoot
.getInstanceList(MessageInSequence_c.class);
Vector matches = new Vector();
for (int i = 0; i < targets.length; i++) {
synchronized (targets[i].backPointer_ContainsMessageInSequenceContains_R953) {
for (int j = 0; j < targets[i].backPointer_ContainsMessageInSequenceContains_R953
.size(); ++j) {
MessageInSequence_c source = (MessageInSequence_c) targets[i].backPointer_ContainsMessageInSequenceContains_R953
.get(j);
if (source != null
&& (test == null || test.evaluate(source))) {
matches.add(source);
}
}
}
}
if (matches.size() > 0) {
MessageInSequence_c[] ret_set = new MessageInSequence_c[matches
.size()];
matches.copyInto(ret_set);
return ret_set;
} else {
return new MessageInSequence_c[0];
}
}
public static MessageInSequence_c[] getManySQ_MISsOnR953(Sequence_c target) {
return getManySQ_MISsOnR953(target, null);
}
public static MessageInSequence_c[] getManySQ_MISsOnR953(Sequence_c target,
boolean loadComponent) {
return getManySQ_MISsOnR953(target, null, loadComponent);
}
public static MessageInSequence_c[] getManySQ_MISsOnR953(Sequence_c target,
ClassQueryInterface_c test) {
return getManySQ_MISsOnR953(target, test, true);
}
public static MessageInSequence_c[] getManySQ_MISsOnR953(Sequence_c target,
ClassQueryInterface_c test, boolean loadComponent) {
if (target == null)
return new MessageInSequence_c[0];
ModelRoot modelRoot = target.getModelRoot();
Vector matches = new Vector();
synchronized (target.backPointer_ContainsMessageInSequenceContains_R953) {
for (int i = 0; i < target.backPointer_ContainsMessageInSequenceContains_R953
.size(); ++i) {
MessageInSequence_c source = (MessageInSequence_c) target.backPointer_ContainsMessageInSequenceContains_R953
.get(i);
if (source != null && (test == null || test.evaluate(source))) {
matches.add(source);
}
}
}
if (matches.size() > 0) {
MessageInSequence_c[] ret_set = new MessageInSequence_c[matches
.size()];
matches.copyInto(ret_set);
return ret_set;
} else {
return new MessageInSequence_c[0];
}
}
// referring navigation
Message_c ProvidesContainmentMessage;
public void relateAcrossR954To(Message_c target) {
relateAcrossR954To(target, true);
}
public void relateAcrossR954To(Message_c target, boolean notifyChanges) {
if (target == null)
return;
if (target == ProvidesContainmentMessage)
return; // already related
if (ProvidesContainmentMessage != target) {
Object oldKey = getInstanceKey();
if (ProvidesContainmentMessage != null) {
ProvidesContainmentMessage.clearBackPointerR954To(this);
if (Boolean.valueOf(System.getenv("PTC_MCC_ENABLED")) == true) { //$NON-NLS-1$
Ooaofooa.log
.println(
ILogger.CONSISTENCY,
"MessageInSequence_c.relateAcrossR954To(Message_c target)",
"Relate performed across R954 from Message In Sequence to Message without unrelate of prior instance.");
}
}
ProvidesContainmentMessage = target;
if (IdAssigner.NULL_UUID.equals(target.getMsg_id())) {
// do not update cached value
} else {
// update cached value
m_msg_id = target.getMsg_idCachedValue();
}
updateInstanceKey(oldKey, getInstanceKey());
target.setBackPointerR954To(this);
target.addRef();
if (notifyChanges) {
RelationshipChangeModelDelta change = new RelationshipChangeModelDelta(
Modeleventnotification_c.DELTA_ELEMENT_RELATED, this,
target, "954", "");
Ooaofooa.getDefaultInstance().fireModelElementRelationChanged(
change);
}
}
}
public void unrelateAcrossR954From(Message_c target) {
unrelateAcrossR954From(target, true);
}
public void unrelateAcrossR954From(Message_c target, boolean notifyChanges) {
if (target == null)
return;
if (ProvidesContainmentMessage == null)
return; // already unrelated
if (target != ProvidesContainmentMessage) {
Exception e = new Exception();
e.fillInStackTrace();
CorePlugin.logError(
"Tried to unrelate from non-related instance across R954",
e);
return;
}
if (target != null) {
target.clearBackPointerR954To(this);
}
if (ProvidesContainmentMessage != null) {
m_msg_id = ProvidesContainmentMessage.getMsg_id();
if (IdAssigner.NULL_UUID.equals(m_msg_id)) {
m_msg_id = ProvidesContainmentMessage.getMsg_idCachedValue();
}
ProvidesContainmentMessage = null;
target.removeRef();
if (notifyChanges) {
RelationshipChangeModelDelta change = new RelationshipChangeModelDelta(
Modeleventnotification_c.DELTA_ELEMENT_UNRELATED, this,
target, "954", "");
Ooaofooa.getDefaultInstance().fireModelElementRelationChanged(
change);
}
}
}
public static MessageInSequence_c getOneSQ_MISOnR954(Message_c[] targets) {
return getOneSQ_MISOnR954(targets, null);
}
public static MessageInSequence_c getOneSQ_MISOnR954(Message_c[] targets,
ClassQueryInterface_c test) {
MessageInSequence_c ret_val = null;
if (targets != null) {
for (int i = 0; i < targets.length && ret_val == null; ++i) {
ret_val = getOneSQ_MISOnR954(targets[i], test);
}
}
return ret_val;
}
public static MessageInSequence_c getOneSQ_MISOnR954(Message_c target) {
return getOneSQ_MISOnR954(target, null);
}
public static MessageInSequence_c getOneSQ_MISOnR954(Message_c target,
boolean loadComponent) {
return getOneSQ_MISOnR954(target.getModelRoot(), target, null,
loadComponent);
}
public static MessageInSequence_c getOneSQ_MISOnR954(Message_c target,
ClassQueryInterface_c test) {
if (target != null) {
return getOneSQ_MISOnR954(target.getModelRoot(), target, test);
}
return null;
}
public static MessageInSequence_c getOneSQ_MISOnR954(ModelRoot modelRoot,
Message_c target, ClassQueryInterface_c test) {
return getOneSQ_MISOnR954(modelRoot, target, test, true);
}
public static MessageInSequence_c getOneSQ_MISOnR954(ModelRoot modelRoot,
Message_c target, ClassQueryInterface_c test, boolean loadComponent) {
return find_getOneSQ_MISOnR954(modelRoot, target, test);
}
private static MessageInSequence_c find_getOneSQ_MISOnR954(
ModelRoot modelRoot, Message_c target, ClassQueryInterface_c test) {
if (target != null) {
MessageInSequence_c source = (MessageInSequence_c) target.backPointer_ContainedThroughMessageInSequenceContainedThrough_R954;
if (source != null && (test == null || test.evaluate(source))) {
return source;
}
}
// not found
return null;
}
public static MessageInSequence_c[] getManySQ_MISsOnR954(Message_c[] targets) {
return getManySQ_MISsOnR954(targets, null);
}
public static MessageInSequence_c[] getManySQ_MISsOnR954(
Message_c[] targets, boolean loadComponent) {
return getManySQ_MISsOnR954(targets, null, loadComponent);
}
public static MessageInSequence_c[] getManySQ_MISsOnR954(
Message_c[] targets, ClassQueryInterface_c test) {
return getManySQ_MISsOnR954(targets, test, true);
}
public static MessageInSequence_c[] getManySQ_MISsOnR954(
Message_c[] targets, ClassQueryInterface_c test,
boolean loadComponent) {
if (targets == null || targets.length == 0 || targets[0] == null)
return new MessageInSequence_c[0];
ModelRoot modelRoot = targets[0].getModelRoot();
InstanceList instances = modelRoot
.getInstanceList(MessageInSequence_c.class);
Vector matches = new Vector();
for (int i = 0; i < targets.length; i++) {
MessageInSequence_c source = (MessageInSequence_c) targets[i].backPointer_ContainedThroughMessageInSequenceContainedThrough_R954;
if (source != null && (test == null || test.evaluate(source))) {
matches.add(source);
}
}
if (matches.size() > 0) {
MessageInSequence_c[] ret_set = new MessageInSequence_c[matches
.size()];
matches.copyInto(ret_set);
return ret_set;
} else {
return new MessageInSequence_c[0];
}
}
public static MessageInSequence_c[] getManySQ_MISsOnR954(Message_c target) {
if (target != null) {
Message_c[] targetArray = new Message_c[1];
targetArray[0] = target;
return getManySQ_MISsOnR954(targetArray);
} else {
MessageInSequence_c[] result = new MessageInSequence_c[0];
return result;
}
}
public static MessageInSequence_c[] getManySQ_MISsOnR954(Message_c target,
boolean loadComponent) {
if (target != null) {
Message_c[] targetArray = new Message_c[1];
targetArray[0] = target;
return getManySQ_MISsOnR954(targetArray, loadComponent);
} else {
MessageInSequence_c[] result = new MessageInSequence_c[0];
return result;
}
}
public void batchRelate(ModelRoot modelRoot, boolean notifyChanges,
boolean searchAllRoots) {
batchRelate(modelRoot, false, notifyChanges, searchAllRoots);
}
public void batchRelate(ModelRoot modelRoot, boolean relateProxies,
boolean notifyChanges, boolean searchAllRoots) {
InstanceList instances = null;
ModelRoot baseRoot = modelRoot;
if (ContainedInSequence == null) {
// R953
Sequence_c relInst38800 = (Sequence_c) baseRoot.getInstanceList(
Sequence_c.class).get(new Object[]{m_package_id});
// if there was no local element, check for any global elements
// failing that proceed to check other model roots
if (relInst38800 == null) {
relInst38800 = (Sequence_c) Ooaofooa.getDefaultInstance()
.getInstanceList(Sequence_c.class)
.get(new Object[]{m_package_id});
}
if (relInst38800 == null && searchAllRoots
&& !baseRoot.isCompareRoot()) {
Ooaofooa[] roots = Ooaofooa.getInstances();
for (int i = 0; i < roots.length; i++) {
if (roots[i].isCompareRoot()) {
// never use elements from any compare root
continue;
}
relInst38800 = (Sequence_c) roots[i].getInstanceList(
Sequence_c.class).get(new Object[]{m_package_id});
if (relInst38800 != null)
break;
}
}
//synchronized
if (relInst38800 != null) {
if (relateProxies || !isProxy()
|| (inSameComponent(this, relInst38800) && !isProxy())) {
relInst38800.relateAcrossR953To(this, notifyChanges);
}
}
}
if (ProvidesContainmentMessage == null) {
// R954
Message_c relInst38801 = (Message_c) baseRoot.getInstanceList(
Message_c.class).get(new Object[]{m_msg_id});
// if there was no local element, check for any global elements
// failing that proceed to check other model roots
if (relInst38801 == null) {
relInst38801 = (Message_c) Ooaofooa.getDefaultInstance()
.getInstanceList(Message_c.class)
.get(new Object[]{m_msg_id});
}
if (relInst38801 == null && searchAllRoots
&& !baseRoot.isCompareRoot()) {
Ooaofooa[] roots = Ooaofooa.getInstances();
for (int i = 0; i < roots.length; i++) {
if (roots[i].isCompareRoot()) {
// never use elements from any compare root
continue;
}
relInst38801 = (Message_c) roots[i].getInstanceList(
Message_c.class).get(new Object[]{m_msg_id});
if (relInst38801 != null)
break;
}
}
//synchronized
if (relInst38801 != null) {
if (relateProxies || !isProxy()
|| (inSameComponent(this, relInst38801) && !isProxy())) {
relInst38801.relateAcrossR954To(this, notifyChanges);
}
}
}
}
public void batchUnrelate(boolean notifyChanges) {
NonRootModelElement inst = null;
// R953
// SQ_S
inst = ContainedInSequence;
unrelateAcrossR953From(ContainedInSequence, notifyChanges);
if (inst != null) {
inst.removeRef();
}
// R954
// MSG_M
inst = ProvidesContainmentMessage;
unrelateAcrossR954From(ProvidesContainmentMessage, notifyChanges);
if (inst != null) {
inst.removeRef();
}
}
public static void batchRelateAll(ModelRoot modelRoot,
boolean notifyChanges, boolean searchAllRoots) {
batchRelateAll(modelRoot, notifyChanges, searchAllRoots, false);
}
public static void batchRelateAll(ModelRoot modelRoot, boolean notifyChanges, boolean searchAllRoots, boolean relateProxies)
{
InstanceList instances = modelRoot.getInstanceList(MessageInSequence_c.class);
synchronized(instances) {
Iterator<NonRootModelElement> cursor = instances.iterator() ;
while (cursor.hasNext())
{
final MessageInSequence_c inst = (MessageInSequence_c)cursor.next() ;
inst.batchRelate(modelRoot, relateProxies, notifyChanges, searchAllRoots );
}
}
}
public static void clearInstances(ModelRoot modelRoot) {
InstanceList instances = modelRoot
.getInstanceList(MessageInSequence_c.class);
synchronized (instances) {
for (int i = instances.size() - 1; i >= 0; i--) {
((NonRootModelElement) instances.get(i)).delete_unchecked();
}
}
}
public static MessageInSequence_c MessageInSequenceInstance(
ModelRoot modelRoot, ClassQueryInterface_c test,
boolean loadComponent) {
MessageInSequence_c result = findMessageInSequenceInstance(modelRoot,
test, loadComponent);
if (result == null && loadComponent) {
List pmcs = PersistenceManager.findAllComponents(modelRoot,
MessageInSequence_c.class);
for (int i = 0; i < pmcs.size(); i++) {
PersistableModelComponent component = (PersistableModelComponent) pmcs
.get(i);
if (!component.isLoaded()) {
try {
component.load(new NullProgressMonitor());
result = findMessageInSequenceInstance(modelRoot, test,
loadComponent);
if (result != null)
return result;
} catch (Exception e) {
CorePlugin.logError("Error Loading component", e);
}
}
}
}
if (result != null && loadComponent) {
result.loadProxy();
}
return result;
}
private static MessageInSequence_c findMessageInSequenceInstance(
ModelRoot modelRoot, ClassQueryInterface_c test,
boolean loadComponent) {
InstanceList instances = modelRoot
.getInstanceList(MessageInSequence_c.class);
synchronized (instances) {
for (int i = 0; i < instances.size(); ++i) {
MessageInSequence_c x = (MessageInSequence_c) instances.get(i);
if (test == null || test.evaluate(x)) {
if (x.ensureLoaded(loadComponent))
return x;
}
}
}
return null;
}
public static MessageInSequence_c MessageInSequenceInstance(
ModelRoot modelRoot, ClassQueryInterface_c test) {
return MessageInSequenceInstance(modelRoot, test, true);
}
public static MessageInSequence_c MessageInSequenceInstance(
ModelRoot modelRoot) {
return MessageInSequenceInstance(modelRoot, null, true);
}
public static MessageInSequence_c[] MessageInSequenceInstances(
ModelRoot modelRoot, ClassQueryInterface_c test,
boolean loadComponent) {
if (loadComponent) {
PersistenceManager.ensureAllInstancesLoaded(modelRoot,
MessageInSequence_c.class);
}
InstanceList instances = modelRoot
.getInstanceList(MessageInSequence_c.class);
Vector matches = new Vector();
synchronized (instances) {
for (int i = 0; i < instances.size(); ++i) {
MessageInSequence_c x = (MessageInSequence_c) instances.get(i);
if (test == null || test.evaluate(x)) {
if (x.ensureLoaded(loadComponent))
matches.add(x);
}
}
if (matches.size() > 0) {
MessageInSequence_c[] ret_set = new MessageInSequence_c[matches
.size()];
matches.copyInto(ret_set);
return ret_set;
} else {
return new MessageInSequence_c[0];
}
}
}
public static MessageInSequence_c[] MessageInSequenceInstances(
ModelRoot modelRoot, ClassQueryInterface_c test) {
return MessageInSequenceInstances(modelRoot, test, true);
}
public static MessageInSequence_c[] MessageInSequenceInstances(
ModelRoot modelRoot) {
return MessageInSequenceInstances(modelRoot, null, true);
}
public boolean delete() {
boolean result = super.delete();
boolean delete_error = false;
String errorMsg = "The following relationships were not torn down by the Message In Sequence.dispose call: ";
Sequence_c testR953Inst = Sequence_c.getOneSQ_SOnR953(this, false);
if (testR953Inst != null) {
delete_error = true;
errorMsg = errorMsg + "953 ";
}
Message_c testR954Inst = Message_c.getOneMSG_MOnR954(this, false);
if (testR954Inst != null) {
delete_error = true;
errorMsg = errorMsg + "954 ";
}
if (delete_error == true) {
if (CorePlugin.getDefault().isDebugging()) {
Ooaofooa.log.println(ILogger.DELETE, "Message In Sequence",
errorMsg);
} else {
Exception e = new Exception();
e.fillInStackTrace();
CorePlugin.logError(errorMsg, e);
}
}
return result;
}
// end declare instance pool
// declare attribute accessors
public boolean isUUID(String attributeName) {
if (attributeName.equals("msg_id")) {
return true;
}
if (attributeName.equals("package_id")) {
return true;
}
return false;
}
public String getCompUniqueID() {
UUID tempID = null;
long longID = 0L;
StringBuffer result = new StringBuffer();
tempID = getMsg_id();
if (IdAssigner.NULL_UUID.equals(tempID))
tempID = getMsg_idCachedValue();
result.append(Long.toHexString(tempID.getMostSignificantBits()));
result.append(Long.toHexString(tempID.getLeastSignificantBits()));
tempID = getPackage_id();
if (IdAssigner.NULL_UUID.equals(tempID))
tempID = getPackage_idCachedValue();
result.append(Long.toHexString(tempID.getMostSignificantBits()));
result.append(Long.toHexString(tempID.getLeastSignificantBits()));
return result.toString();
}
// declare attribute accessors
public long getMsg_idLongBased() {
if (ProvidesContainmentMessage != null) {
return ProvidesContainmentMessage.getMsg_idLongBased();
}
return 0;
}
public java.util.UUID getMsg_id() {
if (ProvidesContainmentMessage != null) {
return ProvidesContainmentMessage.getMsg_id();
}
return IdAssigner.NULL_UUID;
}
public java.util.UUID getMsg_idCachedValue() {
if (!IdAssigner.NULL_UUID.equals(m_msg_id))
return m_msg_id;
else
return getMsg_id();
}
public void setMsg_id(java.util.UUID newValue) {
if (newValue != null) {
if (newValue.equals(m_msg_id)) {
return;
}
} else if (m_msg_id != null) {
if (m_msg_id.equals(newValue)) {
return;
}
} else {
return;
}
AttributeChangeModelDelta change = new AttributeChangeModelDelta(
Modeleventnotification_c.DELTA_ATTRIBUTE_CHANGE, this,
"Msg_id", m_msg_id, newValue, true);
m_msg_id = IdAssigner.preprocessUUID(newValue);
Ooaofooa.getDefaultInstance().fireModelElementAttributeChanged(change);
}
public long getPackage_idLongBased() {
if (ContainedInSequence != null) {
return ContainedInSequence.getPackage_idLongBased();
}
return 0;
}
public java.util.UUID getPackage_id() {
if (ContainedInSequence != null) {
return ContainedInSequence.getPackage_id();
}
return IdAssigner.NULL_UUID;
}
public java.util.UUID getPackage_idCachedValue() {
if (!IdAssigner.NULL_UUID.equals(m_package_id))
return m_package_id;
else
return getPackage_id();
}
public void setPackage_id(java.util.UUID newValue) {
if (newValue != null) {
if (newValue.equals(m_package_id)) {
return;
}
} else if (m_package_id != null) {
if (m_package_id.equals(newValue)) {
return;
}
} else {
return;
}
AttributeChangeModelDelta change = new AttributeChangeModelDelta(
Modeleventnotification_c.DELTA_ATTRIBUTE_CHANGE, this,
"Package_id", m_package_id, newValue, true);
m_package_id = IdAssigner.preprocessUUID(newValue);
Ooaofooa.getDefaultInstance().fireModelElementAttributeChanged(change);
}
// end declare accessors
public static void checkClassConsistency(ModelRoot modelRoot) {
Ooaofooa.log
.println(ILogger.OPERATION, "Message In Sequence", //$NON-NLS-1$
" Operation entered: Message In Sequence::checkClassConsistency"); //$NON-NLS-1$
if (Boolean.valueOf(System.getenv("PTC_MCC_ENABLED")) == false) { //$NON-NLS-1$
return;
}
MessageInSequence_c[] objs = MessageInSequence_c
.MessageInSequenceInstances(modelRoot, null, false);
for (int i = 0; i < objs.length; i++) {
objs[i].checkConsistency();
}
}
public boolean checkConsistency() {
Ooaofooa.log.println(ILogger.OPERATION, "Message In Sequence", //$NON-NLS-1$
" Operation entered: Message In Sequence::checkConsistency"); //$NON-NLS-1$
if (Boolean.valueOf(System.getenv("PTC_MCC_ENABLED")) == false) { //$NON-NLS-1$
return true;
}
ModelRoot modelRoot = getModelRoot();
boolean retval = true;
class MessageInSequence_c_test38803_c implements ClassQueryInterface_c {
MessageInSequence_c_test38803_c(java.util.UUID p38804,
java.util.UUID p38805) {
m_p38804 = p38804;
m_p38805 = p38805;
}
private java.util.UUID m_p38804;
private java.util.UUID m_p38805;
public boolean evaluate(Object candidate) {
MessageInSequence_c selected = (MessageInSequence_c) candidate;
boolean retval = false;
retval = (selected.getMsg_id().equals(m_p38804))
& (selected.getPackage_id().equals(m_p38805));
return retval;
}
}
MessageInSequence_c[] objs38802 = MessageInSequence_c
.MessageInSequenceInstances(modelRoot,
new MessageInSequence_c_test38803_c(getMsg_id(),
getPackage_id()));
if (((objs38802.length) == 0)) {
if (CorePlugin.getDefault().isDebugging()) {
Ooaofooa.log
.println(ILogger.CONSISTENCY,
"Message In Sequence", //$NON-NLS-1$
"Consistency: Object: Message In Sequence: Cardinality of an identifier is zero. " //$NON-NLS-1$
+ "Actual Value: " + Integer.toString(objs38802.length)); //$NON-NLS-1$
} else {
Exception e = new Exception();
CorePlugin
.logError(
"Consistency: Object: Message In Sequence: Cardinality of an identifier is zero. " //$NON-NLS-1$
+ "Actual Value: " //$NON-NLS-1$
+ Integer.toString(objs38802.length), e);
}
retval = false;
}
if (((objs38802.length) > 1)) {
if (CorePlugin.getDefault().isDebugging()) {
Ooaofooa.log
.println(ILogger.CONSISTENCY,
"Message In Sequence", //$NON-NLS-1$
"Consistency: Object: Message In Sequence: Cardinality of an identifier is greater than 1. " //$NON-NLS-1$
+ "Actual Value: " //$NON-NLS-1$
+ Integer.toString(objs38802.length)
+ " Msg_ID: " + "Not Printable" + " Package_ID: " + "Not Printable"); //$NON-NLS-1$
} else {
Exception e = new Exception();
CorePlugin
.logError(
"Consistency: Object: Message In Sequence: Cardinality of an identifier is greater than 1. " //$NON-NLS-1$
+ "Actual Value: " //$NON-NLS-1$
+ Integer.toString(objs38802.length)
+ " Msg_ID: " + "Not Printable" + " Package_ID: " + "Not Printable", e); //$NON-NLS-1$
}
retval = false;
}
// Message In Sequence is a referring class in association: rel.Numb = 954
// The participating class is: Message
class Message_c_test38809_c implements ClassQueryInterface_c {
Message_c_test38809_c(java.util.UUID p38810) {
m_p38810 = p38810;
}
private java.util.UUID m_p38810;
public boolean evaluate(Object candidate) {
Message_c selected = (Message_c) candidate;
boolean retval = false;
retval = (selected.getMsg_id().equals(m_p38810));
return retval;
}
}
Message_c[] objs38808 = Message_c.MessageInstances(modelRoot,
new Message_c_test38809_c(getMsg_id()));
// The participant is unconditional
// The multiplicity of the participant is one
if (((objs38808.length) != 1)) {
if (CorePlugin.getDefault().isDebugging()) {
Ooaofooa.log
.println(ILogger.CONSISTENCY,
"Message In Sequence", //$NON-NLS-1$
"Consistency: Object: Message In Sequence: Association: 954: Cardinality of a participant is not equal to 1. " //$NON-NLS-1$
+ "Actual Value: " //$NON-NLS-1$
+ Integer.toString(objs38808.length)
+ " Msg_ID: " + "Not Printable"); //$NON-NLS-1$
} else {
Exception e = new Exception();
CorePlugin
.logError(
"Consistency: Object: Message In Sequence: Association: 954: Cardinality of a participant is not equal to 1. " //$NON-NLS-1$
+ "Actual Value: " //$NON-NLS-1$
+ Integer.toString(objs38808.length)
+ " Msg_ID: " + "Not Printable", e); //$NON-NLS-1$
}
retval = false;
}
// Message In Sequence is a referring class in association: rel.Numb = 953
// The participating class is: Sequence
class Sequence_c_test38812_c implements ClassQueryInterface_c {
Sequence_c_test38812_c(java.util.UUID p38813) {
m_p38813 = p38813;
}
private java.util.UUID m_p38813;
public boolean evaluate(Object candidate) {
Sequence_c selected = (Sequence_c) candidate;
boolean retval = false;
retval = (selected.getPackage_id().equals(m_p38813));
return retval;
}
}
Sequence_c[] objs38811 = Sequence_c.SequenceInstances(modelRoot,
new Sequence_c_test38812_c(getPackage_id()));
// The participant is unconditional
// The multiplicity of the participant is one
if (((objs38811.length) != 1)) {
if (CorePlugin.getDefault().isDebugging()) {
Ooaofooa.log
.println(ILogger.CONSISTENCY,
"Message In Sequence", //$NON-NLS-1$
"Consistency: Object: Message In Sequence: Association: 953: Cardinality of a participant is not equal to 1. " //$NON-NLS-1$
+ "Actual Value: " //$NON-NLS-1$
+ Integer.toString(objs38811.length)
+ " Package_ID: " + "Not Printable"); //$NON-NLS-1$
} else {
Exception e = new Exception();
CorePlugin
.logError(
"Consistency: Object: Message In Sequence: Association: 953: Cardinality of a participant is not equal to 1. " //$NON-NLS-1$
+ "Actual Value: " //$NON-NLS-1$
+ Integer.toString(objs38811.length)
+ " Package_ID: " + "Not Printable", e); //$NON-NLS-1$
}
retval = false;
}
return retval;
}
public Object getAdapter(Class adapter) {
Object superAdapter = super.getAdapter(adapter);
if (superAdapter != null) {
return superAdapter;
}
return null;
}
} // end Message In Sequence
| [
"\"PTC_MCC_ENABLED\"",
"\"PTC_MCC_ENABLED\"",
"\"PTC_MCC_ENABLED\"",
"\"PTC_MCC_ENABLED\""
]
| []
| [
"PTC_MCC_ENABLED"
]
| [] | ["PTC_MCC_ENABLED"] | java | 1 | 0 | |
wgengine/watchdog.go | // Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package wgengine
import (
"log"
"os"
"runtime/pprof"
"strconv"
"strings"
"time"
"inet.af/netaddr"
"tailscale.com/control/controlclient"
"tailscale.com/ipn/ipnstate"
"tailscale.com/net/interfaces"
"tailscale.com/tailcfg"
"tailscale.com/wgengine/filter"
"tailscale.com/wgengine/router"
"tailscale.com/wgengine/tsdns"
"tailscale.com/wgengine/wgcfg"
)
// NewWatchdog wraps an Engine and makes sure that all methods complete
// within a reasonable amount of time.
//
// If they do not, the watchdog crashes the process.
func NewWatchdog(e Engine) Engine {
if v, _ := strconv.ParseBool(os.Getenv("TS_DEBUG_DISABLE_WATCHDOG")); v {
return e
}
return &watchdogEngine{
wrap: e,
logf: log.Printf,
fatalf: log.Fatalf,
maxWait: 45 * time.Second,
}
}
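// Usage sketch (assumes some concrete Engine value named base): wrap the
// engine so any stalled method crashes the process with goroutine dumps.
//
//	e := NewWatchdog(base)
//	e.RequestStatus() // fatal if it hangs longer than maxWait
//
// Setting TS_DEBUG_DISABLE_WATCHDOG=true skips the wrapping entirely.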
type watchdogEngine struct {
wrap Engine
logf func(format string, args ...interface{})
fatalf func(format string, args ...interface{})
maxWait time.Duration
}
func (e *watchdogEngine) watchdogErr(name string, fn func() error) error {
errCh := make(chan error)
go func() {
errCh <- fn()
}()
t := time.NewTimer(e.maxWait)
select {
case err := <-errCh:
t.Stop()
return err
case <-t.C:
buf := new(strings.Builder)
pprof.Lookup("goroutine").WriteTo(buf, 1)
e.logf("wgengine watchdog stacks:\n%s", buf.String())
e.fatalf("wgengine: watchdog timeout on %s", name)
return nil
}
}
func (e *watchdogEngine) watchdog(name string, fn func()) {
e.watchdogErr(name, func() error {
fn()
return nil
})
}
func (e *watchdogEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config) error {
return e.watchdogErr("Reconfig", func() error { return e.wrap.Reconfig(cfg, routerCfg) })
}
func (e *watchdogEngine) GetFilter() *filter.Filter {
var x *filter.Filter
e.watchdog("GetFilter", func() { x = e.wrap.GetFilter() })
return x
}
func (e *watchdogEngine) SetFilter(filt *filter.Filter) {
e.watchdog("SetFilter", func() { e.wrap.SetFilter(filt) })
}
func (e *watchdogEngine) SetDNSMap(dm *tsdns.Map) {
e.watchdog("SetDNSMap", func() { e.wrap.SetDNSMap(dm) })
}
func (e *watchdogEngine) SetStatusCallback(cb StatusCallback) {
e.watchdog("SetStatusCallback", func() { e.wrap.SetStatusCallback(cb) })
}
func (e *watchdogEngine) UpdateStatus(sb *ipnstate.StatusBuilder) {
e.watchdog("UpdateStatus", func() { e.wrap.UpdateStatus(sb) })
}
func (e *watchdogEngine) SetNetInfoCallback(cb NetInfoCallback) {
e.watchdog("SetNetInfoCallback", func() { e.wrap.SetNetInfoCallback(cb) })
}
func (e *watchdogEngine) RequestStatus() {
e.watchdog("RequestStatus", func() { e.wrap.RequestStatus() })
}
func (e *watchdogEngine) LinkChange(isExpensive bool) {
e.watchdog("LinkChange", func() { e.wrap.LinkChange(isExpensive) })
}
func (e *watchdogEngine) SetLinkChangeCallback(cb func(major bool, newState *interfaces.State)) {
e.watchdog("SetLinkChangeCallback", func() { e.wrap.SetLinkChangeCallback(cb) })
}
func (e *watchdogEngine) SetDERPMap(m *tailcfg.DERPMap) {
e.watchdog("SetDERPMap", func() { e.wrap.SetDERPMap(m) })
}
func (e *watchdogEngine) SetNetworkMap(nm *controlclient.NetworkMap) {
e.watchdog("SetNetworkMap", func() { e.wrap.SetNetworkMap(nm) })
}
func (e *watchdogEngine) AddNetworkMapCallback(callback NetworkMapCallback) func() {
var fn func()
e.watchdog("AddNetworkMapCallback", func() { fn = e.wrap.AddNetworkMapCallback(callback) })
return func() { e.watchdog("RemoveNetworkMapCallback", fn) }
}
func (e *watchdogEngine) DiscoPublicKey() (k tailcfg.DiscoKey) {
e.watchdog("DiscoPublicKey", func() { k = e.wrap.DiscoPublicKey() })
return k
}
func (e *watchdogEngine) Ping(ip netaddr.IP, cb func(*ipnstate.PingResult)) {
e.watchdog("Ping", func() { e.wrap.Ping(ip, cb) })
}
func (e *watchdogEngine) Close() {
e.watchdog("Close", e.wrap.Close)
}
func (e *watchdogEngine) Wait() {
e.wrap.Wait()
}
| [
"\"TS_DEBUG_DISABLE_WATCHDOG\""
]
| []
| [
"TS_DEBUG_DISABLE_WATCHDOG"
]
| [] | ["TS_DEBUG_DISABLE_WATCHDOG"] | go | 1 | 0 | |
src/main/java/com/estafet/batchprime/CamelBatchValidator.java | package com.estafet.batchprime;
import org.apache.camel.Exchange;
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.model.dataformat.JsonLibrary;
import org.springframework.stereotype.Component;
@Component
public class CamelBatchValidator extends RouteBuilder {
@Override
public void configure() throws Exception {
		String validatorUrl = System.getenv("VALIDATOR_URL");
String rabbitmqUrl = System.getenv("RABBITMQ_URL");
from("netty4-http:http://0.0.0.0:8080/batchPrime")
.unmarshal().json(JsonLibrary.Jackson, PrimeBatch.class)
.split().simple("body.getPrimeList()")
.parallelProcessing()
.convertBodyTo(String.class)
.setHeader(Exchange.HTTP_QUERY, simple("num=${body}"))
.to("http://" + validator_url + "/isPrime?bridgeEndpoint=true")
.convertBodyTo(String.class)
.to("log:foo")
.end()
.to("rabbitmq://" + rabbitmqUrl + "/amq.direct?autoDelete=false&routingKey=register")
.setBody()
.simple("All done here!");
}
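	// Illustrative request (assumes PrimeBatch exposes getPrimeList(), which
	// the split() expression above relies on):
	//
	//   POST http://<host>:8080/batchPrime
	//   {"primeList": [2, 3, 5, 8]}
	//
	// Each number is validated in parallel against VALIDATOR_URL's /isPrime
	// endpoint before the exchange is published to RabbitMQ.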
}
| [
"\"VALIDATOR_URL\"",
"\"RABBITMQ_URL\""
]
| []
| [
"VALIDATOR_URL",
"RABBITMQ_URL"
]
| [] | ["VALIDATOR_URL", "RABBITMQ_URL"] | java | 2 | 0 | |
python/tvm/autotvm/measure/measure_methods.py | # pylint: disable=invalid-name,too-many-function-args,too-many-nested-blocks
"""
Functions that run on executor for measurement.
These functions are responsible for building the tvm module, uploading it to
remote devices, recording the running time costs, and checking the correctness of the output.
"""
import logging
import shutil
import os
import threading
import time
from random import getrandbits
from collections import namedtuple
import tempfile
import numpy as np
from ... import ir_pass, build, build_config, nd, TVMError, register_func, \
rpc as _rpc, target as _target
from ...contrib import nvcc, ndk
from ..util import get_const_tuple
from ..env import AutotvmGlobalScope
from ..task.space import InstantiationError
from .measure import MeasureResult, MeasureErrorNo, Builder, Runner
from .local_executor import LocalExecutor
logger = logging.getLogger('autotvm')
class BuildResult(namedtuple("BuildResult", ('filename', 'arg_info', 'error', 'time_cost'))):
"""
Stores all the necessary inputs for a measurement.
Parameters
----------
filename : str
The filename of generated library
arg_info : Tuple
The shape and dtype information of tvm tensor arguments
error : Exception
The error happens during compilation.
time_cost : float
The time cost of building
"""
class LocalBuilder(Builder):
"""Run compilation on local machine
Parameters
----------
timeout: float
The timeout of a compilation
n_parallel: int
The number of tasks run in parallel. "None" will use all cpu cores
build_func: callable or str
If is 'default', use default build function
If is 'ndk', use function for android ndk
If is callable, use it as custom build function
"""
def __init__(self, timeout=10, n_parallel=None, build_func='default'):
super(LocalBuilder, self).__init__(timeout, n_parallel)
if isinstance(build_func, str):
if build_func == 'default':
build_func = default_build_func
elif build_func == 'ndk':
build_func = android_ndk_build_func
else:
raise ValueError("Invalid build_func" + build_func)
self.build_func = build_func
self.executor = LocalExecutor(timeout=timeout)
self.tmp_dir = tempfile.mkdtemp()
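    # Usage sketch: compile candidates with the Android NDK toolchain, giving
    # each compilation at most 15 seconds:
    #
    #   builder = LocalBuilder(timeout=15, build_func='ndk')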
def build(self, measure_inputs):
results = []
shutil.rmtree(self.tmp_dir)
self.tmp_dir = tempfile.mkdtemp()
for i in range(0, len(measure_inputs), self.n_parallel):
futures = []
for inp in measure_inputs[i:i + self.n_parallel]:
ret = self.executor.submit(self.build_func,
inp,
self.tmp_dir,
**self.build_kwargs)
futures.append(ret)
for future in futures:
res = future.get()
if isinstance(res, Exception):
# timeout or fleet error, return MeasureResult directly
results.append(MeasureResult((res,), MeasureErrorNo.BUILD_TIMEOUT,
self.timeout, time.time()))
elif res.error is not None:
# instantiation error
if isinstance(res.error, InstantiationError):
results.append(MeasureResult((res.error,),
MeasureErrorNo.INSTANTIATION_ERROR,
res.time_cost, time.time()))
else:
if "InstantiationError" in str(res.error):
msg = str(res.error)
try:
msg = msg.split('\n')[-2].split(": ")[1]
except Exception: # pylint: disable=broad-except
pass
results.append(MeasureResult((InstantiationError(msg),),
MeasureErrorNo.INSTANTIATION_ERROR,
res.time_cost, time.time()))
else: # tvm error
results.append(MeasureResult((res.error,),
MeasureErrorNo.COMPILE_HOST,
res.time_cost, time.time()))
else:
# return BuildResult
results.append(res)
return results
class RPCRunner(Runner):
"""Run generated code on remove devices.
This function will ask a RPC Tracker to get device for measurement.
Parameters
----------
timeout: float
The timeout of a compilation
n_parallel: int
The number of tasks run in parallel. "None" will use all cpu cores
key: str
The key of the device registered in the tracker
host: str
The host address of RPC Tracker
port: int
The port of RPC Tracker
number: int
The number of times to run the generated code for taking average.
        We call these runs one `repeat` of the measurement.
repeat : int, optional
The number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first "1" is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms: int, optional
The minimum duration of one `repeat` in milliseconds.
By default, one `repeat` contains `number` runs. If this parameter is set,
the parameters `number` will be dynamically adjusted to meet the
minimum duration requirement of one `repeat`.
i.e., When the run time of one `repeat` falls below this time, the `number` parameter
will be automatically increased.
cooldown_interval: float, optional
The cool down interval between two measurements.
check_correctness: bool, optional
Whether check correctness after measurement. This will use llvm cpu target to
call your template and get the reference output.
This can work for TOPI templates, but may not work for your custom template.
"""
def __init__(self,
key, host, port, priority=1,
timeout=10, n_parallel=None,
number=4, repeat=3, min_repeat_ms=0, cooldown_interval=0.1,
check_correctness=False):
super(RPCRunner, self).__init__(timeout, n_parallel)
self.key = key
self.host = host
self.port = port
self.priority = priority
self.timeout = timeout
self.number = number
self.repeat = repeat
self.min_repeat_ms = min_repeat_ms
self.ref_input = None
self.ref_output = None
self.check_correctness = check_correctness
self.cooldown_interval = cooldown_interval
self.executor = LocalExecutor()
def set_task(self, task):
self.task = task
if check_remote(task.target, self.key, self.host, self.port):
logger.info("Get devices for measurement successfully!")
else:
raise RuntimeError("Cannot get remote devices from the tracker. "
"Please check the status of tracker by "
"'python -m tvm.exec.query_rpc_tracker --port [THE PORT YOU USE]' "
"and make sure you have free devices on the queue status.")
if self.check_correctness:
# use llvm cpu to generate a reference input/output
            # this option works for tuning topi, but might not work for your custom op
with _target.create("llvm"):
s, arg_bufs = task.instantiate(task.config_space.get(0))
self.ref_input = [np.random.uniform(size=get_const_tuple(x.shape)).astype(x.dtype)
for x in arg_bufs]
func = build(s, arg_bufs, "llvm")
tvm_buf = [nd.array(x) for x in self.ref_input]
func(*tvm_buf)
self.ref_output = [x.asnumpy() for x in tvm_buf]
def get_build_kwargs(self):
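        """Query the remote device once so kernels exceeding its limits can be filtered out before upload."""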
kwargs = {}
if 'cuda' in self.task.target.keys or 'opencl' in self.task.target.keys:
remote = request_remote(self.key, self.host, self.port)
ctx = remote.context(str(self.task.target), 0)
max_dims = ctx.max_thread_dimensions
kwargs['check_gpu'] = {
'max_shared_memory_per_block': ctx.max_shared_memory_per_block,
'max_threads_per_block': ctx.max_threads_per_block,
'max_thread_x': max_dims[0],
'max_thread_y': max_dims[1],
'max_thread_z': max_dims[2],
}
if 'cuda' in self.task.target.keys:
kwargs["cuda_arch"] = "sm_" + "".join(ctx.compute_version.split('.'))
return kwargs
def run(self, measure_inputs, build_results):
results = []
remote_args = (self.key, self.host, self.port, self.priority, self.timeout)
for i in range(0, len(measure_inputs), self.n_parallel):
futures = []
for measure_inp, build_res in zip(measure_inputs[i:i+self.n_parallel],
build_results[i:i+self.n_parallel]):
ret = self.executor.submit(run_through_rpc,
measure_inp,
build_res,
self.number,
self.repeat,
self.min_repeat_ms,
self.cooldown_interval,
remote_args,
self.ref_input,
self.ref_output)
futures.append(ret)
for future in futures:
res = future.get()
if isinstance(res, Exception): # executor error or timeout
results.append(MeasureResult((str(res),), MeasureErrorNo.RUN_TIMEOUT,
self.timeout, time.time()))
else:
results.append(res)
return results
class LocalRunner(RPCRunner):
"""Run generated code on local devices.
Parameters
----------
timeout: float
The timeout of a compilation
number: int
The number of times to run the generated code for taking average.
        We call these runs one `repeat` of the measurement.
repeat : int, optional
The number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first one is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms: int, optional
The minimum duration of one `repeat` in milliseconds.
By default, one `repeat` contains `number` runs. If this parameter is set,
the parameters `number` will be dynamically adjusted to meet the
minimum duration requirement of one `repeat`.
i.e., When the run time of one `repeat` falls below this time, the `number` parameter
will be automatically increased.
cooldown_interval: float, optional
The cool down interval between two measurements.
check_correctness: bool, optional
Whether check correctness after measurement. This will use llvm cpu target to
call your template and get the reference output.
This can work for TOPI templates, but may not work for your custom template.
Note
----
This is a "fake" local mode. We start a silent rpc tracker and rpc server
for the user. In this way we reuse timeout/isolation mechanism in RPC infrastructure.
"""
def __init__(self,
timeout=10,
number=4, repeat=3, min_repeat_ms=0, cooldown_interval=0.1,
check_correctness=False):
super(LocalRunner, self).__init__('', None, None, 0,
timeout=timeout, n_parallel=1,
number=number, repeat=repeat,
min_repeat_ms=min_repeat_ms,
cooldown_interval=cooldown_interval,
check_correctness=check_correctness)
self.tracker = None
self.server = None
def set_task(self, task):
self.task = task
from ...rpc.tracker import Tracker
from ...rpc.server import Server
tracker = Tracker('0.0.0.0', port=9000, port_end=10000, silent=True)
device_key = '$local$device$%d' % tracker.port
server = Server('0.0.0.0', port=9000, port_end=10000,
key=device_key,
use_popen=True, silent=True,
tracker_addr=(tracker.host, tracker.port))
self.key = device_key
self.host = tracker.host
self.port = tracker.port
super(LocalRunner, self).set_task(task)
return server, tracker
def _build_func_common(measure_input, check_gpu=None, cuda_arch=None, build_option=None):
"""Common part for building a configuration"""
target, task, config = measure_input
with target:
s, args = task.instantiate(config)
        # reject invalid template configurations and check code hash consistency
if not config.valid():
raise InstantiationError(config.errors)
opts = build_option or {}
if check_gpu: # Add verify pass to filter out invalid configs in advance.
opts["add_lower_pass"] = [(2, gpu_verify_pass(**check_gpu))]
if cuda_arch:
set_cuda_target_arch(cuda_arch)
with build_config(**opts):
func = build(s, args, target_host=task.target_host)
return func, tuple((get_const_tuple(x.shape), x.dtype) for x in args)
def default_build_func(measure_input, tmp_dir, **kwargs):
"""
    Default build func. This can work for cuda, opencl and llvm backends.
Parameters
----------
measure_input: MeasureInput
The input of measurement
tmp_dir: str
The path of temporary directory to export generated library
"""
tic = time.time()
try:
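        # a random 64-bit suffix keeps parallel builds from overwriting each other in tmp_dir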
filename = os.path.join(tmp_dir, "tmp_func_%0x.tar" % getrandbits(64))
func, arg_info = _build_func_common(measure_input, **kwargs)
func.export_library(filename)
except Exception as e: # pylint: disable=broad-except
return BuildResult(None, None, e, time.time() - tic)
return BuildResult(filename, arg_info, None, time.time() - tic)
def android_ndk_build_func(measure_input, tmp_dir, **kwargs):
"""
Build function for android device using ndk.
Parameters
----------
measure_input: MeasureInput
The input of measurement
tmp_dir: str
The path of temporary directory to export generated library
"""
tic = time.time()
try:
filename = os.path.join(tmp_dir, "tmp_func_%0x.so" % getrandbits(64))
func, arg_info = _build_func_common(measure_input, **kwargs)
func.export_library(filename, ndk.create_shared)
except Exception as e: # pylint: disable=broad-except
return BuildResult(None, None, e, time.time() - tic)
return BuildResult(filename, arg_info, None, time.time() - tic)
def run_through_rpc(measure_input, build_result,
number, repeat, min_repeat_ms, cooldown_interval,
remote_args, ref_input=None, ref_output=None):
"""Run a generated library through rpc
Parameters
----------
measure_input: MeasureInput
The raw measure input
build_result: BuildResult
The result returned from Builder. This contains the path to the generated library.
number: int
The number of times to run the generated code for taking average.
        We call these runs one `repeat` of the measurement.
repeat : int, optional
The number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first one is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms: int, optional
The minimum duration of one `repeat` in milliseconds.
By default, one `repeat` contains `number` runs. If this parameter is set,
the parameters `number` will be dynamically adjusted to meet the
minimum duration requirement of one `repeat`.
i.e., When the run time of one `repeat` falls below this time, the `number` parameter
will be automatically increased.
cooldown_interval: float
The cool down interval between two measurements
remote_args: Tuple
The argument for request_remote
ref_input: List of np.ndarray
The reference input used for checking correctness
ref_output: List of np.ndarray
The reference output used for checking correctness
"""
if isinstance(build_result, MeasureResult):
return build_result
tic = time.time()
errno = MeasureErrorNo.NO_ERROR
try:
# upload built module
remote = request_remote(*remote_args)
remote.upload(build_result.filename)
func = remote.load_module(os.path.split(build_result.filename)[1])
ctx = remote.context(str(measure_input.target), 0)
time_f = func.time_evaluator(
func.entry_name, ctx, number=number, repeat=repeat, min_repeat_ms=min_repeat_ms)
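        # with min_repeat_ms set, the runtime grows `number` until each repeat runs long enough to time reliably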
# set input
if ref_input:
args = [nd.array(x, ctx=ctx) for x in ref_input]
else:
# create empty arrays on the remote device and copy them once.
# This can avoid some memory issues that make the measurement results unreliable.
args = [nd.empty(x[0], dtype=x[1], ctx=ctx) for x in build_result.arg_info]
args = [nd.array(x, ctx=ctx) for x in args]
ctx.sync()
costs = time_f(*args).results
# clean up remote files
remote.remove(build_result.filename)
remote.remove(os.path.splitext(build_result.filename)[0] + '.so')
remote.remove('')
if len(costs) > 2: # remove largest and smallest value to reduce variance
costs = list(costs)
costs.sort()
costs = tuple(costs[1:-1])
# check correctness of output
if ref_output:
for expected, real in zip(ref_output, args):
if not np.allclose(expected, real.asnumpy(), rtol=1e-4):
logger.warning("Wrong Answer!")
errno = MeasureErrorNo.WRONG_ANSWER
except TVMError as exc:
msg = str(exc)
if "Stack trace returned" in msg:
msg = msg[:msg.index("Stack trace returned")]
if "CUDA Source" in msg:
msg = msg[:msg.index("CUDA Source")]
costs = (RuntimeError(msg[:1024]),)
errno = MeasureErrorNo.RUNTIME_DEVICE
tstamp = time.time()
time.sleep(cooldown_interval)
return MeasureResult(costs, errno, tstamp - tic + build_result.time_cost, tstamp)
def request_remote(device_key, host=None, port=None, priority=1, timeout=60):
"""Request a remote session
Parameters
----------
device_key: string
The device key of registered device in tracker
    host: str, optional
        The host address of the rpc tracker.
        If none, the environment variable "TVM_TRACKER_HOST" is used
    port: int, optional
        The port of the rpc tracker.
        If none, the environment variable "TVM_TRACKER_PORT" is used
priority: int, optional
        The priority of this request; a larger number means higher priority
    timeout: float, optional
        The timeout of this session (units: seconds)
Returns
    -------
session: RPCSession
"""
# connect to the tracker
host = host or os.environ['TVM_TRACKER_HOST']
port = port or int(os.environ['TVM_TRACKER_PORT'])
tracker = _rpc.connect_tracker(host, port)
remote = tracker.request(device_key, priority=priority,
session_timeout=timeout)
return remote
def check_remote(target, device_key, host=None, port=None, priority=100, timeout=10):
"""
Check the availability of a remote device
Parameters
----------
target: Target
The wanted compilation target
device_key: string
device key of registered device in tracker
    host: str, optional
        The host address of the rpc tracker.
        If none, the environment variable "TVM_TRACKER_HOST" is used
    port: int, optional
        The port of the rpc tracker.
        If none, the environment variable "TVM_TRACKER_PORT" is used
priority: int, optional
        The priority of this request; a larger number means higher priority
timeout: float, optional
The timeout of this check (units: seconds).
Returns
-------
available: bool
True if can find available device
"""
def _check():
remote = request_remote(device_key, host, port, priority)
ctx = remote.context(str(target))
while not ctx.exist: # wait until we get an available device
pass
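    # probe in a background thread; if it has not finished within `timeout` seconds, report no free device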
t = threading.Thread(target=_check,)
t.start()
t.join(timeout)
return not t.is_alive()
@register_func
def tvm_callback_cuda_compile(code):
"""use nvcc to generate ptx code for better optimization"""
ptx = nvcc.compile_cuda(code, target="ptx", arch=AutotvmGlobalScope.current.cuda_target_arch)
return ptx
def set_cuda_target_arch(arch):
"""set target architecture of nvcc compiler
Parameters
----------
arch: str
The argument of nvcc -arch. (e.g. "sm_51", "sm_62")
"""
AutotvmGlobalScope.current.cuda_target_arch = arch
def gpu_verify_pass(**kwargs):
"""Verify the validity of a gpu kernel.
This pass will check memory usage and number of threads per block.
"""
def verify_pass(stmt):
valid = ir_pass.VerifyGPUCode(stmt, kwargs)
if not valid:
raise InstantiationError("Skipped because of invalid gpu kernel")
return stmt
return verify_pass
| []
| []
| [
"TVM_TRACKER_HOST",
"TVM_TRACKER_PORT"
]
| [] | ["TVM_TRACKER_HOST", "TVM_TRACKER_PORT"] | python | 2 | 0 | |
stream/tests/test_client.py | from dateutil.tz import tzlocal
import stream
import time
from stream.exceptions import ApiKeyException, InputException
import random
import jwt
try:
from unittest.case import TestCase
except ImportError:
from unittest import TestCase
import json
import os
import sys
import datetime
import datetime as dt
import copy
import requests
from stream import serializer
from requests.exceptions import MissingSchema
from itertools import count
from uuid import uuid1
from uuid import uuid4
try:
from urlparse import urlparse, parse_qs
except ImportError:
from urllib.parse import urlparse, parse_qs
def connect_debug():
try:
key = os.environ["STREAM_KEY"]
secret = os.environ["STREAM_SECRET"]
except KeyError:
print(
"To run the tests the STREAM_KEY and STREAM_SECRET variables "
"need to be available. \n"
"Please create a pull request if you are an external "
"contributor, because these variables are automatically added "
"by Travis."
)
sys.exit(1)
return stream.connect(key, secret, location="qa", timeout=30)
client = connect_debug()
counter = count()
test_identifier = uuid4()
def get_unique_postfix():
return "---test_%s-feed_%s" % (test_identifier, next(counter))
def getfeed(feed_slug, user_id):
"""
Adds the random postfix to the user id
"""
return client.feed(feed_slug, user_id + get_unique_postfix())
def api_request_parse_validator(test):
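    # wraps client._parse_response so every API response is asserted to carry a "duration" field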
def wrapper(meth):
def _parse_response(*args, **kwargs):
response = meth(*args, **kwargs)
test.assertTrue("duration" in response)
return response
return _parse_response
return wrapper
class ClientTest(TestCase):
def setUp(self):
client._parse_response = api_request_parse_validator(self)(
client._parse_response
)
# DEBUG account details
user1 = getfeed("user", "1")
user2 = getfeed("user", "2")
aggregated2 = getfeed("aggregated", "2")
aggregated3 = getfeed("aggregated", "3")
topic1 = getfeed("topic", "1")
flat3 = getfeed("flat", "3")
self.c = client
self.user1 = user1
self.user2 = user2
self.aggregated2 = aggregated2
self.aggregated3 = aggregated3
self.topic1 = topic1
self.flat3 = flat3
self.local_tests = False
if "LOCAL" in os.environ:
self.local_tests = os.environ["LOCAL"]
def _test_sleep(self, production_wait, local_wait):
"""
when testing against a live API, sometimes we need a small sleep to
ensure data stability, however when testing locally the wait does
not need to be as long
:param production_wait: float, number of seconds to sleep when hitting real API
:param local_wait: float, number of seconds to sleep when hitting localhost API
:return: None
"""
sleep_time = production_wait
if self.local_tests:
sleep_time = local_wait
time.sleep(sleep_time)
def test_collections_url(self):
feed_url = client.get_full_url(relative_url="meta/", service_name="api")
if self.local_tests:
self.assertEqual(feed_url, "http://localhost:8000/api/v1.0/meta/")
else:
self.assertEqual(
feed_url, "https://qa-api.stream-io-api.com/api/v1.0/meta/"
)
def test_personalization_url(self):
feed_url = client.get_full_url(
relative_url="recommended", service_name="personalization"
)
if self.local_tests:
self.assertEqual(
feed_url, "http://localhost:8000/personalization/v1.0/recommended"
)
else:
self.assertEqual(
feed_url,
"https://qa-personalization.stream-io-api.com/personalization/v1.0/recommended",
)
def test_api_url(self):
feed_url = client.get_full_url(service_name="api", relative_url="feed/")
if self.local_tests:
self.assertEqual(feed_url, "http://localhost:8000/api/v1.0/feed/")
else:
self.assertEqual(
feed_url, "https://qa-api.stream-io-api.com/api/v1.0/feed/"
)
def test_collections_url_default(self):
client = stream.connect("key", "secret")
feed_url = client.get_full_url(relative_url="meta/", service_name="api")
if not self.local_tests:
self.assertEqual(feed_url, "https://api.stream-io-api.com/api/v1.0/meta/")
def test_personalization_url_default(self):
client = stream.connect("key", "secret")
feed_url = client.get_full_url(
relative_url="recommended", service_name="personalization"
)
if not self.local_tests:
self.assertEqual(
feed_url,
"https://personalization.stream-io-api.com/personalization/v1.0/recommended",
)
def test_api_url_default(self):
client = stream.connect("key", "secret")
feed_url = client.get_full_url(service_name="api", relative_url="feed/")
if not self.local_tests:
self.assertEqual(feed_url, "https://api.stream-io-api.com/api/v1.0/feed/")
def test_collections_url_location(self):
client = stream.connect("key", "secret", location="tokyo")
feed_url = client.get_full_url(relative_url="meta/", service_name="api")
if not self.local_tests:
self.assertEqual(
feed_url, "https://tokyo-api.stream-io-api.com/api/v1.0/meta/"
)
def test_personalization_url_location(self):
client = stream.connect("key", "secret", location="tokyo")
feed_url = client.get_full_url(
relative_url="recommended", service_name="personalization"
)
if not self.local_tests:
self.assertEqual(
feed_url,
"https://tokyo-personalization.stream-io-api.com/personalization/v1.0/recommended",
)
def test_api_url_location(self):
client = stream.connect("key", "secret", location="tokyo")
feed_url = client.get_full_url(service_name="api", relative_url="feed/")
if not self.local_tests:
self.assertEqual(
feed_url, "https://tokyo-api.stream-io-api.com/api/v1.0/feed/"
)
def test_update_activities_create(self):
activities = [
{
"actor": "user:1",
"verb": "do",
"object": "object:1",
"foreign_id": "object:1",
"time": datetime.datetime.utcnow().isoformat(),
}
]
self.c.update_activities(activities)
def test_update_activities_illegal_argument(self):
activities = dict()
def invalid_activities():
self.c.update_activities(activities)
self.assertRaises(TypeError, invalid_activities)
def test_update_activities_update(self):
activities = []
for i in range(0, 10):
activities.append(
{
"actor": "user:1",
"verb": "do",
"object": "object:%s" % i,
"foreign_id": "object:%s" % i,
"time": datetime.datetime.utcnow().isoformat(),
}
)
activities_created = self.user1.add_activities(activities)["activities"]
activities = copy.deepcopy(activities_created)
for activity in activities:
activity.pop("id")
activity["popularity"] = 100
self.c.update_activities(activities)
activities_updated = self.user1.get(limit=len(activities))["results"]
activities_updated.reverse()
for i, activity in enumerate(activities_updated):
self.assertEqual(activities_created[i].get("id"), activity.get("id"))
self.assertEqual(activity["popularity"], 100)
def test_heroku(self):
url = "https://thierry:[email protected]/?app_id=1"
os.environ["STREAM_URL"] = url
client = stream.connect()
self.assertEqual(client.api_key, "thierry")
self.assertEqual(client.api_secret, "pass")
self.assertEqual(client.app_id, "1")
def test_heroku_no_location(self):
url = "https://bvt88g4kvc63:twc5ywfste5bm2ngqkzs7ukxk3pn96yweghjrxcmcrarnt3j4dqj3tucbhym5wfd@stream-io-api.com/?app_id=669"
os.environ["STREAM_URL"] = url
client = stream.connect()
self.assertEqual(client.api_key, "bvt88g4kvc63")
self.assertEqual(
client.api_secret,
"twc5ywfste5bm2ngqkzs7ukxk3pn96yweghjrxcmcrarnt3j4dqj3tucbhym5wfd",
)
self.assertEqual(client.app_id, "669")
feed_url = client.get_full_url("api", "feed/")
if self.local_tests:
self.assertEqual(feed_url, "http://localhost:8000/api/v1.0/feed/")
else:
self.assertEqual(feed_url, "https://api.stream-io-api.com/api/v1.0/feed/")
def test_heroku_location_compat(self):
url = "https://ahj2ndz7gsan:gthc2t9gh7pzq52f6cky8w4r4up9dr6rju9w3fjgmkv6cdvvav2ufe5fv7e2r9qy@us-east.getstream.io/?app_id=1"
os.environ["STREAM_URL"] = url
client = stream.connect()
self.assertEqual(client.api_key, "ahj2ndz7gsan")
self.assertEqual(
client.api_secret,
"gthc2t9gh7pzq52f6cky8w4r4up9dr6rju9w3fjgmkv6cdvvav2ufe5fv7e2r9qy",
)
feed_url = client.get_full_url("api", "feed/")
if self.local_tests:
self.assertEqual(feed_url, "http://localhost:8000/api/v1.0/feed/")
else:
self.assertEqual(
feed_url, "https://us-east-api.stream-io-api.com/api/v1.0/feed/"
)
self.assertEqual(client.app_id, "1")
def test_heroku_location(self):
url = "https://ahj2ndz7gsan:gthc2t9gh7pzq52f6cky8w4r4up9dr6rju9w3fjgmkv6cdvvav2ufe5fv7e2r9qy@us-east.stream-io-api.com/?app_id=1"
os.environ["STREAM_URL"] = url
client = stream.connect()
self.assertEqual(client.api_key, "ahj2ndz7gsan")
self.assertEqual(
client.api_secret,
"gthc2t9gh7pzq52f6cky8w4r4up9dr6rju9w3fjgmkv6cdvvav2ufe5fv7e2r9qy",
)
feed_url = client.get_full_url("api", "feed/")
if self.local_tests:
self.assertEqual(feed_url, "http://localhost:8000/api/v1.0/feed/")
else:
self.assertEqual(
feed_url, "https://us-east-api.stream-io-api.com/api/v1.0/feed/"
)
self.assertEqual(client.app_id, "1")
def test_heroku_overwrite(self):
url = "https://thierry:[email protected]/?app_id=1"
os.environ["STREAM_URL"] = url
client = stream.connect("a", "b", "c")
self.assertEqual(client.api_key, "a")
self.assertEqual(client.api_secret, "b")
self.assertEqual(client.app_id, "c")
def test_location_support(self):
client = stream.connect("a", "b", "c", location="us-east")
full_location = "https://us-east-api.stream-io-api.com/api/v1.0/feed/"
if self.local_tests:
full_location = "http://localhost:8000/api/v1.0/feed/"
self.assertEqual(client.location, "us-east")
feed_url = client.get_full_url("api", "feed/")
self.assertEqual(feed_url, full_location)
# test a wrong location, can only work on non-local test running
if not self.local_tests:
client = stream.connect("a", "b", "c", location="nonexistant")
def get_feed():
client.feed("user", "1").get()
self.assertRaises(requests.exceptions.ConnectionError, get_feed)
def test_invalid_feed_values(self):
def invalid_feed_slug():
client.feed("user:", "1")
self.assertRaises(ValueError, invalid_feed_slug)
def invalid_user_id():
client.feed("user:", "1-a")
self.assertRaises(ValueError, invalid_user_id)
def invalid_follow_feed_slug():
self.user1.follow("user:", "1")
self.assertRaises(ValueError, invalid_follow_feed_slug)
def invalid_follow_user_id():
self.user1.follow("user", "1-:a")
self.assertRaises(ValueError, invalid_follow_user_id)
def test_token_retrieval(self):
self.user1.token
self.user1.get_readonly_token()
def test_user_session_token(self):
client = stream.connect(self.c.api_key, self.c.api_secret)
token = client.create_user_session_token("user")
payload = jwt.decode(token, self.c.api_secret)
self.assertEqual(payload["user_id"], "user")
token = client.create_user_session_token("user", client="python", testing=True)
payload = jwt.decode(token, self.c.api_secret)
self.assertEqual(payload["client"], "python")
self.assertEqual(payload["testing"], True)
def test_add_activity(self):
feed = getfeed("user", "py1")
activity_data = {"actor": 1, "verb": "tweet", "object": 1}
response = feed.add_activity(activity_data)
activity_id = response["id"]
activities = feed.get(limit=1)["results"]
self.assertEqual(activities[0]["id"], activity_id)
def test_add_activity_to_inplace_change(self):
feed = getfeed("user", "py1")
team_feed = getfeed("user", "teamy")
activity_data = {"actor": 1, "verb": "tweet", "object": 1}
activity_data["to"] = [team_feed.id]
feed.add_activity(activity_data)
self.assertEqual(activity_data["to"], [team_feed.id])
def test_add_activities_to_inplace_change(self):
feed = getfeed("user", "py1")
team_feed = getfeed("user", "teamy")
activity_data = {"actor": 1, "verb": "tweet", "object": 1}
activity_data["to"] = [team_feed.id]
feed.add_activities([activity_data])
self.assertEqual(activity_data["to"], [team_feed.id])
def test_add_activity_to(self):
# test for sending an activities to the team feed using to
feeds = ["user", "teamy", "team_follower"]
user_feed, team_feed, team_follower_feed = map(
lambda x: getfeed("user", x), feeds
)
team_follower_feed.follow(team_feed.slug, team_feed.user_id)
activity_data = {"actor": 1, "verb": "tweet", "object": 1, "to": [team_feed.id]}
response = user_feed.add_activity(activity_data)
activity_id = response["id"]
# see if the new activity is also in the team feed
activities = team_feed.get(limit=1)["results"]
self.assertEqual(activities[0]["id"], activity_id)
self.assertEqual(activities[0]["origin"], None)
# see if the fanout process also works
activities = team_follower_feed.get(limit=1)["results"]
self.assertEqual(activities[0]["id"], activity_id)
self.assertEqual(activities[0]["origin"], team_feed.id)
# and validate removing also works
user_feed.remove_activity(response["id"])
# check the user pyto feed
activities = team_feed.get(limit=1)["results"]
self.assertFirstActivityIDNotEqual(activities, activity_id)
# and the flat feed
activities = team_follower_feed.get(limit=1)["results"]
self.assertFirstActivityIDNotEqual(activities, activity_id)
def test_add_activity_to_type_error(self):
user_feed = getfeed("user", "1")
activity_data = {"actor": 1, "verb": "tweet", "object": 1, "to": "string"}
self.assertRaises(TypeError, user_feed.add_activity, activity_data)
def assertFirstActivityIDEqual(self, activities, correct_activity_id):
activity_id = None
if activities:
activity_id = activities[0]["id"]
self.assertEqual(activity_id, correct_activity_id)
def assertFirstActivityIDNotEqual(self, activities, correct_activity_id):
activity_id = None
if activities:
activity_id = activities[0]["id"]
self.assertNotEqual(activity_id, correct_activity_id)
def test_remove_activity(self):
activity_data = {"actor": 1, "verb": "tweet", "object": 1}
activity_id = self.user1.add_activity(activity_data)["id"]
activities = self.user1.get(limit=8)["results"]
self.assertEqual(len(activities), 1)
self.user1.remove_activity(activity_id)
# verify that no activities were returned
activities = self.user1.get(limit=8)["results"]
self.assertEqual(len(activities), 0)
def test_remove_activity_by_foreign_id(self):
activity_data = {
"actor": 1,
"verb": "tweet",
"object": 1,
"foreign_id": "tweet:10",
}
self.user1.add_activity(activity_data)["id"]
activities = self.user1.get(limit=8)["results"]
self.assertEqual(len(activities), 1)
self.user1.remove_activity(foreign_id="tweet:10")
# verify that no activities were returned
activities = self.user1.get(limit=8)["results"]
self.assertEqual(len(activities), 0)
        # verify this doesn't raise an error, but fails silently
self.user1.remove_activity(foreign_id="tweet:unknowandmissing")
def test_add_activities(self):
activity_data = [
{"actor": 1, "verb": "tweet", "object": 1},
{"actor": 2, "verb": "watch", "object": 2},
]
response = self.user1.add_activities(activity_data)
activity_ids = [a["id"] for a in response["activities"]]
activities = self.user1.get(limit=2)["results"]
get_activity_ids = [a["id"] for a in activities]
self.assertEqual(get_activity_ids, activity_ids[::-1])
def test_add_activities_to(self):
pyto2 = getfeed("user", "pyto2")
pyto3 = getfeed("user", "pyto3")
to = [pyto2.id, pyto3.id]
activity_data = [
{"actor": 1, "verb": "tweet", "object": 1, "to": to},
{"actor": 2, "verb": "watch", "object": 2, "to": to},
]
response = self.user1.add_activities(activity_data)
activity_ids = [a["id"] for a in response["activities"]]
activities = self.user1.get(limit=2)["results"]
get_activity_ids = [a["id"] for a in activities]
self.assertEqual(get_activity_ids, activity_ids[::-1])
# test first target
activities = pyto2.get(limit=2)["results"]
get_activity_ids = [a["id"] for a in activities]
self.assertEqual(get_activity_ids, activity_ids[::-1])
# test second target
activities = pyto3.get(limit=2)["results"]
get_activity_ids = [a["id"] for a in activities]
self.assertEqual(get_activity_ids, activity_ids[::-1])
def test_follow_and_source(self):
feed = getfeed("user", "test_follow")
agg_feed = getfeed("aggregated", "test_follow")
actor_id = random.randint(10, 100000)
activity_data = {"actor": actor_id, "verb": "tweet", "object": 1}
activity_id = feed.add_activity(activity_data)["id"]
agg_feed.follow(feed.slug, feed.user_id)
activities = agg_feed.get(limit=3)["results"]
activity = self._get_first_aggregated_activity(activities)
activity_id_found = activity["id"] if activity is not None else None
self.assertEqual(activity["origin"], feed.id)
self.assertEqual(activity_id_found, activity_id)
def test_follow_activity_copy_limit(self):
feed = getfeed("user", "test_follow_acl")
feed1 = getfeed("user", "test_follow_acl1")
actor_id = random.randint(10, 100000)
feed1.add_activity({"actor": actor_id, "verb": "tweet", "object": 1})
feed.follow(feed1.slug, feed1.user_id, activity_copy_limit=0)
activities = feed.get(limit=5)["results"]
self.assertEqual(len(activities), 0)
def test_follow_and_delete(self):
user_feed = getfeed("user", "test_follow")
agg_feed = getfeed("aggregated", "test_follow")
actor_id = random.randint(10, 100000)
activity_data = {"actor": actor_id, "verb": "tweet", "object": 1}
activity_id = user_feed.add_activity(activity_data)["id"]
agg_feed.follow(user_feed.slug, user_feed.user_id)
user_feed.remove_activity(activity_id)
activities = agg_feed.get(limit=3)["results"]
activity = self._get_first_aggregated_activity(activities)
activity_id_found = activity["id"] if activity is not None else None
self.assertNotEqual(activity_id_found, activity_id)
def test_flat_follow(self):
feed = getfeed("user", "test_flat_follow")
activity_data = {"actor": 1, "verb": "tweet", "object": 1}
activity_id = feed.add_activity(activity_data)["id"]
self.flat3.follow(feed.slug, feed.user_id)
activities = self.flat3.get(limit=3)["results"]
activity = self._get_first_activity(activities)
activity_id_found = activity["id"] if activity is not None else None
self.assertEqual(activity_id_found, activity_id)
self.flat3.unfollow(feed.slug, feed.user_id)
activities = self.flat3.get(limit=3)["results"]
self.assertEqual(len(activities), 0)
def test_flat_follow_no_copy(self):
feed = getfeed("user", "test_flat_follow_no_copy")
follower = getfeed("flat", "test_flat_follow_no_copy")
activity_data = {"actor": 1, "verb": "tweet", "object": 1}
feed.add_activity(activity_data)["id"]
follower.follow(feed.slug, feed.user_id, activity_copy_limit=0)
activities = follower.get(limit=3)["results"]
self.assertEqual(activities, [])
def test_flat_follow_copy_one(self):
feed = getfeed("user", "test_flat_follow_copy_one")
follower = getfeed("flat", "test_flat_follow_copy_one")
activity_data = {
"actor": 1,
"verb": "tweet",
"object": 1,
"foreign_id": "test:1",
}
feed.add_activity(activity_data)["id"]
activity_data = {
"actor": 1,
"verb": "tweet",
"object": 1,
"foreign_id": "test:2",
}
feed.add_activity(activity_data)["id"]
follower.follow(feed.slug, feed.user_id, activity_copy_limit=1)
activities = follower.get(limit=3)["results"]
# verify we get the latest activity
self.assertEqual(activities[0]["foreign_id"], "test:2")
def _get_first_aggregated_activity(self, activities):
try:
return activities[0]["activities"][0]
except IndexError:
pass
def _get_first_activity(self, activities):
try:
return activities[0]
except IndexError:
pass
def test_empty_followings(self):
asocial = getfeed("user", "asocialpython")
followings = asocial.following()
self.assertEqual(followings["results"], [])
def test_get_followings(self):
social = getfeed("user", "psocial")
social.follow("user", "apy")
social.follow("user", "bpy")
social.follow("user", "cpy")
followings = social.following(offset=0, limit=2)
self.assertEqual(len(followings["results"]), 2)
self.assertEqual(followings["results"][0]["feed_id"], social.id)
self.assertEqual(followings["results"][0]["target_id"], "user:cpy")
followings = social.following(offset=1, limit=2)
self.assertEqual(len(followings["results"]), 2)
self.assertEqual(followings["results"][0]["feed_id"], social.id)
self.assertEqual(followings["results"][0]["target_id"], "user:bpy")
def test_empty_followers(self):
asocial = getfeed("user", "asocialpython")
followers = asocial.followers()
self.assertEqual(len(followers["results"]), 0)
self.assertEqual(followers["results"], [])
def test_get_followers(self):
social = getfeed("user", "psocial")
spammy1 = getfeed("user", "spammy1")
spammy2 = getfeed("user", "spammy2")
spammy3 = getfeed("user", "spammy3")
for feed in [spammy1, spammy2, spammy3]:
feed.follow("user", social.user_id)
followers = social.followers(offset=0, limit=2)
self.assertEqual(len(followers["results"]), 2)
self.assertEqual(followers["results"][0]["feed_id"], spammy3.id)
self.assertEqual(followers["results"][0]["target_id"], social.id)
followers = social.followers(offset=1, limit=2)
self.assertEqual(len(followers["results"]), 2)
self.assertEqual(followers["results"][0]["feed_id"], spammy2.id)
self.assertEqual(followers["results"][0]["target_id"], social.id)
def test_empty_do_i_follow(self):
social = getfeed("user", "psocial")
social.follow("user", "apy")
social.follow("user", "bpy")
followings = social.following(feeds=["user:missingpy"])
self.assertEqual(len(followings["results"]), 0)
self.assertEqual(followings["results"], [])
def test_do_i_follow(self):
social = getfeed("user", "psocial")
social.follow("user", "apy")
social.follow("user", "bpy")
followings = social.following(feeds=["user:apy"])
self.assertEqual(len(followings["results"]), 1)
self.assertEqual(followings["results"][0]["feed_id"], social.id)
self.assertEqual(followings["results"][0]["target_id"], "user:apy")
def test_update_activity_to_targets(self):
time = datetime.datetime.utcnow().isoformat()
foreign_id = "user:1"
activity_data = {
"actor": 1,
"verb": "tweet",
"object": 1,
"foreign_id": foreign_id,
"time": time,
}
activity_data["to"] = ["user:1", "user:2"]
self.user1.add_activity(activity_data)
ret = self.user1.update_activity_to_targets(
foreign_id, time, new_targets=["user:3", "user:2"]
)
self.assertEqual(len(ret["activity"]["to"]), 2)
self.assertTrue("user:2" in ret["activity"]["to"])
self.assertTrue("user:3" in ret["activity"]["to"])
ret = self.user1.update_activity_to_targets(
foreign_id,
time,
added_targets=["user:4", "user:5"],
removed_targets=["user:3"],
)
self.assertEqual(len(ret["activity"]["to"]), 3)
self.assertTrue("user:2" in ret["activity"]["to"])
self.assertTrue("user:4" in ret["activity"]["to"])
self.assertTrue("user:5" in ret["activity"]["to"])
def test_get(self):
activity_data = {"actor": 1, "verb": "tweet", "object": 1}
activity_id = self.user1.add_activity(activity_data)["id"]
activity_data = {"actor": 2, "verb": "add", "object": 2}
activity_id_two = self.user1.add_activity(activity_data)["id"]
activity_data = {"actor": 3, "verb": "watch", "object": 2}
activity_id_three = self.user1.add_activity(activity_data)["id"]
activities = self.user1.get(limit=2)["results"]
# verify the first two results
self.assertEqual(len(activities), 2)
self.assertEqual(activities[0]["id"], activity_id_three)
self.assertEqual(activities[1]["id"], activity_id_two)
# try offset based
activities = self.user1.get(limit=2, offset=1)["results"]
self.assertEqual(activities[0]["id"], activity_id_two)
# try id_lt based
activities = self.user1.get(limit=2, id_lt=activity_id_two)["results"]
self.assertEqual(activities[0]["id"], activity_id)
def test_get_not_marked_seen(self):
notification_feed = getfeed("notification", "test_mark_seen")
activities = notification_feed.get(limit=3)["results"]
for activity in activities:
self.assertFalse(activity["is_seen"])
def test_mark_seen_on_get(self):
notification_feed = getfeed("notification", "test_mark_seen")
activities = notification_feed.get(limit=100)["results"]
for activity in activities:
notification_feed.remove_activity(activity["id"])
old_activities = [
notification_feed.add_activity({"actor": 1, "verb": "tweet", "object": 1}),
notification_feed.add_activity({"actor": 2, "verb": "add", "object": 2}),
notification_feed.add_activity({"actor": 3, "verb": "watch", "object": 3}),
]
notification_feed.get(
mark_seen=[old_activities[0]["id"], old_activities[1]["id"]]
)
activities = notification_feed.get(limit=3)["results"]
# is the seen state correct
for activity in activities:
# using a loop in case we're retrieving activities in a different order than old_activities
if old_activities[0]["id"] == activity["id"]:
self.assertTrue(activity["is_seen"])
if old_activities[1]["id"] == activity["id"]:
self.assertTrue(activity["is_seen"])
if old_activities[2]["id"] == activity["id"]:
self.assertFalse(activity["is_seen"])
# see if the state properly resets after we add another activity
notification_feed.add_activity(
{"actor": 3, "verb": "watch", "object": 3}
) # ['id']
activities = notification_feed.get(limit=3)["results"]
self.assertFalse(activities[0]["is_seen"])
self.assertEqual(len(activities[0]["activities"]), 2)
def test_mark_read_by_id(self):
notification_feed = getfeed("notification", "py2")
activities = notification_feed.get(limit=3)["results"]
ids = []
for activity in activities:
ids.append(activity["id"])
self.assertFalse(activity["is_read"])
ids = ids[:2]
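        # mark only the first two notifications as read; the third must remain unread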
notification_feed.get(mark_read=ids)
activities = notification_feed.get(limit=3)["results"]
for activity in activities:
if activity["id"] in ids:
self.assertTrue(activity["is_read"])
self.assertFalse(activity["is_seen"])
def test_api_key_exception(self):
self.c = stream.connect(
"5crf3bhfzesnMISSING",
"tfq2sdqpj9g446sbv653x3aqmgn33hsn8uzdc9jpskaw8mj6vsnhzswuwptuj9su",
)
self.user1 = self.c.feed("user", "1")
activity_data = {
"actor": 1,
"verb": "tweet",
"object": 1,
"debug_example_undefined": "test",
}
self.assertRaises(
ApiKeyException, lambda: self.user1.add_activity(activity_data)
)
def test_complex_field(self):
activity_data = {
"actor": 1,
"verb": "tweet",
"object": 1,
"participants": ["Tommaso", "Thierry"],
}
response = self.user1.add_activity(activity_data)
activity_id = response["id"]
activities = self.user1.get(limit=1)["results"]
self.assertEqual(activities[0]["id"], activity_id)
self.assertEqual(activities[0]["participants"], ["Tommaso", "Thierry"])
def assertDatetimeAlmostEqual(self, a, b):
difference = abs(a - b)
if difference > datetime.timedelta(milliseconds=1):
self.assertEqual(a, b)
def assertClearlyNotEqual(self, a, b):
difference = abs(a - b)
if difference < datetime.timedelta(milliseconds=1):
raise ValueError("the dates are too close")
def test_uniqueness(self):
"""
        In order for things to be considered unique they need:
a.) The same time and activity data
b.) The same time and foreign id
"""
utcnow = datetime.datetime.utcnow()
activity_data = {"actor": 1, "verb": "tweet", "object": 1, "time": utcnow}
self.user1.add_activity(activity_data)
self.user1.add_activity(activity_data)
activities = self.user1.get(limit=2)["results"]
self.assertDatetimeAlmostEqual(activities[0]["time"], utcnow)
if len(activities) > 1:
self.assertClearlyNotEqual(activities[1]["time"], utcnow)
def test_uniqueness_topic(self):
"""
        In order for things to be considered unique they need:
a.) The same time and activity data, or
b.) The same time and foreign id
"""
# follow both the topic and the user
self.flat3.follow("topic", self.topic1.user_id)
self.flat3.follow("user", self.user1.user_id)
# add the same activity twice
now = datetime.datetime.now(tzlocal())
tweet = "My Way %s" % get_unique_postfix()
activity_data = {
"actor": 1,
"verb": "tweet",
"object": 1,
"time": now,
"tweet": tweet,
}
self.topic1.add_activity(activity_data)
self.user1.add_activity(activity_data)
# verify that flat3 contains the activity exactly once
response = self.flat3.get(limit=3)
activity_tweets = [a.get("tweet") for a in response["results"]]
self.assertEqual(activity_tweets.count(tweet), 1)
def test_uniqueness_foreign_id(self):
now = datetime.datetime.now(tzlocal())
utcnow = (now - now.utcoffset()).replace(tzinfo=None)
activity_data = {
"actor": 1,
"verb": "tweet",
"object": 1,
"foreign_id": "tweet:11",
"time": utcnow,
}
self.user1.add_activity(activity_data)
activity_data = {
"actor": 2,
"verb": "tweet",
"object": 3,
"foreign_id": "tweet:11",
"time": utcnow,
}
self.user1.add_activity(activity_data)
activities = self.user1.get(limit=10)["results"]
        # the second post should have overwritten the first one (because they
        # had the same foreign id and time)
self.assertEqual(len(activities), 1)
self.assertEqual(activities[0]["object"], "3")
self.assertEqual(activities[0]["foreign_id"], "tweet:11")
self.assertDatetimeAlmostEqual(activities[0]["time"], utcnow)
def test_time_ordering(self):
"""
datetime.datetime.utcnow() is our recommended approach
so if we add an activity
add one using time
add another activity it should be in the right spot
"""
# timedelta is used to "make sure" that ordering is known even though
# server time is not
custom_time = datetime.datetime.utcnow() - dt.timedelta(days=1)
feed = self.user2
for index, activity_time in enumerate([None, custom_time, None]):
self._test_sleep(1, 1) # so times are a bit different
activity_data = {
"actor": 1,
"verb": "tweet",
"object": 1,
"foreign_id": "tweet:%s" % index,
"time": activity_time,
}
feed.add_activity(activity_data)
activities = feed.get(limit=3)["results"]
        # the two activities without an explicit time keep insertion order,
        # while the one with the day-old custom time sorts last
self.assertEqual(activities[0]["foreign_id"], "tweet:2")
self.assertEqual(activities[1]["foreign_id"], "tweet:0")
self.assertEqual(activities[2]["foreign_id"], "tweet:1")
self.assertDatetimeAlmostEqual(activities[2]["time"], custom_time)
def test_missing_actor(self):
activity_data = {
"verb": "tweet",
"object": 1,
"debug_example_undefined": "test",
}
doit = lambda: self.user1.add_activity(activity_data)
try:
doit()
raise ValueError("should have raised InputException")
except InputException:
pass
def test_wrong_feed_spec(self):
self.c = stream.connect(
"5crf3bhfzesnMISSING",
"tfq2sdqpj9g446sbv653x3aqmgn33hsn8uzdc9jpskaw8mj6vsnhzswuwptuj9su",
)
self.assertRaises(TypeError, lambda: getfeed("user1"))
def test_serialization(self):
today = datetime.date.today()
then = datetime.datetime.now().replace(microsecond=0)
now = datetime.datetime.now()
data = dict(
string="string",
float=0.1,
int=1,
date=today,
datetime=now,
datetimenomicro=then,
)
serialized = serializer.dumps(data)
loaded = serializer.loads(serialized)
self.assertEqual(data, loaded)
def test_follow_many(self):
sources = [getfeed("user", str(i)).id for i in range(10)]
targets = [getfeed("flat", str(i)).id for i in range(10)]
feeds = [{"source": s, "target": t} for s, t in zip(sources, targets)]
self.c.follow_many(feeds)
for target in targets:
follows = self.c.feed(*target.split(":")).followers()["results"]
self.assertEqual(len(follows), 1)
self.assertTrue(follows[0]["feed_id"] in sources)
self.assertEqual(follows[0]["target_id"], target)
for source in sources:
follows = self.c.feed(*source.split(":")).following()["results"]
self.assertEqual(len(follows), 1)
self.assertEqual(follows[0]["feed_id"], source)
self.assertTrue(follows[0]["target_id"] in targets)
def test_follow_many_acl(self):
sources = [getfeed("user", str(i)) for i in range(10)]
# ensure every source is empty first
for feed in sources:
activities = feed.get(limit=100)["results"]
for activity in activities:
feed.remove_activity(activity["id"])
targets = [getfeed("flat", str(i)) for i in range(10)]
# ensure every source is empty first
for feed in targets:
activities = feed.get(limit=100)["results"]
for activity in activities:
feed.remove_activity(activity["id"])
# add activity to each target feed
activity = {
"actor": "barry",
"object": "09",
"verb": "tweet",
"time": datetime.datetime.utcnow().isoformat(),
}
for feed in targets:
feed.add_activity(activity)
self.assertEqual(len(feed.get(limit=5)["results"]), 1)
sources_id = [feed.id for feed in sources]
targets_id = [target.id for target in targets]
feeds = [{"source": s, "target": t} for s, t in zip(sources_id, targets_id)]
self.c.follow_many(feeds, activity_copy_limit=0)
for feed in sources:
activities = feed.get(limit=5)["results"]
self.assertEqual(len(activities), 0)
def test_add_to_many(self):
activity = {"actor": 1, "verb": "tweet", "object": 1, "custom": "data"}
feeds = [getfeed("flat", str(i)).id for i in range(10, 20)]
self.c.add_to_many(activity, feeds)
for feed in feeds:
feed = self.c.feed(*feed.split(":"))
self.assertEqual(feed.get()["results"][0]["custom"], "data")
def test_create_email_redirect(self):
target_url = "http://google.com/?a=b&c=d"
user_id = "tommaso"
impression = {
"foreign_ids": ["tweet:1", "tweet:2", "tweet:3", "tweet:4", "tweet:5"],
"feed_id": "user:global",
"user_id": user_id,
"location": "email",
}
engagement = {
"user_id": user_id,
"label": "click",
"feed_id": "user:global",
"location": "email",
"position": 3,
"foreign_id": "tweet:1",
}
events = [impression, engagement]
redirect_url = self.c.create_redirect_url(target_url, user_id, events)
parsed_url = urlparse(redirect_url)
qs = parse_qs(parsed_url.query)
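        # the redirect URL embeds a JWT scoped to the redirect_and_track resource; decode it to verify the claims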
decoded = jwt.decode(qs["authorization"][0], self.c.api_secret)
self.assertEqual(
decoded,
{
"resource": "redirect_and_track",
"action": "*",
"feed_id": "*",
"user_id": "tommaso",
},
)
expected_params = {
"auth_type": "jwt",
"url": target_url,
"api_key": self.c.api_key,
}
for k, v in expected_params.items():
self.assertEqual(qs[k][0], v)
self.assertEqual(json.loads(qs["events"][0]), events)
def test_email_redirect_invalid_target(self):
engagement = {
"foreign_id": "tweet:1",
"label": "click",
"position": 3,
"user_id": "tommaso",
"location": "email",
"feed_id": "user:global",
}
impression = {
"foreign_ids": ["tweet:1", "tweet:2", "tweet:3", "tweet:4", "tweet:5"],
"user_id": "tommaso",
"location": "email",
"feed_id": "user:global",
}
events = [impression, engagement]
# no protocol specified, this should raise an error
target_url = "google.com"
user_id = "tommaso"
create_redirect = lambda: self.c.create_redirect_url(
target_url, user_id, events
)
self.assertRaises(MissingSchema, create_redirect)
def test_follow_redirect_url(self):
target_url = "http://google.com/?a=b&c=d"
events = []
user_id = "tommaso"
redirect_url = self.c.create_redirect_url(target_url, user_id, events)
res = requests.get(redirect_url)
res.raise_for_status()
self.assertTrue("google" in res.url)
def test_get_activities_empty_ids(self):
response = self.c.get_activities(ids=[str(uuid1())])
self.assertEqual(len(response["results"]), 0)
def test_get_activities_empty_foreign_ids(self):
response = self.c.get_activities(
foreign_id_times=[("fid-x", datetime.datetime.utcnow())]
)
self.assertEqual(len(response["results"]), 0)
def test_get_activities_full(self):
dt = datetime.datetime.utcnow()
fid = "awesome-test"
activity = {
"actor": "barry",
"object": "09",
"verb": "tweet",
"time": dt,
"foreign_id": fid,
}
feed = getfeed("user", "test_get_activity")
response = feed.add_activity(activity)
response = self.c.get_activities(ids=[response["id"]])
self.assertEqual(len(response["results"]), 1)
self.assertEqual(activity["foreign_id"], response["results"][0]["foreign_id"])
response = self.c.get_activities(foreign_id_times=[(fid, dt)])
self.assertEqual(len(response["results"]), 1)
self.assertEqual(activity["foreign_id"], response["results"][0]["foreign_id"])
def test_activity_partial_update(self):
now = datetime.datetime.utcnow()
feed = self.c.feed("user", uuid4())
feed.add_activity(
{
"actor": "barry",
"object": "09",
"verb": "tweet",
"time": now,
"foreign_id": "fid:123",
"product": {"name": "shoes", "price": 9.99, "color": "blue"},
}
)
activity = feed.get()["results"][0]
set = {
"product.name": "boots",
"product.price": 7.99,
"popularity": 1000,
"foo": {"bar": {"baz": "qux"}},
}
unset = ["product.color"]
# partial update by ID
self.c.activity_partial_update(id=activity["id"], set=set, unset=unset)
updated = feed.get()["results"][0]
expected = activity
expected["product"] = {"name": "boots", "price": 7.99}
expected["popularity"] = 1000
expected["foo"] = {"bar": {"baz": "qux"}}
self.assertEqual(updated, expected)
# partial update by foreign ID + time
set = {"foo.bar.baz": 42, "popularity": 9000}
unset = ["product.price"]
self.c.activity_partial_update(
foreign_id=activity["foreign_id"],
time=activity["time"],
set=set,
unset=unset,
)
updated = feed.get()["results"][0]
expected["product"] = {"name": "boots"}
expected["foo"] = {"bar": {"baz": 42}}
expected["popularity"] = 9000
self.assertEqual(updated, expected)
def test_create_reference(self):
ref = self.c.collections.create_reference("item", "42")
self.assertEqual(ref, "SO:item:42")
def test_create_user_reference(self):
ref = self.c.users.create_reference("42")
self.assertEqual(ref, "SU:42")
def test_reaction_add(self):
self.c.reactions.add("like", "54a60c1e-4ee3-494b-a1e3-50c06acb5ed4", "mike")
def test_reaction_get(self):
response = self.c.reactions.add(
"like", "54a60c1e-4ee3-494b-a1e3-50c06acb5ed4", "mike"
)
reaction = self.c.reactions.get(response["id"])
self.assertEqual(reaction["parent"], "")
self.assertEqual(reaction["data"], {})
self.assertEqual(reaction["latest_children"], {})
self.assertEqual(reaction["children_counts"], {})
self.assertEqual(
reaction["activity_id"], "54a60c1e-4ee3-494b-a1e3-50c06acb5ed4"
)
self.assertEqual(reaction["kind"], "like")
self.assertTrue("created_at" in reaction)
self.assertTrue("updated_at" in reaction)
self.assertTrue("id" in reaction)
def test_reaction_update(self):
response = self.c.reactions.add(
"like", "54a60c1e-4ee3-494b-a1e3-50c06acb5ed4", "mike"
)
self.c.reactions.update(response["id"], {"changed": True})
def test_reaction_delete(self):
response = self.c.reactions.add(
"like", "54a60c1e-4ee3-494b-a1e3-50c06acb5ed4", "mike"
)
self.c.reactions.delete(response["id"])
def test_reaction_add_child(self):
response = self.c.reactions.add(
"like", "54a60c1e-4ee3-494b-a1e3-50c06acb5ed4", "mike"
)
self.c.reactions.add_child("like", response["id"], "rob")
def test_reaction_filter_random(self):
self.c.reactions.filter(
kind="like",
reaction_id="54a60c1e-4ee3-494b-a1e3-50c06acb5ed4",
id_lte="54a60c1e-4ee3-494b-a1e3-50c06acb5ed4",
)
self.c.reactions.filter(
activity_id="54a60c1e-4ee3-494b-a1e3-50c06acb5ed4",
id_lte="54a60c1e-4ee3-494b-a1e3-50c06acb5ed4",
)
self.c.reactions.filter(
user_id="mike", id_lte="54a60c1e-4ee3-494b-a1e3-50c06acb5ed4"
)
def _first_result_should_be(self, response, element):
el = element.copy()
el.pop("duration")
self.assertEqual(len(response["results"]), 1)
self.assertEqual(response["results"][0], el)
def test_reaction_filter(self):
activity_id = str(uuid1())
        user = str(uuid1())
        response = self.c.reactions.add("like", activity_id, user)
        child = self.c.reactions.add_child("like", response["id"], user)
        reaction = self.c.reactions.get(response["id"])
        response = self.c.reactions.add("comment", activity_id, user)
        reaction_comment = self.c.reactions.get(response["id"])
        r = self.c.reactions.filter(reaction_id=reaction["id"])
        self._first_result_should_be(r, child)
        r = self.c.reactions.filter(kind="like", activity_id=activity_id, id_lte=reaction["id"])
        self._first_result_should_be(r, reaction)
        r = self.c.reactions.filter(kind="like", user_id=user, id_lte=reaction["id"])
        self._first_result_should_be(r, reaction)
        r = self.c.reactions.filter(kind="comment", activity_id=activity_id)
        self._first_result_should_be(r, reaction_comment)

    def test_user_add(self):
        self.c.users.add(str(uuid1()))

    def test_user_add_get_or_create(self):
        user_id = str(uuid1())
        r1 = self.c.users.add(user_id)
        r2 = self.c.users.add(user_id, get_or_create=True)
        self.assertEqual(r1["id"], r2["id"])
        self.assertEqual(r1["created_at"], r2["created_at"])
        self.assertEqual(r1["updated_at"], r2["updated_at"])

    def test_user_get(self):
        response = self.c.users.add(str(uuid1()))
        user = self.c.users.get(response["id"])
        self.assertEqual(user["data"], {})
        self.assertTrue("created_at" in user)
        self.assertTrue("updated_at" in user)
        self.assertTrue("id" in user)

    def test_user_update(self):
        response = self.c.users.add(str(uuid1()))
        self.c.users.update(response["id"], {"changed": True})

    def test_user_delete(self):
        response = self.c.users.add(str(uuid1()))
        self.c.users.delete(response["id"])

    def test_collections_add(self):
        self.c.collections.add("items", {"data": 1}, id=str(uuid1()), user_id="tom")

    def test_collections_add_no_id(self):
        self.c.collections.add("items", {"data": 1})

    def test_collections_get(self):
        response = self.c.collections.add("items", {"data": 1}, id=str(uuid1()))
        entry = self.c.collections.get("items", response["id"])
        self.assertEqual(entry["data"], {"data": 1})
        self.assertTrue("created_at" in entry)
        self.assertTrue("updated_at" in entry)
        self.assertTrue("id" in entry)

    def test_collections_update(self):
        response = self.c.collections.add("items", {"data": 1}, str(uuid1()))
        self.c.collections.update("items", response["id"], data={"changed": True})
        entry = self.c.collections.get("items", response["id"])
        self.assertEqual(entry["data"], {"changed": True})

    def test_collections_delete(self):
        response = self.c.collections.add("items", {"data": 1}, str(uuid1()))
        self.c.collections.delete("items", response["id"])

    def test_feed_enrichment_collection(self):
        entry = self.c.collections.add("items", {"name": "time machine"})
        entry.pop("duration")
        f = getfeed("user", "mike")
        activity_data = {
            "actor": "mike",
            "verb": "buy",
            "object": self.c.collections.create_reference(entry=entry),
        }
        f.add_activity(activity_data)
        response = f.get()
        self.assertTrue(
            set(activity_data.items()).issubset(set(response["results"][0].items()))
        )
        enriched_response = f.get(enrich=True)
        self.assertEqual(enriched_response["results"][0]["object"], entry)

    def test_feed_enrichment_user(self):
        user = self.c.users.add(str(uuid1()), {"name": "Mike"})
        user.pop("duration")
        f = getfeed("user", "mike")
        activity_data = {
            "actor": self.c.users.create_reference(user),
            "verb": "buy",
            "object": "time machine",
        }
        f.add_activity(activity_data)
        response = f.get()
        self.assertTrue(
            set(activity_data.items()).issubset(set(response["results"][0].items()))
        )
        enriched_response = f.get(enrich=True)
        self.assertEqual(enriched_response["results"][0]["actor"], user)

    def test_feed_enrichment_own_reaction(self):
        f = getfeed("user", "mike")
        activity_data = {"actor": "mike", "verb": "buy", "object": "object"}
        response = f.add_activity(activity_data)
        reaction = self.c.reactions.add("like", response["id"], "mike")
        reaction.pop("duration")
        enriched_response = f.get(reactions={"own": True}, user_id="mike")
        self.assertEqual(
            enriched_response["results"][0]["own_reactions"]["like"][0], reaction
        )

    def test_feed_enrichment_recent_reaction(self):
        f = getfeed("user", "mike")
        activity_data = {"actor": "mike", "verb": "buy", "object": "object"}
        response = f.add_activity(activity_data)
        reaction = self.c.reactions.add("like", response["id"], "mike")
        reaction.pop("duration")
        enriched_response = f.get(reactions={"recent": True})
        self.assertEqual(
            enriched_response["results"][0]["latest_reactions"]["like"][0], reaction
        )

    def test_feed_enrichment_reaction_counts(self):
        f = getfeed("user", "mike")
        activity_data = {"actor": "mike", "verb": "buy", "object": "object"}
        response = f.add_activity(activity_data)
        reaction = self.c.reactions.add("like", response["id"], "mike")
        reaction.pop("duration")
        enriched_response = f.get(reactions={"counts": True})
        self.assertEqual(enriched_response["results"][0]["reaction_counts"]["like"], 1)
| []
| []
| [
"LOCAL",
"STREAM_SECRET",
"STREAM_URL",
"STREAM_KEY"
]
| [] | ["LOCAL", "STREAM_SECRET", "STREAM_URL", "STREAM_KEY"] | python | 4 | 0 | |
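The fixture that builds `self.c` and the `getfeed` helper sit earlier in the full test module, outside this excerpt. Going by the environment list recorded above (STREAM_KEY, STREAM_SECRET, STREAM_URL, LOCAL), a minimal, assumed sketch of that setup with the stream client library would be:

import os
import stream

# assumed setup mirroring the env vars recorded for this file
# (not the verbatim fixture from the full test module)
client = stream.connect(os.environ["STREAM_KEY"], os.environ["STREAM_SECRET"])

def getfeed(feed_slug, user_id):
    # sketch of the helper the tests call; the real one may randomize user_id
    return client.feed(feed_slug, user_id)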
bot/bot.go | package bot

import (
	"dogfact/fact"
	"fmt"
	"log"
	"math/rand"
	"os"
	"os/signal"
	"strconv"
	"strings"
	"syscall"

	"github.com/bwmarrin/discordgo"
)
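
// TWORD is the trigger word: any message containing it earns a random fact.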
var TWORD = "dog"
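
// Start connects to Discord using the bot token from the TOKEN environment
// variable, registers the message handler, and blocks until SIGINT/SIGTERM.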
func Start() {
	dg, err := discordgo.New("Bot " + os.Getenv("TOKEN"))
	if err != nil {
		panic(err)
	}

	dg.AddHandler(onMsg)
	dg.Identify.Intents = discordgo.IntentsGuildMessages

	err = dg.Open()
	if err != nil {
		panic(err)
	}
	log.Println("Running")

	sc := make(chan os.Signal, 1)
	signal.Notify(sc, syscall.SIGINT, syscall.SIGTERM, os.Interrupt)
	<-sc

	dg.Close()
	fmt.Println("")
	log.Println("Stopped")
}
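
// onMsg answers "how many facts do you have?", "fact #N" lookups, and any
// message containing TWORD with entries from fact.List.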
func onMsg(s *discordgo.Session, m *discordgo.MessageCreate) {
	if m.Author.ID == s.State.User.ID {
		return
	}
	if m.Author.Bot {
		return
	}

	message := strings.ToLower(m.Content)

	if message == "how many facts do you have?" {
		fcount := fmt.Sprintf("I have %d facts for you", len(fact.List))
		s.ChannelMessageSend(m.ChannelID, fcount)
		log.Println("Asked how many facts")
		return
	}

	if strings.HasPrefix(message, "fact #") {
		message = strings.TrimPrefix(message, "fact #")
		message = strings.TrimFunc(message, func(r rune) bool {
			return r < '0' || r > '9'
		})
		num, err := strconv.Atoi(message)
		if err != nil {
			s.ChannelMessageSend(m.ChannelID, "Cannot understand the number "+message)
			return
		}
		num -= 1
		if num < 0 || num > len(fact.List)-1 {
			s.ChannelMessageSend(m.ChannelID, "I don't have that many facts!")
			return
		}
		msg := fmt.Sprintf("Fact #%s: %s", message, fact.List[num])
		s.ChannelMessageSend(m.ChannelID, msg)
		log.Printf("Dispensed fact #%d", num+1)
		return
	}

	if strings.Contains(message, TWORD) {
		s.ChannelTyping(m.ChannelID)
		i := rand.Intn(len(fact.List))
		msg := fmt.Sprintf("Fact #%d: %s", i+1, fact.List[i])
		s.ChannelMessageSend(m.ChannelID, msg)
		log.Printf("Dispensed fact #%d", i+1)
	}
}
| [
"\"TOKEN\""
]
| []
| [
"TOKEN"
]
| [] | ["TOKEN"] | go | 1 | 0 | |
cmd/validation-gorm/main.go | package main

import (
	"fmt"
	"os"

	"gorm.io/driver/postgres"
	"gorm.io/gorm"
)

func main() {
	dsn := fmt.Sprintf(
		"host=%s port=%s user=%s password=%s dbname=%s sslmode=%s",
		os.Getenv("DBHOST"), os.Getenv("DBPORT"), os.Getenv("DBUSER"),
		os.Getenv("DBPASS"), os.Getenv("DBNAME"), os.Getenv("SSLMODE"),
	)
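	// NOTE: the DSN string includes DBPASS, so printing it is only
	// appropriate for throwaway local connection checks.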
fmt.Println("dsn:", dsn)
db, err := gorm.Open(postgres.Open(dsn), &gorm.Config{})
if err != nil {
panic("failed to connect database")
}
fmt.Println("Connected. db.Error", db.Error)
// TODO: Enjoy!
}
| [
"\"DBHOST\"",
"\"DBPORT\"",
"\"DBUSER\"",
"\"DBPASS\"",
"\"DBNAME\"",
"\"SSLMODE\""
]
| []
| [
"DBPASS",
"DBHOST",
"DBUSER",
"DBNAME",
"DBPORT",
"SSLMODE"
]
| [] | ["DBPASS", "DBHOST", "DBUSER", "DBNAME", "DBPORT", "SSLMODE"] | go | 6 | 0 | |
HR_gradingStudents.py | #!/bin/python3

import os
import sys

#
# Complete the gradingStudents function below.
#
def gradingStudents(grades):
    # another elegant solution with a lambda function:
    # map(lambda x: 5*(1 + x//5) if (x > 37 and ((x%5) > 2)) else x, grades)
    # but we will use this for the solution
    result = []
    for i in grades:
        if i >= 38:
            if i % 5 >= 3:
                i += (5 - i % 5)
        result.append(i)
    return result

if __name__ == '__main__':
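    # the HackerRank judge supplies OUTPUT_PATH; results are written there
    # rather than to stdout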
    f = open(os.environ['OUTPUT_PATH'], 'w')

    n = int(input())

    grades = []
    for _ in range(n):
        grades_item = int(input())
        grades.append(grades_item)

    result = gradingStudents(grades)

    f.write('\n'.join(map(str, result)))
    f.write('\n')

    f.close()
| []
| []
| [
"OUTPUT_PATH"
]
| [] | ["OUTPUT_PATH"] | python | 1 | 0 | |
database_query_handler.py | import psycopg2
import psycopg2.extras


class DBHandler:
    """
    Handles I/O concerning the database to hide its implementation from client services.
    """

    def __init__(self,
                 postgres_username=None,
                 postgres_password=None,
                 db_username='dbpedia_app',
                 db_password='dummy_password'):
        # ordinarily you would get these from some secret store
        # e.g. heroku has a specific url that you parse to get both
        # or os.environ storage (like those used for API keys and the like)
        user_name = db_username
        password = db_password

        # check to see if the db exists locally, create it if necessary
        if postgres_password is not None and postgres_username is not None:
            try:
                connection = psycopg2.connect("dbname='postgres' user='%s' "
                                              "host='localhost' password='%s'"
                                              % (postgres_username, postgres_password))
                connection.autocommit = True
                cursor = connection.cursor()
                # queries the postgres catalog to see if 'dbpedia' exists;
                # if not, creates it
                cursor.execute("SELECT COUNT(*) = 0 FROM pg_catalog.pg_database WHERE datname = 'dbpedia'")
                not_exists_row = cursor.fetchone()
                not_exists = not_exists_row[0]
                if not_exists:
                    cursor.execute("CREATE USER %s PASSWORD '%s'" % (user_name, password))
                    cursor.execute('CREATE DATABASE dbpedia OWNER %s' % (user_name,))
                connection.close()
            except:
                # presume that if credentials are passed, the user wants to perform
                # this check/DB construction; fail via error propagation
                raise

        try:
            self.connection = psycopg2.connect("dbname='dbpedia' user='%s' host='localhost' password='%s'"
                                               % (user_name, password))
        except:
            raise AssertionError('Failed to connect to dbpedia database. Has the local dbpedia been created?')

    def __del__(self):
        self.connection.close()

    def commit(self):
        self.connection.commit()

    def schema_exists(self):
        """
        Checks the estimated number of tuples in the subjects table to determine if data exists.
        :return:
        """
        with self.connection.cursor() as cursor:
            cursor.execute('select reltuples FROM pg_class where relname = %s', ('subjects',))
            result = cursor.fetchone()[0]
            return result > 0

    def build_table_schema(self, schema_name, schema_file_path):
        """
        Loads the dbpedia schema used for supporting downstream analysis. If the schema already exists, it is
        dropped (deleted) and recreated.
        :param schema_name:
        :param schema_file_path:
        :return:
        """
        # do not call with user input given the manual query construction here
        with self.connection.cursor() as cursor:
            cursor.execute('DROP SCHEMA IF EXISTS %s CASCADE' % schema_name)
            schema_file = open(schema_file_path, 'r').read()
            cursor.execute(schema_file)

    def build_indices(self):
        """
        Builds the following indices:
            Index on name for subjects
            Index on predicate for predicate_object
            Index on subject_id for predicate_object
        :return:
        """
        with self.connection.cursor() as cursor:
            cursor.execute('DROP INDEX IF EXISTS dbpedia.pv_subject_id_idx')
            cursor.execute('DROP INDEX IF EXISTS dbpedia.subject_idx')
            cursor.execute('DROP INDEX IF EXISTS dbpedia.pv_predicate_idx')
            cursor.execute('create index subject_idx on dbpedia.subjects (name)')
            cursor.execute('create index pv_subject_id_idx on dbpedia.predicate_object (subject_id)')
            cursor.execute('create index pv_predicate_idx on dbpedia.predicate_object (predicate);')

    def insert_spo_tuple(self, spo_tuple):
        """
        Handles the insertion of spo tuples into the db. Workflow:
        Attempt to find the subject table entry corresponding to your subject. If found, use that ID for
        inserting your po values. Otherwise, insert your subject into the subject table and use that ID
        instead. The resulting id, predicate, object tuple is then inserted into the predicate_object table.
        :param spo_tuple:
        :return:
        """
        (subject, predicate, db_object) = spo_tuple

        with self.connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cursor:
            cursor.execute('select subject_id from dbpedia.subjects '
                           'where name = %s', (subject,))
            results = cursor.fetchone()
            if results is None or len(results) == 0:
                cursor.execute('INSERT INTO dbpedia.subjects (name) VALUES (%s) '
                               'returning subject_id', (subject,))
                results = cursor.fetchone()
            id = results['subject_id']

            # now that we have the correct id in either case, insert the values into the db
            cursor.execute('INSERT INTO dbpedia.predicate_object (subject_id, predicate, object) '
                           'VALUES (%s, %s, %s)', (id, predicate, db_object))

    def get_person_metadata(self, person_name, use_exact_match=False):
        """
        Returns all metadata associated with the provided person_name. However, does not actually check
        to see if the identifier corresponds to a person or not; the class of the identifier will
        be included in the returned metadata though. DBPedia People only contains people predicate
        types as well.

        use_exact_match toggles between two behaviors: if True, uses the exact identifier provided
        to query against the subject table (WHERE = identifier). If False, uses the LIKE operator
        to attempt to find similar IDs that are not exactly the same. Results will still be a superset
        of the use_exact_match = True case.
        :param person_name:
        :param use_exact_match:
        :return:
        """
        # wikipedia replaces all spaces with underscores;
        # upper-case both sides so the match is case-insensitive
        person_name = person_name.replace(' ', '_').upper()

        with self.connection.cursor() as cursor:
            # get the ids associated with this person
            # (all similar IDs unless an exact match is requested)
            if not use_exact_match:
                cursor.execute('SELECT subject_id, name FROM dbpedia.subjects WHERE upper(name) '
                               'LIKE %s',
                               ('%%' + person_name + '%%',))
            else:
                cursor.execute('SELECT subject_id, name FROM dbpedia.subjects WHERE upper(name) = %s',
                               (person_name,))
            results = cursor.fetchall()

            # no person matches the input name; return empty list
            if results is None:
                return []

            subject_id_list = [x[0] for x in results]

            # get all metadata associated with the subject_ids
            cursor.execute('select dbpedia.subjects.name, predicate, object '
                           'FROM dbpedia.predicate_object '
                           'INNER JOIN dbpedia.subjects on (dbpedia.subjects.subject_id = dbpedia.predicate_object.subject_id) '
                           'WHERE dbpedia.predicate_object.subject_id = ANY(%s)', (subject_id_list,))

            # this should never be None;
            # sort results by name and return
            return sorted(cursor.fetchall(), key=lambda x: x[0])

    def get_tuples_by_predicate(self, predicate_of_interest):
        """
        Extracts SPO tuples based on the predicate value passed to the function. This query will be slow since
        you are querying such a large fraction of the po table at once (unless your predicate does not exist).
        Predicates:
            Name
            Type
            Gender
            Description
            Birthdate
            GivenName
            Surname
            BirthPlace
            DeathDate
            DeathPlace
        :param predicate_of_interest:
        :return:
        """
        with self.connection.cursor() as cursor:
            cursor.execute('select dbpedia.subjects.name, '
                           'predicate, '
                           'object '
                           'FROM dbpedia.predicate_object '
                           'INNER JOIN dbpedia.subjects on (dbpedia.subjects.subject_id = dbpedia.predicate_object.subject_id) '
                           'WHERE upper(dbpedia.predicate_object.predicate) = upper(%s)', (predicate_of_interest,))
            results = cursor.fetchall()
            if results is None:
                return []
            else:
                return results
| []
| []
| []
| [] | [] | python | 0 | 0 |
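For reference, a minimal usage sketch of DBHandler (assumes a local PostgreSQL reachable with the given superuser credentials; the schema path and credential values are hypothetical):

from database_query_handler import DBHandler

# first run: bootstrap the dbpedia database and role via superuser credentials
db = DBHandler(postgres_username="postgres", postgres_password="postgres")
if not db.schema_exists():
    db.build_table_schema("dbpedia", "sql/dbpedia_schema.sql")  # hypothetical path
db.insert_spo_tuple(("Alan_Turing", "Type", "Person"))
db.commit()
db.build_indices()
print(db.get_person_metadata("Alan Turing"))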