// test/e2e/framework/util.go
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"syscall"
"text/tabwriter"
"time"
"github.com/golang/glog"
"golang.org/x/crypto/ssh"
"golang.org/x/net/websocket"
"google.golang.org/api/googleapi"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
gomegatypes "github.com/onsi/gomega/types"
batch "k8s.io/api/batch/v1"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
utilyaml "k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
scaleclient "k8s.io/client-go/scale"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/api/testapi"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
appsinternal "k8s.io/kubernetes/pkg/apis/apps"
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
api "k8s.io/kubernetes/pkg/apis/core"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/controller"
nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/pkg/scheduler/schedulercache"
sshutil "k8s.io/kubernetes/pkg/ssh"
"k8s.io/kubernetes/pkg/util/system"
taintutils "k8s.io/kubernetes/pkg/util/taints"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
testutil "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
uexec "k8s.io/utils/exec"
)
const (
// How long to wait for the pod to be listable
PodListTimeout = time.Minute
// Initial pod start can be delayed O(minutes) by slow docker pulls
// TODO: Make this 30 seconds once #4566 is resolved.
PodStartTimeout = 5 * time.Minute
// If there are any orphaned namespaces to clean up, this test is running
// on a long-lived cluster. A long wait here is preferable to spurious test
// failures caused by leaked resources from a previous test run.
NamespaceCleanupTimeout = 15 * time.Minute
// Some pods can take much longer to get ready due to volume attach/detach latency.
slowPodStartTimeout = 15 * time.Minute
// How long to wait for a service endpoint to be resolvable.
ServiceStartTimeout = 1 * time.Minute
// How often to Poll pods, nodes and claims.
Poll = 2 * time.Second
pollShortTimeout = 1 * time.Minute
pollLongTimeout = 5 * time.Minute
// Service accounts are provisioned after namespace creation.
// A service account is required to support pod creation in a namespace as part of admission control.
ServiceAccountProvisionTimeout = 2 * time.Minute
// How long to try single API calls (like 'get' or 'list'). Used to prevent
// transient failures from failing tests.
// TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed.
SingleCallTimeout = 5 * time.Minute
// How long nodes have to be "ready" when a test begins. They should already
// be "ready" before the test starts, so this is small.
NodeReadyInitialTimeout = 20 * time.Second
// How long pods have to be "ready" when a test begins.
PodReadyBeforeTimeout = 5 * time.Minute
// How long pods have to become scheduled onto nodes
podScheduledBeforeTimeout = PodListTimeout + (20 * time.Second)
podRespondingTimeout = 15 * time.Minute
ServiceRespondingTimeout = 2 * time.Minute
EndpointRegisterTimeout = time.Minute
// How long claims have to become dynamically provisioned
ClaimProvisionTimeout = 5 * time.Minute
// How long claims have to become bound
ClaimBindingTimeout = 3 * time.Minute
// How long claims have to become deleted
ClaimDeletingTimeout = 3 * time.Minute
// How long PVs have to become reclaimed
PVReclaimingTimeout = 3 * time.Minute
// How long PVs have to become bound
PVBindingTimeout = 3 * time.Minute
// How long PVs have to become deleted
PVDeletingTimeout = 3 * time.Minute
// How long a node is allowed to become "Ready" after it is restarted before
// the test is considered failed.
RestartNodeReadyAgainTimeout = 5 * time.Minute
// How long a pod is allowed to become "running" and "ready" after a node
// restart before test is considered failed.
RestartPodReadyAgainTimeout = 5 * time.Minute
// Number of objects that gc can delete in a second.
// GC issues 2 requests for a single delete.
gcThroughput = 10
// Minimal number of nodes for the cluster to be considered large.
largeClusterThreshold = 100
// TODO(justinsb): Avoid hardcoding this.
awsMasterIP = "172.20.0.9"
// SSH port
sshPort = "22"
// ImagePrePullingTimeout is the time we wait for the e2e-image-puller
// static pods to pull the list of seeded images. If they don't pull
// images within this time we simply log their output and carry on
// with the tests.
ImagePrePullingTimeout = 5 * time.Minute
)
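// Example (illustrative sketch, not a definitive usage): the Poll interval and
// the timeouts above are typically combined with wait.PollImmediate, e.g.:
//
//	err := wait.PollImmediate(Poll, PodStartTimeout, func() (bool, error) {
//		// check some condition; return true to stop polling
//		return false, nil
//	})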
var (
BusyBoxImage = "busybox"
// Label allocated to the image puller static pod that runs on each node
// before e2es.
ImagePullerLabels = map[string]string{"name": "e2e-image-puller"}
// For parsing Kubectl version for version-skewed testing.
gitVersionRegexp = regexp.MustCompile("GitVersion:\"(v.+?)\"")
// Slice of regexps for names of pods that have to be running to consider a Node "healthy"
requiredPerNodePods = []*regexp.Regexp{
regexp.MustCompile(".*kube-proxy.*"),
regexp.MustCompile(".*fluentd-elasticsearch.*"),
regexp.MustCompile(".*node-problem-detector.*"),
}
// Serve hostname image name
ServeHostnameImage = imageutils.GetE2EImage(imageutils.ServeHostname)
)
type Address struct {
internalIP string
externalIP string
hostname string
}
// GetServerArchitecture fetches the architecture of the cluster's apiserver.
func GetServerArchitecture(c clientset.Interface) string {
arch := ""
sVer, err := c.Discovery().ServerVersion()
if err != nil || sVer.Platform == "" {
// If we failed to get the server version for some reason, default to amd64.
arch = "amd64"
} else {
// Split the platform string into OS and Arch separately.
// The platform string may for example be "linux/amd64", "linux/arm" or "windows/amd64".
osArchArray := strings.Split(sVer.Platform, "/")
arch = osArchArray[1]
}
return arch
}
// GetPauseImageName fetches the pause image name for the same architecture as the apiserver.
func GetPauseImageName(c clientset.Interface) string {
return imageutils.GetE2EImageWithArch(imageutils.Pause, GetServerArchitecture(c))
}
func GetServicesProxyRequest(c clientset.Interface, request *restclient.Request) (*restclient.Request, error) {
return request.Resource("services").SubResource("proxy"), nil
}
// RunId is the unique identifier of the e2e run.
var RunId = uuid.NewUUID()
type CreateTestingNSFn func(baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error)
type ContainerFailures struct {
status *v1.ContainerStateTerminated
Restarts int
}
func GetMasterHost() string {
masterUrl, err := url.Parse(TestContext.Host)
ExpectNoError(err)
return masterUrl.Host
}
func nowStamp() string {
return time.Now().Format(time.StampMilli)
}
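// Example (illustrative sketch; podName, ns, and err are assumed to exist in
// the caller): Logf writes timestamped INFO lines to the Ginkgo writer via the
// log helper below, while Failf formats a message, logs it, and fails the
// current spec through ginkgowrapper:
//
//	Logf("created pod %q in namespace %q", podName, ns)
//	if err != nil {
//		Failf("unexpected error creating pod: %v", err)
//	}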
func log(level string, format string, args ...interface{}) {
fmt.Fprintf(GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...)
}
func Logf(format string, args ...interface{}) {
log("INFO", format, args...)
}
func Failf(format string, args ...interface{}) {
FailfWithOffset(1, format, args...)
}
// FailfWithOffset calls "Fail" and logs the error at "offset" levels above its caller
// (for example, for call chain f -> g -> FailfWithOffset(1, ...) error would be logged for "f").
func FailfWithOffset(offset int, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
log("INFO", msg)
ginkgowrapper.Fail(nowStamp()+": "+msg, 1+offset)
}
func Skipf(format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
log("INFO", msg)
ginkgowrapper.Skip(nowStamp() + ": " + msg)
}
func SkipUnlessNodeCountIsAtLeast(minNodeCount int) {
if TestContext.CloudConfig.NumNodes < minNodeCount {
Skipf("Requires at least %d nodes (not %d)", minNodeCount, TestContext.CloudConfig.NumNodes)
}
}
func SkipUnlessNodeCountIsAtMost(maxNodeCount int) {
if TestContext.CloudConfig.NumNodes > maxNodeCount {
Skipf("Requires at most %d nodes (not %d)", maxNodeCount, TestContext.CloudConfig.NumNodes)
}
}
func SkipUnlessAtLeast(value int, minValue int, message string) {
if value < minValue {
Skipf(message)
}
}
func SkipIfProviderIs(unsupportedProviders ...string) {
if ProviderIs(unsupportedProviders...) {
Skipf("Not supported for providers %v (found %s)", unsupportedProviders, TestContext.Provider)
}
}
func SkipUnlessLocalEphemeralStorageEnabled() {
if !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
Skipf("Only supported when %v feature is enabled", features.LocalStorageCapacityIsolation)
}
}
func SkipUnlessSSHKeyPresent() {
if _, err := GetSigner(TestContext.Provider); err != nil {
Skipf("No SSH Key for provider %s: '%v'", TestContext.Provider, err)
}
}
func SkipUnlessProviderIs(supportedProviders ...string) {
if !ProviderIs(supportedProviders...) {
Skipf("Only supported for providers %v (not %s)", supportedProviders, TestContext.Provider)
}
}
func SkipUnlessClusterMonitoringModeIs(supportedMonitoring ...string) {
if !ClusterMonitoringModeIs(supportedMonitoring...) {
Skipf("Only next monitoring modes are supported %v (not %s)", supportedMonitoring, TestContext.ClusterMonitoringMode)
}
}
func SkipUnlessMasterOSDistroIs(supportedMasterOsDistros ...string) {
if !MasterOSDistroIs(supportedMasterOsDistros...) {
Skipf("Only supported for master OS distro %v (not %s)", supportedMasterOsDistros, TestContext.MasterOSDistro)
}
}
func SkipUnlessNodeOSDistroIs(supportedNodeOsDistros ...string) {
if !NodeOSDistroIs(supportedNodeOsDistros...) {
Skipf("Only supported for node OS distro %v (not %s)", supportedNodeOsDistros, TestContext.NodeOSDistro)
}
}
func SkipIfContainerRuntimeIs(runtimes ...string) {
for _, runtime := range runtimes {
if runtime == TestContext.ContainerRuntime {
Skipf("Not supported under container runtime %s", runtime)
}
}
}
func RunIfContainerRuntimeIs(runtimes ...string) {
for _, runtime := range runtimes {
if runtime == TestContext.ContainerRuntime {
return
}
}
Skipf("Skipped because container runtime %q is not in %s", TestContext.ContainerRuntime, runtimes)
}
func RunIfSystemSpecNameIs(names ...string) {
for _, name := range names {
if name == TestContext.SystemSpecName {
return
}
}
Skipf("Skipped because system spec name %q is not in %v", TestContext.SystemSpecName, names)
}
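// Example (illustrative): a spec typically gates itself on its environment up
// front using the skip helpers above, e.g.:
//
//	SkipUnlessProviderIs("gce", "gke")
//	SkipUnlessNodeCountIsAtLeast(2)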
func ProviderIs(providers ...string) bool {
for _, provider := range providers {
if strings.EqualFold(provider, TestContext.Provider) {
return true
}
}
return false
}
func ClusterMonitoringModeIs(monitoringModes ...string) bool {
for _, mode := range monitoringModes {
if strings.EqualFold(mode, TestContext.ClusterMonitoringMode) {
return true
}
}
return false
}
func MasterOSDistroIs(supportedMasterOsDistros ...string) bool {
for _, distro := range supportedMasterOsDistros {
if strings.EqualFold(distro, TestContext.MasterOSDistro) {
return true
}
}
return false
}
func NodeOSDistroIs(supportedNodeOsDistros ...string) bool {
for _, distro := range supportedNodeOsDistros {
if strings.EqualFold(distro, TestContext.NodeOSDistro) {
return true
}
}
return false
}
func ProxyMode(f *Framework) (string, error) {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "kube-proxy-mode-detector",
Namespace: f.Namespace.Name,
},
Spec: v1.PodSpec{
HostNetwork: true,
Containers: []v1.Container{
{
Name: "detector",
Image: imageutils.GetE2EImage(imageutils.Net),
Command: []string{"/bin/sleep", "3600"},
},
},
},
}
f.PodClient().CreateSync(pod)
defer f.PodClient().DeleteSync(pod.Name, &metav1.DeleteOptions{}, DefaultPodDeletionTimeout)
cmd := "curl -q -s --connect-timeout 1 http://localhost:10249/proxyMode"
stdout, err := RunHostCmd(pod.Namespace, pod.Name, cmd)
if err != nil {
return "", err
}
Logf("ProxyMode: %s", stdout)
return stdout, nil
}
func SkipUnlessServerVersionGTE(v *utilversion.Version, c discovery.ServerVersionInterface) {
gte, err := ServerVersionGTE(v, c)
if err != nil {
Failf("Failed to get server version: %v", err)
}
if !gte {
Skipf("Not supported for server versions before %q", v)
}
}
func SkipIfMissingResource(clientPool dynamic.ClientPool, gvr schema.GroupVersionResource, namespace string) {
dynamicClient, err := clientPool.ClientForGroupVersionResource(gvr)
if err != nil {
Failf("Unexpected error getting dynamic client for %v: %v", gvr.GroupVersion(), err)
}
apiResource := metav1.APIResource{Name: gvr.Resource, Namespaced: true}
_, err = dynamicClient.Resource(&apiResource, namespace).List(metav1.ListOptions{})
if err != nil {
// not all resources support list, so we ignore those
if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) {
Skipf("Could not find %s resource, skipping test: %#v", gvr, err)
}
Failf("Unexpected error getting %v: %v", gvr, err)
}
}
// ProvidersWithSSH are those providers where each node is accessible with SSH
var ProvidersWithSSH = []string{"gce", "gke", "aws", "local"}
type podCondition func(pod *v1.Pod) (bool, error)
// logPodStates logs basic info of provided pods for debugging.
func logPodStates(pods []v1.Pod) {
// Find maximum widths for pod, node, and phase strings for column printing.
maxPodW, maxNodeW, maxPhaseW, maxGraceW := len("POD"), len("NODE"), len("PHASE"), len("GRACE")
for i := range pods {
pod := &pods[i]
if len(pod.ObjectMeta.Name) > maxPodW {
maxPodW = len(pod.ObjectMeta.Name)
}
if len(pod.Spec.NodeName) > maxNodeW {
maxNodeW = len(pod.Spec.NodeName)
}
if len(pod.Status.Phase) > maxPhaseW {
maxPhaseW = len(pod.Status.Phase)
}
}
// Increase widths by one to separate by a single space.
maxPodW++
maxNodeW++
maxPhaseW++
maxGraceW++
// Log pod info. * does space padding, - makes them left-aligned.
Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
maxPodW, "POD", maxNodeW, "NODE", maxPhaseW, "PHASE", maxGraceW, "GRACE", "CONDITIONS")
for _, pod := range pods {
grace := ""
if pod.DeletionGracePeriodSeconds != nil {
grace = fmt.Sprintf("%ds", *pod.DeletionGracePeriodSeconds)
}
Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
maxPodW, pod.ObjectMeta.Name, maxNodeW, pod.Spec.NodeName, maxPhaseW, pod.Status.Phase, maxGraceW, grace, pod.Status.Conditions)
}
Logf("") // Final empty line helps for readability.
}
// errorBadPodsStates creates an error message with basic info about bad pods for debugging.
func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration) string {
errStr := fmt.Sprintf("%d / %d pods in namespace %q are NOT in %s state in %v\n", len(badPods), desiredPods, ns, desiredState, timeout)
// Print bad pods info only if there are at most 10 bad pods
if len(badPods) > 10 {
return errStr + "There are too many bad pods. Please check log for details."
}
buf := bytes.NewBuffer(nil)
w := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0)
fmt.Fprintln(w, "POD\tNODE\tPHASE\tGRACE\tCONDITIONS")
for _, badPod := range badPods {
grace := ""
if badPod.DeletionGracePeriodSeconds != nil {
grace = fmt.Sprintf("%ds", *badPod.DeletionGracePeriodSeconds)
}
podInfo := fmt.Sprintf("%s\t%s\t%s\t%s\t%+v",
badPod.ObjectMeta.Name, badPod.Spec.NodeName, badPod.Status.Phase, grace, badPod.Status.Conditions)
fmt.Fprintln(w, podInfo)
}
w.Flush()
return errStr + buf.String()
}
// WaitForPodsSuccess waits until all pods matching the given selector enter
// the Succeeded phase. The caller is expected to only invoke this method once
// the pods have been created.
func WaitForPodsSuccess(c clientset.Interface, ns string, successPodLabels map[string]string, timeout time.Duration) error {
successPodSelector := labels.SelectorFromSet(successPodLabels)
start, badPods, desiredPods := time.Now(), []v1.Pod{}, 0
if wait.PollImmediate(30*time.Second, timeout, func() (bool, error) {
podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: successPodSelector.String()})
if err != nil {
Logf("Error getting pods in namespace %q: %v", ns, err)
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
if len(podList.Items) == 0 {
Logf("Waiting for pods to enter Success, but no pods in %q match label %v", ns, successPodLabels)
return true, nil
}
badPods = []v1.Pod{}
desiredPods = len(podList.Items)
for _, pod := range podList.Items {
if pod.Status.Phase != v1.PodSucceeded {
badPods = append(badPods, pod)
}
}
successPods := len(podList.Items) - len(badPods)
Logf("%d / %d pods in namespace %q are in Success state (%d seconds elapsed)",
successPods, len(podList.Items), ns, int(time.Since(start).Seconds()))
if len(badPods) == 0 {
return true, nil
}
return false, nil
}) != nil {
logPodStates(badPods)
LogPodsWithLabels(c, ns, successPodLabels, Logf)
return errors.New(errorBadPodsStates(badPods, desiredPods, ns, "SUCCESS", timeout))
}
return nil
}
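// Example (illustrative sketch; c is an assumed clientset.Interface): during
// cluster startup, require at least one running-and-ready pod in kube-system,
// tolerating zero not-ready pods and ignoring the image-puller pods:
//
//	err := WaitForPodsRunningReady(c, metav1.NamespaceSystem, 1, 0, PodReadyBeforeTimeout, ImagePullerLabels)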
// WaitForPodsRunningReady waits up to timeout to ensure that all pods in
// namespace ns are either running and ready, or failed but controlled by a
// controller. Also, it ensures that at least minPods are running and
// ready. It has separate behavior from other 'wait for' pods functions in
// that it requests the list of pods on every iteration. This is useful, for
// example, in cluster startup, because the number of pods increases while
// waiting. Pods that are in the SUCCESS state are not counted.
//
// If ignoreLabels is not empty, pods matching this selector are ignored.
func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
ignoreSelector := labels.SelectorFromSet(ignoreLabels)
start := time.Now()
Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
timeout, minPods, ns)
wg := sync.WaitGroup{}
wg.Add(1)
var ignoreNotReady bool
badPods := []v1.Pod{}
desiredPods := 0
notReady := int32(0)
if wait.PollImmediate(Poll, timeout, func() (bool, error) {
// We get the new list of pods, replication controllers, and
// replica sets in every iteration because more pods come
// online during startup and we want to ensure they are also
// checked.
replicas, replicaOk := int32(0), int32(0)
rcList, err := c.CoreV1().ReplicationControllers(ns).List(metav1.ListOptions{})
if err != nil {
Logf("Error getting replication controllers in namespace '%s': %v", ns, err)
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
for _, rc := range rcList.Items {
replicas += *rc.Spec.Replicas
replicaOk += rc.Status.ReadyReplicas
}
rsList, err := c.ExtensionsV1beta1().ReplicaSets(ns).List(metav1.ListOptions{})
if err != nil {
Logf("Error getting replication sets in namespace %q: %v", ns, err)
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
for _, rs := range rsList.Items {
replicas += *rs.Spec.Replicas
replicaOk += rs.Status.ReadyReplicas
}
podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
if err != nil {
Logf("Error getting pods in namespace '%s': %v", ns, err)
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
nOk := int32(0)
notReady = int32(0)
badPods = []v1.Pod{}
desiredPods = len(podList.Items)
for _, pod := range podList.Items {
if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(pod.Labels)) {
continue
}
res, err := testutil.PodRunningReady(&pod)
switch {
case res && err == nil:
nOk++
case pod.Status.Phase == v1.PodSucceeded:
Logf("The status of Pod %s is Succeeded which is unexpected", pod.ObjectMeta.Name)
badPods = append(badPods, pod)
// it doesn't make sense to wait for this pod
return false, errors.New("unexpected Succeeded pod state")
case pod.Status.Phase != v1.PodFailed:
Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase)
notReady++
badPods = append(badPods, pod)
default:
if metav1.GetControllerOf(&pod) == nil {
Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name)
badPods = append(badPods, pod)
}
//ignore failed pods that are controlled by some controller
}
}
Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)",
nOk, len(podList.Items), ns, int(time.Since(start).Seconds()))
Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk)
if replicaOk == replicas && nOk >= minPods && len(badPods) == 0 {
return true, nil
}
ignoreNotReady = (notReady <= allowedNotReadyPods)
logPodStates(badPods)
return false, nil
}) != nil {
if !ignoreNotReady {
return errors.New(errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout))
}
Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods)
}
return nil
}
func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string, logFunc func(format string, args ...interface{})) {
for _, container := range pod.Spec.Containers {
if strings.Contains(container.Name, containerNameSubstr) {
// Contains() matches all strings if substr is empty
logs, err := GetPodLogs(c, pod.Namespace, pod.Name, container.Name)
if err != nil {
logs, err = getPreviousPodLogs(c, pod.Namespace, pod.Name, container.Name)
if err != nil {
logFunc("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container.Name, err)
}
}
logFunc("Logs of %v/%v:%v on node %v", pod.Namespace, pod.Name, container.Name, pod.Spec.NodeName)
logFunc("%s : STARTLOG\n%s\nENDLOG for container %v:%v:%v", containerNameSubstr, logs, pod.Namespace, pod.Name, container.Name)
}
}
}
func LogFailedContainers(c clientset.Interface, ns string, logFunc func(format string, args ...interface{})) {
podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
if err != nil {
logFunc("Error getting pods in namespace '%s': %v", ns, err)
return
}
logFunc("Running kubectl logs on non-ready containers in %v", ns)
for _, pod := range podList.Items {
if res, err := testutil.PodRunningReady(&pod); !res || err != nil {
kubectlLogPod(c, pod, "", Logf)
}
}
}
func LogPodsWithLabels(c clientset.Interface, ns string, match map[string]string, logFunc func(format string, args ...interface{})) {
podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()})
if err != nil {
logFunc("Error getting pods in namespace %q: %v", ns, err)
return
}
logFunc("Running kubectl logs on pods with labels %v in %v", match, ns)
for _, pod := range podList.Items {
kubectlLogPod(c, pod, "", logFunc)
}
}
func LogContainersInPodsWithLabels(c clientset.Interface, ns string, match map[string]string, containerSubstr string, logFunc func(format string, args ...interface{})) {
podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()})
if err != nil {
Logf("Error getting pods in namespace %q: %v", ns, err)
return
}
for _, pod := range podList.Items {
kubectlLogPod(c, pod, containerSubstr, logFunc)
}
}
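// Example (illustrative sketch; c is an assumed clientset.Interface): delete
// all e2e test namespaces except those matching a skip pattern, then wait for
// them to vanish:
//
//	deleted, err := DeleteNamespaces(c, []string{"e2e-tests-"}, []string{"keep-me"})
//	if err == nil {
//		err = WaitForNamespacesDeleted(c, deleted, NamespaceCleanupTimeout)
//	}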
// DeleteNamespaces deletes all namespaces that match the given delete and skip filters.
// Filter is by simple strings.Contains; first skip filter, then delete filter.
// Returns the list of deleted namespaces or an error.
func DeleteNamespaces(c clientset.Interface, deleteFilter, skipFilter []string) ([]string, error) {
By("Deleting namespaces")
nsList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
var deleted []string
var wg sync.WaitGroup
OUTER:
for _, item := range nsList.Items {
if skipFilter != nil {
for _, pattern := range skipFilter {
if strings.Contains(item.Name, pattern) {
continue OUTER
}
}
}
if deleteFilter != nil {
var shouldDelete bool
for _, pattern := range deleteFilter {
if strings.Contains(item.Name, pattern) {
shouldDelete = true
break
}
}
if !shouldDelete {
continue OUTER
}
}
wg.Add(1)
deleted = append(deleted, item.Name)
go func(nsName string) {
defer wg.Done()
defer GinkgoRecover()
Expect(c.CoreV1().Namespaces().Delete(nsName, nil)).To(Succeed())
Logf("namespace : %v api call to delete is complete ", nsName)
}(item.Name)
}
wg.Wait()
return deleted, nil
}
func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeout time.Duration) error {
By("Waiting for namespaces to vanish")
nsMap := map[string]bool{}
for _, ns := range namespaces {
nsMap[ns] = true
}
// Now poll until all namespaces have been eradicated.
return wait.Poll(2*time.Second, timeout,
func() (bool, error) {
nsList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{})
if err != nil {
return false, err
}
for _, item := range nsList.Items {
if _, ok := nsMap[item.Name]; ok {
return false, nil
}
}
return true, nil
})
}
func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountName string, timeout time.Duration) error {
w, err := c.CoreV1().ServiceAccounts(ns).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: serviceAccountName}))
if err != nil {
return err
}
_, err = watch.Until(timeout, w, conditions.ServiceAccountHasSecrets)
return err
}
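// Example (illustrative sketch; c, ns, and podName are assumed): any
// podCondition closure can be supplied to WaitForPodCondition below, e.g. to
// wait for a pod to report a non-empty IP:
//
//	err := WaitForPodCondition(c, ns, podName, "has IP", PodStartTimeout,
//		func(pod *v1.Pod) (bool, error) {
//			return pod.Status.PodIP != "", nil
//		})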
func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeout time.Duration, condition podCondition) error {
Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, desc)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
Logf("Pod %q in namespace %q not found. Error: %v", podName, ns, err)
return err
}
Logf("Get pod %q in namespace %q failed, ignoring for %v. Error: %v", podName, ns, Poll, err)
continue
}
// log now so that current pod info is reported before calling `condition()`
Logf("Pod %q: Phase=%q, Reason=%q, readiness=%t. Elapsed: %v",
podName, pod.Status.Phase, pod.Status.Reason, podutil.IsPodReady(pod), time.Since(start))
if done, err := condition(pod); done {
if err == nil {
Logf("Pod %q satisfied condition %q", podName, desc)
}
return err
}
}
return fmt.Errorf("Gave up after waiting %v for pod %q to be %q", timeout, podName, desc)
}
// WaitForMatchPodsCondition finds matching pods based on the input ListOptions,
// then waits and checks that all matching pods are in the given podCondition.
func WaitForMatchPodsCondition(c clientset.Interface, opts metav1.ListOptions, desc string, timeout time.Duration, condition podCondition) error {
Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(opts)
if err != nil {
return err
}
conditionNotMatch := []string{}
for _, pod := range pods.Items {
done, err := condition(&pod)
if done && err != nil {
return fmt.Errorf("Unexpected error: %v", err)
}
if !done {
conditionNotMatch = append(conditionNotMatch, format.Pod(&pod))
}
}
if len(conditionNotMatch) <= 0 {
return err
}
Logf("%d pods are not %s: %v", len(conditionNotMatch), desc, conditionNotMatch)
}
return fmt.Errorf("gave up waiting for matching pods to be '%s' after %v", desc, timeout)
}
// WaitForDefaultServiceAccountInNamespace waits for the default service account to be provisioned.
// The default service account is what is associated with pods when they do not specify a service account;
// as a result, pods cannot be provisioned in a namespace until the service account is provisioned.
func WaitForDefaultServiceAccountInNamespace(c clientset.Interface, namespace string) error {
return waitForServiceAccountInNamespace(c, namespace, "default", ServiceAccountProvisionTimeout)
}
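// Example (illustrative sketch; c and pvName are assumed): wait for a PV to
// bind using the helper below together with the Poll interval and the
// PVBindingTimeout constant defined above:
//
//	err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, Poll, PVBindingTimeout)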
// WaitForPersistentVolumePhase waits for a PersistentVolume to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
if err != nil {
Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err)
continue
} else {
if pv.Status.Phase == phase {
Logf("PersistentVolume %s found and phase=%s (%v)", pvName, phase, time.Since(start))
return nil
} else {
Logf("PersistentVolume %s found but phase is %s instead of %s.", pvName, pv.Status.Phase, phase)
}
}
}
return fmt.Errorf("PersistentVolume %s not in phase %s within %v", pvName, phase, timeout)
}
// WaitForPersistentVolumeDeleted waits for a PersistentVolume to get deleted or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolume %s to get deleted", timeout, pvName)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
if err == nil {
Logf("PersistentVolume %s found and phase=%s (%v)", pvName, pv.Status.Phase, time.Since(start))
continue
} else {
if apierrs.IsNotFound(err) {
Logf("PersistentVolume %s was removed", pvName)
return nil
} else {
Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err)
}
}
}
return fmt.Errorf("PersistentVolume %s still exists within %v", pvName, timeout)
}
// WaitForPersistentVolumeClaimPhase waits for a PersistentVolumeClaim to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeClaimPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolumeClaim %s to have phase %s", timeout, pvcName, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
if err != nil {
Logf("Failed to get claim %q, retrying in %v. Error: %v", pvcName, Poll, err)
continue
} else {
if pvc.Status.Phase == phase {
Logf("PersistentVolumeClaim %s found and phase=%s (%v)", pvcName, phase, time.Since(start))
return nil
} else {
Logf("PersistentVolumeClaim %s found but phase is %s instead of %s.", pvcName, pvc.Status.Phase, phase)
}
}
}
return fmt.Errorf("PersistentVolumeClaim %s not in phase %s within %v", pvcName, phase, timeout)
}
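// Example (illustrative sketch; c is an assumed clientset.Interface): create a
// uniquely named, labeled test namespace with CreateTestingNS below:
//
//	ns, err := CreateTestingNS("density", c, map[string]string{"suite": "perf"})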
// CreateTestingNS should be used by every test, note that we append a common prefix to the provided test name.
// Please use NewFramework instead of calling this directly.
func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error) {
if labels == nil {
labels = map[string]string{}
}
labels["e2e-run"] = string(RunId)
namespaceObj := &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
GenerateName: fmt.Sprintf("e2e-tests-%v-", baseName),
Namespace: "",
Labels: labels,
},
Status: v1.NamespaceStatus{},
}
// Be robust about making the namespace creation call.
var got *v1.Namespace
if err := wait.PollImmediate(Poll, 30*time.Second, func() (bool, error) {
var err error
got, err = c.CoreV1().Namespaces().Create(namespaceObj)
if err != nil {
Logf("Unexpected error while creating namespace: %v", err)
return false, nil
}
return true, nil
}); err != nil {
return nil, err
}
if TestContext.VerifyServiceAccount {
if err := WaitForDefaultServiceAccountInNamespace(c, got.Name); err != nil {
// Even if we fail to create a serviceAccount in the namespace,
// we have successfully created the namespace,
// so return the created namespace.
return got, err
}
}
return got, nil
}
// CheckTestingNSDeletedExcept checks whether all e2e-based existing namespaces are in the Terminating state,
// and waits until they are finally deleted. It ignores the namespace named by skip.
func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error {
// TODO: Since we don't have support for bulk resource deletion in the API,
// while deleting a namespace we are deleting all objects from that namespace
// one by one (one deletion == one API call). This basically exposes us to
// throttling - currently controller-manager has a limit of max 20 QPS.
// Once #10217 is implemented and used in namespace-controller, deleting all
// object from a given namespace should be much faster and we will be able
// to lower this timeout.
// However, now Density test is producing ~26000 events and Load capacity test
// is producing ~35000 events, thus assuming there are no other requests it will
// take ~30 minutes to fully delete the namespace. Thus I'm setting it to 60
// minutes to avoid any timeouts here.
timeout := 60 * time.Minute
Logf("Waiting for terminating namespaces to be deleted...")
for start := time.Now(); time.Since(start) < timeout; time.Sleep(15 * time.Second) {
namespaces, err := c.CoreV1().Namespaces().List(metav1.ListOptions{})
if err != nil {
Logf("Listing namespaces failed: %v", err)
continue
}
terminating := 0
for _, ns := range namespaces.Items {
if strings.HasPrefix(ns.ObjectMeta.Name, "e2e-tests-") && ns.ObjectMeta.Name != skip {
if ns.Status.Phase == v1.NamespaceActive {
return fmt.Errorf("Namespace %s is active", ns.ObjectMeta.Name)
}
terminating++
}
}
if terminating == 0 {
return nil
}
}
return fmt.Errorf("Waiting for terminating namespaces to be deleted timed out")
}
// deleteNS deletes the provided namespace, waits for it to be completely deleted, and then checks
// whether there are any pods remaining in a non-terminating state.
func deleteNS(c clientset.Interface, clientPool dynamic.ClientPool, namespace string, timeout time.Duration) error {
startTime := time.Now()
if err := c.CoreV1().Namespaces().Delete(namespace, nil); err != nil {
return err
}
// wait for namespace to delete or timeout.
err := wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
if _, err := c.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{}); err != nil {
if apierrs.IsNotFound(err) {
return true, nil
}
Logf("Error while waiting for namespace to be terminated: %v", err)
return false, nil
}
return false, nil
})
// verify there is no more remaining content in the namespace
remainingContent, cerr := hasRemainingContent(c, clientPool, namespace)
if cerr != nil {
return cerr
}
// if content remains, let's dump information about the namespace, and system for flake debugging.
remainingPods := 0
missingTimestamp := 0
if remainingContent {
// log information about namespace, and set of namespaces in api server to help flake detection
logNamespace(c, namespace)
logNamespaces(c, namespace)
// if we can, check if there were pods remaining with no timestamp.
remainingPods, missingTimestamp, _ = countRemainingPods(c, namespace)
}
// a timeout waiting for namespace deletion happened!
if err != nil {
// some content remains in the namespace
if remainingContent {
// pods remain
if remainingPods > 0 {
if missingTimestamp != 0 {
// pods remained, but were not undergoing deletion (namespace controller is probably culprit)
return fmt.Errorf("namespace %v was not deleted with limit: %v, pods remaining: %v, pods missing deletion timestamp: %v", namespace, err, remainingPods, missingTimestamp)
}
// but they were all undergoing deletion (kubelet is probably culprit, check NodeLost)
return fmt.Errorf("namespace %v was not deleted with limit: %v, pods remaining: %v", namespace, err, remainingPods)
}
// other content remains (namespace controller is probably screwed up)
return fmt.Errorf("namespace %v was not deleted with limit: %v, namespaced content other than pods remain", namespace, err)
}
// no remaining content, but namespace was not deleted (namespace controller is probably wedged)
return fmt.Errorf("namespace %v was not deleted with limit: %v, namespace is empty but is not yet removed", namespace, err)
}
Logf("namespace %v deletion completed in %s", namespace, time.Now().Sub(startTime))
return nil
}
// logNamespaces logs the number of namespaces by phase
// namespace is the namespace the test was operating against that failed to delete so it can be grepped in logs
func logNamespaces(c clientset.Interface, namespace string) {
namespaceList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{})
if err != nil {
Logf("namespace: %v, unable to list namespaces: %v", namespace, err)
return
}
numActive := 0
numTerminating := 0
for _, namespace := range namespaceList.Items {
if namespace.Status.Phase == v1.NamespaceActive {
numActive++
} else {
numTerminating++
}
}
Logf("namespace: %v, total namespaces: %v, active: %v, terminating: %v", namespace, len(namespaceList.Items), numActive, numTerminating)
}
// logNamespace logs detail about a namespace
func logNamespace(c clientset.Interface, namespace string) {
ns, err := c.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
Logf("namespace: %v no longer exists", namespace)
return
}
Logf("namespace: %v, unable to get namespace due to error: %v", namespace, err)
return
}
Logf("namespace: %v, DeletionTimetamp: %v, Finalizers: %v, Phase: %v", ns.Name, ns.DeletionTimestamp, ns.Spec.Finalizers, ns.Status.Phase)
}
// countRemainingPods queries the server to count the number of remaining pods, and the number of pods that have a missing deletion timestamp.
func countRemainingPods(c clientset.Interface, namespace string) (int, int, error) {
// check for remaining pods
pods, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{})
if err != nil {
return 0, 0, err
}
// nothing remains!
if len(pods.Items) == 0 {
return 0, 0, nil
}
// stuff remains, log about it
logPodStates(pods.Items)
// check if there were any pods with missing deletion timestamp
numPods := len(pods.Items)
missingTimestamp := 0
for _, pod := range pods.Items {
if pod.DeletionTimestamp == nil {
missingTimestamp++
}
}
return numPods, missingTimestamp, nil
}
// isDynamicDiscoveryError returns true if the error is a group discovery error
// only for groups expected to be created/deleted dynamically during e2e tests
func isDynamicDiscoveryError(err error) bool {
if !discovery.IsGroupDiscoveryFailedError(err) {
return false
}
discoveryErr := err.(*discovery.ErrGroupDiscoveryFailed)
for gv := range discoveryErr.Groups {
switch gv.Group {
case "mygroup.example.com":
// custom_resource_definition
// garbage_collector
case "wardle.k8s.io":
// aggregator
default:
Logf("discovery error for unexpected group: %#v", gv)
return false
}
}
return true
}
// hasRemainingContent checks if there is remaining content in the namespace via API discovery
func hasRemainingContent(c clientset.Interface, clientPool dynamic.ClientPool, namespace string) (bool, error) {
// some tests generate their own framework.Client rather than the default
// TODO: ensure every test call has a configured clientPool
if clientPool == nil {
return false, nil
}
// find out what content is supported on the server
// Since extension apiserver is not always available, e.g. metrics server sometimes goes down,
// add retry here.
resources, err := waitForServerPreferredNamespacedResources(c.Discovery(), 30*time.Second)
if err != nil {
return false, err
}
groupVersionResources, err := discovery.GroupVersionResources(resources)
if err != nil {
return false, err
}
// TODO: temporary hack for https://github.com/kubernetes/kubernetes/issues/31798
ignoredResources := sets.NewString("bindings")
contentRemaining := false
// log how many objects of each resource type remain on the server.
for gvr := range groupVersionResources {
// get a client for this group version...
dynamicClient, err := clientPool.ClientForGroupVersionResource(gvr)
if err != nil {
// not all resource types support list, so some errors here are normal depending on the resource type.
Logf("namespace: %s, unable to get client - gvr: %v, error: %v", namespace, gvr, err)
continue
}
// get the api resource
apiResource := metav1.APIResource{Name: gvr.Resource, Namespaced: true}
// TODO: temporary hack for https://github.com/kubernetes/kubernetes/issues/31798
if ignoredResources.Has(apiResource.Name) {
Logf("namespace: %s, resource: %s, ignored listing per whitelist", namespace, apiResource.Name)
continue
}
obj, err := dynamicClient.Resource(&apiResource, namespace).List(metav1.ListOptions{})
if err != nil {
// not all resources support list, so we ignore those
if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) {
continue
}
// skip unavailable servers
if apierrs.IsServiceUnavailable(err) {
continue
}
return false, err
}
unstructuredList, ok := obj.(*unstructured.UnstructuredList)
if !ok {
return false, fmt.Errorf("namespace: %s, resource: %s, expected *unstructured.UnstructuredList, got %#v", namespace, apiResource.Name, obj)
}
if len(unstructuredList.Items) > 0 {
Logf("namespace: %s, resource: %s, items remaining: %v", namespace, apiResource.Name, len(unstructuredList.Items))
contentRemaining = true
}
}
return contentRemaining, nil
}
func ContainerInitInvariant(older, newer runtime.Object) error {
oldPod := older.(*v1.Pod)
newPod := newer.(*v1.Pod)
if len(oldPod.Spec.InitContainers) == 0 {
return nil
}
if len(oldPod.Spec.InitContainers) != len(newPod.Spec.InitContainers) {
return fmt.Errorf("init container list changed")
}
if oldPod.UID != newPod.UID {
return fmt.Errorf("two different pods exist in the condition: %s vs %s", oldPod.UID, newPod.UID)
}
if err := initContainersInvariants(oldPod); err != nil {
return err
}
if err := initContainersInvariants(newPod); err != nil {
return err
}
oldInit, _, _ := podInitialized(oldPod)
newInit, _, _ := podInitialized(newPod)
if oldInit && !newInit {
// TODO: we may in the future enable resetting PodInitialized = false if the kubelet needs to restart it
// from scratch
return fmt.Errorf("pod cannot be initialized and then regress to not being initialized")
}
return nil
}
func podInitialized(pod *v1.Pod) (ok bool, failed bool, err error) {
allInit := true
initFailed := false
for _, s := range pod.Status.InitContainerStatuses {
switch {
case initFailed && s.State.Waiting == nil:
return allInit, initFailed, fmt.Errorf("container %s is after a failed container but isn't waiting", s.Name)
case allInit && s.State.Waiting == nil:
return allInit, initFailed, fmt.Errorf("container %s is after an initializing container but isn't waiting", s.Name)
case s.State.Terminated == nil:
allInit = false
case s.State.Terminated.ExitCode != 0:
allInit = false
initFailed = true
case !s.Ready:
return allInit, initFailed, fmt.Errorf("container %s initialized but isn't marked as ready", s.Name)
}
}
return allInit, initFailed, nil
}
func initContainersInvariants(pod *v1.Pod) error {
allInit, initFailed, err := podInitialized(pod)
if err != nil {
return err
}
if !allInit || initFailed {
for _, s := range pod.Status.ContainerStatuses {
if s.State.Waiting == nil || s.RestartCount != 0 {
return fmt.Errorf("container %s is not waiting but initialization not complete", s.Name)
}
if s.State.Waiting.Reason != "PodInitializing" {
return fmt.Errorf("container %s should have reason PodInitializing: %s", s.Name, s.State.Waiting.Reason)
}
}
}
_, c := podutil.GetPodCondition(&pod.Status, v1.PodInitialized)
if c == nil {
return fmt.Errorf("pod does not have initialized condition")
}
if c.LastTransitionTime.IsZero() {
return fmt.Errorf("PodInitialized condition should always have a transition time")
}
switch {
case c.Status == v1.ConditionUnknown:
return fmt.Errorf("PodInitialized condition should never be Unknown")
case c.Status == v1.ConditionTrue && (initFailed || !allInit):
return fmt.Errorf("PodInitialized condition was True but all not all containers initialized")
case c.Status == v1.ConditionFalse && (!initFailed && allInit):
return fmt.Errorf("PodInitialized condition was False but all containers initialized")
}
return nil
}
type InvariantFunc func(older, newer runtime.Object) error
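// Example (illustrative sketch; events is an assumed []watch.Event collected
// from a pod watch): CheckInvariants below applies each InvariantFunc to every
// adjacent pair of events and aggregates any violations:
//
//	if err := CheckInvariants(events, ContainerInitInvariant); err != nil {
//		Failf("init container invariant violated: %v", err)
//	}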
func CheckInvariants(events []watch.Event, fns ...InvariantFunc) error {
errs := sets.NewString()
for i := range events {
j := i + 1
if j >= len(events) {
continue
}
for _, fn := range fns {
if err := fn(events[i].Object, events[j].Object); err != nil {
errs.Insert(err.Error())
}
}
}
if errs.Len() > 0 {
return fmt.Errorf("invariants violated:\n* %s", strings.Join(errs.List(), "\n* "))
}
return nil
}
// Waits default amount of time (PodStartTimeout) for the specified pod to become running.
// Returns an error if timeout occurs first, or if the pod goes into the failed state.
func WaitForPodRunningInNamespace(c clientset.Interface, pod *v1.Pod) error {
if pod.Status.Phase == v1.PodRunning {
return nil
}
return waitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, PodStartTimeout)
}
// Waits default amount of time (PodStartTimeout) for the specified pod to become running.
// Returns an error if timeout occurs first, or if the pod goes into the failed state.
func WaitForPodNameRunningInNamespace(c clientset.Interface, podName, namespace string) error {
return waitTimeoutForPodRunningInNamespace(c, podName, namespace, PodStartTimeout)
}
// Waits an extended amount of time (slowPodStartTimeout) for the specified pod
// to become running. Returns an error if timeout occurs first, or if the pod
// goes into the failed state.
func waitForPodRunningInNamespaceSlow(c clientset.Interface, podName, namespace string) error {
return waitTimeoutForPodRunningInNamespace(c, podName, namespace, slowPodStartTimeout)
}
func waitTimeoutForPodRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
return wait.PollImmediate(Poll, timeout, podRunning(c, podName, namespace))
}
func podRunning(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case v1.PodRunning:
return true, nil
case v1.PodFailed, v1.PodSucceeded:
return false, conditions.ErrPodCompleted
}
return false, nil
}
}
// Waits default amount of time (DefaultPodDeletionTimeout) for the specified pod to stop running.
// Returns an error if timeout occurs first.
func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string) error {
return WaitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, DefaultPodDeletionTimeout)
}
func WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
return wait.PollImmediate(Poll, timeout, podCompleted(c, podName, namespace))
}
func podCompleted(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case v1.PodFailed, v1.PodSucceeded:
return true, nil
}
return false, nil
}
}
func waitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
return wait.PollImmediate(Poll, timeout, podRunningAndReady(c, podName, namespace))
}
func podRunningAndReady(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case v1.PodFailed, v1.PodSucceeded:
return false, conditions.ErrPodCompleted
case v1.PodRunning:
return podutil.IsPodReady(pod), nil
}
return false, nil
}
}
// WaitForPodNotPending returns an error if it took too long for the pod to go out of the pending state.
func WaitForPodNotPending(c clientset.Interface, ns, podName string) error {
return wait.PollImmediate(Poll, PodStartTimeout, podNotPending(c, podName, ns))
}
func podNotPending(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
if err != nil {
return false, err
}
switch pod.Status.Phase {
case v1.PodPending:
return false, nil
default:
return true, nil
}
}
}
// waitForPodTerminatedInNamespace returns an error if it takes too long for the pod to terminate,
// if the pod Get api returns an error (IsNotFound or other), or if the pod failed (and thus did not
// terminate) with an unexpected reason. Typically called to test that the passed-in pod is fully
// terminated (reason==""), but may be called to detect if a pod did *not* terminate according to
// the supplied reason.
func waitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, namespace string) error {
return WaitForPodCondition(c, namespace, podName, "terminated due to deadline exceeded", PodStartTimeout, func(pod *v1.Pod) (bool, error) {
// Only consider Failed pods. Successful pods will be deleted and detected in
// waitForPodCondition's Get call returning `IsNotFound`
if pod.Status.Phase == v1.PodFailed {
if pod.Status.Reason == reason { // short-circuit waitForPodCondition's loop
return true, nil
} else {
return true, fmt.Errorf("Expected pod %q in namespace %q to be terminated with reason %q, got reason: %q", podName, namespace, reason, pod.Status.Reason)
}
}
return false, nil
})
}
// waitForPodNotFoundInNamespace returns an error if it takes too long for the pod to fully terminate.
// Unlike `waitForPodTerminatedInNamespace`, the pod's Phase and Reason are ignored. If the pod Get
// api returns IsNotFound then the wait stops and nil is returned. If the Get api returns an error other
// than "not found" then that error is returned and the wait stops.
func waitForPodNotFoundInNamespace(c clientset.Interface, podName, ns string, timeout time.Duration) error {
return wait.PollImmediate(Poll, timeout, func() (bool, error) {
_, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
if apierrs.IsNotFound(err) {
return true, nil // done
}
if err != nil {
return true, err // stop wait with error
}
return false, nil
})
}
// waitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
func waitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName string, namespace string, timeout time.Duration) error {
return WaitForPodCondition(c, namespace, podName, "success or failure", timeout, func(pod *v1.Pod) (bool, error) {
if pod.Spec.RestartPolicy == v1.RestartPolicyAlways {
return true, fmt.Errorf("pod %q will never terminate with a succeeded state since its restart policy is Always", podName)
}
switch pod.Status.Phase {
case v1.PodSucceeded:
By("Saw pod success")
return true, nil
case v1.PodFailed:
return true, fmt.Errorf("pod %q failed with status: %+v", podName, pod.Status)
default:
return false, nil
}
})
}
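// Example (illustrative sketch; c, pod, and ns are assumed): the wrappers below
// block until the pod's phase is Succeeded, and fail fast if it enters Failed
// or can never succeed (RestartPolicyAlways):
//
//	if err := WaitForPodSuccessInNamespace(c, pod.Name, ns); err != nil {
//		Failf("pod did not succeed: %v", err)
//	}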
// WaitForPodSuccessInNamespace returns nil if the pod reached the Succeeded phase, or an error if it reached failure or ran longer than PodStartTimeout.
func WaitForPodSuccessInNamespace(c clientset.Interface, podName string, namespace string) error {
return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, PodStartTimeout)
}
// WaitForPodSuccessInNamespaceSlow returns nil if the pod reached the Succeeded phase, or an error if it reached failure or ran longer than slowPodStartTimeout.
func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, namespace string) error {
return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, slowPodStartTimeout)
}
// WaitForRCToStabilize waits till the RC has a matching generation/replica count between spec and status.
func WaitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.Duration) error {
options := metav1.ListOptions{FieldSelector: fields.Set{
"metadata.name": name,
"metadata.namespace": ns,
}.AsSelector().String()}
w, err := c.CoreV1().ReplicationControllers(ns).Watch(options)
if err != nil {
return err
}
_, err = watch.Until(timeout, w, func(event watch.Event) (bool, error) {
switch event.Type {
case watch.Deleted:
return false, apierrs.NewNotFound(schema.GroupResource{Resource: "replicationcontrollers"}, "")
}
switch rc := event.Object.(type) {
case *v1.ReplicationController:
if rc.Name == name && rc.Namespace == ns &&
rc.Generation <= rc.Status.ObservedGeneration &&
*(rc.Spec.Replicas) == rc.Status.Replicas {
return true, nil
}
Logf("Waiting for rc %s to stabilize, generation %v observed generation %v spec.replicas %d status.replicas %d",
name, rc.Generation, rc.Status.ObservedGeneration, *(rc.Spec.Replicas), rc.Status.Replicas)
}
return false, nil
})
return err
}
func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
return wait.PollImmediate(interval, timeout, func() (bool, error) {
Logf("Waiting for pod %s to disappear", podName)
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns).List(options)
if err != nil {
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
found := false
for _, pod := range pods.Items {
if pod.Name == podName {
Logf("Pod %s still exists", podName)
found = true
break
}
}
if !found {
Logf("Pod %s no longer exists", podName)
return true, nil
}
return false, nil
})
}
// WaitForPodNameUnschedulableInNamespace returns an error if it takes too long for the pod to become Pending
// and have condition Status equal to Unschedulable,
// if the pod Get api returns an error (IsNotFound or other), or if the pod failed with an unexpected reason.
// Typically called to test that the passed-in pod is Pending and Unschedulable.
func WaitForPodNameUnschedulableInNamespace(c clientset.Interface, podName, namespace string) error {
return WaitForPodCondition(c, namespace, podName, "Unschedulable", PodStartTimeout, func(pod *v1.Pod) (bool, error) {
// A pod that has reached Running, Succeeded, or Failed can never become
// Unschedulable, so those phases short-circuit below with an error.
if pod.Status.Phase == v1.PodPending {
for _, cond := range pod.Status.Conditions {
if cond.Type == v1.PodScheduled && cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable" {
return true, nil
}
}
}
if pod.Status.Phase == v1.PodRunning || pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
return true, fmt.Errorf("Expected pod %q in namespace %q to be in phase Pending, but got phase: %v", podName, namespace, pod.Status.Phase)
}
return false, nil
})
}
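// Example (illustrative sketch; c and ns are assumed): wait up to two minutes,
// polling every two seconds, for a service to appear:
//
//	err := WaitForService(c, ns, "my-svc", true, 2*time.Second, 2*time.Minute)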
// WaitForService waits until the service appears (exist == true), or disappears (exist == false)
func WaitForService(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
_, err := c.CoreV1().Services(namespace).Get(name, metav1.GetOptions{})
switch {
case err == nil:
Logf("Service %s in namespace %s found.", name, namespace)
return exist, nil
case apierrs.IsNotFound(err):
Logf("Service %s in namespace %s disappeared.", name, namespace)
return !exist, nil
case !IsRetryableAPIError(err):
Logf("Non-retryable failure while getting service.")
return false, err
default:
Logf("Get service %s in namespace %s failed: %v", name, namespace, err)
return false, nil
}
})
if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
return fmt.Errorf("error waiting for service %s/%s %s: %v", namespace, name, stateMsg[exist], err)
}
return nil
}
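// Usage sketch for WaitForService (illustrative only; "nginx-svc" is a
// hypothetical service name):
//
//	// Wait for the service to appear after creation...
//	ExpectNoError(WaitForService(c, ns, "nginx-svc", true, Poll, 2*time.Minute))
//	// ...and, after deletion, for it to disappear.
//	ExpectNoError(WaitForService(c, ns, "nginx-svc", false, Poll, 2*time.Minute))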
// WaitForServiceWithSelector waits until any service with given selector appears (exist == true), or disappears (exist == false)
func WaitForServiceWithSelector(c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval,
timeout time.Duration) error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
services, err := c.CoreV1().Services(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
		// Check err before inspecting services: on a failed List call the
		// (empty) service list must not be interpreted as "disappeared".
		switch {
		case err != nil && !IsRetryableAPIError(err):
			Logf("Non-retryable failure while listing service.")
			return false, err
		case err != nil:
			Logf("List service with %s in namespace %s failed: %v", selector.String(), namespace, err)
			return false, nil
		case len(services.Items) != 0:
			Logf("Service with %s in namespace %s found.", selector.String(), namespace)
			return exist, nil
		default:
			Logf("Service with %s in namespace %s disappeared.", selector.String(), namespace)
			return !exist, nil
		}
})
if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
return fmt.Errorf("error waiting for service with %s in namespace %s %s: %v", selector.String(), namespace, stateMsg[exist], err)
}
return nil
}
// WaitForServiceEndpointsNum waits until the number of endpoints that implement the given service equals expectNum.
func WaitForServiceEndpointsNum(c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error {
return wait.Poll(interval, timeout, func() (bool, error) {
Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum)
list, err := c.CoreV1().Endpoints(namespace).List(metav1.ListOptions{})
if err != nil {
return false, err
}
for _, e := range list.Items {
if e.Name == serviceName && countEndpointsNum(&e) == expectNum {
return true, nil
}
}
return false, nil
})
}
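// Usage sketch for WaitForServiceEndpointsNum (illustrative only): a service
// backed by three ready pods should eventually expose three endpoint addresses.
// "my-svc" is a hypothetical service name.
//
//	ExpectNoError(WaitForServiceEndpointsNum(c, ns, "my-svc", 3, Poll, 2*time.Minute))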
func countEndpointsNum(e *v1.Endpoints) int {
num := 0
for _, sub := range e.Subsets {
num += len(sub.Addresses)
}
return num
}
func WaitForEndpoint(c clientset.Interface, ns, name string) error {
for t := time.Now(); time.Since(t) < EndpointRegisterTimeout; time.Sleep(Poll) {
endpoint, err := c.CoreV1().Endpoints(ns).Get(name, metav1.GetOptions{})
if apierrs.IsNotFound(err) {
Logf("Endpoint %s/%s is not ready yet", ns, name)
continue
}
Expect(err).NotTo(HaveOccurred())
if len(endpoint.Subsets) == 0 || len(endpoint.Subsets[0].Addresses) == 0 {
Logf("Endpoint %s/%s is not ready yet", ns, name)
continue
} else {
return nil
}
}
return fmt.Errorf("Failed to get endpoints for %s/%s", ns, name)
}
// podProxyResponseChecker is a context for checking pod responses by issuing GETs to them
// (via the API proxy) and verifying that they answer with their own pod name.
type podProxyResponseChecker struct {
c clientset.Interface
ns string
label labels.Selector
controllerName string
respondName bool // Whether the pod should respond with its own name.
pods *v1.PodList
}
func PodProxyResponseChecker(c clientset.Interface, ns string, label labels.Selector, controllerName string, respondName bool, pods *v1.PodList) podProxyResponseChecker {
return podProxyResponseChecker{c, ns, label, controllerName, respondName, pods}
}
// CheckAllResponses issues GETs to all pods in the context and verifies that they
// reply with their own pod name.
func (r podProxyResponseChecker) CheckAllResponses() (done bool, err error) {
successes := 0
options := metav1.ListOptions{LabelSelector: r.label.String()}
currentPods, err := r.c.CoreV1().Pods(r.ns).List(options)
Expect(err).NotTo(HaveOccurred())
for i, pod := range r.pods.Items {
// Check that the replica list remains unchanged, otherwise we have problems.
if !isElementOf(pod.UID, currentPods) {
return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. Current replica set: %v", pod.UID, currentPods)
}
ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
defer cancel()
body, err := r.c.CoreV1().RESTClient().Get().
Context(ctx).
Namespace(r.ns).
Resource("pods").
SubResource("proxy").
Name(string(pod.Name)).
Do().
Raw()
if err != nil {
if ctx.Err() != nil {
// We may encounter errors here because of a race between the pod readiness and apiserver
// proxy. So, we log the error and retry if this occurs.
Logf("Controller %s: Failed to Get from replica %d [%s]: %v\n pod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
return false, nil
}
Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
continue
}
// The response checker expects the pod's name unless !respondName, in
// which case it just checks for a non-empty response.
got := string(body)
what := ""
if r.respondName {
what = "expected"
want := pod.Name
if got != want {
Logf("Controller %s: Replica %d [%s] expected response %q but got %q",
r.controllerName, i+1, pod.Name, want, got)
continue
}
} else {
what = "non-empty"
if len(got) == 0 {
Logf("Controller %s: Replica %d [%s] expected non-empty response",
r.controllerName, i+1, pod.Name)
continue
}
}
successes++
Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far",
r.controllerName, what, i+1, pod.Name, got, successes, len(r.pods.Items))
}
if successes < len(r.pods.Items) {
return false, nil
}
return true, nil
}
// ServerVersionGTE returns true if v is greater than or equal to the server
// version.
//
// TODO(18726): This should be incorporated into client.VersionInterface.
func ServerVersionGTE(v *utilversion.Version, c discovery.ServerVersionInterface) (bool, error) {
serverVersion, err := c.ServerVersion()
if err != nil {
return false, fmt.Errorf("Unable to get server version: %v", err)
}
sv, err := utilversion.ParseSemantic(serverVersion.GitVersion)
if err != nil {
return false, fmt.Errorf("Unable to parse server version %q: %v", serverVersion.GitVersion, err)
}
return sv.AtLeast(v), nil
}
func SkipUnlessKubectlVersionGTE(v *utilversion.Version) {
gte, err := KubectlVersionGTE(v)
if err != nil {
Failf("Failed to get kubectl version: %v", err)
}
if !gte {
Skipf("Not supported for kubectl versions before %q", v)
}
}
// KubectlVersionGTE returns true if the kubectl version is greater than or
// equal to v.
func KubectlVersionGTE(v *utilversion.Version) (bool, error) {
kv, err := KubectlVersion()
if err != nil {
return false, err
}
return kv.AtLeast(v), nil
}
// KubectlVersion gets the version of kubectl that's currently being used (see
// --kubectl-path in e2e.go to use an alternate kubectl).
func KubectlVersion() (*utilversion.Version, error) {
output := RunKubectlOrDie("version", "--client")
matches := gitVersionRegexp.FindStringSubmatch(output)
if len(matches) != 2 {
return nil, fmt.Errorf("Could not find kubectl version in output %v", output)
}
// Don't use the full match, as it contains "GitVersion:\"" and a
// trailing "\"". Just use the submatch.
return utilversion.ParseSemantic(matches[1])
}
func PodsResponding(c clientset.Interface, ns, name string, wantName bool, pods *v1.PodList) error {
By("trying to dial each unique pod")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
return wait.PollImmediate(Poll, podRespondingTimeout, PodProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses)
}
func PodsCreated(c clientset.Interface, ns, name string, replicas int32) (*v1.PodList, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
return PodsCreatedByLabel(c, ns, name, replicas, label)
}
func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32, label labels.Selector) (*v1.PodList, error) {
timeout := 2 * time.Minute
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
options := metav1.ListOptions{LabelSelector: label.String()}
// List the pods, making sure we observe all the replicas.
pods, err := c.CoreV1().Pods(ns).List(options)
if err != nil {
return nil, err
}
created := []v1.Pod{}
for _, pod := range pods.Items {
if pod.DeletionTimestamp != nil {
continue
}
created = append(created, pod)
}
Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas)
if int32(len(created)) == replicas {
pods.Items = created
return pods, nil
}
}
return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas)
}
func podsRunning(c clientset.Interface, pods *v1.PodList) []error {
// Wait for the pods to enter the running state. Waiting loops until the pods
// are running so non-running pods cause a timeout for this test.
By("ensuring each pod is running")
e := []error{}
	errCh := make(chan error)
	for _, pod := range pods.Items {
		go func(p v1.Pod) {
			errCh <- WaitForPodRunningInNamespace(c, &p)
		}(pod)
	}
	for range pods.Items {
		err := <-errCh
if err != nil {
e = append(e, err)
}
}
return e
}
func VerifyPods(c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
return podRunningMaybeResponding(c, ns, name, wantName, replicas, true)
}
func VerifyPodsRunning(c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
return podRunningMaybeResponding(c, ns, name, wantName, replicas, false)
}
func podRunningMaybeResponding(c clientset.Interface, ns, name string, wantName bool, replicas int32, checkResponding bool) error {
pods, err := PodsCreated(c, ns, name, replicas)
if err != nil {
return err
}
e := podsRunning(c, pods)
if len(e) > 0 {
return fmt.Errorf("failed to wait for pods running: %v", e)
}
if checkResponding {
err = PodsResponding(c, ns, name, wantName, pods)
if err != nil {
return fmt.Errorf("failed to wait for pods responding: %v", err)
}
}
return nil
}
func ServiceResponding(c clientset.Interface, ns, name string) error {
By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name))
return wait.PollImmediate(Poll, ServiceRespondingTimeout, func() (done bool, err error) {
proxyRequest, errProxy := GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get())
if errProxy != nil {
Logf("Failed to get services proxy request: %v:", errProxy)
return false, nil
}
ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
defer cancel()
body, err := proxyRequest.Namespace(ns).
Context(ctx).
Name(name).
Do().
Raw()
if err != nil {
if ctx.Err() != nil {
Failf("Failed to GET from service %s: %v", name, err)
return true, err
}
Logf("Failed to GET from service %s: %v:", name, err)
return false, nil
}
got := string(body)
if len(got) == 0 {
Logf("Service %s: expected non-empty response", name)
return false, err // stop polling
}
Logf("Service %s: found nonempty answer: %s", name, got)
return true, nil
})
}
func RestclientConfig(kubeContext string) (*clientcmdapi.Config, error) {
Logf(">>> kubeConfig: %s", TestContext.KubeConfig)
if TestContext.KubeConfig == "" {
return nil, fmt.Errorf("KubeConfig must be specified to load client config")
}
c, err := clientcmd.LoadFromFile(TestContext.KubeConfig)
if err != nil {
return nil, fmt.Errorf("error loading KubeConfig: %v", err.Error())
}
if kubeContext != "" {
Logf(">>> kubeContext: %s", kubeContext)
c.CurrentContext = kubeContext
}
return c, nil
}
type ClientConfigGetter func() (*restclient.Config, error)
func LoadConfig() (*restclient.Config, error) {
if TestContext.NodeE2E {
// This is a node e2e test, apply the node e2e configuration
return &restclient.Config{Host: TestContext.Host}, nil
}
c, err := RestclientConfig(TestContext.KubeContext)
if err != nil {
if TestContext.KubeConfig == "" {
return restclient.InClusterConfig()
} else {
return nil, err
}
}
return clientcmd.NewDefaultClientConfig(*c, &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: TestContext.Host}}).ClientConfig()
}
func LoadInternalClientset() (*internalclientset.Clientset, error) {
config, err := LoadConfig()
if err != nil {
return nil, fmt.Errorf("error creating client: %v", err.Error())
}
return internalclientset.NewForConfig(config)
}
func LoadClientset() (*clientset.Clientset, error) {
config, err := LoadConfig()
if err != nil {
return nil, fmt.Errorf("error creating client: %v", err.Error())
}
return clientset.NewForConfig(config)
}
// randomSuffix provides a random string to append to pods, services, and RCs.
// TODO: Allow service names to have the same form as names
// for pods and replication controllers so we don't
// need to use such a function and can instead
// use the UUID utility function.
func randomSuffix() string {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
return strconv.Itoa(r.Int() % 10000)
}
func ExpectNoError(err error, explain ...interface{}) {
ExpectNoErrorWithOffset(1, err, explain...)
}
// ExpectNoErrorWithOffset checks if "err" is set, and if so, fails assertion while logging the error at "offset" levels above its caller
// (for example, for call chain f -> g -> ExpectNoErrorWithOffset(1, ...) error would be logged for "f").
func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
if err != nil {
Logf("Unexpected error occurred: %v", err)
}
ExpectWithOffset(1+offset, err).NotTo(HaveOccurred(), explain...)
}
func ExpectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interface{}) {
var err error
for i := 0; i < maxRetries; i++ {
err = fn()
if err == nil {
return
}
Logf("(Attempt %d of %d) Unexpected error occurred: %v", i+1, maxRetries, err)
}
ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...)
}
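// Usage sketch for ExpectNoErrorWithRetries (illustrative only): retrying a
// flaky create call a few times before failing the test. The pod variable is
// a hypothetical *v1.Pod built by the caller.
//
//	ExpectNoErrorWithRetries(func() error {
//		_, err := c.CoreV1().Pods(ns).Create(pod)
//		return err
//	}, 3, "failed to create pod")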
// Cleanup stops everything from filePath in namespace ns and checks that everything matching the selectors in that namespace is correctly stopped.
func Cleanup(filePath, ns string, selectors ...string) {
By("using delete to clean up resources")
var nsArg string
if ns != "" {
nsArg = fmt.Sprintf("--namespace=%s", ns)
}
RunKubectlOrDie("delete", "--grace-period=0", "-f", filePath, nsArg)
AssertCleanup(ns, selectors...)
}
// AssertCleanup asserts that cleanup of the namespace with respect to the given selectors occurred.
func AssertCleanup(ns string, selectors ...string) {
var nsArg string
if ns != "" {
nsArg = fmt.Sprintf("--namespace=%s", ns)
}
for _, selector := range selectors {
resources := RunKubectlOrDie("get", "rc,svc", "-l", selector, "--no-headers", nsArg)
if resources != "" {
Failf("Resources left running after stop:\n%s", resources)
}
pods := RunKubectlOrDie("get", "pods", "-l", selector, nsArg, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}")
if pods != "" {
Failf("Pods left unterminated after stop:\n%s", pods)
}
}
}
// KubectlCmd runs the kubectl executable through the wrapper script.
func KubectlCmd(args ...string) *exec.Cmd {
defaultArgs := []string{}
// Reference a --server option so tests can run anywhere.
if TestContext.Host != "" {
defaultArgs = append(defaultArgs, "--"+clientcmd.FlagAPIServer+"="+TestContext.Host)
}
if TestContext.KubeConfig != "" {
defaultArgs = append(defaultArgs, "--"+clientcmd.RecommendedConfigPathFlag+"="+TestContext.KubeConfig)
// Reference the KubeContext
if TestContext.KubeContext != "" {
defaultArgs = append(defaultArgs, "--"+clientcmd.FlagContext+"="+TestContext.KubeContext)
}
} else {
if TestContext.CertDir != "" {
defaultArgs = append(defaultArgs,
fmt.Sprintf("--certificate-authority=%s", filepath.Join(TestContext.CertDir, "ca.crt")),
fmt.Sprintf("--client-certificate=%s", filepath.Join(TestContext.CertDir, "kubecfg.crt")),
fmt.Sprintf("--client-key=%s", filepath.Join(TestContext.CertDir, "kubecfg.key")))
}
}
	kubectlArgs := append(defaultArgs, args...)
	// We allow users to specify a path to kubectl, so you can test either
	// "kubectl" or "cluster/kubectl.sh" and so on.
	cmd := exec.Command(TestContext.KubectlPath, kubectlArgs...)
	// The caller will invoke this and wait on it.
	return cmd
}
// kubectlBuilder is used to build, customize and execute a kubectl Command.
// Add more functions to customize the builder as needed.
type kubectlBuilder struct {
cmd *exec.Cmd
timeout <-chan time.Time
}
func NewKubectlCommand(args ...string) *kubectlBuilder {
b := new(kubectlBuilder)
b.cmd = KubectlCmd(args...)
return b
}
func (b *kubectlBuilder) WithEnv(env []string) *kubectlBuilder {
b.cmd.Env = env
return b
}
func (b *kubectlBuilder) WithTimeout(t <-chan time.Time) *kubectlBuilder {
b.timeout = t
return b
}
func (b kubectlBuilder) WithStdinData(data string) *kubectlBuilder {
b.cmd.Stdin = strings.NewReader(data)
return &b
}
func (b kubectlBuilder) WithStdinReader(reader io.Reader) *kubectlBuilder {
b.cmd.Stdin = reader
return &b
}
func (b kubectlBuilder) ExecOrDie() string {
str, err := b.Exec()
Logf("stdout: %q", str)
// In case of i/o timeout error, try talking to the apiserver again after 2s before dying.
// Note that we're still dying after retrying so that we can get visibility to triage it further.
if isTimeout(err) {
Logf("Hit i/o timeout error, talking to the server 2s later to see if it's temporary.")
time.Sleep(2 * time.Second)
retryStr, retryErr := RunKubectl("version")
Logf("stdout: %q", retryStr)
Logf("err: %v", retryErr)
}
Expect(err).NotTo(HaveOccurred())
return str
}
func isTimeout(err error) bool {
switch err := err.(type) {
case net.Error:
if err.Timeout() {
return true
}
case *url.Error:
if err, ok := err.Err.(net.Error); ok && err.Timeout() {
return true
}
}
return false
}
func (b kubectlBuilder) Exec() (string, error) {
var stdout, stderr bytes.Buffer
cmd := b.cmd
cmd.Stdout, cmd.Stderr = &stdout, &stderr
Logf("Running '%s %s'", cmd.Path, strings.Join(cmd.Args[1:], " ")) // skip arg[0] as it is printed separately
if err := cmd.Start(); err != nil {
return "", fmt.Errorf("error starting %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err)
}
errCh := make(chan error, 1)
go func() {
errCh <- cmd.Wait()
}()
select {
case err := <-errCh:
if err != nil {
			rc := 127
if ee, ok := err.(*exec.ExitError); ok {
rc = int(ee.Sys().(syscall.WaitStatus).ExitStatus())
Logf("rc: %d", rc)
}
return "", uexec.CodeExitError{
Err: fmt.Errorf("error running %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err),
Code: rc,
}
}
case <-b.timeout:
b.cmd.Process.Kill()
return "", fmt.Errorf("timed out waiting for command %v:\nCommand stdout:\n%v\nstderr:\n%v\n", cmd, cmd.Stdout, cmd.Stderr)
}
Logf("stderr: %q", stderr.String())
return stdout.String(), nil
}
// RunKubectlOrDie is a convenience wrapper over kubectlBuilder
func RunKubectlOrDie(args ...string) string {
return NewKubectlCommand(args...).ExecOrDie()
}
// RunKubectl is a convenience wrapper over kubectlBuilder
func RunKubectl(args ...string) (string, error) {
return NewKubectlCommand(args...).Exec()
}
// RunKubectlOrDieInput is a convenience wrapper over kubectlBuilder that takes input to stdin
func RunKubectlOrDieInput(data string, args ...string) string {
return NewKubectlCommand(args...).WithStdinData(data).ExecOrDie()
}
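// Usage sketches for the kubectl wrappers (illustrative only; the arguments and
// podYAML are hypothetical):
//
//	// Fire-and-assert variant:
//	out := RunKubectlOrDie("get", "pods", fmt.Sprintf("--namespace=%v", ns))
//	// Feeding a manifest via stdin:
//	RunKubectlOrDieInput(podYAML, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
//	// Builder form with a timeout:
//	out, err := NewKubectlCommand("version").WithTimeout(time.After(30 * time.Second)).Exec()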
// runKubemciWithKubeconfig is a convenience wrapper over runKubemciCmd
func runKubemciWithKubeconfig(args ...string) (string, error) {
if TestContext.KubeConfig != "" {
args = append(args, "--"+clientcmd.RecommendedConfigPathFlag+"="+TestContext.KubeConfig)
}
return runKubemciCmd(args...)
}
// runKubemciCmd is a convenience wrapper over kubectlBuilder to run kubemci.
// It assumes that kubemci exists in PATH.
func runKubemciCmd(args ...string) (string, error) {
	// kubemci is assumed to be in PATH. The kubeconfig flag is added by
	// runKubemciWithKubeconfig, so it is not appended a second time here.
	kubemci := "kubemci"
	b := new(kubectlBuilder)
args = append(args, "--gcp-project="+TestContext.CloudConfig.ProjectID)
b.cmd = exec.Command(kubemci, args...)
return b.Exec()
}
func StartCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err error) {
stdout, err = cmd.StdoutPipe()
if err != nil {
return
}
stderr, err = cmd.StderrPipe()
if err != nil {
return
}
Logf("Asynchronously running '%s %s'", cmd.Path, strings.Join(cmd.Args, " "))
err = cmd.Start()
return
}
// TryKill is a rough equivalent of ctrl+c for cleaning up processes. Intended to be run in a defer.
func TryKill(cmd *exec.Cmd) {
if err := cmd.Process.Kill(); err != nil {
Logf("ERROR failed to kill command %v! The process may leak", cmd)
}
}
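// Usage sketch for StartCmdAndStreamOutput/TryKill (illustrative only):
// streaming output from a long-running command and making sure it is reaped
// even if the test fails midway. The kubectl arguments are hypothetical.
//
//	cmd := KubectlCmd("proxy", "--port=0")
//	stdout, _, err := StartCmdAndStreamOutput(cmd)
//	ExpectNoError(err)
//	defer stdout.Close()
//	defer TryKill(cmd)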
// testContainerOutputMatcher runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using the given matcher.
func (f *Framework) testContainerOutputMatcher(scenarioName string,
pod *v1.Pod,
containerIndex int,
expectedOutput []string,
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) {
By(fmt.Sprintf("Creating a pod to test %v", scenarioName))
if containerIndex < 0 || containerIndex >= len(pod.Spec.Containers) {
Failf("Invalid container index: %d", containerIndex)
}
ExpectNoError(f.MatchContainerOutput(pod, pod.Spec.Containers[containerIndex].Name, expectedOutput, matcher))
}
// MatchContainerOutput creates a pod and waits for all its containers to exit with success.
// It then tests that the matcher with each expectedOutput matches the output of the specified container.
func (f *Framework) MatchContainerOutput(
pod *v1.Pod,
containerName string,
expectedOutput []string,
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) error {
ns := pod.ObjectMeta.Namespace
if ns == "" {
ns = f.Namespace.Name
}
podClient := f.PodClientNS(ns)
createdPod := podClient.Create(pod)
defer func() {
By("delete the pod")
podClient.DeleteSync(createdPod.Name, &metav1.DeleteOptions{}, DefaultPodDeletionTimeout)
}()
// Wait for client pod to complete.
podErr := WaitForPodSuccessInNamespace(f.ClientSet, createdPod.Name, ns)
// Grab its logs. Get host first.
podStatus, err := podClient.Get(createdPod.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get pod status: %v", err)
}
if podErr != nil {
// Pod failed. Dump all logs from all containers to see what's wrong
for _, container := range podStatus.Spec.Containers {
logs, err := GetPodLogs(f.ClientSet, ns, podStatus.Name, container.Name)
if err != nil {
Logf("Failed to get logs from node %q pod %q container %q: %v",
podStatus.Spec.NodeName, podStatus.Name, container.Name, err)
continue
}
Logf("Output of node %q pod %q container %q: %s", podStatus.Spec.NodeName, podStatus.Name, container.Name, logs)
}
return fmt.Errorf("expected pod %q success: %v", createdPod.Name, podErr)
}
Logf("Trying to get logs from node %s pod %s container %s: %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
// Sometimes the actual containers take a second to get started, try to get logs for 60s
logs, err := GetPodLogs(f.ClientSet, ns, podStatus.Name, containerName)
if err != nil {
Logf("Failed to get logs from node %q pod %q container %q. %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
return fmt.Errorf("failed to get logs from %s for %s: %v", podStatus.Name, containerName, err)
}
for _, expected := range expectedOutput {
m := matcher(expected)
matches, err := m.Match(logs)
if err != nil {
return fmt.Errorf("expected %q in container output: %v", expected, err)
} else if !matches {
return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs))
}
}
return nil
}
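// Usage sketch for MatchContainerOutput (illustrative only): asserting that a
// test container logged a marker string. pod is a hypothetical *v1.Pod with a
// single container named "main"; ContainSubstring is the dot-imported gomega matcher.
//
//	err := f.MatchContainerOutput(pod, "main", []string{"SUCCESS"}, ContainSubstring)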
type EventsLister func(opts metav1.ListOptions, ns string) (*v1.EventList, error)
func DumpEventsInNamespace(eventsLister EventsLister, namespace string) {
By(fmt.Sprintf("Collecting events from namespace %q.", namespace))
events, err := eventsLister(metav1.ListOptions{}, namespace)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Found %d events.", len(events.Items)))
// Sort events by their first timestamp
sortedEvents := events.Items
if len(sortedEvents) > 1 {
sort.Sort(byFirstTimestamp(sortedEvents))
}
for _, e := range sortedEvents {
Logf("At %v - event for %v: %v %v: %v", e.FirstTimestamp, e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
}
// Note that we don't wait for any Cleanup to propagate, which means
// that if you delete a bunch of pods right before ending your test,
// you may or may not see the killing/deletion/Cleanup events.
}
func DumpAllNamespaceInfo(c clientset.Interface, namespace string) {
DumpEventsInNamespace(func(opts metav1.ListOptions, ns string) (*v1.EventList, error) {
return c.CoreV1().Events(ns).List(opts)
}, namespace)
	// If the cluster is large, then the following logs are basically useless, because:
	// 1. it takes tens of minutes or hours to grab all of them
	// 2. there are so many of them that working with them is mostly impossible
	// So we dump them only if the cluster is relatively small.
maxNodesForDump := 20
if nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}); err == nil {
if len(nodes.Items) <= maxNodesForDump {
dumpAllPodInfo(c)
dumpAllNodeInfo(c)
} else {
Logf("skipping dumping cluster info - cluster too large")
}
} else {
Logf("unable to fetch node list: %v", err)
}
}
// byFirstTimestamp sorts a slice of events by first timestamp, using their involvedObject's name as a tie breaker.
type byFirstTimestamp []v1.Event
func (o byFirstTimestamp) Len() int { return len(o) }
func (o byFirstTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o byFirstTimestamp) Less(i, j int) bool {
if o[i].FirstTimestamp.Equal(&o[j].FirstTimestamp) {
return o[i].InvolvedObject.Name < o[j].InvolvedObject.Name
}
return o[i].FirstTimestamp.Before(&o[j].FirstTimestamp)
}
func dumpAllPodInfo(c clientset.Interface) {
pods, err := c.CoreV1().Pods("").List(metav1.ListOptions{})
if err != nil {
Logf("unable to fetch pod debug info: %v", err)
}
logPodStates(pods.Items)
}
func dumpAllNodeInfo(c clientset.Interface) {
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
Logf("unable to fetch node list: %v", err)
return
}
names := make([]string, len(nodes.Items))
for ix := range nodes.Items {
names[ix] = nodes.Items[ix].Name
}
DumpNodeDebugInfo(c, names, Logf)
}
func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) {
for _, n := range nodeNames {
logFunc("\nLogging node info for node %v", n)
node, err := c.CoreV1().Nodes().Get(n, metav1.GetOptions{})
if err != nil {
logFunc("Error getting node info %v", err)
}
logFunc("Node Info: %v", node)
logFunc("\nLogging kubelet events for node %v", n)
for _, e := range getNodeEvents(c, n) {
logFunc("source %v type %v message %v reason %v first ts %v last ts %v, involved obj %+v",
e.Source, e.Type, e.Message, e.Reason, e.FirstTimestamp, e.LastTimestamp, e.InvolvedObject)
}
logFunc("\nLogging pods the kubelet thinks is on node %v", n)
podList, err := GetKubeletPods(c, n)
if err != nil {
logFunc("Unable to retrieve kubelet pods for node %v: %v", n, err)
continue
}
for _, p := range podList.Items {
logFunc("%v started at %v (%d+%d container statuses recorded)", p.Name, p.Status.StartTime, len(p.Status.InitContainerStatuses), len(p.Status.ContainerStatuses))
for _, c := range p.Status.InitContainerStatuses {
logFunc("\tInit container %v ready: %v, restart count %v",
c.Name, c.Ready, c.RestartCount)
}
for _, c := range p.Status.ContainerStatuses {
logFunc("\tContainer %v ready: %v, restart count %v",
c.Name, c.Ready, c.RestartCount)
}
}
HighLatencyKubeletOperations(c, 10*time.Second, n, logFunc)
// TODO: Log node resource info
}
}
// getNodeEvents returns the kubelet events for the given node. This includes
// kubelet restart and node unhealthy events. Note that listing events like this
// will mess with latency metrics, so beware of calling it during a test.
func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event {
selector := fields.Set{
"involvedObject.kind": "Node",
"involvedObject.name": nodeName,
"involvedObject.namespace": metav1.NamespaceAll,
"source": "kubelet",
}.AsSelector().String()
options := metav1.ListOptions{FieldSelector: selector}
events, err := c.CoreV1().Events(metav1.NamespaceSystem).List(options)
if err != nil {
Logf("Unexpected error retrieving node events %v", err)
return []v1.Event{}
}
return events.Items
}
// waitListSchedulableNodesOrDie is a wrapper around listing nodes supporting retries.
func waitListSchedulableNodesOrDie(c clientset.Interface) *v1.NodeList {
var nodes *v1.NodeList
var err error
if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
nodes, err = c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
return true, nil
}) != nil {
ExpectNoError(err, "Non-retryable failure or timed out while listing nodes for e2e cluster.")
}
return nodes
}
// isNodeSchedulable returns true if:
// 1) the node doesn't have the "unschedulable" field set
// 2) its Ready condition is set to true
// 3) it doesn't have a NetworkUnavailable condition set to true
func isNodeSchedulable(node *v1.Node) bool {
nodeReady := IsNodeConditionSetAsExpected(node, v1.NodeReady, true)
networkReady := IsNodeConditionUnset(node, v1.NodeNetworkUnavailable) ||
IsNodeConditionSetAsExpectedSilent(node, v1.NodeNetworkUnavailable, false)
return !node.Spec.Unschedulable && nodeReady && networkReady
}
// Test whether a fake pod can be scheduled on "node", given its current taints.
func isNodeUntainted(node *v1.Node) bool {
fakePod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: testapi.Groups[v1.GroupName].GroupVersion().String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "fake-not-scheduled",
Namespace: "fake-not-scheduled",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fake-not-scheduled",
Image: "fake-not-scheduled",
},
},
},
}
nodeInfo := schedulercache.NewNodeInfo()
nodeInfo.SetNode(node)
fit, _, err := predicates.PodToleratesNodeTaints(fakePod, nil, nodeInfo)
if err != nil {
Failf("Can't test predicates for node %s: %v", node.Name, err)
return false
}
return fit
}
// GetReadySchedulableNodesOrDie addresses the common use case of getting nodes you can do work on.
// 1) Needs to be schedulable.
// 2) Needs to be ready.
// If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *v1.NodeList) {
nodes = waitListSchedulableNodesOrDie(c)
	// Previous tests may have caused failures of some nodes. Let's skip
	// 'Not Ready' nodes, just in case (there is no need to fail the test).
FilterNodes(nodes, func(node v1.Node) bool {
return isNodeSchedulable(&node) && isNodeUntainted(&node)
})
return nodes
}
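// Usage sketch for GetReadySchedulableNodesOrDie (illustrative only): picking
// an arbitrary schedulable node to target in a test:
//
//	nodes := GetReadySchedulableNodesOrDie(c)
//	if len(nodes.Items) == 0 {
//		Skipf("no schedulable nodes available")
//	}
//	node := nodes.Items[0]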
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, TestContext.AllowedNotReadyNodes)
var notSchedulable []*v1.Node
attempt := 0
return wait.PollImmediate(30*time.Second, timeout, func() (bool, error) {
attempt++
notSchedulable = nil
opts := metav1.ListOptions{
ResourceVersion: "0",
FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
}
nodes, err := c.CoreV1().Nodes().List(opts)
if err != nil {
Logf("Unexpected error listing nodes: %v", err)
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
for i := range nodes.Items {
node := &nodes.Items[i]
if !isNodeSchedulable(node) {
notSchedulable = append(notSchedulable, node)
}
}
// Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready,
// to make it possible e.g. for incorrect deployment of some small percentage
// of nodes (which we allow in cluster validation). Some nodes that are not
// provisioned correctly at startup will never become ready (e.g. when something
// won't install correctly), so we can't expect them to be ready at any point.
//
// However, we only allow non-ready nodes with some specific reasons.
if len(notSchedulable) > 0 {
// In large clusters, log them only every 10th pass.
if len(nodes.Items) >= largeClusterThreshold && attempt%10 == 0 {
Logf("Unschedulable nodes:")
for i := range notSchedulable {
Logf("-> %s Ready=%t Network=%t",
notSchedulable[i].Name,
IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeReady, true),
IsNodeConditionSetAsExpectedSilent(notSchedulable[i], v1.NodeNetworkUnavailable, false))
}
Logf("================================")
}
}
return len(notSchedulable) <= TestContext.AllowedNotReadyNodes, nil
})
}
func GetPodSecretUpdateTimeout(c clientset.Interface) time.Duration {
// With SecretManager(ConfigMapManager), we may have to wait up to full sync period +
// TTL of secret(configmap) to elapse before the Kubelet projects the update into the
// volume and the container picks it up.
// So this timeout is based on default Kubelet sync period (1 minute) + maximum TTL for
// secret(configmap) that's based on cluster size + additional time as a fudge factor.
secretTTL, err := GetNodeTTLAnnotationValue(c)
if err != nil {
Logf("Couldn't get node TTL annotation (using default value of 0): %v", err)
}
podLogTimeout := 240*time.Second + secretTTL
return podLogTimeout
}
func GetNodeTTLAnnotationValue(c clientset.Interface) (time.Duration, error) {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil || len(nodes.Items) == 0 {
return time.Duration(0), fmt.Errorf("Couldn't list any nodes to get TTL annotation: %v", err)
}
	// Since the TTL the kubelet uses is stored in the node object, for timeout
	// purposes we take it from the first node (all of them should be the same).
node := &nodes.Items[0]
if node.Annotations == nil {
return time.Duration(0), fmt.Errorf("No annotations found on the node")
}
value, ok := node.Annotations[v1.ObjectTTLAnnotationKey]
if !ok {
return time.Duration(0), fmt.Errorf("No TTL annotation found on the node")
}
intValue, err := strconv.Atoi(value)
if err != nil {
return time.Duration(0), fmt.Errorf("Cannot convert TTL annotation from %#v to int", *node)
}
return time.Duration(intValue) * time.Second, nil
}
func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, labelValue string) {
ExpectNoError(testutil.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
}
func AddOrUpdateLabelOnNodeAndReturnOldValue(c clientset.Interface, nodeName string, labelKey, labelValue string) string {
var oldValue string
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
ExpectNoError(err)
oldValue = node.Labels[labelKey]
ExpectNoError(testutil.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
return oldValue
}
func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) {
By("verifying the node has the label " + labelKey + " " + labelValue)
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
ExpectNoError(err)
Expect(node.Labels[labelKey]).To(Equal(labelValue))
}
func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint v1.Taint) {
ExpectNoError(controller.RemoveTaintOffNode(c, nodeName, nil, &taint))
VerifyThatTaintIsGone(c, nodeName, &taint)
}
func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint v1.Taint) {
ExpectNoError(controller.AddOrUpdateTaintOnNode(c, nodeName, &taint))
}
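// Usage sketch for the taint helpers (illustrative only; the taint key/value
// are hypothetical): tainting a node for the duration of a test and
// guaranteeing cleanup.
//
//	taint := v1.Taint{Key: "example-key", Value: "example-value", Effect: v1.TaintEffectNoSchedule}
//	AddOrUpdateTaintOnNode(c, nodeName, taint)
//	ExpectNodeHasTaint(c, nodeName, &taint)
//	defer RemoveTaintOffNode(c, nodeName, taint)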
// RemoveLabelOffNode is for cleaning up labels temporarily added to a node;
// it won't fail if the target label doesn't exist or has been removed.
func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string) {
By("removing the label " + labelKey + " off the node " + nodeName)
ExpectNoError(testutil.RemoveLabelOffNode(c, nodeName, []string{labelKey}))
By("verifying the node doesn't have the label " + labelKey)
ExpectNoError(testutil.VerifyLabelsRemoved(c, nodeName, []string{labelKey}))
}
func VerifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Taint) {
By("verifying the node doesn't have the taint " + taint.ToString())
nodeUpdated, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
ExpectNoError(err)
if taintutils.TaintExists(nodeUpdated.Spec.Taints, taint) {
Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
}
}
func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) {
By("verifying the node has the taint " + taint.ToString())
if has, err := NodeHasTaint(c, nodeName, taint); !has {
ExpectNoError(err)
Failf("Failed to find taint %s on node %s", taint.ToString(), nodeName)
}
}
func NodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) (bool, error) {
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
return false, err
}
nodeTaints := node.Spec.Taints
if len(nodeTaints) == 0 || !taintutils.TaintExists(nodeTaints, taint) {
return false, nil
}
return true, nil
}
// AddOrUpdateAvoidPodOnNode adds the avoidPods annotation to the node, overriding any existing value.
func AddOrUpdateAvoidPodOnNode(c clientset.Interface, nodeName string, avoidPods v1.AvoidPods) {
err := wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
		avoidPodsData, err := json.Marshal(avoidPods)
		ExpectNoError(err)
		if node.Annotations == nil {
			node.Annotations = make(map[string]string)
		}
		node.Annotations[v1.PreferAvoidPodsAnnotationKey] = string(avoidPodsData)
_, err = c.CoreV1().Nodes().Update(node)
if err != nil {
if !apierrs.IsConflict(err) {
ExpectNoError(err)
} else {
Logf("Conflict when trying to add/update avoidPonds %v to %v", avoidPods, nodeName)
}
}
return true, nil
})
ExpectNoError(err)
}
// RemoveAvoidPodsOffNode removes the AvoidPods annotation from the node. It does not fail if the annotation does not exist.
func RemoveAvoidPodsOffNode(c clientset.Interface, nodeName string) {
err := wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if err != nil {
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
if node.Annotations == nil {
return true, nil
}
delete(node.Annotations, v1.PreferAvoidPodsAnnotationKey)
_, err = c.CoreV1().Nodes().Update(node)
if err != nil {
if !apierrs.IsConflict(err) {
ExpectNoError(err)
} else {
Logf("Conflict when trying to remove avoidPods to %v", nodeName)
}
}
return true, nil
})
ExpectNoError(err)
}
func ScaleResource(
clientset clientset.Interface,
internalClientset internalclientset.Interface,
scalesGetter scaleclient.ScalesGetter,
ns, name string,
size uint,
wait bool,
kind schema.GroupKind,
gr schema.GroupResource,
) error {
By(fmt.Sprintf("Scaling %v %s in namespace %s to %d", kind, name, ns, size))
scaler := kubectl.ScalerFor(kind, internalClientset.Batch(), scalesGetter, gr)
waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
if err := scaler.Scale(ns, name, size, nil, waitForScale, waitForReplicas); err != nil {
return fmt.Errorf("error while scaling RC %s to %d replicas: %v", name, size, err)
}
if !wait {
return nil
}
return WaitForControlledPodsRunning(clientset, ns, name, kind)
}
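// Usage sketch for ScaleResource (illustrative only; the scales getter and
// clients are assumed to be wired up by the caller, e.g. from the test
// Framework): scaling an RC to 5 replicas and waiting for the pods to come up.
//
//	err := ScaleResource(c, internalClient, scalesGetter, ns, "frontend", 5, true,
//		api.Kind("ReplicationController"),
//		schema.GroupResource{Resource: "replicationcontrollers"})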
// Wait up to 10 minutes for pods to become Running.
func WaitForControlledPodsRunning(c clientset.Interface, ns, name string, kind schema.GroupKind) error {
rtObject, err := getRuntimeObjectForKind(c, kind, ns, name)
if err != nil {
return err
}
selector, err := getSelectorFromRuntimeObject(rtObject)
if err != nil {
return err
}
err = testutil.WaitForPodsWithLabelRunning(c, ns, selector)
if err != nil {
return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", name, err)
}
return nil
}
// Wait up to PodListTimeout for getting pods of the specified controller name and return them.
func WaitForControlledPods(c clientset.Interface, ns, name string, kind schema.GroupKind) (pods *v1.PodList, err error) {
rtObject, err := getRuntimeObjectForKind(c, kind, ns, name)
if err != nil {
return nil, err
}
selector, err := getSelectorFromRuntimeObject(rtObject)
if err != nil {
return nil, err
}
return WaitForPodsWithLabel(c, ns, selector)
}
// Returns true if all the specified pods are scheduled, else returns false.
func podsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (bool, error) {
	ps := testutil.NewPodStore(c, ns, label, fields.Everything())
	defer ps.Stop()
	pods := ps.List()
if len(pods) == 0 {
return false, nil
}
for _, pod := range pods {
if pod.Spec.NodeName == "" {
return false, nil
}
}
return true, nil
}
// Wait for all matching pods to become scheduled and at least one
// matching pod exists. Return the list of matching pods.
func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
err = wait.PollImmediate(Poll, podScheduledBeforeTimeout,
func() (bool, error) {
pods, err = WaitForPodsWithLabel(c, ns, label)
if err != nil {
return false, err
}
for _, pod := range pods.Items {
if pod.Spec.NodeName == "" {
return false, nil
}
}
return true, nil
})
return pods, err
}
// Wait up to PodListTimeout for getting pods with certain label
func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
for t := time.Now(); time.Since(t) < PodListTimeout; time.Sleep(Poll) {
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err = c.CoreV1().Pods(ns).List(options)
if err != nil {
if IsRetryableAPIError(err) {
continue
}
return
}
if len(pods.Items) > 0 {
break
}
}
if pods == nil || len(pods.Items) == 0 {
err = fmt.Errorf("Timeout while waiting for pods with label %v", label)
}
return
}
// Wait for exact amount of matching pods to become running and ready.
// Return the list of matching pods.
func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label labels.Selector, num int, timeout time.Duration) (pods *v1.PodList, err error) {
var current int
err = wait.Poll(Poll, timeout,
func() (bool, error) {
pods, err := WaitForPodsWithLabel(c, ns, label)
if err != nil {
Logf("Failed to list pods: %v", err)
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
current = 0
for _, pod := range pods.Items {
				if flag, err := testutil.PodRunningReady(&pod); err == nil && flag {
current++
}
}
if current != num {
Logf("Got %v pods running and ready, expect: %v", current, num)
return false, nil
}
return true, nil
})
return pods, err
}
func getRuntimeObjectForKind(c clientset.Interface, kind schema.GroupKind, ns, name string) (runtime.Object, error) {
switch kind {
case api.Kind("ReplicationController"):
return c.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
case extensionsinternal.Kind("ReplicaSet"), appsinternal.Kind("ReplicaSet"):
return c.ExtensionsV1beta1().ReplicaSets(ns).Get(name, metav1.GetOptions{})
case extensionsinternal.Kind("Deployment"), appsinternal.Kind("Deployment"):
return c.ExtensionsV1beta1().Deployments(ns).Get(name, metav1.GetOptions{})
case extensionsinternal.Kind("DaemonSet"):
return c.ExtensionsV1beta1().DaemonSets(ns).Get(name, metav1.GetOptions{})
case batchinternal.Kind("Job"):
return c.BatchV1().Jobs(ns).Get(name, metav1.GetOptions{})
default:
return nil, fmt.Errorf("Unsupported kind when getting runtime object: %v", kind)
}
}
func deleteResource(c clientset.Interface, kind schema.GroupKind, ns, name string, deleteOption *metav1.DeleteOptions) error {
switch kind {
case api.Kind("ReplicationController"):
return c.CoreV1().ReplicationControllers(ns).Delete(name, deleteOption)
case extensionsinternal.Kind("ReplicaSet"), appsinternal.Kind("ReplicaSet"):
return c.ExtensionsV1beta1().ReplicaSets(ns).Delete(name, deleteOption)
case extensionsinternal.Kind("Deployment"), appsinternal.Kind("Deployment"):
return c.ExtensionsV1beta1().Deployments(ns).Delete(name, deleteOption)
case extensionsinternal.Kind("DaemonSet"):
return c.ExtensionsV1beta1().DaemonSets(ns).Delete(name, deleteOption)
case batchinternal.Kind("Job"):
return c.BatchV1().Jobs(ns).Delete(name, deleteOption)
default:
return fmt.Errorf("Unsupported kind when deleting: %v", kind)
}
}
func getSelectorFromRuntimeObject(obj runtime.Object) (labels.Selector, error) {
switch typed := obj.(type) {
case *v1.ReplicationController:
return labels.SelectorFromSet(typed.Spec.Selector), nil
case *extensions.ReplicaSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *extensions.Deployment:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *extensions.DaemonSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *batch.Job:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
default:
return nil, fmt.Errorf("Unsupported kind when getting selector: %v", obj)
}
}
func getReplicasFromRuntimeObject(obj runtime.Object) (int32, error) {
switch typed := obj.(type) {
case *v1.ReplicationController:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *extensions.ReplicaSet:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *extensions.Deployment:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *batch.Job:
		// TODO: currently we use pause pods so that's OK. When we want to switch
		// to Pods that actually finish, we'll need a better way to do this.
if typed.Spec.Parallelism != nil {
return *typed.Spec.Parallelism, nil
}
return 0, nil
default:
return -1, fmt.Errorf("Unsupported kind when getting number of replicas: %v", obj)
}
}
func getReaperForKind(internalClientset internalclientset.Interface, kind schema.GroupKind) (kubectl.Reaper, error) {
return kubectl.ReaperFor(kind, internalClientset)
}
// DeleteResourceAndPods deletes a given resource and all pods it spawned
func DeleteResourceAndPods(clientset clientset.Interface, internalClientset internalclientset.Interface, kind schema.GroupKind, ns, name string) error {
By(fmt.Sprintf("deleting %v %s in namespace %s", kind, name, ns))
rtObject, err := getRuntimeObjectForKind(clientset, kind, ns, name)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("%v %s not found: %v", kind, name, err)
return nil
}
return err
}
selector, err := getSelectorFromRuntimeObject(rtObject)
if err != nil {
return err
}
reaper, err := getReaperForKind(internalClientset, kind)
if err != nil {
return err
}
ps, err := podStoreForSelector(clientset, ns, selector)
if err != nil {
return err
}
defer ps.Stop()
startTime := time.Now()
err = reaper.Stop(ns, name, 0, nil)
if apierrs.IsNotFound(err) {
Logf("%v %s was already deleted: %v", kind, name, err)
return nil
}
if err != nil {
return fmt.Errorf("error while stopping %v: %s: %v", kind, name, err)
}
	deleteTime := time.Since(startTime)
Logf("Deleting %v %s took: %v", kind, name, deleteTime)
err = waitForPodsInactive(ps, 100*time.Millisecond, 10*time.Minute)
if err != nil {
return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
}
	terminatePodTime := time.Since(startTime) - deleteTime
Logf("Terminating %v %s pods took: %v", kind, name, terminatePodTime)
// this is to relieve namespace controller's pressure when deleting the
// namespace after a test.
err = waitForPodsGone(ps, 100*time.Millisecond, 10*time.Minute)
if err != nil {
return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
}
	gcPodTime := time.Since(startTime) - terminatePodTime
Logf("Garbage collecting %v %s pods took: %v", kind, name, gcPodTime)
return nil
}
// DeleteResourceAndWaitForGC deletes only given resource and waits for GC to delete the pods.
func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns, name string) error {
By(fmt.Sprintf("deleting %v %s in namespace %s, will wait for the garbage collector to delete the pods", kind, name, ns))
rtObject, err := getRuntimeObjectForKind(c, kind, ns, name)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("%v %s not found: %v", kind, name, err)
return nil
}
return err
}
selector, err := getSelectorFromRuntimeObject(rtObject)
if err != nil {
return err
}
replicas, err := getReplicasFromRuntimeObject(rtObject)
if err != nil {
return err
}
ps, err := podStoreForSelector(c, ns, selector)
if err != nil {
return err
}
defer ps.Stop()
startTime := time.Now()
falseVar := false
deleteOption := &metav1.DeleteOptions{OrphanDependents: &falseVar}
err = deleteResource(c, kind, ns, name, deleteOption)
if err != nil && apierrs.IsNotFound(err) {
Logf("%v %s was already deleted: %v", kind, name, err)
return nil
}
if err != nil {
return err
}
	deleteTime := time.Since(startTime)
Logf("Deleting %v %s took: %v", kind, name, deleteTime)
var interval, timeout time.Duration
switch {
case replicas < 100:
interval = 100 * time.Millisecond
case replicas < 1000:
interval = 1 * time.Second
default:
interval = 10 * time.Second
}
if replicas < 5000 {
timeout = 10 * time.Minute
} else {
timeout = time.Duration(replicas/gcThroughput) * time.Second
// gcThroughput is pretty strict now, add a bit more to it
timeout = timeout + 3*time.Minute
}
err = waitForPodsInactive(ps, interval, timeout)
if err != nil {
return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
}
	terminatePodTime := time.Since(startTime) - deleteTime
Logf("Terminating %v %s pods took: %v", kind, name, terminatePodTime)
err = waitForPodsGone(ps, interval, 10*time.Minute)
if err != nil {
return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
}
return nil
}
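// Usage sketch for DeleteResourceAndWaitForGC (illustrative only; "frontend"
// is a hypothetical Deployment name): deleting a Deployment and letting the
// garbage collector remove its pods.
//
//	err := DeleteResourceAndWaitForGC(c, extensionsinternal.Kind("Deployment"), ns, "frontend")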
// podStoreForSelector creates a PodStore that monitors pods from given namespace matching given selector.
// It waits until the reflector does a List() before returning.
func podStoreForSelector(c clientset.Interface, ns string, selector labels.Selector) (*testutil.PodStore, error) {
ps := testutil.NewPodStore(c, ns, selector, fields.Everything())
err := wait.Poll(100*time.Millisecond, 2*time.Minute, func() (bool, error) {
if len(ps.Reflector.LastSyncResourceVersion()) != 0 {
return true, nil
}
return false, nil
})
return ps, err
}
// waitForPodsInactive waits until there are no active pods left in the PodStore.
// This is to make a fair comparison of deletion time between DeleteResourceAndPods
// and DeleteResourceAndWaitForGC, because the RC controller decreases status.replicas
// when the pod is inactive.
func waitForPodsInactive(ps *testutil.PodStore, interval, timeout time.Duration) error {
return wait.PollImmediate(interval, timeout, func() (bool, error) {
pods := ps.List()
for _, pod := range pods {
if controller.IsPodActive(pod) {
return false, nil
}
}
return true, nil
})
}
// waitForPodsGone waits until there are no pods left in the PodStore.
func waitForPodsGone(ps *testutil.PodStore, interval, timeout time.Duration) error {
return wait.PollImmediate(interval, timeout, func() (bool, error) {
if pods := ps.List(); len(pods) == 0 {
return true, nil
}
return false, nil
})
}
func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
options := metav1.ListOptions{LabelSelector: label.String()}
return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
pods, err := c.CoreV1().Pods(ns).List(options)
if err != nil {
return false, nil
}
for _, pod := range pods.Items {
if !podutil.IsPodAvailable(&pod, int32(minReadySeconds), metav1.Now()) {
return false, nil
}
}
return true, nil
})
}
// Waits for the number of events on the given object to reach a desired count.
func WaitForEvents(c clientset.Interface, ns string, objOrRef runtime.Object, desiredEventsCount int) error {
return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
events, err := c.CoreV1().Events(ns).Search(legacyscheme.Scheme, objOrRef)
if err != nil {
return false, fmt.Errorf("error in listing events: %s", err)
}
eventsCount := len(events.Items)
if eventsCount == desiredEventsCount {
return true, nil
}
if eventsCount < desiredEventsCount {
return false, nil
}
// Number of events has exceeded the desired count.
return false, fmt.Errorf("number of events has exceeded the desired count, eventsCount: %d, desiredCount: %d", eventsCount, desiredEventsCount)
})
}
// Waits for the number of events on the given object to be at least a desired count.
func WaitForPartialEvents(c clientset.Interface, ns string, objOrRef runtime.Object, atLeastEventsCount int) error {
return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
events, err := c.CoreV1().Events(ns).Search(legacyscheme.Scheme, objOrRef)
if err != nil {
return false, fmt.Errorf("error in listing events: %s", err)
}
eventsCount := len(events.Items)
if eventsCount >= atLeastEventsCount {
return true, nil
}
return false, nil
})
}
type updateDSFunc func(*extensions.DaemonSet)
func UpdateDaemonSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *extensions.DaemonSet, err error) {
daemonsets := c.ExtensionsV1beta1().DaemonSets(namespace)
var updateErr error
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
if ds, err = daemonsets.Get(name, metav1.GetOptions{}); err != nil {
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(ds)
if ds, err = daemonsets.Update(ds); err == nil {
Logf("Updating DaemonSet %s", name)
return true, nil
}
updateErr = err
return false, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("couldn't apply the provided updated to DaemonSet %q: %v", name, updateErr)
}
return ds, pollErr
}
// NodeAddresses returns the first address of the given type of each node.
func NodeAddresses(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string {
hosts := []string{}
for _, n := range nodelist.Items {
for _, addr := range n.Status.Addresses {
// Use the first external IP address we find on the node, and
// use at most one per node.
// TODO(roberthbailey): Use the "preferred" address for the node, once
// such a thing is defined (#2462).
if addr.Type == addrType {
hosts = append(hosts, addr.Address)
break
}
}
}
return hosts
}
// NodeSSHHosts returns SSH-able host names for all schedulable nodes - this excludes the master node.
// It returns an error if it can't find an external IP for every node, though it still returns all
// hosts that it found in that case.
func NodeSSHHosts(c clientset.Interface) ([]string, error) {
nodelist := waitListSchedulableNodesOrDie(c)
// TODO(roberthbailey): Use the "preferred" address for the node, once such a thing is defined (#2462).
hosts := NodeAddresses(nodelist, v1.NodeExternalIP)
// Error if any node didn't have an external IP.
if len(hosts) != len(nodelist.Items) {
return hosts, fmt.Errorf(
"only found %d external IPs on nodes, but found %d nodes. Nodelist: %v",
len(hosts), len(nodelist.Items), nodelist)
}
sshHosts := make([]string, 0, len(hosts))
for _, h := range hosts {
sshHosts = append(sshHosts, net.JoinHostPort(h, sshPort))
}
return sshHosts, nil
}
type SSHResult struct {
User string
Host string
Cmd string
Stdout string
Stderr string
Code int
}
// NodeExec execs the given cmd on node via SSH. Note that the nodeName is an SSH-able name,
// e.g. the name returned by framework.GetMasterHost(). This is also not guaranteed to work across
// cloud providers, since it involves SSH.
func NodeExec(nodeName, cmd string) (SSHResult, error) {
return SSH(cmd, net.JoinHostPort(nodeName, sshPort), TestContext.Provider)
}
// SSH synchronously SSHs to a node running on provider and runs cmd. If there
// is no error performing the SSH, the stdout, stderr, and exit code are
// returned.
func SSH(cmd, host, provider string) (SSHResult, error) {
result := SSHResult{Host: host, Cmd: cmd}
// Get a signer for the provider.
signer, err := GetSigner(provider)
if err != nil {
return result, fmt.Errorf("error getting signer for provider %s: '%v'", provider, err)
}
// RunSSHCommand will default to Getenv("USER") if user == "", but we're
// defaulting here as well for logging clarity.
result.User = os.Getenv("KUBE_SSH_USER")
if result.User == "" {
result.User = os.Getenv("USER")
}
stdout, stderr, code, err := sshutil.RunSSHCommand(cmd, result.User, host, signer)
result.Stdout = stdout
result.Stderr = stderr
result.Code = code
return result, err
}
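// Usage sketch for SSH (illustrative only; host must be an SSH-able
// "host:port" address obtained by the caller):
//
//	result, err := SSH("uptime", host, TestContext.Provider)
//	LogSSHResult(result)
//	ExpectNoError(err)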
func LogSSHResult(result SSHResult) {
remote := fmt.Sprintf("%s@%s", result.User, result.Host)
Logf("ssh %s: command: %s", remote, result.Cmd)
Logf("ssh %s: stdout: %q", remote, result.Stdout)
Logf("ssh %s: stderr: %q", remote, result.Stderr)
Logf("ssh %s: exit code: %d", remote, result.Code)
}
func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*SSHResult, error) {
Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
if a.Type == v1.NodeExternalIP {
host = net.JoinHostPort(a.Address, sshPort)
break
}
}
if host == "" {
// No external IPs were found, let's try to use internal as plan B
for _, a := range node.Status.Addresses {
if a.Type == v1.NodeInternalIP {
host = net.JoinHostPort(a.Address, sshPort)
break
}
}
}
if host == "" {
return nil, fmt.Errorf("couldn't find any IP address for node %s", node.Name)
}
Logf("SSH %q on %s(%s)", cmd, node.Name, host)
result, err := SSH(cmd, host, provider)
LogSSHResult(result)
if result.Code != 0 || err != nil {
return nil, fmt.Errorf("failed running %q: %v (exit code %d)",
cmd, err, result.Code)
}
return &result, nil
}
func IssueSSHCommand(cmd, provider string, node *v1.Node) error {
	_, err := IssueSSHCommandWithResult(cmd, provider, node)
	return err
}
// NewHostExecPodSpec returns the pod spec of a hostexec pod
func NewHostExecPodSpec(ns, name string) *v1.Pod {
immediate := int64(0)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: ns,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "hostexec",
Image: imageutils.GetE2EImage(imageutils.Hostexec),
ImagePullPolicy: v1.PullIfNotPresent,
},
},
HostNetwork: true,
SecurityContext: &v1.PodSecurityContext{},
TerminationGracePeriodSeconds: &immediate,
},
}
return pod
}
// RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell.
func RunHostCmd(ns, name, cmd string) (string, error) {
return RunKubectl("exec", fmt.Sprintf("--namespace=%v", ns), name, "--", "/bin/sh", "-c", cmd)
}
// RunHostCmdOrDie calls RunHostCmd and dies on error.
func RunHostCmdOrDie(ns, name, cmd string) string {
stdout, err := RunHostCmd(ns, name, cmd)
Logf("stdout: %v", stdout)
ExpectNoError(err)
return stdout
}
// RunHostCmdWithRetries calls RunHostCmd and retries all errors
// until it succeeds or the specified timeout expires.
// This can be used with idempotent commands to deflake transient Node issues.
func RunHostCmdWithRetries(ns, name, cmd string, interval, timeout time.Duration) (string, error) {
start := time.Now()
for {
out, err := RunHostCmd(ns, name, cmd)
if err == nil {
return out, nil
}
if elapsed := time.Since(start); elapsed > timeout {
return out, fmt.Errorf("RunHostCmd still failed after %v: %v", elapsed, err)
}
Logf("Waiting %v to retry failed RunHostCmd: %v", interval, err)
time.Sleep(interval)
}
}
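
// Hypothetical sketch: retries an idempotent command on a pod. "date" is safe
// to re-run, so transient kubectl/Node flakes are absorbed by the retry loop;
// the interval and timeout values are illustrative.
func exampleRetryDateOnPod(ns, podName string) (string, error) {
	return RunHostCmdWithRetries(ns, podName, "date", 5*time.Second, 2*time.Minute)
}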
// LaunchHostExecPod launches a hostexec pod in the given namespace and waits
// until it's Running
func LaunchHostExecPod(client clientset.Interface, ns, name string) *v1.Pod {
hostExecPod := NewHostExecPodSpec(ns, name)
pod, err := client.CoreV1().Pods(ns).Create(hostExecPod)
ExpectNoError(err)
err = WaitForPodRunningInNamespace(client, pod)
ExpectNoError(err)
return pod
}
// newExecPodSpec returns the pod spec of an exec pod
func newExecPodSpec(ns, generateName string) *v1.Pod {
immediate := int64(0)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: generateName,
Namespace: ns,
},
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &immediate,
Containers: []v1.Container{
{
Name: "exec",
Image: BusyBoxImage,
Command: []string{"sh", "-c", "while true; do sleep 5; done"},
},
},
},
}
return pod
}
// CreateExecPodOrFail creates a simple busybox pod in a sleep loop used as a
// vessel for kubectl exec commands.
// Returns the name of the created pod.
func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tweak func(*v1.Pod)) string {
Logf("Creating new exec pod")
execPod := newExecPodSpec(ns, generateName)
if tweak != nil {
tweak(execPod)
}
created, err := client.CoreV1().Pods(ns).Create(execPod)
Expect(err).NotTo(HaveOccurred())
err = wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) {
retrievedPod, err := client.CoreV1().Pods(execPod.Namespace).Get(created.Name, metav1.GetOptions{})
if err != nil {
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
return retrievedPod.Status.Phase == v1.PodRunning, nil
})
Expect(err).NotTo(HaveOccurred())
return created.Name
}
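
// Hypothetical sketch: creates an exec pod pinned to a specific node via the
// tweak callback; the generateName prefix is illustrative.
func exampleCreateExecPodOnNode(c clientset.Interface, ns, nodeName string) string {
	return CreateExecPodOrFail(c, ns, "pinned-exec-", func(pod *v1.Pod) {
		pod.Spec.NodeName = nodeName
	})
}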
func CreatePodOrFail(c clientset.Interface, ns, name string, labels map[string]string, containerPorts []v1.ContainerPort) {
By(fmt.Sprintf("Creating pod %s in namespace %s", name, ns))
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: labels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "pause",
Image: GetPauseImageName(c),
Ports: containerPorts,
// Add a dummy environment variable to work around a docker issue.
// https://github.com/docker/docker/issues/14203
Env: []v1.EnvVar{{Name: "FOO", Value: " "}},
},
},
},
}
_, err := c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
}
func DeletePodOrFail(c clientset.Interface, ns, name string) {
By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns))
err := c.CoreV1().Pods(ns).Delete(name, nil)
Expect(err).NotTo(HaveOccurred())
}
// GetSigner returns an ssh.Signer for the provider ("gce", etc.) that can be
// used to SSH to their nodes.
func GetSigner(provider string) (ssh.Signer, error) {
// Get the directory in which SSH keys are located.
keydir := filepath.Join(os.Getenv("HOME"), ".ssh")
// Select the key itself to use. When implementing more providers here,
// please also add them to any SSH tests that are disabled because of signer
// support.
keyfile := ""
key := ""
switch provider {
case "gce", "gke", "kubemark":
keyfile = "google_compute_engine"
case "aws":
// If there is an env. variable override, use that.
aws_keyfile := os.Getenv("AWS_SSH_KEY")
if len(aws_keyfile) != 0 {
return sshutil.MakePrivateKeySignerFromFile(aws_keyfile)
}
// Otherwise revert to home dir
keyfile = "kube_aws_rsa"
case "local", "vsphere":
keyfile = os.Getenv("LOCAL_SSH_KEY") // maybe?
if len(keyfile) == 0 {
keyfile = "id_rsa"
}
case "skeleton":
keyfile = os.Getenv("KUBE_SSH_KEY")
if len(keyfile) == 0 {
keyfile = "id_rsa"
}
default:
return nil, fmt.Errorf("GetSigner(...) not implemented for %s", provider)
}
	return sshutil.MakePrivateKeySignerFromFile(filepath.Join(keydir, keyfile))
}
// CheckPodsRunningReady returns whether all pods whose names are listed in
// podNames in namespace ns are running and ready, using c and waiting at most
// timeout.
func CheckPodsRunningReady(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
return CheckPodsCondition(c, ns, podNames, timeout, testutil.PodRunningReady, "running and ready")
}
// CheckPodsRunningReadyOrSucceeded returns whether all pods whose names are
// listed in podNames in namespace ns are running and ready, or succeeded; use
// c and waiting at most timeout.
func CheckPodsRunningReadyOrSucceeded(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
return CheckPodsCondition(c, ns, podNames, timeout, testutil.PodRunningReadyOrSucceeded, "running and ready, or succeeded")
}
// CheckPodsCondition returns whether all pods whose names are listed in podNames
// in namespace ns are in the condition, using c and waiting at most timeout.
func CheckPodsCondition(c clientset.Interface, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool {
np := len(podNames)
Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames)
type waitPodResult struct {
success bool
podName string
}
result := make(chan waitPodResult, len(podNames))
for _, podName := range podNames {
// Launch off pod readiness checkers.
go func(name string) {
err := WaitForPodCondition(c, ns, name, desc, timeout, condition)
result <- waitPodResult{err == nil, name}
}(podName)
}
// Wait for them all to finish.
success := true
for range podNames {
res := <-result
if !res.success {
Logf("Pod %[1]s failed to be %[2]s.", res.podName, desc)
success = false
}
}
Logf("Wanted all %d pods to be %s. Result: %t. Pods: %v", np, desc, success, podNames)
return success
}
// WaitForNodeToBeReady returns whether node name is ready within timeout.
func WaitForNodeToBeReady(c clientset.Interface, name string, timeout time.Duration) bool {
return WaitForNodeToBe(c, name, v1.NodeReady, true, timeout)
}
// WaitForNodeToBeNotReady returns whether node name is not ready (i.e. the
// readiness condition is anything but ready, e.g. false or unknown) within
// timeout.
func WaitForNodeToBeNotReady(c clientset.Interface, name string, timeout time.Duration) bool {
return WaitForNodeToBe(c, name, v1.NodeReady, false, timeout)
}
func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue, silent bool) bool {
// Check the node readiness condition (logging all).
for _, cond := range node.Status.Conditions {
// Ensure that the condition type and the status matches as desired.
if cond.Type == conditionType {
			// For the NodeReady condition we also need to check that the
			// node-controller taints (not-ready/unreachable) are gone.
			if cond.Type == v1.NodeReady {
				hasNodeControllerTaints := false
				taints := node.Spec.Taints
for _, taint := range taints {
if taint.MatchTaint(nodectlr.UnreachableTaintTemplate) || taint.MatchTaint(nodectlr.NotReadyTaintTemplate) {
hasNodeControllerTaints = true
break
}
}
				if wantTrue {
					if (cond.Status == v1.ConditionTrue) && !hasNodeControllerTaints {
						return true
					}
					msg := ""
					if !hasNodeControllerTaints {
						msg = fmt.Sprintf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
							conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
					} else {
						msg = fmt.Sprintf("Condition %s of node %s is %v, but Node is tainted by NodeController with %v. Failure",
							conditionType, node.Name, cond.Status == v1.ConditionTrue, taints)
					}
					if !silent {
						Logf(msg)
					}
					return false
} else {
// TODO: check if the Node is tainted once we enable NC notReady/unreachable taints by default
if cond.Status != v1.ConditionTrue {
return true
}
if !silent {
Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
}
return false
}
}
			if (wantTrue && (cond.Status == v1.ConditionTrue)) || (!wantTrue && (cond.Status != v1.ConditionTrue)) {
				return true
			}
			if !silent {
				Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
					conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
			}
			return false
}
}
if !silent {
Logf("Couldn't find condition %v on node %v", conditionType, node.Name)
}
return false
}
func IsNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool {
return isNodeConditionSetAsExpected(node, conditionType, wantTrue, false)
}
func IsNodeConditionSetAsExpectedSilent(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool {
return isNodeConditionSetAsExpected(node, conditionType, wantTrue, true)
}
func IsNodeConditionUnset(node *v1.Node, conditionType v1.NodeConditionType) bool {
for _, cond := range node.Status.Conditions {
if cond.Type == conditionType {
return false
}
}
return true
}
// WaitForNodeToBe returns whether the named node's condition state matches wantTrue
// within timeout. If wantTrue is true, it will ensure the node condition status
// is ConditionTrue; if it's false, it ensures the node condition is in any state
// other than ConditionTrue (e.g. not true or unknown).
func WaitForNodeToBe(c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
if err != nil {
Logf("Couldn't get node %s", name)
continue
}
if IsNodeConditionSetAsExpected(node, conditionType, wantTrue) {
return true
}
}
Logf("Node %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout)
return false
}
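
// Hypothetical sketch: waits up to two minutes for a node's Ready condition to
// leave ConditionTrue, e.g. after deliberately breaking its network.
func exampleWaitForNodeNotReady(c clientset.Interface, nodeName string) bool {
	return WaitForNodeToBe(c, nodeName, v1.NodeReady, false, 2*time.Minute)
}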
// AllNodesReady checks whether all registered nodes are ready.
// TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy,
// and figure out how to do it in a configurable way, as we can't expect all setups to run
// default test add-ons.
func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, TestContext.AllowedNotReadyNodes)
var notReady []*v1.Node
err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
for i := range nodes.Items {
node := &nodes.Items[i]
if !IsNodeConditionSetAsExpected(node, v1.NodeReady, true) {
notReady = append(notReady, node)
}
}
// Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready,
// to make it possible e.g. for incorrect deployment of some small percentage
// of nodes (which we allow in cluster validation). Some nodes that are not
// provisioned correctly at startup will never become ready (e.g. when something
// won't install correctly), so we can't expect them to be ready at any point.
return len(notReady) <= TestContext.AllowedNotReadyNodes, nil
})
if err != nil && err != wait.ErrWaitTimeout {
return err
}
	if len(notReady) > TestContext.AllowedNotReadyNodes {
		names := make([]string, 0, len(notReady))
		for _, node := range notReady {
			names = append(names, node.Name)
		}
		return fmt.Errorf("Not ready nodes: %s", strings.Join(names, ", "))
	}
return nil
}
// WaitForAllNodesHealthy checks whether all registered nodes are ready and all required Pods are running on them.
func WaitForAllNodesHealthy(c clientset.Interface, timeout time.Duration) error {
Logf("Waiting up to %v for all nodes to be ready", timeout)
var notReady []v1.Node
var missingPodsPerNode map[string][]string
err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
for _, node := range nodes.Items {
if !IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) {
notReady = append(notReady, node)
}
}
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
return false, err
}
systemPodsPerNode := make(map[string][]string)
for _, pod := range pods.Items {
if pod.Namespace == metav1.NamespaceSystem && pod.Status.Phase == v1.PodRunning {
if pod.Spec.NodeName != "" {
systemPodsPerNode[pod.Spec.NodeName] = append(systemPodsPerNode[pod.Spec.NodeName], pod.Name)
}
}
}
missingPodsPerNode = make(map[string][]string)
for _, node := range nodes.Items {
if !system.IsMasterNode(node.Name) {
for _, requiredPod := range requiredPerNodePods {
foundRequired := false
for _, presentPod := range systemPodsPerNode[node.Name] {
if requiredPod.MatchString(presentPod) {
foundRequired = true
break
}
}
if !foundRequired {
missingPodsPerNode[node.Name] = append(missingPodsPerNode[node.Name], requiredPod.String())
}
}
}
}
return len(notReady) == 0 && len(missingPodsPerNode) == 0, nil
})
if err != nil && err != wait.ErrWaitTimeout {
return err
}
if len(notReady) > 0 {
return fmt.Errorf("Not ready nodes: %v", notReady)
}
if len(missingPodsPerNode) > 0 {
return fmt.Errorf("Not running system Pods: %v", missingPodsPerNode)
}
return nil
}
// Filters nodes in NodeList in place, removing nodes that do not
// satisfy the given condition
// TODO: consider merging with pkg/client/cache.NodeLister
func FilterNodes(nodeList *v1.NodeList, fn func(node v1.Node) bool) {
var l []v1.Node
for _, node := range nodeList.Items {
if fn(node) {
l = append(l, node)
}
}
nodeList.Items = l
}
// ParseKVLines parses output that looks like lines containing "<key>: <val>"
// and returns <val> if <key> is found. Otherwise, it returns the empty string.
func ParseKVLines(output, key string) string {
delim := ":"
key = key + delim
for _, line := range strings.Split(output, "\n") {
pieces := strings.SplitAfterN(line, delim, 2)
if len(pieces) != 2 {
continue
}
k, v := pieces[0], pieces[1]
if k == key {
return strings.TrimSpace(v)
}
}
return ""
}
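
// Hypothetical sketch: ParseKVLines on typical "key: value" command output;
// the returned value has its surrounding whitespace trimmed.
func exampleParseKVLines() string {
	out := "name: foo\nstatus: Running\n"
	return ParseKVLines(out, "status") // "Running"
}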
func RestartKubeProxy(host string) error {
// TODO: Make it work for all providers.
if !ProviderIs("gce", "gke", "aws") {
return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
}
// kubelet will restart the kube-proxy since it's running in a static pod
Logf("Killing kube-proxy on node %v", host)
result, err := SSH("sudo pkill kube-proxy", host, TestContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
return fmt.Errorf("couldn't restart kube-proxy: %v", err)
}
// wait for kube-proxy to come back up
sshCmd := "sudo /bin/sh -c 'pgrep kube-proxy | wc -l'"
err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
Logf("Waiting for kubeproxy to come back up with %v on %v", sshCmd, host)
result, err := SSH(sshCmd, host, TestContext.Provider)
if err != nil {
return false, err
}
if result.Code != 0 {
LogSSHResult(result)
return false, fmt.Errorf("failed to run command, exited %d", result.Code)
}
if result.Stdout == "0\n" {
return false, nil
}
Logf("kube-proxy is back up.")
return true, nil
})
if err != nil {
return fmt.Errorf("kube-proxy didn't recover: %v", err)
}
return nil
}
func RestartKubelet(host string) error {
// TODO: Make it work for all providers and distros.
supportedProviders := []string{"gce", "aws", "vsphere"}
if !ProviderIs(supportedProviders...) {
return fmt.Errorf("unsupported provider: %s, supported providers are: %v", TestContext.Provider, supportedProviders)
}
if ProviderIs("gce") && !NodeOSDistroIs("debian", "gci") {
return fmt.Errorf("unsupported node OS distro: %s", TestContext.NodeOSDistro)
}
var cmd string
if ProviderIs("gce") && NodeOSDistroIs("debian") {
cmd = "sudo /etc/init.d/kubelet restart"
} else if ProviderIs("vsphere") {
var sudoPresent bool
sshResult, err := SSH("sudo --version", host, TestContext.Provider)
if err != nil {
return fmt.Errorf("Unable to ssh to host %s with error %v", host, err)
}
if !strings.Contains(sshResult.Stderr, "command not found") {
sudoPresent = true
}
sshResult, err = SSH("systemctl --version", host, TestContext.Provider)
if !strings.Contains(sshResult.Stderr, "command not found") {
cmd = "systemctl restart kubelet"
} else {
cmd = "service kubelet restart"
}
if sudoPresent {
cmd = fmt.Sprintf("sudo %s", cmd)
}
} else {
cmd = "sudo systemctl restart kubelet"
}
Logf("Restarting kubelet via ssh on host %s with command %s", host, cmd)
result, err := SSH(cmd, host, TestContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
return fmt.Errorf("couldn't restart kubelet: %v", err)
}
return nil
}
func WaitForKubeletUp(host string) error {
cmd := "curl http://localhost:" + strconv.Itoa(ports.KubeletReadOnlyPort) + "/healthz"
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
result, err := SSH(cmd, host, TestContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
}
if result.Stdout == "ok" {
return nil
}
}
return fmt.Errorf("waiting for kubelet timed out")
}
func RestartApiserver(c discovery.ServerVersionInterface) error {
// TODO: Make it work for all providers.
if !ProviderIs("gce", "gke", "aws") {
return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
}
if ProviderIs("gce", "aws") {
return sshRestartMaster()
}
// GKE doesn't allow ssh access, so use a same-version master
// upgrade to teardown/recreate master.
v, err := c.ServerVersion()
if err != nil {
return err
}
return masterUpgradeGKE(v.GitVersion[1:]) // strip leading 'v'
}
func sshRestartMaster() error {
if !ProviderIs("gce", "aws") {
return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
}
var command string
if ProviderIs("gce") {
command = "pidof kube-apiserver | xargs sudo kill"
} else {
command = "sudo /etc/init.d/kube-apiserver restart"
}
Logf("Restarting master via ssh, running: %v", command)
result, err := SSH(command, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
return fmt.Errorf("couldn't restart apiserver: %v", err)
}
return nil
}
func WaitForApiserverUp(c clientset.Interface) error {
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
body, err := c.CoreV1().RESTClient().Get().AbsPath("/healthz").Do().Raw()
if err == nil && string(body) == "ok" {
return nil
}
}
return fmt.Errorf("waiting for apiserver timed out")
}
func RestartControllerManager() error {
// TODO: Make it work for all providers and distros.
if !ProviderIs("gce", "aws") {
return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
}
if ProviderIs("gce") && !MasterOSDistroIs("gci") {
return fmt.Errorf("unsupported master OS distro: %s", TestContext.MasterOSDistro)
}
cmd := "pidof kube-controller-manager | xargs sudo kill"
Logf("Restarting controller-manager via ssh, running: %v", cmd)
result, err := SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
return fmt.Errorf("couldn't restart controller-manager: %v", err)
}
return nil
}
func WaitForControllerManagerUp() error {
cmd := "curl http://localhost:" + strconv.Itoa(ports.InsecureKubeControllerManagerPort) + "/healthz"
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
result, err := SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
}
if result.Stdout == "ok" {
return nil
}
}
return fmt.Errorf("waiting for controller-manager timed out")
}
// CheckForControllerManagerHealthy checks that the controller manager does not crash within "duration"
func CheckForControllerManagerHealthy(duration time.Duration) error {
var PID string
cmd := "pidof kube-controller-manager"
for start := time.Now(); time.Since(start) < duration; time.Sleep(5 * time.Second) {
result, err := SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
if err != nil {
// We don't necessarily know that it crashed, pipe could just be broken
LogSSHResult(result)
return fmt.Errorf("master unreachable after %v", time.Since(start))
} else if result.Code != 0 {
LogSSHResult(result)
return fmt.Errorf("SSH result code not 0. actually: %v after %v", result.Code, time.Since(start))
} else if result.Stdout != PID {
if PID == "" {
PID = result.Stdout
} else {
				// The PID changed: the old controller-manager process died and was replaced.
return fmt.Errorf("controller manager crashed, old PID: %s, new PID: %s", PID, result.Stdout)
}
} else {
Logf("kube-controller-manager still healthy after %v", time.Since(start))
}
}
return nil
}
// NumberOfReadyNodes returns the number of ready Nodes, excluding the Master Node.
func NumberOfReadyNodes(c clientset.Interface) (int, error) {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
Logf("Failed to list nodes: %v", err)
return 0, err
}
// Filter out not-ready nodes.
FilterNodes(nodes, func(node v1.Node) bool {
return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
})
return len(nodes.Items), nil
}
// WaitForReadyNodes waits until the cluster has desired size and there is no not-ready nodes in it.
// By cluster size we mean number of Nodes excluding Master Node.
func WaitForReadyNodes(c clientset.Interface, size int, timeout time.Duration) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
Logf("Failed to list nodes: %v", err)
continue
}
numNodes := len(nodes.Items)
// Filter out not-ready nodes.
FilterNodes(nodes, func(node v1.Node) bool {
return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
})
numReady := len(nodes.Items)
if numNodes == size && numReady == size {
Logf("Cluster has reached the desired number of ready nodes %d", size)
return nil
}
Logf("Waiting for ready nodes %d, current ready %d, not ready nodes %d", size, numNodes, numNodes-numReady)
}
return fmt.Errorf("timeout waiting %v for number of ready nodes to be %d", timeout, size)
}
func GenerateMasterRegexp(prefix string) string {
return prefix + "(-...)?"
}
// WaitForMasters waits until the cluster has the desired number of ready masters in it.
func WaitForMasters(masterPrefix string, c clientset.Interface, size int, timeout time.Duration) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
Logf("Failed to list nodes: %v", err)
continue
}
// Filter out nodes that are not master replicas
FilterNodes(nodes, func(node v1.Node) bool {
res, err := regexp.Match(GenerateMasterRegexp(masterPrefix), ([]byte)(node.Name))
if err != nil {
Logf("Failed to match regexp to node name: %v", err)
return false
}
return res
})
numNodes := len(nodes.Items)
// Filter out not-ready nodes.
FilterNodes(nodes, func(node v1.Node) bool {
return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
})
numReady := len(nodes.Items)
if numNodes == size && numReady == size {
Logf("Cluster has reached the desired number of masters %d", size)
return nil
}
Logf("Waiting for the number of masters %d, current %d, not ready master nodes %d", size, numNodes, numNodes-numReady)
}
return fmt.Errorf("timeout waiting %v for the number of masters to be %d", timeout, size)
}
// GetHostExternalAddress gets the node for a pod and returns the first External
// address. Returns an error if the node the pod is on doesn't have an External
// address.
func GetHostExternalAddress(client clientset.Interface, p *v1.Pod) (externalAddress string, err error) {
node, err := client.CoreV1().Nodes().Get(p.Spec.NodeName, metav1.GetOptions{})
if err != nil {
return "", err
}
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeExternalIP {
if address.Address != "" {
externalAddress = address.Address
break
}
}
}
if externalAddress == "" {
err = fmt.Errorf("No external address for pod %v on node %v",
p.Name, p.Spec.NodeName)
}
return
}
type extractRT struct {
http.Header
}
func (rt *extractRT) RoundTrip(req *http.Request) (*http.Response, error) {
rt.Header = req.Header
return &http.Response{}, nil
}
// headersForConfig extracts any http client logic necessary for the provided
// config.
func headersForConfig(c *restclient.Config) (http.Header, error) {
extract := &extractRT{}
rt, err := restclient.HTTPWrappersForConfig(c, extract)
if err != nil {
return nil, err
}
if _, err := rt.RoundTrip(&http.Request{}); err != nil {
return nil, err
}
return extract.Header, nil
}
// OpenWebSocketForURL constructs a websocket connection to the provided URL, using the client
// config, with the specified protocols.
func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []string) (*websocket.Conn, error) {
tlsConfig, err := restclient.TLSConfigFor(config)
if err != nil {
return nil, fmt.Errorf("failed to create tls config: %v", err)
}
if tlsConfig != nil {
url.Scheme = "wss"
if !strings.Contains(url.Host, ":") {
url.Host += ":443"
}
} else {
url.Scheme = "ws"
if !strings.Contains(url.Host, ":") {
url.Host += ":80"
}
}
headers, err := headersForConfig(config)
if err != nil {
return nil, fmt.Errorf("failed to load http headers: %v", err)
}
cfg, err := websocket.NewConfig(url.String(), "http://localhost")
if err != nil {
return nil, fmt.Errorf("failed to create websocket config: %v", err)
}
cfg.Header = headers
cfg.TlsConfig = tlsConfig
cfg.Protocol = protocols
return websocket.DialConfig(cfg)
}
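
// Hypothetical sketch: parses a raw URL and opens a websocket against it using
// the base64 channel subprotocol that the e2e exec tests use elsewhere.
func exampleOpenWebSocket(config *restclient.Config, rawURL string) (*websocket.Conn, error) {
	u, err := url.Parse(rawURL)
	if err != nil {
		return nil, err
	}
	return OpenWebSocketForURL(u, config, []string{"base64.channel.k8s.io"})
}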
// Looks for the given string in the log of a specific pod container
func LookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) {
return LookForString(expectedString, timeout, func() string {
return RunKubectlOrDie("logs", podName, container, fmt.Sprintf("--namespace=%v", ns))
})
}
// Looks for the given string in a file in a specific pod container
func LookForStringInFile(ns, podName, container, file, expectedString string, timeout time.Duration) (result string, err error) {
return LookForString(expectedString, timeout, func() string {
return RunKubectlOrDie("exec", podName, "-c", container, fmt.Sprintf("--namespace=%v", ns), "--", "cat", file)
})
}
// Looks for the given string in the output of a command executed in a specific pod container
func LookForStringInPodExec(ns, podName string, command []string, expectedString string, timeout time.Duration) (result string, err error) {
return LookForString(expectedString, timeout, func() string {
// use the first container
args := []string{"exec", podName, fmt.Sprintf("--namespace=%v", ns), "--"}
args = append(args, command...)
return RunKubectlOrDie(args...)
})
}
// Looks for the given string in the output of fn, repeatedly calling fn until
// the timeout is reached or the string is found. Returns last log and possibly
// error if the string was not found.
func LookForString(expectedString string, timeout time.Duration, fn func() string) (result string, err error) {
for t := time.Now(); time.Since(t) < timeout; time.Sleep(Poll) {
result = fn()
if strings.Contains(result, expectedString) {
return
}
}
err = fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedString, result)
return
}
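
// Hypothetical sketch: blocks until a marker line shows up in a container log,
// returning the last log snapshot (and an error) if the marker never appears.
// The marker string and timeout are illustrative.
func exampleWaitForLogMarker(ns, podName, container string) (string, error) {
	return LookForStringInLog(ns, podName, container, "server started", time.Minute)
}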
// getSvcNodePort returns the node port for the given service:port.
func getSvcNodePort(client clientset.Interface, ns, name string, svcPort int) (int, error) {
svc, err := client.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
if err != nil {
return 0, err
}
for _, p := range svc.Spec.Ports {
if p.Port == int32(svcPort) {
if p.NodePort != 0 {
return int(p.NodePort), nil
}
}
}
return 0, fmt.Errorf(
"No node port found for service %v, port %v", name, svcPort)
}
// GetNodePortURL returns the url to a nodeport Service.
func GetNodePortURL(client clientset.Interface, ns, name string, svcPort int) (string, error) {
nodePort, err := getSvcNodePort(client, ns, name, svcPort)
if err != nil {
return "", err
}
// This list of nodes must not include the master, which is marked
// unschedulable, since the master doesn't run kube-proxy. Without
// kube-proxy NodePorts won't work.
var nodes *v1.NodeList
if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
nodes, err = client.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
if IsRetryableAPIError(err) {
return false, nil
}
return false, err
}
return true, nil
}) != nil {
return "", err
}
if len(nodes.Items) == 0 {
return "", fmt.Errorf("Unable to list nodes in cluster.")
}
for _, node := range nodes.Items {
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeExternalIP {
if address.Address != "" {
return fmt.Sprintf("http://%v:%v", address.Address, nodePort), nil
}
}
}
}
return "", fmt.Errorf("Failed to find external address for service %v", name)
}
// TODO(random-liu): Change this to be a member function of the framework.
func GetPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, false)
}
func getPreviousPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, true)
}
// getPodLogsInternal is a utility function, usable with gomega Eventually, that
// fetches the logs of the given container, optionally from the previous instance.
func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool) (string, error) {
logs, err := c.CoreV1().RESTClient().Get().
Resource("pods").
Namespace(namespace).
Name(podName).SubResource("log").
Param("container", containerName).
Param("previous", strconv.FormatBool(previous)).
Do().
Raw()
if err != nil {
return "", err
}
	if strings.Contains(string(logs), "Internal Error") {
		return "", fmt.Errorf("Fetched log contains \"Internal Error\": %q.", string(logs))
	}
	return string(logs), nil
}
func GetGCECloud() (*gcecloud.GCECloud, error) {
gceCloud, ok := TestContext.CloudConfig.Provider.(*gcecloud.GCECloud)
if !ok {
return nil, fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", TestContext.CloudConfig.Provider)
}
return gceCloud, nil
}
// EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created
// are actually cleaned up. Currently only implemented for GCE/GKE.
func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
return ensureGCELoadBalancerResourcesDeleted(ip, portRange)
}
return nil
}
func ensureGCELoadBalancerResourcesDeleted(ip, portRange string) error {
gceCloud, err := GetGCECloud()
if err != nil {
return err
}
project := TestContext.CloudConfig.ProjectID
region, err := gcecloud.GetGCERegion(TestContext.CloudConfig.Zone)
if err != nil {
return fmt.Errorf("could not get region for zone %q: %v", TestContext.CloudConfig.Zone, err)
}
return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
service := gceCloud.ComputeServices().GA
list, err := service.ForwardingRules.List(project, region).Do()
if err != nil {
return false, err
}
for _, item := range list.Items {
if item.PortRange == portRange && item.IPAddress == ip {
Logf("found a load balancer: %v", item)
return false, nil
}
}
return true, nil
})
}
// The following helper functions can block/unblock network from source
// host to destination host by manipulating iptable rules.
// This function assumes it can ssh to the source host.
//
// Caution:
// Use IP addresses rather than hostnames: iptables resolves hostnames with a
// DNS lookup, which slows down the test and makes it fail outright if DNS is
// absent or broken.
//
// Suggested usage pattern:
// func foo() {
// ...
// defer UnblockNetwork(from, to)
// BlockNetwork(from, to)
// ...
// }
//
func BlockNetwork(from string, to string) {
Logf("block network traffic from %s to %s", from, to)
iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to)
dropCmd := fmt.Sprintf("sudo iptables --insert %s", iptablesRule)
if result, err := SSH(dropCmd, from, TestContext.Provider); result.Code != 0 || err != nil {
LogSSHResult(result)
Failf("Unexpected error: %v", err)
}
}
func UnblockNetwork(from string, to string) {
Logf("Unblock network traffic from %s to %s", from, to)
iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to)
undropCmd := fmt.Sprintf("sudo iptables --delete %s", iptablesRule)
// Undrop command may fail if the rule has never been created.
// In such case we just lose 30 seconds, but the cluster is healthy.
// But if the rule had been created and removing it failed, the node is broken and
	// not coming back. Subsequent tests will run on fewer nodes (some of the tests
// may fail). Manual intervention is required in such case (recreating the
// cluster solves the problem too).
err := wait.Poll(time.Millisecond*100, time.Second*30, func() (bool, error) {
result, err := SSH(undropCmd, from, TestContext.Provider)
if result.Code == 0 && err == nil {
return true, nil
}
LogSSHResult(result)
if err != nil {
Logf("Unexpected error: %v", err)
}
return false, nil
})
if err != nil {
Failf("Failed to remove the iptable REJECT rule. Manual intervention is "+
"required on host %s: remove rule %s, if exists", from, iptablesRule)
}
}
func isElementOf(podUID types.UID, pods *v1.PodList) bool {
for _, pod := range pods.Items {
if pod.UID == podUID {
return true
}
}
return false
}
// timeout for proxy requests.
const proxyTimeout = 2 * time.Minute
// NodeProxyRequest performs a get on a node proxy endpoint given the nodename and rest client.
func NodeProxyRequest(c clientset.Interface, node, endpoint string) (restclient.Result, error) {
// proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call.
// This will leak a goroutine if proxy hangs. #22165
var result restclient.Result
finished := make(chan struct{})
go func() {
result = c.CoreV1().RESTClient().Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
Suffix(endpoint).
Do()
finished <- struct{}{}
}()
select {
case <-finished:
return result, nil
	case <-time.After(proxyTimeout):
		return restclient.Result{}, fmt.Errorf("proxy request to node %v timed out after %v", node, proxyTimeout)
}
}
// GetKubeletPods retrieves the list of pods on the kubelet
func GetKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) {
return getKubeletPods(c, node, "pods")
}
// GetKubeletRunningPods retrieves the list of running pods on the kubelet. The pods
// includes necessary information (e.g., UID, name, namespace for
// pods/containers), but do not contain the full spec.
func GetKubeletRunningPods(c clientset.Interface, node string) (*v1.PodList, error) {
return getKubeletPods(c, node, "runningpods")
}
func getKubeletPods(c clientset.Interface, node, resource string) (*v1.PodList, error) {
result := &v1.PodList{}
client, err := NodeProxyRequest(c, node, resource)
if err != nil {
return &v1.PodList{}, err
}
if err = client.Into(result); err != nil {
return &v1.PodList{}, err
}
return result, nil
}
// LaunchWebserverPod launches a pod serving http on port 8080 to act
// as the target for networking connectivity checks. The ip:port of
// the created pod is returned if the pod is launched successfully.
func LaunchWebserverPod(f *Framework, podName, nodeName string) (ip string) {
containerName := fmt.Sprintf("%s-container", podName)
port := 8080
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: containerName,
Image: imageutils.GetE2EImage(imageutils.Porter),
Env: []v1.EnvVar{{Name: fmt.Sprintf("SERVE_PORT_%d", port), Value: "foo"}},
Ports: []v1.ContainerPort{{ContainerPort: int32(port)}},
},
},
NodeName: nodeName,
RestartPolicy: v1.RestartPolicyNever,
},
}
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
_, err := podClient.Create(pod)
ExpectNoError(err)
ExpectNoError(f.WaitForPodRunning(podName))
createdPod, err := podClient.Get(podName, metav1.GetOptions{})
ExpectNoError(err)
ip = net.JoinHostPort(createdPod.Status.PodIP, strconv.Itoa(port))
Logf("Target pod IP:port is %s", ip)
return
}
type PingCommand string
const (
IPv4PingCommand PingCommand = "ping"
IPv6PingCommand PingCommand = "ping6"
)
// CheckConnectivityToHost launches a pod to test connectivity to the specified
// host. An error will be returned if the host is not reachable from the pod.
//
// An empty nodeName will use the scheduler to choose where the pod is executed.
func CheckConnectivityToHost(f *Framework, nodeName, podName, host string, pingCmd PingCommand, timeout int) error {
contName := fmt.Sprintf("%s-container", podName)
command := []string{
string(pingCmd),
"-c", "3", // send 3 pings
"-W", "2", // wait at most 2 seconds for a reply
"-w", strconv.Itoa(timeout),
host,
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: contName,
Image: BusyBoxImage,
Command: command,
},
},
NodeName: nodeName,
RestartPolicy: v1.RestartPolicyNever,
},
}
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
_, err := podClient.Create(pod)
if err != nil {
return err
}
err = WaitForPodSuccessInNamespace(f.ClientSet, podName, f.Namespace.Name)
if err != nil {
logs, logErr := GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, contName)
if logErr != nil {
Logf("Warning: Failed to get logs from pod %q: %v", pod.Name, logErr)
} else {
Logf("pod %s/%s logs:\n%s", f.Namespace.Name, pod.Name, logs)
}
}
return err
}
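
// Hypothetical sketch: pings a well-known IPv4 address from an auto-scheduled
// pod (empty nodeName); the pod name and 30-second timeout are illustrative.
func examplePingFromCluster(f *Framework) error {
	return CheckConnectivityToHost(f, "", "ping-test", "8.8.8.8", IPv4PingCommand, 30)
}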
// CoreDump SSHs to the master and all nodes and dumps their logs into dir.
// It shells out to cluster/log-dump/log-dump.sh to accomplish this.
func CoreDump(dir string) {
if TestContext.DisableLogDump {
Logf("Skipping dumping logs from cluster")
return
}
var cmd *exec.Cmd
if TestContext.LogexporterGCSPath != "" {
Logf("Dumping logs from nodes to GCS directly at path: %s", TestContext.LogexporterGCSPath)
cmd = exec.Command(path.Join(TestContext.RepoRoot, "cluster", "log-dump", "log-dump.sh"), dir, TestContext.LogexporterGCSPath)
} else {
Logf("Dumping logs locally to: %s", dir)
cmd = exec.Command(path.Join(TestContext.RepoRoot, "cluster", "log-dump", "log-dump.sh"), dir)
}
cmd.Env = append(os.Environ(), fmt.Sprintf("LOG_DUMP_SYSTEMD_SERVICES=%s", parseSystemdServices(TestContext.SystemdServices)))
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
Logf("Error running cluster/log-dump/log-dump.sh: %v", err)
}
}
// parseSystemdServices converts services separator from comma to space.
func parseSystemdServices(services string) string {
return strings.TrimSpace(strings.Replace(services, ",", " ", -1))
}
func UpdatePodWithRetries(client clientset.Interface, ns, name string, update func(*v1.Pod)) (*v1.Pod, error) {
for i := 0; i < 3; i++ {
pod, err := client.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("Failed to get pod %q: %v", name, err)
}
update(pod)
pod, err = client.CoreV1().Pods(ns).Update(pod)
if err == nil {
return pod, nil
}
if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) {
return nil, fmt.Errorf("Failed to update pod %q: %v", name, err)
}
}
return nil, fmt.Errorf("Too many retries updating Pod %q", name)
}
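
// Hypothetical sketch: uses UpdatePodWithRetries to add a label, letting the
// retry loop absorb conflicts with concurrent writers. The label is illustrative.
func exampleLabelPod(client clientset.Interface, ns, name string) (*v1.Pod, error) {
	return UpdatePodWithRetries(client, ns, name, func(pod *v1.Pod) {
		if pod.Labels == nil {
			pod.Labels = map[string]string{}
		}
		pod.Labels["e2e-touched"] = "true"
	})
}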
func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*v1.Pod, error) {
pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
if err != nil {
return []*v1.Pod{}, err
}
ignoreSelector := labels.SelectorFromSet(ignoreLabels)
filtered := []*v1.Pod{}
	for i := range pods.Items {
		// Take the address of the slice element, not of the loop variable,
		// so each appended pointer refers to a distinct pod.
		p := &pods.Items[i]
		if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(p.Labels)) {
			continue
		}
		filtered = append(filtered, p)
	}
return filtered, nil
}
// RunCmd runs cmd using args and returns its stdout and stderr. It also outputs
// cmd's stdout and stderr to their respective OS streams.
func RunCmd(command string, args ...string) (string, string, error) {
return RunCmdEnv(nil, command, args...)
}
// RunCmdEnv runs cmd with the provided environment and args and
// returns its stdout and stderr. It also outputs cmd's stdout and
// stderr to their respective OS streams.
func RunCmdEnv(env []string, command string, args ...string) (string, string, error) {
Logf("Running %s %v", command, args)
var bout, berr bytes.Buffer
cmd := exec.Command(command, args...)
// We also output to the OS stdout/stderr to aid in debugging in case cmd
// hangs and never returns before the test gets killed.
//
// This creates some ugly output because gcloud doesn't always provide
// newlines.
cmd.Stdout = io.MultiWriter(os.Stdout, &bout)
cmd.Stderr = io.MultiWriter(os.Stderr, &berr)
cmd.Env = env
err := cmd.Run()
stdout, stderr := bout.String(), berr.String()
if err != nil {
return "", "", fmt.Errorf("error running %s %v; got error %v, stdout %q, stderr %q",
command, args, err, stdout, stderr)
}
return stdout, stderr, nil
}
// retryCmd runs cmd using args and retries it for up to SingleCallTimeout if
// it returns an error. It returns stdout and stderr.
func retryCmd(command string, args ...string) (string, string, error) {
var err error
stdout, stderr := "", ""
wait.Poll(Poll, SingleCallTimeout, func() (bool, error) {
stdout, stderr, err = RunCmd(command, args...)
if err != nil {
Logf("Got %v", err)
return false, nil
}
return true, nil
})
return stdout, stderr, err
}
// GetPodsScheduled returns the currently scheduled and the not-yet-scheduled Pods in the given pod list.
func GetPodsScheduled(masterNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
for _, pod := range pods.Items {
if !masterNodes.Has(pod.Spec.NodeName) {
if pod.Spec.NodeName != "" {
_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
Expect(scheduledCondition != nil).To(Equal(true))
Expect(scheduledCondition.Status).To(Equal(v1.ConditionTrue))
scheduledPods = append(scheduledPods, pod)
} else {
_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
Expect(scheduledCondition != nil).To(Equal(true))
Expect(scheduledCondition.Status).To(Equal(v1.ConditionFalse))
if scheduledCondition.Reason == "Unschedulable" {
notScheduledPods = append(notScheduledPods, pod)
}
}
}
}
return
}
// WaitForStableCluster waits until all existing pods are scheduled and returns their number.
func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
timeout := 10 * time.Minute
startTime := time.Now()
allPods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
ExpectNoError(err)
// API server returns also Pods that succeeded. We need to filter them out.
currentPods := make([]v1.Pod, 0, len(allPods.Items))
for _, pod := range allPods.Items {
if pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
currentPods = append(currentPods, pod)
}
}
allPods.Items = currentPods
scheduledPods, currentlyNotScheduledPods := GetPodsScheduled(masterNodes, allPods)
for len(currentlyNotScheduledPods) != 0 {
time.Sleep(2 * time.Second)
allPods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
ExpectNoError(err)
scheduledPods, currentlyNotScheduledPods = GetPodsScheduled(masterNodes, allPods)
if startTime.Add(timeout).Before(time.Now()) {
Failf("Timed out after %v waiting for stable cluster.", timeout)
break
}
}
return len(scheduledPods)
}
// GetMasterAndWorkerNodesOrDie will return a list of masters and schedulable worker nodes
func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *v1.NodeList) {
nodes := &v1.NodeList{}
masters := sets.NewString()
	all, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
	ExpectNoError(err)
for _, n := range all.Items {
if system.IsMasterNode(n.Name) {
masters.Insert(n.Name)
} else if isNodeSchedulable(&n) && isNodeUntainted(&n) {
nodes.Items = append(nodes.Items, n)
}
}
return masters, nodes
}
func ListNamespaceEvents(c clientset.Interface, ns string) error {
ls, err := c.CoreV1().Events(ns).List(metav1.ListOptions{})
if err != nil {
return err
}
for _, event := range ls.Items {
glog.Infof("Event(%#v): type: '%v' reason: '%v' %v", event.InvolvedObject, event.Type, event.Reason, event.Message)
}
return nil
}
// E2ETestNodePreparer implements testutil.TestNodePreparer interface, which is used
// to create/modify Nodes before running a test.
type E2ETestNodePreparer struct {
client clientset.Interface
// Specifies how many nodes should be modified using the given strategy.
// Only one strategy can be applied to a single Node, so there needs to
	// be at least <sum of counts> Nodes in the cluster.
countToStrategy []testutil.CountToStrategy
nodeToAppliedStrategy map[string]testutil.PrepareNodeStrategy
}
func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy []testutil.CountToStrategy) testutil.TestNodePreparer {
return &E2ETestNodePreparer{
client: client,
countToStrategy: countToStrategy,
nodeToAppliedStrategy: make(map[string]testutil.PrepareNodeStrategy),
}
}
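
// Hypothetical sketch: prepares five nodes with the no-op strategy from the
// testutil package (assumed to be available as TrivialNodePrepareStrategy).
func exampleNodePreparer(client clientset.Interface) testutil.TestNodePreparer {
	return NewE2ETestNodePreparer(client, []testutil.CountToStrategy{
		{Count: 5, Strategy: &testutil.TrivialNodePrepareStrategy{}},
	})
}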
func (p *E2ETestNodePreparer) PrepareNodes() error {
nodes := GetReadySchedulableNodesOrDie(p.client)
	numTemplates := 0
	for _, v := range p.countToStrategy {
		numTemplates += v.Count
	}
if numTemplates > len(nodes.Items) {
return fmt.Errorf("Can't prepare Nodes. Got more templates than existing Nodes.")
}
index := 0
sum := 0
for _, v := range p.countToStrategy {
sum += v.Count
for ; index < sum; index++ {
if err := testutil.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {
glog.Errorf("Aborting node preparation: %v", err)
return err
}
p.nodeToAppliedStrategy[nodes.Items[index].Name] = v.Strategy
}
}
return nil
}
func (p *E2ETestNodePreparer) CleanupNodes() error {
var encounteredError error
nodes := GetReadySchedulableNodesOrDie(p.client)
for i := range nodes.Items {
var err error
name := nodes.Items[i].Name
strategy, found := p.nodeToAppliedStrategy[name]
if found {
if err = testutil.DoCleanupNode(p.client, name, strategy); err != nil {
glog.Errorf("Skipping cleanup of Node: failed update of %v: %v", name, err)
encounteredError = err
}
}
}
return encounteredError
}
func GetClusterID(c clientset.Interface) (string, error) {
cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(gcecloud.UIDConfigMapName, metav1.GetOptions{})
if err != nil || cm == nil {
return "", fmt.Errorf("error getting cluster ID: %v", err)
}
clusterID, clusterIDExists := cm.Data[gcecloud.UIDCluster]
providerID, providerIDExists := cm.Data[gcecloud.UIDProvider]
if !clusterIDExists {
return "", fmt.Errorf("cluster ID not set")
}
if providerIDExists {
return providerID, nil
}
return clusterID, nil
}
// CleanupGCEResources cleans up GCE Service Type=LoadBalancer resources with
// the given name. The name is usually the UUID of the Service prefixed with an
// alpha-numeric character ('a') to work around cloudprovider rules.
func CleanupGCEResources(c clientset.Interface, loadBalancerName, zone string) (retErr error) {
gceCloud, err := GetGCECloud()
if err != nil {
return err
}
region, err := gcecloud.GetGCERegion(zone)
if err != nil {
return fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err)
}
if err := gceCloud.DeleteFirewall(gcecloud.MakeFirewallName(loadBalancerName)); err != nil &&
!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
retErr = err
}
if err := gceCloud.DeleteRegionForwardingRule(loadBalancerName, region); err != nil &&
!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
retErr = fmt.Errorf("%v\n%v", retErr, err)
}
if err := gceCloud.DeleteRegionAddress(loadBalancerName, region); err != nil &&
!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
retErr = fmt.Errorf("%v\n%v", retErr, err)
}
clusterID, err := GetClusterID(c)
if err != nil {
retErr = fmt.Errorf("%v\n%v", retErr, err)
return
}
hcNames := []string{gcecloud.MakeNodesHealthCheckName(clusterID)}
hc, getErr := gceCloud.GetHttpHealthCheck(loadBalancerName)
if getErr != nil && !IsGoogleAPIHTTPErrorCode(getErr, http.StatusNotFound) {
retErr = fmt.Errorf("%v\n%v", retErr, getErr)
return
}
if hc != nil {
hcNames = append(hcNames, hc.Name)
}
if err := gceCloud.DeleteExternalTargetPoolAndChecks(&v1.Service{}, loadBalancerName, region, clusterID, hcNames...); err != nil &&
!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
retErr = fmt.Errorf("%v\n%v", retErr, err)
}
return
}
// IsGoogleAPIHTTPErrorCode returns true if the error is a google api
// error matching the corresponding HTTP error code.
func IsGoogleAPIHTTPErrorCode(err error, code int) bool {
apiErr, ok := err.(*googleapi.Error)
return ok && apiErr.Code == code
}
// getMaster populates the externalIP, internalIP and hostname fields of the master.
// If any of these is unavailable, it is set to "".
func getMaster(c clientset.Interface) Address {
master := Address{}
// Populate the internal IP.
eps, err := c.CoreV1().Endpoints(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
if err != nil {
Failf("Failed to get kubernetes endpoints: %v", err)
}
	if len(eps.Subsets) != 1 || len(eps.Subsets[0].Addresses) != 1 {
		Failf("There must be exactly one endpoint for the kubernetes service: %+v", eps)
}
master.internalIP = eps.Subsets[0].Addresses[0].IP
// Populate the external IP/hostname.
url, err := url.Parse(TestContext.Host)
if err != nil {
Failf("Failed to parse hostname: %v", err)
}
if net.ParseIP(url.Host) != nil {
// TODO: Check that it is external IP (not having a reserved IP address as per RFC1918).
master.externalIP = url.Host
} else {
master.hostname = url.Host
}
return master
}
// GetMasterAddress returns the hostname/external IP/internal IP as appropriate for e2e tests on a particular provider
// which is the address of the interface used for communication with the kubelet.
func GetMasterAddress(c clientset.Interface) string {
master := getMaster(c)
switch TestContext.Provider {
case "gce", "gke":
return master.externalIP
case "aws":
return awsMasterIP
default:
Failf("This test is not supported for provider %s and should be disabled", TestContext.Provider)
}
return ""
}
// GetNodeExternalIP returns node external IP concatenated with port 22 for ssh
// e.g. 1.2.3.4:22
func GetNodeExternalIP(node *v1.Node) string {
Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
if a.Type == v1.NodeExternalIP {
host = net.JoinHostPort(a.Address, sshPort)
break
}
}
if host == "" {
Failf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
}
return host
}
// SimpleGET executes a GET request on the given url and returns an error if a non-200 status is returned.
func SimpleGET(c *http.Client, url, host string) (string, error) {
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return "", err
}
req.Host = host
res, err := c.Do(req)
if err != nil {
return "", err
}
defer res.Body.Close()
rawBody, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", err
}
body := string(rawBody)
if res.StatusCode != http.StatusOK {
err = fmt.Errorf(
"GET returned http error %v", res.StatusCode)
}
return body, err
}
// PollURL polls the url until it responds with a healthy http code. If
// expectUnreachable is true, it instead returns on the first non-healthy http code.
func PollURL(route, host string, timeout time.Duration, interval time.Duration, httpClient *http.Client, expectUnreachable bool) error {
var lastBody string
pollErr := wait.PollImmediate(interval, timeout, func() (bool, error) {
var err error
lastBody, err = SimpleGET(httpClient, route, host)
if err != nil {
Logf("host %v path %v: %v unreachable", host, route, err)
return expectUnreachable, nil
}
Logf("host %v path %v: reached", host, route)
return !expectUnreachable, nil
})
if pollErr != nil {
return fmt.Errorf("Failed to execute a successful GET within %v, Last response body for %v, host %v:\n%v\n\n%v\n",
timeout, route, host, lastBody, pollErr)
}
return nil
}
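
// Hypothetical sketch: polls an ingress route every five seconds for up to
// five minutes until it answers with HTTP 200; the client timeout is illustrative.
func examplePollRoute(route, hostHeader string) error {
	httpClient := &http.Client{Timeout: 10 * time.Second}
	return PollURL(route, hostHeader, 5*time.Minute, 5*time.Second, httpClient, false)
}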
func DescribeIng(ns string) {
Logf("\nOutput of kubectl describe ing:\n")
desc, _ := RunKubectl(
"describe", "ing", fmt.Sprintf("--namespace=%v", ns))
Logf(desc)
}
// NewTestPod returns a pod that has the specified requests and limits
func (f *Framework) NewTestPod(name string, requests v1.ResourceList, limits v1.ResourceList) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "pause",
Image: GetPauseImageName(f.ClientSet),
Resources: v1.ResourceRequirements{
Requests: requests,
Limits: limits,
},
},
},
},
}
}
// CreateEmptyFileOnPod creates an empty file at the given path in the pod.
func CreateEmptyFileOnPod(namespace string, podName string, filePath string) error {
_, err := RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/sh", "-c", fmt.Sprintf("touch %s", filePath))
return err
}
// GetAzureCloud returns azure cloud provider
func GetAzureCloud() (*azure.Cloud, error) {
cloud, ok := TestContext.CloudConfig.Provider.(*azure.Cloud)
if !ok {
return nil, fmt.Errorf("failed to convert CloudConfig.Provider to Azure: %#v", TestContext.CloudConfig.Provider)
}
return cloud, nil
}
func PrintSummaries(summaries []TestDataSummary, testBaseName string) {
now := time.Now()
for i := range summaries {
Logf("Printing summary: %v", summaries[i].SummaryKind())
switch TestContext.OutputPrintType {
case "hr":
if TestContext.ReportDir == "" {
Logf(summaries[i].PrintHumanReadable())
} else {
// TODO: learn to extract test name and append it to the kind instead of timestamp.
filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".txt")
if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintHumanReadable()), 0644); err != nil {
Logf("Failed to write file %v with test performance data: %v", filePath, err)
}
}
case "json":
fallthrough
default:
if TestContext.OutputPrintType != "json" {
Logf("Unknown output type: %v. Printing JSON", TestContext.OutputPrintType)
}
if TestContext.ReportDir == "" {
Logf("%v JSON\n%v", summaries[i].SummaryKind(), summaries[i].PrintJSON())
Logf("Finished")
} else {
// TODO: learn to extract test name and append it to the kind instead of timestamp.
filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".json")
Logf("Writing to %s", filePath)
if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintJSON()), 0644); err != nil {
Logf("Failed to write file %v with test performance data: %v", filePath, err)
}
}
}
}
}
// DumpDebugInfo logs "kubectl describe" output and the last 100 log lines for
// every pod in the given namespace.
func DumpDebugInfo(c clientset.Interface, ns string) {
sl, _ := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
for _, s := range sl.Items {
desc, _ := RunKubectl("describe", "po", s.Name, fmt.Sprintf("--namespace=%v", ns))
Logf("\nOutput of kubectl describe %v:\n%v", s.Name, desc)
l, _ := RunKubectl("logs", s.Name, fmt.Sprintf("--namespace=%v", ns), "--tail=100")
Logf("\nLast 100 log lines of %v:\n%v", s.Name, l)
}
}
// IsRetryableAPIError returns true if the given API error is transient, so the
// request that produced it may be retried.
func IsRetryableAPIError(err error) bool {
return apierrs.IsTimeout(err) || apierrs.IsServerTimeout(err) || apierrs.IsTooManyRequests(err)
}
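// Illustrative usage (a sketch; the List call is only an example): callers
// typically pair IsRetryableAPIError with a poll loop, e.g.:
//
//	err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
//		if _, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{}); err != nil {
//			if IsRetryableAPIError(err) {
//				return false, nil // transient, retry
//			}
//			return false, err // permanent, give up
//		}
//		return true, nil
//	})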
// DsFromManifest reads a .json/yaml file and returns the daemonset in it.
func DsFromManifest(url string) (*extensions.DaemonSet, error) {
var controller extensions.DaemonSet
Logf("Parsing ds from %v", url)
var response *http.Response
var err error
for i := 1; i <= 5; i++ {
response, err = http.Get(url)
if err == nil && response.StatusCode == 200 {
break
}
time.Sleep(time.Duration(i) * time.Second)
}
if err != nil {
return nil, fmt.Errorf("failed to get url: %v", err)
}
	if response.StatusCode != http.StatusOK {
return nil, fmt.Errorf("invalid http response status: %v", response.StatusCode)
}
defer response.Body.Close()
data, err := ioutil.ReadAll(response.Body)
if err != nil {
return nil, fmt.Errorf("failed to read html response body: %v", err)
}
json, err := utilyaml.ToJSON(data)
if err != nil {
return nil, fmt.Errorf("failed to parse data to json: %v", err)
}
err = runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), json, &controller)
if err != nil {
return nil, fmt.Errorf("failed to decode DaemonSet spec: %v", err)
}
return &controller, nil
}
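// Illustrative usage (a sketch; the URL is a placeholder):
//
//	ds, err := DsFromManifest("https://example.com/manifests/node-problem-detector.yaml")
//	if err != nil {
//		Failf("failed to load DaemonSet manifest: %v", err)
//	}
//	Logf("loaded DaemonSet %q", ds.Name)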
// waitForServerPreferredNamespacedResources waits until server preferred namespaced resources could be successfully discovered.
// TODO: Fix https://github.com/kubernetes/kubernetes/issues/55768 and remove the following retry.
func waitForServerPreferredNamespacedResources(d discovery.DiscoveryInterface, timeout time.Duration) ([]*metav1.APIResourceList, error) {
Logf("Waiting up to %v for server preferred namespaced resources to be successfully discovered", timeout)
var resources []*metav1.APIResourceList
if err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
var err error
resources, err = d.ServerPreferredNamespacedResources()
if err == nil || isDynamicDiscoveryError(err) {
return true, nil
}
if !discovery.IsGroupDiscoveryFailedError(err) {
return false, err
}
Logf("Error discoverying server preferred namespaced resources: %v, retrying in %v.", err, Poll)
return false, nil
}); err != nil {
return nil, err
}
return resources, nil
}
// WaitForPersistentVolumeClaimDeleted waits for a PersistentVolumeClaim to be
// removed from the system, or for the timeout to occur, whichever comes first.
func WaitForPersistentVolumeClaimDeleted(c clientset.Interface, ns string, pvcName string, poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolumeClaim %s to be removed", timeout, pvcName)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
_, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
Logf("Claim %q in namespace %q doesn't exist in the system", pvcName, ns)
return nil
}
Logf("Failed to get claim %q in namespace %q, retrying in %v. Error: %v", pvcName, ns, Poll, err)
}
}
return fmt.Errorf("PersistentVolumeClaim %s is not removed from the system within %v", pvcName, timeout)
}
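// Illustrative usage (a sketch; interval and timeout are example values):
//
//	if err := WaitForPersistentVolumeClaimDeleted(c, ns, "test-claim", 2*time.Second, 5*time.Minute); err != nil {
//		Failf("PVC was not deleted: %v", err)
//	}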
| [
"\"KUBE_SSH_USER\"",
"\"USER\"",
"\"HOME\"",
"\"AWS_SSH_KEY\"",
"\"LOCAL_SSH_KEY\"",
"\"KUBE_SSH_KEY\""
]
| []
| [
"KUBE_SSH_KEY",
"LOCAL_SSH_KEY",
"AWS_SSH_KEY",
"KUBE_SSH_USER",
"USER",
"HOME"
]
| [] | ["KUBE_SSH_KEY", "LOCAL_SSH_KEY", "AWS_SSH_KEY", "KUBE_SSH_USER", "USER", "HOME"] | go | 6 | 0 | |
f1tenth_gym_ros/scripts/gym_bridge_bare.py | #!/usr/bin/env python3
import rospy
import os
from sensor_msgs.msg import LaserScan
from ackermann_msgs.msg import AckermannDriveStamped
from f1tenth_gym_ros.msg import RaceInfo
current_dir = os.path.abspath(os.path.dirname(__file__))
package_dir = os.path.abspath(os.path.join(current_dir, ".."))
import numpy as np
import gym
import time
class Agent(object):
def __init__(self, id, drive_callback):
self.id = id
self.scan_topic = '/%s/scan' % self.id
self.drive_topic = '/%s/drive' % self.id
self.collision = False
self.requested_steer = 0.0
self.requested_speed = 0.0
self.drive_published = False
self.scan = False
self.scan_pub = rospy.Publisher(self.scan_topic, LaserScan, queue_size=1)
self.drive_sub = rospy.Subscriber(self.drive_topic, AckermannDriveStamped, drive_callback, queue_size=1)
class GymBridge(object):
def __init__(self):
# get env vars
self.race_scenario = int(os.environ.get('RACE_SCENARIO', 0))
self.agents = []
        # this is filled in when the match finishes
self.info = {}
self.agents.append(Agent(os.environ.get("EGO_ID"), self.drive_callback))
if self.race_scenario > 0:
self.agents.append(Agent(os.environ.get('OPP_ID'), self.opp_drive_callback))
# Topic Names
self.race_info_topic = rospy.get_param('race_info_topic')
# Map
self.map_path = os.environ.get('RACE_MAP_PATH')
self.map_img_ext = os.environ.get('RACE_MAP_IMG_EXT')
# Scan simulation params
scan_fov = rospy.get_param('scan_fov')
scan_beams = rospy.get_param('scan_beams')
self.angle_min = -scan_fov / 2.
self.angle_max = scan_fov / 2.
self.angle_inc = scan_fov / scan_beams
# publishers
self.info_pub = rospy.Publisher(self.race_info_topic, RaceInfo, queue_size=1)
# Launch
driver_count = 2 if self.race_scenario > 0 else 1
self.env = gym.make('f110_gym:f110-v0',
map=self.map_path[:-5],
map_ext=self.map_img_ext, num_agents=driver_count)
# init gym backend
poses = [[0. + (i * 0.75), 0. - (i*1.5), np.radians(60)] for i in range(driver_count)]
self.obs, _, self.done, _ = self.env.reset(poses=np.array(poses))
if os.environ.get("DISPLAY"):
self.env.render()
self.update_sim_state()
# Timer
self.timer = rospy.Timer(rospy.Duration(0.004), self.timer_callback)
self.drive_timer = rospy.Timer(rospy.Duration(0.02), self.drive_timer_callback)
def spin(self):
print("Starting F1Tenth Bridge")
# run until challenge completes or ros shuts down
while not self.done and not rospy.core.is_shutdown():
rospy.rostime.wallsleep(0.04)
if os.environ.get("DISPLAY"):
self.env.render()
print("Shutting down F1Tenth Bridge")
def update_sim_state(self):
for i, scan in enumerate(self.obs['scans']):
self.agents[i].scan = scan
for i, collision in enumerate(self.obs['collisions']):
self.agents[i].collision = bool(collision)
def drive_callback(self, drive_msg):
self.agents[0].requested_speed = drive_msg.drive.speed
self.agents[0].requested_steer = drive_msg.drive.steering_angle
self.agents[0].drive_published = True
def opp_drive_callback(self, drive_msg):
self.agents[1].requested_speed = drive_msg.drive.speed
self.agents[1].requested_steer = drive_msg.drive.steering_angle
self.agents[1].drive_published = True
def drive_timer_callback(self, timer):
        # wait until every agent has started publishing drive commands
        published = all(a.drive_published for a in self.agents)
        if not published:
            return
# update simulation
actions = [[a.requested_steer, a.requested_speed] for a in self.agents]
self.obs, _, self.done, _ = self.env.step(np.array(actions))
# update scan data
self.update_sim_state()
# if match is completed, we set the bridge.info property
if self.done:
info = {
"ego_collision": self.agents[0].collision,
"ego_elapsed_time": float(self.obs['lap_times'][0]),
"ego_lap_count": float(self.obs['lap_counts'][0]),
}
if len(self.agents) > 1:
info["opp_collision"] = self.agents[1].collision
info["opp_elapsed_time"] = float(self.obs['lap_times'][1])
info["opp_lap_count"] = float(self.obs['lap_counts'][1])
self.info = info
def timer_callback(self, timer):
# once match is completed, stop publishing information
if self.done:
return
ts = rospy.Time.now()
def generate_scan_message(name, ranges):
scan = LaserScan()
scan.header.stamp = ts
scan.header.frame_id = '%s/laser' % name
scan.angle_min = self.angle_min
scan.angle_max = self.angle_max
scan.angle_increment = self.angle_inc
scan.range_min = 0.
scan.range_max = 30.
scan.ranges = ranges
return scan
names = ["ego_racecar", "opp_racecar"]
for i, agent in enumerate(self.agents):
scan = generate_scan_message(names[i], agent.scan)
            try:
                agent.scan_pub.publish(scan)
            except Exception:
                # the publisher may already be closed during shutdown; ignore
                pass
# pub race info
self.publish_race_info(ts)
def publish_race_info(self, ts):
info = RaceInfo()
info.header.stamp = ts
info.ego_collision = self.agents[0].collision
info.ego_elapsed_time = self.obs['lap_times'][0]
info.ego_lap_count = self.obs['lap_counts'][0]
info.opp_collision = self.agents[1].collision if len(self.agents) > 1 else 0
info.opp_elapsed_time = self.obs['lap_times'][1] if len(self.agents) > 1 else 0.0
info.opp_lap_count = self.obs['lap_counts'][1] if len(self.agents) > 1 else 0
self.info_pub.publish(info)
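# --- Illustrative sketch (an assumption, not part of the original bridge) ---
# A driver node interacts with this bridge by publishing AckermannDriveStamped
# messages on /<agent_id>/drive; the agent id, speed, and rate below are
# example values chosen for illustration only. This function is never called.
def _example_constant_driver(agent_id='ego_id', speed=1.0, steer=0.0):
    """Publish a constant drive command to /<agent_id>/drive (illustrative only)."""
    pub = rospy.Publisher('/%s/drive' % agent_id, AckermannDriveStamped, queue_size=1)
    rate = rospy.Rate(50)  # assumed publish rate
    while not rospy.is_shutdown():
        msg = AckermannDriveStamped()
        msg.drive.speed = speed
        msg.drive.steering_angle = steer
        pub.publish(msg)
        rate.sleep()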
if __name__ == '__main__':
rospy.init_node('gym_bridge')
gym_bridge = GymBridge()
gym_bridge.spin()
# once we're here, we know that competition is completed, so we publish info to API
    print(gym_bridge.info)
time.sleep(1)
| []
| []
| [
"RACE_MAP_PATH",
"RACE_MAP_IMG_EXT",
"RACE_SCENARIO",
"EGO_ID",
"OPP_ID",
"DISPLAY"
]
| [] | ["RACE_MAP_PATH", "RACE_MAP_IMG_EXT", "RACE_SCENARIO", "EGO_ID", "OPP_ID", "DISPLAY"] | python | 6 | 0 | |
vk.py | import requests
import json
import reddit
import vk_requests
import time
import os
GROUP_ID = "173009640"
APP_ID = "6730383"
LOGIN = os.environ.get("VK_MEMES_LOGIN", '')
PASSWORD = os.environ.get("VK_MEMES_PASSWORD", '')
def main():
print("start")
api = vk_requests.create_api(app_id=APP_ID, login=LOGIN, password=PASSWORD, scope='wall,photos')
posts = reddit.get_photos_from_reddit('memes', 24)
print("downloaded {count} posts".format(count=len(posts)))
    current_time = int(time.time())
    publish_time = current_time + 60 * 42  # schedule the first post 42 minutes from now
for post in posts:
photo = '/tmp/'+post['photo']
upload_url = api.photos.getWallUploadServer(group_id=GROUP_ID)['upload_url']
        with open(photo, "rb") as photo_file:
            request = requests.post(upload_url, files={'photo': photo_file})
params = {
'server': request.json()['server'],
'photo': request.json()['photo'],
'hash': request.json()['hash'],
'group_id': GROUP_ID
}
save_r = api.photos.saveWallPhoto(**params)
photo_id = save_r[0]['id']
owner_id = save_r[0]['owner_id']
params = {'attachments': 'photo{owner_id}_{photo_id}'.format(owner_id=owner_id, photo_id=photo_id),
'message': post['title'],
'owner_id': '-' + GROUP_ID,
'from_group': '1',
'publish_date': publish_time,
}
api.wall.post(**params)
print("published {title}".format(title=post['title']))
        publish_time = publish_time + 60 * 60  # space subsequent posts one hour apart
time.sleep(1)
print("success")
if __name__ == '__main__':
main()
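# Usage sketch (an assumption, not documented in this script): credentials are
# supplied via environment variables before running, e.g.
#   export VK_MEMES_LOGIN='user@example.com'
#   export VK_MEMES_PASSWORD='secret'
#   python vk.py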
| []
| []
| [
"VK_MEMES_LOGIN",
"VK_MEMES_PASSWORD"
]
| [] | ["VK_MEMES_LOGIN", "VK_MEMES_PASSWORD"] | python | 2 | 0 | |
venv/lib/python3.9/site-packages/IPython/core/interactiveshell.py | # -*- coding: utf-8 -*-
"""Main IPython class."""
#-----------------------------------------------------------------------------
# Copyright (C) 2001 Janko Hauser <[email protected]>
# Copyright (C) 2001-2007 Fernando Perez. <[email protected]>
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import abc
import ast
import atexit
import builtins as builtin_mod
import dis
import functools
import inspect
import os
import re
import runpy
import subprocess
import sys
import tempfile
import traceback
import types
import warnings
from ast import stmt
from io import open as io_open
from logging import error
from pathlib import Path
from typing import Callable
from typing import List as ListType
from typing import Optional, Tuple
from warnings import warn
from pickleshare import PickleShareDB
from tempfile import TemporaryDirectory
from traitlets import (
Any,
Bool,
CaselessStrEnum,
Dict,
Enum,
Instance,
Integer,
List,
Type,
Unicode,
default,
observe,
validate,
)
from traitlets.config.configurable import SingletonConfigurable
from traitlets.utils.importstring import import_item
import IPython.core.hooks
from IPython.core import magic, oinspect, page, prefilter, ultratb
from IPython.core.alias import Alias, AliasManager
from IPython.core.autocall import ExitAutocall
from IPython.core.builtin_trap import BuiltinTrap
from IPython.core.compilerop import CachingCompiler, check_linecache_ipython
from IPython.core.debugger import InterruptiblePdb
from IPython.core.display_trap import DisplayTrap
from IPython.core.displayhook import DisplayHook
from IPython.core.displaypub import DisplayPublisher
from IPython.core.error import InputRejected, UsageError
from IPython.core.events import EventManager, available_events
from IPython.core.extensions import ExtensionManager
from IPython.core.formatters import DisplayFormatter
from IPython.core.history import HistoryManager
from IPython.core.inputtransformer2 import ESC_MAGIC, ESC_MAGIC2
from IPython.core.logger import Logger
from IPython.core.macro import Macro
from IPython.core.payload import PayloadManager
from IPython.core.prefilter import PrefilterManager
from IPython.core.profiledir import ProfileDir
from IPython.core.usage import default_banner
from IPython.display import display
from IPython.paths import get_ipython_dir
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils import PyColorize, io, openpy, py3compat
from IPython.utils.decorators import undoc
from IPython.utils.io import ask_yes_no
from IPython.utils.ipstruct import Struct
from IPython.utils.path import ensure_dir_exists, get_home_dir, get_py_filename
from IPython.utils.process import getoutput, system
from IPython.utils.strdispatch import StrDispatch
from IPython.utils.syspathcontext import prepended_to_syspath
from IPython.utils.text import DollarFormatter, LSString, SList, format_screen
sphinxify: Optional[Callable]
try:
import docrepr.sphinxify as sphx
def sphinxify(oinfo):
wrapped_docstring = sphx.wrap_main_docstring(oinfo)
def sphinxify_docstring(docstring):
with TemporaryDirectory() as dirname:
return {
"text/html": sphx.sphinxify(wrapped_docstring, dirname),
"text/plain": docstring,
}
return sphinxify_docstring
except ImportError:
sphinxify = None
class ProvisionalWarning(DeprecationWarning):
"""
Warning class for unstable features
"""
pass
from ast import Module
_assign_nodes = (ast.AugAssign, ast.AnnAssign, ast.Assign)
_single_targets_nodes = (ast.AugAssign, ast.AnnAssign)
#-----------------------------------------------------------------------------
# Await Helpers
#-----------------------------------------------------------------------------
# we still need to run things using the asyncio eventloop, but there is no
# async integration
from .async_helpers import (
_asyncio_runner,
_curio_runner,
_pseudo_sync_runner,
_should_be_async,
_trio_runner,
)
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# compiled regexps for autoindent management
dedent_re = re.compile(r'^\s+raise|^\s+return|^\s+pass')
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
@undoc
def softspace(file, newvalue):
"""Copied from code.py, to remove the dependency"""
oldvalue = 0
try:
oldvalue = file.softspace
except AttributeError:
pass
try:
file.softspace = newvalue
except (AttributeError, TypeError):
# "attribute-less object" or "read-only attributes"
pass
return oldvalue
@undoc
def no_op(*a, **kw):
pass
class SpaceInInput(Exception): pass
class SeparateUnicode(Unicode):
r"""A Unicode subclass to validate separate_in, separate_out, etc.
This is a Unicode based trait that converts '0'->'' and ``'\\n'->'\n'``.
"""
def validate(self, obj, value):
if value == '0': value = ''
value = value.replace('\\n','\n')
return super(SeparateUnicode, self).validate(obj, value)
@undoc
class DummyMod(object):
"""A dummy module used for IPython's interactive module when
a namespace must be assigned to the module's __dict__."""
__spec__ = None
class ExecutionInfo(object):
"""The arguments used for a call to :meth:`InteractiveShell.run_cell`
Stores information about what is going to happen.
"""
raw_cell = None
store_history = False
silent = False
shell_futures = True
def __init__(self, raw_cell, store_history, silent, shell_futures):
self.raw_cell = raw_cell
self.store_history = store_history
self.silent = silent
self.shell_futures = shell_futures
def __repr__(self):
name = self.__class__.__qualname__
raw_cell = ((self.raw_cell[:50] + '..')
if len(self.raw_cell) > 50 else self.raw_cell)
return '<%s object at %x, raw_cell="%s" store_history=%s silent=%s shell_futures=%s>' %\
(name, id(self), raw_cell, self.store_history, self.silent, self.shell_futures)
class ExecutionResult(object):
"""The result of a call to :meth:`InteractiveShell.run_cell`
Stores information about what took place.
"""
execution_count = None
error_before_exec = None
error_in_exec: Optional[BaseException] = None
info = None
result = None
def __init__(self, info):
self.info = info
@property
def success(self):
return (self.error_before_exec is None) and (self.error_in_exec is None)
def raise_error(self):
"""Reraises error if `success` is `False`, otherwise does nothing"""
if self.error_before_exec is not None:
raise self.error_before_exec
if self.error_in_exec is not None:
raise self.error_in_exec
def __repr__(self):
name = self.__class__.__qualname__
return '<%s object at %x, execution_count=%s error_before_exec=%s error_in_exec=%s info=%s result=%s>' %\
(name, id(self), self.execution_count, self.error_before_exec, self.error_in_exec, repr(self.info), repr(self.result))
class InteractiveShell(SingletonConfigurable):
"""An enhanced, interactive shell for Python."""
_instance = None
ast_transformers = List([], help=
"""
A list of ast.NodeTransformer subclass instances, which will be applied
to user input before code is run.
"""
).tag(config=True)
autocall = Enum((0,1,2), default_value=0, help=
"""
Make IPython automatically call any callable object even if you didn't
type explicit parentheses. For example, 'str 43' becomes 'str(43)'
automatically. The value can be '0' to disable the feature, '1' for
'smart' autocall, where it is not applied if there are no more
arguments on the line, and '2' for 'full' autocall, where all callable
objects are automatically called (even if no arguments are present).
"""
).tag(config=True)
autoindent = Bool(True, help=
"""
Autoindent IPython code entered interactively.
"""
).tag(config=True)
autoawait = Bool(True, help=
"""
Automatically run await statement in the top level repl.
"""
).tag(config=True)
loop_runner_map ={
'asyncio':(_asyncio_runner, True),
'curio':(_curio_runner, True),
'trio':(_trio_runner, True),
'sync': (_pseudo_sync_runner, False)
}
loop_runner = Any(default_value="IPython.core.interactiveshell._asyncio_runner",
allow_none=True,
help="""Select the loop runner that will be used to execute top-level asynchronous code"""
).tag(config=True)
@default('loop_runner')
def _default_loop_runner(self):
return import_item("IPython.core.interactiveshell._asyncio_runner")
@validate('loop_runner')
def _import_runner(self, proposal):
if isinstance(proposal.value, str):
if proposal.value in self.loop_runner_map:
runner, autoawait = self.loop_runner_map[proposal.value]
self.autoawait = autoawait
return runner
runner = import_item(proposal.value)
if not callable(runner):
raise ValueError('loop_runner must be callable')
return runner
if not callable(proposal.value):
raise ValueError('loop_runner must be callable')
return proposal.value
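    # Illustrative configuration (a sketch; the values are examples, not
    # defaults): the runner can be selected by name or by dotted import path,
    # e.g. in ipython_config.py:
    #   c.InteractiveShell.loop_runner = 'trio'
    #   c.InteractiveShell.loop_runner = 'mypkg.my_runner'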
automagic = Bool(True, help=
"""
Enable magic commands to be called without the leading %.
"""
).tag(config=True)
banner1 = Unicode(default_banner,
help="""The part of the banner to be printed before the profile"""
).tag(config=True)
banner2 = Unicode('',
help="""The part of the banner to be printed after the profile"""
).tag(config=True)
cache_size = Integer(1000, help=
"""
Set the size of the output cache. The default is 1000, you can
change it permanently in your config file. Setting it to 0 completely
disables the caching system, and the minimum value accepted is 3 (if
you provide a value less than 3, it is reset to 0 and a warning is
issued). This limit is defined because otherwise you'll spend more
        time re-flushing a too-small cache than working.
"""
).tag(config=True)
color_info = Bool(True, help=
"""
Use colors for displaying information about objects. Because this
information is passed through a pager (like 'less'), and some pagers
get confused with color codes, this capability can be turned off.
"""
).tag(config=True)
colors = CaselessStrEnum(('Neutral', 'NoColor','LightBG','Linux'),
default_value='Neutral',
help="Set the color scheme (NoColor, Neutral, Linux, or LightBG)."
).tag(config=True)
debug = Bool(False).tag(config=True)
disable_failing_post_execute = Bool(False,
help="Don't call post-execute functions that have failed in the past."
).tag(config=True)
display_formatter = Instance(DisplayFormatter, allow_none=True)
displayhook_class = Type(DisplayHook)
display_pub_class = Type(DisplayPublisher)
compiler_class = Type(CachingCompiler)
sphinxify_docstring = Bool(False, help=
"""
Enables rich html representation of docstrings. (This requires the
docrepr module).
""").tag(config=True)
@observe("sphinxify_docstring")
def _sphinxify_docstring_changed(self, change):
if change['new']:
warn("`sphinxify_docstring` is provisional since IPython 5.0 and might change in future versions." , ProvisionalWarning)
enable_html_pager = Bool(False, help=
"""
(Provisional API) enables html representation in mime bundles sent
to pagers.
""").tag(config=True)
@observe("enable_html_pager")
def _enable_html_pager_changed(self, change):
if change['new']:
warn("`enable_html_pager` is provisional since IPython 5.0 and might change in future versions.", ProvisionalWarning)
data_pub_class = None
exit_now = Bool(False)
exiter = Instance(ExitAutocall)
@default('exiter')
def _exiter_default(self):
return ExitAutocall(self)
# Monotonically increasing execution counter
execution_count = Integer(1)
filename = Unicode("<ipython console>")
ipython_dir= Unicode('').tag(config=True) # Set to get_ipython_dir() in __init__
# Used to transform cells before running them, and check whether code is complete
input_transformer_manager = Instance('IPython.core.inputtransformer2.TransformerManager',
())
@property
def input_transformers_cleanup(self):
return self.input_transformer_manager.cleanup_transforms
input_transformers_post = List([],
help="A list of string input transformers, to be applied after IPython's "
"own input transformations."
)
@property
def input_splitter(self):
"""Make this available for backward compatibility (pre-7.0 release) with existing code.
        For example, ipykernel currently uses
`shell.input_splitter.check_complete`
"""
from warnings import warn
warn("`input_splitter` is deprecated since IPython 7.0, prefer `input_transformer_manager`.",
DeprecationWarning, stacklevel=2
)
return self.input_transformer_manager
logstart = Bool(False, help=
"""
Start logging to the default log file in overwrite mode.
Use `logappend` to specify a log file to **append** logs to.
"""
).tag(config=True)
logfile = Unicode('', help=
"""
The name of the logfile to use.
"""
).tag(config=True)
logappend = Unicode('', help=
"""
Start logging to the given file in append mode.
Use `logfile` to specify a log file to **overwrite** logs to.
"""
).tag(config=True)
object_info_string_level = Enum((0,1,2), default_value=0,
).tag(config=True)
pdb = Bool(False, help=
"""
Automatically call the pdb debugger after every exception.
"""
).tag(config=True)
display_page = Bool(False,
help="""If True, anything that would be passed to the pager
will be displayed as regular output instead."""
).tag(config=True)
show_rewritten_input = Bool(True,
help="Show rewritten input, e.g. for autocall."
).tag(config=True)
quiet = Bool(False).tag(config=True)
history_length = Integer(10000,
help='Total length of command history'
).tag(config=True)
history_load_length = Integer(1000, help=
"""
The number of saved history entries to be loaded
into the history buffer at startup.
"""
).tag(config=True)
ast_node_interactivity = Enum(['all', 'last', 'last_expr', 'none', 'last_expr_or_assign'],
default_value='last_expr',
help="""
'all', 'last', 'last_expr' or 'none', 'last_expr_or_assign' specifying
which nodes should be run interactively (displaying output from expressions).
"""
).tag(config=True)
# TODO: this part of prompt management should be moved to the frontends.
# Use custom TraitTypes that convert '0'->'' and '\\n'->'\n'
separate_in = SeparateUnicode('\n').tag(config=True)
separate_out = SeparateUnicode('').tag(config=True)
separate_out2 = SeparateUnicode('').tag(config=True)
wildcards_case_sensitive = Bool(True).tag(config=True)
xmode = CaselessStrEnum(('Context', 'Plain', 'Verbose', 'Minimal'),
default_value='Context',
help="Switch modes for the IPython exception handlers."
).tag(config=True)
# Subcomponents of InteractiveShell
alias_manager = Instance('IPython.core.alias.AliasManager', allow_none=True)
prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
builtin_trap = Instance('IPython.core.builtin_trap.BuiltinTrap', allow_none=True)
display_trap = Instance('IPython.core.display_trap.DisplayTrap', allow_none=True)
extension_manager = Instance('IPython.core.extensions.ExtensionManager', allow_none=True)
payload_manager = Instance('IPython.core.payload.PayloadManager', allow_none=True)
history_manager = Instance('IPython.core.history.HistoryAccessorBase', allow_none=True)
magics_manager = Instance('IPython.core.magic.MagicsManager', allow_none=True)
profile_dir = Instance('IPython.core.application.ProfileDir', allow_none=True)
@property
def profile(self):
if self.profile_dir is not None:
name = os.path.basename(self.profile_dir.location)
return name.replace('profile_','')
# Private interface
_post_execute = Dict()
# Tracks any GUI loop loaded for pylab
pylab_gui_select = None
    last_execution_succeeded = Bool(True, help='Whether the last executed command succeeded')
last_execution_result = Instance('IPython.core.interactiveshell.ExecutionResult', help='Result of executing the last command', allow_none=True)
def __init__(self, ipython_dir=None, profile_dir=None,
user_module=None, user_ns=None,
custom_exceptions=((), None), **kwargs):
# This is where traits with a config_key argument are updated
# from the values on config.
super(InteractiveShell, self).__init__(**kwargs)
if 'PromptManager' in self.config:
warn('As of IPython 5.0 `PromptManager` config will have no effect'
' and has been replaced by TerminalInteractiveShell.prompts_class')
self.configurables = [self]
# These are relatively independent and stateless
self.init_ipython_dir(ipython_dir)
self.init_profile_dir(profile_dir)
self.init_instance_attrs()
self.init_environment()
# Check if we're in a virtualenv, and set up sys.path.
self.init_virtualenv()
# Create namespaces (user_ns, user_global_ns, etc.)
self.init_create_namespaces(user_module, user_ns)
# This has to be done after init_create_namespaces because it uses
# something in self.user_ns, but before init_sys_modules, which
# is the first thing to modify sys.
# TODO: When we override sys.stdout and sys.stderr before this class
# is created, we are saving the overridden ones here. Not sure if this
# is what we want to do.
self.save_sys_module_state()
self.init_sys_modules()
# While we're trying to have each part of the code directly access what
# it needs without keeping redundant references to objects, we have too
# much legacy code that expects ip.db to exist.
self.db = PickleShareDB(os.path.join(self.profile_dir.location, 'db'))
self.init_history()
self.init_encoding()
self.init_prefilter()
self.init_syntax_highlighting()
self.init_hooks()
self.init_events()
self.init_pushd_popd_magic()
self.init_user_ns()
self.init_logger()
self.init_builtins()
# The following was in post_config_initialization
self.init_inspector()
self.raw_input_original = input
self.init_completer()
# TODO: init_io() needs to happen before init_traceback handlers
# because the traceback handlers hardcode the stdout/stderr streams.
# This logic in in debugger.Pdb and should eventually be changed.
self.init_io()
self.init_traceback_handlers(custom_exceptions)
self.init_prompts()
self.init_display_formatter()
self.init_display_pub()
self.init_data_pub()
self.init_displayhook()
self.init_magics()
self.init_alias()
self.init_logstart()
self.init_pdb()
self.init_extension_manager()
self.init_payload()
self.events.trigger('shell_initialized', self)
atexit.register(self.atexit_operations)
# The trio runner is used for running Trio in the foreground thread. It
# is different from `_trio_runner(async_fn)` in `async_helpers.py`
# which calls `trio.run()` for every cell. This runner runs all cells
# inside a single Trio event loop. If used, it is set from
# `ipykernel.kernelapp`.
self.trio_runner = None
def get_ipython(self):
"""Return the currently running IPython instance."""
return self
#-------------------------------------------------------------------------
# Trait changed handlers
#-------------------------------------------------------------------------
@observe('ipython_dir')
def _ipython_dir_changed(self, change):
ensure_dir_exists(change['new'])
def set_autoindent(self,value=None):
"""Set the autoindent flag.
If called with no arguments, it acts as a toggle."""
if value is None:
self.autoindent = not self.autoindent
else:
self.autoindent = value
def set_trio_runner(self, tr):
self.trio_runner = tr
#-------------------------------------------------------------------------
# init_* methods called by __init__
#-------------------------------------------------------------------------
def init_ipython_dir(self, ipython_dir):
if ipython_dir is not None:
self.ipython_dir = ipython_dir
return
self.ipython_dir = get_ipython_dir()
def init_profile_dir(self, profile_dir):
if profile_dir is not None:
self.profile_dir = profile_dir
return
self.profile_dir = ProfileDir.create_profile_dir_by_name(
self.ipython_dir, "default"
)
def init_instance_attrs(self):
self.more = False
# command compiler
self.compile = self.compiler_class()
# Make an empty namespace, which extension writers can rely on both
# existing and NEVER being used by ipython itself. This gives them a
# convenient location for storing additional information and state
# their extensions may require, without fear of collisions with other
# ipython names that may develop later.
self.meta = Struct()
# Temporary files used for various purposes. Deleted at exit.
# The files here are stored with Path from Pathlib
self.tempfiles = []
self.tempdirs = []
# keep track of where we started running (mainly for crash post-mortem)
# This is not being used anywhere currently.
self.starting_dir = os.getcwd()
# Indentation management
self.indent_current_nsp = 0
# Dict to track post-execution functions that have been registered
self._post_execute = {}
def init_environment(self):
"""Any changes we need to make to the user's environment."""
pass
def init_encoding(self):
        # Get system encoding at startup time. Certain terminals (like Emacs
        # under Win32) have it set to None, and we need to have a known valid
# encoding to use in the raw_input() method
try:
self.stdin_encoding = sys.stdin.encoding or 'ascii'
except AttributeError:
self.stdin_encoding = 'ascii'
@observe('colors')
def init_syntax_highlighting(self, changes=None):
# Python source parser/formatter for syntax highlighting
pyformat = PyColorize.Parser(style=self.colors, parent=self).format
self.pycolorize = lambda src: pyformat(src,'str')
def refresh_style(self):
# No-op here, used in subclass
pass
def init_pushd_popd_magic(self):
# for pushd/popd management
self.home_dir = get_home_dir()
self.dir_stack = []
def init_logger(self):
self.logger = Logger(self.home_dir, logfname='ipython_log.py',
logmode='rotate')
def init_logstart(self):
"""Initialize logging in case it was requested at the command line.
"""
if self.logappend:
self.magic('logstart %s append' % self.logappend)
elif self.logfile:
self.magic('logstart %s' % self.logfile)
elif self.logstart:
self.magic('logstart')
def init_builtins(self):
# A single, static flag that we set to True. Its presence indicates
# that an IPython shell has been created, and we make no attempts at
# removing on exit or representing the existence of more than one
# IPython at a time.
builtin_mod.__dict__['__IPYTHON__'] = True
builtin_mod.__dict__['display'] = display
self.builtin_trap = BuiltinTrap(shell=self)
@observe('colors')
def init_inspector(self, changes=None):
# Object inspector
self.inspector = oinspect.Inspector(oinspect.InspectColors,
PyColorize.ANSICodeColors,
self.colors,
self.object_info_string_level)
def init_io(self):
# implemented in subclasses, TerminalInteractiveShell does call
# colorama.init().
pass
def init_prompts(self):
# Set system prompts, so that scripts can decide if they are running
# interactively.
sys.ps1 = 'In : '
sys.ps2 = '...: '
sys.ps3 = 'Out: '
def init_display_formatter(self):
self.display_formatter = DisplayFormatter(parent=self)
self.configurables.append(self.display_formatter)
def init_display_pub(self):
self.display_pub = self.display_pub_class(parent=self, shell=self)
self.configurables.append(self.display_pub)
def init_data_pub(self):
if not self.data_pub_class:
self.data_pub = None
return
self.data_pub = self.data_pub_class(parent=self)
self.configurables.append(self.data_pub)
def init_displayhook(self):
# Initialize displayhook, set in/out prompts and printing system
self.displayhook = self.displayhook_class(
parent=self,
shell=self,
cache_size=self.cache_size,
)
self.configurables.append(self.displayhook)
        # This is a context manager that installs/removes the displayhook at
# the appropriate time.
self.display_trap = DisplayTrap(hook=self.displayhook)
@staticmethod
def get_path_links(p: Path):
"""Gets path links including all symlinks
Examples
--------
In [1]: from IPython.core.interactiveshell import InteractiveShell
In [2]: import sys, pathlib
In [3]: paths = InteractiveShell.get_path_links(pathlib.Path(sys.executable))
In [4]: len(paths) == len(set(paths))
Out[4]: True
In [5]: bool(paths)
Out[5]: True
"""
paths = [p]
while p.is_symlink():
new_path = Path(os.readlink(p))
if not new_path.is_absolute():
new_path = p.parent / new_path
p = new_path
paths.append(p)
return paths
def init_virtualenv(self):
"""Add the current virtualenv to sys.path so the user can import modules from it.
This isn't perfect: it doesn't use the Python interpreter with which the
virtualenv was built, and it ignores the --no-site-packages option. A
warning will appear suggesting the user installs IPython in the
virtualenv, but for many cases, it probably works well enough.
Adapted from code snippets online.
http://blog.ufsoft.org/2009/1/29/ipython-and-virtualenv
"""
if 'VIRTUAL_ENV' not in os.environ:
# Not in a virtualenv
return
elif os.environ["VIRTUAL_ENV"] == "":
warn("Virtual env path set to '', please check if this is intended.")
return
p = Path(sys.executable)
p_venv = Path(os.environ["VIRTUAL_ENV"])
# fallback venv detection:
# stdlib venv may symlink sys.executable, so we can't use realpath.
# but others can symlink *to* the venv Python, so we can't just use sys.executable.
# So we just check every item in the symlink tree (generally <= 3)
paths = self.get_path_links(p)
# In Cygwin paths like "c:\..." and '\cygdrive\c\...' are possible
if p_venv.parts[1] == "cygdrive":
drive_name = p_venv.parts[2]
p_venv = (drive_name + ":/") / Path(*p_venv.parts[3:])
if any(p_venv == p.parents[1] for p in paths):
# Our exe is inside or has access to the virtualenv, don't need to do anything.
return
if sys.platform == "win32":
virtual_env = str(Path(os.environ["VIRTUAL_ENV"], "Lib", "site-packages"))
else:
virtual_env_path = Path(
os.environ["VIRTUAL_ENV"], "lib", "python{}.{}", "site-packages"
)
p_ver = sys.version_info[:2]
# Predict version from py[thon]-x.x in the $VIRTUAL_ENV
re_m = re.search(r"\bpy(?:thon)?([23])\.(\d+)\b", os.environ["VIRTUAL_ENV"])
if re_m:
predicted_path = Path(str(virtual_env_path).format(*re_m.groups()))
if predicted_path.exists():
p_ver = re_m.groups()
virtual_env = str(virtual_env_path).format(*p_ver)
warn(
"Attempting to work in a virtualenv. If you encounter problems, "
"please install IPython inside the virtualenv."
)
import site
sys.path.insert(0, virtual_env)
site.addsitedir(virtual_env)
#-------------------------------------------------------------------------
# Things related to injections into the sys module
#-------------------------------------------------------------------------
def save_sys_module_state(self):
"""Save the state of hooks in the sys module.
This has to be called after self.user_module is created.
"""
self._orig_sys_module_state = {'stdin': sys.stdin,
'stdout': sys.stdout,
'stderr': sys.stderr,
'excepthook': sys.excepthook}
self._orig_sys_modules_main_name = self.user_module.__name__
self._orig_sys_modules_main_mod = sys.modules.get(self.user_module.__name__)
def restore_sys_module_state(self):
"""Restore the state of the sys module."""
try:
for k, v in self._orig_sys_module_state.items():
setattr(sys, k, v)
except AttributeError:
pass
        # Reset what was done in self.init_sys_modules
if self._orig_sys_modules_main_mod is not None:
sys.modules[self._orig_sys_modules_main_name] = self._orig_sys_modules_main_mod
#-------------------------------------------------------------------------
# Things related to the banner
#-------------------------------------------------------------------------
@property
def banner(self):
banner = self.banner1
if self.profile and self.profile != 'default':
banner += '\nIPython profile: %s\n' % self.profile
if self.banner2:
banner += '\n' + self.banner2
return banner
def show_banner(self, banner=None):
if banner is None:
banner = self.banner
sys.stdout.write(banner)
#-------------------------------------------------------------------------
# Things related to hooks
#-------------------------------------------------------------------------
def init_hooks(self):
# hooks holds pointers used for user-side customizations
self.hooks = Struct()
self.strdispatchers = {}
# Set all default hooks, defined in the IPython.hooks module.
hooks = IPython.core.hooks
for hook_name in hooks.__all__:
# default hooks have priority 100, i.e. low; user hooks should have
# 0-100 priority
self.set_hook(hook_name, getattr(hooks, hook_name), 100)
if self.display_page:
self.set_hook('show_in_pager', page.as_hook(page.display_page), 90)
def set_hook(self, name, hook, priority=50, str_key=None, re_key=None):
"""set_hook(name,hook) -> sets an internal IPython hook.
IPython exposes some of its internal API as user-modifiable hooks. By
adding your function to one of these hooks, you can modify IPython's
behavior to call at runtime your own routines."""
# At some point in the future, this should validate the hook before it
# accepts it. Probably at least check that the hook takes the number
# of args it's supposed to.
f = types.MethodType(hook,self)
# check if the hook is for strdispatcher first
if str_key is not None:
sdp = self.strdispatchers.get(name, StrDispatch())
sdp.add_s(str_key, f, priority )
self.strdispatchers[name] = sdp
return
if re_key is not None:
sdp = self.strdispatchers.get(name, StrDispatch())
sdp.add_re(re.compile(re_key), f, priority )
self.strdispatchers[name] = sdp
return
dp = getattr(self.hooks, name, None)
if name not in IPython.core.hooks.__all__:
print("Warning! Hook '%s' is not one of %s" % \
(name, IPython.core.hooks.__all__ ))
if name in IPython.core.hooks.deprecated:
alternative = IPython.core.hooks.deprecated[name]
raise ValueError(
"Hook {} has been deprecated since IPython 5.0. Use {} instead.".format(
name, alternative
)
)
if not dp:
dp = IPython.core.hooks.CommandChainDispatcher()
try:
dp.add(f,priority)
except AttributeError:
# it was not commandchain, plain old func - replace
dp = f
setattr(self.hooks,name, dp)
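    # Illustrative usage (a sketch; the hook body is an assumption, 'editor'
    # is one of the standard hook names in IPython.core.hooks):
    #   def my_editor_hook(self, filename, linenum=None, wait=True):
    #       import subprocess
    #       subprocess.call(['myeditor', filename])
    #   get_ipython().set_hook('editor', my_editor_hook)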
#-------------------------------------------------------------------------
# Things related to events
#-------------------------------------------------------------------------
def init_events(self):
self.events = EventManager(self, available_events)
self.events.register("pre_execute", self._clear_warning_registry)
def register_post_execute(self, func):
"""DEPRECATED: Use ip.events.register('post_run_cell', func)
Register a function for calling after code execution.
"""
raise ValueError(
"ip.register_post_execute is deprecated since IPython 1.0, use "
"ip.events.register('post_run_cell', func) instead."
)
def _clear_warning_registry(self):
# clear the warning registry, so that different code blocks with
# overlapping line number ranges don't cause spurious suppression of
# warnings (see gh-6611 for details)
if "__warningregistry__" in self.user_global_ns:
del self.user_global_ns["__warningregistry__"]
#-------------------------------------------------------------------------
# Things related to the "main" module
#-------------------------------------------------------------------------
def new_main_mod(self, filename, modname):
"""Return a new 'main' module object for user code execution.
``filename`` should be the path of the script which will be run in the
module. Requests with the same filename will get the same module, with
its namespace cleared.
``modname`` should be the module name - normally either '__main__' or
the basename of the file without the extension.
When scripts are executed via %run, we must keep a reference to their
__main__ module around so that Python doesn't
clear it, rendering references to module globals useless.
This method keeps said reference in a private dict, keyed by the
absolute path of the script. This way, for multiple executions of the
same script we only keep one copy of the namespace (the last one),
thus preventing memory leaks from old references while allowing the
objects from the last execution to be accessible.
"""
filename = os.path.abspath(filename)
try:
main_mod = self._main_mod_cache[filename]
except KeyError:
main_mod = self._main_mod_cache[filename] = types.ModuleType(
modname,
doc="Module created for script run in IPython")
else:
main_mod.__dict__.clear()
main_mod.__name__ = modname
main_mod.__file__ = filename
# It seems pydoc (and perhaps others) needs any module instance to
# implement a __nonzero__ method
main_mod.__nonzero__ = lambda : True
return main_mod
def clear_main_mod_cache(self):
"""Clear the cache of main modules.
Mainly for use by utilities like %reset.
Examples
--------
In [15]: import IPython
In [16]: m = _ip.new_main_mod(IPython.__file__, 'IPython')
In [17]: len(_ip._main_mod_cache) > 0
Out[17]: True
In [18]: _ip.clear_main_mod_cache()
In [19]: len(_ip._main_mod_cache) == 0
Out[19]: True
"""
self._main_mod_cache.clear()
#-------------------------------------------------------------------------
# Things related to debugging
#-------------------------------------------------------------------------
def init_pdb(self):
# Set calling of pdb on exceptions
# self.call_pdb is a property
self.call_pdb = self.pdb
def _get_call_pdb(self):
return self._call_pdb
def _set_call_pdb(self,val):
if val not in (0,1,False,True):
raise ValueError('new call_pdb value must be boolean')
# store value in instance
self._call_pdb = val
# notify the actual exception handlers
self.InteractiveTB.call_pdb = val
call_pdb = property(_get_call_pdb,_set_call_pdb,None,
'Control auto-activation of pdb at exceptions')
def debugger(self,force=False):
"""Call the pdb debugger.
Keywords:
- force(False): by default, this routine checks the instance call_pdb
flag and does not actually invoke the debugger if the flag is false.
The 'force' option forces the debugger to activate even if the flag
is false.
"""
if not (force or self.call_pdb):
return
if not hasattr(sys,'last_traceback'):
error('No traceback has been produced, nothing to debug.')
return
self.InteractiveTB.debugger(force=True)
#-------------------------------------------------------------------------
# Things related to IPython's various namespaces
#-------------------------------------------------------------------------
default_user_namespaces = True
def init_create_namespaces(self, user_module=None, user_ns=None):
# Create the namespace where the user will operate. user_ns is
# normally the only one used, and it is passed to the exec calls as
# the locals argument. But we do carry a user_global_ns namespace
        # given as the exec 'globals' argument. This is useful in embedding
# situations where the ipython shell opens in a context where the
# distinction between locals and globals is meaningful. For
# non-embedded contexts, it is just the same object as the user_ns dict.
# FIXME. For some strange reason, __builtins__ is showing up at user
# level as a dict instead of a module. This is a manual fix, but I
# should really track down where the problem is coming from. Alex
# Schmolck reported this problem first.
# A useful post by Alex Martelli on this topic:
# Re: inconsistent value from __builtins__
# Von: Alex Martelli <[email protected]>
# Datum: Freitag 01 Oktober 2004 04:45:34 nachmittags/abends
# Gruppen: comp.lang.python
# Michael Hohn <[email protected]> wrote:
# > >>> print type(builtin_check.get_global_binding('__builtins__'))
# > <type 'dict'>
# > >>> print type(__builtins__)
# > <type 'module'>
# > Is this difference in return value intentional?
# Well, it's documented that '__builtins__' can be either a dictionary
# or a module, and it's been that way for a long time. Whether it's
# intentional (or sensible), I don't know. In any case, the idea is
# that if you need to access the built-in namespace directly, you
# should start with "import __builtin__" (note, no 's') which will
# definitely give you a module. Yeah, it's somewhat confusing:-(.
# These routines return a properly built module and dict as needed by
# the rest of the code, and can also be used by extension writers to
# generate properly initialized namespaces.
if (user_ns is not None) or (user_module is not None):
self.default_user_namespaces = False
self.user_module, self.user_ns = self.prepare_user_module(user_module, user_ns)
# A record of hidden variables we have added to the user namespace, so
# we can list later only variables defined in actual interactive use.
self.user_ns_hidden = {}
# Now that FakeModule produces a real module, we've run into a nasty
# problem: after script execution (via %run), the module where the user
# code ran is deleted. Now that this object is a true module (needed
# so doctest and other tools work correctly), the Python module
# teardown mechanism runs over it, and sets to None every variable
# present in that module. Top-level references to objects from the
# script survive, because the user_ns is updated with them. However,
# calling functions defined in the script that use other things from
# the script will fail, because the function's closure had references
# to the original objects, which are now all None. So we must protect
# these modules from deletion by keeping a cache.
#
# To avoid keeping stale modules around (we only need the one from the
# last run), we use a dict keyed with the full path to the script, so
# only the last version of the module is held in the cache. Note,
# however, that we must cache the module *namespace contents* (their
# __dict__). Because if we try to cache the actual modules, old ones
# (uncached) could be destroyed while still holding references (such as
        # those held by GUI objects that tend to be long-lived).
#
# The %reset command will flush this cache. See the cache_main_mod()
# and clear_main_mod_cache() methods for details on use.
# This is the cache used for 'main' namespaces
self._main_mod_cache = {}
# A table holding all the namespaces IPython deals with, so that
# introspection facilities can search easily.
self.ns_table = {'user_global':self.user_module.__dict__,
'user_local':self.user_ns,
'builtin':builtin_mod.__dict__
}
@property
def user_global_ns(self):
return self.user_module.__dict__
def prepare_user_module(self, user_module=None, user_ns=None):
"""Prepare the module and namespace in which user code will be run.
When IPython is started normally, both parameters are None: a new module
is created automatically, and its __dict__ used as the namespace.
If only user_module is provided, its __dict__ is used as the namespace.
If only user_ns is provided, a dummy module is created, and user_ns
becomes the global namespace. If both are provided (as they may be
when embedding), user_ns is the local namespace, and user_module
provides the global namespace.
Parameters
----------
user_module : module, optional
The current user module in which IPython is being run. If None,
a clean module will be created.
user_ns : dict, optional
A namespace in which to run interactive commands.
Returns
-------
A tuple of user_module and user_ns, each properly initialised.
"""
if user_module is None and user_ns is not None:
user_ns.setdefault("__name__", "__main__")
user_module = DummyMod()
user_module.__dict__ = user_ns
if user_module is None:
user_module = types.ModuleType("__main__",
doc="Automatically created module for IPython interactive environment")
# We must ensure that __builtin__ (without the final 's') is always
# available and pointing to the __builtin__ *module*. For more details:
# http://mail.python.org/pipermail/python-dev/2001-April/014068.html
user_module.__dict__.setdefault('__builtin__', builtin_mod)
user_module.__dict__.setdefault('__builtins__', builtin_mod)
if user_ns is None:
user_ns = user_module.__dict__
return user_module, user_ns
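    # Illustrative usage (a sketch, e.g. when embedding; the names are
    # assumptions for illustration):
    #   ns = {'answer': 42}
    #   mod, ns = shell.prepare_user_module(user_ns=ns)
    #   assert mod.__dict__ is ns      # the dummy module wraps the given namespace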
def init_sys_modules(self):
# We need to insert into sys.modules something that looks like a
# module but which accesses the IPython namespace, for shelve and
# pickle to work interactively. Normally they rely on getting
# everything out of __main__, but for embedding purposes each IPython
# instance has its own private namespace, so we can't go shoving
# everything into __main__.
# note, however, that we should only do this for non-embedded
# ipythons, which really mimic the __main__.__dict__ with their own
# namespace. Embedded instances, on the other hand, should not do
# this because they need to manage the user local/global namespaces
# only, but they live within a 'normal' __main__ (meaning, they
# shouldn't overtake the execution environment of the script they're
# embedded in).
# This is overridden in the InteractiveShellEmbed subclass to a no-op.
main_name = self.user_module.__name__
sys.modules[main_name] = self.user_module
def init_user_ns(self):
"""Initialize all user-visible namespaces to their minimum defaults.
Certain history lists are also initialized here, as they effectively
act as user namespaces.
Notes
-----
All data structures here are only filled in, they are NOT reset by this
method. If they were not empty before, data will simply be added to
them.
"""
# This function works in two parts: first we put a few things in
# user_ns, and we sync that contents into user_ns_hidden so that these
# initial variables aren't shown by %who. After the sync, we add the
# rest of what we *do* want the user to see with %who even on a new
# session (probably nothing, so they really only see their own stuff)
# The user dict must *always* have a __builtin__ reference to the
# Python standard __builtin__ namespace, which must be imported.
# This is so that certain operations in prompt evaluation can be
# reliably executed with builtins. Note that we can NOT use
# __builtins__ (note the 's'), because that can either be a dict or a
# module, and can even mutate at runtime, depending on the context
# (Python makes no guarantees on it). In contrast, __builtin__ is
# always a module object, though it must be explicitly imported.
# For more details:
# http://mail.python.org/pipermail/python-dev/2001-April/014068.html
ns = {}
# make global variables for user access to the histories
ns['_ih'] = self.history_manager.input_hist_parsed
ns['_oh'] = self.history_manager.output_hist
ns['_dh'] = self.history_manager.dir_hist
# user aliases to input and output histories. These shouldn't show up
# in %who, as they can have very large reprs.
ns['In'] = self.history_manager.input_hist_parsed
ns['Out'] = self.history_manager.output_hist
# Store myself as the public api!!!
ns['get_ipython'] = self.get_ipython
ns['exit'] = self.exiter
ns['quit'] = self.exiter
# Sync what we've added so far to user_ns_hidden so these aren't seen
# by %who
self.user_ns_hidden.update(ns)
# Anything put into ns now would show up in %who. Think twice before
# putting anything here, as we really want %who to show the user their
# stuff, not our variables.
# Finally, update the real user's namespace
self.user_ns.update(ns)
@property
def all_ns_refs(self):
"""Get a list of references to all the namespace dictionaries in which
IPython might store a user-created object.
Note that this does not include the displayhook, which also caches
objects from the output."""
return [self.user_ns, self.user_global_ns, self.user_ns_hidden] + \
[m.__dict__ for m in self._main_mod_cache.values()]
def reset(self, new_session=True, aggressive=False):
"""Clear all internal namespaces, and attempt to release references to
user objects.
If new_session is True, a new history session will be opened.
"""
# Clear histories
self.history_manager.reset(new_session)
# Reset counter used to index all histories
if new_session:
self.execution_count = 1
# Reset last execution result
self.last_execution_succeeded = True
self.last_execution_result = None
# Flush cached output items
if self.displayhook.do_full_cache:
self.displayhook.flush()
# The main execution namespaces must be cleared very carefully,
# skipping the deletion of the builtin-related keys, because doing so
# would cause errors in many object's __del__ methods.
if self.user_ns is not self.user_global_ns:
self.user_ns.clear()
ns = self.user_global_ns
drop_keys = set(ns.keys())
drop_keys.discard('__builtin__')
drop_keys.discard('__builtins__')
drop_keys.discard('__name__')
for k in drop_keys:
del ns[k]
self.user_ns_hidden.clear()
# Restore the user namespaces to minimal usability
self.init_user_ns()
if aggressive and not hasattr(self, "_sys_modules_keys"):
print("Cannot restore sys.module, no snapshot")
elif aggressive:
print("culling sys module...")
current_keys = set(sys.modules.keys())
for k in current_keys - self._sys_modules_keys:
if k.startswith("multiprocessing"):
continue
del sys.modules[k]
# Restore the default and user aliases
self.alias_manager.clear_aliases()
self.alias_manager.init_aliases()
# Now define aliases that only make sense on the terminal, because they
# need direct access to the console in a way that we can't emulate in
# GUI or web frontend
if os.name == 'posix':
for cmd in ('clear', 'more', 'less', 'man'):
if cmd not in self.magics_manager.magics['line']:
self.alias_manager.soft_define_alias(cmd, cmd)
# Flush the private list of module references kept for script
# execution protection
self.clear_main_mod_cache()
def del_var(self, varname, by_name=False):
"""Delete a variable from the various namespaces, so that, as
far as possible, we're not keeping any hidden references to it.
Parameters
----------
varname : str
The name of the variable to delete.
by_name : bool
If True, delete variables with the given name in each
namespace. If False (default), find the variable in the user
namespace, and delete references to it.
"""
if varname in ('__builtin__', '__builtins__'):
raise ValueError("Refusing to delete %s" % varname)
ns_refs = self.all_ns_refs
if by_name: # Delete by name
for ns in ns_refs:
try:
del ns[varname]
except KeyError:
pass
else: # Delete by object
try:
obj = self.user_ns[varname]
except KeyError as e:
raise NameError("name '%s' is not defined" % varname) from e
# Also check in output history
ns_refs.append(self.history_manager.output_hist)
for ns in ns_refs:
to_delete = [n for n, o in ns.items() if o is obj]
for name in to_delete:
del ns[name]
# Ensure it is removed from the last execution result
if self.last_execution_result.result is obj:
self.last_execution_result = None
# displayhook keeps extra references, but not in a dictionary
for name in ('_', '__', '___'):
if getattr(self.displayhook, name) is obj:
setattr(self.displayhook, name, None)
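    # Illustrative usage (a sketch): after `x = [1, 2, 3]` in a session,
    #   get_ipython().del_var('x')
    # removes `x` and any other user-namespace names bound to the same list.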
def reset_selective(self, regex=None):
"""Clear selective variables from internal namespaces based on a
specified regular expression.
Parameters
----------
regex : string or compiled pattern, optional
A regular expression pattern that will be used in searching
variable names in the users namespaces.
"""
if regex is not None:
try:
m = re.compile(regex)
except TypeError as e:
raise TypeError('regex must be a string or compiled pattern') from e
            # Search for keys in each namespace that match the given regex
            # If a match is found, delete the key/value pair.
            for ns in self.all_ns_refs:
                # iterate over a static list of names: deleting entries from a
                # dict while iterating over it raises a RuntimeError
                for var in list(ns):
                    if m.search(var):
                        del ns[var]
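    # Illustrative sketch: clearing all user variables whose names start with
    # "tmp_" via reset_selective. `ip` is assumed to be a live shell instance.
    #
    #     ip.push({"tmp_a": 1, "tmp_b": 2, "keep": 3})
    #     ip.reset_selective(r"^tmp_")
    #     "keep" in ip.user_ns   # -> True; tmp_a and tmp_b are gone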
def push(self, variables, interactive=True):
"""Inject a group of variables into the IPython user namespace.
Parameters
----------
variables : dict, str or list/tuple of str
The variables to inject into the user's namespace. If a dict, a
simple update is done. If a str, the string is assumed to have
variable names separated by spaces. A list/tuple of str can also
            be used to give the variable names. If just the variable names are
            given (list/tuple/str), then the variable values are looked up in
            the caller's frame.
interactive : bool
If True (default), the variables will be listed with the ``who``
magic.
"""
vdict = None
# We need a dict of name/value pairs to do namespace updates.
if isinstance(variables, dict):
vdict = variables
elif isinstance(variables, (str, list, tuple)):
if isinstance(variables, str):
vlist = variables.split()
else:
vlist = variables
vdict = {}
cf = sys._getframe(1)
for name in vlist:
try:
vdict[name] = eval(name, cf.f_globals, cf.f_locals)
except:
print('Could not get variable %s from %s' %
(name,cf.f_code.co_name))
else:
raise ValueError('variables must be a dict/str/list/tuple')
# Propagate variables to user namespace
self.user_ns.update(vdict)
# And configure interactive visibility
user_ns_hidden = self.user_ns_hidden
if interactive:
for name in vdict:
user_ns_hidden.pop(name, None)
else:
user_ns_hidden.update(vdict)
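    # Illustrative sketch of the three accepted forms of `variables`; `ip` is
    # assumed to be a live shell instance.
    #
    #     a, b = 1, 2
    #     ip.push({"c": 3})              # dict: simple namespace update
    #     ip.push("a b")                 # str: names resolved in this frame
    #     ip.push(["a", "b"], interactive=False)  # hidden from %who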
def drop_by_id(self, variables):
"""Remove a dict of variables from the user namespace, if they are the
same as the values in the dictionary.
This is intended for use by extensions: variables that they've added can
be taken back out if they are unloaded, without removing any that the
user has overwritten.
Parameters
----------
variables : dict
A dictionary mapping object names (as strings) to the objects.
"""
for name, obj in variables.items():
if name in self.user_ns and self.user_ns[name] is obj:
del self.user_ns[name]
self.user_ns_hidden.pop(name, None)
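    # Illustrative sketch: an extension removing only the bindings it created.
    # If the user rebound the name in the meantime, drop_by_id leaves it alone.
    #
    #     injected = {"helper": object()}
    #     ip.push(injected)
    #     ip.user_ns["helper"] = "user value"   # user overwrote the binding
    #     ip.drop_by_id(injected)               # no-op here: identities differ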
#-------------------------------------------------------------------------
# Things related to object introspection
#-------------------------------------------------------------------------
def _ofind(self, oname, namespaces=None):
"""Find an object in the available namespaces.
self._ofind(oname) -> dict with keys: found,obj,ospace,ismagic
Has special code to detect magic functions.
"""
oname = oname.strip()
if not oname.startswith(ESC_MAGIC) and \
not oname.startswith(ESC_MAGIC2) and \
not all(a.isidentifier() for a in oname.split(".")):
return {'found': False}
if namespaces is None:
# Namespaces to search in:
# Put them in a list. The order is important so that we
# find things in the same order that Python finds them.
namespaces = [ ('Interactive', self.user_ns),
('Interactive (global)', self.user_global_ns),
('Python builtin', builtin_mod.__dict__),
]
ismagic = False
isalias = False
found = False
ospace = None
parent = None
obj = None
# Look for the given name by splitting it in parts. If the head is
# found, then we look for all the remaining parts as members, and only
# declare success if we can find them all.
oname_parts = oname.split('.')
oname_head, oname_rest = oname_parts[0],oname_parts[1:]
for nsname,ns in namespaces:
try:
obj = ns[oname_head]
except KeyError:
continue
else:
for idx, part in enumerate(oname_rest):
try:
parent = obj
# The last part is looked up in a special way to avoid
# descriptor invocation as it may raise or have side
# effects.
if idx == len(oname_rest) - 1:
obj = self._getattr_property(obj, part)
else:
obj = getattr(obj, part)
except:
# Blanket except b/c some badly implemented objects
# allow __getattr__ to raise exceptions other than
# AttributeError, which then crashes IPython.
break
else:
# If we finish the for loop (no break), we got all members
found = True
ospace = nsname
break # namespace loop
# Try to see if it's magic
if not found:
obj = None
if oname.startswith(ESC_MAGIC2):
oname = oname.lstrip(ESC_MAGIC2)
obj = self.find_cell_magic(oname)
elif oname.startswith(ESC_MAGIC):
oname = oname.lstrip(ESC_MAGIC)
obj = self.find_line_magic(oname)
else:
# search without prefix, so run? will find %run?
obj = self.find_line_magic(oname)
if obj is None:
obj = self.find_cell_magic(oname)
if obj is not None:
found = True
ospace = 'IPython internal'
ismagic = True
isalias = isinstance(obj, Alias)
# Last try: special-case some literals like '', [], {}, etc:
if not found and oname_head in ["''",'""','[]','{}','()']:
obj = eval(oname_head)
found = True
ospace = 'Interactive'
return {
'obj':obj,
'found':found,
'parent':parent,
'ismagic':ismagic,
'isalias':isalias,
'namespace':ospace
}
@staticmethod
def _getattr_property(obj, attrname):
"""Property-aware getattr to use in object finding.
        If attrname represents a property, return it unevaluated (in case it
        has side effects or raises an error).
        """
if not isinstance(obj, type):
try:
# `getattr(type(obj), attrname)` is not guaranteed to return
# `obj`, but does so for property:
#
# property.__get__(self, None, cls) -> self
#
# The universal alternative is to traverse the mro manually
# searching for attrname in class dicts.
attr = getattr(type(obj), attrname)
except AttributeError:
pass
else:
# This relies on the fact that data descriptors (with both
# __get__ & __set__ magic methods) take precedence over
# instance-level attributes:
#
# class A(object):
# @property
# def foobar(self): return 123
# a = A()
# a.__dict__['foobar'] = 345
# a.foobar # == 123
#
# So, a property may be returned right away.
if isinstance(attr, property):
return attr
# Nothing helped, fall back.
return getattr(obj, attrname)
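    # Illustrative sketch of why _getattr_property matters: plain getattr on
    # an instance would *evaluate* a property, which may be slow or raise.
    #
    #     class Boom:
    #         @property
    #         def value(self):
    #             raise RuntimeError("side effect!")
    #
    #     InteractiveShell._getattr_property(Boom(), "value")
    #     # returns the property object itself, never triggering RuntimeError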
def _object_find(self, oname, namespaces=None):
"""Find an object and return a struct with info about it."""
return Struct(self._ofind(oname, namespaces))
def _inspect(self, meth, oname, namespaces=None, **kw):
"""Generic interface to the inspector system.
This function is meant to be called by pdef, pdoc & friends.
"""
info = self._object_find(oname, namespaces)
docformat = (
sphinxify(self.object_inspect(oname)) if self.sphinxify_docstring else None
)
if info.found:
pmethod = getattr(self.inspector, meth)
# TODO: only apply format_screen to the plain/text repr of the mime
# bundle.
formatter = format_screen if info.ismagic else docformat
if meth == 'pdoc':
pmethod(info.obj, oname, formatter)
elif meth == 'pinfo':
pmethod(
info.obj,
oname,
formatter,
info,
enable_html_pager=self.enable_html_pager,
**kw,
)
else:
pmethod(info.obj, oname)
else:
print('Object `%s` not found.' % oname)
return 'not found' # so callers can take other action
def object_inspect(self, oname, detail_level=0):
"""Get object info about oname"""
with self.builtin_trap:
info = self._object_find(oname)
if info.found:
return self.inspector.info(info.obj, oname, info=info,
detail_level=detail_level
)
else:
return oinspect.object_info(name=oname, found=False)
def object_inspect_text(self, oname, detail_level=0):
"""Get object info as formatted text"""
return self.object_inspect_mime(oname, detail_level)['text/plain']
def object_inspect_mime(self, oname, detail_level=0, omit_sections=()):
"""Get object info as a mimebundle of formatted representations.
A mimebundle is a dictionary, keyed by mime-type.
It must always have the key `'text/plain'`.
"""
with self.builtin_trap:
info = self._object_find(oname)
if info.found:
docformat = (
sphinxify(self.object_inspect(oname))
if self.sphinxify_docstring
else None
)
return self.inspector._get_info(
info.obj,
oname,
info=info,
detail_level=detail_level,
formatter=docformat,
omit_sections=omit_sections,
)
else:
raise KeyError(oname)
#-------------------------------------------------------------------------
# Things related to history management
#-------------------------------------------------------------------------
def init_history(self):
"""Sets up the command history, and starts regular autosaves."""
self.history_manager = HistoryManager(shell=self, parent=self)
self.configurables.append(self.history_manager)
#-------------------------------------------------------------------------
# Things related to exception handling and tracebacks (not debugging)
#-------------------------------------------------------------------------
debugger_cls = InterruptiblePdb
def init_traceback_handlers(self, custom_exceptions):
# Syntax error handler.
self.SyntaxTB = ultratb.SyntaxTB(color_scheme='NoColor', parent=self)
# The interactive one is initialized with an offset, meaning we always
# want to remove the topmost item in the traceback, which is our own
# internal code. Valid modes: ['Plain','Context','Verbose','Minimal']
self.InteractiveTB = ultratb.AutoFormattedTB(mode = 'Plain',
color_scheme='NoColor',
tb_offset = 1,
check_cache=check_linecache_ipython,
debugger_cls=self.debugger_cls, parent=self)
# The instance will store a pointer to the system-wide exception hook,
# so that runtime code (such as magics) can access it. This is because
# during the read-eval loop, it may get temporarily overwritten.
self.sys_excepthook = sys.excepthook
# and add any custom exception handlers the user may have specified
self.set_custom_exc(*custom_exceptions)
# Set the exception mode
self.InteractiveTB.set_mode(mode=self.xmode)
def set_custom_exc(self, exc_tuple, handler):
"""set_custom_exc(exc_tuple, handler)
Set a custom exception handler, which will be called if any of the
exceptions in exc_tuple occur in the mainloop (specifically, in the
run_code() method).
Parameters
----------
exc_tuple : tuple of exception classes
A *tuple* of exception classes, for which to call the defined
handler. It is very important that you use a tuple, and NOT A
LIST here, because of the way Python's except statement works. If
you only want to trap a single exception, use a singleton tuple::
exc_tuple == (MyCustomException,)
handler : callable
handler must have the following signature::
def my_handler(self, etype, value, tb, tb_offset=None):
...
return structured_traceback
Your handler must return a structured traceback (a list of strings),
or None.
This will be made into an instance method (via types.MethodType)
of IPython itself, and it will be called if any of the exceptions
listed in the exc_tuple are caught. If the handler is None, an
internal basic one is used, which just prints basic info.
To protect IPython from crashes, if your handler ever raises an
exception or returns an invalid result, it will be immediately
disabled.
Notes
-----
WARNING: by putting in your own exception handler into IPython's main
execution loop, you run a very good chance of nasty crashes. This
facility should only be used if you really know what you are doing.
"""
if not isinstance(exc_tuple, tuple):
raise TypeError("The custom exceptions must be given as a tuple.")
def dummy_handler(self, etype, value, tb, tb_offset=None):
print('*** Simple custom exception handler ***')
print('Exception type :', etype)
print('Exception value:', value)
print('Traceback :', tb)
def validate_stb(stb):
"""validate structured traceback return type
return type of CustomTB *should* be a list of strings, but allow
single strings or None, which are harmless.
This function will *always* return a list of strings,
and will raise a TypeError if stb is inappropriate.
"""
msg = "CustomTB must return list of strings, not %r" % stb
if stb is None:
return []
elif isinstance(stb, str):
return [stb]
elif not isinstance(stb, list):
raise TypeError(msg)
# it's a list
for line in stb:
# check every element
if not isinstance(line, str):
raise TypeError(msg)
return stb
if handler is None:
wrapped = dummy_handler
else:
def wrapped(self,etype,value,tb,tb_offset=None):
"""wrap CustomTB handler, to protect IPython from user code
This makes it harder (but not impossible) for custom exception
handlers to crash IPython.
"""
try:
stb = handler(self,etype,value,tb,tb_offset=tb_offset)
return validate_stb(stb)
except:
# clear custom handler immediately
self.set_custom_exc((), None)
print("Custom TB Handler failed, unregistering", file=sys.stderr)
# show the exception in handler first
stb = self.InteractiveTB.structured_traceback(*sys.exc_info())
print(self.InteractiveTB.stb2text(stb))
print("The original exception:")
stb = self.InteractiveTB.structured_traceback(
(etype,value,tb), tb_offset=tb_offset
)
return stb
self.CustomTB = types.MethodType(wrapped,self)
self.custom_exceptions = exc_tuple
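    # Illustrative sketch: registering a custom handler for a user-defined
    # exception class, following the contract documented above. `ip` is
    # assumed to be a live shell instance.
    #
    #     class MyError(Exception):
    #         pass
    #
    #     def my_handler(self, etype, value, tb, tb_offset=None):
    #         return ["Caught %s: %s" % (etype.__name__, value)]
    #
    #     ip.set_custom_exc((MyError,), my_handler)  # a tuple, NOT a list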
def excepthook(self, etype, value, tb):
"""One more defense for GUI apps that call sys.excepthook.
GUI frameworks like wxPython trap exceptions and call
sys.excepthook themselves. I guess this is a feature that
enables them to keep running after exceptions that would
otherwise kill their mainloop. This is a bother for IPython
which expects to catch all of the program exceptions with a try:
except: statement.
Normally, IPython sets sys.excepthook to a CrashHandler instance, so if
any app directly invokes sys.excepthook, it will look to the user like
IPython crashed. In order to work around this, we can disable the
CrashHandler and replace it with this excepthook instead, which prints a
regular traceback using our InteractiveTB. In this fashion, apps which
call sys.excepthook will generate a regular-looking exception from
IPython, and the CrashHandler will only be triggered by real IPython
crashes.
This hook should be used sparingly, only in places which are not likely
to be true IPython errors.
"""
self.showtraceback((etype, value, tb), tb_offset=0)
def _get_exc_info(self, exc_tuple=None):
"""get exc_info from a given tuple, sys.exc_info() or sys.last_type etc.
        Ensures sys.last_type, sys.last_value and sys.last_traceback hold the
        exc_info we found, from whichever source.
raises ValueError if none of these contain any information
"""
if exc_tuple is None:
etype, value, tb = sys.exc_info()
else:
etype, value, tb = exc_tuple
if etype is None:
if hasattr(sys, 'last_type'):
etype, value, tb = sys.last_type, sys.last_value, \
sys.last_traceback
if etype is None:
raise ValueError("No exception to find")
# Now store the exception info in sys.last_type etc.
# WARNING: these variables are somewhat deprecated and not
# necessarily safe to use in a threaded environment, but tools
# like pdb depend on their existence, so let's set them. If we
# find problems in the field, we'll need to revisit their use.
sys.last_type = etype
sys.last_value = value
sys.last_traceback = tb
return etype, value, tb
def show_usage_error(self, exc):
"""Show a short message for UsageErrors
These are special exceptions that shouldn't show a traceback.
"""
print("UsageError: %s" % exc, file=sys.stderr)
def get_exception_only(self, exc_tuple=None):
"""
Return as a string (ending with a newline) the exception that
just occurred, without any traceback.
"""
etype, value, tb = self._get_exc_info(exc_tuple)
msg = traceback.format_exception_only(etype, value)
return ''.join(msg)
def showtraceback(self, exc_tuple=None, filename=None, tb_offset=None,
exception_only=False, running_compiled_code=False):
"""Display the exception that just occurred.
If nothing is known about the exception, this is the method which
should be used throughout the code for presenting user tracebacks,
rather than directly invoking the InteractiveTB object.
A specific showsyntaxerror() also exists, but this method can take
care of calling it if needed, so unless you are explicitly catching a
SyntaxError exception, don't try to analyze the stack manually and
simply call this method."""
try:
try:
etype, value, tb = self._get_exc_info(exc_tuple)
except ValueError:
print('No traceback available to show.', file=sys.stderr)
return
if issubclass(etype, SyntaxError):
# Though this won't be called by syntax errors in the input
# line, there may be SyntaxError cases with imported code.
self.showsyntaxerror(filename, running_compiled_code)
elif etype is UsageError:
self.show_usage_error(value)
else:
if exception_only:
stb = ['An exception has occurred, use %tb to see '
'the full traceback.\n']
stb.extend(self.InteractiveTB.get_exception_only(etype,
value))
else:
try:
# Exception classes can customise their traceback - we
# use this in IPython.parallel for exceptions occurring
# in the engines. This should return a list of strings.
if hasattr(value, "_render_traceback_"):
stb = value._render_traceback_()
else:
stb = self.InteractiveTB.structured_traceback(
etype, value, tb, tb_offset=tb_offset
)
except Exception:
print(
"Unexpected exception formatting exception. Falling back to standard exception"
)
traceback.print_exc()
return None
self._showtraceback(etype, value, stb)
if self.call_pdb:
# drop into debugger
self.debugger(force=True)
return
# Actually show the traceback
self._showtraceback(etype, value, stb)
except KeyboardInterrupt:
print('\n' + self.get_exception_only(), file=sys.stderr)
    def _showtraceback(self, etype, evalue, stb: ListType[str]):
"""Actually show a traceback.
Subclasses may override this method to put the traceback on a different
place, like a side channel.
"""
val = self.InteractiveTB.stb2text(stb)
try:
print(val)
except UnicodeEncodeError:
print(val.encode("utf-8", "backslashreplace").decode())
def showsyntaxerror(self, filename=None, running_compiled_code=False):
"""Display the syntax error that just occurred.
This doesn't display a stack trace because there isn't one.
If a filename is given, it is stuffed in the exception instead
of what was there before (because Python's parser always uses
"<string>" when reading from a string).
        If the syntax error occurred when running compiled code (i.e.
        running_compiled_code=True), a longer stack trace will be displayed.
"""
etype, value, last_traceback = self._get_exc_info()
if filename and issubclass(etype, SyntaxError):
try:
value.filename = filename
except:
# Not the format we expect; leave it alone
pass
# If the error occurred when executing compiled code, we should provide full stacktrace.
elist = traceback.extract_tb(last_traceback) if running_compiled_code else []
stb = self.SyntaxTB.structured_traceback(etype, value, elist)
self._showtraceback(etype, value, stb)
# This is overridden in TerminalInteractiveShell to show a message about
# the %paste magic.
def showindentationerror(self):
"""Called by _run_cell when there's an IndentationError in code entered
at the prompt.
This is overridden in TerminalInteractiveShell to show a message about
the %paste magic."""
self.showsyntaxerror()
@skip_doctest
def set_next_input(self, s, replace=False):
""" Sets the 'default' input string for the next command line.
        Example::

            In [1]: _ip.set_next_input("Hello World")

            In [2]: Hello World_  # cursor is here
"""
self.rl_next_input = s
def _indent_current_str(self):
"""return the current level of indentation as a string"""
return self.input_splitter.get_indent_spaces() * ' '
#-------------------------------------------------------------------------
# Things related to text completion
#-------------------------------------------------------------------------
def init_completer(self):
"""Initialize the completion machinery.
This creates completion machinery that can be used by client code,
either interactively in-process (typically triggered by the readline
library), programmatically (such as in test suites) or out-of-process
(typically over the network by remote frontends).
"""
from IPython.core.completer import IPCompleter
from IPython.core.completerlib import (
cd_completer,
magic_run_completer,
module_completer,
reset_completer,
)
self.Completer = IPCompleter(shell=self,
namespace=self.user_ns,
global_namespace=self.user_global_ns,
parent=self,
)
self.configurables.append(self.Completer)
# Add custom completers to the basic ones built into IPCompleter
sdisp = self.strdispatchers.get('complete_command', StrDispatch())
self.strdispatchers['complete_command'] = sdisp
self.Completer.custom_completers = sdisp
self.set_hook('complete_command', module_completer, str_key = 'import')
self.set_hook('complete_command', module_completer, str_key = 'from')
self.set_hook('complete_command', module_completer, str_key = '%aimport')
self.set_hook('complete_command', magic_run_completer, str_key = '%run')
self.set_hook('complete_command', cd_completer, str_key = '%cd')
self.set_hook('complete_command', reset_completer, str_key = '%reset')
@skip_doctest
def complete(self, text, line=None, cursor_pos=None):
"""Return the completed text and a list of completions.
Parameters
----------
text : string
A string of text to be completed on. It can be given as empty and
instead a line/position pair are given. In this case, the
completer itself will split the line like readline does.
line : string, optional
The complete line that text is part of.
cursor_pos : int, optional
The position of the cursor on the input line.
Returns
-------
text : string
The actual text that was completed.
matches : list
A sorted list with all possible completions.
Notes
-----
The optional arguments allow the completion to take more context into
account, and are part of the low-level completion API.
This is a wrapper around the completion mechanism, similar to what
readline does at the command line when the TAB key is hit. By
exposing it as a method, it can be used by other non-readline
environments (such as GUIs) for text completion.
Examples
--------
In [1]: x = 'hello'
In [2]: _ip.complete('x.l')
Out[2]: ('x.l', ['x.ljust', 'x.lower', 'x.lstrip'])
"""
# Inject names into __builtin__ so we can complete on the added names.
with self.builtin_trap:
return self.Completer.complete(text, line, cursor_pos)
def set_custom_completer(self, completer, pos=0) -> None:
"""Adds a new custom completer function.
The position argument (defaults to 0) is the index in the completers
list where you want the completer to be inserted.
        `completer` should have the following signature::

            def completion(self: Completer, text: str) -> List[str]:
                raise NotImplementedError

        It will be bound to the current Completer instance; it receives some
        text and should return a list of current completions to suggest to
        the user.
"""
newcomp = types.MethodType(completer, self.Completer)
self.Completer.custom_matchers.insert(pos,newcomp)
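    # Illustrative sketch: a trivial custom completer that always offers the
    # same suggestion. It is bound to the Completer instance, so `self` inside
    # it is the IPCompleter, not the shell.
    #
    #     def complete_flags(self, text):
    #         return ["--verbose"]
    #
    #     ip.set_custom_completer(complete_flags)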
def set_completer_frame(self, frame=None):
"""Set the frame of the completer."""
if frame:
self.Completer.namespace = frame.f_locals
self.Completer.global_namespace = frame.f_globals
else:
self.Completer.namespace = self.user_ns
self.Completer.global_namespace = self.user_global_ns
#-------------------------------------------------------------------------
# Things related to magics
#-------------------------------------------------------------------------
def init_magics(self):
from IPython.core import magics as m
self.magics_manager = magic.MagicsManager(shell=self,
parent=self,
user_magics=m.UserMagics(self))
self.configurables.append(self.magics_manager)
# Expose as public API from the magics manager
self.register_magics = self.magics_manager.register
self.register_magics(m.AutoMagics, m.BasicMagics, m.CodeMagics,
m.ConfigMagics, m.DisplayMagics, m.ExecutionMagics,
m.ExtensionMagics, m.HistoryMagics, m.LoggingMagics,
m.NamespaceMagics, m.OSMagics, m.PackagingMagics,
m.PylabMagics, m.ScriptMagics,
)
self.register_magics(m.AsyncMagics)
# Register Magic Aliases
mman = self.magics_manager
# FIXME: magic aliases should be defined by the Magics classes
# or in MagicsManager, not here
mman.register_alias('ed', 'edit')
mman.register_alias('hist', 'history')
mman.register_alias('rep', 'recall')
mman.register_alias('SVG', 'svg', 'cell')
mman.register_alias('HTML', 'html', 'cell')
mman.register_alias('file', 'writefile', 'cell')
# FIXME: Move the color initialization to the DisplayHook, which
# should be split into a prompt manager and displayhook. We probably
# even need a centralize colors management object.
self.run_line_magic('colors', self.colors)
# Defined here so that it's included in the documentation
@functools.wraps(magic.MagicsManager.register_function)
def register_magic_function(self, func, magic_kind='line', magic_name=None):
self.magics_manager.register_function(
func, magic_kind=magic_kind, magic_name=magic_name
)
def _find_with_lazy_load(self, /, type_, magic_name: str):
"""
Try to find a magic potentially lazy-loading it.
Parameters
----------
type_: "line"|"cell"
the type of magics we are trying to find/lazy load.
magic_name: str
The name of the magic we are trying to find/lazy load
        Note that this may have side effects (for example, lazily loading an
        extension in order to find the magic).
"""
finder = {"line": self.find_line_magic, "cell": self.find_cell_magic}[type_]
fn = finder(magic_name)
if fn is not None:
return fn
lazy = self.magics_manager.lazy_magics.get(magic_name)
if lazy is None:
return None
self.run_line_magic("load_ext", lazy)
res = finder(magic_name)
return res
def run_line_magic(self, magic_name: str, line, _stack_depth=1):
"""Execute the given line magic.
Parameters
----------
magic_name : str
Name of the desired magic function, without '%' prefix.
line : str
The rest of the input line as a single string.
_stack_depth : int
If run_line_magic() is called from magic() then _stack_depth=2.
This is added to ensure backward compatibility for use of 'get_ipython().magic()'
"""
fn = self._find_with_lazy_load("line", magic_name)
if fn is None:
lazy = self.magics_manager.lazy_magics.get(magic_name)
if lazy:
self.run_line_magic("load_ext", lazy)
fn = self.find_line_magic(magic_name)
if fn is None:
cm = self.find_cell_magic(magic_name)
etpl = "Line magic function `%%%s` not found%s."
extra = '' if cm is None else (' (But cell magic `%%%%%s` exists, '
'did you mean that instead?)' % magic_name )
raise UsageError(etpl % (magic_name, extra))
else:
# Note: this is the distance in the stack to the user's frame.
# This will need to be updated if the internal calling logic gets
# refactored, or else we'll be expanding the wrong variables.
# Determine stack_depth depending on where run_line_magic() has been called
stack_depth = _stack_depth
if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
# magic has opted out of var_expand
magic_arg_s = line
else:
magic_arg_s = self.var_expand(line, stack_depth)
# Put magic args in a list so we can call with f(*a) syntax
args = [magic_arg_s]
kwargs = {}
# Grab local namespace if we need it:
if getattr(fn, "needs_local_scope", False):
kwargs['local_ns'] = self.get_local_scope(stack_depth)
with self.builtin_trap:
result = fn(*args, **kwargs)
return result
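    # Illustrative sketch: the programmatic equivalent of typing line magics
    # at the prompt. `ip` is assumed to be a live shell instance.
    #
    #     ip.run_line_magic("time", "sum(range(10**6))")  # same as %time ...
    #     ip.run_line_magic("load_ext", "autoreload")     # same as %load_ext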
def get_local_scope(self, stack_depth):
"""Get local scope at given stack depth.
Parameters
----------
stack_depth : int
Depth relative to calling frame
"""
return sys._getframe(stack_depth + 1).f_locals
def run_cell_magic(self, magic_name, line, cell):
"""Execute the given cell magic.
Parameters
----------
magic_name : str
Name of the desired magic function, without '%' prefix.
line : str
The rest of the first input line as a single string.
cell : str
The body of the cell as a (possibly multiline) string.
"""
fn = self._find_with_lazy_load("cell", magic_name)
if fn is None:
lm = self.find_line_magic(magic_name)
etpl = "Cell magic `%%{0}` not found{1}."
extra = '' if lm is None else (' (But line magic `%{0}` exists, '
'did you mean that instead?)'.format(magic_name))
raise UsageError(etpl.format(magic_name, extra))
elif cell == '':
message = '%%{0} is a cell magic, but the cell body is empty.'.format(magic_name)
if self.find_line_magic(magic_name) is not None:
message += ' Did you mean the line magic %{0} (single %)?'.format(magic_name)
raise UsageError(message)
else:
# Note: this is the distance in the stack to the user's frame.
# This will need to be updated if the internal calling logic gets
# refactored, or else we'll be expanding the wrong variables.
stack_depth = 2
if getattr(fn, magic.MAGIC_NO_VAR_EXPAND_ATTR, False):
# magic has opted out of var_expand
magic_arg_s = line
else:
magic_arg_s = self.var_expand(line, stack_depth)
kwargs = {}
if getattr(fn, "needs_local_scope", False):
kwargs['local_ns'] = self.user_ns
with self.builtin_trap:
args = (magic_arg_s, cell)
result = fn(*args, **kwargs)
return result
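    # Illustrative sketch: invoking a cell magic programmatically; the first
    # line and the cell body are passed separately, mirroring %%magic syntax.
    #
    #     ip.run_cell_magic("timeit", "-n 10", "x = sum(range(1000))")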
def find_line_magic(self, magic_name):
"""Find and return a line magic by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics['line'].get(magic_name)
def find_cell_magic(self, magic_name):
"""Find and return a cell magic by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics['cell'].get(magic_name)
def find_magic(self, magic_name, magic_kind='line'):
"""Find and return a magic of the given type by name.
Returns None if the magic isn't found."""
return self.magics_manager.magics[magic_kind].get(magic_name)
def magic(self, arg_s):
"""
DEPRECATED
Deprecated since IPython 0.13 (warning added in
8.1), use run_line_magic(magic_name, parameter_s).
Call a magic function by name.
Input: a string containing the name of the magic function to call and
any additional arguments to be passed to the magic.
magic('name -opt foo bar') is equivalent to typing at the ipython
prompt:
In[1]: %name -opt foo bar
To call a magic without arguments, simply use magic('name').
This provides a proper Python function to call IPython's magics in any
valid Python code you can type at the interpreter, including loops and
compound statements.
"""
warnings.warn(
"`magic(...)` is deprecated since IPython 0.13 (warning added in "
"8.1), use run_line_magic(magic_name, parameter_s).",
DeprecationWarning,
stacklevel=2,
)
# TODO: should we issue a loud deprecation warning here?
magic_name, _, magic_arg_s = arg_s.partition(' ')
magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
return self.run_line_magic(magic_name, magic_arg_s, _stack_depth=2)
#-------------------------------------------------------------------------
# Things related to macros
#-------------------------------------------------------------------------
def define_macro(self, name, themacro):
"""Define a new macro
Parameters
----------
name : str
The name of the macro.
themacro : str or Macro
The action to do upon invoking the macro. If a string, a new
Macro object is created by passing the string to it.
"""
from IPython.core import macro
if isinstance(themacro, str):
themacro = macro.Macro(themacro)
if not isinstance(themacro, macro.Macro):
raise ValueError('A macro must be a string or a Macro instance.')
self.user_ns[name] = themacro
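    # Illustrative sketch: defining a macro from a string of source; entering
    # the macro's name at the prompt replays the stored source (single-line
    # input goes through the prefilter, which handles Macro objects).
    #
    #     ip.define_macro("setup", "import os\nimport sys\n")
    #     ip.run_cell("setup")   # replays the two imports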
#-------------------------------------------------------------------------
# Things related to the running of system commands
#-------------------------------------------------------------------------
def system_piped(self, cmd):
"""Call the given cmd in a subprocess, piping stdout/err
Parameters
----------
        cmd : str
            Command to execute (can not end in '&', as background processes
            are not supported). Should not be a command that expects input
            other than simple text.
"""
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
# We do not support backgrounding processes because we either use
# pexpect or pipes to read from. Users can always just call
# os.system() or use ip.system=ip.system_raw
# if they really want a background process.
raise OSError("Background processes not supported.")
# we explicitly do NOT return the subprocess status code, because
# a non-None value would trigger :func:`sys.displayhook` calls.
# Instead, we store the exit_code in user_ns.
self.user_ns['_exit_code'] = system(self.var_expand(cmd, depth=1))
def system_raw(self, cmd):
"""Call the given cmd in a subprocess using os.system on Windows or
subprocess.call using the system shell on other platforms.
Parameters
----------
cmd : str
Command to execute.
"""
cmd = self.var_expand(cmd, depth=1)
        # warn if there is an IPython magic alternative.
        cmd_parts = cmd.split()
        main_cmd = cmd_parts[0] if cmd_parts else ""
has_magic_alternatives = ("pip", "conda", "cd")
if main_cmd in has_magic_alternatives:
warnings.warn(
(
"You executed the system command !{0} which may not work "
"as expected. Try the IPython magic %{0} instead."
).format(main_cmd)
)
# protect os.system from UNC paths on Windows, which it can't handle:
if sys.platform == 'win32':
from IPython.utils._process_win32 import AvoidUNCPath
with AvoidUNCPath() as path:
if path is not None:
cmd = '"pushd %s &&"%s' % (path, cmd)
try:
ec = os.system(cmd)
except KeyboardInterrupt:
print('\n' + self.get_exception_only(), file=sys.stderr)
ec = -2
else:
# For posix the result of the subprocess.call() below is an exit
# code, which by convention is zero for success, positive for
# program failure. Exit codes above 128 are reserved for signals,
# and the formula for converting a signal to an exit code is usually
# signal_number+128. To more easily differentiate between exit
# codes and signals, ipython uses negative numbers. For instance
# since control-c is signal 2 but exit code 130, ipython's
# _exit_code variable will read -2. Note that some shells like
# csh and fish don't follow sh/bash conventions for exit codes.
executable = os.environ.get('SHELL', None)
try:
# Use env shell instead of default /bin/sh
ec = subprocess.call(cmd, shell=True, executable=executable)
except KeyboardInterrupt:
# intercept control-C; a long traceback is not useful here
print('\n' + self.get_exception_only(), file=sys.stderr)
ec = 130
if ec > 128:
ec = -(ec - 128)
# We explicitly do NOT return the subprocess status code, because
# a non-None value would trigger :func:`sys.displayhook` calls.
# Instead, we store the exit_code in user_ns. Note the semantics
        # of _exit_code: for control-c, _exit_code == -signal.SIGINT,
# but raising SystemExit(_exit_code) will give status 254!
self.user_ns['_exit_code'] = ec
# use piped system by default, because it is better behaved
system = system_piped
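    # Illustrative sketch of the exit-code convention described above, for
    # the posix branch of system_raw: codes above 128 are folded to negative
    # signal numbers.
    #
    #     ec = 130               # shell reports SIGINT as 128 + 2
    #     if ec > 128:
    #         ec = -(ec - 128)   # -> -2, i.e. -signal.SIGINT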
def getoutput(self, cmd, split=True, depth=0):
"""Get output (possibly including stderr) from a subprocess.
Parameters
----------
        cmd : str
            Command to execute (can not end in '&', as background processes
            are not supported).
split : bool, optional
If True, split the output into an IPython SList. Otherwise, an
IPython LSString is returned. These are objects similar to normal
lists and strings, with a few convenience attributes for easier
manipulation of line-based output. You can use '?' on them for
details.
depth : int, optional
How many frames above the caller are the local variables which should
be expanded in the command string? The default (0) assumes that the
expansion variables are in the stack frame calling this function.
"""
if cmd.rstrip().endswith('&'):
# this is *far* from a rigorous test
raise OSError("Background processes not supported.")
out = getoutput(self.var_expand(cmd, depth=depth+1))
if split:
out = SList(out.splitlines())
else:
out = LSString(out)
return out
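    # Illustrative sketch: capturing command output as an SList, whose helper
    # attributes ease line-oriented post-processing.
    #
    #     files = ip.getoutput("ls")             # SList of output lines
    #     files.grep("py$")                      # readline-style helpers
    #     raw = ip.getoutput("ls", split=False)  # a single LSString instead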
#-------------------------------------------------------------------------
# Things related to aliases
#-------------------------------------------------------------------------
def init_alias(self):
self.alias_manager = AliasManager(shell=self, parent=self)
self.configurables.append(self.alias_manager)
#-------------------------------------------------------------------------
# Things related to extensions
#-------------------------------------------------------------------------
def init_extension_manager(self):
self.extension_manager = ExtensionManager(shell=self, parent=self)
self.configurables.append(self.extension_manager)
#-------------------------------------------------------------------------
# Things related to payloads
#-------------------------------------------------------------------------
def init_payload(self):
self.payload_manager = PayloadManager(parent=self)
self.configurables.append(self.payload_manager)
#-------------------------------------------------------------------------
# Things related to the prefilter
#-------------------------------------------------------------------------
def init_prefilter(self):
self.prefilter_manager = PrefilterManager(shell=self, parent=self)
self.configurables.append(self.prefilter_manager)
# Ultimately this will be refactored in the new interpreter code, but
# for now, we should expose the main prefilter method (there's legacy
# code out there that may rely on this).
self.prefilter = self.prefilter_manager.prefilter_lines
def auto_rewrite_input(self, cmd):
"""Print to the screen the rewritten form of the user's command.
This shows visual feedback by rewriting input lines that cause
automatic calling to kick in, like::
/f x
into::
------> f(x)
after the user's input prompt. This helps the user understand that the
input line was transformed automatically by IPython.
"""
if not self.show_rewritten_input:
return
# This is overridden in TerminalInteractiveShell to use fancy prompts
print("------> " + cmd)
#-------------------------------------------------------------------------
# Things related to extracting values/expressions from kernel and user_ns
#-------------------------------------------------------------------------
def _user_obj_error(self):
"""return simple exception dict
for use in user_expressions
"""
etype, evalue, tb = self._get_exc_info()
stb = self.InteractiveTB.get_exception_only(etype, evalue)
exc_info = {
"status": "error",
"traceback": stb,
"ename": etype.__name__,
"evalue": py3compat.safe_unicode(evalue),
}
return exc_info
def _format_user_obj(self, obj):
"""format a user object to display dict
for use in user_expressions
"""
data, md = self.display_formatter.format(obj)
value = {
'status' : 'ok',
'data' : data,
'metadata' : md,
}
return value
def user_expressions(self, expressions):
"""Evaluate a dict of expressions in the user's namespace.
Parameters
----------
expressions : dict
A dict with string keys and string values. The expression values
should be valid Python expressions, each of which will be evaluated
in the user namespace.
Returns
-------
A dict, keyed like the input expressions dict, with the rich mime-typed
display_data of each value.
"""
out = {}
user_ns = self.user_ns
global_ns = self.user_global_ns
for key, expr in expressions.items():
try:
value = self._format_user_obj(eval(expr, global_ns, user_ns))
except:
value = self._user_obj_error()
out[key] = value
return out
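    # Illustrative sketch: evaluating several expressions at once; each value
    # comes back as a display_data-style dict (or an error dict).
    #
    #     out = ip.user_expressions({"two": "1 + 1", "bad": "1 / 0"})
    #     out["two"]["status"]   # -> 'ok'
    #     out["bad"]["status"]   # -> 'error'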
#-------------------------------------------------------------------------
# Things related to the running of code
#-------------------------------------------------------------------------
def ex(self, cmd):
"""Execute a normal python statement in user namespace."""
with self.builtin_trap:
exec(cmd, self.user_global_ns, self.user_ns)
def ev(self, expr):
"""Evaluate python expression expr in user namespace.
Returns the result of evaluation
"""
with self.builtin_trap:
return eval(expr, self.user_global_ns, self.user_ns)
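    # Illustrative sketch: ex() runs statements, ev() evaluates expressions,
    # both against the user namespace.
    #
    #     ip.ex("counter = 41")
    #     ip.ev("counter + 1")   # -> 42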
def safe_execfile(self, fname, *where, exit_ignore=False, raise_exceptions=False, shell_futures=False):
"""A safe version of the builtin execfile().
This version will never throw an exception, but instead print
helpful error messages to the screen. This only works on pure
Python files with the .py extension.
Parameters
----------
fname : string
The name of the file to be executed.
*where : tuple
One or two namespaces, passed to execfile() as (globals,locals).
If only one is given, it is passed as both.
exit_ignore : bool (False)
If True, then silence SystemExit for non-zero status (it is always
silenced for zero status, as it is so common).
raise_exceptions : bool (False)
If True raise exceptions everywhere. Meant for testing.
shell_futures : bool (False)
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
"""
fname = Path(fname).expanduser().resolve()
# Make sure we can open the file
try:
with fname.open("rb"):
pass
except:
warn('Could not open file <%s> for safe execution.' % fname)
return
# Find things also in current directory. This is needed to mimic the
# behavior of running a script from the system command line, where
# Python inserts the script's directory into sys.path
dname = str(fname.parent)
with prepended_to_syspath(dname), self.builtin_trap:
try:
glob, loc = (where + (None, ))[:2]
py3compat.execfile(
fname, glob, loc,
self.compile if shell_futures else None)
except SystemExit as status:
# If the call was made with 0 or None exit status (sys.exit(0)
# or sys.exit() ), don't bother showing a traceback, as both of
# these are considered normal by the OS:
# > python -c'import sys;sys.exit(0)'; echo $?
# 0
# > python -c'import sys;sys.exit()'; echo $?
# 0
# For other exit status, we show the exception unless
# explicitly silenced, but only in short form.
if status.code:
if raise_exceptions:
raise
if not exit_ignore:
self.showtraceback(exception_only=True)
except:
if raise_exceptions:
raise
# tb offset is 2 because we wrap execfile
self.showtraceback(tb_offset=2)
def safe_execfile_ipy(self, fname, shell_futures=False, raise_exceptions=False):
"""Like safe_execfile, but for .ipy or .ipynb files with IPython syntax.
Parameters
----------
fname : str
The name of the file to execute. The filename must have a
.ipy or .ipynb extension.
shell_futures : bool (False)
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
raise_exceptions : bool (False)
If True raise exceptions everywhere. Meant for testing.
"""
fname = Path(fname).expanduser().resolve()
# Make sure we can open the file
try:
with fname.open("rb"):
pass
except:
warn('Could not open file <%s> for safe execution.' % fname)
return
# Find things also in current directory. This is needed to mimic the
# behavior of running a script from the system command line, where
# Python inserts the script's directory into sys.path
dname = str(fname.parent)
def get_cells():
"""generator for sequence of code blocks to run"""
if fname.suffix == ".ipynb":
from nbformat import read
nb = read(fname, as_version=4)
if not nb.cells:
return
for cell in nb.cells:
if cell.cell_type == 'code':
yield cell.source
else:
yield fname.read_text(encoding="utf-8")
with prepended_to_syspath(dname):
try:
for cell in get_cells():
result = self.run_cell(cell, silent=True, shell_futures=shell_futures)
if raise_exceptions:
result.raise_error()
elif not result.success:
break
except:
if raise_exceptions:
raise
self.showtraceback()
warn('Unknown failure executing file: <%s>' % fname)
def safe_run_module(self, mod_name, where):
"""A safe version of runpy.run_module().
This version will never throw an exception, but instead print
helpful error messages to the screen.
`SystemExit` exceptions with status code 0 or None are ignored.
Parameters
----------
mod_name : string
The name of the module to be executed.
where : dict
The globals namespace.
"""
try:
try:
where.update(
runpy.run_module(str(mod_name), run_name="__main__",
alter_sys=True)
)
except SystemExit as status:
if status.code:
raise
except:
self.showtraceback()
warn('Unknown failure executing module: <%s>' % mod_name)
def run_cell(self, raw_cell, store_history=False, silent=False, shell_futures=True):
"""Run a complete IPython cell.
Parameters
----------
raw_cell : str
The code (including IPython code such as %magic functions) to run.
store_history : bool
If True, the raw and translated cell will be stored in IPython's
history. For user code calling back into IPython's machinery, this
should be set to False.
        silent : bool
            If True, avoid side-effects, such as implicit displayhooks and
            logging. silent=True forces store_history=False.
shell_futures : bool
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
Returns
-------
result : :class:`ExecutionResult`
"""
result = None
try:
result = self._run_cell(
raw_cell, store_history, silent, shell_futures)
finally:
self.events.trigger('post_execute')
if not silent:
self.events.trigger('post_run_cell', result)
return result
def _run_cell(self, raw_cell:str, store_history:bool, silent:bool, shell_futures:bool) -> ExecutionResult:
"""Internal method to run a complete IPython cell."""
# we need to avoid calling self.transform_cell multiple time on the same thing
# so we need to store some results:
preprocessing_exc_tuple = None
try:
transformed_cell = self.transform_cell(raw_cell)
except Exception:
transformed_cell = raw_cell
preprocessing_exc_tuple = sys.exc_info()
assert transformed_cell is not None
coro = self.run_cell_async(
raw_cell,
store_history=store_history,
silent=silent,
shell_futures=shell_futures,
transformed_cell=transformed_cell,
preprocessing_exc_tuple=preprocessing_exc_tuple,
)
        # run_cell_async is async, but may not actually need an eventloop.
        # when this is the case, we want to run it using the pseudo_sync_runner
        # so that code can invoke eventloops (for example via the %run and
        # %paste magics).
if self.trio_runner:
runner = self.trio_runner
elif self.should_run_async(
raw_cell,
transformed_cell=transformed_cell,
preprocessing_exc_tuple=preprocessing_exc_tuple,
):
runner = self.loop_runner
else:
runner = _pseudo_sync_runner
try:
return runner(coro)
except BaseException as e:
info = ExecutionInfo(raw_cell, store_history, silent, shell_futures)
result = ExecutionResult(info)
result.error_in_exec = e
self.showtraceback(running_compiled_code=True)
return result
def should_run_async(
self, raw_cell: str, *, transformed_cell=None, preprocessing_exc_tuple=None
) -> bool:
"""Return whether a cell should be run asynchronously via a coroutine runner
Parameters
----------
raw_cell : str
The code to be executed
Returns
-------
result: bool
Whether the code needs to be run with a coroutine runner or not
.. versionadded:: 7.0
"""
if not self.autoawait:
return False
if preprocessing_exc_tuple is not None:
return False
assert preprocessing_exc_tuple is None
if transformed_cell is None:
            warnings.warn(
                "`should_run_async` will not call `transform_cell`"
                " automatically in the future. Please pass the result to"
                " the `transformed_cell` argument and any exception that"
                " happens during the transform in `preprocessing_exc_tuple` in"
                " IPython 7.17 and above.",
                DeprecationWarning,
                stacklevel=2,
            )
try:
cell = self.transform_cell(raw_cell)
except Exception:
# any exception during transform will be raised
# prior to execution
return False
else:
cell = transformed_cell
return _should_be_async(cell)
async def run_cell_async(
self,
raw_cell: str,
store_history=False,
silent=False,
shell_futures=True,
*,
transformed_cell: Optional[str] = None,
preprocessing_exc_tuple: Optional[Any] = None
) -> ExecutionResult:
"""Run a complete IPython cell asynchronously.
Parameters
----------
raw_cell : str
The code (including IPython code such as %magic functions) to run.
store_history : bool
If True, the raw and translated cell will be stored in IPython's
history. For user code calling back into IPython's machinery, this
should be set to False.
        silent : bool
            If True, avoid side-effects, such as implicit displayhooks and
            logging. silent=True forces store_history=False.
shell_futures : bool
If True, the code will share future statements with the interactive
shell. It will both be affected by previous __future__ imports, and
any __future__ imports in the code will affect the shell. If False,
__future__ imports are not shared in either direction.
transformed_cell: str
cell that was passed through transformers
preprocessing_exc_tuple:
trace if the transformation failed.
Returns
-------
result : :class:`ExecutionResult`
.. versionadded:: 7.0
"""
info = ExecutionInfo(
raw_cell, store_history, silent, shell_futures)
result = ExecutionResult(info)
if (not raw_cell) or raw_cell.isspace():
self.last_execution_succeeded = True
self.last_execution_result = result
return result
if silent:
store_history = False
if store_history:
result.execution_count = self.execution_count
def error_before_exec(value):
if store_history:
self.execution_count += 1
result.error_before_exec = value
self.last_execution_succeeded = False
self.last_execution_result = result
return result
self.events.trigger('pre_execute')
if not silent:
self.events.trigger('pre_run_cell', info)
if transformed_cell is None:
            warnings.warn(
                "`run_cell_async` will not call `transform_cell`"
                " automatically in the future. Please pass the result to"
                " the `transformed_cell` argument and any exception that"
                " happens during the transform in `preprocessing_exc_tuple` in"
                " IPython 7.17 and above.",
                DeprecationWarning,
                stacklevel=2,
            )
# If any of our input transformation (input_transformer_manager or
# prefilter_manager) raises an exception, we store it in this variable
# so that we can display the error after logging the input and storing
# it in the history.
try:
cell = self.transform_cell(raw_cell)
except Exception:
preprocessing_exc_tuple = sys.exc_info()
cell = raw_cell # cell has to exist so it can be stored/logged
else:
preprocessing_exc_tuple = None
else:
if preprocessing_exc_tuple is None:
cell = transformed_cell
else:
cell = raw_cell
# Store raw and processed history
if store_history and raw_cell.strip(" %") != "paste":
self.history_manager.store_inputs(self.execution_count, cell, raw_cell)
if not silent:
self.logger.log(cell, raw_cell)
# Display the exception if input processing failed.
if preprocessing_exc_tuple is not None:
self.showtraceback(preprocessing_exc_tuple)
if store_history:
self.execution_count += 1
return error_before_exec(preprocessing_exc_tuple[1])
# Our own compiler remembers the __future__ environment. If we want to
# run code with a separate __future__ environment, use the default
# compiler
compiler = self.compile if shell_futures else self.compiler_class()
_run_async = False
with self.builtin_trap:
cell_name = compiler.cache(cell, self.execution_count, raw_code=raw_cell)
with self.display_trap:
# Compile to bytecode
try:
code_ast = compiler.ast_parse(cell, filename=cell_name)
except self.custom_exceptions as e:
etype, value, tb = sys.exc_info()
self.CustomTB(etype, value, tb)
return error_before_exec(e)
except IndentationError as e:
self.showindentationerror()
return error_before_exec(e)
except (OverflowError, SyntaxError, ValueError, TypeError,
MemoryError) as e:
self.showsyntaxerror()
return error_before_exec(e)
# Apply AST transformations
try:
code_ast = self.transform_ast(code_ast)
except InputRejected as e:
self.showtraceback()
return error_before_exec(e)
# Give the displayhook a reference to our ExecutionResult so it
# can fill in the output value.
self.displayhook.exec_result = result
# Execute the user code
interactivity = "none" if silent else self.ast_node_interactivity
has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
interactivity=interactivity, compiler=compiler, result=result)
self.last_execution_succeeded = not has_raised
self.last_execution_result = result
# Reset this so later displayed values do not modify the
# ExecutionResult
self.displayhook.exec_result = None
if store_history:
# Write output to the database. Does nothing unless
# history output logging is enabled.
self.history_manager.store_output(self.execution_count)
# Each cell is a *single* input, regardless of how many lines it has
self.execution_count += 1
return result
def transform_cell(self, raw_cell):
"""Transform an input cell before parsing it.
Static transformations, implemented in IPython.core.inputtransformer2,
deal with things like ``%magic`` and ``!system`` commands.
These run on all input.
Dynamic transformations, for things like unescaped magics and the exit
autocall, depend on the state of the interpreter.
These only apply to single line inputs.
These string-based transformations are followed by AST transformations;
see :meth:`transform_ast`.
"""
# Static input transformations
cell = self.input_transformer_manager.transform_cell(raw_cell)
if len(cell.splitlines()) == 1:
# Dynamic transformations - only applied for single line commands
with self.builtin_trap:
# use prefilter_lines to handle trailing newlines
# restore trailing newline for ast.parse
cell = self.prefilter_manager.prefilter_lines(cell) + '\n'
lines = cell.splitlines(keepends=True)
for transform in self.input_transformers_post:
lines = transform(lines)
cell = ''.join(lines)
return cell
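    # Illustrative sketch: static transformations turn IPython syntax into
    # plain Python before parsing (exact output strings may vary slightly
    # between IPython versions).
    #
    #     ip.transform_cell("!ls")
    #     # -> "get_ipython().system('ls')\n"
    #     ip.transform_cell("%time 1+1")
    #     # -> "get_ipython().run_line_magic('time', '1+1')\n"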
def transform_ast(self, node):
"""Apply the AST transformations from self.ast_transformers
Parameters
----------
node : ast.Node
The root node to be transformed. Typically called with the ast.Module
produced by parsing user input.
Returns
-------
An ast.Node corresponding to the node it was called with. Note that it
may also modify the passed object, so don't rely on references to the
original AST.
"""
for transformer in self.ast_transformers:
try:
node = transformer.visit(node)
except InputRejected:
# User-supplied AST transformers can reject an input by raising
# an InputRejected. Short-circuit in this case so that we
# don't unregister the transform.
raise
except Exception:
warn("AST transformer %r threw an error. It will be unregistered." % transformer)
self.ast_transformers.remove(transformer)
if self.ast_transformers:
ast.fix_missing_locations(node)
return node
def _update_code_co_name(self, code):
"""Python 3.10 changed the behaviour so that whenever a code object
is assembled in the compile(ast) the co_firstlineno would be == 1.
This makes pydevd/debugpy think that all cells invoked are the same
since it caches information based on (co_firstlineno, co_name, co_filename).
Given that, this function changes the code 'co_name' to be unique
based on the first real lineno of the code (which also has a nice
side effect of customizing the name so that it's not always <module>).
See: https://github.com/ipython/ipykernel/issues/841
"""
if not hasattr(code, "replace"):
# It may not be available on older versions of Python (only
# available for 3.8 onwards).
return code
try:
first_real_line = next(dis.findlinestarts(code))[1]
except StopIteration:
return code
return code.replace(co_name="<cell line: %s>" % (first_real_line,))
async def run_ast_nodes(
self,
nodelist: ListType[stmt],
cell_name: str,
interactivity="last_expr",
compiler=compile,
result=None,
):
"""Run a sequence of AST nodes. The execution mode depends on the
interactivity parameter.
Parameters
----------
nodelist : list
A sequence of AST nodes to run.
cell_name : str
Will be passed to the compiler as the filename of the cell. Typically
the value returned by ip.compile.cache(cell).
interactivity : str
'all', 'last', 'last_expr' , 'last_expr_or_assign' or 'none',
specifying which nodes should be run interactively (displaying output
from expressions). 'last_expr' will run the last node interactively
only if it is an expression (i.e. expressions in loops or other blocks
are not displayed) 'last_expr_or_assign' will run the last expression
or the last assignment. Other values for this parameter will raise a
ValueError.
compiler : callable
A function with the same interface as the built-in compile(), to turn
the AST nodes into code objects. Default is the built-in compile().
result : ExecutionResult, optional
An object to store exceptions that occur during execution.
Returns
-------
True if an exception occurred while running code, False if it finished
running.
"""
        if not nodelist:
            # nothing ran, so nothing raised; match the documented return type
            return False
if interactivity == 'last_expr_or_assign':
if isinstance(nodelist[-1], _assign_nodes):
asg = nodelist[-1]
if isinstance(asg, ast.Assign) and len(asg.targets) == 1:
target = asg.targets[0]
elif isinstance(asg, _single_targets_nodes):
target = asg.target
else:
target = None
if isinstance(target, ast.Name):
nnode = ast.Expr(ast.Name(target.id, ast.Load()))
ast.fix_missing_locations(nnode)
nodelist.append(nnode)
interactivity = 'last_expr'
_async = False
if interactivity == 'last_expr':
if isinstance(nodelist[-1], ast.Expr):
interactivity = "last"
else:
interactivity = "none"
if interactivity == 'none':
to_run_exec, to_run_interactive = nodelist, []
elif interactivity == 'last':
to_run_exec, to_run_interactive = nodelist[:-1], nodelist[-1:]
elif interactivity == 'all':
to_run_exec, to_run_interactive = [], nodelist
else:
raise ValueError("Interactivity was %r" % interactivity)
try:
def compare(code):
is_async = inspect.CO_COROUTINE & code.co_flags == inspect.CO_COROUTINE
return is_async
# refactor that to just change the mod constructor.
to_run = []
for node in to_run_exec:
to_run.append((node, "exec"))
for node in to_run_interactive:
to_run.append((node, "single"))
for node, mode in to_run:
if mode == "exec":
mod = Module([node], [])
elif mode == "single":
mod = ast.Interactive([node])
with compiler.extra_flags(
getattr(ast, "PyCF_ALLOW_TOP_LEVEL_AWAIT", 0x0)
if self.autoawait
else 0x0
):
code = compiler(mod, cell_name, mode)
code = self._update_code_co_name(code)
asy = compare(code)
if await self.run_code(code, result, async_=asy):
return True
# Flush softspace
if softspace(sys.stdout, 0):
print()
except:
# It's possible to have exceptions raised here, typically by
# compilation of odd code (such as a naked 'return' outside a
# function) that did parse but isn't valid. Typically the exception
# is a SyntaxError, but it's safest just to catch anything and show
# the user a traceback.
# We do only one try/except outside the loop to minimize the impact
# on runtime, and also because if any node in the node list is
# broken, we should stop execution completely.
if result:
result.error_before_exec = sys.exc_info()[1]
self.showtraceback()
return True
return False
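    # How the split above plays out (a sketch): for a parsed cell
    # "a = 1\na + 1" with interactivity='last_expr', the trailing ast.Expr is
    # detected, so to_run_exec == [<Assign>] compiles in 'exec' mode and
    # to_run_interactive == [<Expr>] in 'single' mode -- 'single' is what makes
    # the value of `a + 1` display. With interactivity='none', both nodes go
    # through 'exec' mode and nothing is displayed.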
async def run_code(self, code_obj, result=None, *, async_=False):
"""Execute a code object.
When an exception occurs, self.showtraceback() is called to display a
traceback.
Parameters
----------
code_obj : code object
A compiled code object, to be executed
result : ExecutionResult, optional
An object to store exceptions that occur during execution.
async_ : Bool (Experimental)
Attempt to run top-level asynchronous code in a default loop.
Returns
-------
False : successful execution.
True : an error occurred.
"""
# special value to say that anything above is IPython and should be
# hidden.
__tracebackhide__ = "__ipython_bottom__"
# Set our own excepthook in case the user code tries to call it
# directly, so that the IPython crash handler doesn't get triggered
old_excepthook, sys.excepthook = sys.excepthook, self.excepthook
# we save the original sys.excepthook in the instance, in case config
# code (such as magics) needs access to it.
self.sys_excepthook = old_excepthook
        outflag = True  # assume failure by default; the error paths below are numerous, and success resets it
try:
try:
if async_:
await eval(code_obj, self.user_global_ns, self.user_ns)
else:
exec(code_obj, self.user_global_ns, self.user_ns)
finally:
# Reset our crash handler in place
sys.excepthook = old_excepthook
except SystemExit as e:
if result is not None:
result.error_in_exec = e
self.showtraceback(exception_only=True)
warn("To exit: use 'exit', 'quit', or Ctrl-D.", stacklevel=1)
except self.custom_exceptions:
etype, value, tb = sys.exc_info()
if result is not None:
result.error_in_exec = value
self.CustomTB(etype, value, tb)
except:
if result is not None:
result.error_in_exec = sys.exc_info()[1]
self.showtraceback(running_compiled_code=True)
else:
outflag = False
return outflag
# For backwards compatibility
runcode = run_code
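    # Sketch of the async_ path in run_code, standalone (assumes Python >= 3.8,
    # which provides ast.PyCF_ALLOW_TOP_LEVEL_AWAIT, and a running event loop
    # as in IPython's autoawait mode):
    #
    #     import ast, asyncio, inspect
    #     flags = ast.PyCF_ALLOW_TOP_LEVEL_AWAIT
    #     code_obj = compile("await asyncio.sleep(0)", "<cell>", "exec", flags=flags)
    #     assert code_obj.co_flags & inspect.CO_COROUTINE
    #     await eval(code_obj, globals(), locals())
    #
    # eval() of such a code object returns a coroutine, which is why run_code
    # awaits it instead of exec()-ing it.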
def check_complete(self, code: str) -> Tuple[str, str]:
"""Return whether a block of code is ready to execute, or should be continued
Parameters
----------
code : string
Python input code, which can be multiline.
Returns
-------
status : str
One of 'complete', 'incomplete', or 'invalid' if source is not a
prefix of valid code.
indent : str
When status is 'incomplete', this is some whitespace to insert on
the next line of the prompt.
"""
status, nspaces = self.input_transformer_manager.check_complete(code)
return status, ' ' * (nspaces or 0)
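    # For example (a sketch; `ip` stands for an InteractiveShell instance):
    #
    #     ip.check_complete("for i in range(3):")  # -> ('incomplete', '    ')
    #     ip.check_complete("1 + 1")               # -> ('complete', '')
    #
    # so a frontend can keep prompting for more input, pre-indenting the next
    # line with the returned whitespace.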
#-------------------------------------------------------------------------
# Things related to GUI support and pylab
#-------------------------------------------------------------------------
active_eventloop = None
def enable_gui(self, gui=None):
raise NotImplementedError('Implement enable_gui in a subclass')
def enable_matplotlib(self, gui=None):
"""Enable interactive matplotlib and inline figure support.
This takes the following steps:
1. select the appropriate eventloop and matplotlib backend
2. set up matplotlib for interactive use with that backend
3. configure formatters for inline figure display
4. enable the selected gui eventloop
Parameters
----------
gui : optional, string
If given, dictates the choice of matplotlib GUI backend to use
(should be one of IPython's supported backends, 'qt', 'osx', 'tk',
'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
matplotlib (as dictated by the matplotlib build-time options plus the
user's matplotlibrc configuration file). Note that not all backends
make sense in all contexts, for example a terminal ipython can't
display figures inline.
"""
from matplotlib_inline.backend_inline import configure_inline_support
from IPython.core import pylabtools as pt
gui, backend = pt.find_gui_and_backend(gui, self.pylab_gui_select)
if gui != 'inline':
# If we have our first gui selection, store it
if self.pylab_gui_select is None:
self.pylab_gui_select = gui
# Otherwise if they are different
elif gui != self.pylab_gui_select:
print('Warning: Cannot change to a different GUI toolkit: %s.'
' Using %s instead.' % (gui, self.pylab_gui_select))
gui, backend = pt.find_gui_and_backend(self.pylab_gui_select)
pt.activate_matplotlib(backend)
configure_inline_support(self, backend)
# Now we must activate the gui pylab wants to use, and fix %run to take
# plot updates into account
self.enable_gui(gui)
self.magics_manager.registry['ExecutionMagics'].default_runner = \
pt.mpl_runner(self.safe_execfile)
return gui, backend
def enable_pylab(self, gui=None, import_all=True, welcome_message=False):
"""Activate pylab support at runtime.
This turns on support for matplotlib, preloads into the interactive
namespace all of numpy and pylab, and configures IPython to correctly
interact with the GUI event loop. The GUI backend to be used can be
optionally selected with the optional ``gui`` argument.
This method only adds preloading the namespace to InteractiveShell.enable_matplotlib.
Parameters
----------
gui : optional, string
If given, dictates the choice of matplotlib GUI backend to use
(should be one of IPython's supported backends, 'qt', 'osx', 'tk',
'gtk', 'wx' or 'inline'), otherwise we use the default chosen by
matplotlib (as dictated by the matplotlib build-time options plus the
user's matplotlibrc configuration file). Note that not all backends
make sense in all contexts, for example a terminal ipython can't
display figures inline.
import_all : optional, bool, default: True
Whether to do `from numpy import *` and `from pylab import *`
in addition to module imports.
welcome_message : deprecated
This argument is ignored, no welcome message will be displayed.
"""
from IPython.core.pylabtools import import_pylab
gui, backend = self.enable_matplotlib(gui)
# We want to prevent the loading of pylab to pollute the user's
# namespace as shown by the %who* magics, so we execute the activation
# code in an empty namespace, and we update *both* user_ns and
# user_ns_hidden with this information.
ns = {}
import_pylab(ns, import_all)
# warn about clobbered names
ignored = {"__builtins__"}
both = set(ns).intersection(self.user_ns).difference(ignored)
clobbered = [ name for name in both if self.user_ns[name] is not ns[name] ]
self.user_ns.update(ns)
self.user_ns_hidden.update(ns)
return gui, backend, clobbered
#-------------------------------------------------------------------------
# Utilities
#-------------------------------------------------------------------------
def var_expand(self, cmd, depth=0, formatter=DollarFormatter()):
"""Expand python variables in a string.
The depth argument indicates how many frames above the caller should
be walked to look for the local namespace where to expand variables.
The global namespace for expansion is always the user's interactive
namespace.
"""
ns = self.user_ns.copy()
try:
frame = sys._getframe(depth+1)
except ValueError:
# This is thrown if there aren't that many frames on the stack,
# e.g. if a script called run_line_magic() directly.
pass
else:
ns.update(frame.f_locals)
try:
# We have to use .vformat() here, because 'self' is a valid and common
# name, and expanding **ns for .format() would make it collide with
# the 'self' argument of the method.
cmd = formatter.vformat(cmd, args=[], kwargs=ns)
except Exception:
# if formatter couldn't format, just let it go untransformed
pass
return cmd
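    # For example (a sketch): with user_ns == {'x': 3}, DollarFormatter turns
    #
    #     ip.var_expand("echo $x and ${x + 1}")
    #
    # into "echo 3 and 4"; if a name cannot be resolved, the command is simply
    # returned untransformed rather than raising.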
def mktempfile(self, data=None, prefix='ipython_edit_'):
"""Make a new tempfile and return its filename.
        This calls tempfile.mkstemp, placing the file in a directory created
        by tempfile.mkdtemp, and registers the created path internally so
        IPython cleans it up at exit time.
Optional inputs:
- data(None): if data is given, it gets written out to the temp file
immediately, and the file is closed again."""
dir_path = Path(tempfile.mkdtemp(prefix=prefix))
self.tempdirs.append(dir_path)
handle, filename = tempfile.mkstemp(".py", prefix, dir=str(dir_path))
os.close(handle) # On Windows, there can only be one open handle on a file
file_path = Path(filename)
self.tempfiles.append(file_path)
if data:
file_path.write_text(data, encoding="utf-8")
return filename
def ask_yes_no(self, prompt, default=None, interrupt=None):
if self.quiet:
return True
return ask_yes_no(prompt,default,interrupt)
def show_usage(self):
"""Show a usage message"""
page.page(IPython.core.usage.interactive_usage)
def extract_input_lines(self, range_str, raw=False):
"""Return as a string a set of input history slices.
Parameters
----------
range_str : str
The set of slices is given as a string, like "~5/6-~4/2 4:8 9",
since this function is for use by magic functions which get their
arguments as strings. The number before the / is the session
number: ~n goes n back from the current session.
If empty string is given, returns history of current session
without the last input.
raw : bool, optional
By default, the processed input is used. If this is true, the raw
input history is used instead.
Notes
-----
Slices can be described with two notations:
* ``N:M`` -> standard python form, means including items N...(M-1).
* ``N-M`` -> include items N..M (closed endpoint).
"""
lines = self.history_manager.get_range_by_str(range_str, raw=raw)
text = "\n".join(x for _, _, x in lines)
# Skip the last line, as it's probably the magic that called this
if not range_str:
if "\n" not in text:
text = ""
else:
text = text[: text.rfind("\n")]
return text
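    # Range syntax example (a sketch): "~1/3-5 8" selects inputs 3 through 5 of
    # the previous session ('-' is a closed range) plus input 8 of the current
    # one, while "3:5" selects inputs 3 and 4 only (Python-style half-open ':').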
def find_user_code(self, target, raw=True, py_only=False, skip_encoding_cookie=True, search_ns=False):
"""Get a code string from history, file, url, or a string or macro.
This is mainly used by magic functions.
Parameters
----------
target : str
A string specifying code to retrieve. This will be tried respectively
as: ranges of input history (see %history for syntax), url,
corresponding .py file, filename, or an expression evaluating to a
string or Macro in the user namespace.
If empty string is given, returns complete history of current
session, without the last line.
raw : bool
If true (default), retrieve raw history. Has no effect on the other
retrieval mechanisms.
py_only : bool (default False)
Only try to fetch python code, do not try alternative methods to decode file
if unicode fails.
Returns
-------
A string of code.
ValueError is raised if nothing is found, and TypeError if it evaluates
to an object of another type. In each case, .args[0] is a printable
message.
"""
code = self.extract_input_lines(target, raw=raw) # Grab history
if code:
return code
try:
if target.startswith(('http://', 'https://')):
return openpy.read_py_url(target, skip_encoding_cookie=skip_encoding_cookie)
except UnicodeDecodeError as e:
            if not py_only:
                # Deferred import
                from urllib.request import urlopen
                response = urlopen(target)
                return response.read().decode('latin1')
            raise ValueError("'%s' seems to be unreadable." % target) from e
potential_target = [target]
        try:
            potential_target.insert(0, get_py_filename(target))
        except IOError:
            pass
        for tgt in potential_target:
            if os.path.isfile(tgt):  # Read file
                try:
                    return openpy.read_py_file(tgt, skip_encoding_cookie=skip_encoding_cookie)
                except UnicodeDecodeError as e:
                    if not py_only:
                        with io_open(tgt, 'r', encoding='latin1') as f:
                            return f.read()
                    raise ValueError("'%s' seems to be unreadable." % target) from e
elif os.path.isdir(os.path.expanduser(tgt)):
raise ValueError("'%s' is a directory, not a regular file." % target)
if search_ns:
# Inspect namespace to load object source
object_info = self.object_inspect(target, detail_level=1)
if object_info['found'] and object_info['source']:
return object_info['source']
try: # User namespace
codeobj = eval(target, self.user_ns)
except Exception as e:
raise ValueError(("'%s' was not found in history, as a file, url, "
"nor in the user namespace.") % target) from e
if isinstance(codeobj, str):
return codeobj
elif isinstance(codeobj, Macro):
return codeobj.value
raise TypeError("%s is neither a string nor a macro." % target,
codeobj)
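    # Resolution order illustrated (a sketch with hypothetical inputs):
    # find_user_code("1-4") returns history lines 1..4;
    # find_user_code("https://example.com/x.py") fetches the URL;
    # find_user_code("script.py") reads the file; and with snippet = "pass" in
    # user_ns, find_user_code("snippet") returns "pass".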
def _atexit_once(self):
"""
        At-exit operations that need to be called at most once.
        A second call to this function on the same instance will do nothing.
"""
if not getattr(self, "_atexit_once_called", False):
self._atexit_once_called = True
# Clear all user namespaces to release all references cleanly.
self.reset(new_session=False)
# Close the history session (this stores the end time and line count)
# this must be *before* the tempfile cleanup, in case of temporary
# history db
self.history_manager.end_session()
self.history_manager = None
#-------------------------------------------------------------------------
# Things related to IPython exiting
#-------------------------------------------------------------------------
def atexit_operations(self):
"""This will be executed at the time of exit.
Cleanup operations and saving of persistent data that is done
unconditionally by IPython should be performed here.
For things that may depend on startup flags or platform specifics (such
as having readline or not), register a separate atexit function in the
        code that has the appropriate information, rather than trying to
        clutter this method with platform-specific logic.
"""
self._atexit_once()
# Cleanup all tempfiles and folders left around
for tfile in self.tempfiles:
try:
tfile.unlink()
self.tempfiles.remove(tfile)
except FileNotFoundError:
pass
del self.tempfiles
for tdir in self.tempdirs:
try:
tdir.rmdir()
self.tempdirs.remove(tdir)
except FileNotFoundError:
pass
del self.tempdirs
# Restore user's cursor
if hasattr(self, "editing_mode") and self.editing_mode == "vi":
sys.stdout.write("\x1b[0 q")
sys.stdout.flush()
def cleanup(self):
self.restore_sys_module_state()
# Overridden in terminal subclass to change prompts
def switch_doctest_mode(self, mode):
pass
class InteractiveShellABC(metaclass=abc.ABCMeta):
"""An abstract base class for InteractiveShell."""
InteractiveShellABC.register(InteractiveShell)
| []
| []
| [
"SHELL",
"VIRTUAL_ENV"
]
| [] | ["SHELL", "VIRTUAL_ENV"] | python | 2 | 0 | |
ticketmatic/settings/pricing/pricetypes/operations_test.go | package pricetypes
import (
"os"
"testing"
"time"
"github.com/ticketmatic/tm-go/ticketmatic"
)
func TestGet(t *testing.T) {
var err error
accountcode := os.Getenv("TM_TEST_ACCOUNTCODE")
accesskey := os.Getenv("TM_TEST_ACCESSKEY")
secretkey := os.Getenv("TM_TEST_SECRETKEY")
c := ticketmatic.NewClient(accountcode, accesskey, secretkey)
req, err := Getlist(c, &ticketmatic.PriceTypeQuery{})
if err != nil {
t.Fatal(err)
}
if len(req.Data) <= 0 {
t.Errorf("Unexpected req.Data length, got %#v, expected greater than %#v", len(req.Data), 0)
}
req2, err := Getlist(c, &ticketmatic.PriceTypeQuery{
Filter: "select id from conf.pricetype where typeid=2301",
})
if err != nil {
t.Fatal(err)
}
if len(req.Data) <= len(req2.Data) {
t.Errorf("Unexpected req.Data length, got %#v, expected greater than %#v", len(req.Data), len(req2.Data))
}
}
func TestCreatedelete(t *testing.T) {
var err error
accountcode := os.Getenv("TM_TEST_ACCOUNTCODE")
accesskey := os.Getenv("TM_TEST_ACCESSKEY")
secretkey := os.Getenv("TM_TEST_SECRETKEY")
c := ticketmatic.NewClient(accountcode, accesskey, secretkey)
req, err := Getlist(c, &ticketmatic.PriceTypeQuery{})
if err != nil {
t.Fatal(err)
}
if len(req.Data) <= 0 {
t.Errorf("Unexpected req.Data length, got %#v, expected greater than %#v", len(req.Data), 0)
}
req2, err := Create(c, &ticketmatic.PriceType{
Typeid: 2301,
Name: "test",
})
if err != nil {
t.Fatal(err)
}
if req2.Name != "test" {
t.Errorf("Unexpected req2.Name, got %#v, expected %#v", req2.Name, "test")
}
if time.Since(req2.Createdts.Time()) > 24*time.Hour {
t.Errorf("Unexpected req2.Createdts time, should be recent, got %s", req2.Createdts.Time())
}
req3, err := Getlist(c, &ticketmatic.PriceTypeQuery{})
if err != nil {
t.Fatal(err)
}
if len(req3.Data) <= len(req.Data) {
t.Errorf("Unexpected req3.Data length, got %#v, expected greater than %#v", len(req3.Data), len(req.Data))
}
req4, err := Get(c, req2.Id)
if err != nil {
t.Fatal(err)
}
if req4.Name != "test" {
t.Errorf("Unexpected req4.Name, got %#v, expected %#v", req4.Name, "test")
}
err = Delete(c, req2.Id)
if err != nil {
t.Fatal(err)
}
req6, err := Getlist(c, &ticketmatic.PriceTypeQuery{})
if err != nil {
t.Fatal(err)
}
if len(req.Data) != len(req6.Data) {
t.Errorf("Unexpected req.Data length, got %#v, expected %#v", len(req.Data), len(req6.Data))
}
}
func TestTranslations(t *testing.T) {
var err error
accountcode := os.Getenv("TM_TEST_ACCOUNTCODE")
accesskey := os.Getenv("TM_TEST_ACCESSKEY")
secretkey := os.Getenv("TM_TEST_SECRETKEY")
c := ticketmatic.NewClient(accountcode, accesskey, secretkey)
req, err := Get(c, 4)
if err != nil {
t.Fatal(err)
}
if req.Name != "Free ticket" {
t.Errorf("Unexpected req.Name, got %#v, expected %#v", req.Name, "Free ticket")
}
c.Language = "nl"
req2, err := Get(c, 4)
if err != nil {
t.Fatal(err)
}
if req2.Name != "Gratis ticket" {
t.Errorf("Unexpected req2.Name, got %#v, expected %#v", req2.Name, "Gratis ticket")
}
updated, err := Update(c, 4, &ticketmatic.PriceType{
Name: "Vrijkaart",
})
if err != nil {
t.Fatal(err)
}
if updated.Name != "Vrijkaart" {
t.Errorf("Unexpected updated.Name, got %#v, expected %#v", updated.Name, "Vrijkaart")
}
c.Language = "en"
req3, err := Get(c, 4)
if err != nil {
t.Fatal(err)
}
if req3.Name != "Free ticket" {
t.Errorf("Unexpected req3.Name, got %#v, expected %#v", req3.Name, "Free ticket")
}
c.Language = "nl"
updated2, err := Update(c, 4, &ticketmatic.PriceType{
Name: "Gratis ticket",
})
if err != nil {
t.Fatal(err)
}
if updated2.Name != "Gratis ticket" {
t.Errorf("Unexpected updated2.Name, got %#v, expected %#v", updated2.Name, "Gratis ticket")
}
translations, err := Translations(c, 4)
if err != nil {
t.Fatal(err)
}
if translations["nameen"] != "Free ticket" {
t.Errorf(`Unexpected translations["nameen"], got %#v, expected %#v`, translations["nameen"], "Free ticket")
}
if translations["namenl"] != "Gratis ticket" {
t.Errorf(`Unexpected translations["namenl"], got %#v, expected %#v`, translations["namenl"], "Gratis ticket")
}
}
func TestBadfilter(t *testing.T) {
var err error
accountcode := os.Getenv("TM_TEST_ACCOUNTCODE")
accesskey := os.Getenv("TM_TEST_ACCESSKEY")
secretkey := os.Getenv("TM_TEST_SECRETKEY")
c := ticketmatic.NewClient(accountcode, accesskey, secretkey)
_, err = Getlist(c, &ticketmatic.PriceTypeQuery{
Filter: "INVALID QUERY",
})
if err == nil {
t.Fatal("Expected an error!")
}
}
| [
"\"TM_TEST_ACCOUNTCODE\"",
"\"TM_TEST_ACCESSKEY\"",
"\"TM_TEST_SECRETKEY\"",
"\"TM_TEST_ACCOUNTCODE\"",
"\"TM_TEST_ACCESSKEY\"",
"\"TM_TEST_SECRETKEY\"",
"\"TM_TEST_ACCOUNTCODE\"",
"\"TM_TEST_ACCESSKEY\"",
"\"TM_TEST_SECRETKEY\"",
"\"TM_TEST_ACCOUNTCODE\"",
"\"TM_TEST_ACCESSKEY\"",
"\"TM_TEST_SECRETKEY\""
]
| []
| [
"TM_TEST_ACCESSKEY",
"TM_TEST_ACCOUNTCODE",
"TM_TEST_SECRETKEY"
]
| [] | ["TM_TEST_ACCESSKEY", "TM_TEST_ACCOUNTCODE", "TM_TEST_SECRETKEY"] | go | 3 | 0 | |
unicode.go | // Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Unicode is a command-line tool for studying Unicode characters.
usage: unicode [-c] [-d] [-n] [-t]
-c: args are hex; output characters (xyz)
-n: args are characters; output hex (23 or 23-44)
-g: args are regular expressions for matching names
-d: output textual description
-t: output plain text, not one char per line
-U: output full Unicode description
Default behavior sniffs the arguments to select -c vs. -n.
For some options you will need UnicodeData.txt installed.
Use curl or wget or your favorite webirific tool to copy
ftp://ftp.unicode.org/Public/UNIDATA/UnicodeData.txt
to
$GOPATH/src/robpike.io/cmd/unicode
*/
package main // import "robpike.io/cmd/unicode"
import (
"bufio"
"bytes"
"flag"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
)
var (
doNum = flag.Bool("n", false, "output numeric values")
doChar = flag.Bool("c", false, "output characters")
doText = flag.Bool("t", false, "output plain text")
doDesc = flag.Bool("d", false, "describe the characters from the Unicode database, in simple form")
doUnic = flag.Bool("u", false, "describe the characters from the Unicode database, in Unicode form")
doUNIC = flag.Bool("U", false, "describe the characters from the Unicode database, in glorious detail")
doGrep = flag.Bool("g", false, "grep for argument string in data")
doPrintUnicodeTxt = flag.Bool("db", false, "convert UnicodeData.txt to unicode.txt format on stdout")
)
var printRange = false
var (
unicodeTxt string
unicodeDataTxt string
goroot string
gopath string
)
func init() {
goroot = os.Getenv("GOROOT")
gopath = os.Getenv("GOPATH")
}
func getUnicode() {
if unicodeTxt == "" {
// Discover paths for unicode files.
if !*doPrintUnicodeTxt {
unicodeTxt = getPath("unicode.txt")
}
unicodeDataTxt = getPath("UnicodeData.txt")
}
}
func getPath(base string) string {
if goroot != "" {
f := filepath.Join(goroot, "src/robpike.io/cmd/unicode", base)
if _, err := os.Stat(f); err == nil {
return f
}
}
if gopath != "" {
f := filepath.Join(gopath, "src/robpike.io/cmd/unicode", base)
if _, err := os.Stat(f); err == nil {
return f
}
}
fmt.Fprintf(os.Stderr, "unicode: can't find %s\n", base)
os.Exit(1)
return ""
}
func main() {
flag.Usage = usage
flag.Parse()
mode()
getUnicode()
var codes []rune
switch {
case *doPrintUnicodeTxt:
printUnicodeTxt()
return
case *doGrep:
codes = argsAreRegexps()
case *doChar:
codes = argsAreNumbers()
case *doNum:
codes = argsAreChars()
}
if *doUnic || *doUNIC {
desc(codes, unicodeDataTxt)
return
}
if *doDesc {
desc(codes, unicodeTxt)
return
}
if *doText {
fmt.Printf("%s\n", string(codes))
return
}
b := new(bytes.Buffer)
for i, c := range codes {
switch {
case printRange:
fmt.Fprintf(b, "%.4x %c", c, c)
if i%4 == 3 {
fmt.Fprint(b, "\n")
} else {
fmt.Fprint(b, "\t")
}
case *doChar:
fmt.Fprintf(b, "%c\n", c)
case *doNum:
fmt.Fprintf(b, "%.4x\n", c)
}
}
if b.Len() > 0 && b.Bytes()[b.Len()-1] != '\n' {
fmt.Fprint(b, "\n")
}
fmt.Print(b)
}
func fatalf(format string, args ...interface{}) {
fmt.Fprintf(os.Stderr, format+"\n", args...)
os.Exit(2)
}
const usageText = `usage: unicode [-c] [-d] [-n] [-t]
-c: args are hex; output characters (xyz)
-n: args are characters; output hex (23 or 23-44)
-g: args are regular expressions for matching names
-d: output textual description
-t: output plain text, not one char per line
-U: output full Unicode description
Default behavior sniffs the arguments to select -c vs. -n.
For some options you will need UnicodeData.txt installed.
Use curl or wget or your favorite webirific tool to copy
ftp://ftp.unicode.org/Public/UNIDATA/UnicodeData.txt
to
$GOPATH/src/robpike.io/cmd/unicode`
func usage() {
fatalf("%s", usageText)
}
// Mode determines whether we have numeric or character input.
// If there are no flags, we sniff the first argument.
func mode() {
if *doPrintUnicodeTxt {
return
}
if len(flag.Args()) == 0 {
usage()
}
// If grepping names, we need an output format defined; default is numeric.
if *doGrep && !(*doNum || *doChar || *doDesc || *doUnic || *doUNIC) {
*doNum = true
}
if *doNum || *doChar {
return
}
alldigits := true
numDash := 0
for _, r := range strings.Join(flag.Args(), "") {
if !strings.ContainsRune("0123456789abcdefABCDEF-", r) {
alldigits = false
}
if r == '-' {
numDash++
}
}
// If there is one '-' it's a range; if zero it's just a hex number.
if alldigits && numDash <= 1 {
*doChar = true
return
}
*doNum = true
}
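// Sniffing examples (illustrative, matching the rules above):
//
//	unicode 2603       all hex, no dash   => -c, prints ☃
//	unicode 2600-2603  hex range, 1 dash  => -c, prints the whole range
//	unicode ☃          non-hex rune       => -n, prints 2603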
func argsAreChars() []rune {
var codes []rune
for i, a := range flag.Args() {
for _, r := range a {
codes = append(codes, r)
}
// Add space between arguments if output is plain text.
if *doText && i < len(flag.Args())-1 {
codes = append(codes, ' ')
}
}
return codes
}
// argsAreNames currently mirrors argsAreChars: each argument's runes are
// collected literally.
func argsAreNames() []rune {
var codes []rune
for i, a := range flag.Args() {
for _, r := range a {
codes = append(codes, r)
}
// Add space between arguments if output is plain text.
if *doText && i < len(flag.Args())-1 {
codes = append(codes, ' ')
}
}
return codes
}
func parseRune(s string) rune {
r, err := strconv.ParseInt(s, 16, 22)
if err != nil {
fatalf("%s", err)
}
return rune(r)
}
func argsAreNumbers() []rune {
var codes []rune
for _, a := range flag.Args() {
if s := strings.Split(a, "-"); len(s) == 2 {
printRange = true
r1 := parseRune(s[0])
r2 := parseRune(s[1])
if r2 < r1 {
usage()
}
for ; r1 <= r2; r1++ {
codes = append(codes, r1)
}
continue
}
codes = append(codes, parseRune(a))
}
return codes
}
func argsAreRegexps() []rune {
var codes []rune
lines := getFile(unicodeTxt)
for _, a := range flag.Args() {
re, err := regexp.Compile(a)
if err != nil {
fatalf("%s", err)
}
for i, line := range lines {
if re.MatchString(line) {
r, _ := runeOfLine(i, line)
codes = append(codes, r)
}
}
}
return codes
}
var files = make(map[string][]string)
func getFile(file string) []string {
lines := files[file]
if lines != nil {
return lines
}
text, err := ioutil.ReadFile(file)
if err != nil {
fatalf("%s", err)
}
lines = strings.Split(string(text), "\n")
// We get an empty final line; drop it.
if len(lines) > 0 && len(lines[len(lines)-1]) == 0 {
lines = lines[:len(lines)-1]
}
files[file] = lines
return lines
}
func runeOfLine(i int, line string) (r rune, tab int) {
tab = strings.IndexAny(line, "\t;")
if tab < 0 {
fatalf("malformed database: line %d", i)
}
return parseRune(line[0:tab]), tab
}
func desc(codes []rune, file string) {
lines := getFile(file)
runeData := make(map[rune]string)
for i, l := range lines {
r, tab := runeOfLine(i, l)
runeData[r] = l[tab+1:]
}
if *doUNIC {
for _, r := range codes {
fmt.Printf("%#U %s", r, dumpUnicode(runeData[r]))
}
} else {
for _, r := range codes {
fmt.Printf("%#U %s\n", r, runeData[r])
}
}
}
var prop = [...]string{
"",
"category: ",
"canonical combining classes: ",
"bidirectional category: ",
"character decomposition mapping: ",
"decimal digit value: ",
"digit value: ",
"numeric value: ",
"mirrored: ",
"Unicode 1.0 name: ",
"10646 comment field: ",
"uppercase mapping: ",
"lowercase mapping: ",
"titlecase mapping: ",
}
func dumpUnicode(s string) []byte {
fields := strings.Split(s, ";")
if len(fields) == 0 {
return []byte{'\n'}
}
b := new(bytes.Buffer)
if len(fields) != len(prop) {
fmt.Fprintf(b, "%s: can't print: expected %d fields, got %d\n", s, len(prop), len(fields))
return b.Bytes()
}
for i, f := range fields {
if f == "" {
continue
}
if i > 0 {
b.WriteByte('\t')
}
fmt.Fprintf(b, "%s%s\n", prop[i], f)
}
return b.Bytes()
}
func printUnicodeTxt() {
lines := getFile(unicodeDataTxt)
w := bufio.NewWriter(os.Stdout)
defer w.Flush()
for _, line := range lines {
fields := strings.Split(strings.ToLower(line), ";")
desc := fields[1]
if fields[10] != "" {
desc += "; " + fields[10]
}
fmt.Fprintf(w, "%s\t%s\n", fields[0], desc)
}
}
| [
"\"GOROOT\"",
"\"GOPATH\""
]
| []
| [
"GOPATH",
"GOROOT"
]
| [] | ["GOPATH", "GOROOT"] | go | 2 | 0 | |
migrate/starter/starter.go | // Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package starter
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"path"
"strings"
"syscall"
"github.com/coreos/etcd/client"
"github.com/coreos/etcd/etcdmain"
"github.com/coreos/etcd/migrate"
"github.com/coreos/etcd/pkg/fileutil"
"github.com/coreos/etcd/pkg/flags"
"github.com/coreos/etcd/pkg/osutil"
"github.com/coreos/etcd/pkg/types"
etcdversion "github.com/coreos/etcd/version"
"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
)
type version string
const (
internalV1 version = "1"
internalV2 version = "2"
internalV2Proxy version = "2.proxy"
internalUnknown version = "unknown"
v0_4 version = "v0.4"
v2_0 version = "v2.0"
v2_0Proxy version = "v2.0 proxy"
empty version = "empty"
unknown version = "unknown"
defaultInternalV1etcdBinaryDir = "/usr/libexec/etcd/internal_versions/"
)
var (
v2SpecialFlags = []string{
"initial-cluster",
"listen-peer-urls",
"listen-client-urls",
"proxy",
}
)
func StartDesiredVersion(args []string) {
fs, err := parseConfig(args)
if err != nil {
return
}
if fs.Lookup("version").Value.String() == "true" {
fmt.Println("etcd version", etcdversion.Version)
os.Exit(0)
}
ver := checkInternalVersion(fs)
log.Printf("starter: start etcd version %s", ver)
switch ver {
case internalV1:
startInternalV1()
	case internalV2:
		// nothing to do: the current binary already is the v2 etcd
	case internalV2Proxy:
if _, err := os.Stat(standbyInfo4(fs.Lookup("data-dir").Value.String())); err != nil {
log.Printf("starter: Detect standby_info file exists, and add --proxy=on flag to ensure it runs in v2.0 proxy mode.")
log.Printf("starter: Before removing v0.4 data, --proxy=on flag MUST be added.")
}
// append proxy flag to args to trigger proxy mode
os.Args = append(os.Args, "-proxy=on")
default:
log.Panicf("starter: unhandled start version")
}
}
func checkInternalVersion(fs *flag.FlagSet) version {
	// If any v2.0-specific flag or env var is set explicitly, start v2.0
for _, name := range v2SpecialFlags {
if fs.Lookup(name).Value.String() != "" {
return internalV2
}
}
dataDir := fs.Lookup("data-dir").Value.String()
if dataDir == "" {
log.Fatalf("starter: please set --data-dir or ETCD_DATA_DIR for etcd")
}
// check the data directory
dataver, err := checkVersion(dataDir)
if err != nil {
log.Fatalf("starter: failed to detect etcd version in %v: %v", dataDir, err)
}
log.Printf("starter: detect etcd version %s in %s", dataver, dataDir)
switch dataver {
case v2_0:
return internalV2
case v2_0Proxy:
return internalV2Proxy
case v0_4:
standbyInfo, err := migrate.DecodeStandbyInfo4FromFile(standbyInfo4(dataDir))
if err != nil && !os.IsNotExist(err) {
log.Fatalf("starter: failed to decode standbyInfo in %v: %v", dataDir, err)
}
inStandbyMode := standbyInfo != nil && standbyInfo.Running
if inStandbyMode {
ver, err := checkInternalVersionByClientURLs(standbyInfo.ClientURLs(), clientTLSInfo(fs))
if err != nil {
log.Printf("starter: failed to check start version through peers: %v", err)
return internalV1
}
if ver == internalV2 {
osutil.Unsetenv("ETCD_DISCOVERY")
os.Args = append(os.Args, "-initial-cluster", standbyInfo.InitialCluster())
return internalV2Proxy
}
return ver
}
ver, err := checkInternalVersionByDataDir4(dataDir)
if err != nil {
log.Fatalf("starter: failed to check start version in %v: %v", dataDir, err)
}
return ver
case empty:
discovery := fs.Lookup("discovery").Value.String()
dpeers, err := getPeersFromDiscoveryURL(discovery)
if err != nil {
log.Printf("starter: failed to get peers from discovery %s: %v", discovery, err)
}
peerStr := fs.Lookup("peers").Value.String()
ppeers := getPeersFromPeersFlag(peerStr, peerTLSInfo(fs))
urls := getClientURLsByPeerURLs(append(dpeers, ppeers...), peerTLSInfo(fs))
ver, err := checkInternalVersionByClientURLs(urls, clientTLSInfo(fs))
if err != nil {
log.Printf("starter: failed to check start version through peers: %v", err)
return internalV2
}
return ver
}
// never reach here
log.Panicf("starter: unhandled etcd version in %v", dataDir)
return internalUnknown
}
func checkVersion(dataDir string) (version, error) {
names, err := fileutil.ReadDir(dataDir)
if err != nil {
if os.IsNotExist(err) {
err = nil
}
return empty, err
}
if len(names) == 0 {
return empty, nil
}
nameSet := types.NewUnsafeSet(names...)
if nameSet.ContainsAll([]string{"member"}) {
return v2_0, nil
}
if nameSet.ContainsAll([]string{"proxy"}) {
return v2_0Proxy, nil
}
if nameSet.ContainsAll([]string{"snapshot", "conf", "log"}) {
return v0_4, nil
}
if nameSet.ContainsAll([]string{"standby_info"}) {
return v0_4, nil
}
return unknown, fmt.Errorf("failed to check version")
}
func checkInternalVersionByDataDir4(dataDir string) (version, error) {
// check v0.4 snapshot
snap4, err := migrate.DecodeLatestSnapshot4FromDir(snapDir4(dataDir))
if err != nil {
return internalUnknown, err
}
if snap4 != nil {
st := &migrate.Store4{}
if err := json.Unmarshal(snap4.State, st); err != nil {
return internalUnknown, err
}
dir := st.Root.Children["_etcd"]
n, ok := dir.Children["next-internal-version"]
if ok && n.Value == "2" {
return internalV2, nil
}
}
// check v0.4 log
ents4, err := migrate.DecodeLog4FromFile(logFile4(dataDir))
if err != nil {
return internalUnknown, err
}
for _, e := range ents4 {
cmd, err := migrate.NewCommand4(e.GetCommandName(), e.GetCommand(), nil)
if err != nil {
return internalUnknown, err
}
setcmd, ok := cmd.(*migrate.SetCommand)
if !ok {
continue
}
if setcmd.Key == "/_etcd/next-internal-version" && setcmd.Value == "2" {
return internalV2, nil
}
}
return internalV1, nil
}
func getClientURLsByPeerURLs(peers []string, tls *TLSInfo) []string {
c, err := newDefaultClient(tls)
if err != nil {
log.Printf("starter: new client error: %v", err)
return nil
}
var urls []string
for _, u := range peers {
resp, err := c.Get(u + "/etcdURL")
if err != nil {
log.Printf("starter: failed to get /etcdURL from %s", u)
continue
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Printf("starter: failed to read body from %s", u)
continue
}
urls = append(urls, string(b))
}
return urls
}
func checkInternalVersionByClientURLs(urls []string, tls *TLSInfo) (version, error) {
c, err := newDefaultClient(tls)
if err != nil {
return internalUnknown, err
}
for _, u := range urls {
resp, err := c.Get(u + "/version")
if err != nil {
log.Printf("starter: failed to get /version from %s", u)
continue
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Printf("starter: failed to read body from %s", u)
continue
}
var m map[string]string
err = json.Unmarshal(b, &m)
if err != nil {
log.Printf("starter: failed to unmarshal body %s from %s", b, u)
continue
}
switch m["internalVersion"] {
case "1":
return internalV1, nil
case "2":
return internalV2, nil
default:
log.Printf("starter: unrecognized internal version %s from %s", m["internalVersion"], u)
}
}
return internalUnknown, fmt.Errorf("failed to get version from urls %v", urls)
}
func getPeersFromDiscoveryURL(discoverURL string) ([]string, error) {
if discoverURL == "" {
return nil, nil
}
u, err := url.Parse(discoverURL)
if err != nil {
return nil, err
}
token := u.Path
u.Path = ""
c, err := client.NewHTTPClient(&http.Transport{}, []string{u.String()})
if err != nil {
return nil, err
}
dc := client.NewDiscoveryKeysAPI(c)
ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
resp, err := dc.Get(ctx, token)
cancel()
if err != nil {
return nil, err
}
peers := make([]string, 0)
// append non-config keys to peers
for _, n := range resp.Node.Nodes {
if g := path.Base(n.Key); g == "_config" || g == "_state" {
continue
}
peers = append(peers, n.Value)
}
return peers, nil
}
func getPeersFromPeersFlag(str string, tls *TLSInfo) []string {
peers := trimSplit(str, ",")
for i, p := range peers {
peers[i] = tls.Scheme() + "://" + p
}
return peers
}
func startInternalV1() {
p := os.Getenv("ETCD_BINARY_DIR")
if p == "" {
p = defaultInternalV1etcdBinaryDir
}
p = path.Join(p, "1")
err := syscall.Exec(p, os.Args, syscall.Environ())
if err != nil {
log.Fatalf("starter: failed to execute internal v1 etcd: %v", err)
}
}
func newDefaultClient(tls *TLSInfo) (*http.Client, error) {
tr := &http.Transport{}
if tls.Scheme() == "https" {
tlsConfig, err := tls.ClientConfig()
if err != nil {
return nil, err
}
tr.TLSClientConfig = tlsConfig
}
return &http.Client{Transport: tr}, nil
}
type value struct {
s string
}
func (v *value) String() string { return v.s }
func (v *value) Set(s string) error {
v.s = s
return nil
}
func (v *value) IsBoolFlag() bool { return true }
// parseConfig parses out the input config from cmdline arguments and
// environment variables.
func parseConfig(args []string) (*flag.FlagSet, error) {
fs := flag.NewFlagSet("full flagset", flag.ContinueOnError)
etcdmain.NewConfig().VisitAll(func(f *flag.Flag) {
fs.Var(&value{}, f.Name, "")
})
if err := fs.Parse(args); err != nil {
return nil, err
}
if err := flags.SetFlagsFromEnv(fs); err != nil {
return nil, err
}
return fs, nil
}
func clientTLSInfo(fs *flag.FlagSet) *TLSInfo {
return &TLSInfo{
CAFile: fs.Lookup("ca-file").Value.String(),
CertFile: fs.Lookup("cert-file").Value.String(),
KeyFile: fs.Lookup("key-file").Value.String(),
}
}
func peerTLSInfo(fs *flag.FlagSet) *TLSInfo {
return &TLSInfo{
CAFile: fs.Lookup("peer-ca-file").Value.String(),
CertFile: fs.Lookup("peer-cert-file").Value.String(),
KeyFile: fs.Lookup("peer-key-file").Value.String(),
}
}
func snapDir4(dataDir string) string {
return path.Join(dataDir, "snapshot")
}
func logFile4(dataDir string) string {
return path.Join(dataDir, "log")
}
func standbyInfo4(dataDir string) string {
return path.Join(dataDir, "standby_info")
}
func trimSplit(s, sep string) []string {
trimmed := strings.Split(s, sep)
for i := range trimmed {
trimmed[i] = strings.TrimSpace(trimmed[i])
}
return trimmed
}
| [
"\"ETCD_BINARY_DIR\""
]
| []
| [
"ETCD_BINARY_DIR"
]
| [] | ["ETCD_BINARY_DIR"] | go | 1 | 0 | |
examples/tosser.py | #!/usr/bin/env python3
"""
Shows how to toss a capsule to a container.
"""
from mujoco_py import load_model_from_path, MjSim, MjViewer
import os
model = load_model_from_path("xmls/tosser.xml")
sim = MjSim(model)
viewer = MjViewer(sim)
sim_state = sim.get_state()
while True:
sim.set_state(sim_state)
    for i in range(1000):
        if i < 150:
            # Hold the actuators at rest while the capsule settles.
            sim.data.ctrl[:] = 0.0
        else:
            # Apply a constant control signal to toss the capsule.
            sim.data.ctrl[:] = -1.0
        sim.step()
viewer.render()
    if os.getenv('TESTING') is not None:
        # Run a single episode when executing under tests.
        break
| []
| []
| [
"TESTING"
]
| [] | ["TESTING"] | python | 1 | 0 | |
molecule/default/tests/test_default.py | import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_configuration(host):
sshd = host.file('/etc/ssh/sshd_config')
assert sshd.contains(r'^PermitRootLogin no$')
assert sshd.contains(r'^X11Forwarding no$')
assert sshd.contains(r'^UsePAM yes$')
assert sshd.contains(r'\sPermitTTY no$')
ssh = host.file('/etc/ssh/ssh_config')
assert ssh.contains(r'^User test$')
assert ssh.contains(r'^Host \*$')
assert ssh.contains(r'\sPort 23$')
def test_service(host):
ssh = host.service('ssh')
assert ssh.is_running
assert ssh.is_enabled
assert host.socket('tcp://0.0.0.0:22').is_listening
| []
| []
| [
"MOLECULE_INVENTORY_FILE"
]
| [] | ["MOLECULE_INVENTORY_FILE"] | python | 1 | 0 | |
cmd/podman/root.go | package main
import (
"fmt"
"os"
"path/filepath"
"runtime"
"runtime/pprof"
"strconv"
"strings"
"github.com/containers/common/pkg/completion"
"github.com/containers/common/pkg/config"
"github.com/containers/podman/v3/cmd/podman/common"
"github.com/containers/podman/v3/cmd/podman/registry"
"github.com/containers/podman/v3/cmd/podman/validate"
"github.com/containers/podman/v3/libpod/define"
"github.com/containers/podman/v3/pkg/checkpoint/crutils"
"github.com/containers/podman/v3/pkg/domain/entities"
"github.com/containers/podman/v3/pkg/parallel"
"github.com/containers/podman/v3/pkg/rootless"
"github.com/containers/podman/v3/version"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
// HelpTemplate is the help template for podman commands
// This uses the short and long options.
// command should not use this.
const helpTemplate = `{{.Short}}
Description:
{{.Long}}
{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
// UsageTemplate is the usage template for podman commands
// This blocks the displaying of the global options. The main podman
// command should not use this.
const usageTemplate = `Usage:{{if (and .Runnable (not .HasAvailableSubCommands))}}
{{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
{{.UseLine}} [command]{{end}}{{if gt (len .Aliases) 0}}
Aliases:
{{.NameAndAliases}}{{end}}{{if .HasExample}}
Examples:
{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
{{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
Options:
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
{{end}}
`
var (
rootCmd = &cobra.Command{
Use: filepath.Base(os.Args[0]) + " [options]",
Long: "Manage pods, containers and images",
SilenceUsage: true,
SilenceErrors: true,
TraverseChildren: true,
PersistentPreRunE: persistentPreRunE,
RunE: validate.SubCommandExists,
PersistentPostRunE: persistentPostRunE,
Version: version.Version.String(),
DisableFlagsInUseLine: true,
}
logLevel = "warn"
useSyslog bool
requireCleanup = true
)
func init() {
// Hooks are called before PersistentPreRunE()
cobra.OnInitialize(
loggingHook,
syslogHook,
earlyInitHook,
)
rootFlags(rootCmd, registry.PodmanConfig())
rootCmd.SetUsageTemplate(usageTemplate)
}
func Execute() {
if err := rootCmd.ExecuteContext(registry.GetContextWithOptions()); err != nil {
if registry.GetExitCode() == 0 {
registry.SetExitCode(define.ExecErrorCodeGeneric)
}
if registry.IsRemote() {
if strings.Contains(err.Error(), "unable to connect to Podman") {
fmt.Fprintln(os.Stderr, "Cannot connect to Podman. Please verify your connection to the Linux system using `podman system connection list`, or try `podman machine init` and `podman machine start` to manage a new Linux VM")
}
}
fmt.Fprintln(os.Stderr, formatError(err))
}
os.Exit(registry.GetExitCode())
}
func persistentPreRunE(cmd *cobra.Command, args []string) error {
logrus.Debugf("Called %s.PersistentPreRunE(%s)", cmd.Name(), strings.Join(os.Args, " "))
// Help, completion and commands with subcommands are special cases, no need for more setup
// Completion cmd is used to generate the shell scripts
if cmd.Name() == "help" || cmd.Name() == "completion" || cmd.HasSubCommands() {
requireCleanup = false
return nil
}
cfg := registry.PodmanConfig()
if cfg.NoOut {
null, _ := os.Open(os.DevNull)
os.Stdout = null
}
// Currently it is only possible to restore a container with the same runtime
	// as used for checkpointing. It may be possible to make crun and runc
	// compatible enough to restore a container with a different runtime than
	// the one it was checkpointed with, but currently that does not work.
// To make it easier for users we will look into the checkpoint archive and
// set the runtime to the one used during checkpointing.
if !registry.IsRemote() && cmd.Name() == "restore" {
if cmd.Flag("import").Changed {
runtime, err := crutils.CRGetRuntimeFromArchive(cmd.Flag("import").Value.String())
if err != nil {
return errors.Wrapf(
err,
"failed extracting runtime information from %s",
cmd.Flag("import").Value.String(),
)
}
if cfg.RuntimePath == "" {
// If the user did not select a runtime, this takes the one from
// the checkpoint archives and tells Podman to use it for the restore.
runtimeFlag := cmd.Root().Flags().Lookup("runtime")
if runtimeFlag == nil {
return errors.Errorf(
"setting runtime to '%s' for restore",
*runtime,
)
}
runtimeFlag.Value.Set(*runtime)
runtimeFlag.Changed = true
logrus.Debugf("Checkpoint was created using '%s'. Restore will use the same runtime", *runtime)
} else if cfg.RuntimePath != *runtime {
// If the user selected a runtime on the command-line this checks if
// it is the same then during checkpointing and errors out if not.
return errors.Errorf(
"checkpoint archive %s was created with runtime '%s' and cannot be restored with runtime '%s'",
cmd.Flag("import").Value.String(),
*runtime,
cfg.RuntimePath,
)
}
}
}
// --connection is not as "special" as --remote so we can wait and process it here
conn := cmd.Root().LocalFlags().Lookup("connection")
if conn != nil && conn.Changed {
cfg.Engine.ActiveService = conn.Value.String()
var err error
cfg.URI, cfg.Identity, err = cfg.ActiveDestination()
if err != nil {
return errors.Wrap(err, "failed to resolve active destination")
}
if err := cmd.Root().LocalFlags().Set("url", cfg.URI); err != nil {
return errors.Wrap(err, "failed to override --url flag")
}
if err := cmd.Root().LocalFlags().Set("identity", cfg.Identity); err != nil {
return errors.Wrap(err, "failed to override --identity flag")
}
}
	// Special case if the command is the hidden completion command ("__complete", "__completeNoDesc")
	// Since __completeNoDesc is an alias, cmd.Name() is always __complete
	if cmd.Name() == cobra.ShellCompRequestCmd {
		// Parse the CLI arguments after the completion cmd (always passed as the second argument)
		// This ensures that the --url, --identity and --connection flags are properly set
compCmd, _, err := cmd.Root().Traverse(os.Args[2:])
if err != nil {
return err
}
// If we don't complete the root cmd hide all root flags
// so they won't show up in the completions on subcommands.
if compCmd != compCmd.Root() {
compCmd.Root().Flags().VisitAll(func(flag *pflag.Flag) {
flag.Hidden = true
})
}
// No need for further setup the completion logic setups the engines as needed.
requireCleanup = false
return nil
}
// Prep the engines
if _, err := registry.NewImageEngine(cmd, args); err != nil {
return err
}
if _, err := registry.NewContainerEngine(cmd, args); err != nil {
return err
}
	// Hard-code TMPDIR to /var/tmp if the user did not override it
if _, ok := os.LookupEnv("TMPDIR"); !ok {
if tmpdir, err := cfg.ImageCopyTmpDir(); err != nil {
logrus.Warnf("Failed to retrieve default tmp dir: %s", err.Error())
} else {
os.Setenv("TMPDIR", tmpdir)
}
}
context := cmd.Root().LocalFlags().Lookup("context")
if context.Value.String() != "default" {
return errors.New("podman does not support swarm, the only --context value allowed is \"default\"")
}
if !registry.IsRemote() {
if cmd.Flag("cpu-profile").Changed {
f, err := os.Create(cfg.CPUProfile)
if err != nil {
return err
}
if err := pprof.StartCPUProfile(f); err != nil {
return err
}
}
if cmd.Flag("memory-profile").Changed {
// Same value as the default in github.com/pkg/profile.
runtime.MemProfileRate = 4096
if rate := os.Getenv("MemProfileRate"); rate != "" {
r, err := strconv.Atoi(rate)
if err != nil {
return err
}
runtime.MemProfileRate = r
}
}
if cfg.MaxWorks <= 0 {
return errors.Errorf("maximum workers must be set to a positive number (got %d)", cfg.MaxWorks)
}
if err := parallel.SetMaxThreads(uint(cfg.MaxWorks)); err != nil {
return err
}
}
// Setup Rootless environment, IFF:
// 1) in ABI mode
// 2) running as non-root
// 3) command doesn't require Parent Namespace
_, found := cmd.Annotations[registry.ParentNSRequired]
if !registry.IsRemote() && rootless.IsRootless() && !found {
_, noMoveProcess := cmd.Annotations[registry.NoMoveProcess]
err := registry.ContainerEngine().SetupRootless(registry.Context(), noMoveProcess)
if err != nil {
return err
}
}
return nil
}
func persistentPostRunE(cmd *cobra.Command, args []string) error {
logrus.Debugf("Called %s.PersistentPostRunE(%s)", cmd.Name(), strings.Join(os.Args, " "))
if !requireCleanup {
return nil
}
registry.ImageEngine().Shutdown(registry.Context())
registry.ContainerEngine().Shutdown(registry.Context())
if registry.IsRemote() {
return nil
}
// CPU and memory profiling.
if cmd.Flag("cpu-profile").Changed {
pprof.StopCPUProfile()
}
if cmd.Flag("memory-profile").Changed {
f, err := os.Create(registry.PodmanConfig().MemoryProfile)
if err != nil {
return errors.Wrap(err, "creating memory profile")
}
defer f.Close()
runtime.GC() // get up-to-date GC statistics
if err := pprof.WriteHeapProfile(f); err != nil {
return errors.Wrap(err, "writing memory profile")
}
}
return nil
}
func loggingHook() {
var found bool
for _, l := range common.LogLevels {
if l == strings.ToLower(logLevel) {
found = true
break
}
}
if !found {
fmt.Fprintf(os.Stderr, "Log Level %q is not supported, choose from: %s\n", logLevel, strings.Join(common.LogLevels, ", "))
os.Exit(1)
}
level, err := logrus.ParseLevel(logLevel)
if err != nil {
fmt.Fprint(os.Stderr, err.Error())
os.Exit(1)
}
logrus.SetLevel(level)
if logrus.IsLevelEnabled(logrus.InfoLevel) {
logrus.Infof("%s filtering at log level %s", os.Args[0], logrus.GetLevel())
}
}
func rootFlags(cmd *cobra.Command, opts *entities.PodmanConfig) {
cfg := opts.Config
srv, uri, ident := resolveDestination()
lFlags := cmd.Flags()
connectionFlagName := "connection"
lFlags.StringVarP(&opts.Engine.ActiveService, connectionFlagName, "c", srv, "Connection to use for remote Podman service")
_ = cmd.RegisterFlagCompletionFunc(connectionFlagName, common.AutocompleteSystemConnections)
urlFlagName := "url"
lFlags.StringVar(&opts.URI, urlFlagName, uri, "URL to access Podman service (CONTAINER_HOST)")
_ = cmd.RegisterFlagCompletionFunc(urlFlagName, completion.AutocompleteDefault)
// Context option added just for compatibility with DockerCLI.
lFlags.String("context", "default", "Name of the context to use to connect to the daemon (This flag is a NOOP and provided solely for scripting compatibility.)")
_ = lFlags.MarkHidden("context")
identityFlagName := "identity"
lFlags.StringVar(&opts.Identity, identityFlagName, ident, "path to SSH identity file, (CONTAINER_SSHKEY)")
_ = cmd.RegisterFlagCompletionFunc(identityFlagName, completion.AutocompleteDefault)
lFlags.BoolVar(&opts.NoOut, "noout", false, "do not output to stdout")
lFlags.BoolVarP(&opts.Remote, "remote", "r", registry.IsRemote(), "Access remote Podman service")
pFlags := cmd.PersistentFlags()
if registry.IsRemote() {
if err := lFlags.MarkHidden("remote"); err != nil {
logrus.Warnf("Unable to mark --remote flag as hidden: %s", err.Error())
}
opts.Remote = true
} else {
cgroupManagerFlagName := "cgroup-manager"
pFlags.StringVar(&cfg.Engine.CgroupManager, cgroupManagerFlagName, cfg.Engine.CgroupManager, "Cgroup manager to use (\"cgroupfs\"|\"systemd\")")
_ = cmd.RegisterFlagCompletionFunc(cgroupManagerFlagName, common.AutocompleteCgroupManager)
pFlags.StringVar(&opts.CPUProfile, "cpu-profile", "", "Path for the cpu-profiling results")
pFlags.StringVar(&opts.MemoryProfile, "memory-profile", "", "Path for the memory-profiling results")
conmonFlagName := "conmon"
pFlags.StringVar(&opts.ConmonPath, conmonFlagName, "", "Path of the conmon binary")
_ = cmd.RegisterFlagCompletionFunc(conmonFlagName, completion.AutocompleteDefault)
networkCmdPathFlagName := "network-cmd-path"
pFlags.StringVar(&cfg.Engine.NetworkCmdPath, networkCmdPathFlagName, cfg.Engine.NetworkCmdPath, "Path to the command for configuring the network")
_ = cmd.RegisterFlagCompletionFunc(networkCmdPathFlagName, completion.AutocompleteDefault)
cniConfigDirFlagName := "cni-config-dir"
pFlags.StringVar(&cfg.Network.NetworkConfigDir, cniConfigDirFlagName, cfg.Network.NetworkConfigDir, "Path of the configuration directory for CNI networks")
_ = cmd.RegisterFlagCompletionFunc(cniConfigDirFlagName, completion.AutocompleteDefault)
pFlags.StringVar(&cfg.Containers.DefaultMountsFile, "default-mounts-file", cfg.Containers.DefaultMountsFile, "Path to default mounts file")
eventsBackendFlagName := "events-backend"
pFlags.StringVar(&cfg.Engine.EventsLogger, eventsBackendFlagName, cfg.Engine.EventsLogger, `Events backend to use ("file"|"journald"|"none")`)
_ = cmd.RegisterFlagCompletionFunc(eventsBackendFlagName, common.AutocompleteEventBackend)
hooksDirFlagName := "hooks-dir"
pFlags.StringSliceVar(&cfg.Engine.HooksDir, hooksDirFlagName, cfg.Engine.HooksDir, "Set the OCI hooks directory path (may be set multiple times)")
_ = cmd.RegisterFlagCompletionFunc(hooksDirFlagName, completion.AutocompleteDefault)
pFlags.IntVar(&opts.MaxWorks, "max-workers", (runtime.NumCPU()*3)+1, "The maximum number of workers for parallel operations")
namespaceFlagName := "namespace"
pFlags.StringVar(&cfg.Engine.Namespace, namespaceFlagName, cfg.Engine.Namespace, "Set the libpod namespace, used to create separate views of the containers and pods on the system")
_ = cmd.RegisterFlagCompletionFunc(namespaceFlagName, completion.AutocompleteNone)
networkBackendFlagName := "network-backend"
pFlags.StringVar(&cfg.Network.NetworkBackend, networkBackendFlagName, cfg.Network.NetworkBackend, `Network backend to use ("cni"|"netavark")`)
_ = cmd.RegisterFlagCompletionFunc(networkBackendFlagName, common.AutocompleteNetworkBackend)
pFlags.MarkHidden(networkBackendFlagName)
rootFlagName := "root"
pFlags.StringVar(&cfg.Engine.StaticDir, rootFlagName, "", "Path to the root directory in which data, including images, is stored")
_ = cmd.RegisterFlagCompletionFunc(rootFlagName, completion.AutocompleteDefault)
pFlags.StringVar(&opts.RegistriesConf, "registries-conf", "", "Path to a registries.conf to use for image processing")
runrootFlagName := "runroot"
pFlags.StringVar(&opts.Runroot, runrootFlagName, "", "Path to the 'run directory' where all state information is stored")
_ = cmd.RegisterFlagCompletionFunc(runrootFlagName, completion.AutocompleteDefault)
runtimeFlagName := "runtime"
pFlags.StringVar(&opts.RuntimePath, runtimeFlagName, "", "Path to the OCI-compatible binary used to run containers, default is /usr/bin/runc")
_ = cmd.RegisterFlagCompletionFunc(runtimeFlagName, completion.AutocompleteDefault)
// -s is deprecated due to conflict with -s on subcommands
storageDriverFlagName := "storage-driver"
pFlags.StringVar(&opts.StorageDriver, storageDriverFlagName, "", "Select which storage driver is used to manage storage of images and containers (default is overlay)")
_ = cmd.RegisterFlagCompletionFunc(storageDriverFlagName, completion.AutocompleteNone) //TODO: what can we recommend here?
tmpdirFlagName := "tmpdir"
pFlags.StringVar(&opts.Engine.TmpDir, tmpdirFlagName, "", "Path to the tmp directory for libpod state content.\n\nNote: use the environment variable 'TMPDIR' to change the temporary storage location for container images, '/var/tmp'.\n")
_ = cmd.RegisterFlagCompletionFunc(tmpdirFlagName, completion.AutocompleteDefault)
pFlags.BoolVar(&opts.Trace, "trace", false, "Enable opentracing output (default false)")
// Hide these flags for both ABI and Tunneling
for _, f := range []string{
"cpu-profile",
"default-mounts-file",
"max-workers",
"memory-profile",
"registries-conf",
"trace",
} {
if err := pFlags.MarkHidden(f); err != nil {
logrus.Warnf("Unable to mark %s flag as hidden: %s", f, err.Error())
}
}
}
storageOptFlagName := "storage-opt"
pFlags.StringArrayVar(&opts.StorageOpts, storageOptFlagName, []string{}, "Used to pass an option to the storage driver")
_ = cmd.RegisterFlagCompletionFunc(storageOptFlagName, completion.AutocompleteNone)
// Override default --help information of `--help` global flag
var dummyHelp bool
pFlags.BoolVar(&dummyHelp, "help", false, "Help for podman")
logLevelFlagName := "log-level"
pFlags.StringVar(&logLevel, logLevelFlagName, logLevel, fmt.Sprintf("Log messages above specified level (%s)", strings.Join(common.LogLevels, ", ")))
_ = rootCmd.RegisterFlagCompletionFunc(logLevelFlagName, common.AutocompleteLogLevel)
// Only create these flags for ABI connections
if !registry.IsRemote() {
runtimeflagFlagName := "runtime-flag"
pFlags.StringArrayVar(&opts.RuntimeFlags, runtimeflagFlagName, []string{}, "add global flags for the container runtime")
_ = rootCmd.RegisterFlagCompletionFunc(runtimeflagFlagName, completion.AutocompleteNone)
pFlags.BoolVar(&useSyslog, "syslog", false, "Output logging information to syslog as well as the console (default false)")
}
}
func resolveDestination() (string, string, string) {
if uri, found := os.LookupEnv("CONTAINER_HOST"); found {
var ident string
if v, found := os.LookupEnv("CONTAINER_SSHKEY"); found {
ident = v
}
return "", uri, ident
}
cfg, err := config.ReadCustomConfig()
if err != nil {
logrus.Warning(errors.Wrap(err, "unable to read local containers.conf"))
return "", registry.DefaultAPIAddress(), ""
}
uri, ident, err := cfg.ActiveDestination()
if err != nil {
return "", registry.DefaultAPIAddress(), ""
}
return cfg.Engine.ActiveService, uri, ident
}
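// Resolution example (illustrative values): with
//
//	CONTAINER_HOST=ssh://core@localhost:2222/run/user/1000/podman/podman.sock
//	CONTAINER_SSHKEY=/home/core/.ssh/id_ed25519
//
// resolveDestination short-circuits to ("", $CONTAINER_HOST, $CONTAINER_SSHKEY)
// without consulting containers.conf; otherwise the active service named in
// containers.conf supplies the connection URI and identity file.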
func formatError(err error) string {
var message string
if errors.Cause(err) == define.ErrOCIRuntime {
// OCIRuntimeErrors include the reason for the failure in the
// second to last message in the error chain.
message = fmt.Sprintf(
"Error: %s: %s",
define.ErrOCIRuntime.Error(),
strings.TrimSuffix(err.Error(), ": "+define.ErrOCIRuntime.Error()),
)
} else {
if logrus.IsLevelEnabled(logrus.TraceLevel) {
message = fmt.Sprintf("Error: %+v", err)
} else {
message = fmt.Sprintf("Error: %v", err)
}
}
return message
}
| [
"\"MemProfileRate\""
]
| []
| [
"MemProfileRate"
]
| [] | ["MemProfileRate"] | go | 1 | 0 | |
core/postfix/start.py | #!/usr/bin/python3
import os
import glob
import shutil
import multiprocessing
import logging as log
import sys
from podop import run_server
from pwd import getpwnam
from socrate import system, conf
log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
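# Drop privileges to the postfix user and serve the Postfix lookup tables
# (transport, alias, domain, ...) over the Podop socket, backed by the admin API.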
def start_podop():
os.setuid(getpwnam('postfix').pw_uid)
os.mkdir('/dev/shm/postfix', mode=0o700)
url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/postfix/"
# TODO: Remove verbosity setting from Podop?
run_server(0, "postfix", "/tmp/podop.socket", [
("transport", "url", url + "transport/§"),
("alias", "url", url + "alias/§"),
("domain", "url", url + "domain/§"),
("mailbox", "url", url + "mailbox/§"),
("recipientmap", "url", url + "recipient/map/§"),
("sendermap", "url", url + "sender/map/§"),
("senderaccess", "url", url + "sender/access/§"),
("senderlogin", "url", url + "sender/login/§"),
("senderrate", "url", url + "sender/rate/§")
])
def is_valid_postconf_line(line):
return not line.startswith("#") and line != ''
# Actual startup script
os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front")
os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin")
os.environ["ANTISPAM_MILTER_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM_MILTER", "antispam:11332")
os.environ["LMTP_ADDRESS"] = system.get_host_address_from_environment("LMTP", "imap:2525")
os.environ["OUTCLEAN"] = os.environ["HOSTNAMES"].split(",")[0]
try:
_to_lookup = os.environ["OUTCLEAN"]
# Ensure we lookup a FQDN: @see #1884
if not _to_lookup.endswith('.'):
_to_lookup += '.'
os.environ["OUTCLEAN_ADDRESS"] = system.resolve_hostname(_to_lookup)
except Exception:
os.environ["OUTCLEAN_ADDRESS"] = "10.10.10.10"
for postfix_file in glob.glob("/conf/*.cf"):
conf.jinja(postfix_file, os.environ, os.path.join("/etc/postfix", os.path.basename(postfix_file)))
if os.path.exists("/overrides/postfix.cf"):
for line in open("/overrides/postfix.cf").read().strip().split("\n"):
if is_valid_postconf_line(line):
os.system('postconf -e "{}"'.format(line))
if os.path.exists("/overrides/postfix.master"):
for line in open("/overrides/postfix.master").read().strip().split("\n"):
if is_valid_postconf_line(line):
os.system('postconf -Me "{}"'.format(line))
for map_file in glob.glob("/overrides/*.map"):
destination = os.path.join("/etc/postfix", os.path.basename(map_file))
shutil.copyfile(map_file, destination)
os.system("postmap {}".format(destination))
os.remove(destination)
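# Force verified ("secure") TLS towards well-known mail providers, unless a
# policy map has already been generated.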
if not os.path.exists("/etc/postfix/tls_policy.map.db"):
with open("/etc/postfix/tls_policy.map", "w") as f:
for domain in ['gmail.com', 'yahoo.com', 'hotmail.com', 'aol.com', 'outlook.com', 'comcast.net', 'icloud.com', 'msn.com', 'hotmail.co.uk', 'live.com', 'yahoo.co.in', 'me.com', 'mail.ru', 'cox.net', 'yahoo.co.uk', 'verizon.net', 'ymail.com', 'hotmail.it', 'kw.com', 'yahoo.com.tw', 'mac.com', 'live.se', 'live.nl', 'yahoo.com.br', 'googlemail.com', 'libero.it', 'web.de', 'allstate.com', 'btinternet.com', 'online.no', 'yahoo.com.au', 'live.dk', 'earthlink.net', 'yahoo.fr', 'yahoo.it', 'gmx.de', 'hotmail.fr', 'shawinc.com', 'yahoo.de', 'moe.edu.sg', 'naver.com', 'bigpond.com', 'statefarm.com', 'remax.net', 'rocketmail.com', 'live.no', 'yahoo.ca', 'bigpond.net.au', 'hotmail.se', 'gmx.at', 'live.co.uk', 'mail.com', 'yahoo.in', 'yandex.ru', 'qq.com', 'charter.net', 'indeedemail.com', 'alice.it', 'hotmail.de', 'bluewin.ch', 'optonline.net', 'wp.pl', 'yahoo.es', 'hotmail.no', 'pindotmedia.com', 'orange.fr', 'live.it', 'yahoo.co.id', 'yahoo.no', 'hotmail.es', 'morganstanley.com', 'wellsfargo.com', 'wanadoo.fr', 'facebook.com', 'yahoo.se', 'fema.dhs.gov', 'rogers.com', 'yahoo.com.hk', 'live.com.au', 'nic.in', 'nab.com.au', 'ubs.com', 'shaw.ca', 'umich.edu', 'westpac.com.au', 'yahoo.com.mx', 'yahoo.com.sg', 'farmersagent.com', 'yahoo.dk', 'dhs.gov']:
f.write(f'{domain}\tsecure\n')
os.system("postmap /etc/postfix/tls_policy.map")
if "RELAYUSER" in os.environ:
path = "/etc/postfix/sasl_passwd"
conf.jinja("/conf/sasl_passwd", os.environ, path)
os.system("postmap {}".format(path))
# Run Podop and Postfix
multiprocessing.Process(target=start_podop).start()
os.system("/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing")
# Before starting postfix, we need to check permissions on /queue
# in the event that postfix,postdrop id have changed
os.system("postfix set-permissions")
os.system("postfix start-fg")
| []
| []
| [
"OUTCLEAN_ADDRESS",
"LOG_LEVEL",
"ANTISPAM_MILTER_ADDRESS",
"FRONT_ADDRESS",
"LMTP_ADDRESS",
"HOSTNAMES",
"ADMIN_ADDRESS",
"OUTCLEAN"
]
| [] | ["OUTCLEAN_ADDRESS", "LOG_LEVEL", "ANTISPAM_MILTER_ADDRESS", "FRONT_ADDRESS", "LMTP_ADDRESS", "HOSTNAMES", "ADMIN_ADDRESS", "OUTCLEAN"] | python | 8 | 0 | |
dali/test/python/test_operator_warp.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import nvidia.dali as dali
from nvidia.dali.backend_impl import TensorListGPU
import numpy as np
import math
from numpy.testing import assert_array_equal, assert_allclose
import os
import cv2
from test_utils import check_batch
from test_utils import compare_pipelines
from test_utils import RandomDataIterator
import random
test_data_root = os.environ['DALI_EXTRA_PATH']
caffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
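# Build a 2x3 affine matrix: translate (dst_cx, dst_cy) to the origin, rotate
# by `angle` while scaling by 1/zoom, then translate to (src_cx, src_cy).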
def gen_transform(angle, zoom, dst_cx, dst_cy, src_cx, src_cy):
t1 = np.array([[1, 0, -dst_cx], [0, 1, -dst_cy], [0, 0, 1]])
cosa = math.cos(angle)/zoom
sina = math.sin(angle)/zoom
r = np.array([
[cosa, -sina, 0],
[sina, cosa, 0],
[0, 0, 1]])
t2 = np.array([[1, 0, src_cx], [0, 1, src_cy], [0, 0, 1]])
return (np.matmul(t2, np.matmul(r, t1)))[0:2,0:3]
def gen_transforms(n, step):
a = 0.0
step = step * (math.pi/180)
out = np.zeros([n, 2, 3])
for i in range(n):
out[i,:,:] = gen_transform(a, 2, 160, 120, 100, 100)
a = a + step
return out.astype(np.float32)
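# Adjust the translation part for the half-pixel difference between DALI's
# pixel-center convention and OpenCV's sampling grid.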
def ToCVMatrix(matrix):
offset = np.matmul(matrix, np.array([[0.5], [0.5], [1]]))
result = matrix.copy()
result[0][2] = offset[0] - 0.5
result[1][2] = offset[1] - 0.5
return result
def CVWarp(output_type, input_type, warp_matrix = None, inv_map = False):
def warp_fn(img, matrix):
size = (320, 240)
matrix = ToCVMatrix(matrix)
if output_type == dali.types.FLOAT or input_type == dali.types.FLOAT:
img = np.float32(img)
out = cv2.warpAffine(img, matrix, size, borderMode = cv2.BORDER_CONSTANT, borderValue = [42,42,42],
flags = (cv2.INTER_LINEAR|cv2.WARP_INVERSE_MAP) if inv_map else cv2.INTER_LINEAR)
if output_type == dali.types.UINT8 and input_type == dali.types.FLOAT:
out = np.uint8(np.clip(out, 0, 255))
return out
if warp_matrix:
m = np.array(warp_matrix)
def warp_fixed(img):
return warp_fn(img, m)
return warp_fixed
return warp_fn
class WarpPipeline(Pipeline):
def __init__(self, device, batch_size, output_type, input_type, use_input, num_threads=3, device_id=0, num_gpus=1, inv_map=False):
super(WarpPipeline, self).__init__(batch_size, num_threads, device_id, seed=7865, exec_async=False, exec_pipelined=False)
self.use_input = use_input
self.use_dynamic_size = use_input # avoid Cartesian product
self.name = device
self.input = ops.CaffeReader(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus)
self.decode = ops.ImageDecoder(device = "cpu", output_type = types.RGB)
if input_type != dali.types.UINT8:
self.cast = ops.Cast(device = device, dtype = input_type)
else:
self.cast = None
static_size = None if self.use_dynamic_size else (240,320)
if use_input:
self.transform_source = ops.ExternalSource(lambda: gen_transforms(self.batch_size, 10))
self.warp = ops.WarpAffine(device = device, size=static_size, fill_value = 42, dtype = output_type, inverse_map=inv_map)
else:
warp_matrix = (0.1, 0.9, 10, 0.8, -0.2, -20)
self.warp = ops.WarpAffine(device = device, size=static_size, matrix = warp_matrix, fill_value = 42, dtype = output_type, inverse_map=inv_map)
self.iter = 0
def define_graph(self):
self.jpegs, self.labels = self.input(name = "Reader")
images = self.decode(self.jpegs)
if self.warp.device == "gpu":
images = images.gpu()
if self.cast:
images = self.cast(images)
dynamic_size = types.Constant(np.array([240, 320], dtype=np.float32)) if self.use_dynamic_size else None
if self.use_input:
transform = self.transform_source()
outputs = self.warp(images, transform, size = dynamic_size)
else:
outputs = self.warp(images, size = dynamic_size)
return outputs
class CVPipeline(Pipeline):
def __init__(self, batch_size, output_type, input_type, use_input, num_threads=3, device_id=0, num_gpus=1, inv_map=False):
super(CVPipeline, self).__init__(batch_size, num_threads, device_id, seed=7865, exec_async=False, exec_pipelined=False)
self.use_input = use_input
self.name = "cv"
self.input = ops.CaffeReader(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus)
self.decode = ops.ImageDecoder(device = "cpu", output_type = types.RGB)
if self.use_input:
self.transform_source = ops.ExternalSource(lambda: gen_transforms(self.batch_size, 10))
self.warp = ops.PythonFunction(function=CVWarp(output_type, input_type, inv_map=inv_map))
else:
self.warp = ops.PythonFunction(function=CVWarp(output_type, input_type, [[0.1, 0.9, 10], [0.8, -0.2, -20]], inv_map))
self.set_layout = ops.Reshape(layout="HWC")
self.iter = 0
def define_graph(self):
self.jpegs, self.labels = self.input(name = "Reader")
images = self.decode(self.jpegs)
if self.use_input:
self.transform = self.transform_source()
outputs = self.warp(images, self.transform)
else:
outputs = self.warp(images)
outputs = self.set_layout(outputs)
return outputs
def compare(pipe1, pipe2, eps):
epoch_size = pipe1.epoch_size("Reader")
batch_size = pipe1.batch_size
niter = (epoch_size + batch_size - 1) // batch_size
compare_pipelines(pipe1, pipe2, batch_size, niter, eps)
io_types = [
(dali.types.UINT8, dali.types.UINT8),
(dali.types.UINT8, dali.types.FLOAT),
(dali.types.FLOAT, dali.types.UINT8),
(dali.types.FLOAT, dali.types.FLOAT)
]
def test_cpu_vs_cv():
random.seed(1009)
for batch_size in [1, 4, 19]:
for use_input in [False, True]:
for (itype, otype) in io_types:
inv_map = random.choice([False, True])
print("Testing cpu vs cv",
"\nbatch size: ", batch_size,
" matrix as input: ", use_input,
" input_type: ", itype,
" output_type: ", otype,
" map_inverse:", inv_map)
cv_pipeline = CVPipeline(batch_size, otype, itype, use_input, inv_map=inv_map)
cv_pipeline.build()
cpu_pipeline = WarpPipeline("cpu", batch_size, otype, itype, use_input, inv_map=inv_map)
cpu_pipeline.build()
compare(cv_pipeline, cpu_pipeline, 8)
def test_gpu_vs_cv():
random.seed(1007)
for batch_size in [1, 4, 19]:
for use_input in [False, True]:
for (itype, otype) in io_types:
inv_map = random.choice([False, True])
print("Testing gpu vs cv",
"\nbatch size: ", batch_size,
" matrix as input: ", use_input,
" input_type: ", itype,
" output_type: ", otype,
" map_inverse:", inv_map)
cv_pipeline = CVPipeline(batch_size, otype, itype, use_input, inv_map=inv_map)
cv_pipeline.build()
gpu_pipeline = WarpPipeline("gpu", batch_size, otype, itype, use_input, inv_map=inv_map)
gpu_pipeline.build()
compare(cv_pipeline, gpu_pipeline, 8)
def test_gpu_vs_cpu():
random.seed(1005)
for batch_size in [1, 4, 19]:
for use_input in [False, True]:
for (itype, otype) in io_types:
inv_map = random.choice([False, True])
print("Testing gpu vs cpu",
"\nbatch size: ", batch_size,
" matrix as input: ", use_input,
" input_type: ", itype,
" output_type: ", otype,
" map_inverse:", inv_map)
cpu_pipeline = WarpPipeline("cpu", batch_size, otype, itype, use_input, inv_map=inv_map)
cpu_pipeline.build()
gpu_pipeline = WarpPipeline("gpu", batch_size, otype, itype, use_input, inv_map=inv_map)
gpu_pipeline.build()
compare(cpu_pipeline, gpu_pipeline, 1)
| []
| []
| [
"DALI_EXTRA_PATH"
]
| [] | ["DALI_EXTRA_PATH"] | python | 1 | 0 | |
python/hsfs/core/transformation_function_engine.py | #
# Copyright 2021 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy
import datetime
import warnings
from functools import partial
from hsfs import training_dataset, training_dataset_feature, transformation_function
from hsfs.core import transformation_function_api, statistics_api
from hsfs.client.exceptions import RestAPIError
from hsfs.core.builtin_transformation_function import BuiltInTransformationFunction
class TransformationFunctionEngine:
BUILTIN_FN_NAMES = [
"min_max_scaler",
"standard_scaler",
"robust_scaler",
"label_encoder",
]
def __init__(self, feature_store_id):
self._feature_store_id = feature_store_id
self._transformation_function_api = (
transformation_function_api.TransformationFunctionApi(feature_store_id)
)
self._statistics_api = statistics_api.StatisticsApi(
feature_store_id, training_dataset.TrainingDataset.ENTITY_TYPE
)
def save(self, transformation_fn_instance):
if self.is_builtin(transformation_fn_instance):
raise ValueError(
"Transformation function name '{name:}' with version 1 is reserved for built-in hsfs "
"functions. Please use other name or version".format(
name=transformation_fn_instance.name
)
)
if not callable(transformation_fn_instance.transformation_fn):
raise ValueError("transformer must be callable")
self._transformation_function_api.register_transformation_fn(
transformation_fn_instance
)
def get_transformation_fn(self, name, version=None):
transformation_fn_instances = (
self._transformation_function_api.get_transformation_fn(name, version)
)
return transformation_fn_instances[0]
def get_transformation_fns(self):
transformation_fn_instances = (
self._transformation_function_api.get_transformation_fn(
name=None, version=None
)
)
transformation_fns = []
for transformation_fn_instance in transformation_fn_instances:
transformation_fns.append(transformation_fn_instance)
return transformation_fns
def delete(self, transformation_function_instance):
self._transformation_function_api.delete(transformation_function_instance)
def get_td_transformation_fn(self, training_dataset):
attached_transformation_fns = (
self._transformation_function_api.get_td_transformation_fn(training_dataset)
)
transformation_fn_dict = {}
for attached_transformation_fn in attached_transformation_fns:
transformation_fn_dict[
attached_transformation_fn.name
] = attached_transformation_fn.transformation_function
return transformation_fn_dict
def attach_transformation_fn(self, training_dataset):
if training_dataset._transformation_functions:
for (
feature_name,
transformation_fn,
) in training_dataset._transformation_functions.items():
if feature_name in training_dataset.label:
raise ValueError(
"Online transformations for training dataset labels are not supported."
)
training_dataset._features.append(
training_dataset_feature.TrainingDatasetFeature(
name=feature_name,
feature_group_feature_name=feature_name,
type=transformation_fn.output_type,
label=False,
transformation_function=transformation_fn,
)
)
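# Register the built-in transformation functions (version 1) in the feature
# store, skipping any that the backend reports as already existing.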
def register_builtin_transformation_fns(self):
for name in self.BUILTIN_FN_NAMES:
try:
self._transformation_function_api.get_transformation_fn(name, 1)[0]
except RestAPIError as e:
if (
e.response.json().get("errorMsg")
== "Transformation function does not exist"
):
builtin_fn = BuiltInTransformationFunction(name)
(
builtin_source_code,
output_type,
) = builtin_fn.generate_source_code()
transformation_fn_instance = (
transformation_function.TransformationFunction(
featurestore_id=self._feature_store_id,
name=name,
version=1,
output_type=output_type,
builtin_source_code=builtin_source_code,
)
)
self._transformation_function_api.register_transformation_fn(
transformation_fn_instance
)
elif (
e.response.json().get("errorMsg")
== "The provided transformation function name and version already exists"
):
warnings.warn(e.response.json().get("errorMsg"))
def is_builtin(self, transformation_fn_instance):
return (
transformation_fn_instance.name in self.BUILTIN_FN_NAMES
and transformation_fn_instance.version == 1
)
@staticmethod
def populate_builtin_fn_arguments(
feature_name, transformation_function_instance, stat_content
):
if transformation_function_instance.name == "min_max_scaler":
min_value, max_value = BuiltInTransformationFunction.min_max_scaler_stats(
stat_content, feature_name
)
transformation_function_instance.transformation_fn = partial(
transformation_function_instance.transformation_fn,
min_value=min_value,
max_value=max_value,
)
elif transformation_function_instance.name == "standard_scaler":
mean, std_dev = BuiltInTransformationFunction.standard_scaler_stats(
stat_content, feature_name
)
transformation_function_instance.transformation_fn = partial(
transformation_function_instance.transformation_fn,
mean=mean,
std_dev=std_dev,
)
elif transformation_function_instance.name == "robust_scaler":
robust_scaler_stats = BuiltInTransformationFunction.robust_scaler_stats(
stat_content, feature_name
)
transformation_function_instance.transformation_fn = partial(
transformation_function_instance.transformation_fn,
p25=robust_scaler_stats[24],
p50=robust_scaler_stats[49],
p75=robust_scaler_stats[74],
)
elif transformation_function_instance.name == "label_encoder":
value_to_index = BuiltInTransformationFunction.encoder_stats(
stat_content, feature_name
)
transformation_function_instance.transformation_fn = partial(
transformation_function_instance.transformation_fn,
value_to_index=value_to_index,
)
else:
raise ValueError("Not implemented")
return transformation_function_instance
def populate_builtin_attached_fns(self, attached_transformation_fns, stat_content):
for ft_name in attached_transformation_fns:
if self.is_builtin(attached_transformation_fns[ft_name]):
# check if its built-in transformation function and populated with statistics arguments
transformation_fn = self.populate_builtin_fn_arguments(
ft_name, attached_transformation_fns[ft_name], stat_content
)
attached_transformation_fns[ft_name] = transformation_fn
return attached_transformation_fns
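# Map a Python/NumPy output type (or its string alias) to the name of the
# corresponding Spark SQL type constructor.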
@staticmethod
def infer_spark_type(output_type):
if output_type in (str, "str", "string"):
return "StringType()"
elif output_type in (bytes,):
return "BinaryType()"
elif output_type in (numpy.int8, "int8", "byte"):
return "ByteType()"
elif output_type in (numpy.int16, "int16", "short"):
return "ShortType()"
elif output_type in (int, "int", numpy.int32):
return "IntegerType()"
elif output_type in (numpy.int64, "int64", "long", "bigint"):
return "LongType()"
elif output_type in (float, "float"):
return "FloatType()"
elif output_type in (numpy.float64, "float64", "double"):
return "DoubleType()"
elif output_type in (datetime.datetime, numpy.datetime64):
return "TimestampType()"
elif output_type in (datetime.date,):
return "DateType()"
elif output_type in (bool, "boolean", "bool", numpy.bool_):
return "BooleanType()"
else:
raise TypeError("Unsupported type %s." % output_type)
| []
| []
| []
| [] | [] | python | null | null | null |
app.py | from flask import Flask, render_template, request, make_response
import sqlite3
from rake_nltk import Rake
import nltk
from nltk.corpus import wordnet
import PyDictionary
import json
from nltk.stem import WordNetLemmatizer
import os
from datetime import datetime
from functools import wraps, update_wrapper
app = Flask(__name__)
# SQLite Database
def create_questions_table():
conn = sqlite3.connect('database.db')
conn.execute('CREATE TABLE IF NOT EXISTS QuestionsTable (id INTEGER PRIMARY KEY,Question TEXT,Answer TEXT, '
'Key TEXT)')
conn.close()
# CALL TO TABLE CREATE
create_questions_table()
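# Decorator that disables client-side caching by attaching no-cache headers
# to the wrapped view's response.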
def nocache(view):
@wraps(view)
def no_cache(*args, **kwargs):
response = make_response(view(*args, **kwargs))
response.headers['Last-Modified'] = datetime.now()
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0'
response.headers['Pragma'] = 'no-cache'
response.headers['Expires'] = '-1'
return response
return update_wrapper(no_cache, view)
def load_question(number):
conn = sqlite3.connect('database.db')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute(f"select Question from QuestionsTable where id={number}")
return cur.fetchone()
def load_answer(number):
conn = sqlite3.connect('database.db')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute(f"select Answer from QuestionsTable where id={number}")
return cur.fetchone()
def load_key(number):
conn = sqlite3.connect('database.db')
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute(f"select Key from QuestionsTable where id={number}")
return cur.fetchone()
questions = [
"What is Internet?"
]
answers = [
'A global computer network providing a variety of information and communication facilities, consisting of interconnected networks using standardized communication protocols.'
]
KEY = [{
"communication": 7,
"computer": 16,
"consisting": 7,
"facility": 7,
"global": 7,
"information": 7,
"interconnected": 7,
"network": 7,
"protocol": 7,
"providing": 7,
"standardized": 7,
"using": 7,
"variety": 7
}]
# GET ITEM COUNT
def items_present():
conn = sqlite3.connect('database.db')
x = conn.execute('select count(Question) from QuestionsTable')
count = x.fetchone()[0]
conn.close()
return count
def break_phrases(list):
a = []
for x in list:
if len(x.split()) == 1:
a.append(x)
else:
a.extend(x.split())
return a
def lematize(lista):
w = WordNetLemmatizer()
a = list(map(w.lemmatize, lista))
return a
@app.route("/", methods=['GET'])
@nocache
def index():
count_itm = items_present()
if request.method == 'GET' and request.args.get("q") != "" and request.args.get("q") is not None:
index_question = request.args.get("q")
if int(index_question) <= count_itm:
return render_template("index.html", questionsCount=count_itm, questionToLoad=request.args.get("q"),
questionData=load_question(index_question))
return render_template("index.html", questionsCount=count_itm)
@app.route("/about")
@nocache
def about():
return render_template("about.html")
@app.route("/add")
@nocache
def add():
return render_template("add.html")
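# Score a submitted answer: extract its keywords, look them (or their
# synonyms) up in the stored keyword weights, and cap the score at 10.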
@app.route("/checkAnswer")
@nocache
def check():
max_score = 10
if request.method == 'GET' and request.args.get("a") != "" and request.args.get("a") is not None:
answer_data = request.args.get("a")
question_index = int(request.args.get("q"))
trained_data, test_data = Rake(), Rake()
load_ans = load_answer(question_index)[0]
trained_data.extract_keywords_from_text(load_ans)
test_data.extract_keywords_from_text(answer_data)
testlist = []
test_data_keywords = lematize(break_phrases(test_data.get_ranked_phrases()))
for x in test_data_keywords:
testlist.append(x)
result = 0
# the answer key was stored with json.dumps, so parse it with json.loads
# instead of eval(), and avoid shadowing the built-in name `dict`
key_weights = json.loads(load_key(question_index)[0])
for x in testlist:
if x in key_weights:
print(x)
result = result + (key_weights[x] * max_score) / 100
print(result, key_weights[x])
else:
# fall back to synonyms: award a keyword's weight when a synonym of
# the student's word appears in the answer key
syn = PyDictionary.PyDictionary().synonym(x)
if syn is None:
continue
print(syn)
for j in syn:
if j in key_weights:
result = result + (key_weights[j] * max_score) / 100
break
if result > 10:
result = 10
return str(result) + "#" + load_ans
@app.route("/loadDemo")
@nocache
def add_demo_questions():
msg = "UNKNOWN"
conn = sqlite3.connect('database.db')
try:
for i in range(0, len(questions)):
conn.execute("INSERT INTO QuestionsTable (Question,Answer,Key) VALUES(?, ?, ?)",
(questions[i], answers[i], json.dumps(KEY[i])))
conn.commit()
msg = "ADDED"
except:
return "ERROR"
finally:
conn.close()
return msg
@app.route("/emptyTable")
@nocache
def delete_table_data():
con = sqlite3.connect("database.db")
con.execute('delete from QuestionsTable')
con.commit()
con.close()
return "DONE"
def add_data(data_listing, each_val):
values_list2 = []
z = 0
for x in range(0, len(data_listing)):
values_list2.append(each_val)
z += each_val
values_list2[0] += (100 - sum(values_list2))
print(values_list2)
return values_list2
def add_to_table(question, answer, key):
conn = sqlite3.connect('database.db')
conn.execute("INSERT INTO QuestionsTable (Question,Answer,Key) VALUES(?, ?, ?)",
(question, answer, key))
conn.commit()
conn.close()
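# Add a new question: extract keywords from the model answer, spread 100% of
# the weight evenly across them, and persist question, answer and key.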
@app.route("/addQuestion", methods=['GET'])
def add_question():
answer_data = request.args.get("a")
question_data = request.args.get("q")
r = Rake()
r.extract_keywords_from_text(answer_data)
data_list = lematize(break_phrases(r.get_ranked_phrases()))
data_list = list(set(data_list))
each_value = int(100 / len(data_list))
values_list = add_data(data_list, each_value)
print(values_list)
print(data_list)
dict_data = {data_list[i]: values_list[i] for i in range(0, len(data_list))}
print(dict_data)
add_to_table(question_data, answer_data, json.dumps(dict_data))
return dict_data
#host='0.0.0.0'
if __name__ == '__main__':
app.run(debug=True, port=int(os.environ.get('PORT', 8080)))
| []
| []
| [
"PORT"
]
| [] | ["PORT"] | python | 1 | 0 | |
upgrade.go | // Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"io"
"os"
"path/filepath"
)
// dirEmpty returns whether or not the specified directory path is empty.
func dirEmpty(dirPath string) (bool, error) {
f, err := os.Open(dirPath)
if err != nil {
return false, err
}
defer f.Close()
// Read the names of a max of one entry from the directory. When the
// directory is empty, an io.EOF error will be returned, so allow it.
names, err := f.Readdirnames(1)
if err != nil && err != io.EOF {
return false, err
}
return len(names) == 0, nil
}
// oldBtcdHomeDir returns the OS specific home directory btcd used prior to
// version 0.3.3. This has since been replaced with dogutil.AppDataDir, but
// this function is still provided for the automatic upgrade path.
func oldBtcdHomeDir() string {
// Search for Windows APPDATA first. This won't exist on POSIX OSes.
appData := os.Getenv("APPDATA")
if appData != "" {
return filepath.Join(appData, "btcd")
}
// Fall back to standard HOME directory that works for most POSIX OSes.
home := os.Getenv("HOME")
if home != "" {
return filepath.Join(home, ".btcd")
}
// In the worst case, use the current directory.
return "."
}
// upgradeDBPathNet moves the database for a specific network from its
// location prior to btcd version 0.2.0 and uses heuristics to ascertain the old
// database type to rename to the new format.
func upgradeDBPathNet(oldDbPath, netName string) error {
// Prior to version 0.2.0, the database was named the same thing for
// both sqlite and leveldb. Use heuristics to figure out the type
// of the database and move it to the new path and name introduced with
// version 0.2.0 accordingly.
fi, err := os.Stat(oldDbPath)
if err == nil {
oldDbType := "sqlite"
if fi.IsDir() {
oldDbType = "leveldb"
}
// The new database name is based on the database type and
// resides in a directory named after the network type.
newDbRoot := filepath.Join(filepath.Dir(cfg.DataDir), netName)
newDbName := blockDbNamePrefix + "_" + oldDbType
if oldDbType == "sqlite" {
newDbName = newDbName + ".db"
}
newDbPath := filepath.Join(newDbRoot, newDbName)
// Create the new path if needed.
err = os.MkdirAll(newDbRoot, 0700)
if err != nil {
return err
}
// Move and rename the old database.
err := os.Rename(oldDbPath, newDbPath)
if err != nil {
return err
}
}
return nil
}
// upgradeDBPaths moves the databases from their locations prior to btcd
// version 0.2.0 to their new locations.
func upgradeDBPaths() error {
// Prior to version 0.2.0, the databases were in the "db" directory and
// their names were suffixed by "testnet" and "regtest" for their
// respective networks. Check for the old database and update it to the
// new path introduced with version 0.2.0 accordingly.
oldDbRoot := filepath.Join(oldBtcdHomeDir(), "db")
upgradeDBPathNet(filepath.Join(oldDbRoot, "btcd.db"), "mainnet")
upgradeDBPathNet(filepath.Join(oldDbRoot, "btcd_testnet.db"), "testnet")
upgradeDBPathNet(filepath.Join(oldDbRoot, "btcd_regtest.db"), "regtest")
// Remove the old db directory.
return os.RemoveAll(oldDbRoot)
}
// upgradeDataPaths moves the application data from its location prior to btcd
// version 0.3.3 to its new location.
func upgradeDataPaths() error {
// No need to migrate if the old and new home paths are the same.
oldHomePath := oldBtcdHomeDir()
newHomePath := defaultHomeDir
if oldHomePath == newHomePath {
return nil
}
// Only migrate if the old path exists and the new one doesn't.
if fileExists(oldHomePath) && !fileExists(newHomePath) {
// Create the new path.
btcdLog.Infof("Migrating application home path from '%s' to '%s'",
oldHomePath, newHomePath)
err := os.MkdirAll(newHomePath, 0700)
if err != nil {
return err
}
// Move old btcd.conf into new location if needed.
oldConfPath := filepath.Join(oldHomePath, defaultConfigFilename)
newConfPath := filepath.Join(newHomePath, defaultConfigFilename)
if fileExists(oldConfPath) && !fileExists(newConfPath) {
err := os.Rename(oldConfPath, newConfPath)
if err != nil {
return err
}
}
// Move old data directory into new location if needed.
oldDataPath := filepath.Join(oldHomePath, defaultDataDirname)
newDataPath := filepath.Join(newHomePath, defaultDataDirname)
if fileExists(oldDataPath) && !fileExists(newDataPath) {
err := os.Rename(oldDataPath, newDataPath)
if err != nil {
return err
}
}
// Remove the old home if it is empty or show a warning if not.
ohpEmpty, err := dirEmpty(oldHomePath)
if err != nil {
return err
}
if ohpEmpty {
err := os.Remove(oldHomePath)
if err != nil {
return err
}
} else {
btcdLog.Warnf("Not removing '%s' since it contains files "+
"not created by this application. You may "+
"want to manually move them or delete them.",
oldHomePath)
}
}
return nil
}
// doUpgrades performs upgrades to btcd as new versions require it.
func doUpgrades() error {
err := upgradeDBPaths()
if err != nil {
return err
}
return upgradeDataPaths()
}
| [
"\"APPDATA\"",
"\"HOME\""
]
| []
| [
"APPDATA",
"HOME"
]
| [] | ["APPDATA", "HOME"] | go | 2 | 0 | |
pkg/bbgo/environment.go | package bbgo
import (
"bytes"
"context"
"fmt"
"image/png"
"io/ioutil"
stdlog "log"
"math/rand"
"os"
"strings"
"sync"
"time"
"github.com/codingconcepts/env"
"github.com/pkg/errors"
"github.com/pquerna/otp"
log "github.com/sirupsen/logrus"
"github.com/slack-go/slack"
"github.com/spf13/viper"
"gopkg.in/tucnak/telebot.v2"
"github.com/c9s/bbgo/pkg/cmd/cmdutil"
"github.com/c9s/bbgo/pkg/fixedpoint"
"github.com/c9s/bbgo/pkg/interact"
"github.com/c9s/bbgo/pkg/notifier/slacknotifier"
"github.com/c9s/bbgo/pkg/notifier/telegramnotifier"
"github.com/c9s/bbgo/pkg/service"
"github.com/c9s/bbgo/pkg/slack/slacklog"
"github.com/c9s/bbgo/pkg/types"
"github.com/c9s/bbgo/pkg/util"
)
func init() {
// randomize pulling
rand.Seed(time.Now().UnixNano())
}
var LoadedExchangeStrategies = make(map[string]SingleExchangeStrategy)
var LoadedCrossExchangeStrategies = make(map[string]CrossExchangeStrategy)
func RegisterStrategy(key string, s interface{}) {
loaded := 0
if d, ok := s.(SingleExchangeStrategy); ok {
LoadedExchangeStrategies[key] = d
loaded++
}
if d, ok := s.(CrossExchangeStrategy); ok {
LoadedCrossExchangeStrategies[key] = d
loaded++
}
if loaded == 0 {
panic(fmt.Errorf("%T does not implement SingleExchangeStrategy or CrossExchangeStrategy", s))
}
}
var emptyTime time.Time
type SyncStatus int
const (
SyncNotStarted SyncStatus = iota
Syncing
SyncDone
)
// Environment presents the real exchange data layer
type Environment struct {
// Notifiability here for environment is for the streaming data notification
// note that, for back tests, we don't need notification.
Notifiability
PersistenceServiceFacade *service.PersistenceServiceFacade
DatabaseService *service.DatabaseService
OrderService *service.OrderService
TradeService *service.TradeService
ProfitService *service.ProfitService
PositionService *service.PositionService
BacktestService *service.BacktestService
RewardService *service.RewardService
MarginService *service.MarginService
SyncService *service.SyncService
AccountService *service.AccountService
WithdrawService *service.WithdrawService
DepositService *service.DepositService
// startTime is the time of start point (which is used in the backtest)
startTime time.Time
// syncStartTime is the time point we want to start the sync (for trades and orders)
syncStartTime time.Time
syncMutex sync.Mutex
syncStatusMutex sync.Mutex
syncStatus SyncStatus
syncConfig *SyncConfig
sessions map[string]*ExchangeSession
}
func NewEnvironment() *Environment {
return &Environment{
// default trade scan time
syncStartTime: time.Now().AddDate(-1, 0, 0), // defaults to sync from 1 year ago
sessions: make(map[string]*ExchangeSession),
startTime: time.Now(),
syncStatus: SyncNotStarted,
PersistenceServiceFacade: &service.PersistenceServiceFacade{
Memory: service.NewMemoryService(),
},
}
}
func (environ *Environment) Session(name string) (*ExchangeSession, bool) {
s, ok := environ.sessions[name]
return s, ok
}
func (environ *Environment) Sessions() map[string]*ExchangeSession {
return environ.sessions
}
func (environ *Environment) SelectSessions(names ...string) map[string]*ExchangeSession {
if len(names) == 0 {
return environ.sessions
}
sessions := make(map[string]*ExchangeSession)
for _, name := range names {
if s, ok := environ.Session(name); ok {
sessions[name] = s
}
}
return sessions
}
func (environ *Environment) ConfigureDatabase(ctx context.Context) error {
// configureDB configures the database service based on the environment variable
if driver, ok := os.LookupEnv("DB_DRIVER"); ok {
if dsn, ok := os.LookupEnv("DB_DSN"); ok {
return environ.ConfigureDatabaseDriver(ctx, driver, dsn)
}
} else if dsn, ok := os.LookupEnv("SQLITE3_DSN"); ok {
return environ.ConfigureDatabaseDriver(ctx, "sqlite3", dsn)
} else if dsn, ok := os.LookupEnv("MYSQL_URL"); ok {
return environ.ConfigureDatabaseDriver(ctx, "mysql", dsn)
}
return nil
}
func (environ *Environment) ConfigureDatabaseDriver(ctx context.Context, driver string, dsn string) error {
environ.DatabaseService = service.NewDatabaseService(driver, dsn)
err := environ.DatabaseService.Connect()
if err != nil {
return err
}
if err := environ.DatabaseService.Upgrade(ctx); err != nil {
return err
}
// get the db connection pool object to create other services
db := environ.DatabaseService.DB
environ.OrderService = &service.OrderService{DB: db}
environ.TradeService = &service.TradeService{DB: db}
environ.RewardService = &service.RewardService{DB: db}
environ.AccountService = &service.AccountService{DB: db}
environ.ProfitService = &service.ProfitService{DB: db}
environ.PositionService = &service.PositionService{DB: db}
environ.MarginService = &service.MarginService{DB: db}
environ.WithdrawService = &service.WithdrawService{DB: db}
environ.DepositService = &service.DepositService{DB: db}
environ.SyncService = &service.SyncService{
TradeService: environ.TradeService,
OrderService: environ.OrderService,
RewardService: environ.RewardService,
MarginService: environ.MarginService,
WithdrawService: &service.WithdrawService{DB: db},
DepositService: &service.DepositService{DB: db},
}
return nil
}
// AddExchangeSession adds the existing exchange session or pre-created exchange session
func (environ *Environment) AddExchangeSession(name string, session *ExchangeSession) *ExchangeSession {
// update Notifiability from the environment
session.Notifiability = environ.Notifiability
environ.sessions[name] = session
return session
}
// AddExchange adds the given exchange with the session name, this is the default
func (environ *Environment) AddExchange(name string, exchange types.Exchange) (session *ExchangeSession) {
session = NewExchangeSession(name, exchange)
return environ.AddExchangeSession(name, session)
}
func (environ *Environment) ConfigureExchangeSessions(userConfig *Config) error {
// if sessions are not defined, we detect the sessions automatically
if len(userConfig.Sessions) == 0 {
return environ.AddExchangesByViperKeys()
}
return environ.AddExchangesFromSessionConfig(userConfig.Sessions)
}
func (environ *Environment) AddExchangesByViperKeys() error {
for _, n := range types.SupportedExchanges {
if viper.IsSet(string(n) + "-api-key") {
exchange, err := cmdutil.NewExchangeWithEnvVarPrefix(n, "")
if err != nil {
return err
}
environ.AddExchange(n.String(), exchange)
}
}
return nil
}
func (environ *Environment) AddExchangesFromSessionConfig(sessions map[string]*ExchangeSession) error {
for sessionName, session := range sessions {
if err := session.InitExchange(sessionName, nil); err != nil {
return err
}
environ.AddExchangeSession(sessionName, session)
}
return nil
}
func (environ *Environment) IsBackTesting() bool {
return environ.BacktestService != nil
}
// Init prepares the data that will be used by the strategies
func (environ *Environment) Init(ctx context.Context) (err error) {
for n := range environ.sessions {
var session = environ.sessions[n]
if err = session.Init(ctx, environ); err != nil {
// we can skip initialized sessions
if err != ErrSessionAlreadyInitialized {
return err
}
}
}
return
}
// Start initializes the symbols data streams
func (environ *Environment) Start(ctx context.Context) (err error) {
for n := range environ.sessions {
var session = environ.sessions[n]
if err = session.InitSymbols(ctx, environ); err != nil {
return err
}
}
return
}
func (environ *Environment) ConfigurePersistence(conf *PersistenceConfig) error {
if conf.Redis != nil {
if err := env.Set(conf.Redis); err != nil {
return err
}
environ.PersistenceServiceFacade.Redis = service.NewRedisPersistenceService(conf.Redis)
}
if conf.Json != nil {
if _, err := os.Stat(conf.Json.Directory); os.IsNotExist(err) {
if err2 := os.MkdirAll(conf.Json.Directory, 0777); err2 != nil {
log.WithError(err2).Errorf("can not create directory: %s", conf.Json.Directory)
return err2
}
}
environ.PersistenceServiceFacade.Json = &service.JsonPersistenceService{Directory: conf.Json.Directory}
}
return nil
}
// ConfigureNotificationRouting configures the notification rules
// for symbol-based routes, we should register the same symbol rules for each session.
// for session-based routes, we should set the fixed callbacks for each session
func (environ *Environment) ConfigureNotificationRouting(conf *NotificationConfig) error {
// configure routing here
if conf.SymbolChannels != nil {
environ.SymbolChannelRouter.AddRoute(conf.SymbolChannels)
}
if conf.SessionChannels != nil {
environ.SessionChannelRouter.AddRoute(conf.SessionChannels)
}
if conf.Routing != nil {
// configure passive object notification routing
switch conf.Routing.Trade {
case "$silent": // silent, do not setup notification
case "$session":
defaultTradeUpdateHandler := func(trade types.Trade) {
environ.Notify(&trade)
}
for name := range environ.sessions {
session := environ.sessions[name]
// if we can route session name to channel successfully...
channel, ok := environ.SessionChannelRouter.Route(name)
if ok {
session.UserDataStream.OnTradeUpdate(func(trade types.Trade) {
environ.NotifyTo(channel, &trade)
})
} else {
session.UserDataStream.OnTradeUpdate(defaultTradeUpdateHandler)
}
}
case "$symbol":
// configure object routes for Trade
environ.ObjectChannelRouter.Route(func(obj interface{}) (channel string, ok bool) {
trade, matched := obj.(*types.Trade)
if !matched {
return
}
channel, ok = environ.SymbolChannelRouter.Route(trade.Symbol)
return
})
// use same handler for each session
handler := func(trade types.Trade) {
channel, ok := environ.RouteObject(&trade)
if ok {
environ.NotifyTo(channel, &trade)
} else {
environ.Notify(&trade)
}
}
for _, session := range environ.sessions {
session.UserDataStream.OnTradeUpdate(handler)
}
}
switch conf.Routing.Order {
case "$silent": // silent, do not setup notification
case "$session":
defaultOrderUpdateHandler := func(order types.Order) {
text := util.Render(TemplateOrderReport, order)
environ.Notify(text, &order)
}
for name := range environ.sessions {
session := environ.sessions[name]
// if we can route session name to channel successfully...
channel, ok := environ.SessionChannelRouter.Route(name)
if ok {
session.UserDataStream.OnOrderUpdate(func(order types.Order) {
text := util.Render(TemplateOrderReport, order)
environ.NotifyTo(channel, text, &order)
})
} else {
session.UserDataStream.OnOrderUpdate(defaultOrderUpdateHandler)
}
}
case "$symbol":
// add object route
environ.ObjectChannelRouter.Route(func(obj interface{}) (channel string, ok bool) {
order, matched := obj.(*types.Order)
if !matched {
return
}
channel, ok = environ.SymbolChannelRouter.Route(order.Symbol)
return
})
// use same handler for each session
handler := func(order types.Order) {
text := util.Render(TemplateOrderReport, order)
channel, ok := environ.RouteObject(&order)
if ok {
environ.NotifyTo(channel, text, &order)
} else {
environ.Notify(text, &order)
}
}
for _, session := range environ.sessions {
session.UserDataStream.OnOrderUpdate(handler)
}
}
switch conf.Routing.SubmitOrder {
case "$silent": // silent, do not setup notification
case "$symbol":
// add object route
environ.ObjectChannelRouter.Route(func(obj interface{}) (channel string, ok bool) {
order, matched := obj.(*types.SubmitOrder)
if !matched {
return
}
channel, ok = environ.SymbolChannelRouter.Route(order.Symbol)
return
})
}
// currently, not used
// FIXME: this is causing cyclic import
/*
switch conf.Routing.PnL {
case "$symbol":
environ.ObjectChannelRouter.Route(func(obj interface{}) (channel string, ok bool) {
report, matched := obj.(*pnl.AverageCostPnlReport)
if !matched {
return
}
channel, ok = environ.SymbolChannelRouter.Route(report.Symbol)
return
})
}
*/
}
return nil
}
func (environ *Environment) SetStartTime(t time.Time) *Environment {
environ.startTime = t
return environ
}
// SetSyncStartTime overrides the default trade scan time (-7 days)
func (environ *Environment) SetSyncStartTime(t time.Time) *Environment {
environ.syncStartTime = t
return environ
}
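// BindSync hooks trade and order updates from each session's user data stream
// into the database writers, according to the given sync config.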
func (environ *Environment) BindSync(config *SyncConfig) {
// skip this if we are running back-test
if environ.BacktestService != nil {
return
}
// If trade service is configured, we have the db configured
if environ.TradeService == nil {
return
}
if config == nil || config.UserDataStream == nil {
return
}
environ.syncConfig = config
tradeWriterCreator := func(session *ExchangeSession) func(trade types.Trade) {
return func(trade types.Trade) {
trade.IsMargin = session.Margin
trade.IsFutures = session.Futures
if session.Margin {
trade.IsIsolated = session.IsolatedMargin
} else if session.Futures {
trade.IsIsolated = session.IsolatedFutures
}
// The StrategyID field and the PnL field needs to be updated by the strategy.
// trade.StrategyID, trade.PnL
if err := environ.TradeService.Insert(trade); err != nil {
log.WithError(err).Errorf("trade insert error: %+v", trade)
}
}
}
orderWriterCreator := func(session *ExchangeSession) func(order types.Order) {
return func(order types.Order) {
order.IsMargin = session.Margin
order.IsFutures = session.Futures
if session.Margin {
order.IsIsolated = session.IsolatedMargin
} else if session.Futures {
order.IsIsolated = session.IsolatedFutures
}
switch order.Status {
case types.OrderStatusFilled, types.OrderStatusCanceled:
if order.ExecutedQuantity.Sign() > 0 {
if err := environ.OrderService.Insert(order); err != nil {
log.WithError(err).Errorf("order insert error: %+v", order)
}
}
}
}
}
for _, session := range environ.sessions {
// avoid using the iterator variable.
s2 := session
// if trade sync is on, we will write all received trades
if config.UserDataStream.Trades {
tradeWriter := tradeWriterCreator(s2)
session.UserDataStream.OnTradeUpdate(tradeWriter)
}
if config.UserDataStream.FilledOrders {
orderWriter := orderWriterCreator(s2)
session.UserDataStream.OnOrderUpdate(orderWriter)
}
}
}
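// Connect applies the registered subscriptions and connects the market data
// stream of every session, plus the user data stream for non-public sessions.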
func (environ *Environment) Connect(ctx context.Context) error {
log.Debugf("starting interaction...")
if err := interact.Start(ctx); err != nil {
return err
}
for n := range environ.sessions {
// avoid using the placeholder variable for the session because we use that in the callbacks
var session = environ.sessions[n]
var logger = log.WithField("session", n)
if len(session.Subscriptions) == 0 {
logger.Warnf("exchange session %s has no subscriptions", session.Name)
} else {
// add the subscribe requests to the stream
for _, s := range session.Subscriptions {
logger.Infof("subscribing %s %s %v", s.Symbol, s.Channel, s.Options)
session.MarketDataStream.Subscribe(s.Channel, s.Symbol, s.Options)
}
}
logger.Infof("connecting %s market data stream...", session.Name)
if err := session.MarketDataStream.Connect(ctx); err != nil {
return err
}
if !session.PublicOnly {
logger.Infof("connecting %s user data stream...", session.Name)
if err := session.UserDataStream.Connect(ctx); err != nil {
return err
}
}
}
return nil
}
func (environ *Environment) IsSyncing() (status SyncStatus) {
environ.syncStatusMutex.Lock()
status = environ.syncStatus
environ.syncStatusMutex.Unlock()
return status
}
func (environ *Environment) setSyncing(status SyncStatus) {
environ.syncStatusMutex.Lock()
environ.syncStatus = status
environ.syncStatusMutex.Unlock()
}
func (environ *Environment) syncWithUserConfig(ctx context.Context, userConfig *Config) error {
syncSymbols := userConfig.Sync.Symbols
sessions := environ.sessions
selectedSessions := userConfig.Sync.Sessions
if len(selectedSessions) > 0 {
sessions = environ.SelectSessions(selectedSessions...)
}
for _, session := range sessions {
if err := environ.syncSession(ctx, session, syncSymbols...); err != nil {
return err
}
if userConfig.Sync.DepositHistory {
if err := environ.SyncService.SyncDepositHistory(ctx, session.Exchange); err != nil {
return err
}
}
if userConfig.Sync.WithdrawHistory {
if err := environ.SyncService.SyncWithdrawHistory(ctx, session.Exchange); err != nil {
return err
}
}
if userConfig.Sync.RewardHistory {
if err := environ.SyncService.SyncRewardHistory(ctx, session.Exchange); err != nil {
return err
}
}
if userConfig.Sync.MarginHistory {
if err := environ.SyncService.SyncMarginHistory(ctx, session.Exchange,
userConfig.Sync.Since.Time(),
userConfig.Sync.MarginAssets...); err != nil {
return err
}
}
}
return nil
}
// Sync syncs all registered exchange sessions
func (environ *Environment) Sync(ctx context.Context, userConfig ...*Config) error {
if environ.SyncService == nil {
return nil
}
// for paper trade mode, skip sync
if util.IsPaperTrade() {
return nil
}
environ.syncMutex.Lock()
defer environ.syncMutex.Unlock()
environ.setSyncing(Syncing)
defer environ.setSyncing(SyncDone)
// sync by the defined user config
if len(userConfig) > 0 && userConfig[0] != nil && userConfig[0].Sync != nil {
return environ.syncWithUserConfig(ctx, userConfig[0])
}
// the default sync logics
for _, session := range environ.sessions {
if err := environ.syncSession(ctx, session); err != nil {
return err
}
if len(userConfig) == 0 || userConfig[0].Sync == nil {
continue
}
if userConfig[0].Sync.DepositHistory {
if err := environ.SyncService.SyncDepositHistory(ctx, session.Exchange); err != nil {
return err
}
}
if userConfig[0].Sync.WithdrawHistory {
if err := environ.SyncService.SyncWithdrawHistory(ctx, session.Exchange); err != nil {
return err
}
}
if userConfig[0].Sync.RewardHistory {
if err := environ.SyncService.SyncRewardHistory(ctx, session.Exchange); err != nil {
return err
}
}
}
return nil
}
func (environ *Environment) RecordAsset(t time.Time, session *ExchangeSession, assets types.AssetMap) {
// skip for back-test
if environ.BacktestService != nil {
return
}
if environ.DatabaseService == nil || environ.AccountService == nil {
return
}
if err := environ.AccountService.InsertAsset(
t,
session.Name,
session.ExchangeName,
session.SubAccount,
session.Margin,
session.IsolatedMargin,
session.IsolatedMarginSymbol,
assets); err != nil {
log.WithError(err).Errorf("can not insert asset record")
}
}
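// RecordPosition inserts a position change record together with the trade
// that caused it and, when available, the realized profit.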
func (environ *Environment) RecordPosition(position *types.Position, trade types.Trade, profit *types.Profit) {
// skip for back-test
if environ.BacktestService != nil {
return
}
if environ.DatabaseService == nil || environ.ProfitService == nil || environ.PositionService == nil {
return
}
// profit may be nil, so guard before dereferencing it
if profit != nil {
if position.Strategy == "" && profit.Strategy != "" {
position.Strategy = profit.Strategy
}
if position.StrategyInstanceID == "" && profit.StrategyInstanceID != "" {
position.StrategyInstanceID = profit.StrategyInstanceID
}
}
if profit != nil {
if err := environ.PositionService.Insert(position, trade, profit.Profit); err != nil {
log.WithError(err).Errorf("can not insert position record")
}
if err := environ.ProfitService.Insert(*profit); err != nil {
log.WithError(err).Errorf("can not insert profit record: %+v", profit)
}
} else {
if err := environ.PositionService.Insert(position, trade, fixedpoint.Zero); err != nil {
log.WithError(err).Errorf("can not insert position record")
}
}
}
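// RecordProfit persists a single profit record, skipping back-tests and
// setups without a database.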
func (environ *Environment) RecordProfit(profit types.Profit) {
// skip for back-test
if environ.BacktestService != nil {
return
}
if environ.DatabaseService == nil {
return
}
if environ.ProfitService == nil {
return
}
if err := environ.ProfitService.Insert(profit); err != nil {
log.WithError(err).Errorf("can not insert profit record: %+v", profit)
}
}
func (environ *Environment) SyncSession(ctx context.Context, session *ExchangeSession, defaultSymbols ...string) error {
if environ.SyncService == nil {
return nil
}
environ.syncMutex.Lock()
defer environ.syncMutex.Unlock()
environ.setSyncing(Syncing)
defer environ.setSyncing(SyncDone)
return environ.syncSession(ctx, session, defaultSymbols...)
}
func (environ *Environment) syncSession(ctx context.Context, session *ExchangeSession, defaultSymbols ...string) error {
symbols, err := session.getSessionSymbols(defaultSymbols...)
if err != nil {
return err
}
log.Infof("syncing symbols %v from session %s", symbols, session.Name)
return environ.SyncService.SyncSessionSymbols(ctx, session.Exchange, environ.syncStartTime, symbols...)
}
func (environ *Environment) ConfigureNotificationSystem(userConfig *Config) error {
environ.Notifiability = Notifiability{
SymbolChannelRouter: NewPatternChannelRouter(nil),
SessionChannelRouter: NewPatternChannelRouter(nil),
ObjectChannelRouter: NewObjectChannelRouter(),
}
// setup default notification config
if userConfig.Notifications == nil {
userConfig.Notifications = &NotificationConfig{
Routing: &SlackNotificationRouting{
Trade: "$session",
Order: "$silent",
SubmitOrder: "$silent",
},
}
}
var persistence = environ.PersistenceServiceFacade.Get()
err := environ.setupInteraction(persistence)
if err != nil {
return err
}
// setup slack
slackToken := viper.GetString("slack-token")
if len(slackToken) > 0 && userConfig.Notifications != nil {
environ.setupSlack(userConfig, slackToken, persistence)
}
// check if telegram bot token is defined
telegramBotToken := viper.GetString("telegram-bot-token")
if len(telegramBotToken) > 0 {
if err := environ.setupTelegram(userConfig, telegramBotToken, persistence); err != nil {
return err
}
}
if userConfig.Notifications != nil {
if err := environ.ConfigureNotificationRouting(userConfig.Notifications); err != nil {
return err
}
}
return nil
}
// getAuthStoreID returns the authentication store id
// if telegram bot token is defined, the bot id will be used.
// if not, env var $USER will be used.
// if both are not defined, a default "default" will be used.
func getAuthStoreID() string {
telegramBotToken := viper.GetString("telegram-bot-token")
if len(telegramBotToken) > 0 {
tt := strings.Split(telegramBotToken, ":")
return tt[0]
}
userEnv := os.Getenv("USER")
if userEnv != "" {
return userEnv
}
return "default"
}
func (environ *Environment) setupInteraction(persistence service.PersistenceService) error {
var otpQRCodeImagePath = "otp.png"
var key *otp.Key
var keyURL string
var authStore = environ.getAuthStore(persistence)
if v, ok := util.GetEnvVarBool("FLUSH_OTP_KEY"); v && ok {
log.Warnf("flushing otp key...")
if err := authStore.Reset(); err != nil {
return err
}
}
if err := authStore.Load(&keyURL); err != nil {
log.Warnf("telegram session not found, generating new one-time password key for new telegram session...")
newKey, err := setupNewOTPKey(otpQRCodeImagePath)
if err != nil {
return errors.Wrapf(err, "failed to setup totp (time-based one time password) key")
}
key = newKey
keyURL = key.URL()
if err := authStore.Save(keyURL); err != nil {
return err
}
printOtpAuthGuide(otpQRCodeImagePath)
} else if keyURL != "" {
key, err = otp.NewKeyFromURL(keyURL)
if err != nil {
log.WithError(err).Errorf("can not load otp key from url: %s, generating new otp key", keyURL)
newKey, err := setupNewOTPKey(otpQRCodeImagePath)
if err != nil {
return errors.Wrapf(err, "failed to setup totp (time-based one time password) key")
}
key = newKey
keyURL = key.URL()
if err := authStore.Save(keyURL); err != nil {
return err
}
printOtpAuthGuide(otpQRCodeImagePath)
} else {
log.Infof("otp key loaded: %s", util.MaskKey(key.Secret()))
printOtpAuthGuide(otpQRCodeImagePath)
}
}
authStrict := false
authMode := interact.AuthModeToken
authToken := viper.GetString("telegram-bot-auth-token")
if authToken != "" && key != nil {
authStrict = true
} else if authToken != "" {
authMode = interact.AuthModeToken
} else if key != nil {
authMode = interact.AuthModeOTP
}
if authMode == interact.AuthModeToken {
log.Debugf("found interaction auth token, using token mode for authorization...")
printAuthTokenGuide(authToken)
}
interact.AddCustomInteraction(&interact.AuthInteract{
Strict: authStrict,
Mode: authMode,
Token: authToken, // can be empty string here
OneTimePasswordKey: key, // can be nil here
})
return nil
}
func (environ *Environment) getAuthStore(persistence service.PersistenceService) service.Store {
id := getAuthStoreID()
return persistence.NewStore("bbgo", "auth", id)
}
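// setupSlack wires up the slack notifier, the error-log hook and the
// interactive slack messenger using the bot and app-level tokens.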
func (environ *Environment) setupSlack(userConfig *Config, slackToken string, persistence service.PersistenceService) {
conf := userConfig.Notifications.Slack
if conf == nil {
return
}
if !strings.HasPrefix(slackToken, "xoxb-") {
log.Error("SLACK_BOT_TOKEN must have the prefix \"xoxb-\".")
return
}
// app-level token (for specific api)
slackAppToken := viper.GetString("slack-app-token")
if !strings.HasPrefix(slackAppToken, "xapp-") {
log.Errorf("SLACK_APP_TOKEN must have the prefix \"xapp-\".")
return
}
if conf.ErrorChannel != "" {
log.Debugf("found slack configured, setting up log hook...")
log.AddHook(slacklog.NewLogHook(slackToken, conf.ErrorChannel))
}
log.Debugf("adding slack notifier with default channel: %s", conf.DefaultChannel)
var slackOpts = []slack.Option{
slack.OptionLog(stdlog.New(os.Stdout, "api: ", stdlog.Lshortfile|stdlog.LstdFlags)),
slack.OptionAppLevelToken(slackAppToken),
}
if b, ok := util.GetEnvVarBool("DEBUG_SLACK"); ok {
slackOpts = append(slackOpts, slack.OptionDebug(b))
}
var client = slack.New(slackToken, slackOpts...)
var notifier = slacknotifier.New(client, conf.DefaultChannel)
environ.AddNotifier(notifier)
// allocate a store, so that we can save the chatID for the owner
var messenger = interact.NewSlack(client)
var sessions = interact.SlackSessionMap{}
var sessionStore = persistence.NewStore("bbgo", "slack")
if err := sessionStore.Load(&sessions); err == nil {
// TODO: this is not necessary for slack, but we should find a way to restore the sessions
/*
for _, session := range sessions {
if session.IsAuthorized() {
// notifier.AddChat(session.Chat)
}
}
messenger.RestoreSessions(sessions)
messenger.OnAuthorized(func(userSession *interact.SlackSession) {
if userSession.IsAuthorized() {
// notifier.AddChat(userSession.Chat)
}
})
*/
}
interact.AddMessenger(messenger)
}
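// setupTelegram creates the telegram bot, restores previously authorized chat
// sessions from the persistence store, and registers the messenger.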
func (environ *Environment) setupTelegram(userConfig *Config, telegramBotToken string, persistence service.PersistenceService) error {
tt := strings.Split(telegramBotToken, ":")
telegramID := tt[0]
bot, err := telebot.NewBot(telebot.Settings{
// You can also set custom API URL.
// If field is empty it equals to "https://api.telegram.org".
// URL: "http://195.129.111.17:8012",
Token: telegramBotToken,
Poller: &telebot.LongPoller{Timeout: 10 * time.Second},
})
if err != nil {
return err
}
var opts []telegramnotifier.Option
if userConfig.Notifications != nil && userConfig.Notifications.Telegram != nil {
log.Infof("telegram broadcast is enabled")
opts = append(opts, telegramnotifier.UseBroadcast())
}
var notifier = telegramnotifier.New(bot, opts...)
environ.Notifiability.AddNotifier(notifier)
// allocate a store, so that we can save the chatID for the owner
var messenger = interact.NewTelegram(bot)
var sessions = interact.TelegramSessionMap{}
var sessionStore = persistence.NewStore("bbgo", "telegram", telegramID)
if err := sessionStore.Load(&sessions); err != nil {
if err != service.ErrPersistenceNotExists {
log.WithError(err).Errorf("unexpected persistence error")
}
} else {
for _, session := range sessions {
if session.IsAuthorized() {
notifier.AddChat(session.Chat)
}
}
// sessions must be restored only after the notifier has been updated above
messenger.RestoreSessions(sessions)
}
messenger.OnAuthorized(func(userSession *interact.TelegramSession) {
if userSession.IsAuthorized() {
notifier.AddChat(userSession.Chat)
}
log.Infof("user session %d got authorized, saving telegram sessions...", userSession.User.ID)
if err := sessionStore.Save(messenger.Sessions()); err != nil {
log.WithError(err).Errorf("telegram session save error")
}
})
interact.AddMessenger(messenger)
return nil
}
func writeOTPKeyAsQRCodePNG(key *otp.Key, imagePath string) error {
// Convert TOTP key into a PNG
var buf bytes.Buffer
img, err := key.Image(512, 512)
if err != nil {
return err
}
if err := png.Encode(&buf, img); err != nil {
return err
}
if err := ioutil.WriteFile(imagePath, buf.Bytes(), 0644); err != nil {
return err
}
return nil
}
// setupNewOTPKey generates a new OTP key and saves the secret as a QR code image
func setupNewOTPKey(qrcodeImagePath string) (*otp.Key, error) {
key, err := service.NewDefaultTotpKey()
if err != nil {
return nil, errors.Wrapf(err, "failed to setup totp (time-based one time password) key")
}
printOtpKey(key)
if err := writeOTPKeyAsQRCodePNG(key, qrcodeImagePath); err != nil {
return nil, err
}
return key, nil
}
func printOtpKey(key *otp.Key) {
fmt.Println("")
fmt.Println("====================================================================")
fmt.Println(" PLEASE STORE YOUR OTP KEY SAFELY ")
fmt.Println("====================================================================")
fmt.Printf(" Issuer: %s\n", key.Issuer())
fmt.Printf(" AccountName: %s\n", key.AccountName())
fmt.Printf(" Secret: %s\n", key.Secret())
fmt.Printf(" Key URL: %s\n", key.URL())
fmt.Println("====================================================================")
fmt.Println("")
}
func printOtpAuthGuide(qrcodeImagePath string) {
fmt.Printf(`
To scan your OTP QR code, please run the following command:
open %s
For telegram, send the auth command with the generated one-time password to the bbgo bot you created to enable the notification:
/auth
`, qrcodeImagePath)
}
func printAuthTokenGuide(token string) {
fmt.Printf(`
For telegram, send the following command to the bbgo bot you created to enable the notification:
/auth
And then enter your token
%s
`, token)
}
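// getSessionSymbols resolves which symbols a session should trade: the
// isolated-margin symbol takes precedence, then any explicitly passed
// defaults, and finally symbol discovery via FindPossibleSymbols.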
func (session *ExchangeSession) getSessionSymbols(defaultSymbols ...string) ([]string, error) {
if session.IsolatedMargin {
return []string{session.IsolatedMarginSymbol}, nil
}
if len(defaultSymbols) > 0 {
return defaultSymbols, nil
}
return session.FindPossibleSymbols()
}
| ["\"USER\""] | [] | ["USER"] | [] | ["USER"] | go | 1 | 0 | |
examples/advanced_create_instance.py | import os
from datacrunch import DataCrunchClient
from datacrunch.exceptions import APIException
"""
In this hypothetical example, we check if we have enough balance
to deploy an 8V100.48V instance for a week.
If there's not enough balance, we deploy a 4V100.20V instance.
This example uses the balance service to check the current balance,
the instance_types service to check instance type details (price per hour).
We also perform other basic tasks such as creating the client and adding a new SSH key.
"""
# The instance types we want to deploy
INSTANCE_TYPE_8V = '8V100.48V'
INSTANCE_TYPE_4V = '4V100.20V'
# Arbitrary duration for the example
DURATION = 24 * 7 # one week
# Get client secret from environment variable
CLIENT_SECRET = os.environ['DATACRUNCH_CLIENT_SECRET']
CLIENT_ID = 'Ibk5bdxV64lKAWOqYnvSi' # Replace with your client ID
try:
# Create the datacrunch client
datacrunch = DataCrunchClient(CLIENT_ID, CLIENT_SECRET)
# Create new SSH key
public_key = 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAI0qq2Qjt5GPi7DKdcnBHOkvk8xNsG9dA607tnWagOkHC test_key'
ssh_key = datacrunch.ssh_keys.create('my test key', public_key)
# Get all SSH keys
ssh_keys = datacrunch.ssh_keys.get()
ssh_keys_ids = list(map(lambda ssh_key: ssh_key.id, ssh_keys))
# Get our current balance
balance = datacrunch.balance.get()
print(balance.amount)
# Get instance types
instance_types = datacrunch.instance_types.get()
# Deploy 8V instance if enough balance for a week, otherwise deploy a 4V
for instance_details in instance_types:
if instance_details.instance_type == INSTANCE_TYPE_8V:
price_per_hour = instance_details.price_per_hour
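            # weekly cost is price_per_hour * DURATION (24 * 7 hours); only
            # deploy the 8V instance when that cost fits the current balance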
if price_per_hour * DURATION < balance.amount:
# Deploy a new 8V instance
instance = datacrunch.instances.create(instance_type=INSTANCE_TYPE_8V,
image='fastai',
ssh_key_ids=ssh_keys_ids,
hostname='example',
description='large instance',
os_volumes={
"name": "Large OS volume",
"size": 95
})
else:
# Deploy a new 4V instance
instance = datacrunch.instances.create(instance_type=INSTANCE_TYPE_4V,
image='fastai',
ssh_key_ids=ssh_keys_ids,
hostname='example',
description='medium instance')
except APIException as exception:
print(exception)
| [] | [] | ["DATACRUNCH_CLIENT_SECRET"] | [] | ["DATACRUNCH_CLIENT_SECRET"] | python | 1 | 0 |
powerline_shell/segments/virtual_env.py | import os
from ..utils import BasicSegment
class Segment(BasicSegment):
def add_to_powerline(self):
env = os.getenv('VIRTUAL_ENV') \
or os.getenv('CONDA_ENV_PATH') \
or os.getenv('CONDA_DEFAULT_ENV')
if not env:
return
env_name = os.path.basename(env)
bg = self.powerline.theme.VIRTUAL_ENV_BG
fg = self.powerline.theme.VIRTUAL_ENV_FG
self.powerline.append(" " + env_name + " ", fg, bg)
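# Illustrative: with VIRTUAL_ENV=/home/me/.venvs/proj set (a hypothetical
# path), env_name resolves to "proj" and " proj " is rendered in the prompt.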
| [] | [] | ["VIRTUAL_ENV", "CONDA_ENV_PATH", "CONDA_DEFAULT_ENV"] | [] | ["VIRTUAL_ENV", "CONDA_ENV_PATH", "CONDA_DEFAULT_ENV"] | python | 3 | 0 |
test/unit/models/test_superblocks.py | import pytest
import sys
import os
import time
os.environ['SENTINEL_ENV'] = 'test'
os.environ['SENTINEL_CONFIG'] = os.path.normpath(os.path.join(os.path.dirname(__file__), '../../test_sentinel.conf'))
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../../../lib')))
import misc
import config
from models import GovernanceObject, Proposal, Superblock, Vote
# clear DB tables before each execution
def setup():
# clear tables first...
Vote.delete().execute()
Proposal.delete().execute()
Superblock.delete().execute()
GovernanceObject.delete().execute()
def teardown():
pass
# list of proposal govobjs to import for testing
@pytest.fixture
def go_list_proposals():
items = [
{u'AbsoluteYesCount': 1000,
u'AbstainCount': 7,
u'CollateralHash': u'acb67ec3f3566c9b94a26b70b36c1f74a010a37c0950c22d683cc50da324fdca',
u'DataHex': u'5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20323132323532303430302c20226e616d65223a20226465616e2d6d696c6c65722d35343933222c20227061796d656e745f61646472657373223a2022795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e74223a2032352e37352c202273746172745f65706f6368223a20313437343236313038362c202274797065223a20312c202275726c223a2022687474703a2f2f6461736863656e7472616c2e6f72672f6465616e2d6d696c6c65722d35343933227d5d5d',
u'DataString': u'[["proposal", {"end_epoch": 2122520400, "name": "dean-miller-5493", "payment_address": "yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui", "payment_amount": 25.75, "start_epoch": 1474261086, "type": 1, "url": "http://ravendarkcentral.org/dean-miller-5493"}]]',
u'Hash': u'dfd7d63979c0b62456b63d5fc5306dbec451180adee85876cbf5b28c69d1a86c',
u'IsValidReason': u'',
u'NoCount': 25,
u'YesCount': 1025,
u'fBlockchainValidity': True,
u'fCachedDelete': False,
u'fCachedEndorsed': False,
u'fCachedFunding': False,
u'fCachedValid': True},
{u'AbsoluteYesCount': 1000,
u'AbstainCount': 29,
u'CollateralHash': u'3efd23283aa98c2c33f80e4d9ed6f277d195b72547b6491f43280380f6aac810',
u'DataHex': u'5b5b2270726f706f73616c222c207b22656e645f65706f6368223a20323132323532303430302c20226e616d65223a20226665726e616e64657a2d37363235222c20227061796d656e745f61646472657373223a2022795443363268755234595145506e39414a486a6e517878726548536267416f617456222c20227061796d656e745f616d6f756e74223a2033322e30312c202273746172745f65706f6368223a20313437343236313038362c202274797065223a20312c202275726c223a2022687474703a2f2f6461736863656e7472616c2e6f72672f6665726e616e64657a2d37363235227d5d5d',
u'DataString': u'[["proposal", {"end_epoch": 2122520400, "name": "fernandez-7625", "payment_address": "yTC62huR4YQEPn9AJHjnQxxreHSbgAoatV", "payment_amount": 32.01, "start_epoch": 1474261086, "type": 1, "url": "http://ravendarkcentral.org/fernandez-7625"}]]',
u'Hash': u'0523445762025b2e01a2cd34f1d10f4816cf26ee1796167e5b029901e5873630',
u'IsValidReason': u'',
u'NoCount': 56,
u'YesCount': 1056,
u'fBlockchainValidity': True,
u'fCachedDelete': False,
u'fCachedEndorsed': False,
u'fCachedFunding': False,
u'fCachedValid': True},
]
return items
# list of superblock govobjs to import for testing
@pytest.fixture
def go_list_superblocks():
items = [
{u'AbsoluteYesCount': 1,
u'AbstainCount': 0,
u'CollateralHash': u'0000000000000000000000000000000000000000000000000000000000000000',
u'DataHex': u'5b5b2274726967676572222c207b226576656e745f626c6f636b5f686569676874223a2037323639362c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e7473223a202232352e37353030303030307c32352e3735303030303030222c202274797065223a20327d5d5d',
u'DataString': u'[["trigger", {"event_block_height": 72696, "payment_addresses": "yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui|yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui", "payment_amounts": "25.75000000|25.7575000000", "type": 2}]]',
u'Hash': u'667c4a53eb81ba14d02860fdb4779e830eb8e98306f9145f3789d347cbeb0721',
u'IsValidReason': u'',
u'NoCount': 0,
u'YesCount': 1,
u'fBlockchainValidity': True,
u'fCachedDelete': False,
u'fCachedEndorsed': False,
u'fCachedFunding': False,
u'fCachedValid': True},
{u'AbsoluteYesCount': 1,
u'AbstainCount': 0,
u'CollateralHash': u'0000000000000000000000000000000000000000000000000000000000000000',
u'DataHex': u'5b5b2274726967676572222c207b226576656e745f626c6f636b5f686569676874223a2037323639362c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e7473223a202232352e37353030303030307c32352e3735303030303030222c202274797065223a20327d5d5d',
u'DataString': u'[["trigger", {"event_block_height": 72696, "payment_addresses": "yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui|yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui", "payment_amounts": "25.75000000|25.75000000", "type": 2}]]',
u'Hash': u'8f91ffb105739ec7d5b6c0b12000210fcfcc0837d3bb8ca6333ba93ab5fc0bdf',
u'IsValidReason': u'',
u'NoCount': 0,
u'YesCount': 1,
u'fBlockchainValidity': True,
u'fCachedDelete': False,
u'fCachedEndorsed': False,
u'fCachedFunding': False,
u'fCachedValid': True},
{u'AbsoluteYesCount': 1,
u'AbstainCount': 0,
u'CollateralHash': u'0000000000000000000000000000000000000000000000000000000000000000',
u'DataHex': u'5b5b2274726967676572222c207b226576656e745f626c6f636b5f686569676874223a2037323639362c20227061796d656e745f616464726573736573223a2022795965384b77796155753559737753596d42337133727978385854557539793755697c795965384b77796155753559737753596d4233713372797838585455753979375569222c20227061796d656e745f616d6f756e7473223a202232352e37353030303030307c32352e3735303030303030222c202274797065223a20327d5d5d',
u'DataString': u'[["trigger", {"event_block_height": 72696, "payment_addresses": "yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui|yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui", "payment_amounts": "25.75000000|25.75000000", "type": 2}]]',
u'Hash': u'bc2834f357da7504138566727c838e6ada74d079e63b6104701f4f8eb05dae36',
u'IsValidReason': u'',
u'NoCount': 0,
u'YesCount': 1,
u'fBlockchainValidity': True,
u'fCachedDelete': False,
u'fCachedEndorsed': False,
u'fCachedFunding': False,
u'fCachedValid': True},
]
return items
@pytest.fixture
def superblock():
sb = Superblock(
event_block_height=62500,
payment_addresses='yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui|yTC62huR4YQEPn9AJHjnQxxreHSbgAoatV',
payment_amounts='5|3',
proposal_hashes='e8a0057914a2e1964ae8a945c4723491caae2077a90a00a2aabee22b40081a87|d1ce73527d7cd6f2218f8ca893990bc7d5c6b9334791ce7973bfa22f155f826e',
)
return sb
def test_superblock_is_valid(superblock):
from ravendarkd import RavenDarkDaemon
ravendarkd = RavenDarkDaemon.from_ravendark_conf(config.ravendark_conf)
orig = Superblock(**superblock.get_dict()) # make a copy
# original as-is should be valid
assert orig.is_valid() is True
# mess with payment amounts
superblock.payment_amounts = '7|yyzx'
assert superblock.is_valid() is False
superblock.payment_amounts = '7,|yzx'
assert superblock.is_valid() is False
superblock.payment_amounts = '7|8'
assert superblock.is_valid() is True
superblock.payment_amounts = ' 7|8'
assert superblock.is_valid() is False
superblock.payment_amounts = '7|8 '
assert superblock.is_valid() is False
superblock.payment_amounts = ' 7|8 '
assert superblock.is_valid() is False
# reset
superblock = Superblock(**orig.get_dict())
assert superblock.is_valid() is True
# mess with payment addresses
superblock.payment_addresses = 'yTC62huR4YQEPn9AJHjnQxxreHSbgAoatV|1234 Anywhere ST, Chicago, USA'
assert superblock.is_valid() is False
# leading spaces in payment addresses
superblock.payment_addresses = ' yTC62huR4YQEPn9AJHjnQxxreHSbgAoatV'
superblock.payment_amounts = '5.00'
assert superblock.is_valid() is False
# trailing spaces in payment addresses
superblock.payment_addresses = 'yTC62huR4YQEPn9AJHjnQxxreHSbgAoatV '
superblock.payment_amounts = '5.00'
assert superblock.is_valid() is False
# leading & trailing spaces in payment addresses
superblock.payment_addresses = ' yTC62huR4YQEPn9AJHjnQxxreHSbgAoatV '
superblock.payment_amounts = '5.00'
assert superblock.is_valid() is False
# single payment addr/amt is ok
superblock.payment_addresses = 'yTC62huR4YQEPn9AJHjnQxxreHSbgAoatV'
superblock.payment_amounts = '5.00'
assert superblock.is_valid() is True
# ensure number of payment addresses matches number of payments
superblock.payment_addresses = 'yTC62huR4YQEPn9AJHjnQxxreHSbgAoatV'
superblock.payment_amounts = '37.00|23.24'
assert superblock.is_valid() is False
superblock.payment_addresses = 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui|yTC62huR4YQEPn9AJHjnQxxreHSbgAoatV'
superblock.payment_amounts = '37.00'
assert superblock.is_valid() is False
# ensure amounts greater than zero
superblock.payment_addresses = 'yTC62huR4YQEPn9AJHjnQxxreHSbgAoatV'
superblock.payment_amounts = '-37.00'
assert superblock.is_valid() is False
# reset
superblock = Superblock(**orig.get_dict())
assert superblock.is_valid() is True
# mess with proposal hashes
superblock.proposal_hashes = '7|yyzx'
assert superblock.is_valid() is False
superblock.proposal_hashes = '7,|yyzx'
assert superblock.is_valid() is False
superblock.proposal_hashes = '0|1'
assert superblock.is_valid() is False
superblock.proposal_hashes = '0000000000000000000000000000000000000000000000000000000000000000|1111111111111111111111111111111111111111111111111111111111111111'
assert superblock.is_valid() is True
# reset
superblock = Superblock(**orig.get_dict())
assert superblock.is_valid() is True
def test_serialisable_fields():
s1 = ['event_block_height', 'payment_addresses', 'payment_amounts', 'proposal_hashes']
s2 = Superblock.serialisable_fields()
s1.sort()
s2.sort()
assert s2 == s1
def test_deterministic_superblock_creation(go_list_proposals):
import ravendarklib
import misc
from ravendarkd import RavenDarkDaemon
ravendarkd = RavenDarkDaemon.from_ravendark_conf(config.ravendark_conf)
for item in go_list_proposals:
(go, subobj) = GovernanceObject.import_gobject_from_ravendarkd(ravendarkd, item)
max_budget = 60
prop_list = Proposal.approved_and_ranked(proposal_quorum=1, next_superblock_max_budget=max_budget)
# MAX_GOVERNANCE_OBJECT_DATA_SIZE defined in governance-object.h
maxgovobjdatasize = 16 * 1024
sb = ravendarklib.create_superblock(prop_list, 72000, max_budget, misc.now(), maxgovobjdatasize)
assert sb.event_block_height == 72000
assert sb.payment_addresses == 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui|yTC62huR4YQEPn9AJHjnQxxreHSbgAoatV'
assert sb.payment_amounts == '25.75000000|32.01000000'
assert sb.proposal_hashes == 'dfd7d63979c0b62456b63d5fc5306dbec451180adee85876cbf5b28c69d1a86c|0523445762025b2e01a2cd34f1d10f4816cf26ee1796167e5b029901e5873630'
assert sb.hex_hash() == 'bb3f33ccf95415c396bd09d35325dbcbc7b067010d51c7ccf772a9e839c1e414'
def test_superblock_size_limit(go_list_proposals):
import ravendarklib
import misc
from ravendarkd import RavenDarkDaemon
ravendarkd = RavenDarkDaemon.from_ravendark_conf(config.ravendark_conf)
for item in go_list_proposals:
(go, subobj) = GovernanceObject.import_gobject_from_ravendarkd(ravendarkd, item)
max_budget = 60
prop_list = Proposal.approved_and_ranked(proposal_quorum=1, next_superblock_max_budget=max_budget)
maxgovobjdatasize = 469
sb = ravendarklib.create_superblock(prop_list, 72000, max_budget, misc.now(), maxgovobjdatasize)
# two proposals in the list, but...
assert len(prop_list) == 2
# only one should have been included in the SB, because the 2nd one is over the limit
assert sb.event_block_height == 72000
assert sb.payment_addresses == 'yYe8KwyaUu5YswSYmB3q3ryx8XTUu9y7Ui'
assert sb.payment_amounts == '25.75000000'
assert sb.proposal_hashes == 'dfd7d63979c0b62456b63d5fc5306dbec451180adee85876cbf5b28c69d1a86c'
assert sb.hex_hash() == '6b8cababf797644f1d62003e4cc68c1c40a8c1873c8a68ed0fc88772ea77cc44'
def test_deterministic_superblock_selection(go_list_superblocks):
from ravendarkd import RavenDarkDaemon
ravendarkd = RavenDarkDaemon.from_ravendark_conf(config.ravendark_conf)
for item in go_list_superblocks:
(go, subobj) = GovernanceObject.import_gobject_from_ravendarkd(ravendarkd, item)
# the highest hash wins on a tie -- so just order by hash
sb = Superblock.find_highest_deterministic('542f4433e438bdd64697b8381fda1a7a9b7a111c3a4e32fad524d1821d820394')
assert sb.object_hash == 'bc2834f357da7504138566727c838e6ada74d079e63b6104701f4f8eb05dae36'
| [] | [] | ["SENTINEL_ENV", "SENTINEL_CONFIG"] | [] | ["SENTINEL_ENV", "SENTINEL_CONFIG"] | python | 2 | 0 |
cmd/lncli/main.go | // Copyright (c) 2013-2017 The btcsuite developers
// Copyright (c) 2015-2016 The Decred developers
// Copyright (C) 2015-2017 The Lightning Network Developers
package main
import (
"fmt"
"io/ioutil"
"os"
"os/user"
"path/filepath"
"strings"
macaroon "gopkg.in/macaroon.v2"
"github.com/btcsuite/btcutil"
"github.com/lightningnetwork/lnd/build"
"github.com/lightningnetwork/lnd/lncfg"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/macaroons"
"github.com/urfave/cli"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
const (
defaultDataDir = "data"
defaultChainSubDir = "chain"
defaultTLSCertFilename = "tls.cert"
defaultMacaroonFilename = "admin.macaroon"
defaultRPCPort = "10009"
defaultRPCHostPort = "localhost:" + defaultRPCPort
)
var (
defaultLndDir = btcutil.AppDataDir("lnd", false)
defaultTLSCertPath = filepath.Join(defaultLndDir, defaultTLSCertFilename)
// maxMsgRecvSize is the largest message our client will receive. We
// set this to 200MiB atm.
maxMsgRecvSize = grpc.MaxCallRecvMsgSize(1 * 1024 * 1024 * 200)
)
func fatal(err error) {
fmt.Fprintf(os.Stderr, "[lncli] %v\n", err)
os.Exit(1)
}
func getWalletUnlockerClient(ctx *cli.Context) (lnrpc.WalletUnlockerClient, func()) {
conn := getClientConn(ctx, true)
cleanUp := func() {
conn.Close()
}
return lnrpc.NewWalletUnlockerClient(conn), cleanUp
}
func getClient(ctx *cli.Context) (lnrpc.LightningClient, func()) {
conn := getClientConn(ctx, false)
cleanUp := func() {
conn.Close()
}
return lnrpc.NewLightningClient(conn), cleanUp
}
func getClientConn(ctx *cli.Context, skipMacaroons bool) *grpc.ClientConn {
// First, we'll parse the args from the command.
tlsCertPath, macPath, err := extractPathArgs(ctx)
if err != nil {
fatal(err)
}
// Load the specified TLS certificate and build transport credentials
// with it.
creds, err := credentials.NewClientTLSFromFile(tlsCertPath, "")
if err != nil {
fatal(err)
}
// Create a dial options array.
opts := []grpc.DialOption{
grpc.WithTransportCredentials(creds),
}
// Only process macaroon credentials if --no-macaroons isn't set and
// if we're not skipping macaroon processing.
if !ctx.GlobalBool("no-macaroons") && !skipMacaroons {
// Load the specified macaroon file.
macBytes, err := ioutil.ReadFile(macPath)
if err != nil {
fatal(fmt.Errorf("unable to read macaroon path (check "+
"the network setting!): %v", err))
}
mac := &macaroon.Macaroon{}
if err = mac.UnmarshalBinary(macBytes); err != nil {
fatal(fmt.Errorf("unable to decode macaroon: %v", err))
}
macConstraints := []macaroons.Constraint{
// We add a time-based constraint to prevent replay of the
// macaroon. It's good for 60 seconds by default to make up for
// any discrepancy between client and server clocks, but leaking
// the macaroon before it becomes invalid makes it possible for
// an attacker to reuse the macaroon. In addition, the validity
// time of the macaroon is extended by the time the server clock
// is behind the client clock, or shortened by the time the
// server clock is ahead of the client clock (or invalid
// altogether if, in the latter case, this time is more than 60
// seconds).
// TODO(aakselrod): add better anti-replay protection.
macaroons.TimeoutConstraint(ctx.GlobalInt64("macaroontimeout")),
// Lock macaroon down to a specific IP address.
macaroons.IPLockConstraint(ctx.GlobalString("macaroonip")),
// ... Add more constraints if needed.
}
// Apply constraints to the macaroon.
constrainedMac, err := macaroons.AddConstraints(mac, macConstraints...)
if err != nil {
fatal(err)
}
// Now we append the macaroon credentials to the dial options.
cred := macaroons.NewMacaroonCredential(constrainedMac)
opts = append(opts, grpc.WithPerRPCCredentials(cred))
}
// We need to use a custom dialer so we can also connect to unix sockets
// and not just TCP addresses.
genericDialer := lncfg.ClientAddressDialer(defaultRPCPort)
opts = append(opts, grpc.WithContextDialer(genericDialer))
opts = append(opts, grpc.WithDefaultCallOptions(maxMsgRecvSize))
conn, err := grpc.Dial(ctx.GlobalString("rpcserver"), opts...)
if err != nil {
fatal(fmt.Errorf("unable to connect to RPC server: %v", err))
}
return conn
}
// extractPathArgs parses the TLS certificate and macaroon paths from the
// command.
func extractPathArgs(ctx *cli.Context) (string, string, error) {
// We'll start off by parsing the active chain and network. These are
// needed to determine the correct path to the macaroon when not
// specified.
chain := strings.ToLower(ctx.GlobalString("chain"))
switch chain {
case "bitcoin", "litecoin":
default:
return "", "", fmt.Errorf("unknown chain: %v", chain)
}
network := strings.ToLower(ctx.GlobalString("network"))
switch network {
case "mainnet", "testnet", "regtest", "simnet":
default:
return "", "", fmt.Errorf("unknown network: %v", network)
}
// We'll now fetch the lnddir so we can make a decision on how to
// properly read the macaroons (if needed) and also the cert. This will
// either be the default, or will have been overwritten by the end
// user.
lndDir := cleanAndExpandPath(ctx.GlobalString("lnddir"))
// If the macaroon path as been manually provided, then we'll only
// target the specified file.
var macPath string
if ctx.GlobalString("macaroonpath") != "" {
macPath = cleanAndExpandPath(ctx.GlobalString("macaroonpath"))
} else {
// Otherwise, we'll go into the path:
// lnddir/data/chain/<chain>/<network> in order to fetch the
// macaroon that we need.
macPath = filepath.Join(
lndDir, defaultDataDir, defaultChainSubDir, chain,
network, defaultMacaroonFilename,
)
}
tlsCertPath := cleanAndExpandPath(ctx.GlobalString("tlscertpath"))
// If a custom lnd directory was set, we'll also check if custom paths
// for the TLS cert and macaroon file were set as well. If not, we'll
// override their paths so they can be found within the custom lnd
// directory set. This allows us to set a custom lnd directory, along
// with custom paths to the TLS cert and macaroon file.
if lndDir != defaultLndDir {
tlsCertPath = filepath.Join(lndDir, defaultTLSCertFilename)
}
return tlsCertPath, macPath, nil
}
func main() {
app := cli.NewApp()
app.Name = "lncli"
app.Version = build.Version()
app.Usage = "control plane for your Lightning Network Daemon (lnd)"
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "rpcserver",
Value: defaultRPCHostPort,
Usage: "host:port of ln daemon",
},
cli.StringFlag{
Name: "lnddir",
Value: defaultLndDir,
Usage: "path to lnd's base directory",
},
cli.StringFlag{
Name: "tlscertpath",
Value: defaultTLSCertPath,
Usage: "path to TLS certificate",
},
cli.StringFlag{
Name: "chain, c",
Usage: "the chain lnd is running on e.g. bitcoin",
Value: "bitcoin",
},
cli.StringFlag{
Name: "network, n",
Usage: "the network lnd is running on e.g. mainnet, " +
"testnet, etc.",
Value: "mainnet",
},
cli.BoolFlag{
Name: "no-macaroons",
Usage: "disable macaroon authentication",
},
cli.StringFlag{
Name: "macaroonpath",
Usage: "path to macaroon file",
},
cli.Int64Flag{
Name: "macaroontimeout",
Value: 60,
Usage: "anti-replay macaroon validity time in seconds",
},
cli.StringFlag{
Name: "macaroonip",
Usage: "if set, lock macaroon to specific IP address",
},
}
app.Commands = []cli.Command{
createCommand,
unlockCommand,
changePasswordCommand,
newAddressCommand,
estimateFeeCommand,
sendManyCommand,
sendCoinsCommand,
listUnspentCommand,
connectCommand,
disconnectCommand,
openChannelCommand,
closeChannelCommand,
closeAllChannelsCommand,
abandonChannelCommand,
listPeersCommand,
walletBalanceCommand,
channelBalanceCommand,
getInfoCommand,
pendingChannelsCommand,
sendPaymentCommand,
payInvoiceCommand,
sendToRouteCommand,
addInvoiceCommand,
lookupInvoiceCommand,
listInvoicesCommand,
listChannelsCommand,
closedChannelsCommand,
listPaymentsCommand,
describeGraphCommand,
getNodeMetricsCommand,
getChanInfoCommand,
getNodeInfoCommand,
queryRoutesCommand,
getNetworkInfoCommand,
debugLevelCommand,
decodePayReqCommand,
listChainTxnsCommand,
stopCommand,
signMessageCommand,
verifyMessageCommand,
feeReportCommand,
updateChannelPolicyCommand,
forwardingHistoryCommand,
exportChanBackupCommand,
verifyChanBackupCommand,
restoreChanBackupCommand,
bakeMacaroonCommand,
}
// Add any extra commands determined by build flags.
app.Commands = append(app.Commands, autopilotCommands()...)
app.Commands = append(app.Commands, invoicesCommands()...)
app.Commands = append(app.Commands, routerCommands()...)
app.Commands = append(app.Commands, walletCommands()...)
app.Commands = append(app.Commands, watchtowerCommands()...)
app.Commands = append(app.Commands, wtclientCommands()...)
if err := app.Run(os.Args); err != nil {
fatal(err)
}
}
// cleanAndExpandPath expands environment variables and leading ~ in the
// passed path, cleans the result, and returns it.
// This function is taken from https://github.com/btcsuite/btcd
func cleanAndExpandPath(path string) string {
if path == "" {
return ""
}
// Expand initial ~ to OS specific home directory.
if strings.HasPrefix(path, "~") {
var homeDir string
user, err := user.Current()
if err == nil {
homeDir = user.HomeDir
} else {
homeDir = os.Getenv("HOME")
}
path = strings.Replace(path, "~", homeDir, 1)
}
// NOTE: The os.ExpandEnv doesn't work with Windows-style %VARIABLE%,
// but the variables can still be expanded via POSIX-style $VARIABLE.
return filepath.Clean(os.ExpandEnv(path))
}
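// Illustrative behaviour (values are hypothetical): with HOME=/home/alice and
// NETWORK=mainnet in the environment, cleanAndExpandPath("~/.lnd/$NETWORK")
// returns "/home/alice/.lnd/mainnet".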
| ["\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 |
tasks/libs/common/github_api.py | import json
import os
import platform
import subprocess
from invoke.exceptions import Exit
from .remote_api import RemoteAPI
__all__ = ["GithubAPI", "get_github_token"]
class GithubAPI(RemoteAPI):
"""
Helper class to perform API calls against the Github API, using a Github PAT.
"""
BASE_URL = "https://api.github.com"
def __init__(self, repository="", api_token=""):
super(GithubAPI, self).__init__("GitHub API")
self.api_token = api_token
self.repository = repository
self.authorization_error_message = (
"HTTP 401: The token is invalid. Is the Github token provided still allowed to perform this action?"
)
def repo(self):
"""
Gets the repo info.
"""
path = f"/repos/{self.repository}"
return self.make_request(path, method="GET", json_output=True)
def get_branch(self, branch_name):
"""
Gets info on a given branch in the given Github repository.
"""
path = f"/repos/{self.repository}/branches/{branch_name}"
return self.make_request(path, method="GET", json_output=True)
def create_pr(self, pr_title, pr_body, base_branch, target_branch):
"""
Creates a PR in the given Github repository.
"""
path = f"/repos/{self.repository}/pulls"
data = json.dumps({"head": target_branch, "base": base_branch, "title": pr_title, "body": pr_body})
return self.make_request(path, method="POST", json_output=True, data=data)
def update_pr(self, pull_number, milestone_number, labels):
"""
Updates a given PR with the provided milestone number and labels.
"""
path = f"/repos/{self.repository}/issues/{pull_number}"
data = json.dumps(
{
"milestone": milestone_number,
"labels": labels,
}
)
return self.make_request(path, method="POST", json_output=True, data=data)
def get_milestone_by_name(self, milestone_name):
"""
Searches for a milestone in the given repository that matches the provided name,
and returns data about it.
"""
path = f"/repos/{self.repository}/milestones"
res = self.make_request(path, method="GET", json_output=True)
for milestone in res:
if milestone["title"] == milestone_name:
return milestone
return None
def make_request(self, path, headers=None, method="GET", data=None, json_output=False):
"""
Utility to make an HTTP request to the GitHub API.
See RemoteAPI#request.
Adds "Authorization: token {self.api_token}" and "Accept: application/vnd.github.v3+json"
to the headers to be able to authenticate ourselves to GitHub.
"""
headers = dict(headers or [])
headers["Authorization"] = f"token {self.api_token}"
headers["Accept"] = "application/vnd.github.v3+json"
return self.request(
path=path,
headers=headers,
data=data,
json_input=False,
json_output=json_output,
stream_output=False,
method=method,
)
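# Illustrative usage of the class above (the repository and milestone values
# are hypothetical):
#
#   gh = GithubAPI(repository="owner/repo", api_token=get_github_token())
#   milestone = gh.get_milestone_by_name("7.40.0")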
def get_github_token():
if "GITHUB_TOKEN" not in os.environ:
print("GITHUB_TOKEN not found in env. Trying keychain...")
if platform.system() == "Darwin":
try:
output = subprocess.check_output(
['security', 'find-generic-password', '-a', os.environ["USER"], '-s', 'GITHUB_TOKEN', '-w']
)
if output:
return output.strip()
except subprocess.CalledProcessError:
print("GITHUB_TOKEN not found in keychain...")
pass
raise Exit(
message="Please create a 'repo' access token at "
"https://github.com/settings/tokens and "
"add it as GITHUB_TOKEN in your keychain "
"or export it from your .bashrc or equivalent.",
code=1,
)
return os.environ["GITHUB_TOKEN"]
| [] | [] | ["USER", "GITHUB_TOKEN"] | [] | ["USER", "GITHUB_TOKEN"] | python | 2 | 0 |
main.go | package main
import (
"net/http"
"os"
"github.com/willmadison/resourceful/repository"
resourceful "github.com/willmadison/resourceful/types"
)
func main() {
server := resourceful.NewServer(repository.NewInMemory())
http.Handle("/", server.Router)
http.ListenAndServe(":"+os.Getenv("PORT"), nil)
}
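// Illustrative: with the PORT environment variable set to "8080", the server
// listens on ":8080".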
| ["\"PORT\""] | [] | ["PORT"] | [] | ["PORT"] | go | 1 | 0 |
server.py | import os
import sys
import logging
from urllib.parse import urlparse
from flask import Flask, jsonify, request, flash, render_template, redirect, \
make_response, url_for, render_template_string, get_flashed_messages
from flask_login import LoginManager, current_user, login_user, logout_user, \
UserMixin
from flask_jwt_extended import (
jwt_optional, create_access_token,
jwt_refresh_token_required, create_refresh_token, get_csrf_token,
get_jwt_identity, set_access_cookies,
set_refresh_cookies, unset_jwt_cookies
)
from flask_ldap3_login import LDAP3LoginManager
from flask_ldap3_login.forms import LDAPLoginForm
import i18n
from qwc_services_core.jwt import jwt_manager
from qwc_services_core.tenant_handler import (
TenantHandler, TenantPrefixMiddleware, TenantSessionInterface)
app = Flask(__name__)
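# NOTE: bool() below is applied to the raw environment string, so any
# non-empty value (including the string "False") enables secure cookies.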
app.config['JWT_COOKIE_SECURE'] = bool(os.environ.get(
'JWT_COOKIE_SECURE', False))
app.config['JWT_COOKIE_SAMESITE'] = os.environ.get(
'JWT_COOKIE_SAMESITE', 'Lax')
app.config['JWT_ACCESS_TOKEN_EXPIRES'] = int(os.environ.get(
'JWT_ACCESS_TOKEN_EXPIRES', 12*3600))
jwt = jwt_manager(app)
app.secret_key = app.config['JWT_SECRET_KEY']
i18n.set('load_path', [
os.path.join(os.path.dirname(__file__), 'translations')])
SUPPORTED_LANGUAGES = ['en', 'de']
# Setting WTF_I18N_ENABLED to False disables Flask-WTF's i18n layer, which
# *enables* WTForms' built-in message translations instead.
# https://wtforms.readthedocs.io/en/2.3.x/i18n/
app.config['WTF_I18N_ENABLED'] = False
# https://flask-ldap3-login.readthedocs.io/en/latest/quick_start.html
# Hostname of your LDAP Server
app.config['LDAP_HOST'] = os.environ.get('LDAP_HOST', 'localhost')
# The port number of your LDAP server.
app.config['LDAP_PORT'] = int(os.environ.get('LDAP_PORT', 389))
# Set to True if your server uses SSL
app.config['LDAP_USE_SSL'] = os.environ.get('LDAP_USE_SSL', False)
# Base DN of your directory
app.config['LDAP_BASE_DN'] = os.environ.get(
'LDAP_BASE_DN', 'dc=example,dc=org')
# Users DN to be prepended to the Base DN
app.config['LDAP_USER_DN'] = os.environ.get('LDAP_USER_DN', 'ou=users')
# Groups DN to be prepended to the Base DN
app.config['LDAP_GROUP_DN'] = os.environ.get('LDAP_GROUP_DN', 'ou=groups')
# Search for groups
app.config['LDAP_SEARCH_FOR_GROUPS'] = os.environ.get(
'LDAP_SEARCH_FOR_GROUPS', False)
# Specifies what scope to search in when searching for a specific group
app.config['LDAP_GROUP_SEARCH_SCOPE'] = os.environ.get(
'LDAP_GROUP_SEARCH_SCOPE', 'LEVEL')
# Specifies what object filter to apply when searching for groups.
app.config['LDAP_GROUP_OBJECT_FILTER'] = os.environ.get(
'LDAP_GROUP_OBJECT_FILTER', '(objectclass=group)')
# Specifies the LDAP attribute where group members are declared.
app.config['LDAP_GROUP_MEMBERS_ATTR'] = os.environ.get(
'LDAP_GROUP_MEMBERS_ATTR', 'uniqueMember')
# Specifies what scope to search in when searching for a specific user
app.config['LDAP_USER_SEARCH_SCOPE'] = os.environ.get(
'LDAP_USER_SEARCH_SCOPE', 'LEVEL')
# The RDN attribute for your user schema on LDAP
app.config['LDAP_USER_RDN_ATTR'] = os.environ.get('LDAP_USER_RDN_ATTR', 'cn')
# The Attribute you want users to authenticate to LDAP with.
LDAP_USER_LOGIN_ATTR = os.environ.get('LDAP_USER_LOGIN_ATTR', 'cn')
app.config['LDAP_USER_LOGIN_ATTR'] = LDAP_USER_LOGIN_ATTR
# Default is ldap3.ALL_ATTRIBUTES (*)
app.config['LDAP_GET_USER_ATTRIBUTES'] = os.environ.get(
'LDAP_GET_USER_ATTRIBUTES', '*') # app.config['LDAP_USER_LOGIN_ATTR']
# The Username to bind to LDAP with
app.config['LDAP_BIND_USER_DN'] = os.environ.get('LDAP_BIND_USER_DN', None)
# The Password to bind to LDAP with
app.config['LDAP_BIND_USER_PASSWORD'] = os.environ.get(
'LDAP_BIND_USER_PASSWORD', None)
# Group name attribute in LDAP group response
LDAP_GROUP_NAME_ATTRIBUTE = os.environ.get('LDAP_GROUP_NAME_ATTRIBUTE', 'cn')
# Default is ldap3.ALL_ATTRIBUTES (*)
app.config['LDAP_GET_GROUP_ATTRIBUTES'] = os.environ.get(
'LDAP_GET_GROUP_ATTRIBUTES', '*') # LDAP_GROUP_NAME_ATTRIBUTE
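# Illustrative minimal LDAP configuration via environment (all values are
# placeholders): LDAP_HOST=ldap.example.org LDAP_PORT=636 LDAP_USE_SSL=1
# LDAP_BASE_DN="dc=example,dc=org" LDAP_USER_LOGIN_ATTR=uid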
app.config['DEBUG'] = os.environ.get('FLASK_ENV', '') == 'development'
if app.config['DEBUG']:
logging.getLogger('flask_ldap3_login').setLevel(logging.DEBUG)
login_manager = LoginManager(app) # Setup a Flask-Login Manager
ldap_manager = LDAP3LoginManager(app) # Setup a LDAP3 Login Manager.
if os.environ.get('TENANT_HEADER'):
app.wsgi_app = TenantPrefixMiddleware(
app.wsgi_app, os.environ.get('TENANT_HEADER'))
if os.environ.get('TENANT_HEADER') or os.environ.get('TENANT_URL_RE'):
app.session_interface = TenantSessionInterface(os.environ)
# Create a dictionary to store the users in when they authenticate.
users = {}
# Declare an Object Model for the user, and make it comply with the
# flask-login UserMixin mixin.
class User(UserMixin):
def __init__(self, dn, username, info, groups):
self.dn = dn
# NOTE: get original LDAP username,
# as login username may be case insensitive
ldap_username = info.get(LDAP_USER_LOGIN_ATTR)
if ldap_username and isinstance(ldap_username, list):
self.username = ldap_username[0]
elif isinstance(ldap_username, str):
self.username = ldap_username
else:
app.logger.warning(
"Could not read attribute '%s' as username"
% LDAP_USER_LOGIN_ATTR
)
self.username = username
if groups:
# LDAP query returns a dict like
# [{'cn': 'dl_qwc_login_r', ...}]
group_names = [
g.get(LDAP_GROUP_NAME_ATTRIBUTE)
for g in groups if g.get(LDAP_GROUP_NAME_ATTRIBUTE) is not None
]
else:
group_names = None
self.groups = group_names
app.logger.debug("Login username: %s" % username)
app.logger.debug("LDAP username: %s" % self.username)
app.logger.debug("LDAP info: %s" % info)
app.logger.debug("LDAP Groups: %s" % groups)
def __repr__(self):
return self.dn
def get_id(self):
return self.dn
# Declare a User Loader for Flask-Login.
# Simply returns the User if it exists in our 'database', otherwise
# returns None.
@login_manager.user_loader
def load_user(id):
if id in users:
return users[id]
return None
# Declare The User Saver for Flask-Ldap3-Login
# This method is called whenever a LDAPLoginForm() successfully validates.
# Here you have to save the user, and return it so it can be used in the
# login controller.
@ldap_manager.save_user
def save_user(dn, username, info, groups):
user = User(dn, username, info, groups)
users[dn] = user
return user
# Declare some routes for usage to show the authentication process.
@app.route('/')
def home():
# Redirect users who are not logged in.
if not current_user or current_user.is_anonymous:
return redirect(url_for('login'))
# User is logged in, so show them a page with their username and dn.
template = """
<h1>Welcome: {{ current_user.username }}</h1>
<h2>{{ current_user.dn }}</h2>
"""
return render_template_string(template)
@app.route('/login', methods=['GET', 'POST'])
def login():
target_url = url_path(request.args.get('url', '/'))
if current_user.is_authenticated:
return redirect(target_url)
form = LDAPLoginForm(meta=wft_locales())
if form.validate_on_submit():
user = form.user
# flask_login stores user in session
login_user(user)
app.logger.info("Logging in as user '%s'" % user.username)
app.logger.info("Groups: %s" % user.groups)
if user.groups:
identity = {'username': user.username, 'groups': user.groups}
else:
identity = user.username
# Create the tokens we will be sending back to the user
access_token = create_access_token(identity)
# refresh_token = create_refresh_token(identity)
resp = make_response(redirect(target_url))
# Set the JWTs and the CSRF double submit protection cookies
# in this response
set_access_cookies(resp, access_token)
return resp
elif form.submit():
# Replace untranslated messages
for field, errors in form.errors.items():
if 'Invalid Username/Password.' in errors:
errors.remove('Invalid Username/Password.')
errors.append(i18n.t('auth.auth_failed'))
return render_template('login.html', form=form, i18n=i18n,
title=i18n.t("auth.login_page_title"))
@app.route('/logout', methods=['GET', 'POST'])
@jwt_optional
def logout():
target_url = url_path(request.args.get('url', '/'))
resp = make_response(redirect(target_url))
unset_jwt_cookies(resp)
logout_user()
return resp
""" readyness probe endpoint """
@app.route("/ready", methods=['GET'])
def ready():
return jsonify({"status": "OK"})
""" liveness probe endpoint """
@app.route("/healthz", methods=['GET'])
def healthz():
return jsonify({"status": "OK"})
@app.before_request
def set_lang():
i18n.set('locale',
request.accept_languages.best_match(SUPPORTED_LANGUAGES) or 'en')
def wft_locales():
return {'locales': [i18n.get('locale')]}
def url_path(url):
""" Extract path and query parameters from URL """
o = urlparse(url)
parts = list(filter(None, [o.path, o.query]))
return '?'.join(parts)
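# Illustrative: url_path('https://example.com/map?lang=de') -> '/map?lang=de'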
if __name__ == '__main__':
app.logger.setLevel(logging.DEBUG)
app.run(host='localhost', port=5017, debug=True)
| [] | [] | ["LDAP_GROUP_DN", "LDAP_GROUP_SEARCH_SCOPE", "LDAP_GROUP_OBJECT_FILTER", "LDAP_USE_SSL", "TENANT_HEADER", "LDAP_USER_LOGIN_ATTR", "JWT_ACCESS_TOKEN_EXPIRES", "LDAP_GROUP_NAME_ATTRIBUTE", "LDAP_BIND_USER_DN", "JWT_COOKIE_SECURE", "LDAP_BIND_USER_PASSWORD", "JWT_COOKIE_SAMESITE", "LDAP_GROUP_MEMBERS_ATTR", "LDAP_PORT", "LDAP_GET_USER_ATTRIBUTES", "LDAP_HOST", "LDAP_USER_DN", "LDAP_SEARCH_FOR_GROUPS", "FLASK_ENV", "LDAP_USER_SEARCH_SCOPE", "TENANT_URL_RE", "LDAP_USER_RDN_ATTR", "LDAP_GET_GROUP_ATTRIBUTES", "LDAP_BASE_DN"] | [] | ["LDAP_GROUP_DN", "LDAP_GROUP_SEARCH_SCOPE", "LDAP_GROUP_OBJECT_FILTER", "LDAP_USE_SSL", "TENANT_HEADER", "LDAP_USER_LOGIN_ATTR", "JWT_ACCESS_TOKEN_EXPIRES", "LDAP_GROUP_NAME_ATTRIBUTE", "LDAP_BIND_USER_DN", "JWT_COOKIE_SECURE", "LDAP_BIND_USER_PASSWORD", "JWT_COOKIE_SAMESITE", "LDAP_GROUP_MEMBERS_ATTR", "LDAP_PORT", "LDAP_GET_USER_ATTRIBUTES", "LDAP_HOST", "LDAP_USER_DN", "LDAP_SEARCH_FOR_GROUPS", "FLASK_ENV", "LDAP_USER_SEARCH_SCOPE", "TENANT_URL_RE", "LDAP_USER_RDN_ATTR", "LDAP_GET_GROUP_ATTRIBUTES", "LDAP_BASE_DN"] | python | 24 | 0 |
app/aicos_eregister/__init__.py | from flask import Blueprint
aicos_eregister = Blueprint('aicos_eregister', __name__,
template_folder='templates')
from . import views
| [] | [] | [] | [] | [] | python | null | null | null
go-controller/pkg/config/config.go | package config
import (
"fmt"
"io/ioutil"
"net"
"net/url"
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
gcfg "gopkg.in/gcfg.v1"
kexec "k8s.io/utils/exec"
)
// The following are global config parameters that other modules may access directly
var (
// ovn-kubernetes version, to be changed with every release
Version = "0.3.0"
// ovn-kubernetes cni config file name
CNIConfFileName = "10-ovn-kubernetes.conf"
// Default holds parsed config file parameters and command-line overrides
Default = DefaultConfig{
MTU: 1400,
ConntrackZone: 64000,
EncapType: "geneve",
EncapIP: "",
InactivityProbe: 100000,
}
// Logging holds logging-related parsed config file parameters and command-line overrides
Logging = LoggingConfig{
File: "", // do not log to a file by default
Level: 4,
}
// CNI holds CNI-related parsed config file parameters and command-line overrides
CNI = CNIConfig{
ConfDir: "/etc/cni/net.d",
Plugin: "ovn-k8s-cni-overlay",
WinHNSNetworkID: "",
}
// Kubernetes holds Kubernetes-related parsed config file parameters and command-line overrides
Kubernetes = KubernetesConfig{
APIServer: "http://localhost:8080",
ServiceCIDR: "172.16.1.0/24",
OVNConfigNamespace: "ovn-kubernetes",
}
// OvnNorth holds northbound OVN database client and server authentication and location details
OvnNorth OvnAuthConfig
// OvnSouth holds southbound OVN database client and server authentication and location details
OvnSouth OvnAuthConfig
)
const (
kubeServiceAccountPath string = "/var/run/secrets/kubernetes.io/serviceaccount/"
kubeServiceAccountFileToken string = "token"
kubeServiceAccountFileCACert string = "ca.crt"
)
// DefaultConfig holds parsed config file parameters and command-line overrides
type DefaultConfig struct {
// MTU value used for the overlay networks.
MTU int `gcfg:"mtu"`
// ConntrackZone affects only the gateway nodes, This value is used to track connections
// that are initiated from the pods so that the reverse connections go back to the pods.
// This represents the conntrack zone used for the conntrack flow rules.
ConntrackZone int `gcfg:"conntrack-zone"`
// EncapType value defines the encapsulation protocol to use to transmit packets between
// hypervisors. By default the value is 'geneve'
EncapType string `gcfg:"encap-type"`
// The IP address of the encapsulation endpoint. If not specified, the IP address the
// NodeName resolves to will be used
EncapIP string `gcfg:"encap-ip"`
// Maximum number of milliseconds of idle time on connection that
// ovn-controller waits before it will send a connection health probe.
InactivityProbe int `gcfg:"inactivity-probe"`
}
// LoggingConfig holds logging-related parsed config file parameters and command-line overrides
type LoggingConfig struct {
// File is the path of the file to log to
File string `gcfg:"logfile"`
// Level is the logging verbosity level
Level int `gcfg:"loglevel"`
}
// CNIConfig holds CNI-related parsed config file parameters and command-line overrides
type CNIConfig struct {
// ConfDir specifies the CNI config directory in which to write the overlay CNI config file
ConfDir string `gcfg:"conf-dir"`
// Plugin specifies the name of the CNI plugin
Plugin string `gcfg:"plugin"`
// Windows ONLY, specifies the ID of the HNS Network to which the containers will be attached
WinHNSNetworkID string `gcfg:"win-hnsnetwork-id"`
}
// KubernetesConfig holds Kubernetes-related parsed config file parameters and command-line overrides
type KubernetesConfig struct {
Kubeconfig string `gcfg:"kubeconfig"`
CACert string `gcfg:"cacert"`
APIServer string `gcfg:"apiserver"`
Token string `gcfg:"token"`
ServiceCIDR string `gcfg:"service-cidr"`
OVNConfigNamespace string `gcfg:"ovn-config-namespace"`
}
// OvnAuthConfig holds client authentication and location details for
// an OVN database (either northbound or southbound)
type OvnAuthConfig struct {
// e.g: "ssl:192.168.1.2:6641,ssl:192.168.1.2:6642"
Address string `gcfg:"address"`
PrivKey string `gcfg:"client-privkey"`
Cert string `gcfg:"client-cert"`
CACert string `gcfg:"client-cacert"`
Scheme OvnDBScheme
northbound bool
externalID string // ovn-nb or ovn-remote
exec kexec.Interface
}
// OvnDBScheme describes the OVN database connection transport method
type OvnDBScheme string
const (
// OvnDBSchemeSSL specifies SSL as the OVN database transport method
OvnDBSchemeSSL OvnDBScheme = "ssl"
// OvnDBSchemeTCP specifies TCP as the OVN database transport method
OvnDBSchemeTCP OvnDBScheme = "tcp"
// OvnDBSchemeUnix specifies Unix domains sockets as the OVN database transport method
OvnDBSchemeUnix OvnDBScheme = "unix"
)
// Config is used to read the structured config file and to cache config in testcases
type config struct {
Default DefaultConfig
Logging LoggingConfig
CNI CNIConfig
Kubernetes KubernetesConfig
OvnNorth OvnAuthConfig
OvnSouth OvnAuthConfig
}
var (
savedDefault DefaultConfig
savedLogging LoggingConfig
savedCNI CNIConfig
savedKubernetes KubernetesConfig
savedOvnNorth OvnAuthConfig
savedOvnSouth OvnAuthConfig
// legacy service-cluster-ip-range CLI option
serviceClusterIPRange string
)
func init() {
// Cache original default config values so they can be restored by testcases
savedDefault = Default
savedLogging = Logging
savedCNI = CNI
savedKubernetes = Kubernetes
savedOvnNorth = OvnNorth
savedOvnSouth = OvnSouth
Flags = append(Flags, CommonFlags...)
Flags = append(Flags, K8sFlags...)
Flags = append(Flags, OvnNBFlags...)
Flags = append(Flags, OvnSBFlags...)
Flags = append(Flags, OVNGatewayFlags...)
}
// RestoreDefaultConfig restores default config values. Used by testcases to
// provide a pristine environment between tests.
func RestoreDefaultConfig() {
Default = savedDefault
Logging = savedLogging
CNI = savedCNI
Kubernetes = savedKubernetes
OvnNorth = savedOvnNorth
OvnSouth = savedOvnSouth
}
// copy members of struct 'src' into the corresponding field in struct 'dst'
// if the field in 'src' is a non-zero int or a non-zero-length string. This
// function should be called with pointers to structs.
func overrideFields(dst, src interface{}) {
dstStruct := reflect.ValueOf(dst).Elem()
srcStruct := reflect.ValueOf(src).Elem()
if dstStruct.Kind() != srcStruct.Kind() || dstStruct.Kind() != reflect.Struct {
panic("mismatched value types")
}
if dstStruct.NumField() != srcStruct.NumField() {
panic("mismatched struct types")
}
// Iterate over each field in dst/src Type so we can get the tags,
// and use the field name to retrieve the field's actual value from
// the dst/src instance
var handled bool
dstType := reflect.TypeOf(dst).Elem()
for i := 0; i < dstType.NumField(); i++ {
structField := dstType.Field(i)
// Ignore private internal fields; we only care about overriding
// 'gcfg' tagged fields read from CLI or the config file
if _, ok := structField.Tag.Lookup("gcfg"); !ok {
continue
}
handled = true
dstField := dstStruct.FieldByName(structField.Name)
srcField := srcStruct.FieldByName(structField.Name)
if !dstField.IsValid() || !srcField.IsValid() {
panic(fmt.Sprintf("invalid struct %q field %q",
dstType.Name(), structField.Name))
}
if dstField.Kind() != srcField.Kind() {
panic(fmt.Sprintf("mismatched struct %q fields %q",
dstType.Name(), structField.Name))
}
switch srcField.Kind() {
case reflect.String:
if srcField.String() != "" {
dstField.Set(srcField)
}
case reflect.Int:
if srcField.Int() != 0 {
dstField.Set(srcField)
}
default:
panic(fmt.Sprintf("unhandled struct %q field %q type %v",
dstType.Name(), structField.Name, srcField.Kind()))
}
}
if !handled {
// No tags found in the struct so we don't know how to override
panic(fmt.Sprintf("failed to find 'gcfg' tags in struct %q", dstType.Name()))
}
}
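// Illustrative: with dst := &LoggingConfig{File: "", Level: 4} and
// src := &LoggingConfig{Level: 5}, overrideFields(dst, src) sets dst.Level to
// 5 and leaves dst.File unchanged, since zero values in src are skipped.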
var cliConfig config
//CommonFlags capture general options.
var CommonFlags = []cli.Flag{
// Mode flags
cli.BoolFlag{
Name: "net-controller",
Usage: "Flag to start the central controller that watches pods/services/policies",
},
cli.StringFlag{
Name: "init-master",
Usage: "initialize master, requires the hostname as argument",
},
cli.StringFlag{
Name: "init-node",
Usage: "initialize node, requires the name that node is registered with in kubernetes cluster",
},
cli.StringFlag{
Name: "pidfile",
Usage: "Name of file that will hold the ovnkube pid (optional)",
},
cli.StringFlag{
Name: "config-file",
Usage: "configuration file path (default: /etc/openvswitch/ovn_k8s.conf)",
},
cli.IntFlag{
Name: "mtu",
Usage: "MTU value used for the overlay networks (default: 1400)",
Destination: &cliConfig.Default.MTU,
},
cli.IntFlag{
Name: "conntrack-zone",
Usage: "For gateway nodes, the conntrack zone used for conntrack flow rules (default: 64000)",
Destination: &cliConfig.Default.ConntrackZone,
},
cli.StringFlag{
Name: "encap-type",
Usage: "The encapsulation protocol to use to transmit packets between hypervisors (default: geneve)",
Destination: &cliConfig.Default.EncapType,
},
cli.StringFlag{
Name: "encap-ip",
Usage: "The IP address of the encapsulation endpoint (default: Node IP address resolved from Node hostname)",
Destination: &cliConfig.Default.EncapIP,
},
cli.IntFlag{
Name: "inactivity-probe",
Usage: "Maximum number of milliseconds of idle time on " +
"connection for ovn-controller before it sends a inactivity probe",
Destination: &cliConfig.Default.InactivityProbe,
},
// Logging options
cli.IntFlag{
Name: "loglevel",
Usage: "log verbosity and level: 5=debug, 4=info, 3=warn, 2=error, 1=fatal (default: 4)",
Destination: &cliConfig.Logging.Level,
},
cli.StringFlag{
Name: "logfile",
Usage: "path of a file to direct log output to",
Destination: &cliConfig.Logging.File,
},
}
// K8sFlags capture Kubernetes-related options
var K8sFlags = []cli.Flag{
cli.StringFlag{
Name: "cluster-subnet",
Value: "11.11.0.0/16",
Usage: "A comma separated set of IP subnets and the associated" +
"hostsubnetlengths to use for the cluster (eg, \"10.128.0.0/14/23,10.0.0.0/14/23\"). " +
"Each entry is given in the form IP address/subnet mask/hostsubnetlength, " +
"the hostsubnetlength is optional and if unspecified defaults to 24. The " +
"hostsubnetlength defines how many IP addresses are dedicated to each node.",
},
// CNI options
cli.StringFlag{
Name: "cni-conf-dir",
Usage: "the CNI config directory in which to write the overlay CNI config file (default: /etc/cni/net.d)",
Destination: &cliConfig.CNI.ConfDir,
},
cli.StringFlag{
Name: "cni-plugin",
Usage: "the name of the CNI plugin (default: ovn-k8s-cni-overlay)",
Destination: &cliConfig.CNI.Plugin,
},
cli.StringFlag{
Name: "win-hnsnetwork-id",
Usage: "the ID of the HNS network to which containers will be attached (default: not set)",
Destination: &cliConfig.CNI.WinHNSNetworkID,
},
cli.StringFlag{
Name: "service-cluster-ip-range",
Usage: "Deprecated alias for k8s-service-cidr.",
Destination: &serviceClusterIPRange,
},
cli.StringFlag{
Name: "k8s-service-cidr",
Usage: "A CIDR notation IP range from which k8s assigns " +
"service cluster IPs. This should be the same as the one " +
"provided for kube-apiserver \"-service-cluster-ip-range\" " +
"option. (default: 172.16.1.0/24)",
Destination: &cliConfig.Kubernetes.ServiceCIDR,
},
cli.StringFlag{
Name: "k8s-kubeconfig",
Usage: "absolute path to the Kubernetes kubeconfig file (not required if the --k8s-apiserver, --k8s-ca-cert, and --k8s-token are given)",
Destination: &cliConfig.Kubernetes.Kubeconfig,
},
cli.StringFlag{
Name: "k8s-apiserver",
Usage: "URL of the Kubernetes API server (not required if --k8s-kubeconfig is given) (default: http://localhost:8443)",
Destination: &cliConfig.Kubernetes.APIServer,
},
cli.StringFlag{
Name: "k8s-cacert",
Usage: "the absolute path to the Kubernetes API CA certificate (not required if --k8s-kubeconfig is given)",
Destination: &cliConfig.Kubernetes.CACert,
},
cli.StringFlag{
Name: "k8s-token",
Usage: "the Kubernetes API authentication token (not required if --k8s-kubeconfig is given)",
Destination: &cliConfig.Kubernetes.Token,
},
cli.StringFlag{
Name: "ovn-config-namespace",
Usage: "specify a namespace which will contain services to config the OVN databases",
Destination: &cliConfig.Kubernetes.OVNConfigNamespace,
},
}
// OvnNBFlags capture OVN northbound database options
var OvnNBFlags = []cli.Flag{
cli.StringFlag{
Name: "nb-address",
Usage: "IP address and port of the OVN northbound API " +
"(eg, ssl://1.2.3.4:6641,ssl://1.2.3.5:6642). Leave empty to " +
"use a local unix socket.",
Destination: &cliConfig.OvnNorth.Address,
},
cli.StringFlag{
Name: "nb-client-privkey",
Usage: "Private key that the client should use for talking to the OVN database. Leave empty to use local unix socket. (default: /etc/openvswitch/ovnnb-privkey.pem)",
Destination: &cliConfig.OvnNorth.PrivKey,
},
cli.StringFlag{
Name: "nb-client-cert",
Usage: "Client certificate that the client should use for talking to the OVN database. Leave empty to use local unix socket. (default: /etc/openvswitch/ovnnb-cert.pem)",
Destination: &cliConfig.OvnNorth.Cert,
},
cli.StringFlag{
Name: "nb-client-cacert",
Usage: "CA certificate that the client should use for talking to the OVN database. Leave empty to use local unix socket. (default: /etc/openvswitch/ovnnb-ca.cert)",
Destination: &cliConfig.OvnNorth.CACert,
},
}
//OvnSBFlags capture OVN southbound database options
var OvnSBFlags = []cli.Flag{
cli.StringFlag{
Name: "sb-address",
Usage: "IP address and port of the OVN southbound API " +
"(eg, ssl://1.2.3.4:6642,ssl://1.2.3.5:6642). " +
"Leave empty to use a local unix socket.",
Destination: &cliConfig.OvnSouth.Address,
},
cli.StringFlag{
Name: "sb-client-privkey",
Usage: "Private key that the client should use for talking to the OVN database. Leave empty to use local unix socket. (default: /etc/openvswitch/ovnsb-privkey.pem)",
Destination: &cliConfig.OvnSouth.PrivKey,
},
cli.StringFlag{
Name: "sb-client-cert",
Usage: "Client certificate that the client should use for talking to the OVN database. Leave empty to use local unix socket. (default: /etc/openvswitch/ovnsb-cert.pem)",
Destination: &cliConfig.OvnSouth.Cert,
},
cli.StringFlag{
Name: "sb-client-cacert",
Usage: "CA certificate that the client should use for talking to the OVN database. Leave empty to use local unix socket. (default: /etc/openvswitch/ovnsb-ca.cert)",
Destination: &cliConfig.OvnSouth.CACert,
},
}
//OVNGatewayFlags capture L3 Gateway related flags
var OVNGatewayFlags = []cli.Flag{
cli.BoolFlag{
Name: "init-gateways",
Usage: "initialize a gateway in the minion. Only useful with \"init-node\"",
},
cli.StringFlag{
Name: "gateway-interface",
Usage: "The interface in minions that will be the gateway interface. " +
"If none specified, then the node's interface on which the " +
"default gateway is configured will be used as the gateway " +
"interface. Only useful with \"init-gateways\"",
},
cli.StringFlag{
Name: "gateway-nexthop",
Usage: "The external default gateway which is used as a next hop by " +
"OVN gateway. This is many times just the default gateway " +
"of the node in question. If not specified, the default gateway" +
"configured in the node is used. Only useful with " +
"\"init-gateways\"",
},
cli.BoolFlag{
Name: "gateway-spare-interface",
Usage: "If true, assumes that \"gateway-interface\" provided can be " +
"exclusively used for the OVN gateway. When true, only OVN" +
"related traffic can flow through this interface",
},
cli.BoolFlag{
Name: "gateway-local",
Usage: "If true, creates a local gateway (br-local) to let traffic reach " +
"host network and also exit host with iptables NAT",
},
cli.UintFlag{
Name: "gateway-vlanid",
Usage: "The VLAN on which the external network is available. " +
"Valid only for Shared or Spare Gateway interface mode.",
},
cli.BoolFlag{
Name: "nodeport",
Usage: "Setup nodeport based ingress on gateways.",
},
}
// Flags are general command-line flags. Apps should add these flags to their
// own urfave/cli flags and call InitConfig() early in the application.
var Flags []cli.Flag
// Defaults are a set of flags to indicate which options should be read from
// ovs-vsctl and used as default values if the option is not found via the config
// file or command-line
type Defaults struct {
OvnNorthAddress bool
K8sAPIServer bool
K8sToken bool
K8sCert bool
}
const (
ovsVsctlCommand = "ovs-vsctl"
)
// Can't use pkg/ovs or pkg/util here because those packages import this one
func rawExec(exec kexec.Interface, cmd string, args ...string) (string, error) {
cmdPath, err := exec.LookPath(cmd)
if err != nil {
return "", err
}
logrus.Debugf("exec: %s %s", cmdPath, strings.Join(args, " "))
out, err := exec.Command(cmdPath, args...).CombinedOutput()
if err != nil {
logrus.Debugf("exec: %s %s => %v", cmdPath, strings.Join(args, " "), err)
return "", err
}
return strings.TrimSpace(string(out)), nil
}
// Can't use pkg/ovs or pkg/util here because those packages import this one
func runOVSVsctl(exec kexec.Interface, args ...string) (string, error) {
newArgs := append([]string{"--timeout=15"}, args...)
out, err := rawExec(exec, ovsVsctlCommand, newArgs...)
if err != nil {
return "", err
}
return strings.Trim(strings.TrimSpace(out), "\""), nil
}
func getOVSExternalID(exec kexec.Interface, name string) string {
out, err := runOVSVsctl(exec,
"--if-exists",
"get",
"Open_vSwitch",
".",
"external_ids:"+name)
if err != nil {
logrus.Debugf("failed to get OVS external_id %s: %v\n\t%s", name, err, out)
return ""
}
return out
}
func setOVSExternalID(exec kexec.Interface, key, value string) error {
out, err := runOVSVsctl(exec,
"set",
"Open_vSwitch",
".",
fmt.Sprintf("external_ids:%s=%s", key, value))
if err != nil {
return fmt.Errorf("Error setting OVS external ID '%s=%s': %v\n %q", key, value, err, out)
}
return nil
}
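// For illustration (the key and value below are hypothetical), these two
// helpers translate to ovs-vsctl invocations like:
//
// setOVSExternalID(exec, "ovn-remote", "\"ssl:1.2.3.4:6642\"")
// // runs: ovs-vsctl --timeout=15 set Open_vSwitch . external_ids:ovn-remote="ssl:1.2.3.4:6642"
// getOVSExternalID(exec, "ovn-remote")
// // runs: ovs-vsctl --timeout=15 --if-exists get Open_vSwitch . external_ids:ovn-remote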
func buildKubernetesConfig(exec kexec.Interface, cli, file *config, saPath string, defaults *Defaults) error {
// token and ca.crt may come from files mounted in the container.
var saConfig KubernetesConfig
if data, err := ioutil.ReadFile(filepath.Join(saPath, kubeServiceAccountFileToken)); err == nil {
saConfig.Token = string(data)
}
if _, err2 := os.Stat(filepath.Join(saPath, kubeServiceAccountFileCACert)); err2 == nil {
saConfig.CACert = filepath.Join(saPath, kubeServiceAccountFileCACert)
}
overrideFields(&Kubernetes, &saConfig)
// Grab default values from OVS external IDs
if defaults.K8sAPIServer {
Kubernetes.APIServer = getOVSExternalID(exec, "k8s-api-server")
}
if defaults.K8sToken {
Kubernetes.Token = getOVSExternalID(exec, "k8s-api-token")
}
if defaults.K8sCert {
Kubernetes.CACert = getOVSExternalID(exec, "k8s-ca-certificate")
}
// values for token, cacert, kubeconfig, api-server may be found in several places.
// Take the first found when looking in this order: command line options, config file,
// environment variables, service account files
envConfig := KubernetesConfig{
Kubeconfig: os.Getenv("KUBECONFIG"),
CACert: os.Getenv("K8S_CACERT"),
APIServer: os.Getenv("K8S_APISERVER"),
Token: os.Getenv("K8S_TOKEN"),
}
overrideFields(&Kubernetes, &envConfig)
// Copy config file values over default values
overrideFields(&Kubernetes, &file.Kubernetes)
// And CLI overrides over config file and default values
overrideFields(&Kubernetes, &cli.Kubernetes)
if Kubernetes.Kubeconfig != "" && !pathExists(Kubernetes.Kubeconfig) {
return fmt.Errorf("kubernetes kubeconfig file %q not found", Kubernetes.Kubeconfig)
}
if Kubernetes.CACert != "" && !pathExists(Kubernetes.CACert) {
return fmt.Errorf("kubernetes CA certificate file %q not found", Kubernetes.CACert)
}
url, err := url.Parse(Kubernetes.APIServer)
if err != nil {
return fmt.Errorf("kubernetes API server address %q invalid: %v", Kubernetes.APIServer, err)
} else if url.Scheme != "https" && url.Scheme != "http" {
return fmt.Errorf("kubernetes API server URL scheme %q invalid", url.Scheme)
}
// Legacy service-cluster-ip-range CLI option overrides config file or --k8s-service-cidr
if serviceClusterIPRange != "" {
Kubernetes.ServiceCIDR = serviceClusterIPRange
}
if Kubernetes.ServiceCIDR == "" {
return fmt.Errorf("kubernetes service-cidr is required")
} else if _, _, err := net.ParseCIDR(Kubernetes.ServiceCIDR); err != nil {
return fmt.Errorf("kubernetes service network CIDR %q invalid: %v", Kubernetes.ServiceCIDR, err)
}
return nil
}
// getConfigFilePath returns the config file path, plus 'true' if the path is
// the fallback default (i.e., not given by the user) or 'false' if it was
// given explicitly by the user
func getConfigFilePath(ctx *cli.Context) (string, bool) {
configFile := ctx.String("config-file")
if configFile != "" {
return configFile, false
}
// Linux default
if runtime.GOOS != "windows" {
return filepath.Join("/etc", "openvswitch", "ovn_k8s.conf"), true
}
// Windows default
return filepath.Join(os.Getenv("OVS_SYSCONFDIR"), "ovn_k8s.conf"), true
}
// InitConfig reads the config file and common command-line options and
// constructs the global config object from them. It returns the config file
// path (if explicitly specified) or an error
func InitConfig(ctx *cli.Context, exec kexec.Interface, defaults *Defaults) (string, error) {
return initConfigWithPath(ctx, exec, kubeServiceAccountPath, defaults)
}
// InitConfigSa reads the config file and common command-line options and
// constructs the global config object from them. It passes the service account directory.
// It returns the config file path (if explicitly specified) or an error
func InitConfigSa(ctx *cli.Context, exec kexec.Interface, saPath string, defaults *Defaults) (string, error) {
return initConfigWithPath(ctx, exec, saPath, defaults)
}
// initConfigWithPath reads the config file specified by command-line
// arguments (or, if none was given, the default config file) and common
// command-line options, and constructs the global config object from them.
// It returns the config file path (if explicitly specified) or an error
func initConfigWithPath(ctx *cli.Context, exec kexec.Interface, saPath string, defaults *Defaults) (string, error) {
var cfg config
var retConfigFile string
var configFile string
var configFileIsDefault bool
configFile, configFileIsDefault = getConfigFilePath(ctx)
logrus.SetOutput(os.Stderr)
if !configFileIsDefault {
// Only return explicitly specified config file
retConfigFile = configFile
}
f, err := os.Open(configFile)
// Failure to find a default config file is not a hard error
if err != nil && !configFileIsDefault {
return "", fmt.Errorf("failed to open config file %s: %v", configFile, err)
}
if f != nil {
defer f.Close()
// Parse ovn-k8s config file.
if err = gcfg.ReadInto(&cfg, f); err != nil {
return "", fmt.Errorf("failed to parse config file %s: %v", f.Name(), err)
}
logrus.Infof("Parsed config file %s", f.Name())
logrus.Infof("Parsed config: %+v", cfg)
}
if defaults == nil {
defaults = &Defaults{}
}
// Build config that needs no special processing
overrideFields(&Default, &cfg.Default)
overrideFields(&Default, &cliConfig.Default)
overrideFields(&CNI, &cfg.CNI)
overrideFields(&CNI, &cliConfig.CNI)
// Logging setup
overrideFields(&Logging, &cfg.Logging)
overrideFields(&Logging, &cliConfig.Logging)
logrus.SetLevel(logrus.Level(Logging.Level))
if Logging.File != "" {
var file *os.File
if _, err = os.Stat(filepath.Dir(Logging.File)); os.IsNotExist(err) {
dir := filepath.Dir(Logging.File)
if err = os.MkdirAll(dir, 0755); err != nil {
logrus.Errorf("failed to create logfile directory %s (%v). Ignoring..", dir, err)
}
}
file, err = os.OpenFile(Logging.File, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0660)
if err != nil {
logrus.Errorf("failed to open logfile %s (%v). Ignoring..", Logging.File, err)
} else {
logrus.SetOutput(file)
}
}
if err = buildKubernetesConfig(exec, &cliConfig, &cfg, saPath, defaults); err != nil {
return "", err
}
tmpAuth, err := buildOvnAuth(exec, true, &cliConfig.OvnNorth, &cfg.OvnNorth, defaults.OvnNorthAddress)
if err != nil {
return "", err
}
OvnNorth = *tmpAuth
tmpAuth, err = buildOvnAuth(exec, false, &cliConfig.OvnSouth, &cfg.OvnSouth, false)
if err != nil {
return "", err
}
OvnSouth = *tmpAuth
logrus.Debugf("Default config: %+v", Default)
logrus.Debugf("Logging config: %+v", Logging)
logrus.Debugf("CNI config: %+v", CNI)
logrus.Debugf("Kubernetes config: %+v", Kubernetes)
logrus.Debugf("OVN North config: %+v", OvnNorth)
logrus.Debugf("OVN South config: %+v", OvnSouth)
return retConfigFile, nil
}
func pathExists(path string) bool {
_, err := os.Stat(path)
if err != nil && os.IsNotExist(err) {
return false
}
return true
}
// parseAddress parses an OVN database address, which can be of form
// "ssl:1.2.3.4:6641,ssl:1.2.3.5:6641" or "ssl://1.2.3.4:6641,ssl://1.2.3.5:6641"
// and returns the validated address(es) and the scheme
func parseAddress(urlString string) (string, OvnDBScheme, error) {
var parsedAddress, scheme string
var parsedScheme OvnDBScheme
urlString = strings.Replace(urlString, "//", "", -1)
for _, ovnAddress := range strings.Split(urlString, ",") {
splits := strings.Split(ovnAddress, ":")
if len(splits) != 3 {
return "", "", fmt.Errorf("Failed to parse OVN address %s", urlString)
}
hostPort := splits[1] + ":" + splits[2]
if scheme == "" {
scheme = splits[0]
} else if scheme != splits[0] {
return "", "", fmt.Errorf("Invalid protocols in OVN address %s",
urlString)
}
host, port, err := net.SplitHostPort(hostPort)
if err != nil {
return "", "", fmt.Errorf("failed to parse OVN DB host/port %q: %v",
hostPort, err)
}
ip := net.ParseIP(host)
if ip == nil {
return "", "", fmt.Errorf("OVN DB host %q must be an IP address, "+
"not a DNS name", hostPort)
}
if parsedAddress != "" {
parsedAddress += ","
}
parsedAddress += fmt.Sprintf("%s:%s:%s", scheme, host, port)
}
switch {
case scheme == "ssl":
parsedScheme = OvnDBSchemeSSL
case scheme == "tcp":
parsedScheme = OvnDBSchemeTCP
default:
return "", "", fmt.Errorf("unknown OVN DB scheme %q", scheme)
}
return parsedAddress, parsedScheme, nil
}
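// For example (hypothetical addresses), both accepted input styles normalize
// to the same validated form:
//
// parseAddress("ssl://1.2.3.4:6641,ssl://1.2.3.5:6641")
// // => "ssl:1.2.3.4:6641,ssl:1.2.3.5:6641", OvnDBSchemeSSL, nil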
// buildOvnAuth returns an OvnAuthConfig object describing the connection to an
// OVN database, given a connection description string and authentication
// details
func buildOvnAuth(exec kexec.Interface, northbound bool, cliAuth, confAuth *OvnAuthConfig, readAddress bool) (*OvnAuthConfig, error) {
auth := &OvnAuthConfig{
northbound: northbound,
exec: exec,
}
var direction string
if northbound {
auth.externalID = "ovn-nb"
direction = "nb"
} else {
auth.externalID = "ovn-remote"
direction = "sb"
}
// Determine final address so we know how to set cert/key defaults
address := cliAuth.Address
if address == "" {
address = confAuth.Address
}
if address == "" && readAddress {
address = getOVSExternalID(exec, "ovn-"+direction)
}
if strings.HasPrefix(address, "ssl") {
// Set up default SSL cert/key paths
auth.CACert = "/etc/openvswitch/ovn" + direction + "-ca.cert"
auth.PrivKey = "/etc/openvswitch/ovn" + direction + "-privkey.pem"
auth.Cert = "/etc/openvswitch/ovn" + direction + "-cert.pem"
}
// Build the final auth config with overrides from CLI and config file
overrideFields(auth, confAuth)
overrideFields(auth, cliAuth)
if address == "" {
if auth.PrivKey != "" || auth.Cert != "" || auth.CACert != "" {
return nil, fmt.Errorf("certificate or key given; perhaps you mean to use the 'ssl' scheme?")
}
auth.Scheme = OvnDBSchemeUnix
return auth, nil
}
var err error
auth.Address, auth.Scheme, err = parseAddress(address)
if err != nil {
return nil, err
}
switch {
case auth.Scheme == OvnDBSchemeSSL:
if auth.PrivKey == "" || auth.Cert == "" || auth.CACert == "" {
return nil, fmt.Errorf("must specify private key, certificate, and CA certificate for 'ssl' scheme")
}
case auth.Scheme == OvnDBSchemeTCP:
if auth.PrivKey != "" || auth.Cert != "" || auth.CACert != "" {
return nil, fmt.Errorf("certificate or key given; perhaps you mean to use the 'ssl' scheme?")
}
}
return auth, nil
}
func (a *OvnAuthConfig) ensureCACert() error {
if pathExists(a.CACert) {
// CA file exists, nothing to do
return nil
}
// Client can bootstrap the CA from the OVN API. Use nbctl for both
// SB and NB since ovn-sbctl only supports --bootstrap-ca-cert from
// 2.9.90+.
// FIXME: change back to a.ctlCmd when sbctl supports --bootstrap-ca-cert
// https://github.com/openvswitch/ovs/pull/226
args := []string{
"--db=" + a.GetURL(),
"--timeout=5",
}
if a.Scheme == OvnDBSchemeSSL {
args = append(args, "--private-key="+a.PrivKey)
args = append(args, "--certificate="+a.Cert)
args = append(args, "--bootstrap-ca-cert="+a.CACert)
}
args = append(args, "list", "nb_global")
_, _ = rawExec(a.exec, "ovn-nbctl", args...)
if _, err := os.Stat(a.CACert); os.IsNotExist(err) {
logrus.Warnf("bootstrapping %s CA certificate failed", a.CACert)
}
return nil
}
// GetURL returns a URL suitable for passing to ovn-northd which describes the
// transport mechanism for connection to the database
func (a *OvnAuthConfig) GetURL() string {
return a.Address
}
// SetDBAuth sets the authentication configuration and connection method
// for the OVN northbound or southbound database server or client
func (a *OvnAuthConfig) SetDBAuth() error {
if a.Scheme == OvnDBSchemeUnix {
// Nothing to do
return nil
} else if a.Scheme == OvnDBSchemeSSL {
// Both server and client SSL schemes require privkey and cert
if !pathExists(a.PrivKey) {
return fmt.Errorf("private key file %s not found", a.PrivKey)
}
if !pathExists(a.Cert) {
return fmt.Errorf("certificate file %s not found", a.Cert)
}
}
if a.Scheme == OvnDBSchemeSSL {
// Client can bootstrap the CA cert from the DB
if err := a.ensureCACert(); err != nil {
return err
}
// Tell Southbound DB clients (like ovn-controller)
// which certificates to use to talk to the DB.
// Must happen *before* setting the "ovn-remote"
// external-id.
if !a.northbound {
out, err := runOVSVsctl(a.exec, "del-ssl")
if err != nil {
return fmt.Errorf("error deleting ovs-vsctl SSL "+
"configuration: %q (%v)", out, err)
}
out, err = runOVSVsctl(a.exec, "set-ssl", a.PrivKey, a.Cert, a.CACert)
if err != nil {
return fmt.Errorf("error setting client southbound DB SSL options: %v\n %q", err, out)
}
}
}
if err := setOVSExternalID(a.exec, a.externalID, "\""+a.GetURL()+"\""); err != nil {
return err
}
return nil
}
func (a *OvnAuthConfig) updateIP(newIP []string, port string) error {
if a.Address != "" {
s := strings.Split(a.Address, ":")
if len(s) != 3 {
return fmt.Errorf("failed to parse OvnAuthConfig address %q", a.Address)
}
var newPort string
if port != "" {
newPort = port
} else {
newPort = s[2]
}
newAddresses := make([]string, 0, len(newIP))
for _, ipAddress := range newIP {
newAddresses = append(newAddresses, s[0]+":"+ipAddress+":"+newPort)
}
a.Address = strings.Join(newAddresses, ",")
}
return nil
}
// UpdateOVNNodeAuth updates the host and URL in ClientAuth
// for both OvnNorth and OvnSouth. It updates them with the new masterIP.
func UpdateOVNNodeAuth(masterIP []string, southboundDBPort, northboundDBPort string) error {
logrus.Debugf("Update OVN node auth with new master ip: %s", masterIP)
if err := OvnNorth.updateIP(masterIP, northboundDBPort); err != nil {
return fmt.Errorf("failed to update OvnNorth ClientAuth URL: %v", err)
}
if err := OvnSouth.updateIP(masterIP, southboundDBPort); err != nil {
return fmt.Errorf("failed to update OvnSouth ClientAuth URL: %v", err)
}
return nil
}
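// For example (hypothetical values): if OvnNorth.Address is
// "ssl:1.2.3.4:6641", then UpdateOVNNodeAuth([]string{"5.6.7.8"}, "6642", "6641")
// rewrites it to "ssl:5.6.7.8:6641"; the scheme is kept and the given
// northbound port replaces the old one.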
| [
"\"KUBECONFIG\"",
"\"K8S_CACERT\"",
"\"K8S_APISERVER\"",
"\"K8S_TOKEN\"",
"\"OVS_SYSCONFDIR\""
]
| []
| [
"K8S_CACERT",
"K8S_APISERVER",
"KUBECONFIG",
"K8S_TOKEN",
"OVS_SYSCONFDIR"
]
| [] | ["K8S_CACERT", "K8S_APISERVER", "KUBECONFIG", "K8S_TOKEN", "OVS_SYSCONFDIR"] | go | 5 | 0 | |
config/config.go | package config
import (
"os"
"path/filepath"
"sync"
"github.com/spf13/viper"
)
type ConfigWrapper struct {
Viper *viper.Viper
overridden map[string]interface{}
ReadDone bool
}
type DBConfig struct {
Connection string
DBName string
Options string
}
var once sync.Once
var Config *ConfigWrapper
func init() {
Config = GetConfig()
}
func GetConfig() *ConfigWrapper {
once.Do(func() {
Config = NewConfig()
})
return Config
}
func NewConfig() *ConfigWrapper {
c := &ConfigWrapper{}
c.Init()
c.Read()
return c
}
func (c *ConfigWrapper) Init() {
c.overridden = make(map[string]interface{})
c.Viper = viper.New()
c.Viper.SetEnvPrefix("LW")
c.Viper.SetDefault("Debug", false)
c.Viper.BindEnv("Debug")
c.Viper.BindEnv("Lbrynet")
c.Viper.SetDefault("Lbrynet", "http://localhost:5279/")
c.Viper.SetDefault("Address", ":8080")
c.Viper.SetDefault("Host", "http://localhost:8080")
c.Viper.SetDefault("BaseContentURL", "http://localhost:8080/content/")
c.Viper.SetDefault("AccountsEnabled", false)
c.Viper.BindEnv("AccountsEnabled")
c.Viper.SetConfigName("lbrytv") // name of config file (without extension)
c.Viper.AddConfigPath(os.Getenv("LBRYTV_CONFIG_DIR"))
c.Viper.AddConfigPath(ProjectRoot())
c.Viper.AddConfigPath(".")
c.Viper.AddConfigPath("..")
c.Viper.AddConfigPath("../..")
c.Viper.AddConfigPath("$HOME/.lbrytv")
}
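// A minimal lbrytv.yml picked up from one of the search paths above might
// look like this (values are illustrative overrides of the defaults set in Init):
//
// Debug: true
// Lbrynet: http://localhost:5279/
// Address: :8080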
func (c *ConfigWrapper) Read() {
err := c.Viper.ReadInConfig()
if err != nil {
panic(err)
}
c.ReadDone = true
}
// IsProduction is true if we are running in a production environment
func IsProduction() bool {
return !Config.Viper.GetBool("Debug")
}
func ProjectRoot() string {
ex, err := os.Executable()
if err != nil {
panic(err)
}
return filepath.Dir(ex)
}
// Override sets a setting key value to whatever you supply.
// Useful in tests:
// config.Override("Lbrynet", "http://www.google.com:8080/api/proxy")
// defer config.RestoreOverridden()
// ...
func Override(key string, value interface{}) {
Config.overridden[key] = Config.Viper.Get(key)
Config.Viper.Set(key, value)
}
// RestoreOverridden restores original viper values overridden by Override
func RestoreOverridden() {
c := GetConfig()
v := c.Viper
if len(c.overridden) == 0 {
return
}
for k, val := range c.overridden {
v.Set(k, val)
}
c.overridden = make(map[string]interface{})
}
// Concrete viper accessors go here
// AccountsEnabled enables or disables accounts subsystem
func AccountsEnabled() bool {
return Config.Viper.GetBool("AccountsEnabled")
}
// GetAddress determines address to bind http API server to
func GetAddress() string {
return Config.Viper.GetString("Address")
}
// MetricsAddress determines address to bind metrics HTTP server to
func MetricsAddress() string {
return Config.Viper.GetString("MetricsAddress")
}
// MetricsPath determines the path to bind metrics HTTP server to
func MetricsPath() string {
return Config.Viper.GetString("MetricsPath")
}
// GetLbrynet returns the address of SDK server to use
func GetLbrynet() string {
return Config.Viper.GetString("Lbrynet")
}
// GetInternalAPIHost returns the address of internal-api server
func GetInternalAPIHost() string {
return Config.Viper.GetString("InternalAPIHost")
}
// GetDatabase returns postgresql database server connection config
func GetDatabase() DBConfig {
var config DBConfig
Config.Viper.UnmarshalKey("Database", &config)
return config
}
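// The config file section unmarshaled by GetDatabase would look roughly like
// this (illustrative values only):
//
// Database:
//   Connection: postgres://postgres:password@localhost
//   DBName: lbrytv
//   Options: sslmode=disable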
// GetSentryDSN returns sentry.io service DSN
func GetSentryDSN() string {
return Config.Viper.GetString("SentryDSN")
}
// GetProjectURL returns publicly accessible URL for the project
func GetProjectURL() string {
return Config.Viper.GetString("ProjectURL")
}
// GetPublishSourceDir returns directory for storing published files before they're uploaded to lbrynet.
// The directory needs to be accessed by the running SDK instance.
func GetPublishSourceDir() string {
return Config.Viper.GetString("PublishSourceDir")
}
// GetBlobFilesDir returns directory where SDK instance stores blob files.
func GetBlobFilesDir() string {
return Config.Viper.GetString("BlobFilesDir")
}
// GetReflectorAddress returns reflector address in the format of host:port.
func GetReflectorAddress() string {
return Config.Viper.GetString("ReflectorAddress")
}
| [
"\"LBRYTV_CONFIG_DIR\""
]
| []
| [
"LBRYTV_CONFIG_DIR"
]
| [] | ["LBRYTV_CONFIG_DIR"] | go | 1 | 0 | |
v1functions/timer-trigger-azuresearch-index-monitoring/function/run.py | # -*- coding: utf-8 -*-
"""
Azure Functions Timer Trigger Python Sample
- Get Azure Search Index Statistics and store them into DocumentDB
DocumentDB binding reference:
https://docs.microsoft.com/en-us/azure/azure-functions/functions-bindings-documentdb
"""
import sys, os, datetime, json
import httplib, urllib
AZURE_SEARCH_SERVICE_NAME='<azure search service name>'
AZURE_SEARCH_API_VER='<azure search api version: ex. 2015-02-28-Preview>'
AZURE_SEARCH_ADMIN_KEY='<azure search API admin key>'
AZURE_SEARCH_INDEX_NAME='<azure search index name>'
CONTENT_TYPE='application/json'
headers = {
'api-key': AZURE_SEARCH_ADMIN_KEY,
'content-type': "application/json"
}
r_data = ''
try:
conn = httplib.HTTPSConnection('{}.search.windows.net'.format(AZURE_SEARCH_SERVICE_NAME))
conn.request("GET",
"/indexes/{0}/stats?api-version={1}".format(AZURE_SEARCH_INDEX_NAME, AZURE_SEARCH_API_VER),
'', headers)
response = conn.getresponse()
r_data = response.read()
conn.close()
except Exception as e:
print("Request failed: {0}".format(e))
if r_data:
r_jsonobject=json.loads(r_data)
outdoc= {
"doccount": r_jsonobject['documentCount'],
"storagesize": r_jsonobject['storageSize'],
"timestamp": str(datetime.datetime.utcnow())
}
print outdoc
# Writing to DocumentDB (Document parameter name: outputDocument)
with open(os.environ['outputDocument'], 'wb') as f:
json.dump(outdoc,f)
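# For reference, the index stats endpoint queried above returns JSON of the
# form (illustrative numbers):
#
#   {"documentCount": 12345, "storageSize": 6789012}
#
# which is what 'outdoc' is assembled from before being written to DocumentDB.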
| []
| []
| [
"outputDocument"
]
| [] | ["outputDocument"] | python | 1 | 0 | |
app/models/users.py | """This module contains the user model that regesters a new user """
import os
import datetime
from .db import Db
from werkzeug.security import generate_password_hash
class User():
def __init__(self, username, email, password, designation):
self.username = username
self.email = email
self.password = generate_password_hash(password.strip())
self.designation = designation
def __repr__(self):
return {'username': self.username,
'email': self.email,
'password': self.password,
'designation': self.designation}
@staticmethod
def check_user(username):
"""checks if a user has already been register"""
sql = "SELECT * FROM users WHERE users.username=\'%s\' "%(username)
conn = Db.db_connection()
cur = conn.cursor()
cur.execute(sql)
output = cur.fetchone()
return output
@staticmethod
def check_email(email):
"""checks if a user has already been register"""
sql = "SELECT * FROM users WHERE users.email_adress=\'%s\' "%(email)
conn = Db.db_connection()
cur = conn.cursor()
cur.execute(sql)
output = cur.fetchone()
return output
def register_user(self):
"""Regesters a new user into the database"""
sql = 'INSERT INTO users (username,\
pass_word,\
email_adress,\
user_type)\
VALUES(\'%s\', \'%s\', \'%s\',\'%s\');' % (
self.username,
# password was already hashed in __init__
self.password,
self.email,
self.designation
)
conn = Db.db_connection()
cur = conn.cursor()
cur.execute(sql)
conn.commit()
@staticmethod
def get_a_user(id):
sql = f"SELECT * FROM users WHERE users.id={id}"
conn = Db.db_connection()
cur = conn.cursor()
cur.execute(sql)
output = cur.fetchall()
print(f"users with {id} is {output}")
@staticmethod
def update_user(id, username, email, designation):
sql = f"UPDATE users SET username = \'{username}\',\
email_adress =\'{email}\',\
user_type =\'{designation}\'\
WHERE users.id = {id}"
conn = Db.db_connection(os.environ.get('config_name'))
cur = conn.cursor()
cur.execute(sql)
conn.commit()
print("update successful")
@staticmethod
def delete_user(id):
sql = f"DELETE FROM ride_offer WHERE users.id ={id}"
conn = Db.db_connection()
cur = conn.cursor()
cur.execute(sql)
conn.commit()
print(f'successfully deleted user with id {id}')
@staticmethod
def get_all_users():
sql = "SELECT * FROM users"
conn = Db.db_connection()
cur = conn.cursor()
cur.execute(sql)
output = cur.fetchall()
print(f'output is {output}')
return output
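# Example usage (hypothetical values); a sketch only, not part of the app flow:
#
#   user = User('jdoe', '[email protected]', 's3cret', 'driver')
#   if not User.check_user(user.username):
#       user.register_user()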
| []
| []
| [
"config_name"
]
| [] | ["config_name"] | python | 1 | 0 | |
supernode/store/store.go | /*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package store
import (
"context"
"fmt"
"io"
"strings"
"github.com/dragonflyoss/Dragonfly/supernode/config"
)
// Store is a wrapper of the storage which implements the interface of StorageDriver.
type Store struct {
// name is a unique identifier, you can also name it ID.
driverName string
// config is used to init storage driver.
config interface{}
// driver holds a storage which implements the interface of StorageDriver
driver StorageDriver
}
// NewStore creates a new Store instance.
func NewStore(name string, builder StorageBuilder, cfg string) (*Store, error) {
if name == "" || builder == nil {
return nil, fmt.Errorf("plugin name or builder cannot be nil")
}
// init driver with specific config
driver, err := builder(cfg)
if err != nil {
return nil, fmt.Errorf("failed to init storage driver %s: %v", name, err)
}
return &Store{
driverName: name,
config: cfg,
driver: driver,
}, nil
}
// Type returns the plugin type: StoragePlugin.
func (s *Store) Type() config.PluginType {
return config.StoragePlugin
}
// Name returns the plugin name.
func (s *Store) Name() string {
return s.driverName
}
// Get gets the data from the storage driver as an io stream.
func (s *Store) Get(ctx context.Context, raw *Raw, writer io.Writer) error {
if err := isEmptyKey(raw.key); err != nil {
return err
}
return s.driver.Get(ctx, raw, writer)
}
// GetBytes gets the data from the storage driver in bytes.
func (s *Store) GetBytes(ctx context.Context, raw *Raw) ([]byte, error) {
if err := isEmptyKey(raw.key); err != nil {
return nil, err
}
return s.driver.GetBytes(ctx, raw)
}
// Put puts data into the storage as an io stream.
func (s *Store) Put(ctx context.Context, raw *Raw, data io.Reader) error {
if err := isEmptyKey(raw.key); err != nil {
return err
}
return s.driver.Put(ctx, raw, data)
}
// PutBytes puts data into the storage in bytes.
func (s *Store) PutBytes(ctx context.Context, raw *Raw, data []byte) error {
if err := isEmptyKey(raw.key); err != nil {
return err
}
return s.driver.PutBytes(ctx, raw, data)
}
// Remove removes the data from the storage based on raw information.
func (s *Store) Remove(ctx context.Context, raw *Raw) error {
if err := isEmptyKey(raw.key); err != nil {
return err
}
return s.driver.Remove(ctx, raw)
}
// Stat determines whether the data exists based on raw information.
// If it does, it returns some info in the form of the StorageInfo struct.
// If not, it returns ErrNotFound.
func (s *Store) Stat(ctx context.Context, raw *Raw) (*StorageInfo, error) {
if err := isEmptyKey(raw.key); err != nil {
return nil, err
}
return s.driver.Stat(ctx, raw)
}
func isEmptyKey(str string) error {
if strings.TrimSpace(str) == "" {
return ErrEmptyKey
}
return nil
}
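// Example wiring (the builder name, config string, and raw key below are
// hypothetical):
//
// store, err := NewStore("local", localStorageBuilder, cfgString)
// if err != nil {
//     // handle init failure
// }
// err = store.PutBytes(ctx, &Raw{key: "piece-0001"}, data)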
| []
| []
| []
| [] | [] | go | null | null | null |
style.go | package main
import (
"encoding/json"
"os"
homedir "github.com/mitchellh/go-homedir"
"github.com/nsf/termbox-go"
)
type Style struct {
Default_bg termbox.Attribute
Default_fg termbox.Attribute
Rune_fg termbox.Attribute
Space_rune_fg termbox.Attribute
Int_fg termbox.Attribute
Bit_fg termbox.Attribute
Selected_option_bg termbox.Attribute
Search_progress_fg termbox.Attribute
Text_cursor_hex_bg termbox.Attribute
Bit_cursor_hex_bg termbox.Attribute
Int_cursor_hex_bg termbox.Attribute
Fp_cursor_hex_bg termbox.Attribute
Hilite_hex_fg termbox.Attribute
Hilite_rune_fg termbox.Attribute
Field_editor_bg termbox.Attribute
Field_editor_fg termbox.Attribute
Field_editor_last_bg termbox.Attribute
Field_editor_last_fg termbox.Attribute
Field_editor_invalid_bg termbox.Attribute
Field_editor_invalid_fg termbox.Attribute
About_logo_bg termbox.Attribute
Filled_bit_rune rune
Empty_bit_rune rune
Space_rune rune
}
func (s *Style) SaveStyleToFile(filename string) error {
file, err := os.Create(filename)
if err != nil {
return err
}
defer file.Close()
encoder := json.NewEncoder(file)
encoder.SetIndent("", " ")
return encoder.Encode(s)
}
func LoadStyleFromFile(filename string) (*Style, error) {
file, err := os.Open(filename)
if err != nil {
return nil, err
}
defer file.Close()
decoder := json.NewDecoder(file)
ret := &Style{}
err = decoder.Decode(ret)
return ret, err
}
func GetConfigDir() (string, error) {
configdir := os.Getenv("XDG_CONFIG_HOME")
if configdir == "" {
h, err := homedir.Dir()
if err != nil {
return configdir, err
}
configdir = h + string(os.PathSeparator) + ".config" + string(os.PathSeparator) + PROGRAM_NAME
} else {
configdir += string(os.PathSeparator) + PROGRAM_NAME
}
return configdir + string(os.PathSeparator), os.MkdirAll(configdir, 0755)
}
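// Example resolution (hypothetical environment): with
// XDG_CONFIG_HOME=/home/alice/.config and PROGRAM_NAME == "hexed", the config
// directory resolves to /home/alice/.config/hexed/; with XDG_CONFIG_HOME
// unset, the home directory is used instead: ~/.config/hexed/.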
| [
"\"XDG_CONFIG_HOME\""
]
| []
| [
"XDG_CONFIG_HOME"
]
| [] | ["XDG_CONFIG_HOME"] | go | 1 | 0 | |
FastAPIRedisRQ/app/util/redis_rq.py | import redis
import os
from rq import Queue
import sys
sys.path.append("..")
from conf import RedisSettings
# pool = redis.ConnectionPool(host=RedisSettings.HOST, port=RedisSettings.PORT,
# db=0)
pool = redis.ConnectionPool(host=os.getenv("REDIS_HOST", "127.0.0.1"),
port=os.getenv("REDIS_PORT", "6379"), db=0)
redis_conn = redis.Redis(connection_pool=pool)
redis_queue = Queue("test_job", connection=redis_conn, default_timeout=RedisSettings.REDIS_DEFAULT_TIMEOUT)
| []
| []
| [
"REDIS_PORT",
"REDIS_HOST"
]
| [] | ["REDIS_PORT", "REDIS_HOST"] | python | 2 | 0 | |
object_detection/pytorch/tools/test_net.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Set up custom environment before nearly anything else is imported
# NOTE: this should be the first import (do not reorder)
from maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip
import argparse
import os
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.data import make_data_loader
from maskrcnn_benchmark.engine.inference import inference
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.utils.collect_env import collect_env_info
from maskrcnn_benchmark.utils.comm import synchronize, get_rank
from maskrcnn_benchmark.utils.logger import setup_logger
from maskrcnn_benchmark.utils.miscellaneous import mkdir
def main():
parser = argparse.ArgumentParser(description="PyTorch Object Detection Inference")
parser.add_argument(
"--config-file",
default="/private/home/fmassa/github/detectron.pytorch_v2/configs/e2e_faster_rcnn_R_50_C4_1x_caffe2.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
distributed = num_gpus > 1
if distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(
backend="nccl", init_method="env://"
)
synchronize()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
save_dir = ""
logger = setup_logger("maskrcnn_benchmark", save_dir, get_rank())
logger.info("Using {} GPUs".format(num_gpus))
logger.info(cfg)
logger.info("Collecting env info (might take some time)")
logger.info("\n" + collect_env_info())
model = build_detection_model(cfg)
model.to(cfg.MODEL.DEVICE)
output_dir = cfg.OUTPUT_DIR
checkpointer = DetectronCheckpointer(cfg, model, save_dir=output_dir)
_ = checkpointer.load(cfg.MODEL.WEIGHT)
iou_types = ("bbox",)
if cfg.MODEL.MASK_ON:
iou_types = iou_types + ("segm",)
if cfg.MODEL.KEYPOINT_ON:
iou_types = iou_types + ("keypoints",)
output_folders = [None] * len(cfg.DATASETS.TEST)
dataset_names = cfg.DATASETS.TEST
if cfg.OUTPUT_DIR:
for idx, dataset_name in enumerate(dataset_names):
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
mkdir(output_folder)
output_folders[idx] = output_folder
data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)
for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, data_loaders_val):
inference(
model,
data_loader_val,
dataset_name=dataset_name,
iou_types=iou_types,
box_only=False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,
device=cfg.MODEL.DEVICE,
expected_results=cfg.TEST.EXPECTED_RESULTS,
expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
output_folder=output_folder,
)
synchronize()
if __name__ == "__main__":
main()
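# Example multi-GPU invocation (the config path is illustrative); the launcher
# sets WORLD_SIZE and passes --local_rank, both of which this script consumes:
#
#   python -m torch.distributed.launch --nproc_per_node=8 \
#       tools/test_net.py --config-file configs/e2e_mask_rcnn_R_50_FPN_1x.yaml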
| []
| []
| [
"WORLD_SIZE"
]
| [] | ["WORLD_SIZE"] | python | 1 | 0 | |
internal/twitter/twitter_wrapper.go | package twitterwrapper
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"image"
"image/jpeg"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"github.com/dghubble/go-twitter/twitter"
"github.com/dghubble/oauth1"
)
// TwitterImage is the struct that represents a default twitter response for media uploading
type TwitterImage struct {
MediaID int64 `json:"media_id"`
MediaIDString string `json:"media_id_string"`
MediaKey string `json:"media_key"`
Size int `json:"size"`
ExpiresAfterSecs int `json:"expires_after_secs"`
Image struct {
ImageType string `json:"image_type"`
W int `json:"w"`
H int `json:"h"`
} `json:"image"`
}
// TwitterWrapper needs two different clients because dghubble's lib is not able to send tweets with pictures
type TwitterWrapper struct {
TwitterClient *twitter.Client
HTTPClient *http.Client
}
func NewTwitterWrapper() *TwitterWrapper {
config := oauth1.NewConfig(os.Getenv("CONSUMER_KEY"), os.Getenv("CONSUMER_SECRET"))
token := oauth1.NewToken(os.Getenv("ACCESS_TOKEN"), os.Getenv("ACCESS_SECRET"))
httpClient := config.Client(oauth1.NoContext, token)
twitterClient := twitter.NewClient(httpClient)
return &TwitterWrapper{
TwitterClient: twitterClient,
HTTPClient: httpClient,
}
}
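// NewTwitterWrapper reads the four OAuth1 credentials from the environment,
// so a caller would export them first (placeholder values):
//
// export CONSUMER_KEY=xxx CONSUMER_SECRET=xxx
// export ACCESS_TOKEN=xxx ACCESS_SECRET=xxx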
func (t *TwitterWrapper) HandleImagePost(imageURL string) int64 {
res, err := http.Get(imageURL)
if err != nil {
log.Printf("Bad news here, reason: %v", err)
return 0
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
log.Printf("Bad news here, unexpected status: %d", res.StatusCode)
return 0
}
m, _, err := image.Decode(res.Body)
if err != nil {
log.Printf("Bad news here, reason: %v", err)
return 0
}
form := url.Values{}
buf := new(bytes.Buffer)
if err = jpeg.Encode(buf, m, nil); err != nil {
log.Printf("Bad news here, reason: %v", err)
return 0
}
bytesImage := buf.Bytes()
encodedImage := base64.StdEncoding.EncodeToString(bytesImage)
form.Add("media_data", encodedImage)
resp, err := t.HTTPClient.PostForm("https://upload.twitter.com/1.1/media/upload.json?media_category=tweet_image", form)
if err != nil {
log.Printf("Bad news here, reason: %v", err)
return 0
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Printf("Bad news here, reason: %v", err)
return 0
}
var imageResponse TwitterImage
err = json.Unmarshal(body, &imageResponse)
if err != nil {
log.Printf("Bad news here, reason: %v", err)
return 0
}
return int64(imageResponse.MediaID)
}
| [
"\"CONSUMER_KEY\"",
"\"CONSUMER_SECRET\"",
"\"ACCESS_TOKEN\"",
"\"ACCESS_SECRET\""
]
| []
| [
"CONSUMER_KEY",
"ACCESS_SECRET",
"CONSUMER_SECRET",
"ACCESS_TOKEN"
]
| [] | ["CONSUMER_KEY", "ACCESS_SECRET", "CONSUMER_SECRET", "ACCESS_TOKEN"] | go | 4 | 0 | |
client.go | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// [START maps_routespreferred_samples_default]
package main
import (
"context"
"crypto/tls"
"io"
"log"
"os"
"time"
"github.com/golang/protobuf/proto"
v1 "google.golang.org/genproto/googleapis/maps/routes/v1"
"google.golang.org/genproto/googleapis/type/latlng"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
)
const (
serverAddr = "routespreferred.googleapis.com:443"
// Note that setting the field mask to * is OK for testing, but discouraged in
// production.
// For example, for ComputeRoutes, set the field mask to
// "routes.distanceMeters,routes.duration,routes.polyline.encodedPolyline"
// in order to get the route distances, durations, and encoded polylines.
fieldMask = "*"
)
func createWaypoint(lat float64, lng float64) *v1.Waypoint {
return &v1.Waypoint{LocationType: &v1.Waypoint_Location{
Location: &v1.Location{
LatLng: &latlng.LatLng{Latitude: lat, Longitude: lng},
},
}}
}
func callComputeRoutes(client v1.RoutesPreferredClient, ctx *context.Context) {
request := v1.ComputeRoutesRequest{
Origin: createWaypoint(37.420761, -122.081356),
Destination: createWaypoint(37.420999, -122.086894),
TravelMode: v1.RouteTravelMode_DRIVE,
RoutingPreference: v1.RoutingPreference_TRAFFIC_AWARE,
ComputeAlternativeRoutes: true,
Units: v1.Units_METRIC,
LanguageCode: "en-us",
RouteModifiers: &v1.RouteModifiers{
AvoidTolls: false,
AvoidHighways: true,
AvoidFerries: true,
},
PolylineQuality: v1.PolylineQuality_OVERVIEW,
}
marshaler := proto.TextMarshaler{}
log.Printf("Sending request: \n%s", marshaler.Text(&request))
result, err := client.ComputeRoutes(*ctx, &request)
if err != nil {
log.Fatalf("Failed to call ComputeRoutes: %v", err)
}
log.Printf("Result: %s", marshaler.Text(result))
}
func callComputeRouteMatrix(client v1.RoutesPreferredClient, ctx *context.Context) {
request := v1.ComputeRouteMatrixRequest{
Origins: []*v1.RouteMatrixOrigin{
{Waypoint: createWaypoint(37.420761, -122.081356), RouteModifiers: &v1.RouteModifiers{
AvoidTolls: false,
AvoidHighways: true,
AvoidFerries: true,
}},
{Waypoint: createWaypoint(37.403184, -122.097371)},
},
Destinations: []*v1.RouteMatrixDestination{
{Waypoint: createWaypoint(37.420999, -122.086894)},
{Waypoint: createWaypoint(37.383047, -122.044651)},
},
TravelMode: v1.RouteTravelMode_DRIVE,
RoutingPreference: v1.RoutingPreference_TRAFFIC_AWARE,
}
marshaler := proto.TextMarshaler{}
log.Printf("Sending request: \n%s", marshaler.Text(&request))
stream, err := client.ComputeRouteMatrix(*ctx, &request)
if err != nil {
log.Fatalf("Failed to call ComputeRouteMatrix: %v", err)
}
for {
element, err := stream.Recv()
if err == io.EOF {
break
}
if err != nil {
log.Fatalf("Received error in ComputeRouteMatrix stream: %v", err)
}
log.Printf("Element: %s\n", marshaler.Text(element))
}
}
func main() {
config := tls.Config{}
conn, err := grpc.Dial(serverAddr,
grpc.WithTransportCredentials(credentials.NewTLS(&config)))
if err != nil {
log.Fatalf("Failed to connect: %v", err)
}
defer conn.Close()
client := v1.NewRoutesPreferredClient(conn)
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
ctx = metadata.AppendToOutgoingContext(ctx, "X-Goog-Api-Key", os.Getenv("GOOGLE_MAPS_API_KEY"))
ctx = metadata.AppendToOutgoingContext(ctx, "X-Goog-Fieldmask", fieldMask)
defer cancel()
callComputeRoutes(client, &ctx)
callComputeRouteMatrix(client, &ctx)
}
// [END maps_routespreferred_samples_default]
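// To run this sample, supply the API key via the environment (the key value
// below is a placeholder):
//
// GOOGLE_MAPS_API_KEY=your-key go run client.go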
| [
"\"GOOGLE_MAPS_API_KEY\""
]
| []
| [
"GOOGLE_MAPS_API_KEY"
]
| [] | ["GOOGLE_MAPS_API_KEY"] | go | 1 | 0 | |
src/avalara/client.py | """
AvaTax Software Development Kit for Python.
Copyright 2019 Avalara, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author Robert Bronson
@author Phil Werner
@author Adrienne Karnoski
@author Han Bao
@copyright 2019 Avalara, Inc.
@license https://www.apache.org/licenses/LICENSE-2.0
@version TBD
@link https://github.com/avadev/AvaTax-REST-V2-Python-SDK
"""
from requests.auth import HTTPBasicAuth
from ._str_version import str_type
from . import client_methods
import os
class AvataxClient(client_methods.Mixin):
"""Class for our Avatax client."""
def __init__(self, app_name=None, app_version=None, machine_name=None,
environment=None, timeout_limit=None):
"""
Initialize the sandbox client.
By default the client object environment will be production. For
sandbox API, set the environment variable to sandbox.
:param string app_name: The name of your Application
:param string/integer app_version: Version of your Application
:param string machine_name: Name of machine you are working on
:param string environment: Default environment is production,
input sandbox, for the sandbox API
:param int/float timeout_limit: The timeout limit for every call made by this client instance (default: 10 sec)
:return: object
"""
if not all(i is None or isinstance(i, str_type)
for i in [app_name, machine_name, environment]):
raise ValueError('Input(s) must be string or none type object')
self.base_url = 'https://rest.avatax.com'
if environment:
if environment.lower() == 'sandbox':
self.base_url = 'https://sandbox-rest.avatax.com'
elif environment[:8] == 'https://' or environment[:7] == 'http://':
self.base_url = environment
self.auth = None
self.app_name = app_name
self.app_version = app_version
self.machine_name = machine_name
self.client_id = '{}; {}; Python SDK; 18.5; {};'.format(app_name,
app_version,
machine_name)
self.client_header = {'X-Avalara-Client': self.client_id}
self.timeout_limit = timeout_limit
def add_credentials(self, username=None, password=None):
"""
Configure this client for the specified username/password security.
:param string username: The username of your AvaTax user account
:param string password: The password of your AvaTax user account
:param int accountId: The account ID of your avatax account
:param string licenseKey: The license key of your avatax account
:param string bearerToken: The OAuth 2.0 token provided by Avalara
:return: AvaTaxClient
Note: if you wish to use Bearer token, enter it as the ONLY argument to this method.
"""
if not all(i is None or isinstance(i, str_type) for i in [username, password]):
raise ValueError('Input(s) must be string or none type object')
if username and not password:
self.client_header['Authorization'] = 'Bearer ' + username
else:
self.auth = HTTPBasicAuth(username, password)
return self
# to generate a client object on initialization of this file, uncomment the script below
# if __name__ == '__main__': # pragma no cover
# """Creating a client with credential, must have env variables username & password."""
# client = AvataxClient('my test app',
# 'ver 0.0',
# 'my test machine',
# 'sandbox')
# c = client.add_credentials(os.environ.get('USERNAME', ''),
# os.environ.get('PASSWORD', ''))
# print(client.ping().text)
# tax_document = {
# 'addresses': {'SingleLocation': {'city': 'Irvine',
# 'country': 'US',
# 'line1': '123 Main Street',
# 'postalCode': '92615',
# 'region': 'CA'}},
# 'commit': False,
# 'companyCode': 'DEFAULT',
# 'currencyCode': 'USD',
# 'customerCode': 'ABC',
# 'date': '2017-04-12',
# 'description': 'Yarn',
# 'lines': [{'amount': 100,
# 'description': 'Yarn',
# 'itemCode': 'Y0001',
# 'number': '1',
# 'quantity': 1,
# 'taxCode': 'PS081282'}],
# 'purchaseOrderNo': '2017-04-12-001',
# 'type': 'SalesInvoice'}
| []
| []
| [
"USERNAME",
"PASSWORD"
]
| [] | ["USERNAME", "PASSWORD"] | python | 2 | 0 | |
python/hsfs/connection.py | #
# Copyright 2020 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from requests.exceptions import ConnectionError
from hsfs.decorators import connected, not_connected
from hsfs import engine, client
from hsfs.core import feature_store_api, project_api, hosts_api, services_api
class Connection:
AWS_DEFAULT_REGION = "default"
HOPSWORKS_PORT_DEFAULT = 443
SECRETS_STORE_DEFAULT = "parameterstore"
HOSTNAME_VERIFICATION_DEFAULT = True
CERT_FOLDER_DEFAULT = "hops"
def __init__(
self,
host=None,
port=None,
project=None,
region_name=None,
secrets_store=None,
hostname_verification=None,
trust_store_path=None,
cert_folder=None,
api_key_file=None,
):
self._host = host
self._port = port or self.HOPSWORKS_PORT_DEFAULT
self._project = project
self._region_name = region_name or self.AWS_DEFAULT_REGION
self._secrets_store = secrets_store or self.SECRETS_STORE_DEFAULT
self._hostname_verification = (
hostname_verification or self.HOSTNAME_VERIFICATION_DEFAULT
)
self._trust_store_path = trust_store_path
self._cert_folder = cert_folder or self.CERT_FOLDER_DEFAULT
self._api_key_file = api_key_file
self._connected = False
self.connect()
@classmethod
def connection(
cls,
host=None,
port=None,
project=None,
region_name=None,
secrets_store=None,
hostname_verification=None,
trust_store_path=None,
cert_folder=None,
api_key_file=None,
):
return cls(
host,
port,
project,
region_name,
secrets_store,
hostname_verification,
trust_store_path,
cert_folder,
api_key_file,
)
@classmethod
def setup_databricks(
cls,
host,
project,
port=443,
region_name="default",
secrets_store="parameterstore",
cert_folder="hops",
hostname_verification=True,
trust_store_path=None,
api_key_file=None,
):
connection = cls(
host,
port,
project,
region_name,
secrets_store,
hostname_verification,
trust_store_path,
cert_folder,
api_key_file,
)
dbfs_folder = client.get_instance()._cert_folder_base
os.makedirs(os.path.join(dbfs_folder, "scripts"), exist_ok=True)
connection._get_clients(dbfs_folder)
hive_host = connection._get_hivemetastore_hostname()
connection._write_init_script(dbfs_folder)
connection._print_instructions(
cert_folder, client.get_instance()._cert_folder, hive_host
)
return connection
@not_connected
def connect(self):
self._connected = True
try:
if client.base.Client.REST_ENDPOINT not in os.environ:
if os.path.exists("/dbfs/"):
# databricks
client.init(
"external",
self._host,
self._port,
self._project,
self._region_name,
self._secrets_store,
self._hostname_verification,
os.path.join("/dbfs", self._trust_store_path)
if self._trust_store_path is not None
else None,
os.path.join("/dbfs", self._cert_folder),
os.path.join("/dbfs", self._api_key_file)
if self._api_key_file is not None
else None,
)
engine.init("spark")
else:
# aws
client.init(
"external",
self._host,
self._port,
self._project,
self._region_name,
self._secrets_store,
self._hostname_verification,
self._trust_store_path,
self._cert_folder,
self._api_key_file,
)
engine.init(
"hive",
self._host,
self._cert_folder,
self._project,
client.get_instance()._cert_key,
)
else:
client.init("hopsworks")
engine.init("spark")
self._feature_store_api = feature_store_api.FeatureStoreApi()
self._project_api = project_api.ProjectApi()
self._hosts_api = hosts_api.HostsApi()
self._services_api = services_api.ServicesApi()
except (TypeError, ConnectionError):
self._connected = False
raise
print("Connected. Call `.close()` to terminate connection gracefully.")
def close(self):
client.stop()
self._feature_store_api = None
engine.stop()
self._connected = False
print("Connection closed.")
@connected
def get_feature_store(self, name=None):
"""Get a reference to a feature store, to perform operations on.
Defaulting to the project's default feature store. Shared feature stores can be
retrieved by passing the `name`.
:param name: the name of the feature store, defaults to None
:type name: str, optional
:return: feature store object
:rtype: FeatureStore
"""
if not name:
name = client.get_instance()._project_name + "_featurestore"
return self._feature_store_api.get(name)
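# Example usage (host/project/key file values are illustrative):
#
#   conn = Connection.connection(host="my.hopsworks.ai", project="demo",
#                                api_key_file="api.key")
#   fs = conn.get_feature_store()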
def _get_clients(self, dbfs_folder):
"""
Get the client libraries and save them in the dbfs folder.
:param dbfs_folder: the folder in which to save the libraries
:type dbfs_folder: str
"""
client_path = os.path.join(dbfs_folder, "client.tar.gz")
if not os.path.exists(client_path):
client_libs = self._project_api.get_client()
with open(client_path, "wb") as f:
for chunk in client_libs:
f.write(chunk)
def _get_hivemetastore_hostname(self):
"""
Get the internal hostname of the Hopsworks instance.
"""
hosts = self._hosts_api.get()
hivemetastore = self._services_api.get_service("hivemetastore")
hosts = [host for host in hosts if host["id"] == hivemetastore["hostId"]]
return hosts[0]["hostname"]
def _write_init_script(self, dbfs_folder):
"""
Write the init script for databricks clusters to dbfs.
:param dbfs_folder: the folder on dbfs in which to save the script
:type dbfs_foler: str
"""
initScript = """
#!/bin/sh
tar -xvf PATH/client.tar.gz -C /tmp
tar -xvf /tmp/client/apache-hive-*-bin.tar.gz -C /tmp
mv /tmp/apache-hive-*-bin /tmp/apache-hive-bin
chmod -R +xr /tmp/apache-hive-bin
cp /tmp/client/hopsfs-client*.jar /databricks/jars/
"""
script_path = os.path.join(dbfs_folder, "scripts/initScript.sh")
if not os.path.exists(script_path):
initScript = initScript.replace("PATH", dbfs_folder)
with open(script_path, "w") as f:
f.write(initScript)
def _print_instructions(self, user_cert_folder, cert_folder, internal_host):
"""
Print the instructions to set up the hopsfs hive connection on databricks.
:param user_cert_folder: the original user specified cert_folder without `/dbfs/` prefix
:type user_cert_folder: str
:param cert_folder: the directory in which the credentials were saved, prefixed with `/dbfs/` and `[hostname]`
:type cert_folder: str
:param internal_host: the internal hostname of the hopsworks instance
:type internal_host: str
"""
instructions = """
In the advanced options of your databricks cluster configuration
add the following path to Init Scripts: dbfs:/{0}/scripts/initScript.sh
add the following to the Spark Config:
spark.hadoop.fs.hopsfs.impl io.hops.hopsfs.client.HopsFileSystem
spark.hadoop.hops.ipc.server.ssl.enabled true
spark.hadoop.hops.ssl.hostname.verifier ALLOW_ALL
spark.hadoop.hops.rpc.socket.factory.class.default io.hops.hadoop.shaded.org.apache.hadoop.net.HopsSSLSocketFactory
spark.hadoop.client.rpc.ssl.enabled.protocol TLSv1.2
spark.hadoop.hops.ssl.keystores.passwd.name {1}/material_passwd
spark.hadoop.hops.ssl.keystore.name {1}/keyStore.jks
spark.hadoop.hops.ssl.trustore.name {1}/trustStore.jks
spark.sql.hive.metastore.jars /tmp/apache-hive-bin/lib/*
spark.hadoop.hive.metastore.uris thrift://{2}:9083
Then save and restart the cluster.
""".format(
user_cert_folder, cert_folder, internal_host
)
print(instructions)
@property
def host(self):
return self._host
@host.setter
@not_connected
def host(self, host):
self._host = host
@property
def port(self):
return self._port
@port.setter
@not_connected
def port(self, port):
self._port = port
@property
def project(self):
return self._project
@project.setter
@not_connected
def project(self, project):
self._project = project
@property
def region_name(self):
return self._region_name
@region_name.setter
@not_connected
def region_name(self, region_name):
self._region_name = region_name
@property
def secrets_store(self):
return self._secrets_store
@secrets_store.setter
@not_connected
def secrets_store(self, secrets_store):
self._secrets_store = secrets_store
@property
def hostname_verification(self):
return self._hostname_verification
@hostname_verification.setter
@not_connected
def hostname_verification(self, hostname_verification):
self._hostname_verification = hostname_verification
@property
def trust_store_path(self):
return self._trust_store_path
@trust_store_path.setter
@not_connected
def trust_store_path(self, trust_store_path):
self._trust_store_path = trust_store_path
@property
def cert_folder(self):
return self._cert_folder
@cert_folder.setter
@not_connected
def cert_folder(self, cert_folder):
self._cert_folder = cert_folder
@property
def api_key_file(self):
return self._api_key_file
@api_key_file.setter
@not_connected
def api_key_file(self, api_key_file):
self._api_key_file = api_key_file
def __enter__(self):
self.connect()
return self
def __exit__(self, type, value, traceback):
self.close()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
metadata-ingestion/src/datahub/telemetry/telemetry.py | import errno
import json
import logging
import os
import platform
import sys
import uuid
from functools import wraps
from pathlib import Path
from typing import Any, Callable, Dict, Optional, TypeVar, Union
import requests
import datahub as datahub_package
logger = logging.getLogger(__name__)
GA_VERSION = 1
GA_TID = "UA-212728656-1"
DATAHUB_FOLDER = Path(os.path.expanduser("~/.datahub"))
CONFIG_FILE = DATAHUB_FOLDER / "telemetry-config.json"
# also fall back to environment variable if config file is not found
ENV_ENABLED = os.environ.get("DATAHUB_TELEMETRY_ENABLED", "true").lower() == "true"
class Telemetry:
client_id: str
enabled: bool = True
def __init__(self):
# init the client ID and config if it doesn't exist
if not CONFIG_FILE.exists():
self.client_id = str(uuid.uuid4())
self.update_config()
else:
self.load_config()
def update_config(self) -> None:
"""
Update the config file with the current client ID and enabled status.
"""
if not DATAHUB_FOLDER.exists():
os.makedirs(DATAHUB_FOLDER)
try:
with open(CONFIG_FILE, "w") as f:
json.dump(
{"client_id": self.client_id, "enabled": self.enabled}, f, indent=2
)
except IOError as x:
if x.errno == errno.ENOENT:
logger.debug(
f"{CONFIG_FILE} does not exist and could not be created. Please check permissions on the parent folder."
)
elif x.errno == errno.EACCES:
logger.debug(
f"{CONFIG_FILE} cannot be read. Please check the permissions on this file."
)
else:
logger.debug(
f"{CONFIG_FILE} had an IOError, please inspect this file for issues."
)
def enable(self) -> None:
"""
Enable telemetry.
"""
self.enabled = True
self.update_config()
def disable(self) -> None:
"""
Disable telemetry.
"""
self.enabled = False
self.update_config()
def load_config(self):
"""
Load the saved config for the telemetry client ID and enabled status.
"""
try:
with open(CONFIG_FILE, "r") as f:
config = json.load(f)
self.client_id = config["client_id"]
self.enabled = config["enabled"] & ENV_ENABLED
except IOError as x:
if x.errno == errno.ENOENT:
logger.debug(
f"{CONFIG_FILE} does not exist and could not be created. Please check permissions on the parent folder."
)
elif x.errno == errno.EACCES:
logger.debug(
f"{CONFIG_FILE} cannot be read. Please check the permissions on this file."
)
else:
logger.debug(
f"{CONFIG_FILE} had an IOError, please inspect this file for issues."
)
def ping(
self,
category: str,
action: str,
label: Optional[str] = None,
value: Optional[int] = None,
) -> None:
"""
Ping Google Analytics with a single event.
Args:
category (str): category for the event
action (str): action taken
label (Optional[str], optional): label for the event
value (Optional[int], optional): value for the event
"""
if not self.enabled:
return
req_url = "https://www.google-analytics.com/collect"
params: Dict[str, Union[str, int]] = {
"an": "datahub-cli", # app name
"av": datahub_package.nice_version_name(), # app version
"t": "event", # event type
"v": GA_VERSION, # Google Analytics version
"tid": GA_TID, # tracking id
"cid": self.client_id, # client id
"ec": category, # event category
"ea": action, # event action
# use custom dimensions to capture OS and Python version
# see https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters#cd_
"cd1": platform.system(), # OS
"cd2": platform.python_version(), # Python version
}
if label:
params["el"] = label
# this has to be a non-negative int, otherwise the request will fail
if value:
params["ev"] = value
try:
requests.post(
req_url,
data=params,
headers={
"user-agent": f"datahub {datahub_package.nice_version_name()}"
},
)
except Exception as e:
logger.debug(f"Error reporting telemetry: {e}")
telemetry_instance = Telemetry()
T = TypeVar("T")
def get_full_class_name(obj):
module = obj.__class__.__module__
if module is None or module == str.__class__.__module__:
return obj.__class__.__name__
return module + "." + obj.__class__.__name__
def with_telemetry(func: Callable[..., T]) -> Callable[..., T]:
@wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
category = func.__module__
action = func.__name__
telemetry_instance.ping(category, action, "started")
try:
res = func(*args, **kwargs)
telemetry_instance.ping(category, action, "completed")
return res
# Catch general exceptions
except Exception as e:
telemetry_instance.ping(category, action, f"error:{get_full_class_name(e)}")
raise e
# System exits (used in ingestion and Docker commands) are not caught by the exception handler,
# so we need to catch them here.
except SystemExit as e:
# Forward successful exits
if e.code == 0:
telemetry_instance.ping(category, action, "completed")
sys.exit(0)
# Report failed exits
else:
telemetry_instance.ping(
category, action, f"error:{get_full_class_name(e)}"
)
sys.exit(e.code)
# Catch SIGINTs
except KeyboardInterrupt:
telemetry_instance.ping(category, action, "cancelled")
sys.exit(0)
return wrapper
| []
| []
| [
"DATAHUB_TELEMETRY_ENABLED"
]
| [] | ["DATAHUB_TELEMETRY_ENABLED"] | python | 1 | 0 | |
conda_smithy/lint_recipe.py | # -*- coding: utf-8 -*-
from collections.abc import Sequence, Mapping
str_type = str
import copy
from glob import glob
import io
import itertools
import os
import re
import shutil
import subprocess
import sys
import github
from conda_build.metadata import (
ensure_valid_license_family,
FIELDS as cbfields,
)
import conda_build.conda_interface
from .utils import render_meta_yaml, get_yaml
FIELDS = copy.deepcopy(cbfields)
# Just in case 'extra' moves into conda_build
if "extra" not in FIELDS.keys():
FIELDS["extra"] = set()
FIELDS["extra"].add("recipe-maintainers")
FIELDS["extra"].add("feedstock-name")
EXPECTED_SECTION_ORDER = [
"package",
"source",
"build",
"requirements",
"test",
"app",
"outputs",
"about",
"extra",
]
REQUIREMENTS_ORDER = ["build", "host", "run"]
TEST_KEYS = {"imports", "commands"}
TEST_FILES = ["run_test.py", "run_test.sh", "run_test.bat", "run_test.pl"]
NEEDED_FAMILIES = ["gpl", "bsd", "mit", "apache", "psf"]
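# Patterns used by the per-line checks below: `sel_pat` matches selector
# lines such as `skip: true # [win]`, `jinja_pat` matches Jinja2
# `{% set ... %}` statements, and JINJA_VAR_PAT captures `{{ var }}` references.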
sel_pat = re.compile(r"(.+?)\s*(#.*)?\[([^\[\]]+)\](?(2).*)$")
jinja_pat = re.compile(r"\s*\{%\s*(set)\s+[^\s]+\s*=\s*[^\s]+\s*%\}")
JINJA_VAR_PAT = re.compile(r"{{(.*?)}}")
def get_section(parent, name, lints):
if name == "source":
return get_list_section(parent, name, lints, allow_single=True)
elif name == "outputs":
return get_list_section(parent, name, lints)
section = parent.get(name, {})
if not isinstance(section, Mapping):
lints.append(
'The "{}" section was expected to be a dictionary, but '
"got a {}.".format(name, type(section).__name__)
)
section = {}
return section
def get_list_section(parent, name, lints, allow_single=False):
section = parent.get(name, [])
if allow_single and isinstance(section, Mapping):
return [section]
elif isinstance(section, Sequence) and not isinstance(section, str_type):
return section
else:
msg = 'The "{}" section was expected to be a {}list, but got a {}.{}.'.format(
name,
"dictionary or a " if allow_single else "",
type(section).__module__,
type(section).__name__,
)
lints.append(msg)
return [{}]
def lint_section_order(major_sections, lints):
section_order_sorted = sorted(
major_sections, key=EXPECTED_SECTION_ORDER.index
)
if major_sections != section_order_sorted:
section_order_sorted_str = map(
lambda s: "'%s'" % s, section_order_sorted
)
section_order_sorted_str = ", ".join(section_order_sorted_str)
section_order_sorted_str = "[" + section_order_sorted_str + "]"
lints.append(
"The top level meta keys are in an unexpected order. "
"Expecting {}.".format(section_order_sorted_str)
)
def lint_about_contents(about_section, lints):
for about_item in ["home", "license", "summary"]:
# if the section doesn't exist, or is just empty, lint it.
if not about_section.get(about_item, ""):
lints.append(
"The {} item is expected in the about section."
"".format(about_item)
)
def lintify(meta, recipe_dir=None, conda_forge=False):
lints = []
hints = []
major_sections = list(meta.keys())
# If the recipe_dir exists (no guarantee within this function), we can
# find the meta.yaml within it.
meta_fname = os.path.join(recipe_dir or "", "meta.yaml")
sources_section = get_section(meta, "source", lints)
build_section = get_section(meta, "build", lints)
requirements_section = get_section(meta, "requirements", lints)
test_section = get_section(meta, "test", lints)
about_section = get_section(meta, "about", lints)
extra_section = get_section(meta, "extra", lints)
package_section = get_section(meta, "package", lints)
outputs_section = get_section(meta, "outputs", lints)
recipe_dirname = os.path.basename(recipe_dir) if recipe_dir else "recipe"
is_staged_recipes = recipe_dirname != "recipe"
# 0: Top level keys should be expected
unexpected_sections = []
for section in major_sections:
if section not in EXPECTED_SECTION_ORDER:
lints.append(
"The top level meta key {} is unexpected".format(section)
)
unexpected_sections.append(section)
for section in unexpected_sections:
major_sections.remove(section)
# 1: Top level meta.yaml keys should have a specific order.
lint_section_order(major_sections, lints)
# 2: The about section should have a home, license and summary.
lint_about_contents(about_section, lints)
# 3a: The recipe should have some maintainers.
if not extra_section.get("recipe-maintainers", []):
lints.append(
"The recipe could do with some maintainers listed in "
"the `extra/recipe-maintainers` section."
)
# 3b: Maintainers should be a list
if not (
isinstance(extra_section.get("recipe-maintainers", []), Sequence)
and not isinstance(
extra_section.get("recipe-maintainers", []), str_type
)
):
lints.append("Recipe maintainers should be a json list.")
# 4: The recipe should have some tests.
if not any(key in TEST_KEYS for key in test_section):
a_test_file_exists = recipe_dir is not None and any(
os.path.exists(os.path.join(recipe_dir, test_file))
for test_file in TEST_FILES
)
if not a_test_file_exists:
has_outputs_test = False
no_test_hints = []
if outputs_section:
for out in outputs_section:
test_out = get_section(out, "test", lints)
if any(key in TEST_KEYS for key in test_out):
has_outputs_test = True
else:
no_test_hints.append(
"It looks like the '{}' output doesn't "
"have any tests.".format(out.get("name", "???"))
)
if has_outputs_test:
hints.extend(no_test_hints)
else:
lints.append("The recipe must have some tests.")
# 5: License cannot be 'unknown.'
license = about_section.get("license", "").lower()
if "unknown" == license.strip():
lints.append("The recipe license cannot be unknown.")
# 6: Selectors should be in a tidy form.
if recipe_dir is not None and os.path.exists(meta_fname):
bad_selectors = []
bad_lines = []
# Good selectors look like ".*\s\s#\s[...]"
good_selectors_pat = re.compile(r"(.+?)\s{2,}#\s\[(.+)\](?(2).*)$")
with io.open(meta_fname, "rt") as fh:
for selector_line, line_number in selector_lines(fh):
if not good_selectors_pat.match(selector_line):
bad_selectors.append(selector_line)
bad_lines.append(line_number)
if bad_selectors:
lints.append(
"Selectors are suggested to take a "
"``<two spaces>#<one space>[<expression>]`` form."
" See lines {}".format(bad_lines)
)
# 7: The build section should have a build number.
if build_section.get("number", None) is None:
lints.append("The recipe must have a `build/number` section.")
# 8: The build section should be before the run section in requirements.
seen_requirements = [
k for k in requirements_section if k in REQUIREMENTS_ORDER
]
requirements_order_sorted = sorted(
seen_requirements, key=REQUIREMENTS_ORDER.index
)
if seen_requirements != requirements_order_sorted:
lints.append(
"The `requirements/` sections should be defined "
"in the following order: "
+ ", ".join(REQUIREMENTS_ORDER)
+ "; instead saw: "
+ ", ".join(seen_requirements)
+ "."
)
# 9: Files downloaded should have a hash.
for source_section in sources_section:
if "url" in source_section and not (
{"sha1", "sha256", "md5"} & set(source_section.keys())
):
lints.append(
"When defining a source/url please add a sha256, sha1 "
"or md5 checksum (sha256 preferably)."
)
# 10: License should not include the word 'license'.
license = about_section.get("license", "").lower()
if (
"license" in license.lower()
and "unlicense" not in license.lower()
and "licenseref" not in license.lower()
and "-license" not in license.lower()
):
lints.append(
"The recipe `license` should not include the word " '"License".'
)
# 11: There should be one empty line at the end of the file.
if recipe_dir is not None and os.path.exists(meta_fname):
with io.open(meta_fname, "r") as f:
lines = f.read().split("\n")
# Count the number of empty lines from the end of the file
empty_lines = itertools.takewhile(lambda x: x == "", reversed(lines))
end_empty_lines_count = len(list(empty_lines))
if end_empty_lines_count > 1:
lints.append(
"There are {} too many lines. "
"There should be one empty line at the end of the "
"file.".format(end_empty_lines_count - 1)
)
elif end_empty_lines_count < 1:
lints.append(
"There are too few lines. There should be one empty "
"line at the end of the file."
)
# 12: License family must be valid (conda-build checks for that)
try:
ensure_valid_license_family(meta)
except RuntimeError as e:
lints.append(str(e))
# 12a: License family must be valid (conda-build checks for that)
license_family = about_section.get("license_family", license).lower()
license_file = about_section.get("license_file", None)
if not license_file and any(
f for f in NEEDED_FAMILIES if f in license_family
):
lints.append("license_file entry is missing, but is required.")
# 13: Check that the recipe name is valid
recipe_name = package_section.get("name", "").strip()
if re.match("^[a-z0-9_\-.]+$", recipe_name) is None:
lints.append(
"Recipe name has invalid characters. only lowercase alpha, numeric, "
"underscores, hyphens and dots allowed"
)
# 14: Run conda-forge specific lints
if conda_forge:
run_conda_forge_specific(meta, recipe_dir, lints, hints)
# 15: Check if we are using legacy patterns
build_reqs = requirements_section.get("build", None)
if build_reqs and ("numpy x.x" in build_reqs):
lints.append(
"Using pinned numpy packages is a deprecated pattern. Consider "
"using the method outlined "
"[here](https://conda-forge.org/docs/maintainer/knowledge_base.html#linking-numpy)."
)
# 16: Subheaders should be in the allowed subheadings
for section in major_sections:
expected_subsections = FIELDS.get(section, [])
if not expected_subsections:
continue
for subsection in get_section(meta, section, lints):
if (
section != "source"
and section != "outputs"
and subsection not in expected_subsections
):
lints.append(
"The {} section contained an unexpected "
"subsection name. {} is not a valid subsection"
" name.".format(section, subsection)
)
elif section == "source" or section == "outputs":
for source_subsection in subsection:
if source_subsection not in expected_subsections:
lints.append(
"The {} section contained an unexpected "
"subsection name. {} is not a valid subsection"
" name.".format(section, source_subsection)
)
# 17: noarch doesn't work with selectors for runtime dependencies
if build_section.get("noarch") is not None and os.path.exists(meta_fname):
with io.open(meta_fname, "rt") as fh:
in_runreqs = False
for line in fh:
line_s = line.strip()
if line_s == "host:" or line_s == "run:":
in_runreqs = True
runreqs_spacing = line[: -len(line.lstrip())]
continue
if line_s.startswith("skip:") and is_selector_line(line):
lints.append(
"`noarch` packages can't have selectors. If "
"the selectors are necessary, please remove "
"`noarch: {}`.".format(build_section["noarch"])
)
break
if in_runreqs:
if runreqs_spacing == line[: -len(line.lstrip())]:
in_runreqs = False
continue
if is_selector_line(line):
lints.append(
"`noarch` packages can't have selectors. If "
"the selectors are necessary, please remove "
"`noarch: {}`.".format(build_section["noarch"])
)
break
# 19: check version
if package_section.get("version") is not None:
ver = str(package_section.get("version"))
try:
conda_build.conda_interface.VersionOrder(ver)
except Exception:
lints.append(
"Package version {} doesn't match conda spec".format(ver)
)
# 20: Jinja2 variable definitions should be nice.
if recipe_dir is not None and os.path.exists(meta_fname):
bad_jinja = []
bad_lines = []
# Good Jinja2 variable definitions look like "{% set .+ = .+ %}"
good_jinja_pat = re.compile(r"\s*\{%\s(set)\s[^\s]+\s=\s[^\s]+\s%\}")
with io.open(meta_fname, "rt") as fh:
for jinja_line, line_number in jinja_lines(fh):
if not good_jinja_pat.match(jinja_line):
bad_jinja.append(jinja_line)
bad_lines.append(line_number)
if bad_jinja:
lints.append(
"Jinja2 variable definitions are suggested to "
"take a ``{{%<one space>set<one space>"
"<variable name><one space>=<one space>"
"<expression><one space>%}}`` form. See lines "
"{}".format(bad_lines)
)
# 21: Legacy usage of compilers
if build_reqs and ("toolchain" in build_reqs):
lints.append(
"Using toolchain directly in this manner is deprecated. Consider "
"using the compilers outlined "
"[here](https://conda-forge.org/docs/maintainer/knowledge_base.html#compilers)."
)
# 22: Single space in pinned requirements
for section, requirements in requirements_section.items():
for requirement in requirements or []:
req, _, _ = requirement.partition("#")
if "{{" in req:
continue
parts = req.split()
if len(parts) > 2 and parts[1] in [
"!=",
"=",
"==",
">",
"<",
"<=",
">=",
]:
# check for too many spaces
lints.append(
(
"``requirements: {section}: {requirement}`` should not "
"contain a space between relational operator and the version, i.e. "
"``{name} {pin}``"
).format(
section=section,
requirement=requirement,
name=parts[0],
pin="".join(parts[1:]),
)
)
continue
# check that there is a space if there is a pin
bad_char_idx = [(parts[0].find(c), c) for c in "><="]
bad_char_idx = [bci for bci in bad_char_idx if bci[0] >= 0]
if bad_char_idx:
bad_char_idx.sort()
i = bad_char_idx[0][0]
lints.append(
(
"``requirements: {section}: {requirement}`` must "
"contain a space between the name and the pin, i.e. "
"``{name} {pin}``"
).format(
section=section,
requirement=requirement,
name=parts[0][:i],
pin=parts[0][i:] + "".join(parts[1:]),
)
)
continue
# 23: non noarch builds shouldn't use version constraints on python and r-base
check_languages = ["python", "r-base"]
host_reqs = requirements_section.get("host") or []
run_reqs = requirements_section.get("run") or []
for language in check_languages:
if build_section.get("noarch") is None and not outputs_section:
filtered_host_reqs = [
req
for req in host_reqs
if req.partition(" ")[0] == str(language)
]
filtered_run_reqs = [
req
for req in run_reqs
if req.partition(" ")[0] == str(language)
]
if filtered_host_reqs and not filtered_run_reqs:
lints.append(
"If {0} is a host requirement, it should be a run requirement.".format(
str(language)
)
)
for reqs in [filtered_host_reqs, filtered_run_reqs]:
if str(language) in reqs:
continue
for req in reqs:
constraint = req.split(" ", 1)[1]
if constraint.startswith(">") or constraint.startswith(
"<"
):
lints.append(
"Non noarch packages should have {0} requirement without any version constraints.".format(
str(language)
)
)
# 24: jinja2 variable references should be {{<one space>var<one space>}}
if recipe_dir is not None and os.path.exists(meta_fname):
bad_vars = []
bad_lines = []
with io.open(meta_fname, "rt") as fh:
for i, line in enumerate(fh.readlines()):
for m in JINJA_VAR_PAT.finditer(line):
if m.group(1) is not None:
var = m.group(1)
if var != " %s " % var.strip():
bad_vars.append(m.group(1).strip())
bad_lines.append(i + 1)
if bad_vars:
hints.append(
"Jinja2 variable references are suggested to "
"take a ``{{<one space><variable name><one space>}}``"
" form. See lines %s." % (bad_lines,)
)
# 25: require a lower bound on python version
if build_section.get("noarch") == "python" and not outputs_section:
for req in run_reqs:
if (req.strip().split()[0] == "python") and (req != "python"):
break
else:
lints.append(
"noarch: python recipes are required to have a lower bound "
"on the python version. Typically this means putting "
"`python >=3.6` in **both** `host` and `run` but you should check "
"upstream for the package's Python compatibility."
)
# hints
# 1: suggest pip
if "script" in build_section:
scripts = build_section["script"]
if isinstance(scripts, str):
scripts = [scripts]
for script in scripts:
if "python setup.py install" in script:
hints.append(
"Whenever possible python packages should use pip. "
"See https://conda-forge.org/docs/maintainer/adding_pkgs.html#use-pip"
)
# 2: suggest python noarch (skip on feedstocks)
if (
build_section.get("noarch") is None
and build_reqs
and not any(["_compiler_stub" in b for b in build_reqs])
and ("pip" in build_reqs)
and (is_staged_recipes or not conda_forge)
):
with io.open(meta_fname, "rt") as fh:
in_runreqs = False
no_arch_possible = True
for line in fh:
line_s = line.strip()
if line_s == "host:" or line_s == "run:":
in_runreqs = True
runreqs_spacing = line[: -len(line.lstrip())]
continue
if line_s.startswith("skip:") and is_selector_line(line):
no_arch_possible = False
break
if in_runreqs:
if runreqs_spacing == line[: -len(line.lstrip())]:
in_runreqs = False
continue
if is_selector_line(line):
no_arch_possible = False
break
if no_arch_possible:
hints.append(
"Whenever possible python packages should use noarch. "
"See https://conda-forge.org/docs/maintainer/knowledge_base.html#noarch-builds"
)
# 3: suggest fixing all recipe/*.sh shellcheck findings
shellcheck_enabled = False
shell_scripts = []
if recipe_dir:
shell_scripts = glob(os.path.join(recipe_dir, "*.sh"))
# support feedstocks and staged-recipes
forge_yaml = glob(
os.path.join(recipe_dir, "..", "conda-forge.yml")
) or glob(
os.path.join(recipe_dir, "..", "..", "conda-forge.yml"),
)
if shell_scripts and forge_yaml:
with open(forge_yaml[0], "r") as fh:
code = get_yaml().load(fh)
shellcheck_enabled = code.get("shellcheck", {}).get(
"enabled", shellcheck_enabled
)
if shellcheck_enabled and shutil.which("shellcheck") and shell_scripts:
MAX_SHELLCHECK_LINES = 50
cmd = [
"shellcheck",
"--enable=all",
"--shell=bash",
# SC2154: var is referenced but not assigned,
# see https://github.com/koalaman/shellcheck/wiki/SC2154
"--exclude=SC2154",
]
p = subprocess.Popen(
cmd + shell_scripts,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env={
"PATH": os.getenv("PATH")
}, # exclude other env variables to protect against token leakage
)
sc_stdout, _ = p.communicate()
if p.returncode == 1:
# All files successfully scanned with some issues.
findings = (
sc_stdout.decode(sys.stdout.encoding)
.replace("\r\n", "\n")
.splitlines()
)
hints.append(
"Whenever possible fix all shellcheck findings ('"
+ " ".join(cmd)
+ " recipe/*.sh -f diff | git apply' helps)"
)
hints.extend(findings[:MAX_SHELLCHECK_LINES])
if len(findings) > MAX_SHELLCHECK_LINES:
hints.append(
"Output restricted, there are '%s' more lines."
% (len(findings) - MAX_SHELLCHECK_LINES)
)
elif p.returncode != 0:
# Something went wrong.
hints.append(
"There have been errors while scanning with shellcheck."
)
# 4: Check for SPDX
import license_expression
license = about_section.get("license", "")
licensing = license_expression.Licensing()
parsed_exceptions = []
try:
parsed_licenses = []
parsed_licenses_with_exception = licensing.license_symbols(
license.strip(), decompose=False
)
for l in parsed_licenses_with_exception:
if isinstance(l, license_expression.LicenseWithExceptionSymbol):
parsed_licenses.append(l.license_symbol.key)
parsed_exceptions.append(l.exception_symbol.key)
else:
parsed_licenses.append(l.key)
except license_expression.ExpressionError:
parsed_licenses = [license]
licenseref_regex = re.compile(r"^LicenseRef[a-zA-Z0-9\-.]*$")
filtered_licenses = []
for license in parsed_licenses:
if not licenseref_regex.match(license):
filtered_licenses.append(license)
with open(
os.path.join(os.path.dirname(__file__), "licenses.txt"), "r"
) as f:
expected_licenses = f.readlines()
expected_licenses = set([l.strip() for l in expected_licenses])
with open(
os.path.join(os.path.dirname(__file__), "license_exceptions.txt"), "r"
) as f:
expected_exceptions = f.readlines()
expected_exceptions = set([l.strip() for l in expected_exceptions])
if set(filtered_licenses) - expected_licenses:
hints.append(
"License is not an SPDX identifier (or a custom LicenseRef) nor an SPDX license expression.\n\n"
"Documentation on acceptable licenses can be found "
"[here]( https://conda-forge.org/docs/maintainer/adding_pkgs.html#spdx-identifiers-and-expressions )."
)
if set(parsed_exceptions) - expected_exceptions:
hints.append(
"License exception is not an SPDX exception.\n\n"
"Documentation on acceptable licenses can be found "
"[here]( https://conda-forge.org/docs/maintainer/adding_pkgs.html#spdx-identifiers-and-expressions )."
)
return lints, hints
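# Lints that only make sense when linting for conda-forge itself; this
# talks to the GitHub API and therefore requires a token in GH_TOKEN.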
def run_conda_forge_specific(meta, recipe_dir, lints, hints):
gh = github.Github(os.environ["GH_TOKEN"])
package_section = get_section(meta, "package", lints)
extra_section = get_section(meta, "extra", lints)
recipe_dirname = os.path.basename(recipe_dir) if recipe_dir else "recipe"
recipe_name = package_section.get("name", "").strip()
is_staged_recipes = recipe_dirname != "recipe"
# 1: Check that the recipe does not exist in conda-forge or bioconda
if is_staged_recipes and recipe_name:
cf = gh.get_user(os.getenv("GH_ORG", "conda-forge"))
try:
for name in set(
[
recipe_name,
recipe_name.replace("-", "_"),
recipe_name.replace("_", "-"),
]
):
if cf.get_repo("{}-feedstock".format(name)):
existing_recipe_name = name
feedstock_exists = True
break
else:
feedstock_exists = False
except github.UnknownObjectException:
feedstock_exists = False
if feedstock_exists and existing_recipe_name == recipe_name:
lints.append("Feedstock with the same name exists in conda-forge.")
elif feedstock_exists:
hints.append(
"Feedstock with the name {} exists in conda-forge. Is it the same as this package ({})?".format(
existing_recipe_name,
recipe_name,
)
)
bio = gh.get_user("bioconda").get_repo("bioconda-recipes")
try:
bio.get_dir_contents("recipes/{}".format(recipe_name))
except github.UnknownObjectException:
pass
else:
hints.append(
"Recipe with the same name exists in bioconda: "
"please discuss with @conda-forge/bioconda-recipes."
)
# 2: Check that the recipe maintainers exists:
maintainers = extra_section.get("recipe-maintainers", [])
for maintainer in maintainers:
if "/" in maintainer:
# It's a team. Checking for existence is expensive. Skip for now
continue
try:
gh.get_user(maintainer)
except github.UnknownObjectException:
lints.append(
'Recipe maintainer "{}" does not exist'.format(maintainer)
)
# 3: if the recipe dir is inside the example dir
if recipe_dir is not None and "recipes/example/" in recipe_dir:
lints.append(
"Please move the recipe out of the example dir and "
"into its own dir."
)
# 4: Do not delete example recipe
if is_staged_recipes and recipe_dir is not None:
example_meta_fname = os.path.abspath(
os.path.join(recipe_dir, "..", "example", "meta.yaml")
)
if not os.path.exists(example_meta_fname):
msg = (
"Please do not delete the example recipe found in "
"`recipes/example/meta.yaml`."
)
if msg not in lints:
lints.append(msg)
def is_selector_line(line):
# Using the same pattern defined in conda-build (metadata.py),
# we identify selectors.
line = line.rstrip()
if line.lstrip().startswith("#"):
# Don't bother with comment only lines
return False
m = sel_pat.match(line)
if m:
m.group(3)
return True
return False
def is_jinja_line(line):
line = line.rstrip()
m = jinja_pat.match(line)
if m:
return True
return False
def selector_lines(lines):
for i, line in enumerate(lines):
if is_selector_line(line):
yield line, i
def jinja_lines(lines):
for i, line in enumerate(lines):
if is_jinja_line(line):
yield line, i
def main(recipe_dir, conda_forge=False, return_hints=False):
recipe_dir = os.path.abspath(recipe_dir)
recipe_meta = os.path.join(recipe_dir, "meta.yaml")
if not os.path.exists(recipe_dir):
raise IOError("Feedstock has no recipe/meta.yaml.")
with io.open(recipe_meta, "rt") as fh:
content = render_meta_yaml("".join(fh))
meta = get_yaml().load(content)
results, hints = lintify(meta, recipe_dir, conda_forge)
if return_hints:
return results, hints
else:
return results
| []
| []
| [
"GH_ORG",
"GH_TOKEN",
"PATH"
]
| [] | ["GH_ORG", "GH_TOKEN", "PATH"] | python | 3 | 0 | |
EncryptionAdapterTestUtilities/conanfile.py | import os
from conans import ConanFile, tools, CMake
class EncryptionAdapterTestUtilitiesConan(ConanFile):
name = "EncryptionAdapterTestUtilities"
description = "Test utilities for library-agnostic API for C++ to encrypt data"
url = "https://github.com/systelab/cpp-encryption-adapter"
homepage = "https://github.com/systelab/cpp-encryption-adapter"
author = "CSW <[email protected]>"
topics = ("conan", "encryption", "adapter", "wrapper", "test", "gtest")
license = "MIT"
generators = "cmake_find_package"
settings = "os", "compiler", "build_type", "arch"
options = {"gtest": ["1.7.0", "1.8.1", "1.10.0"]}
default_options = "gtest=1.10.0"
exports_sources = "*"
def requirements(self):
if self.options.gtest == "1.7.0":
self.requires("gtest/1.7.0@systelab/stable")
elif self.options.gtest == "1.8.1":
self.requires("gtest/1.8.1")
elif self.options.gtest == "1.10.0":
self.requires("gtest/1.10.0#0c895f60b461f8fee0da53a84d659131")
else:
self.requires(f"gtest/{self.options.gtest}")
self.requires("TestUtilitiesInterface/1.0.8@systelab/stable")
if ("%s" % self.version) == "None":
channel = os.environ['CHANNEL'] if "CHANNEL" in os.environ else "stable"
self.requires(f"EncryptionAdapterInterface/{os.environ['VERSION']}@systelab/{channel}")
else:
self.requires(f"EncryptionAdapterInterface/{self.version}@systelab/{self.channel}")
def build(self):
cmake = CMake(self)
cmake.configure(source_folder=".")
cmake.build()
def package(self):
self.copy("*.h", dst="include/EncryptionAdapterTestUtilities", keep_path=True)
self.copy("*EncryptionAdapterTestUtilities.lib", dst="lib", keep_path=False)
self.copy("*EncryptionAdapterTestUtilities.pdb", dst="lib", keep_path=False)
self.copy("*EncryptionAdapterTestUtilities.a", dst="lib", keep_path=False)
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
| []
| []
| [
"VERSION",
"CHANNEL"
]
| [] | ["VERSION", "CHANNEL"] | python | 2 | 0 | |
owner/src/test/java/org/aeonbits/owner/importedprops/SystemPropertiesAndEnvTest.java | /*
* Copyright (c) 2013, Luigi R. Viggiano
* All rights reserved.
*
* This software is distributable under the BSD license.
* See the terms of the BSD license in the documentation provided with this software.
*/
package org.aeonbits.owner.importedprops;
import org.aeonbits.owner.Config;
import org.aeonbits.owner.ConfigFactory;
import org.junit.Test;
import java.io.File;
import java.io.PrintStream;
import static org.junit.Assert.assertEquals;
/**
* @author Luigi R. Viggiano
*/
public class SystemPropertiesAndEnvTest {
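// The interface below maps config keys onto the imported property sources:
// "file.separator" and "java.home" resolve from the system properties, while
// "HOME" and "USER" resolve from the process environment (see the test).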
interface SystemEnvProperties extends Config {
@Key("file.separator")
String fileSeparator();
@Key("java.home")
String javaHome();
@Key("HOME")
String home();
@Key("USER")
String user();
void list(PrintStream out);
}
@Test
public void testSystemEnvProperties() {
SystemEnvProperties cfg = ConfigFactory.create(SystemEnvProperties
.class, System.getProperties(), System.getenv());
assertEquals(File.separator, cfg.fileSeparator());
assertEquals(System.getProperty("java.home"), cfg.javaHome());
assertEquals(System.getenv().get("HOME"), cfg.home());
assertEquals(System.getenv().get("USER"), cfg.user());
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
main.go | package main
import (
"fmt"
"log"
"net/http"
"os"
"strconv"
httpbin "github.com/ahmetb/go-httpbin"
)
const (
defaultPort = 8080
)
func main() {
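// Resolve the listen port from the PORT environment variable, falling back
// to the default when it is unset or not a valid integer.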
port, err := strconv.Atoi(os.Getenv("PORT"))
if err != nil {
port = defaultPort
}
router := httpbin.GetMux()
hostStr := fmt.Sprintf(":%d", port)
log.Printf("kubehttpbin listening on %s", hostStr)
if err := http.ListenAndServe(hostStr, router); err != nil {
log.Fatalf("server failed (%s)", err)
}
}
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
test_driven_development_with_python/superlists/manage.py | #!/usr/bin/env python3
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "superlists.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
old/fundamentals/variables.go | package main
import (
"fmt"
"os"
)
func main() {
name := os.Getenv("USERNAME")
tvshow := "Poirot"
fmt.Println("\nHi", name, ", you're currenty watching", tvshow)
changeTVShow(&tvshow)
fmt.Println("\nYou've changed the TV channel, and you're now watching", tvshow, ".")
}
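// changeTVShow mutates the string its pointer argument refers to, so the
// caller's variable is updated in place; the return value is informational.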
func changeTVShow(_tvshow *string) string {
*_tvshow = "Narcos"
fmt.Println("Trying to switch TV channel to", *_tvshow)
return *_tvshow
}
| [
"\"USERNAME\""
]
| []
| [
"USERNAME"
]
| [] | ["USERNAME"] | go | 1 | 0 | |
ActivityTracker/wsgi.py | """
WSGI config for ActivityTracker project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ActivityTracker.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
test/lib/test_gateway/test_gateway_process.go | // Copyright (c) 2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package testgateway
import (
"bufio"
"encoding/json"
"io"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
"github.com/pkg/errors"
)
var realHTTPAddrRegex = regexp.MustCompile(
`"realHTTPAddr":"([0-9\.\:]+)"`,
)
var realTChannelAddrRegex = regexp.MustCompile(
`"realTChannelAddr":"([0-9\.\:]+)"`,
)
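// infoIgnoreList suppresses expected-but-noisy TChannel lifecycle messages
// when relaying the child process's JSON log output to stdout.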
var infoIgnoreList = map[string]bool{
"Outbound connection is active.": true,
"Channel.Close called.": true,
"Connection.Close called.": true,
"Connection state updated in Close.": true,
"Connection state updated during shutdown.": true,
"Removed peer from root peer list.": true,
"Inbound connection is active.": true,
"Channel closed.": true,
}
// MalformedStdoutError is used when the child process has unexpected stdout
type MalformedStdoutError struct {
Type string
StdoutLine string
Message string
}
func (err *MalformedStdoutError) Error() string {
return err.Message
}
func (gateway *ChildProcessGateway) createAndSpawnChild(
mainFile string,
defaultConfigFiles []string,
testConfigOverrides map[string]interface{},
) error {
info, err := createTestBinaryFile(mainFile)
if err != nil {
return errors.Wrap(err, "Could not create test binary file: ")
}
gateway.binaryFileInfo = info
args := []string{
gateway.binaryFileInfo.BinaryFile,
}
if os.Getenv("COVER_ON") == "1" {
args = append(args,
"-test.coverprofile", info.CoverProfileFile,
)
}
tempConfigFile, err := writeConfigToFile(testConfigOverrides)
if err != nil {
gateway.Close()
return errors.Wrap(err, "Could not exec test command")
}
configFiles := append(defaultConfigFiles, tempConfigFile)
args = append(args, "-config", strings.Join(configFiles, ";"))
gateway.cmd = exec.Command(args[0], args[1:]...)
gateway.cmd.Env = append(
[]string{
"GATEWAY_RUN_CHILD_PROCESS_TEST=1",
},
os.Environ()...,
)
gateway.cmd.Stderr = os.Stderr
cmdStdout, err := gateway.cmd.StdoutPipe()
if err != nil {
gateway.Close()
return errors.Wrap(err, "Could not create stdout pipe")
}
err = gateway.cmd.Start()
if err != nil {
gateway.Close()
return errors.Wrap(err, "Could not start test gateway")
}
reader := bufio.NewReader(cmdStdout)
err = readAddrFromStdout(gateway, reader)
if err != nil {
gateway.Close()
return errors.Wrap(err, "could not read addr from stdout")
}
go gateway.copyToStdout(reader)
return nil
}
func addJSONLine(gateway *ChildProcessGateway, line string) {
gateway.jsonLines = append(gateway.jsonLines, line)
lineStruct := map[string]interface{}{}
jsonErr := json.Unmarshal([]byte(line), &lineStruct)
if jsonErr != nil {
// do not decode msg
return
}
msg := lineStruct["msg"].(string)
msgLogs := gateway.logMessages[msg]
if msgLogs == nil {
msgLogs = []LogMessage{lineStruct}
} else {
msgLogs = append(msgLogs, lineStruct)
}
gateway.logMessages[msg] = msgLogs
}
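// readAddrFromStdout scans the child's stdout until both the real HTTP and
// real TChannel listen addresses have been reported, populating the host and
// port fields on the gateway; it errors out if stdout ends before then.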
func readAddrFromStdout(testGateway *ChildProcessGateway, reader *bufio.Reader) error {
var msg string
var httpFound, tchannelFound []string
for {
line, err := reader.ReadString('\n')
if err == io.EOF {
break
}
if len(line) > 0 && line[0] == '{' {
addJSONLine(testGateway, line)
msg += line
}
printJSONLine(line)
if httpFound == nil {
httpFound = realHTTPAddrRegex.FindStringSubmatch(line)
if httpFound != nil {
testGateway.RealHTTPAddr = httpFound[1]
indexOfSep := strings.LastIndex(testGateway.RealHTTPAddr, ":")
if indexOfSep != -1 {
host := testGateway.RealHTTPAddr[0:indexOfSep]
port := testGateway.RealHTTPAddr[indexOfSep+1:]
portNum, err := strconv.Atoi(port)
testGateway.RealHTTPHost = host
if err != nil {
testGateway.RealHTTPPort = -1
} else {
testGateway.RealHTTPPort = portNum
}
} else {
httpFound = nil
}
}
}
if tchannelFound == nil {
tchannelFound = realTChannelAddrRegex.FindStringSubmatch(line)
if tchannelFound != nil {
testGateway.RealTChannelAddr = tchannelFound[1]
indexOfSep := strings.LastIndex(testGateway.RealTChannelAddr, ":")
if indexOfSep != -1 {
host := testGateway.RealTChannelAddr[0:indexOfSep]
port := testGateway.RealTChannelAddr[indexOfSep+1:]
portNum, err := strconv.Atoi(port)
testGateway.RealTChannelHost = host
if err != nil {
testGateway.RealTChannelPort = -1
} else {
testGateway.RealTChannelPort = portNum
}
} else {
tchannelFound = nil
}
}
}
if httpFound != nil && tchannelFound != nil {
return nil
}
}
if httpFound == nil || tchannelFound == nil {
return &MalformedStdoutError{
Type: "malformed.stdout",
StdoutLine: msg,
Message: "Could not find real http/tchannel address in server stdout",
}
}
return nil
}
func (gateway *ChildProcessGateway) copyToStdout(reader *bufio.Reader) {
for {
line, err := reader.ReadString('\n')
if err != nil {
break
}
if line == "PASS\n" {
continue
} else if strings.Index(line, "coverage:") == 0 {
continue
}
if line[0] == '{' {
addJSONLine(gateway, line)
}
printJSONLine(line)
}
}
func printJSONLine(line string) {
lineStruct := map[string]interface{}{}
jsonErr := json.Unmarshal([]byte(line), &lineStruct)
if jsonErr == nil {
// Consult the ignore list only when the line is valid JSON with a string "msg" field.
if msg, ok := lineStruct["msg"].(string); ok && infoIgnoreList[msg] {
// Do not print a line if it is in the ignore list.
return
}
}
_, err := os.Stdout.WriteString(line)
if err != nil {
// TODO: betterer...
panic(err)
}
}
| [
"\"COVER_ON\""
]
| []
| [
"COVER_ON"
]
| [] | ["COVER_ON"] | go | 1 | 0 | |
cmd/gardenlet/app/gardenlet.go | // Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package app
import (
"bufio"
"context"
"errors"
"flag"
"fmt"
"os"
"path/filepath"
"regexp"
goruntime "runtime"
"strings"
"time"
cmdutils "github.com/gardener/gardener/cmd/utils"
gardencore "github.com/gardener/gardener/pkg/apis/core"
gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1"
gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1"
v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
"github.com/gardener/gardener/pkg/client/kubernetes"
"github.com/gardener/gardener/pkg/client/kubernetes/clientmap"
clientmapbuilder "github.com/gardener/gardener/pkg/client/kubernetes/clientmap/builder"
"github.com/gardener/gardener/pkg/client/kubernetes/clientmap/keys"
"github.com/gardener/gardener/pkg/features"
"github.com/gardener/gardener/pkg/gardenlet/apis/config"
configv1alpha1 "github.com/gardener/gardener/pkg/gardenlet/apis/config/v1alpha1"
configvalidation "github.com/gardener/gardener/pkg/gardenlet/apis/config/validation"
"github.com/gardener/gardener/pkg/gardenlet/bootstrap"
"github.com/gardener/gardener/pkg/gardenlet/bootstrap/certificate"
"github.com/gardener/gardener/pkg/gardenlet/controller"
gardenletfeatures "github.com/gardener/gardener/pkg/gardenlet/features"
"github.com/gardener/gardener/pkg/healthz"
"github.com/gardener/gardener/pkg/logger"
"github.com/gardener/gardener/pkg/server"
"github.com/gardener/gardener/pkg/server/routes"
"github.com/gardener/gardener/pkg/utils"
kutil "github.com/gardener/gardener/pkg/utils/kubernetes"
"github.com/gardener/gardener/pkg/utils/secrets"
"github.com/go-logr/logr"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
certificatesv1 "k8s.io/api/certificates/v1"
certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
coordinationv1 "k8s.io/api/coordination/v1"
corev1 "k8s.io/api/core/v1"
eventsv1 "k8s.io/api/events/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/clock"
kubernetesclientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/record"
"k8s.io/component-base/version"
"k8s.io/component-base/version/verflag"
runtimelog "sigs.k8s.io/controller-runtime/pkg/log"
)
// Options has all the context and parameters needed to run a Gardenlet.
type Options struct {
// ConfigFile is the location of the Gardenlet's configuration file.
ConfigFile string
config *config.GardenletConfiguration
scheme *runtime.Scheme
codecs serializer.CodecFactory
}
// AddFlags adds flags for a specific Gardenlet to the specified FlagSet.
func (o *Options) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&o.ConfigFile, "config", o.ConfigFile, "The path to the configuration file.")
}
// NewOptions returns a new Options object.
func NewOptions() (*Options, error) {
o := &Options{
config: new(config.GardenletConfiguration),
}
o.scheme = runtime.NewScheme()
o.codecs = serializer.NewCodecFactory(o.scheme)
if err := config.AddToScheme(o.scheme); err != nil {
return nil, err
}
if err := configv1alpha1.AddToScheme(o.scheme); err != nil {
return nil, err
}
if err := gardencore.AddToScheme(o.scheme); err != nil {
return nil, err
}
if err := gardencorev1beta1.AddToScheme(o.scheme); err != nil {
return nil, err
}
return o, nil
}
// loadConfigFromFile loads the content of file and decodes it as a
// GardenletConfiguration object.
func (o *Options) loadConfigFromFile(file string) (*config.GardenletConfiguration, error) {
data, err := os.ReadFile(file)
if err != nil {
return nil, err
}
return o.decodeConfig(data)
}
// decodeConfig decodes data as a GardenletConfiguration object.
func (o *Options) decodeConfig(data []byte) (*config.GardenletConfiguration, error) {
gardenletConfig := &config.GardenletConfiguration{}
if _, _, err := o.codecs.UniversalDecoder().Decode(data, nil, gardenletConfig); err != nil {
return nil, err
}
return gardenletConfig, nil
}
func (o *Options) configFileSpecified() error {
if len(o.ConfigFile) == 0 {
return fmt.Errorf("missing Gardenlet config file")
}
return nil
}
// Validate validates all the required options.
func (o *Options) validate(args []string) error {
if len(args) != 0 {
return errors.New("arguments are not supported")
}
return nil
}
func run(ctx context.Context, o *Options) error {
c, err := o.loadConfigFromFile(o.ConfigFile)
if err != nil {
return fmt.Errorf("unable to read the configuration file: %w", err)
}
if errs := configvalidation.ValidateGardenletConfiguration(c, nil, false); len(errs) > 0 {
return fmt.Errorf("errors validating the configuration: %+v", errs)
}
o.config = c
// Add feature flags
if err := gardenletfeatures.FeatureGate.SetFromMap(o.config.FeatureGates); err != nil {
return err
}
kubernetes.UseCachedRuntimeClients = gardenletfeatures.FeatureGate.Enabled(features.CachedRuntimeClients)
if gardenletfeatures.FeatureGate.Enabled(features.ReversedVPN) && !gardenletfeatures.FeatureGate.Enabled(features.APIServerSNI) {
return fmt.Errorf("inconsistent feature gate: APIServerSNI is required for ReversedVPN (APIServerSNI: %t, ReversedVPN: %t)",
gardenletfeatures.FeatureGate.Enabled(features.APIServerSNI), gardenletfeatures.FeatureGate.Enabled(features.ReversedVPN))
}
gardenlet, err := NewGardenlet(ctx, o.config)
if err != nil {
return err
}
return gardenlet.Run(ctx)
}
// NewCommandStartGardenlet creates a *cobra.Command object with default parameters
func NewCommandStartGardenlet() *cobra.Command {
opts, err := NewOptions()
if err != nil {
panic(err)
}
cmd := &cobra.Command{
Use: "gardenlet",
Short: "Launch the Gardenlet",
Long: `In essence, the Gardener is an extension API server along with a bundle
of Kubernetes controllers which introduce new API objects in an existing Kubernetes
cluster (which is called Garden cluster) in order to use them for the management of
further Kubernetes clusters (which are called Shoot clusters).
To do that reliably and to offer a certain quality of service, it requires to control
the main components of a Kubernetes cluster (etcd, API server, controller manager, scheduler).
These so-called control plane components are hosted in Kubernetes clusters themselves
(which are called Seed clusters).`,
RunE: func(cmd *cobra.Command, args []string) error {
verflag.PrintAndExitIfRequested()
if err := opts.configFileSpecified(); err != nil {
return err
}
if err := opts.validate(args); err != nil {
return err
}
return run(cmd.Context(), opts)
},
SilenceUsage: true,
}
flags := cmd.Flags()
verflag.AddFlags(flags)
opts.AddFlags(flags)
return cmd
}
// Gardenlet represents all the parameters required to start the
// Gardenlet.
type Gardenlet struct {
Config *config.GardenletConfiguration
Identity *gardencorev1beta1.Gardener
GardenClusterIdentity string
ClientMap clientmap.ClientMap
Log logr.Logger
Recorder record.EventRecorder
LeaderElection *leaderelection.LeaderElectionConfig
HealthManager healthz.Manager
CertificateManager *certificate.Manager
ClientCertificateExpirationTimestamp *metav1.Time
}
// NewGardenlet is the main entry point of instantiating a new Gardenlet.
func NewGardenlet(ctx context.Context, cfg *config.GardenletConfiguration) (*Gardenlet, error) {
if cfg == nil {
return nil, errors.New("config is required")
}
// Initialize logrus and zap logger (for the migration period, we will use both in parallel)
logrusLogger := logger.NewLogger(*cfg.LogLevel, *cfg.LogFormat)
log, err := logger.NewZapLogger(*cfg.LogLevel, *cfg.LogFormat)
if err != nil {
return nil, fmt.Errorf("error instantiating zap logger: %w", err)
}
// set the logger used by sigs.k8s.io/controller-runtime
runtimelog.SetLogger(log)
log.Info("Starting gardenlet", "version", version.Get())
log.Info("Feature Gates", "featureGates", gardenletfeatures.FeatureGate.String())
if flag := flag.Lookup("v"); flag != nil {
if err := flag.Value.Set(fmt.Sprintf("%d", cfg.KubernetesLogLevel)); err != nil {
return nil, err
}
}
// Prepare a Kubernetes client object for the Garden cluster which contains all the Clientsets
// that can be used to access the Kubernetes API.
if kubeconfig := os.Getenv("GARDEN_KUBECONFIG"); kubeconfig != "" {
cfg.GardenClientConnection.Kubeconfig = kubeconfig
}
if kubeconfig := os.Getenv("KUBECONFIG"); kubeconfig != "" {
cfg.SeedClientConnection.Kubeconfig = kubeconfig
}
var (
clientCertificateExpirationTimestamp *metav1.Time
kubeconfigFromBootstrap []byte
csrName string
seedName string
)
// constructs a seed client for `SeedClientConnection.kubeconfig` or if not set,
// creates a seed client based on the service account token mounted into the gardenlet container running in Kubernetes
// when running outside of Kubernetes, `SeedClientConnection.kubeconfig` has to be set either directly or via the environment variable "KUBECONFIG"
seedClient, err := kubernetes.NewClientFromFile(
"",
cfg.SeedClientConnection.ClientConnectionConfiguration.Kubeconfig,
kubernetes.WithClientConnectionOptions(cfg.SeedClientConnection.ClientConnectionConfiguration),
kubernetes.WithDisabledCachedClient(),
)
if err != nil {
return nil, err
}
if cfg.GardenClientConnection.KubeconfigSecret != nil {
kubeconfigFromBootstrap, csrName, seedName, err = bootstrapKubeconfig(ctx, logrusLogger, seedClient.Client(), cfg)
if err != nil {
return nil, err
}
} else {
log.Info("No kubeconfig secret given in the configuration under `.gardenClientConnection.kubeconfigSecret`. Skipping the kubeconfig bootstrap process and certificate rotation")
}
if kubeconfigFromBootstrap == nil {
log.Info("Falling back to the kubeconfig specified in the configuration under `.gardenClientConnection.kubeconfig`")
if len(cfg.GardenClientConnection.Kubeconfig) == 0 {
return nil, fmt.Errorf("the configuration file needs to either specify a Garden API Server kubeconfig under `.gardenClientConnection.kubeconfig` or provide bootstrapping information. " +
"To configure the Gardenlet for bootstrapping, provide the secret containing the bootstrap kubeconfig under `.gardenClientConnection.kubeconfigSecret` and also the secret name where the created kubeconfig should be stored for further use via`.gardenClientConnection.kubeconfigSecret`")
}
} else {
gardenClientCertificate, err := certificate.GetCurrentCertificate(logrusLogger, kubeconfigFromBootstrap, cfg.GardenClientConnection)
if err != nil {
return nil, err
}
clientCertificateExpirationTimestamp = &metav1.Time{Time: gardenClientCertificate.Leaf.NotAfter}
log.Info("The client certificate used to communicate with the garden cluster has expiration date", "expirationDate", gardenClientCertificate.Leaf.NotAfter)
}
restCfg, err := kubernetes.RESTConfigFromClientConnectionConfiguration(&cfg.GardenClientConnection.ClientConnectionConfiguration, kubeconfigFromBootstrap)
if err != nil {
return nil, err
}
gardenClientMapBuilder := clientmapbuilder.NewGardenClientMapBuilder().
WithRESTConfig(restCfg).
// gardenlet does not have the required RBAC permissions for listing/watching the following resources, so let's prevent any
// attempts to cache them
WithUncached(
&gardencorev1alpha1.ExposureClass{},
&gardencorev1alpha1.ShootState{},
&gardencorev1beta1.CloudProfile{},
&gardencorev1beta1.ControllerDeployment{},
&gardencorev1beta1.Project{},
&gardencorev1beta1.SecretBinding{},
&certificatesv1.CertificateSigningRequest{},
&certificatesv1beta1.CertificateSigningRequest{},
&coordinationv1.Lease{},
&corev1.Namespace{},
&corev1.ConfigMap{},
&corev1.Event{},
&eventsv1.Event{},
).
ForSeed(cfg.SeedConfig.Name)
seedClientMapBuilder := clientmapbuilder.NewSeedClientMapBuilder().
WithClientConnectionConfig(&cfg.SeedClientConnection.ClientConnectionConfiguration)
shootClientMapBuilder := clientmapbuilder.NewShootClientMapBuilder().
WithClientConnectionConfig(&cfg.ShootClientConnection.ClientConnectionConfiguration)
clientMap, err := clientmapbuilder.NewDelegatingClientMapBuilder().
WithGardenClientMapBuilder(gardenClientMapBuilder).
WithSeedClientMapBuilder(seedClientMapBuilder).
WithShootClientMapBuilder(shootClientMapBuilder).
Build()
if err != nil {
return nil, fmt.Errorf("failed to build ClientMap: %w", err)
}
k8sGardenClient, err := clientMap.GetClient(ctx, keys.ForGarden())
if err != nil {
return nil, fmt.Errorf("failed to get garden client: %w", err)
}
// Delete bootstrap auth data if certificate was newly acquired
if len(csrName) > 0 && len(seedName) > 0 {
log.Info("Deleting bootstrap authentication data used to request a certificate")
if err := bootstrap.DeleteBootstrapAuth(ctx, k8sGardenClient.Client(), k8sGardenClient.Client(), csrName, seedName); err != nil {
return nil, err
}
}
// Set up leader election if enabled and prepare event recorder.
var (
leaderElectionConfig *leaderelection.LeaderElectionConfig
recorder = cmdutils.CreateRecorder(k8sGardenClient.Kubernetes(), "gardenlet")
)
if cfg.LeaderElection.LeaderElect {
seedRestCfg, err := kubernetes.RESTConfigFromClientConnectionConfiguration(&cfg.SeedClientConnection.ClientConnectionConfiguration, nil)
if err != nil {
return nil, err
}
k8sSeedClientLeaderElection, err := kubernetesclientset.NewForConfig(seedRestCfg)
if err != nil {
return nil, fmt.Errorf("failed to create client for leader election: %w", err)
}
leaderElectionConfig, err = cmdutils.MakeLeaderElectionConfig(
*cfg.LeaderElection,
k8sSeedClientLeaderElection,
cmdutils.CreateRecorder(k8sSeedClientLeaderElection, "gardenlet"),
)
if err != nil {
return nil, err
}
}
identity, err := determineGardenletIdentity()
if err != nil {
return nil, err
}
gardenClusterIdentity := &corev1.ConfigMap{}
if err := k8sGardenClient.Client().Get(ctx, kutil.Key(metav1.NamespaceSystem, v1beta1constants.ClusterIdentity), gardenClusterIdentity); err != nil {
return nil, fmt.Errorf("unable to get Gardener`s cluster-identity ConfigMap: %w", err)
}
clusterIdentity, ok := gardenClusterIdentity.Data[v1beta1constants.ClusterIdentity]
if !ok {
return nil, errors.New("unable to extract Gardener`s cluster identity from cluster-identity ConfigMap")
}
// create the certificate manager to schedule certificate rotations
var certificateManager *certificate.Manager
if cfg.GardenClientConnection.KubeconfigSecret != nil {
certificateManager = certificate.NewCertificateManager(clientMap, seedClient.Client(), cfg)
}
return &Gardenlet{
Identity: identity,
GardenClusterIdentity: clusterIdentity,
Config: cfg,
Log: log,
Recorder: recorder,
ClientMap: clientMap,
LeaderElection: leaderElectionConfig,
CertificateManager: certificateManager,
ClientCertificateExpirationTimestamp: clientCertificateExpirationTimestamp,
}, nil
}
// Run runs the Gardenlet. This should never exit.
func (g *Gardenlet) Run(ctx context.Context) error {
controllerCtx, controllerCancel := context.WithCancel(ctx)
defer controllerCancel()
// Initialize /healthz manager.
healthGracePeriod := time.Duration((*g.Config.Controllers.Seed.LeaseResyncSeconds)*(*g.Config.Controllers.Seed.LeaseResyncMissThreshold)) * time.Second
g.HealthManager = healthz.NewPeriodicHealthz(clock.RealClock{}, healthGracePeriod)
if g.CertificateManager != nil {
g.CertificateManager.ScheduleCertificateRotation(controllerCtx, controllerCancel, g.Recorder)
}
// Start HTTPS server.
if g.Config.Server.HTTPS.TLS == nil {
g.Log.Info("No TLS server certificates provided, self-generating them now")
_, _, tempDir, err := secrets.SelfGenerateTLSServerCertificate("gardenlet", []string{
"gardenlet",
fmt.Sprintf("gardenlet.%s", v1beta1constants.GardenNamespace),
fmt.Sprintf("gardenlet.%s.svc", v1beta1constants.GardenNamespace),
}, nil)
if err != nil {
return err
}
g.Config.Server.HTTPS.TLS = &config.TLSServer{
ServerCertPath: filepath.Join(tempDir, secrets.DataKeyCertificate),
ServerKeyPath: filepath.Join(tempDir, secrets.DataKeyPrivateKey),
}
g.Log.Info("TLS server certificates successfully self-generated")
}
g.startServer(ctx)
// Prepare a reusable run function.
run := func(ctx context.Context) error {
g.HealthManager.Start()
return g.startControllers(ctx)
}
leaderElectionCtx, leaderElectionCancel := context.WithCancel(context.Background())
// If leader election is enabled, run via LeaderElector until done and exit.
if g.LeaderElection != nil {
g.LeaderElection.Callbacks = leaderelection.LeaderCallbacks{
OnStartedLeading: func(_ context.Context) {
g.Log.Info("Acquired leadership, starting controllers")
if err := run(controllerCtx); err != nil {
g.Log.Error(err, "Failed to run controllers")
}
leaderElectionCancel()
},
OnStoppedLeading: func() {
g.Log.Info("Lost leadership, terminating")
controllerCancel()
},
}
leaderElector, err := leaderelection.NewLeaderElector(*g.LeaderElection)
if err != nil {
return fmt.Errorf("couldn't create leader elector: %w", err)
}
leaderElector.Run(leaderElectionCtx)
return nil
}
// Leader election is disabled, thus run directly until done.
leaderElectionCancel()
return run(controllerCtx)
}
func (g *Gardenlet) startServer(ctx context.Context) {
builder := server.
NewBuilder().
WithBindAddress(g.Config.Server.HTTPS.BindAddress).
WithPort(g.Config.Server.HTTPS.Port).
WithTLS(g.Config.Server.HTTPS.TLS.ServerCertPath, g.Config.Server.HTTPS.TLS.ServerKeyPath).
WithHandler("/metrics", promhttp.Handler()).
WithHandlerFunc("/healthz", healthz.HandlerFunc(g.HealthManager))
if g.Config.Debugging != nil && g.Config.Debugging.EnableProfiling {
routes.Profiling{}.AddToBuilder(builder)
if g.Config.Debugging.EnableContentionProfiling {
goruntime.SetBlockProfileRate(1)
}
}
go builder.Build().Start(ctx)
}
func (g *Gardenlet) startControllers(ctx context.Context) error {
return controller.NewGardenletControllerFactory(
g.ClientMap,
g.Config,
g.Identity,
g.GardenClusterIdentity,
g.Recorder,
g.HealthManager,
g.ClientCertificateExpirationTimestamp,
).Run(ctx)
}
// We want to determine the Docker container id of the currently running Gardenlet because
// we need to identify for still ongoing operations whether another Gardenlet instance is
// still operating the respective Shoots. When running locally, we generate a random string because
// there is no container id.
func determineGardenletIdentity() (*gardencorev1beta1.Gardener, error) {
var (
validID = regexp.MustCompile(`([0-9a-f]{64})`)
gardenletID string
gardenletName string
err error
)
gardenletName, err = os.Hostname()
if err != nil {
return nil, fmt.Errorf("unable to get hostname: %w", err)
}
// If running inside a Kubernetes cluster (as container) we can read the container id from the proc file system.
// Otherwise generate a random string for the gardenletID
if cGroupFile, err := os.Open("/proc/self/cgroup"); err == nil {
defer cGroupFile.Close()
reader := bufio.NewReader(cGroupFile)
var cgroupV1 string
for {
line, err := reader.ReadString('\n')
if err != nil {
break
}
// Store cgroup-v1 result for fall back
if strings.HasPrefix(line, "1:name=systemd") {
cgroupV1 = line
}
// Always prefer cgroup-v2
if strings.HasPrefix(line, "0::") {
if id := extractID(line); validID.MatchString(id) {
gardenletID = id
break
}
}
}
// Fall back to cgroup-v1 if possible
if len(gardenletID) == 0 && len(cgroupV1) > 0 {
gardenletID = extractID(cgroupV1)
}
}
if gardenletID == "" {
gardenletID, err = utils.GenerateRandomString(64)
if err != nil {
return nil, fmt.Errorf("unable to generate gardenletID: %w", err)
}
}
return &gardencorev1beta1.Gardener{
ID: gardenletID,
Name: gardenletName,
Version: version.Get().GitVersion,
}, nil
}
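// extractID extracts the trailing container id from a single /proc/self/cgroup line.
// As a rough illustration (the exact layout depends on the container runtime), a cgroup-v2 line such as
//   0::/system.slice/docker-<64-hex-id>.scope
// yields the 64-character hex id after stripping the "docker-" prefix and ".scope" suffix.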
func extractID(line string) string {
var (
id string
splitBySlash = strings.Split(line, "/")
)
if len(splitBySlash) == 0 {
return ""
}
id = strings.TrimSpace(splitBySlash[len(splitBySlash)-1])
id = strings.TrimSuffix(id, ".scope")
id = strings.TrimPrefix(id, "docker-")
return id
}
| [
"\"GARDEN_KUBECONFIG\"",
"\"KUBECONFIG\""
]
| []
| [
"KUBECONFIG",
"GARDEN_KUBECONFIG"
]
| [] | ["KUBECONFIG", "GARDEN_KUBECONFIG"] | go | 2 | 0 | |
FlaskServer/torrent_collection.py | from torrents.torrent import Torrent, TorStatus
from torrents import database as DB
"""
A class to contain a collection of torrents
"""
class TorrentCollection(object):
def __init__(self, tordb):
self.tordb = tordb
self.torrents = []
# Make the class iterable
def __iter__(self):
return iter(self.torrents)
# Get all torrents from the database
def refresh(self):
# Use list() so the collection can be iterated more than once (map() returns a one-shot iterator in Python 3)
self.torrents = list(map(Torrent, DB.get_all(self.tordb)))
# Return any torrents that have not been started
def not_started(self):
return [t for t in self.torrents if not t.is_started()]
# Return any torrents marked for deletion
def marked_delete(self):
return [t for t in self.torrents if t.marked_delete()]
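# Return any torrents that have finished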
def finished(self):
return [t for t in self.torrents if t.finished()]
# Update the database with the torrent objects
def update(self):
for torrent in self.torrents:
DB.update_full(self.tordb, torrent.id, torrent.serialize())
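# Example usage (a sketch; assumes a database handle created via torrents.database):
#   collection = TorrentCollection(tordb)
#   collection.refresh()
#   for torrent in collection.not_started():
#       ...  # start the torrent here
#   collection.update()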
| []
| []
| []
| [] | [] | python | null | null | null |
jenkins-trigger.py | #!/usr/bin/env python3
#
# @source https://github.com/briceburg/jenkins-trigger
# @requires requests
#
# usage: jenkins-trigger.py [-h] [--user USER] [--token TOKEN] [--param name=value ...] [--no-wait] [-vv] job_url
#
# Trigger a Jenkins job and [optionally] bubble its result.
# Returns 0 on success, 100 on failure, 101 on aborted, 102 on other status, and 1 on error.
#
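# Example invocation (hypothetical server, job, and credentials):
#   JOB_USER_TOKEN=abc123 ./jenkins-trigger.py --user alice --param color=purple \
#       https://jenkins.example.com/job/test-folder/job/foo-job
#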
import sys
import argparse
import logging
import os
import requests
import re
import time
def main(args):
"""Trigger a Jenkins Job over the REST API"""
logging.basicConfig(
format="%(asctime)s [%(levelname)s] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.DEBUG if args.verbose else logging.INFO,
)
try:
client = JenkinsJobClient(args.job_url, args.user, args.token)
queueId = client.triggerJob(args.job_params)
logging.info("successfully triggered job with queueId: %d" % queueId)
if args.no_wait:
return
start_time = time.time()
job = None  # track the most recent build (if any) so the finally block can log its URL
logging.info("polling for job completion")
while time.time() < start_time + args.timeout:
time.sleep(args.interval)
job = client.getJobByQueueId(queueId)
if not job:
logging.info("job is still in queue")
else:
status = job["result"]
if not status:
logging.info("currently executing build %s" % job["id"])
elif status == "SUCCESS":
logging.info("job completed successfully")
sys.exit(0)
elif status == "FAILURE":
logging.error("job completed in failure")
sys.exit(100)
elif status == "ABORTED":
logging.error("job was aborted")
sys.exit(101)
else:
logging.error("job returned status: %s" % status)
sys.exit(102)
logging.error("Timeout waiting for job to complete")
raise TimeoutError()
except SystemExit as e:
sys.exit(e)
except Exception as e:
logging.critical(e)
sys.exit(1)
finally:
try:
# print the job url if it's available at the end.
# this way we aren't filling logs with long URLs.
logging.info("job url: %s" % job["url"])
except:
pass
class JenkinsJobClient:
def __init__(self, job_url, user=None, password=None):
self.job_url = job_url.strip("/")
self.auth = (user, password) if user and password else None
# ensure we can access the job API
logging.info("determining if job is buildable")
json = self.get_json("/api/json")
if "buildable" not in json or not json["buildable"]:
raise ValueError("%s does not appear to be a buildable job" % self.job_url)
def triggerJob(self, params=[]):
logging.info("building %s" % self.job_url)
path = "/buildWithParameters" if params else "/build"
r = requests.post(self.job_url + path, data=params, auth=self.auth)
self.response(r)
# triggering a job returns the queue location
if not r.headers.get("Location"):
raise Exception("failed to trigger job")
# extract the queueId and return it
match = re.search(r"/queue/item/(\d+)", r.headers["Location"])
if not match:
raise Exception("failed determining job queueId")
return int(match.group(1))
def getJobByQueueId(self, queueId: int):
"""given a queueId, fetch the related job and return its status. None if no job matches the queueId"""
logging.debug("fetching job status")
json = self.get_json("/api/json?tree=builds[url,id,result,queueId]")
if not "builds" in json:
raise Exception("failed fetching builds for %s" % self.job_url)
for build in json["builds"]:
if build["queueId"] == queueId:
return build
return None
def get_json(self, path):
r = requests.get(self.job_url + path, auth=self.auth)
self.response(r)
return r.json()
def response(self, r: requests.Response):
self.log_request(r.request)
self.log_response(r)
r.raise_for_status()
def log_request(self, req):
logging.debug(
"HTTP/1.1 {method} {url}\n{headers}\n\n{body}".format(
method=req.method,
url=req.url,
headers="\n".join(
"{}: {}".format(k, v) for k, v in req.headers.items()
),
body=req.body,
)
)
def log_response(self, res):
logging.debug(
"HTTP/1.1 {status_code}\n{headers}\n\n{body}".format(
status_code=res.status_code,
headers="\n".join(
"{}: {}".format(k, v) for k, v in res.headers.items()
),
body=res.content,
)
)
if __name__ == "__main__":
class KVArg(argparse.Action):
"""argparse action supporting Key=Value arguments"""
def __call__(self, parser, args, value, option_string=None):
try:
for k, v in [value.split("=", 1)]:
getattr(args, self.dest).append((k, v))
except Exception as e:
raise argparse.ArgumentError(
self, "Could not parse '%s'. Please use key=value format" % value
)
parser = argparse.ArgumentParser(
description="Trigger a Jenkins job and [optionally] bubble its result. Returns 0 on success, 100 on failue, 101 on aborted, 102 on other.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"job_url",
help="Job URL, e.g. 'https://jenkins.iceburg.net/job/test-folder/job/foo-job'. (Env: JOB_URL)",
nargs="?",
default=os.environ.get("JOB_URL"),
)
parser.add_argument(
"-u",
"--user",
help="User Name (Env: JOB_USER_NAME)",
default=os.environ.get("JOB_USER_NAME"),
)
parser.add_argument(
"-p",
"--token",
"--password",
help="User Token or Password (Env: JOB_USER_TOKEN)",
)
parser.add_argument(
"--param",
dest="job_params",
nargs="?",
action=KVArg,
metavar="name=value",
help="Job Parameters, e.g. 'color=purple'. stackable.",
default=[],
)
parser.add_argument(
"--no-wait",
help="Return immediately and do not bubble job completion status.",
action="store_true",
)
parser.add_argument(
"--timeout",
help="Time in seconds to wait for job to complete",
default=1800,
type=int,
)
parser.add_argument(
"--interval", help="Poll interval in seconds", default=10, type=int
)
parser.add_argument(
"-vv", "--verbose", help="enables debug output", action="store_true"
)
args = parser.parse_args()
if not args.job_url:
sys.exit(parser.print_help())
if not args.token:
args.token = os.environ.get("JOB_USER_TOKEN")
main(args)
| []
| []
| [
"JOB_USER_TOKEN",
"JOB_URL",
"JOB_USER_NAME"
]
| [] | ["JOB_USER_TOKEN", "JOB_URL", "JOB_USER_NAME"] | python | 3 | 0 | |
pkg/backend/httpstate/context.go | // Copyright 2016-2018, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httpstate
import (
"path/filepath"
"strings"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/pkg/util/fsutil"
"github.com/pulumi/pulumi/pkg/workspace"
)
// getContextAndMain computes the root path of the archive as well as the relative path (from the archive root)
// to the main function. In the case where there is no custom archive root, things are simple: the archive root
// is the root of the project, and main can remain unchanged. When a context is set, however, we need to do some
// work:
//
// 1. We need to ensure the archive root is "above" the project root.
// 2. We need to change "main", which was relative to the project root, to be relative to the archive root.
//
// Note that the relative paths in Pulumi.yaml for Context and Main are always unix style paths, but the returned
// context is an absolute path, using file system specific separators. We continue to use a unix style partial
// path for Main.
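//
// As a rough worked example (hypothetical paths): given a project root of /home/user/proj, a Context of
// "..", and a Main of "index.js", the archive root becomes /home/user and Main becomes "proj/index.js".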
func getContextAndMain(proj *workspace.Project, projectRoot string) (string, string, error) {
context, err := filepath.Abs(projectRoot)
if err != nil {
return "", "", err
}
main := proj.Main
if proj.Context != "" {
context, err = filepath.Abs(filepath.Join(context,
strings.Replace(proj.Context, "/", string(filepath.Separator), -1)))
if err != nil {
return "", "", err
}
if !strings.HasPrefix(projectRoot, context) {
return "", "", errors.Errorf("Context directory '%v' is not a parent of '%v'", context, projectRoot)
}
// Walk up to the archive root, starting from the project root, recording the directories we see,
// we'll combine these with the existing main value to get a main relative to the root of the archive
// which is what the pulumi-service expects. We use fsutil.WalkUp here, so we have to provide a dummy
// function which ignores every file we visit.
ignoreFileVisitFunc := func(string) bool {
// return false so fsutil.WalkUp does not stop early
return false
}
prefix := ""
_, err := fsutil.WalkUp(projectRoot, ignoreFileVisitFunc, func(p string) bool {
if p != context {
prefix = filepath.Base(p) + "/" + prefix
return true
}
return false
})
if err != nil {
return "", "", err
}
main = prefix + main
}
return context, main, nil
}
| []
| []
| []
| [] | [] | go | null | null | null |
runtests.py | #!/usr/bin/env python
import sys
import os
import shutil
import warnings
from django.core.management import execute_from_command_line
from wagtail.tests.settings import STATIC_ROOT, MEDIA_ROOT
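# Point Django at the test settings before the management command runs.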
os.environ['DJANGO_SETTINGS_MODULE'] = 'wagtail.tests.settings'
def runtests():
# Don't ignore DeprecationWarnings
warnings.simplefilter('default', DeprecationWarning)
argv = sys.argv[:1] + ['test'] + sys.argv[1:]
try:
execute_from_command_line(argv)
finally:
shutil.rmtree(STATIC_ROOT, ignore_errors=True)
shutil.rmtree(MEDIA_ROOT, ignore_errors=True)
if __name__ == '__main__':
runtests()
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
upup/pkg/fi/cloudup/apply_cluster.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudup
import (
"context"
"crypto/sha256"
"encoding/base64"
"fmt"
"io"
"net"
"net/url"
"os"
"path"
"sort"
"strconv"
"strings"
"github.com/blang/semver/v4"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
kopsbase "k8s.io/kops"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/registry"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/apis/kops/validation"
"k8s.io/kops/pkg/apis/nodeup"
"k8s.io/kops/pkg/assets"
"k8s.io/kops/pkg/client/simple"
"k8s.io/kops/pkg/dns"
"k8s.io/kops/pkg/featureflag"
"k8s.io/kops/pkg/model"
"k8s.io/kops/pkg/model/alimodel"
"k8s.io/kops/pkg/model/awsmodel"
"k8s.io/kops/pkg/model/azuremodel"
"k8s.io/kops/pkg/model/components"
"k8s.io/kops/pkg/model/components/etcdmanager"
"k8s.io/kops/pkg/model/components/kubeapiserver"
"k8s.io/kops/pkg/model/domodel"
"k8s.io/kops/pkg/model/gcemodel"
"k8s.io/kops/pkg/model/iam"
"k8s.io/kops/pkg/model/openstackmodel"
"k8s.io/kops/pkg/templates"
"k8s.io/kops/pkg/wellknownports"
"k8s.io/kops/upup/models"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/aliup"
"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
"k8s.io/kops/upup/pkg/fi/cloudup/azure"
"k8s.io/kops/upup/pkg/fi/cloudup/bootstrapchannelbuilder"
"k8s.io/kops/upup/pkg/fi/cloudup/cloudformation"
"k8s.io/kops/upup/pkg/fi/cloudup/do"
"k8s.io/kops/upup/pkg/fi/cloudup/gce"
"k8s.io/kops/upup/pkg/fi/cloudup/openstack"
"k8s.io/kops/upup/pkg/fi/cloudup/terraform"
"k8s.io/kops/upup/pkg/fi/cloudup/terraformWriter"
"k8s.io/kops/upup/pkg/fi/fitasks"
"k8s.io/kops/util/pkg/architectures"
"k8s.io/kops/util/pkg/hashing"
"k8s.io/kops/util/pkg/mirrors"
"k8s.io/kops/util/pkg/vfs"
)
const (
starline = "*********************************************************************************"
)
var (
// AlphaAllowGCE is a feature flag that gates GCE support while it is alpha
AlphaAllowGCE = featureflag.New("AlphaAllowGCE", featureflag.Bool(false))
// AlphaAllowALI is a feature flag that gates aliyun support while it is alpha
AlphaAllowALI = featureflag.New("AlphaAllowALI", featureflag.Bool(false))
// OldestSupportedKubernetesVersion is the oldest kubernetes version that is supported in Kops
OldestSupportedKubernetesVersion = "1.17.0"
// OldestRecommendedKubernetesVersion is the oldest kubernetes version that is not deprecated in Kops
OldestRecommendedKubernetesVersion = "1.19.0"
)
type ApplyClusterCmd struct {
Cloud fi.Cloud
Cluster *kops.Cluster
InstanceGroups []*kops.InstanceGroup
// NodeUpAssets are the assets for downloading nodeup
NodeUpAssets map[architectures.Architecture]*mirrors.MirroredAsset
// TargetName specifies how we are operating e.g. direct to GCE, or AWS, or dry-run, or terraform
TargetName string
// Target is the fi.Target we will operate against
Target fi.Target
// OutDir is a local directory in which we place output, can cache files etc
OutDir string
// Assets is a list of sources for files (primarily when not using everything containerized)
// Formats:
// raw url: http://... or https://...
// url with hash: <hex>@http://... or <hex>@https://...
Assets map[architectures.Architecture][]*mirrors.MirroredAsset
Clientset simple.Clientset
// DryRun is true if this is only a dry run
DryRun bool
// AllowKopsDowngrade permits applying with a kops version older than what was last used to apply to the cluster.
AllowKopsDowngrade bool
// RunTasksOptions defines parameters for task execution, e.g. retry interval
RunTasksOptions *fi.RunTasksOptions
// The channel we are using
channel *kops.Channel
// Phase can be set to a Phase to run the specific subset of tasks, if we don't want to run everything
Phase Phase
// LifecycleOverrides is passed in to override the lifecycle for one of more tasks.
// The key value is the task name such as InternetGateway and the value is the fi.Lifecycle
// that is re-mapped.
LifecycleOverrides map[string]fi.Lifecycle
// GetAssets is whether this is called just to obtain the list of assets.
GetAssets bool
// TaskMap is the map of tasks that we built (output)
TaskMap map[string]fi.Task
// ImageAssets are the image assets we use (output).
ImageAssets []*assets.ImageAsset
// FileAssets are the file assets we use (output).
FileAssets []*assets.FileAsset
}
func (c *ApplyClusterCmd) Run(ctx context.Context) error {
if c.InstanceGroups == nil {
list, err := c.Clientset.InstanceGroupsFor(c.Cluster).List(ctx, metav1.ListOptions{})
if err != nil {
return err
}
var instanceGroups []*kops.InstanceGroup
for i := range list.Items {
instanceGroups = append(instanceGroups, &list.Items[i])
}
c.InstanceGroups = instanceGroups
}
for _, ig := range c.InstanceGroups {
// Try to guess the path for additional third party volume plugins in Flatcar
image := strings.ToLower(ig.Spec.Image)
if strings.Contains(image, "flatcar") {
if c.Cluster.Spec.Kubelet == nil {
c.Cluster.Spec.Kubelet = &kops.KubeletConfigSpec{}
}
if c.Cluster.Spec.Kubelet.VolumePluginDirectory == "" {
c.Cluster.Spec.Kubelet.VolumePluginDirectory = "/var/lib/kubelet/volumeplugins/"
}
}
}
channel, err := ChannelForCluster(c.Cluster)
if err != nil {
klog.Warningf("%v", err)
}
c.channel = channel
securityLifecycle := fi.LifecycleSync
networkLifecycle := fi.LifecycleSync
clusterLifecycle := fi.LifecycleSync
switch c.Phase {
case Phase(""):
// Everything ... the default
case PhaseNetwork:
securityLifecycle = fi.LifecycleIgnore
clusterLifecycle = fi.LifecycleIgnore
case PhaseSecurity:
networkLifecycle = fi.LifecycleExistsAndWarnIfChanges
clusterLifecycle = fi.LifecycleIgnore
case PhaseCluster:
if c.TargetName == TargetDryRun {
securityLifecycle = fi.LifecycleExistsAndWarnIfChanges
networkLifecycle = fi.LifecycleExistsAndWarnIfChanges
} else {
networkLifecycle = fi.LifecycleExistsAndValidates
securityLifecycle = fi.LifecycleExistsAndValidates
}
default:
return fmt.Errorf("unknown phase %q", c.Phase)
}
if c.GetAssets {
networkLifecycle = fi.LifecycleIgnore
securityLifecycle = fi.LifecycleIgnore
clusterLifecycle = fi.LifecycleIgnore
}
assetBuilder := assets.NewAssetBuilder(c.Cluster, c.GetAssets)
err = c.upgradeSpecs(assetBuilder)
if err != nil {
return err
}
err = c.validateKopsVersion()
if err != nil {
return err
}
err = c.validateKubernetesVersion()
if err != nil {
return err
}
cluster := c.Cluster
configBase, err := vfs.Context.BuildVfsPath(cluster.Spec.ConfigBase)
if err != nil {
return fmt.Errorf("error parsing config base %q: %v", cluster.Spec.ConfigBase, err)
}
if !c.AllowKopsDowngrade {
kopsVersionUpdatedBytes, err := configBase.Join(registry.PathKopsVersionUpdated).ReadFile()
if err == nil {
kopsVersionUpdated := strings.TrimSpace(string(kopsVersionUpdatedBytes))
version, err := semver.Parse(kopsVersionUpdated)
if err != nil {
return fmt.Errorf("error parsing last kops version updated: %v", err)
}
if version.GT(semver.MustParse(kopsbase.Version)) {
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
fmt.Printf("The cluster was last updated by kops version %s\n", kopsVersionUpdated)
fmt.Printf("To permit updating by the older version %s, run with the --allow-kops-downgrade flag\n", kopsbase.Version)
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
return fmt.Errorf("kops version older than last used to update the cluster")
}
} else if !os.IsNotExist(err) {
return fmt.Errorf("error reading last kops version used to update: %v", err)
}
}
cloud := c.Cloud
err = validation.DeepValidate(c.Cluster, c.InstanceGroups, true, cloud)
if err != nil {
return err
}
if cluster.Spec.KubernetesVersion == "" {
return fmt.Errorf("KubernetesVersion not set")
}
if cluster.Spec.DNSZone == "" && !dns.IsGossipHostname(cluster.ObjectMeta.Name) {
return fmt.Errorf("DNSZone not set")
}
l := &Loader{}
l.Init()
keyStore, err := c.Clientset.KeyStore(cluster)
if err != nil {
return err
}
sshCredentialStore, err := c.Clientset.SSHCredentialStore(cluster)
if err != nil {
return err
}
secretStore, err := c.Clientset.SecretStore(cluster)
if err != nil {
return err
}
addonsClient := c.Clientset.AddonsFor(cluster)
addons, err := addonsClient.List()
if err != nil {
return fmt.Errorf("error fetching addons: %v", err)
}
// Normalize k8s version
versionWithoutV := strings.TrimSpace(cluster.Spec.KubernetesVersion)
versionWithoutV = strings.TrimPrefix(versionWithoutV, "v")
if cluster.Spec.KubernetesVersion != versionWithoutV {
klog.Warningf("Normalizing kubernetes version: %q -> %q", cluster.Spec.KubernetesVersion, versionWithoutV)
cluster.Spec.KubernetesVersion = versionWithoutV
}
// check if we should recommend turning off anonymousAuth
{
// we only check (rather than defaulting the field) because modifying the kubelet object messes with the output
warn := false
if cluster.Spec.Kubelet == nil {
warn = true
} else if cluster.Spec.Kubelet.AnonymousAuth == nil {
warn = true
}
if warn {
fmt.Println("")
fmt.Printf("%s\n", starline)
fmt.Println("")
fmt.Println("Kubelet anonymousAuth is currently turned on. This allows RBAC escalation and remote code execution possibilities.")
fmt.Println("It is highly recommended you turn it off by setting 'spec.kubelet.anonymousAuth' to 'false' via 'kops edit cluster'")
fmt.Println("")
fmt.Println("See https://kops.sigs.k8s.io/security/#kubelet-api")
fmt.Println("")
fmt.Printf("%s\n", starline)
fmt.Println("")
}
}
encryptionConfigSecretHash := ""
if fi.BoolValue(c.Cluster.Spec.EncryptionConfig) {
secret, err := secretStore.FindSecret("encryptionconfig")
if err != nil {
return fmt.Errorf("could not load encryptionconfig secret: %v", err)
}
if secret == nil {
fmt.Println("")
fmt.Println("You have encryptionConfig enabled, but no encryptionconfig secret has been set.")
fmt.Println("See `kops create secret encryptionconfig -h` and https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/")
return fmt.Errorf("could not find encryptionconfig secret")
}
hashBytes := sha256.Sum256(secret.Data)
encryptionConfigSecretHash = base64.URLEncoding.EncodeToString(hashBytes[:])
}
ciliumSpec := c.Cluster.Spec.Networking.Cilium
if ciliumSpec != nil && ciliumSpec.EnableEncryption {
secret, err := secretStore.FindSecret("ciliumpassword")
if err != nil {
return fmt.Errorf("could not load the ciliumpassword secret: %w", err)
}
if secret == nil {
fmt.Println("")
fmt.Println("You have cilium encryption enabled, but no ciliumpassword secret has been set.")
fmt.Println("See `kops create secret ciliumpassword -h`")
return fmt.Errorf("could not find ciliumpassword secret")
}
}
if err := c.addFileAssets(assetBuilder); err != nil {
return err
}
checkExisting := true
project := ""
var sshPublicKeys [][]byte
{
keys, err := sshCredentialStore.FindSSHPublicKeys(fi.SecretNameSSHPrimary)
if err != nil {
return fmt.Errorf("error retrieving SSH public key %q: %v", fi.SecretNameSSHPrimary, err)
}
for _, k := range keys {
sshPublicKeys = append(sshPublicKeys, []byte(k.Spec.PublicKey))
}
}
modelContext := &model.KopsModelContext{
IAMModelContext: iam.IAMModelContext{Cluster: cluster},
InstanceGroups: c.InstanceGroups,
}
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderGCE:
{
gceCloud := cloud.(gce.GCECloud)
project = gceCloud.Project()
if !AlphaAllowGCE.Enabled() {
return fmt.Errorf("GCE support is currently alpha, and is feature-gated. export KOPS_FEATURE_FLAGS=AlphaAllowGCE")
}
}
case kops.CloudProviderDO:
{
if len(sshPublicKeys) == 0 && (c.Cluster.Spec.SSHKeyName == nil || *c.Cluster.Spec.SSHKeyName == "") {
return fmt.Errorf("SSH public key must be specified when running with DigitalOcean (create with `kops create secret --name %s sshpublickey admin -i ~/.ssh/id_rsa.pub`)", cluster.ObjectMeta.Name)
}
}
case kops.CloudProviderAWS:
{
awsCloud := cloud.(awsup.AWSCloud)
accountID, partition, err := awsCloud.AccountInfo()
if err != nil {
return err
}
modelContext.AWSAccountID = accountID
modelContext.AWSPartition = partition
if len(sshPublicKeys) == 0 && c.Cluster.Spec.SSHKeyName == nil {
return fmt.Errorf("SSH public key must be specified when running with AWS (create with `kops create secret --name %s sshpublickey admin -i ~/.ssh/id_rsa.pub`)", cluster.ObjectMeta.Name)
}
if len(sshPublicKeys) > 1 {
return fmt.Errorf("exactly one 'admin' SSH public key can be specified when running with AWS; please delete a key using `kops delete secret`")
}
}
case kops.CloudProviderALI:
{
fmt.Println("")
fmt.Println("aliyun support has been deprecated due to lack of maintainers. It may be removed in a future version of kOps.")
fmt.Println("")
if !AlphaAllowALI.Enabled() {
return fmt.Errorf("aliyun support is currently alpha, and is feature-gated. export KOPS_FEATURE_FLAGS=AlphaAllowALI")
}
if len(sshPublicKeys) == 0 {
return fmt.Errorf("SSH public key must be specified when running with ALICloud (create with `kops create secret --name %s sshpublickey admin -i ~/.ssh/id_rsa.pub`)", cluster.ObjectMeta.Name)
}
if len(sshPublicKeys) != 1 {
return fmt.Errorf("exactly one 'admin' SSH public key can be specified when running with ALICloud; please delete a key using `kops delete secret`")
}
}
case kops.CloudProviderAzure:
{
if !featureflag.Azure.Enabled() {
return fmt.Errorf("azure support is currently alpha, and is feature-gated. Please export KOPS_FEATURE_FLAGS=Azure")
}
if len(sshPublicKeys) == 0 {
return fmt.Errorf("SSH public key must be specified when running with AzureCloud (create with `kops create secret --name %s sshpublickey admin -i ~/.ssh/id_rsa.pub`)", cluster.ObjectMeta.Name)
}
if len(sshPublicKeys) != 1 {
return fmt.Errorf("exactly one 'admin' SSH public key can be specified when running with AzureCloud; please delete a key using `kops delete secret`")
}
}
case kops.CloudProviderOpenstack:
{
if len(sshPublicKeys) == 0 {
return fmt.Errorf("SSH public key must be specified when running with Openstack (create with `kops create secret --name %s sshpublickey admin -i ~/.ssh/id_rsa.pub`)", cluster.ObjectMeta.Name)
}
if len(sshPublicKeys) != 1 {
return fmt.Errorf("exactly one 'admin' SSH public key can be specified when running with Openstack; please delete a key using `kops delete secret`")
}
}
default:
return fmt.Errorf("unknown CloudProvider %q", cluster.Spec.CloudProvider)
}
modelContext.SSHPublicKeys = sshPublicKeys
modelContext.Region = cloud.Region()
if dns.IsGossipHostname(cluster.ObjectMeta.Name) {
klog.Infof("Gossip DNS: skipping DNS validation")
} else {
err = validateDNS(cluster, cloud)
if err != nil {
return err
}
}
tf := &TemplateFunctions{
KopsModelContext: *modelContext,
cloud: cloud,
}
configBuilder, err := newNodeUpConfigBuilder(cluster, assetBuilder, c.Assets, encryptionConfigSecretHash)
if err != nil {
return err
}
bootstrapScriptBuilder := &model.BootstrapScriptBuilder{
KopsModelContext: modelContext,
Lifecycle: clusterLifecycle,
NodeUpConfigBuilder: configBuilder,
NodeUpAssets: c.NodeUpAssets,
Cluster: cluster,
}
{
templates, err := templates.LoadTemplates(cluster, models.NewAssetPath("cloudup/resources"))
if err != nil {
return fmt.Errorf("error loading templates: %v", err)
}
err = tf.AddTo(templates.TemplateFunctions, secretStore)
if err != nil {
return err
}
bcb := bootstrapchannelbuilder.NewBootstrapChannelBuilder(
modelContext,
clusterLifecycle,
assetBuilder,
templates,
addons,
)
l.Builders = append(l.Builders,
bcb,
&model.PKIModelBuilder{
KopsModelContext: modelContext,
Lifecycle: clusterLifecycle,
},
&model.IssuerDiscoveryModelBuilder{
KopsModelContext: modelContext,
Lifecycle: clusterLifecycle,
Cluster: cluster,
},
&kubeapiserver.KubeApiserverBuilder{
AssetBuilder: assetBuilder,
KopsModelContext: modelContext,
Lifecycle: clusterLifecycle,
},
&etcdmanager.EtcdManagerBuilder{
AssetBuilder: assetBuilder,
KopsModelContext: modelContext,
Lifecycle: clusterLifecycle,
},
&model.MasterVolumeBuilder{KopsModelContext: modelContext, Lifecycle: clusterLifecycle},
&model.ConfigBuilder{KopsModelContext: modelContext, Lifecycle: clusterLifecycle},
)
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderAWS:
awsModelContext := &awsmodel.AWSModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&awsmodel.APILoadBalancerBuilder{AWSModelContext: awsModelContext, Lifecycle: clusterLifecycle, SecurityLifecycle: securityLifecycle},
&awsmodel.BastionModelBuilder{AWSModelContext: awsModelContext, Lifecycle: clusterLifecycle, SecurityLifecycle: securityLifecycle},
&awsmodel.DNSModelBuilder{AWSModelContext: awsModelContext, Lifecycle: clusterLifecycle},
&awsmodel.ExternalAccessModelBuilder{AWSModelContext: awsModelContext, Lifecycle: securityLifecycle},
&awsmodel.FirewallModelBuilder{AWSModelContext: awsModelContext, Lifecycle: securityLifecycle},
&awsmodel.SSHKeyModelBuilder{AWSModelContext: awsModelContext, Lifecycle: securityLifecycle},
&awsmodel.NetworkModelBuilder{AWSModelContext: awsModelContext, Lifecycle: networkLifecycle},
&awsmodel.IAMModelBuilder{AWSModelContext: awsModelContext, Lifecycle: securityLifecycle, Cluster: cluster},
&awsmodel.OIDCProviderBuilder{AWSModelContext: awsModelContext, Lifecycle: securityLifecycle, KeyStore: keyStore},
)
awsModelBuilder := &awsmodel.AutoscalingGroupModelBuilder{
AWSModelContext: awsModelContext,
BootstrapScriptBuilder: bootstrapScriptBuilder,
Lifecycle: clusterLifecycle,
SecurityLifecycle: securityLifecycle,
Cluster: cluster,
}
if featureflag.Spotinst.Enabled() {
l.Builders = append(l.Builders, &awsmodel.SpotInstanceGroupModelBuilder{
AWSModelContext: awsModelContext,
BootstrapScriptBuilder: bootstrapScriptBuilder,
Lifecycle: clusterLifecycle,
SecurityLifecycle: securityLifecycle,
})
if featureflag.SpotinstHybrid.Enabled() {
l.Builders = append(l.Builders, awsModelBuilder)
}
} else {
l.Builders = append(l.Builders, awsModelBuilder)
}
nth := c.Cluster.Spec.NodeTerminationHandler
if nth != nil && fi.BoolValue(nth.Enabled) && fi.BoolValue(nth.EnableSQSTerminationDraining) {
l.Builders = append(l.Builders, &awsmodel.NodeTerminationHandlerBuilder{
AWSModelContext: awsModelContext,
Lifecycle: clusterLifecycle,
})
}
case kops.CloudProviderDO:
doModelContext := &domodel.DOModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&domodel.APILoadBalancerModelBuilder{DOModelContext: doModelContext, Lifecycle: securityLifecycle},
&domodel.DropletBuilder{DOModelContext: doModelContext, BootstrapScriptBuilder: bootstrapScriptBuilder, Lifecycle: clusterLifecycle},
)
case kops.CloudProviderGCE:
gceModelContext := &gcemodel.GCEModelContext{
KopsModelContext: modelContext,
}
storageACLLifecycle := securityLifecycle
if storageACLLifecycle != fi.LifecycleIgnore {
// This is a best-effort permissions fix
storageACLLifecycle = fi.LifecycleWarnIfInsufficientAccess
}
l.Builders = append(l.Builders,
&gcemodel.APILoadBalancerBuilder{GCEModelContext: gceModelContext, Lifecycle: securityLifecycle},
&gcemodel.ExternalAccessModelBuilder{GCEModelContext: gceModelContext, Lifecycle: securityLifecycle},
&gcemodel.FirewallModelBuilder{GCEModelContext: gceModelContext, Lifecycle: securityLifecycle},
&gcemodel.NetworkModelBuilder{GCEModelContext: gceModelContext, Lifecycle: networkLifecycle},
&gcemodel.StorageAclBuilder{GCEModelContext: gceModelContext, Cloud: cloud.(gce.GCECloud), Lifecycle: storageACLLifecycle},
&gcemodel.AutoscalingGroupModelBuilder{GCEModelContext: gceModelContext, BootstrapScriptBuilder: bootstrapScriptBuilder, Lifecycle: clusterLifecycle},
)
case kops.CloudProviderALI:
aliModelContext := &alimodel.ALIModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&alimodel.APILoadBalancerModelBuilder{ALIModelContext: aliModelContext, Lifecycle: clusterLifecycle},
&alimodel.NetworkModelBuilder{ALIModelContext: aliModelContext, Lifecycle: clusterLifecycle},
&alimodel.RAMModelBuilder{ALIModelContext: aliModelContext, Lifecycle: clusterLifecycle},
&alimodel.SSHKeyModelBuilder{ALIModelContext: aliModelContext, Lifecycle: clusterLifecycle},
&alimodel.FirewallModelBuilder{ALIModelContext: aliModelContext, Lifecycle: clusterLifecycle},
&alimodel.ExternalAccessModelBuilder{ALIModelContext: aliModelContext, Lifecycle: clusterLifecycle},
&alimodel.ScalingGroupModelBuilder{ALIModelContext: aliModelContext, BootstrapScriptBuilder: bootstrapScriptBuilder, Lifecycle: clusterLifecycle},
)
case kops.CloudProviderAzure:
azureModelContext := &azuremodel.AzureModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&azuremodel.APILoadBalancerModelBuilder{AzureModelContext: azureModelContext, Lifecycle: clusterLifecycle},
&azuremodel.NetworkModelBuilder{AzureModelContext: azureModelContext, Lifecycle: clusterLifecycle},
&azuremodel.ResourceGroupModelBuilder{AzureModelContext: azureModelContext, Lifecycle: clusterLifecycle},
&azuremodel.VMScaleSetModelBuilder{AzureModelContext: azureModelContext, BootstrapScriptBuilder: bootstrapScriptBuilder, Lifecycle: clusterLifecycle},
)
case kops.CloudProviderOpenstack:
openstackModelContext := &openstackmodel.OpenstackModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders,
&openstackmodel.NetworkModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: networkLifecycle},
&openstackmodel.SSHKeyModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: securityLifecycle},
&openstackmodel.FirewallModelBuilder{OpenstackModelContext: openstackModelContext, Lifecycle: securityLifecycle},
&openstackmodel.ServerGroupModelBuilder{OpenstackModelContext: openstackModelContext, BootstrapScriptBuilder: bootstrapScriptBuilder, Lifecycle: clusterLifecycle},
)
default:
return fmt.Errorf("unknown cloudprovider %q", cluster.Spec.CloudProvider)
}
}
c.TaskMap, err = l.BuildTasks(c.LifecycleOverrides)
if err != nil {
return fmt.Errorf("error building tasks: %v", err)
}
var target fi.Target
shouldPrecreateDNS := true
switch c.TargetName {
case TargetDirect:
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderGCE:
target = gce.NewGCEAPITarget(cloud.(gce.GCECloud))
case kops.CloudProviderAWS:
target = awsup.NewAWSAPITarget(cloud.(awsup.AWSCloud))
case kops.CloudProviderDO:
target = do.NewDOAPITarget(cloud.(do.DOCloud))
case kops.CloudProviderOpenstack:
target = openstack.NewOpenstackAPITarget(cloud.(openstack.OpenstackCloud))
case kops.CloudProviderALI:
target = aliup.NewALIAPITarget(cloud.(aliup.ALICloud))
case kops.CloudProviderAzure:
target = azure.NewAzureAPITarget(cloud.(azure.AzureCloud))
default:
return fmt.Errorf("direct configuration not supported with CloudProvider:%q", cluster.Spec.CloudProvider)
}
case TargetTerraform:
checkExisting = false
outDir := c.OutDir
tf := terraform.NewTerraformTarget(cloud, project, outDir, cluster.Spec.Target)
// We include a few "util" variables in the TF output
if err := tf.AddOutputVariable("region", terraformWriter.LiteralFromStringValue(cloud.Region())); err != nil {
return err
}
if project != "" {
if err := tf.AddOutputVariable("project", terraformWriter.LiteralFromStringValue(project)); err != nil {
return err
}
}
if err := tf.AddOutputVariable("cluster_name", terraformWriter.LiteralFromStringValue(cluster.ObjectMeta.Name)); err != nil {
return err
}
target = tf
// Can cause conflicts with terraform management
shouldPrecreateDNS = false
case TargetCloudformation:
checkExisting = false
outDir := c.OutDir
target = cloudformation.NewCloudformationTarget(cloud, project, outDir)
// Can cause conflicts with cloudformation management
shouldPrecreateDNS = false
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
fmt.Printf("Kops support for CloudFormation is deprecated and will be removed in a future release.\n")
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
case TargetDryRun:
var out io.Writer = os.Stdout
if c.GetAssets {
out = io.Discard
}
target = fi.NewDryRunTarget(assetBuilder, out)
// Avoid making changes on a dry-run
shouldPrecreateDNS = false
default:
return fmt.Errorf("unsupported target type %q", c.TargetName)
}
c.Target = target
if checkExisting {
c.TaskMap, err = l.FindDeletions(cloud, c.LifecycleOverrides)
if err != nil {
return fmt.Errorf("error finding deletions: %w", err)
}
}
context, err := fi.NewContext(target, cluster, cloud, keyStore, secretStore, configBase, checkExisting, c.TaskMap)
if err != nil {
return fmt.Errorf("error building context: %v", err)
}
defer context.Close()
var options fi.RunTasksOptions
if c.RunTasksOptions != nil {
options = *c.RunTasksOptions
} else {
options.InitDefaults()
}
err = context.RunTasks(options)
if err != nil {
return fmt.Errorf("error running tasks: %v", err)
}
if dns.IsGossipHostname(cluster.Name) {
shouldPrecreateDNS = false
}
if shouldPrecreateDNS && clusterLifecycle != fi.LifecycleIgnore {
if err := precreateDNS(ctx, cluster, cloud); err != nil {
klog.Warningf("unable to pre-create DNS records - cluster startup may be slower: %v", err)
}
}
err = target.Finish(c.TaskMap) // This will finish the apply and print the changes
if err != nil {
return fmt.Errorf("error closing target: %v", err)
}
c.ImageAssets = assetBuilder.ImageAssets
c.FileAssets = assetBuilder.FileAssets
return nil
}
// upgradeSpecs ensures that fields are fully populated / defaulted
func (c *ApplyClusterCmd) upgradeSpecs(assetBuilder *assets.AssetBuilder) error {
fullCluster, err := PopulateClusterSpec(c.Clientset, c.Cluster, c.Cloud, assetBuilder)
if err != nil {
return err
}
c.Cluster = fullCluster
for i, g := range c.InstanceGroups {
fullGroup, err := PopulateInstanceGroupSpec(fullCluster, g, c.Cloud, c.channel)
if err != nil {
return err
}
c.InstanceGroups[i] = fullGroup
}
return nil
}
// validateKopsVersion ensures that kops meets the version requirements / recommendations in the channel
func (c *ApplyClusterCmd) validateKopsVersion() error {
kopsVersion, err := semver.ParseTolerant(kopsbase.Version)
if err != nil {
klog.Warningf("unable to parse kops version %q", kopsbase.Version)
// Not a hard-error
return nil
}
if c.channel == nil {
klog.Warning("channel unavailable, skipping version validation")
return nil
}
versionInfo := kops.FindKopsVersionSpec(c.channel.Spec.KopsVersions, kopsVersion)
if versionInfo == nil {
klog.Warningf("unable to find version information for kops version %q in channel", kopsVersion)
// Not a hard-error
return nil
}
recommended, err := versionInfo.FindRecommendedUpgrade(kopsVersion)
if err != nil {
klog.Warningf("unable to parse version recommendation for kops version %q in channel", kopsVersion)
}
required, err := versionInfo.IsUpgradeRequired(kopsVersion)
if err != nil {
klog.Warningf("unable to parse version requirement for kops version %q in channel", kopsVersion)
}
if recommended != nil && !required && !c.GetAssets {
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
fmt.Printf("A new kops version is available: %s", recommended)
fmt.Printf("\n")
fmt.Printf("Upgrading is recommended\n")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_kops", recommended.String()))
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
} else if required {
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
if recommended != nil {
fmt.Printf("A new kops version is available: %s\n", recommended)
}
fmt.Println("")
fmt.Printf("This version of kops (%s) is no longer supported; upgrading is required\n", kopsbase.Version)
fmt.Printf("(you can bypass this check by exporting KOPS_RUN_OBSOLETE_VERSION)\n")
fmt.Println("")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_kops", recommended.String()))
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
}
if required {
if os.Getenv("KOPS_RUN_OBSOLETE_VERSION") == "" {
return fmt.Errorf("kops upgrade is required")
}
}
return nil
}
// validateKubernetesVersion ensures that kubernetes meets the version requirements / recommendations in the channel
func (c *ApplyClusterCmd) validateKubernetesVersion() error {
parsed, err := util.ParseKubernetesVersion(c.Cluster.Spec.KubernetesVersion)
if err != nil {
klog.Warningf("unable to parse kubernetes version %q", c.Cluster.Spec.KubernetesVersion)
// Not a hard-error
return nil
}
kopsVersion, err := semver.Parse(kopsbase.KOPS_RELEASE_VERSION)
if err != nil {
klog.Warningf("unable to parse kops version %q", kopsVersion)
} else {
tooNewVersion := kopsVersion
tooNewVersion.Minor++
tooNewVersion.Pre = nil
tooNewVersion.Build = nil
if util.IsKubernetesGTE(tooNewVersion.String(), *parsed) {
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
fmt.Printf("This version of kubernetes is not yet supported; upgrading kops is required\n")
fmt.Printf("(you can bypass this check by exporting KOPS_RUN_TOO_NEW_VERSION)\n")
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
if os.Getenv("KOPS_RUN_TOO_NEW_VERSION") == "" {
return fmt.Errorf("kops upgrade is required")
}
}
}
if !util.IsKubernetesGTE(OldestSupportedKubernetesVersion, *parsed) {
fmt.Printf("This version of Kubernetes is no longer supported; upgrading Kubernetes is required\n")
fmt.Printf("\n")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_k8s", OldestRecommendedKubernetesVersion))
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
return fmt.Errorf("kubernetes upgrade is required")
}
if !util.IsKubernetesGTE(OldestRecommendedKubernetesVersion, *parsed) && !c.GetAssets {
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
fmt.Printf("Kops support for this Kubernetes version is deprecated and will be removed in a future release.\n")
fmt.Printf("\n")
fmt.Printf("Upgrading Kubernetes is recommended\n")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_k8s", OldestRecommendedKubernetesVersion))
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
}
// TODO: make util.ParseKubernetesVersion not return a pointer
kubernetesVersion := *parsed
if c.channel == nil {
klog.Warning("unable to load channel, skipping kubernetes version recommendation/requirements checks")
return nil
}
versionInfo := kops.FindKubernetesVersionSpec(c.channel.Spec.KubernetesVersions, kubernetesVersion)
if versionInfo == nil {
klog.Warningf("unable to find version information for kubernetes version %q in channel", kubernetesVersion)
// Not a hard-error
return nil
}
recommended, err := versionInfo.FindRecommendedUpgrade(kubernetesVersion)
if err != nil {
klog.Warningf("unable to parse version recommendation for kubernetes version %q in channel", kubernetesVersion)
}
required, err := versionInfo.IsUpgradeRequired(kubernetesVersion)
if err != nil {
klog.Warningf("unable to parse version requirement for kubernetes version %q in channel", kubernetesVersion)
}
if recommended != nil && !required && !c.GetAssets {
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
fmt.Printf("A new kubernetes version is available: %s\n", recommended)
fmt.Printf("Upgrading is recommended (try kops upgrade cluster)\n")
fmt.Printf("\n")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_k8s", recommended.String()))
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
} else if required {
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
if recommended != nil {
fmt.Printf("A new kubernetes version is available: %s\n", recommended)
}
fmt.Printf("\n")
fmt.Printf("This version of kubernetes is no longer supported; upgrading is required\n")
fmt.Printf("(you can bypass this check by exporting KOPS_RUN_OBSOLETE_VERSION)\n")
fmt.Printf("\n")
fmt.Printf("More information: %s\n", buildPermalink("upgrade_k8s", recommended.String()))
fmt.Printf("\n")
fmt.Printf("%s\n", starline)
fmt.Printf("\n")
}
if required {
if os.Getenv("KOPS_RUN_OBSOLETE_VERSION") == "" {
return fmt.Errorf("kubernetes upgrade is required")
}
}
return nil
}
// addFileAssets adds the file assets within the assetBuilder
func (c *ApplyClusterCmd) addFileAssets(assetBuilder *assets.AssetBuilder) error {
var baseURL string
if components.IsBaseURL(c.Cluster.Spec.KubernetesVersion) {
baseURL = c.Cluster.Spec.KubernetesVersion
} else {
baseURL = "https://storage.googleapis.com/kubernetes-release/release/v" + c.Cluster.Spec.KubernetesVersion
}
c.Assets = make(map[architectures.Architecture][]*mirrors.MirroredAsset)
c.NodeUpAssets = make(map[architectures.Architecture]*mirrors.MirroredAsset)
for _, arch := range architectures.GetSupported() {
c.Assets[arch] = []*mirrors.MirroredAsset{}
k8sAssetsNames := []string{
fmt.Sprintf("/bin/linux/%s/kubelet", arch),
fmt.Sprintf("/bin/linux/%s/kubectl", arch),
}
if needsMounterAsset(c.Cluster, c.InstanceGroups) {
k8sAssetsNames = append(k8sAssetsNames, fmt.Sprintf("/bin/linux/%s/mounter", arch))
}
for _, an := range k8sAssetsNames {
k, err := url.Parse(baseURL)
if err != nil {
return err
}
k.Path = path.Join(k.Path, an)
u, hash, err := assetBuilder.RemapFileAndSHA(k)
if err != nil {
return err
}
c.Assets[arch] = append(c.Assets[arch], mirrors.BuildMirroredAsset(u, hash))
}
cniAsset, cniAssetHash, err := findCNIAssets(c.Cluster, assetBuilder, arch)
if err != nil {
return err
}
c.Assets[arch] = append(c.Assets[arch], mirrors.BuildMirroredAsset(cniAsset, cniAssetHash))
if c.Cluster.Spec.Networking.LyftVPC != nil {
lyftAsset, lyftAssetHash, err := findLyftVPCAssets(c.Cluster, assetBuilder, arch)
if err != nil {
return err
}
c.Assets[arch] = append(c.Assets[arch], mirrors.BuildMirroredAsset(lyftAsset, lyftAssetHash))
}
var containerRuntimeAssetUrl *url.URL
var containerRuntimeAssetHash *hashing.Hash
switch c.Cluster.Spec.ContainerRuntime {
case "docker":
containerRuntimeAssetUrl, containerRuntimeAssetHash, err = findDockerAsset(c.Cluster, assetBuilder, arch)
case "containerd":
containerRuntimeAssetUrl, containerRuntimeAssetHash, err = findContainerdAsset(c.Cluster, assetBuilder, arch)
default:
err = fmt.Errorf("unknown container runtime: %q", c.Cluster.Spec.ContainerRuntime)
}
if err != nil {
return err
}
c.Assets[arch] = append(c.Assets[arch], mirrors.BuildMirroredAsset(containerRuntimeAssetUrl, containerRuntimeAssetHash))
asset, err := NodeUpAsset(assetBuilder, arch)
if err != nil {
return err
}
c.NodeUpAssets[arch] = asset
}
return nil
}
// buildPermalink returns a link to our "permalink docs", to further explain an error message
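// For example, buildPermalink("upgrade_k8s", "1.19.0") returns
// "https://github.com/kubernetes/kops/blob/master/permalinks/upgrade_k8s.md#1.19.0".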
func buildPermalink(key, anchor string) string {
url := "https://github.com/kubernetes/kops/blob/master/permalinks/" + key + ".md"
if anchor != "" {
url += "#" + anchor
}
return url
}
func ChannelForCluster(c *kops.Cluster) (*kops.Channel, error) {
channelLocation := c.Spec.Channel
if channelLocation == "" {
channelLocation = kops.DefaultChannel
}
return kops.LoadChannel(channelLocation)
}
// needsMounterAsset checks if we need the mounter program
// This is only needed currently on ContainerOS i.e. GCE, but we don't have a nice way to detect it yet
func needsMounterAsset(c *kops.Cluster, instanceGroups []*kops.InstanceGroup) bool {
// TODO: Do real detection of ContainerOS (but this has to work with image names, and maybe even forked images)
switch kops.CloudProviderID(c.Spec.CloudProvider) {
case kops.CloudProviderGCE:
return true
default:
return false
}
}
type nodeUpConfigBuilder struct {
// Assets is a list of sources for files (primarily when not using everything containerized)
// Formats:
// raw url: http://... or https://...
// url with hash: <hex>@http://... or <hex>@https://...
assets map[architectures.Architecture][]*mirrors.MirroredAsset
assetBuilder *assets.AssetBuilder
channels []string
configBase vfs.Path
cluster *kops.Cluster
etcdManifests map[kops.InstanceGroupRole][]string
images map[kops.InstanceGroupRole]map[architectures.Architecture][]*nodeup.Image
protokubeAsset map[architectures.Architecture][]*mirrors.MirroredAsset
channelsAsset map[architectures.Architecture][]*mirrors.MirroredAsset
encryptionConfigSecretHash string
}
func newNodeUpConfigBuilder(cluster *kops.Cluster, assetBuilder *assets.AssetBuilder, assets map[architectures.Architecture][]*mirrors.MirroredAsset, encryptionConfigSecretHash string) (model.NodeUpConfigBuilder, error) {
configBase, err := vfs.Context.BuildVfsPath(cluster.Spec.ConfigBase)
if err != nil {
return nil, fmt.Errorf("error parsing config base %q: %v", cluster.Spec.ConfigBase, err)
}
channels := []string{
configBase.Join("addons", "bootstrap-channel.yaml").Path(),
}
for i := range cluster.Spec.Addons {
channels = append(channels, cluster.Spec.Addons[i].Manifest)
}
etcdManifests := map[kops.InstanceGroupRole][]string{}
images := map[kops.InstanceGroupRole]map[architectures.Architecture][]*nodeup.Image{}
protokubeAsset := map[architectures.Architecture][]*mirrors.MirroredAsset{}
channelsAsset := map[architectures.Architecture][]*mirrors.MirroredAsset{}
for _, arch := range architectures.GetSupported() {
asset, err := ProtokubeAsset(assetBuilder, arch)
if err != nil {
return nil, err
}
protokubeAsset[arch] = append(protokubeAsset[arch], asset)
}
for _, arch := range architectures.GetSupported() {
asset, err := ChannelsAsset(assetBuilder, arch)
if err != nil {
return nil, err
}
channelsAsset[arch] = append(channelsAsset[arch], asset)
}
for _, role := range kops.AllInstanceGroupRoles {
isMaster := role == kops.InstanceGroupRoleMaster
isAPIServer := role == kops.InstanceGroupRoleAPIServer
images[role] = make(map[architectures.Architecture][]*nodeup.Image)
if components.IsBaseURL(cluster.Spec.KubernetesVersion) {
// When using a custom version, we want to preload the images over http
components := []string{"kube-proxy"}
if isMaster {
components = append(components, "kube-apiserver", "kube-controller-manager", "kube-scheduler")
}
if isAPIServer {
components = append(components, "kube-apiserver")
}
for _, arch := range architectures.GetSupported() {
for _, component := range components {
baseURL, err := url.Parse(cluster.Spec.KubernetesVersion)
if err != nil {
return nil, err
}
baseURL.Path = path.Join(baseURL.Path, "/bin/linux", string(arch), component+".tar")
u, hash, err := assetBuilder.RemapFileAndSHA(baseURL)
if err != nil {
return nil, err
}
image := &nodeup.Image{
Sources: []string{u.String()},
Hash: hash.Hex(),
}
images[role][arch] = append(images[role][arch], image)
}
}
}
// `docker load` our images when using a KOPS_BASE_URL, so we
// don't need to push/pull from a registry
if os.Getenv("KOPS_BASE_URL") != "" && isMaster {
for _, arch := range architectures.GetSupported() {
for _, name := range []string{"kops-controller", "dns-controller", "kube-apiserver-healthcheck"} {
baseURL, err := url.Parse(os.Getenv("KOPS_BASE_URL"))
if err != nil {
return nil, err
}
baseURL.Path = path.Join(baseURL.Path, "/images/"+name+"-"+string(arch)+".tar.gz")
u, hash, err := assetBuilder.RemapFileAndSHA(baseURL)
if err != nil {
return nil, err
}
image := &nodeup.Image{
Sources: []string{u.String()},
Hash: hash.Hex(),
}
images[role][arch] = append(images[role][arch], image)
}
}
}
if os.Getenv("KOPS_BASE_URL") != "" && isAPIServer {
for _, arch := range architectures.GetSupported() {
for _, name := range []string{"kube-apiserver-healthcheck"} {
baseURL, err := url.Parse(os.Getenv("KOPS_BASE_URL"))
if err != nil {
return nil, err
}
baseURL.Path = path.Join(baseURL.Path, "/images/"+name+"-"+string(arch)+".tar.gz")
u, hash, err := assetBuilder.RemapFileAndSHA(baseURL)
if err != nil {
return nil, err
}
image := &nodeup.Image{
Sources: []string{u.String()},
Hash: hash.Hex(),
}
images[role][arch] = append(images[role][arch], image)
}
}
}
if isMaster {
for _, etcdCluster := range cluster.Spec.EtcdClusters {
if etcdCluster.Provider == kops.EtcdProviderTypeManager {
p := configBase.Join("manifests/etcd/" + etcdCluster.Name + ".yaml").Path()
etcdManifests[role] = append(etcdManifests[role], p)
}
}
}
}
configBuilder := nodeUpConfigBuilder{
assetBuilder: assetBuilder,
assets: assets,
channels: channels,
configBase: configBase,
cluster: cluster,
etcdManifests: etcdManifests,
images: images,
protokubeAsset: protokubeAsset,
channelsAsset: channelsAsset,
encryptionConfigSecretHash: encryptionConfigSecretHash,
}
return &configBuilder, nil
}
// BuildConfig returns the NodeUp config and auxiliary config.
func (n *nodeUpConfigBuilder) BuildConfig(ig *kops.InstanceGroup, apiserverAdditionalIPs []string, caTasks map[string]*fitasks.Keypair) (*nodeup.Config, *nodeup.BootConfig, error) {
cluster := n.cluster
if ig == nil {
return nil, nil, fmt.Errorf("instanceGroup cannot be nil")
}
role := ig.Spec.Role
if role == "" {
return nil, nil, fmt.Errorf("cannot determine role for instance group: %v", ig.ObjectMeta.Name)
}
useGossip := dns.IsGossipHostname(cluster.Spec.MasterInternalName)
isMaster := role == kops.InstanceGroupRoleMaster
hasAPIServer := isMaster || role == kops.InstanceGroupRoleAPIServer
config, bootConfig := nodeup.NewConfig(cluster, ig)
config.Assets = make(map[architectures.Architecture][]string)
for _, arch := range architectures.GetSupported() {
config.Assets[arch] = []string{}
for _, a := range n.assets[arch] {
config.Assets[arch] = append(config.Assets[arch], a.CompactString())
}
}
if err := getTasksCertificate(caTasks, fi.CertificateIDCA, config, false); err != nil {
return nil, nil, err
}
if caTasks["etcd-clients-ca-cilium"] != nil {
if err := getTasksCertificate(caTasks, "etcd-clients-ca-cilium", config, hasAPIServer); err != nil {
return nil, nil, err
}
}
if isMaster {
config.KeypairIDs[fi.CertificateIDCA] = caTasks[fi.CertificateIDCA].Keyset().Primary.Id
if err := getTasksCertificate(caTasks, "etcd-clients-ca", config, true); err != nil {
return nil, nil, err
}
for _, etcdCluster := range cluster.Spec.EtcdClusters {
k := etcdCluster.Name
if err := getTasksCertificate(caTasks, "etcd-manager-ca-"+k, config, true); err != nil {
return nil, nil, err
}
if err := getTasksCertificate(caTasks, "etcd-peers-ca-"+k, config, true); err != nil {
return nil, nil, err
}
if k != "events" && k != "main" {
if err := getTasksCertificate(caTasks, "etcd-clients-ca-"+k, config, true); err != nil {
return nil, nil, err
}
}
}
config.KeypairIDs["service-account"] = caTasks["service-account"].Keyset().Primary.Id
} else {
if caTasks["etcd-client-cilium"] != nil {
config.KeypairIDs["etcd-client-cilium"] = caTasks["etcd-client-cilium"].Keyset().Primary.Id
}
}
if hasAPIServer {
if err := getTasksCertificate(caTasks, "apiserver-aggregator-ca", config, true); err != nil {
return nil, nil, err
}
if caTasks["etcd-clients-ca"] != nil {
if err := getTasksCertificate(caTasks, "etcd-clients-ca", config, true); err != nil {
return nil, nil, err
}
}
if cluster.Spec.KubeAPIServer != nil && fi.StringValue(cluster.Spec.KubeAPIServer.ServiceAccountIssuer) != "" {
config.KeypairIDs["service-account"] = caTasks["service-account"].Keyset().Primary.Id
}
config.APIServerConfig.EncryptionConfigSecretHash = n.encryptionConfigSecretHash
var err error
config.APIServerConfig.ServiceAccountPublicKeys, err = caTasks["service-account"].Keyset().ToPublicKeys()
if err != nil {
return nil, nil, fmt.Errorf("encoding service-account keys: %w", err)
}
}
if isMaster || useGossip {
for _, arch := range architectures.GetSupported() {
for _, a := range n.protokubeAsset[arch] {
config.Assets[arch] = append(config.Assets[arch], a.CompactString())
}
}
for _, arch := range architectures.GetSupported() {
for _, a := range n.channelsAsset[arch] {
config.Assets[arch] = append(config.Assets[arch], a.CompactString())
}
}
}
useConfigServer := featureflag.KopsControllerStateStore.Enabled() && (role != kops.InstanceGroupRoleMaster)
if useConfigServer {
baseURL := url.URL{
Scheme: "https",
Host: net.JoinHostPort("kops-controller.internal."+cluster.ObjectMeta.Name, strconv.Itoa(wellknownports.KopsControllerPort)),
Path: "/",
}
configServer := &nodeup.ConfigServerOptions{
Server: baseURL.String(),
CACertificates: config.CAs[fi.CertificateIDCA],
}
bootConfig.ConfigServer = configServer
delete(config.CAs, fi.CertificateIDCA)
} else {
bootConfig.ConfigBase = fi.String(n.configBase.Path())
}
if isMaster {
config.ApiserverAdditionalIPs = apiserverAdditionalIPs
}
for _, manifest := range n.assetBuilder.StaticManifests {
match := false
for _, r := range manifest.Roles {
if r == role {
match = true
}
}
if !match {
continue
}
config.StaticManifests = append(config.StaticManifests, &nodeup.StaticManifest{
Key: manifest.Key,
Path: manifest.Path,
})
}
config.Images = n.images[role]
config.Channels = n.channels
config.EtcdManifests = n.etcdManifests[role]
if cluster.Spec.ContainerRuntime == "containerd" {
config.ContainerdConfig = cluster.Spec.Containerd
}
if ig.Spec.WarmPool != nil || cluster.Spec.WarmPool != nil {
config.WarmPoolImages = n.buildWarmPoolImages(ig)
}
return config, bootConfig, nil
}
func getTasksCertificate(caTasks map[string]*fitasks.Keypair, name string, config *nodeup.Config, includeKeypairID bool) error {
cas, err := fi.ResourceAsString(caTasks[name].Certificates())
if err != nil {
// CA task may not have run yet; we'll retry
return fmt.Errorf("failed to read %s certificates: %w", name, err)
}
config.CAs[name] = cas
if includeKeypairID {
config.KeypairIDs[name] = caTasks[name].Keyset().Primary.Id
}
return nil
}
// buildWarmPoolImages returns a list of container images that should be pre-pulled during instance pre-initialization
func (n *nodeUpConfigBuilder) buildWarmPoolImages(ig *kops.InstanceGroup) []string {
if ig == nil || ig.Spec.Role == kops.InstanceGroupRoleMaster {
return nil
}
images := map[string]bool{}
// Add component and addon images that impact startup time
// TODO: Exclude images that only run on control-plane nodes in a generic way
desiredImagePrefixes := []string{
"602401143452.dkr.ecr.us-west-2.amazonaws.com/", // Amazon VPC CNI
// Ignore images hosted on docker.io until a solution for rate limiting is implemented
//"docker.io/calico/",
//"docker.io/cilium/",
//"docker.io/cloudnativelabs/kube-router:",
//"docker.io/weaveworks/",
"k8s.gcr.io/kube-proxy:",
"k8s.gcr.io/provider-aws/",
"k8s.gcr.io/sig-storage/csi-node-driver-registrar:",
"k8s.gcr.io/sig-storage/livenessprobe:",
"quay.io/calico/",
"quay.io/cilium/",
"quay.io/coreos/flannel:",
"quay.io/weaveworks/",
}
assetBuilder := n.assetBuilder
if assetBuilder != nil {
for _, image := range assetBuilder.ImageAssets {
for _, prefix := range desiredImagePrefixes {
if strings.HasPrefix(image.DownloadLocation, prefix) {
images[image.DownloadLocation] = true
}
}
}
}
var unique []string
for image := range images {
unique = append(unique, image)
}
sort.Strings(unique)
return unique
}
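// Illustrative output of buildWarmPoolImages for an AWS cluster (the image
// tags below are hypothetical):
//
//	["602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:v1.7.5",
//	 "k8s.gcr.io/kube-proxy:v1.21.2"]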
| [
"\"KOPS_RUN_OBSOLETE_VERSION\"",
"\"KOPS_RUN_TOO_NEW_VERSION\"",
"\"KOPS_RUN_OBSOLETE_VERSION\"",
"\"KOPS_BASE_URL\"",
"\"KOPS_BASE_URL\"",
"\"KOPS_BASE_URL\"",
"\"KOPS_BASE_URL\""
]
| []
| [
"KOPS_BASE_URL",
"KOPS_RUN_TOO_NEW_VERSION",
"KOPS_RUN_OBSOLETE_VERSION"
]
| [] | ["KOPS_BASE_URL", "KOPS_RUN_TOO_NEW_VERSION", "KOPS_RUN_OBSOLETE_VERSION"] | go | 3 | 0 | |
tests/e2e/helpers.go | package e2e
import (
"fmt"
"io"
"os"
"os/exec"
"strings"
"time"
//nolint:golint,stylecheck
. "github.com/onsi/ginkgo"
//nolint:golint,stylecheck
. "github.com/onsi/gomega"
)
var (
selinuxdInAContainer bool
selinuxdContainerName string
)
const (
// timeout for selinuxd to report it's ready
selinuxdReadyTimeout float64 = 320
	// default time to wait for selinuxd to do an operation
selinuxdTimeout = 10 * time.Minute
// default interval between operations
defaultInterval = 2 * time.Second
selinuxdDir = "/etc/selinux.d"
)
func initVars() {
if strings.EqualFold(os.Getenv("SELINUXD_IS_CONTAINER"), "yes") ||
strings.EqualFold(os.Getenv("SELINUXD_IS_CONTAINER"), "true") {
selinuxdInAContainer = true
selinuxdContainerName = os.Getenv("SELINUXD_CONTAINER_NAME")
if selinuxdContainerName == "" {
fmt.Println("You must specify $SELINUXD_CONTAINER_NAME if running in a container")
os.Exit(1)
}
}
}
// policyEventually wraps Gomega's Eventually function. It is targeted at
// checking that the policy status will eventually reach a certain state.
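// Illustrative usage (the policy name and expected status text are hypothetical):
//
//	policyEventually("testpolicy").Should(ContainSubstring("installed"))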
func policyEventually(policy string) AsyncAssertion {
return Eventually(func() string {
return selinuxdctl("status", policy)
}, selinuxdTimeout, defaultInterval)
}
func do(cmd string, args ...string) string {
execcmd := exec.Command(cmd, args...)
output, err := execcmd.CombinedOutput()
Expect(err).ShouldNot(HaveOccurred(),
"The command '%s' shouldn't fail.\n- Arguments: %v\n- Output: %s", cmd, args, output)
return strings.Trim(string(output), "\n")
}
func selinuxdctl(args ...string) string {
if !selinuxdInAContainer {
return do("selinuxdctl", args...)
}
return do("podman", append([]string{"exec", selinuxdContainerName, "selinuxdctl"}, args...)...)
}
func waitForSelinuxdToBeReady(done Done) {
for {
isReady := selinuxdctl("is-ready")
if isReady == "yes" {
close(done)
return
}
time.Sleep(defaultInterval)
}
}
func installPolicyFromReference(refPath, destPath string) {
By(fmt.Sprintf("Installing policy from %s to %s", refPath, destPath))
Expect(refPath).Should(BeAnExistingFile())
ref, openErr := os.Open(refPath)
Expect(openErr).ShouldNot(HaveOccurred())
dest, createErr := os.Create(destPath)
Expect(createErr).ShouldNot(HaveOccurred())
_, copyErr := io.Copy(dest, ref)
Expect(copyErr).ShouldNot(HaveOccurred())
}
func removePolicyIfPossible(policy string) {
if !CurrentGinkgoTestDescription().Failed {
By(fmt.Sprintf("Removing policy: %s", policy))
os.Remove(policy)
}
}
| [
"\"SELINUXD_IS_CONTAINER\"",
"\"SELINUXD_IS_CONTAINER\"",
"\"SELINUXD_CONTAINER_NAME\""
]
| []
| [
"SELINUXD_CONTAINER_NAME",
"SELINUXD_IS_CONTAINER"
]
| [] | ["SELINUXD_CONTAINER_NAME", "SELINUXD_IS_CONTAINER"] | go | 2 | 0 | |
playbooks/openstack/inventory.py | #!/usr/bin/env python
"""
This is an Ansible dynamic inventory for OpenStack.
It requires your OpenStack credentials to be set in clouds.yaml or your shell
environment.
"""
from __future__ import print_function
import json
import os
from keystoneauth1.exceptions.catalog import EndpointNotFound
import shade
def base_openshift_inventory(cluster_hosts):
'''Set the base openshift inventory.'''
inventory = {}
masters = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'master']
etcd = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'etcd']
if not etcd:
etcd = masters
infra_hosts = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'node' and
server.metadata['sub-host-type'] == 'infra']
app = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'node' and
server.metadata['sub-host-type'] == 'app']
cns = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'cns']
nodes = list(set(masters + infra_hosts + app + cns))
dns = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'dns']
load_balancers = [server.name for server in cluster_hosts
if server.metadata['host-type'] == 'lb']
osev3 = list(set(nodes + etcd + load_balancers))
inventory['cluster_hosts'] = {'hosts': [s.name for s in cluster_hosts]}
inventory['OSEv3'] = {'hosts': osev3, 'vars': {}}
inventory['masters'] = {'hosts': masters}
inventory['etcd'] = {'hosts': etcd}
inventory['nodes'] = {'hosts': nodes}
inventory['infra_hosts'] = {'hosts': infra_hosts}
inventory['app'] = {'hosts': app}
inventory['glusterfs'] = {'hosts': cns}
inventory['dns'] = {'hosts': dns}
inventory['lb'] = {'hosts': load_balancers}
inventory['localhost'] = {'ansible_connection': 'local'}
return inventory
def get_docker_storage_mountpoints(volumes):
'''Check volumes to see if they're being used for docker storage'''
docker_storage_mountpoints = {}
for volume in volumes:
if volume.metadata.get('purpose') == "openshift_docker_storage":
for attachment in volume.attachments:
if attachment.server_id in docker_storage_mountpoints:
docker_storage_mountpoints[attachment.server_id].append(attachment.device)
else:
docker_storage_mountpoints[attachment.server_id] = [attachment.device]
return docker_storage_mountpoints
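# Illustrative shape of the mapping returned above (the server ID and device
# paths are hypothetical):
#   {'<server-id>': ['/dev/vdb', '/dev/vdc']}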
def _get_hostvars(server, docker_storage_mountpoints):
ssh_ip_address = server.public_v4 or server.private_v4
hostvars = {
'ansible_host': ssh_ip_address
}
public_v4 = server.public_v4 or server.private_v4
if public_v4:
hostvars['public_v4'] = server.public_v4
hostvars['openshift_public_ip'] = server.public_v4
# TODO(shadower): what about multiple networks?
if server.private_v4:
hostvars['private_v4'] = server.private_v4
hostvars['openshift_ip'] = server.private_v4
# NOTE(shadower): Yes, we set both hostname and IP to the private
# IP address for each node. OpenStack doesn't resolve nodes by
# name at all, so using a hostname here would require an internal
# DNS which would complicate the setup and potentially introduce
# performance issues.
hostvars['openshift_hostname'] = server.metadata.get(
'openshift_hostname', server.private_v4)
hostvars['openshift_public_hostname'] = server.name
if server.metadata['host-type'] == 'cns':
hostvars['glusterfs_devices'] = ['/dev/nvme0n1']
group_name = server.metadata.get('openshift_node_group_name')
hostvars['openshift_node_group_name'] = group_name
# check for attached docker storage volumes
if 'os-extended-volumes:volumes_attached' in server:
if server.id in docker_storage_mountpoints:
hostvars['docker_storage_mountpoints'] = ' '.join(
docker_storage_mountpoints[server.id])
return hostvars
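# Illustrative hostvars for a single node (all addresses and the group name
# are hypothetical):
#   {'ansible_host': '203.0.113.10', 'public_v4': '203.0.113.10',
#    'private_v4': '10.0.0.5', 'openshift_ip': '10.0.0.5',
#    'openshift_hostname': '10.0.0.5', 'openshift_public_hostname': 'node-0',
#    'openshift_node_group_name': 'node-config-compute'}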
def build_inventory():
'''Build the dynamic inventory.'''
cloud = shade.openstack_cloud()
# TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
# environment variable.
cluster_hosts = [
server for server in cloud.list_servers()
if 'metadata' in server and 'clusterid' in server.metadata]
inventory = base_openshift_inventory(cluster_hosts)
for server in cluster_hosts:
if 'group' in server.metadata:
group = server.metadata.get('group')
if group not in inventory:
inventory[group] = {'hosts': []}
inventory[group]['hosts'].append(server.name)
inventory['_meta'] = {'hostvars': {}}
# Some clouds don't have Cinder. That's okay:
try:
volumes = cloud.list_volumes()
except EndpointNotFound:
volumes = []
# cinder volumes used for docker storage
docker_storage_mountpoints = get_docker_storage_mountpoints(volumes)
for server in cluster_hosts:
inventory['_meta']['hostvars'][server.name] = _get_hostvars(
server,
docker_storage_mountpoints)
stout = _get_stack_outputs(cloud)
if stout is not None:
try:
inventory['localhost'].update({
'openshift_openstack_api_lb_provider':
stout['api_lb_provider'],
'openshift_openstack_api_lb_port_id':
stout['api_lb_vip_port_id'],
'openshift_openstack_api_lb_sg_id':
stout['api_lb_sg_id']})
except KeyError:
pass # Not an API load balanced deployment
try:
inventory['OSEv3']['vars'][
'openshift_master_cluster_hostname'] = stout['private_api_ip']
except KeyError:
pass # Internal LB not specified
inventory['localhost']['openshift_openstack_private_api_ip'] = \
stout.get('private_api_ip')
inventory['localhost']['openshift_openstack_public_api_ip'] = \
stout.get('public_api_ip')
inventory['localhost']['openshift_openstack_public_router_ip'] = \
stout.get('public_router_ip')
try:
inventory['OSEv3']['vars'] = _get_kuryr_vars(cloud, stout)
except KeyError:
pass # Not a kuryr deployment
return inventory
def _get_stack_outputs(cloud_client):
"""Returns a dictionary with the stack outputs"""
cluster_name = os.getenv('OPENSHIFT_CLUSTER', 'openshift-cluster')
stack = cloud_client.get_stack(cluster_name)
if stack is None or stack['stack_status'] not in (
'CREATE_COMPLETE', 'UPDATE_COMPLETE'):
return None
data = {}
for output in stack['outputs']:
data[output['output_key']] = output['output_value']
return data
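# Illustrative `data` returned above (keys depend on the Heat template's
# outputs; values are hypothetical):
#   {'private_api_ip': '10.0.0.5', 'public_api_ip': '203.0.113.10'}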
def _get_kuryr_vars(cloud_client, data):
"""Returns a dictionary of Kuryr variables resulting of heat stacking"""
settings = {}
settings['kuryr_openstack_pod_subnet_id'] = data['pod_subnet']
if 'pod_subnet_pool' in data:
settings['kuryr_openstack_pod_subnet_pool_id'] = data[
'pod_subnet_pool']
settings['kuryr_openstack_pod_router_id'] = data['pod_router']
settings['kuryr_openstack_worker_nodes_subnet_id'] = data['vm_subnet']
settings['kuryr_openstack_service_subnet_id'] = data['service_subnet']
settings['kuryr_openstack_pod_sg_id'] = data['pod_access_sg_id']
settings['kuryr_openstack_pod_project_id'] = (
cloud_client.current_project_id)
settings['kuryr_openstack_api_lb_ip'] = data['private_api_ip']
settings['kuryr_openstack_auth_url'] = cloud_client.auth['auth_url']
settings['kuryr_openstack_username'] = cloud_client.auth['username']
settings['kuryr_openstack_password'] = cloud_client.auth['password']
if 'user_domain_id' in cloud_client.auth:
settings['kuryr_openstack_user_domain_name'] = (
cloud_client.auth['user_domain_id'])
else:
settings['kuryr_openstack_user_domain_name'] = (
cloud_client.auth['user_domain_name'])
# FIXME(apuimedo): consolidate kuryr controller credentials into the same
# vars the openstack playbook uses.
settings['kuryr_openstack_project_id'] = cloud_client.current_project_id
if 'project_domain_id' in cloud_client.auth:
settings['kuryr_openstack_project_domain_name'] = (
cloud_client.auth['project_domain_id'])
else:
settings['kuryr_openstack_project_domain_name'] = (
cloud_client.auth['project_domain_name'])
return settings
if __name__ == '__main__':
print(json.dumps(build_inventory(), indent=4, sort_keys=True))
| []
| []
| [
"OPENSHIFT_CLUSTER"
]
| [] | ["OPENSHIFT_CLUSTER"] | python | 1 | 0 | |
vendor/github.com/elastic/beats/packetbeat/protos/icmp/message.go | package icmp
import (
"encoding/binary"
"time"
"github.com/elastic/beats/libbeat/logp"
"github.com/tsg/gopacket/layers"
)
// TODO: more types (that are not provided as constants in gopacket)
// ICMPv4 types that represent a response (all other types represent a request)
var icmp4ResponseTypes = map[uint8]bool{
layers.ICMPv4TypeEchoReply: true,
layers.ICMPv4TypeTimestampReply: true,
layers.ICMPv4TypeInfoReply: true,
layers.ICMPv4TypeAddressMaskReply: true,
}
// ICMPv6 types that represent a response (all other types represent a request)
var icmp6ResponseTypes = map[uint8]bool{
layers.ICMPv6TypeEchoReply: true,
}
// ICMPv4 types that represent an error
var icmp4ErrorTypes = map[uint8]bool{
layers.ICMPv4TypeDestinationUnreachable: true,
layers.ICMPv4TypeSourceQuench: true,
layers.ICMPv4TypeTimeExceeded: true,
layers.ICMPv4TypeParameterProblem: true,
}
// ICMPv6 types that represent an error
var icmp6ErrorTypes = map[uint8]bool{
layers.ICMPv6TypeDestinationUnreachable: true,
layers.ICMPv6TypePacketTooBig: true,
layers.ICMPv6TypeTimeExceeded: true,
layers.ICMPv6TypeParameterProblem: true,
}
// ICMPv4 types that require a request & a response
var icmp4PairTypes = map[uint8]bool{
layers.ICMPv4TypeEchoRequest: true,
layers.ICMPv4TypeEchoReply: true,
layers.ICMPv4TypeTimestampRequest: true,
layers.ICMPv4TypeTimestampReply: true,
layers.ICMPv4TypeInfoRequest: true,
layers.ICMPv4TypeInfoReply: true,
layers.ICMPv4TypeAddressMaskRequest: true,
layers.ICMPv4TypeAddressMaskReply: true,
}
// ICMPv6 types that require a request & a response
var icmp6PairTypes = map[uint8]bool{
layers.ICMPv6TypeEchoRequest: true,
layers.ICMPv6TypeEchoReply: true,
}
// icmpMessage holds the information used from the ICMP message on the wire.
type icmpMessage struct {
ts time.Time
Type uint8
code uint8
length int
}
func isRequest(tuple *icmpTuple, msg *icmpMessage) bool {
if tuple.icmpVersion == 4 {
return !icmp4ResponseTypes[msg.Type]
}
if tuple.icmpVersion == 6 {
return !icmp6ResponseTypes[msg.Type]
}
logp.WTF("icmp", "Invalid ICMP version[%d]", tuple.icmpVersion)
return true
}
func isError(tuple *icmpTuple, msg *icmpMessage) bool {
if tuple.icmpVersion == 4 {
return icmp4ErrorTypes[msg.Type]
}
if tuple.icmpVersion == 6 {
return icmp6ErrorTypes[msg.Type]
}
logp.WTF("icmp", "Invalid ICMP version[%d]", tuple.icmpVersion)
return true
}
func requiresCounterpart(tuple *icmpTuple, msg *icmpMessage) bool {
if tuple.icmpVersion == 4 {
return icmp4PairTypes[msg.Type]
}
if tuple.icmpVersion == 6 {
return icmp6PairTypes[msg.Type]
}
logp.WTF("icmp", "Invalid ICMP version[%d]", tuple.icmpVersion)
return false
}
func extractTrackingData(icmpVersion uint8, msgType uint8, baseLayer *layers.BaseLayer) (uint16, uint16) {
if icmpVersion == 4 {
if icmp4PairTypes[msgType] {
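			// ICMP echo header layout (RFC 792 / RFC 4443): bytes 0-1 hold the
			// type and code, bytes 2-3 the checksum, bytes 4-5 the identifier,
			// and bytes 6-7 the sequence number, all big endian; hence the
			// slice offsets below.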
id := binary.BigEndian.Uint16(baseLayer.Contents[4:6])
seq := binary.BigEndian.Uint16(baseLayer.Contents[6:8])
return id, seq
}
return 0, 0
}
if icmpVersion == 6 {
if icmp6PairTypes[msgType] {
id := binary.BigEndian.Uint16(baseLayer.Contents[4:6])
seq := binary.BigEndian.Uint16(baseLayer.Contents[6:8])
return id, seq
}
return 0, 0
}
logp.WTF("icmp", "Invalid ICMP version[%d]", icmpVersion)
return 0, 0
}
func humanReadable(tuple *icmpTuple, msg *icmpMessage) string {
if tuple.icmpVersion == 4 {
return layers.ICMPv4TypeCode(binary.BigEndian.Uint16([]byte{msg.Type, msg.code})).String()
}
if tuple.icmpVersion == 6 {
return layers.ICMPv6TypeCode(binary.BigEndian.Uint16([]byte{msg.Type, msg.code})).String()
}
logp.WTF("icmp", "Invalid ICMP version[%d]", tuple.icmpVersion)
return ""
}
| []
| []
| []
| [] | [] | go | null | null | null |
lib/go/blocklist/blocklist_test.go | package blocklist
import (
"os"
"strconv"
"testing"
"github.com/bjartek/go-with-the-flow/v2/gwtf"
util "github.com/flow-usdc/flow-usdc"
"github.com/flow-usdc/flow-usdc/owner"
"github.com/flow-usdc/flow-usdc/vault"
"github.com/stretchr/testify/assert"
)
func TestCreateBlocklister(t *testing.T) {
g := gwtf.NewGoWithTheFlow(util.FlowJSON, os.Getenv("NETWORK"), false, 1)
events, err := CreateBlocklister(g, "blocklister")
assert.NoError(t, err)
// Test event
util.NewExpectedEvent("FiatToken", "BlocklisterCreated").AssertHasKey(t, events[0], "resourceId")
_, err = CreateBlocklister(g, "non-blocklister")
assert.NoError(t, err)
}
func TestSetBlocklistCapability(t *testing.T) {
g := gwtf.NewGoWithTheFlow(util.FlowJSON, os.Getenv("NETWORK"), false, 1)
err := owner.SetBlocklistCapability(g, "blocklister", "owner")
assert.NoError(t, err)
}
func TestBlocklistWithCap(t *testing.T) {
g := gwtf.NewGoWithTheFlow(util.FlowJSON, os.Getenv("NETWORK"), false, 1)
_, err := vault.AddVaultToAccount(g, "vaulted-account")
assert.NoError(t, err)
_, err = vault.TransferTokens(g, "1.00000000", "owner", "vaulted-account")
assert.NoError(t, err)
uuid, err := util.GetUUID(g, "vaulted-account", "Vault")
assert.NoError(t, err)
events, err := BlocklistOrUnblocklistRsc(g, "blocklister", uuid, 1)
assert.NoError(t, err)
// Test event
util.NewExpectedEvent("FiatToken", "Blocklisted").AddField("resourceId", strconv.Itoa(int(uuid))).AssertEqual(t, events[0])
blockheight, err := GetBlocklistStatus(g, uuid)
assert.NoError(t, err)
assert.Equal(t, true, blockheight > 0)
	// Once blocklisted, "vaulted-account" should not be able to transfer, receive,
	// or load its vault and deposit its contents into another vault:
	// - check that the initial and post-tx balances are the same
	// - ensure that the tx fails
init_rec_balance, err := util.GetBalance(g, "vaulted-account")
assert.NoError(t, err)
// Test cannot receive
events, err = vault.TransferTokens(g, "10.00000000", "owner", "vaulted-account")
assert.Error(t, err)
assert.Empty(t, events)
// Test cannot withdraw
events, err = vault.TransferTokens(g, "0.50000000", "vaulted-account", "owner")
assert.Error(t, err)
assert.Empty(t, events)
// Test cannot load and deposit
events, err = vault.MoveAndDeposit(g, "vaulted-account", "owner")
assert.Error(t, err)
assert.Empty(t, events)
post_rec_balance, err := util.GetBalance(g, "vaulted-account")
assert.NoError(t, err)
assert.Equal(t, init_rec_balance, post_rec_balance)
}
func TestUnblocklistWithCap(t *testing.T) {
g := gwtf.NewGoWithTheFlow(util.FlowJSON, os.Getenv("NETWORK"), false, 1)
uuid, err := util.GetUUID(g, "vaulted-account", "Vault")
assert.NoError(t, err)
events, err := BlocklistOrUnblocklistRsc(g, "blocklister", uuid, 0)
assert.NoError(t, err)
// Test event
util.NewExpectedEvent("FiatToken", "Unblocklisted").AddField("resourceId", strconv.Itoa(int(uuid))).AssertEqual(t, events[0])
	// After being unblocklisted, "vaulted-account" should be able to transfer
// - the balance of post tx, recv should receive 10.0 more
// - ensure that tx has no error
init_rec_balance, err := util.GetBalance(g, "vaulted-account")
assert.NoError(t, err)
_, err = vault.TransferTokens(g, "10.00000000", "owner", "vaulted-account")
assert.NoError(t, err)
post_rec_balance, err := util.GetBalance(g, "vaulted-account")
assert.NoError(t, err)
assert.Equal(t, "10.00000000", (post_rec_balance - init_rec_balance).String())
}
func TestBlocklistWithoutCap(t *testing.T) {
g := gwtf.NewGoWithTheFlow(util.FlowJSON, os.Getenv("NETWORK"), false, 1)
uuid, err := util.GetUUID(g, "vaulted-account", "Vault")
assert.NoError(t, err)
rawEvents, err := BlocklistOrUnblocklistRsc(g, "non-blocklister", uuid, 1)
assert.Error(t, err)
assert.Empty(t, rawEvents)
}
func TestMultiSig_Blocklist(t *testing.T) {
g := gwtf.NewGoWithTheFlow(util.FlowJSON, os.Getenv("NETWORK"), false, 1)
// Add New Payload
currentIndex, err := util.GetTxIndex(g, "blocklister", "Blocklister")
assert.NoError(t, err)
expectedNewIndex := currentIndex + 1
toBlock := uint64(111)
resourceId := util.Arg{V: toBlock, T: "UInt64"}
// `true` for new payload
events, err := util.MultiSig_SignAndSubmit(g, true, expectedNewIndex, util.Acct500_1, "blocklister", "Blocklister", "blocklist", resourceId)
assert.NoError(t, err)
newTxIndex, err := util.GetTxIndex(g, "blocklister", "Blocklister")
assert.NoError(t, err)
assert.Equal(t, expectedNewIndex, newTxIndex)
blocklister, err := util.GetUUID(g, "blocklister", "Blocklister")
assert.NoError(t, err)
util.NewExpectedEvent("OnChainMultiSig", "NewPayloadAdded").
AddField("resourceId", strconv.Itoa(int(blocklister))).
AddField("txIndex", strconv.Itoa(int(newTxIndex))).
AssertEqual(t, events[0])
	// Try to execute without enough weight. This should error as there is not yet enough signer weight
_, err = util.MultiSig_ExecuteTx(g, newTxIndex, "owner", "blocklister", "Blocklister")
assert.Error(t, err)
// Add Another Payload Signature
	// `false` for a new signature for an existing payload
events, err = util.MultiSig_SignAndSubmit(g, false, expectedNewIndex, util.Acct500_2, "blocklister", "Blocklister", "blocklist", resourceId)
assert.NoError(t, err)
util.NewExpectedEvent("OnChainMultiSig", "NewPayloadSigAdded").
AddField("resourceId", strconv.Itoa(int(blocklister))).
AddField("txIndex", strconv.Itoa(int(newTxIndex))).
AssertEqual(t, events[0])
events, err = util.MultiSig_ExecuteTx(g, newTxIndex, "owner", "blocklister", "Blocklister")
assert.NoError(t, err)
// Test event
util.NewExpectedEvent("FiatToken", "Blocklisted").AddField("resourceId", strconv.Itoa(int(toBlock))).AssertEqual(t, events[0])
blockheight, err := GetBlocklistStatus(g, toBlock)
assert.NoError(t, err)
assert.Equal(t, true, blockheight > 0)
}
func TestMultiSig_Unblocklist(t *testing.T) {
g := gwtf.NewGoWithTheFlow(util.FlowJSON, os.Getenv("NETWORK"), false, 1)
// Add New Payload
currentIndex, err := util.GetTxIndex(g, "blocklister", "Blocklister")
assert.NoError(t, err)
expectedNewIndex := currentIndex + 1
toUnblock := uint64(111)
resourceId := util.Arg{V: toUnblock, T: "UInt64"}
// `true` for new payload
// signed with full account
_, err = util.MultiSig_SignAndSubmit(g, true, expectedNewIndex, util.Acct1000, "blocklister", "Blocklister", "unblocklist", resourceId)
assert.NoError(t, err)
newTxIndex, err := util.GetTxIndex(g, "blocklister", "Blocklister")
assert.NoError(t, err)
assert.Equal(t, expectedNewIndex, newTxIndex)
events, err := util.MultiSig_ExecuteTx(g, newTxIndex, "owner", "blocklister", "Blocklister")
assert.NoError(t, err)
// Test event
util.NewExpectedEvent("FiatToken", "Unblocklisted").AddField("resourceId", strconv.Itoa(int(toUnblock))).AssertEqual(t, events[0])
blocked, err := GetBlocklistStatus(g, toUnblock)
assert.NoError(t, err)
assert.Equal(t, uint64(0), blocked)
}
func TestMultiSig_BlocklisterUnknowMethodFails(t *testing.T) {
g := gwtf.NewGoWithTheFlow(util.FlowJSON, os.Getenv("NETWORK"), false, 1)
m := util.Arg{V: uint64(111), T: "UInt64"}
txIndex, err := util.GetTxIndex(g, "blocklister", "Blocklister")
assert.NoError(t, err)
_, err = util.MultiSig_SignAndSubmit(g, true, txIndex+1, util.Acct1000, "blocklister", "Blocklister", "unknowmethod", m)
assert.NoError(t, err)
newTxIndex, err := util.GetTxIndex(g, "blocklister", "Blocklister")
assert.NoError(t, err)
_, err = util.MultiSig_ExecuteTx(g, newTxIndex, "owner", "blocklister", "Blocklister")
assert.Error(t, err)
}
func TestMultiSig_BlocklisterCanRemoveKey(t *testing.T) {
g := gwtf.NewGoWithTheFlow(util.FlowJSON, os.Getenv("NETWORK"), false, 1)
pk250_1 := g.Account(util.Acct250_1).Key().ToConfig().PrivateKey.PublicKey().String()
k := util.Arg{V: pk250_1[2:], T: "String"}
hasKey, err := util.ContainsKey(g, "blocklister", "Blocklister", pk250_1[2:])
assert.NoError(t, err)
assert.Equal(t, hasKey, true)
txIndex, err := util.GetTxIndex(g, "blocklister", "Blocklister")
newTxIndex := txIndex + 1
assert.NoError(t, err)
_, err = util.MultiSig_SignAndSubmit(g, true, newTxIndex, util.Acct1000, "blocklister", "Blocklister", "removeKey", k)
assert.NoError(t, err)
_, err = util.MultiSig_ExecuteTx(g, newTxIndex, "owner", "blocklister", "Blocklister")
assert.NoError(t, err)
hasKey, err = util.ContainsKey(g, "blocklister", "Blocklister", pk250_1[2:])
assert.NoError(t, err)
assert.Equal(t, hasKey, false)
}
func TestMultiSig_BlocklisterCanAddKey(t *testing.T) {
g := gwtf.NewGoWithTheFlow(util.FlowJSON, os.Getenv("NETWORK"), false, 1)
pk250_1 := g.Account(util.Acct250_1).Key().ToConfig().PrivateKey.PublicKey().String()
k := util.Arg{V: pk250_1[2:], T: "String"}
w := util.Arg{V: "250.00000000", T: "UFix64"}
sa := util.Arg{V: uint8(1), T: "UInt8"}
hasKey, err := util.ContainsKey(g, "blocklister", "Blocklister", pk250_1[2:])
assert.NoError(t, err)
assert.Equal(t, hasKey, false)
txIndex, err := util.GetTxIndex(g, "blocklister", "Blocklister")
newTxIndex := txIndex + 1
assert.NoError(t, err)
_, err = util.MultiSig_SignAndSubmit(g, true, newTxIndex, util.Acct1000, "blocklister", "Blocklister", "configureKey", k, w, sa)
assert.NoError(t, err)
_, err = util.MultiSig_ExecuteTx(g, newTxIndex, "owner", "blocklister", "Blocklister")
assert.NoError(t, err)
hasKey, err = util.ContainsKey(g, "blocklister", "Blocklister", pk250_1[2:])
assert.NoError(t, err)
assert.Equal(t, hasKey, true)
weight, err := util.GetKeyWeight(g, util.Acct250_1, "blocklister", "Blocklister")
assert.NoError(t, err)
assert.Equal(t, w.V, weight.String())
}
| [
"\"NETWORK\"",
"\"NETWORK\"",
"\"NETWORK\"",
"\"NETWORK\"",
"\"NETWORK\"",
"\"NETWORK\"",
"\"NETWORK\"",
"\"NETWORK\"",
"\"NETWORK\"",
"\"NETWORK\""
]
| []
| [
"NETWORK"
]
| [] | ["NETWORK"] | go | 1 | 0 | |
timevortex/wsgi.py | """
WSGI config for timevortex project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "timevortex.settings.production")
application = get_wsgi_application() # pylint: disable=I0011,C0103
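# Example: serving this application with gunicorn (the bind address is
# illustrative):
#   $ gunicorn timevortex.wsgi:application --bind 0.0.0.0:8000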
| []
| []
| []
| [] | [] | python | 0 | 0 | |
src/ruby/supply/supply_test.go | package supply_test
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"reflect"
"github.com/cloudfoundry/ruby-buildpack/src/ruby/cache"
"github.com/cloudfoundry/ruby-buildpack/src/ruby/supply"
"github.com/cloudfoundry/libbuildpack"
"github.com/cloudfoundry/libbuildpack/ansicleaner"
"github.com/golang/mock/gomock"
. "github.com/onsi/ginkgo"
// . "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
)
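// The mocks referenced below live in mocks_test.go; they can be regenerated
// with `go generate ./...` (requires mockgen, see github.com/golang/mock).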
//go:generate mockgen -source=supply.go --destination=mocks_test.go --package=supply_test
type MacTempDir struct{}
func (t *MacTempDir) CopyDirToTemp(dir string) (string, error) {
	tmpDir, err := ioutil.TempDir("", "supply-tests")
	Expect(err).To(BeNil())
	tmpDir = filepath.Join(tmpDir, filepath.Base(dir))
	Expect(os.MkdirAll(tmpDir, 0700)).To(Succeed())
	Expect(libbuildpack.CopyDirectory(dir, tmpDir)).To(Succeed())
	return tmpDir, nil
}
var _ = Describe("Supply", func() {
var (
err error
buildDir string
depsDir string
depsIdx string
supplier *supply.Supplier
logger *libbuildpack.Logger
buffer *bytes.Buffer
mockCtrl *gomock.Controller
mockManifest *MockManifest
mockInstaller *MockInstaller
mockVersions *MockVersions
mockCommand *MockCommand
mockCache *MockCache
mockTempDir *MacTempDir
)
BeforeEach(func() {
buildDir, err = ioutil.TempDir("", "ruby-buildpack.build.")
Expect(err).To(BeNil())
depsDir, err = ioutil.TempDir("", "ruby-buildpack.deps.")
Expect(err).To(BeNil())
depsIdx = "9"
Expect(os.MkdirAll(filepath.Join(depsDir, depsIdx), 0755)).To(Succeed())
buffer = new(bytes.Buffer)
logger = libbuildpack.NewLogger(ansicleaner.New(buffer))
mockCtrl = gomock.NewController(GinkgoT())
mockManifest = NewMockManifest(mockCtrl)
mockManifest.EXPECT().AllDependencyVersions("bundler").Return([]string{"1.17.2"}).AnyTimes()
mockInstaller = NewMockInstaller(mockCtrl)
mockVersions = NewMockVersions(mockCtrl)
mockVersions.EXPECT().Gemfile().AnyTimes().Return(filepath.Join(buildDir, "Gemfile"))
mockVersions.EXPECT().GetBundlerVersion().Return("1.17.2", nil).AnyTimes()
mockCommand = NewMockCommand(mockCtrl)
mockCache = NewMockCache(mockCtrl)
mockTempDir = &MacTempDir{}
args := []string{buildDir, "", depsDir, depsIdx}
stager := libbuildpack.NewStager(args, logger, &libbuildpack.Manifest{})
supplier = &supply.Supplier{
Stager: stager,
Manifest: mockManifest,
Installer: mockInstaller,
Log: logger,
Versions: mockVersions,
Cache: mockCache,
Command: mockCommand,
TempDir: mockTempDir,
}
})
JustBeforeEach(func() {
Expect(supplier.Setup()).To(Succeed())
})
AfterEach(func() {
mockCtrl.Finish()
err = os.RemoveAll(buildDir)
Expect(err).To(BeNil())
err = os.RemoveAll(depsDir)
Expect(err).To(BeNil())
})
Describe("InstallBundler", func() {
var tempSupplier supply.Supplier
BeforeEach(func() {
tempSupplier = *supplier
mockStager := NewMockStager(mockCtrl)
tempSupplier.Stager = mockStager
mockInstaller.EXPECT().InstallDependency(libbuildpack.Dependency{Name: "bundler", Version: "1.17.2"}, gomock.Any())
mockStager.EXPECT().LinkDirectoryInDepDir(gomock.Any(), gomock.Any())
mockStager.EXPECT().DepDir().AnyTimes()
err := ioutil.WriteFile(filepath.Join(buildDir, "Gemfile.lock"), []byte("BUNDLED WITH\n 1.16.4"), 0644)
Expect(err).NotTo(HaveOccurred())
})
It("installs bundler version matching constraint given", func() {
Expect(tempSupplier.InstallBundler()).To(Succeed())
})
})
Describe("CalcChecksum", func() {
BeforeEach(func() {
Expect(ioutil.WriteFile(filepath.Join(buildDir, "Gemfile"), []byte("source \"https://rubygems.org\"\r\ngem \"rack\"\r\n"), 0644)).To(Succeed())
Expect(ioutil.WriteFile(filepath.Join(buildDir, "other"), []byte("other"), 0644)).To(Succeed())
Expect(os.MkdirAll(filepath.Join(buildDir, "dir"), 0755)).To(Succeed())
Expect(ioutil.WriteFile(filepath.Join(buildDir, "dir", "other"), []byte("other"), 0644)).To(Succeed())
})
It("Returns an MD5 of the full contents", func() {
Expect(supplier.CalcChecksum()).To(Equal("d8be25466f8d12112d354e1a4add36a3"))
})
Context(".cloudfoundry directory", func() {
BeforeEach(func() {
Expect(os.MkdirAll(filepath.Join(buildDir, ".cloudfoundry", "dir"), 0755)).To(Succeed())
Expect(ioutil.WriteFile(filepath.Join(buildDir, ".cloudfoundry", "other"), []byte("other"), 0644)).To(Succeed())
Expect(ioutil.WriteFile(filepath.Join(buildDir, ".cloudfoundry", "dir", "other"), []byte("other"), 0644)).To(Succeed())
})
It("excludes .cloudfoundry directory", func() {
Expect(supplier.CalcChecksum()).To(Equal("d8be25466f8d12112d354e1a4add36a3"))
})
})
})
Describe("InstallGems", func() {
BeforeEach(func() {
mockVersions.EXPECT().RubyEngineVersion().AnyTimes().Return("2.5.0", nil)
mockVersions.EXPECT().Engine().AnyTimes().Return("ruby", nil)
})
const windowsWarning = "**WARNING** Windows line endings detected in Gemfile. Your app may fail to stage. Please use UNIX line endings."
handleBundleBinstubRegeneration := func(cmd *exec.Cmd) error {
if len(cmd.Args) > 5 && reflect.DeepEqual(cmd.Args[0:5], []string{"bundle", "binstubs", "bundler", "--force", "--path"}) {
Expect(cmd.Args[5]).To(HavePrefix(filepath.Join(depsDir, depsIdx)))
Expect(os.MkdirAll(cmd.Args[5], 0755)).To(Succeed())
Expect(ioutil.WriteFile(filepath.Join(cmd.Args[5], "bundle"), []byte("new bundle binstub"), 0644)).To(Succeed())
}
return nil
}
itRegeneratesBundleBinstub := func() {
It("Re-generates the bundler binstub to replace older, rails-generated ones that are incompatible with bundler > 1.16.0", func() {
Expect(supplier.InstallGems()).To(Succeed())
Expect(ioutil.ReadFile(filepath.Join(depsDir, depsIdx, "binstubs", "bundle"))).To(Equal([]byte("new bundle binstub")))
Expect(ioutil.ReadFile(filepath.Join(depsDir, depsIdx, "bin", "bundle"))).To(Equal([]byte("new bundle binstub")))
})
}
Context("Windows Gemfile", func() {
BeforeEach(func() {
mockVersions.EXPECT().HasWindowsGemfileLock().Return(false, nil)
mockCommand.EXPECT().Run(gomock.Any()).AnyTimes().Do(handleBundleBinstubRegeneration)
Expect(ioutil.WriteFile(filepath.Join(buildDir, "Gemfile"), []byte("source \"https://rubygems.org\"\r\ngem \"rack\"\r\n"), 0644)).To(Succeed())
})
itRegeneratesBundleBinstub()
It("Warns the user", func() {
Expect(supplier.InstallGems()).To(Succeed())
Expect(buffer.String()).To(ContainSubstring(windowsWarning))
})
})
Context("UNIX Gemfile", func() {
BeforeEach(func() {
os.Setenv("BUNDLE_CONFIG", filepath.Join(depsDir, depsIdx, "bundle_config"))
mockVersions.EXPECT().HasWindowsGemfileLock().Return(false, nil)
mockCommand.EXPECT().Run(gomock.Any()).AnyTimes().Do(func(cmd *exec.Cmd) error {
if len(cmd.Args) > 2 && cmd.Args[1] == "install" {
Expect(os.MkdirAll(filepath.Join(cmd.Dir, ".bundle"), 0755)).To(Succeed())
Expect(ioutil.WriteFile(filepath.Join(cmd.Dir, ".bundle", "config"), []byte("new bundle config"), 0644)).To(Succeed())
} else {
return handleBundleBinstubRegeneration(cmd)
}
return nil
})
Expect(ioutil.WriteFile(filepath.Join(buildDir, "Gemfile"), []byte("source \"https://rubygems.org\"\ngem \"rack\"\n"), 0644)).To(Succeed())
})
AfterEach(func() {
os.Unsetenv("BUNDLE_CONFIG")
})
itRegeneratesBundleBinstub()
It("Does not warn the user", func() {
Expect(supplier.InstallGems()).To(Succeed())
Expect(buffer.String()).ToNot(ContainSubstring(windowsWarning))
})
It("does not change .bundle/config", func() {
Expect(os.MkdirAll(filepath.Join(buildDir, ".bundle"), 0755)).To(Succeed())
Expect(ioutil.WriteFile(filepath.Join(buildDir, ".bundle", "config"), []byte("orig content"), 0644)).To(Succeed())
Expect(ioutil.ReadFile(filepath.Join(buildDir, ".bundle", "config"))).To(Equal([]byte("orig content")))
Expect(supplier.InstallGems()).To(Succeed())
Expect(ioutil.ReadFile(filepath.Join(buildDir, ".bundle", "config"))).To(Equal([]byte("orig content")))
})
})
Context("Windows Gemfile.lock", func() {
BeforeEach(func() {
mockVersions.EXPECT().RubyEngineVersion().AnyTimes().Return("2.5.0", nil)
})
Context("With Unix Line Endings", func() {
const gemfileLock = "GEM\n remote: https://rubygems.org/\n specs:\n rack (1.5.2)\n\nPLATFORMS\n x64-mingw32\n ruby\n\nDEPENDENCIES\n rack\n"
const newGemfileLock = "new lockfile"
BeforeEach(func() {
mockVersions.EXPECT().HasWindowsGemfileLock().Return(false, nil)
mockVersions.EXPECT().BundledWithVersion().Return("", nil)
Expect(ioutil.WriteFile(filepath.Join(buildDir, "Gemfile"), []byte("source \"https://rubygems.org\"\ngem \"rack\"\n"), 0644)).To(Succeed())
Expect(ioutil.WriteFile(filepath.Join(buildDir, "Gemfile.lock"), []byte(gemfileLock), 0644)).To(Succeed())
})
It("runs bundler with existing Gemfile.lock", func() {
mockCommand.EXPECT().Run(gomock.Any()).AnyTimes().Do(func(cmd *exec.Cmd) {
if cmd.Args[1] == "install" {
Expect(filepath.Join(cmd.Dir, "Gemfile")).To(BeAnExistingFile())
Expect(filepath.Join(cmd.Dir, "Gemfile.lock")).To(BeAnExistingFile())
} else {
handleBundleBinstubRegeneration(cmd)
}
})
Expect(supplier.InstallGems()).To(Succeed())
Expect(ioutil.ReadFile(filepath.Join(buildDir, "Gemfile.lock"))).To(ContainSubstring(gemfileLock))
Expect(ioutil.ReadFile(filepath.Join(depsDir, depsIdx, "Gemfile.lock"))).To(ContainSubstring(gemfileLock))
})
It("runs bundler in a copy so it does not change the build directory", func() {
installCalled := false
mockCommand.EXPECT().Run(gomock.Any()).AnyTimes().Do(func(cmd *exec.Cmd) {
if cmd.Args[1] == "install" {
Expect(cmd.Dir).ToNot(Equal(buildDir))
installCalled = true
} else {
handleBundleBinstubRegeneration(cmd)
}
})
Expect(supplier.InstallGems()).To(Succeed())
Expect(installCalled).To(BeTrue())
})
})
Context("With Windows Line Endings", func() {
const gemfileLock = "GEM\n remote: https://rubygems.org/\n specs:\n rack (1.5.2)\n\nPLATFORMS\n x64-mingw32\n\nDEPENDENCIES\n rack\n"
const newGemfileLock = "new lockfile"
BeforeEach(func() {
mockVersions.EXPECT().BundledWithVersion().Return("", nil)
mockVersions.EXPECT().HasWindowsGemfileLock().Return(true, nil)
Expect(ioutil.WriteFile(filepath.Join(buildDir, "Gemfile"), []byte("source \"https://rubygems.org\"\r\ngem \"rack\"\r\n"), 0644)).To(Succeed())
Expect(ioutil.WriteFile(filepath.Join(buildDir, "Gemfile.lock"), []byte(gemfileLock), 0644)).To(Succeed())
})
It("runs bundler without the Gemfile.lock and copies the Gemfile.lock it creates to the dep directory", func() {
mockCommand.EXPECT().Run(gomock.Any()).AnyTimes().Do(func(cmd *exec.Cmd) {
if cmd.Args[1] == "install" {
Expect(cmd.Args).ToNot(ContainElement("--deployment"))
Expect(filepath.Join(cmd.Dir, "Gemfile")).To(BeAnExistingFile())
Expect(filepath.Join(cmd.Dir, "Gemfile.lock")).ToNot(BeAnExistingFile())
Expect(ioutil.WriteFile(filepath.Join(cmd.Dir, "Gemfile.lock"), []byte(newGemfileLock), 0644)).To(Succeed())
} else {
handleBundleBinstubRegeneration(cmd)
}
})
Expect(supplier.InstallGems()).To(Succeed())
Expect(ioutil.ReadFile(filepath.Join(buildDir, "Gemfile.lock"))).To(ContainSubstring(gemfileLock))
Expect(ioutil.ReadFile(filepath.Join(depsDir, depsIdx, "Gemfile.lock"))).To(ContainSubstring(newGemfileLock))
})
It("runs bundler in a copy so it does not change the build directory", func() {
installCalled := false
mockCommand.EXPECT().Run(gomock.Any()).AnyTimes().Do(func(cmd *exec.Cmd) {
if cmd.Args[1] == "install" {
Expect(cmd.Dir).ToNot(Equal(buildDir))
installCalled = true
} else {
handleBundleBinstubRegeneration(cmd)
}
})
Expect(supplier.InstallGems()).To(Succeed())
Expect(installCalled).To(BeTrue())
})
})
})
})
Describe("InstallJVM", func() {
Context("app/.jdk exists", func() {
BeforeEach(func() {
Expect(os.Mkdir(filepath.Join(buildDir, ".jdk"), 0755)).To(Succeed())
})
It("skips jdk install", func() {
Expect(supplier.InstallJVM()).To(Succeed())
Expect(buffer.String()).To(ContainSubstring("Using pre-installed JDK"))
Expect(filepath.Join(depsDir, depsIdx, "jvm")).ToNot(BeADirectory())
})
})
Context("app/.jdk does not exist", func() {
BeforeEach(func() {
mockInstaller.EXPECT().InstallOnlyVersion("openjdk1.8-latest", gomock.Any()).Do(func(_, path string) error {
Expect(os.MkdirAll(filepath.Join(path, "bin"), 0755)).To(Succeed())
Expect(ioutil.WriteFile(filepath.Join(path, "bin", "java"), []byte("java.exe"), 0755)).To(Succeed())
return nil
})
})
It("installs and links the JDK", func() {
Expect(supplier.InstallJVM()).To(Succeed())
Expect(filepath.Join(depsDir, depsIdx, "jvm", "bin", "java")).To(BeAnExistingFile())
Expect(filepath.Join(depsDir, depsIdx, "bin", "java")).To(BeAnExistingFile())
})
It("writes jruby default env vars to profile.d", func() {
Expect(supplier.InstallJVM()).To(Succeed())
body, err := ioutil.ReadFile(filepath.Join(depsDir, depsIdx, "profile.d", "jruby.sh"))
Expect(err).ToNot(HaveOccurred())
Expect(string(body)).To(ContainSubstring(`export JAVA_MEM=${JAVA_MEM:--Xmx${JVM_MAX_HEAP:-384}m}`))
})
})
})
Describe("EnableLDLibraryPathEnv", func() {
AfterEach(func() {
Expect(os.Unsetenv("LD_LIBRARY_PATH")).To(Succeed())
})
Context("app has ld_library_path directory", func() {
BeforeEach(func() {
Expect(os.Mkdir(filepath.Join(buildDir, "ld_library_path"), 0755)).To(Succeed())
})
Context("LD_LIBRARY_PATH is set", func() {
BeforeEach(func() {
Expect(os.Setenv("LD_LIBRARY_PATH", "prior_ld_path")).To(Succeed())
})
It("Sets LD_LIBRARY_PATH", func() {
Expect(supplier.EnableLDLibraryPathEnv()).To(Succeed())
Expect(os.Getenv("LD_LIBRARY_PATH")).To(Equal(filepath.Join(buildDir, "ld_library_path") + ":prior_ld_path"))
})
It("Writes LD_LIBRARY_PATH env file for later buildpacks", func() {
Expect(supplier.EnableLDLibraryPathEnv()).To(Succeed())
Expect(filepath.Join(depsDir, depsIdx, "env", "LD_LIBRARY_PATH")).To(BeAnExistingFile())
Expect(ioutil.ReadFile(filepath.Join(depsDir, depsIdx, "env", "LD_LIBRARY_PATH"))).To(Equal([]byte(filepath.Join(buildDir, "ld_library_path") + ":prior_ld_path")))
})
It("Writes LD_LIBRARY_PATH env file as a profile.d script", func() {
Expect(supplier.EnableLDLibraryPathEnv()).To(Succeed())
Expect(filepath.Join(depsDir, depsIdx, "profile.d", "app_lib_path.sh")).To(BeAnExistingFile())
Expect(ioutil.ReadFile(filepath.Join(depsDir, depsIdx, "profile.d", "app_lib_path.sh"))).To(Equal([]byte(`export LD_LIBRARY_PATH="$HOME/ld_library_path$([[ ! -z "${LD_LIBRARY_PATH:-}" ]] && echo ":$LD_LIBRARY_PATH")"`)))
})
})
Context("LD_LIBRARY_PATH is NOT set", func() {
BeforeEach(func() {
Expect(os.Unsetenv("LD_LIBRARY_PATH")).To(Succeed())
})
It("Sets LD_LIBRARY_PATH", func() {
Expect(supplier.EnableLDLibraryPathEnv()).To(Succeed())
Expect(os.Getenv("LD_LIBRARY_PATH")).To(Equal(filepath.Join(buildDir, "ld_library_path")))
})
It("Writes LD_LIBRARY_PATH env file for later buildpacks", func() {
Expect(supplier.EnableLDLibraryPathEnv()).To(Succeed())
Expect(filepath.Join(depsDir, depsIdx, "env", "LD_LIBRARY_PATH")).To(BeAnExistingFile())
Expect(ioutil.ReadFile(filepath.Join(depsDir, depsIdx, "env", "LD_LIBRARY_PATH"))).To(Equal([]byte(filepath.Join(buildDir, "ld_library_path"))))
})
It("Writes LD_LIBRARY_PATH env file as a profile.d script", func() {
Expect(supplier.EnableLDLibraryPathEnv()).To(Succeed())
Expect(filepath.Join(depsDir, depsIdx, "profile.d", "app_lib_path.sh")).To(BeAnExistingFile())
Expect(ioutil.ReadFile(filepath.Join(depsDir, depsIdx, "profile.d", "app_lib_path.sh"))).To(Equal([]byte(`export LD_LIBRARY_PATH="$HOME/ld_library_path$([[ ! -z "${LD_LIBRARY_PATH:-}" ]] && echo ":$LD_LIBRARY_PATH")"`)))
})
})
})
Context("app does NOT have ld_library_path directory", func() {
var oldLibraryPath string
BeforeEach(func() {
oldLibraryPath = os.Getenv("LD_LIBRARY_PATH")
Expect(os.Setenv("LD_LIBRARY_PATH", "/foo/lib")).To(Succeed())
})
AfterEach(func() {
Expect(os.Setenv("LD_LIBRARY_PATH", oldLibraryPath)).To(Succeed())
})
It("Does not change LD_LIBRARY_PATH", func() {
Expect(supplier.EnableLDLibraryPathEnv()).To(Succeed())
Expect(os.Getenv("LD_LIBRARY_PATH")).To(Equal("/foo/lib"))
})
It("Does not write LD_LIBRARY_PATH env file for later buildpacks", func() {
Expect(supplier.EnableLDLibraryPathEnv()).To(Succeed())
Expect(filepath.Join(depsDir, depsIdx, "env", "LD_LIBRARY_PATH")).ToNot(BeAnExistingFile())
})
})
})
Describe("CreateDefaultEnv", func() {
AfterEach(func() {
_ = os.Unsetenv("RAILS_ENV")
_ = os.Unsetenv("RACK_ENV")
_ = os.Unsetenv("RAILS_GROUPS")
})
It("Sets RAILS_ENV", func() {
Expect(supplier.CreateDefaultEnv()).To(Succeed())
Expect(os.Getenv("RAILS_ENV")).To(Equal("production"))
})
It("Sets RAILS_GROUPS", func() {
Expect(supplier.CreateDefaultEnv()).To(Succeed())
Expect(os.Getenv("RAILS_GROUPS")).To(Equal("assets"))
})
It("Sets RACK_ENV", func() {
Expect(supplier.CreateDefaultEnv()).To(Succeed())
Expect(os.Getenv("RACK_ENV")).To(Equal("production"))
})
It("Sets RAILS_ENV in env directory", func() {
Expect(supplier.CreateDefaultEnv()).To(Succeed())
data, err := ioutil.ReadFile(filepath.Join(depsDir, depsIdx, "env", "RAILS_ENV"))
Expect(err).ToNot(HaveOccurred())
Expect(string(data)).To(Equal("production"))
})
It("Sets RAILS_GROUPS in env directory", func() {
Expect(supplier.CreateDefaultEnv()).To(Succeed())
data, err := ioutil.ReadFile(filepath.Join(depsDir, depsIdx, "env", "RAILS_GROUPS"))
Expect(err).ToNot(HaveOccurred())
Expect(string(data)).To(Equal("assets"))
})
It("Sets RACK_ENV in env directory", func() {
Expect(supplier.CreateDefaultEnv()).To(Succeed())
data, err := ioutil.ReadFile(filepath.Join(depsDir, depsIdx, "env", "RACK_ENV"))
Expect(err).ToNot(HaveOccurred())
Expect(string(data)).To(Equal("production"))
})
Context("RAILS_ENV is set", func() {
BeforeEach(func() { _ = os.Setenv("RAILS_ENV", "test") })
It("does not set RAILS_ENV", func() {
Expect(supplier.CreateDefaultEnv()).To(Succeed())
Expect(os.Getenv("RAILS_ENV")).To(Equal("test"))
})
It("does not set RAILS_ENV in env directory", func() {
Expect(supplier.CreateDefaultEnv()).To(Succeed())
Expect(filepath.Join(depsDir, depsIdx, "env", "RAILS_ENV")).ToNot(BeAnExistingFile())
})
})
Context("RAILS_GROUPS is set", func() {
BeforeEach(func() { _ = os.Setenv("RAILS_GROUPS", "test") })
It("does not set RAILS_ENV", func() {
Expect(supplier.CreateDefaultEnv()).To(Succeed())
Expect(os.Getenv("RAILS_GROUPS")).To(Equal("test"))
})
It("does not set RAILS_ENV in env directory", func() {
Expect(supplier.CreateDefaultEnv()).To(Succeed())
Expect(filepath.Join(depsDir, depsIdx, "env", "RAILS_GROUPS")).ToNot(BeAnExistingFile())
})
})
Context("RACK_ENV is set", func() {
BeforeEach(func() { _ = os.Setenv("RACK_ENV", "test") })
It("does not set RACK_ENV", func() {
Expect(supplier.CreateDefaultEnv()).To(Succeed())
Expect(os.Getenv("RACK_ENV")).To(Equal("test"))
})
It("does not set RACK_ENV in env directory", func() {
Expect(supplier.CreateDefaultEnv()).To(Succeed())
Expect(filepath.Join(depsDir, depsIdx, "env", "RACK_ENV")).ToNot(BeAnExistingFile())
})
})
})
Describe("WriteProfileD", func() {
BeforeEach(func() {
mockCommand.EXPECT().Output(buildDir, "node", "--version").AnyTimes().Return("v8.2.1", nil)
Expect(ioutil.WriteFile(filepath.Join(buildDir, "Gemfile"), []byte{}, 0644)).To(Succeed())
Expect(ioutil.WriteFile(filepath.Join(buildDir, "Gemfile.lock"), []byte{}, 0644)).To(Succeed())
})
Describe("SecretKeyBase", func() {
BeforeEach(func() {
mockVersions.EXPECT().RubyEngineVersion().AnyTimes().Return("2.5.0", nil)
})
Context("Rails >= 4.1", func() {
BeforeEach(func() {
mockVersions.EXPECT().HasGemVersion("rails", ">=4.1.0.beta1").Return(true, nil)
})
Context("SECRET_KEY_BASE is cached", func() {
BeforeEach(func() {
mockCache.EXPECT().Metadata().Return(&cache.Metadata{SecretKeyBase: "foobar"})
})
It("writes the cached SECRET_KEY_BASE to profile.d", func() {
Expect(supplier.WriteProfileD("enginename")).To(Succeed())
contents, err := ioutil.ReadFile(filepath.Join(depsDir, depsIdx, "profile.d", "ruby.sh"))
Expect(err).ToNot(HaveOccurred())
Expect(string(contents)).To(ContainSubstring("export SECRET_KEY_BASE=${SECRET_KEY_BASE:-foobar}"))
})
})
Context("SECRET_KEY_BASE is not cached", func() {
BeforeEach(func() {
mockCache.EXPECT().Metadata().Return(&cache.Metadata{})
mockCommand.EXPECT().Output(buildDir, "bundle", "exec", "rake", "secret").Return("\n\nabcdef\n\n", nil)
})
It("writes default SECRET_KEY_BASE to profile.d", func() {
Expect(supplier.WriteProfileD("enginename")).To(Succeed())
contents, err := ioutil.ReadFile(filepath.Join(depsDir, depsIdx, "profile.d", "ruby.sh"))
Expect(err).ToNot(HaveOccurred())
Expect(string(contents)).To(ContainSubstring("export SECRET_KEY_BASE=${SECRET_KEY_BASE:-abcdef}"))
})
})
})
Context("NOT Rails >= 4.1", func() {
BeforeEach(func() {
mockVersions.EXPECT().HasGemVersion("rails", ">=4.1.0.beta1").Return(false, nil)
})
It("does not set default SECRET_KEY_BASE in profile.d", func() {
Expect(supplier.WriteProfileD("enginename")).To(Succeed())
contents, err := ioutil.ReadFile(filepath.Join(depsDir, depsIdx, "profile.d", "ruby.sh"))
Expect(err).ToNot(HaveOccurred())
Expect(string(contents)).ToNot(ContainSubstring("SECRET_KEY_BASE"))
})
})
})
Describe("Default Rails ENVS", func() {
BeforeEach(func() {
mockVersions.EXPECT().RubyEngineVersion().AnyTimes().Return("2.3.19", nil)
mockVersions.EXPECT().HasGemVersion("rails", ">=4.1.0.beta1").Return(false, nil)
})
It("writes default RAILS_ENV to profile.d", func() {
Expect(supplier.WriteProfileD("somerubyengine")).To(Succeed())
contents, err := ioutil.ReadFile(filepath.Join(depsDir, depsIdx, "profile.d", "ruby.sh"))
Expect(err).ToNot(HaveOccurred())
Expect(string(contents)).To(ContainSubstring("export RAILS_ENV=${RAILS_ENV:-production}"))
})
It("writes default RAILS_SERVE_STATIC_FILES to profile.d", func() {
Expect(supplier.WriteProfileD("somerubyengine")).To(Succeed())
contents, err := ioutil.ReadFile(filepath.Join(depsDir, depsIdx, "profile.d", "ruby.sh"))
Expect(err).ToNot(HaveOccurred())
Expect(string(contents)).To(ContainSubstring("export RAILS_SERVE_STATIC_FILES=${RAILS_SERVE_STATIC_FILES:-enabled}"))
})
It("writes default RAILS_LOG_TO_STDOUT to profile.d", func() {
Expect(supplier.WriteProfileD("somerubyengine")).To(Succeed())
contents, err := ioutil.ReadFile(filepath.Join(depsDir, depsIdx, "profile.d", "ruby.sh"))
Expect(err).ToNot(HaveOccurred())
Expect(string(contents)).To(ContainSubstring("export RAILS_LOG_TO_STDOUT=${RAILS_LOG_TO_STDOUT:-enabled}"))
})
It("writes default GEM_PATH to profile.d", func() {
Expect(supplier.WriteProfileD("somerubyengine")).To(Succeed())
contents, err := ioutil.ReadFile(filepath.Join(depsDir, depsIdx, "profile.d", "ruby.sh"))
Expect(err).ToNot(HaveOccurred())
Expect(string(contents)).To(ContainSubstring("export GEM_PATH=${GEM_PATH:-$DEPS_DIR/9/vendor_bundle/somerubyengine/2.3.19:$DEPS_DIR/9/gem_home:$DEPS_DIR/9/bundler}"))
})
})
})
Describe("DetermineRuby", func() {
BeforeEach(func() {
Expect(ioutil.WriteFile(filepath.Join(buildDir, "Gemfile"), []byte{}, 0644)).To(Succeed())
Expect(ioutil.WriteFile(filepath.Join(buildDir, "Gemfile.lock"), []byte{}, 0644)).To(Succeed())
})
Context("MRI", func() {
BeforeEach(func() {
mockVersions.EXPECT().Engine().AnyTimes().Return("ruby", nil)
})
Context("version determined from Gemfile", func() {
BeforeEach(func() {
mockVersions.EXPECT().Version().Return("2.3.1", nil)
})
It("returns the engine and version", func() {
engine, version, err := supplier.DetermineRuby()
Expect(err).ToNot(HaveOccurred())
Expect(engine).To(Equal("ruby"))
Expect(version).To(Equal("2.3.1"))
})
})
Context("version not determined from Gemfile", func() {
BeforeEach(func() {
mockVersions.EXPECT().Version().Return("", nil)
mockManifest.EXPECT().DefaultVersion("ruby").Return(libbuildpack.Dependency{Version: "9.10.11"}, nil)
})
It("returns ruby with the default from the manifest", func() {
engine, version, err := supplier.DetermineRuby()
Expect(err).ToNot(HaveOccurred())
Expect(engine).To(Equal("ruby"))
Expect(version).To(Equal("9.10.11"))
})
It("logs a warning", func() {
_, _, err := supplier.DetermineRuby()
Expect(err).ToNot(HaveOccurred())
Expect(buffer.String()).To(ContainSubstring("You have not declared a Ruby version in your Gemfile."))
Expect(buffer.String()).To(ContainSubstring("Defaulting to 9.10.11"))
})
})
Context("version in Gemfile not in manifest", func() {
BeforeEach(func() {
mockVersions.EXPECT().Version().Return("", errors.New(""))
})
It("returns an error", func() {
_, _, err := supplier.DetermineRuby()
Expect(err).To(HaveOccurred())
})
})
})
Context("jruby", func() {
BeforeEach(func() {
mockVersions.EXPECT().Engine().Return("jruby", nil)
})
Context("version determined from Gemfile", func() {
BeforeEach(func() {
mockVersions.EXPECT().JrubyVersion().Return("9.2.0.0", nil)
})
It("returns the engine and version", func() {
engine, version, err := supplier.DetermineRuby()
Expect(err).ToNot(HaveOccurred())
Expect(engine).To(Equal("jruby"))
Expect(version).To(Equal("9.2.0.0"))
})
})
Context("version in Gemfile not in manifest", func() {
BeforeEach(func() {
mockVersions.EXPECT().JrubyVersion().Return("", errors.New(""))
})
It("returns an error", func() {
_, _, err := supplier.DetermineRuby()
Expect(err).To(HaveOccurred())
})
})
})
Context("other", func() {
BeforeEach(func() {
mockVersions.EXPECT().Engine().Return("rubinius", nil)
})
It("returns an error", func() {
_, _, err := supplier.DetermineRuby()
Expect(err).To(HaveOccurred())
})
})
})
Describe("RemoveUnusedRubyVersions", func() {
selectedRubyVersion := "1.3.3"
selectedRubyEngine := "some-ruby-engine"
BeforeEach(func() {
Expect(os.MkdirAll(filepath.Join(depsDir, depsIdx, "vendor_bundle", selectedRubyEngine, "1.2.0"), 0755)).To(Succeed())
Expect(os.MkdirAll(filepath.Join(depsDir, depsIdx, "vendor_bundle", selectedRubyEngine, "1.3.0"), 0755)).To(Succeed())
})
Context("multiple Ruby major+minor versions in dep dir", func() {
It("removes the version that is not currently selected", func() {
Expect(supplier.RemoveUnusedRubyVersions(selectedRubyEngine, selectedRubyVersion)).To(Succeed())
Expect(filepath.Join(depsDir, depsIdx, "vendor_bundle", selectedRubyEngine, "1.2.0")).ToNot(BeADirectory())
Expect(filepath.Join(depsDir, depsIdx, "vendor_bundle", selectedRubyEngine, "1.3.0")).To(BeADirectory())
})
})
})
Describe("InstallYarn", func() {
Context("app has yarn.lock file", func() {
BeforeEach(func() {
Expect(ioutil.WriteFile(filepath.Join(buildDir, "yarn.lock"), []byte("contents"), 0644)).To(Succeed())
})
It("installs yarn", func() {
mockInstaller.EXPECT().InstallOnlyVersion("yarn", gomock.Any()).Do(func(_, installDir string) error {
Expect(os.MkdirAll(filepath.Join(installDir, "bin"), 0755)).To(Succeed())
Expect(ioutil.WriteFile(filepath.Join(installDir, "bin", "yarn"), []byte("contents"), 0644)).To(Succeed())
return nil
})
Expect(supplier.InstallYarn()).To(Succeed())
Expect(filepath.Join(depsDir, depsIdx, "bin", "yarn")).To(BeAnExistingFile())
data, err := ioutil.ReadFile(filepath.Join(depsDir, depsIdx, "bin", "yarn"))
Expect(err).ToNot(HaveOccurred())
Expect(string(data)).To(Equal("contents"))
})
})
Context("app does not have a yarn.lock file", func() {
It("does NOT install yarn", func() {
Expect(supplier.InstallYarn()).To(Succeed())
Expect(filepath.Join(depsDir, depsIdx, "bin", "yarn")).ToNot(BeAnExistingFile())
})
})
})
Describe("NeedsNode", func() {
Context("node is not already installed", func() {
BeforeEach(func() {
mockCommand.EXPECT().Output(buildDir, "node", "--version").AnyTimes().Return("", fmt.Errorf("could not find node"))
})
Context("webpacker is installed", func() {
BeforeEach(func() {
mockVersions.EXPECT().HasGemVersion("webpacker", ">=0.0.0").Return(true, nil)
mockVersions.EXPECT().HasGemVersion(gomock.Any(), ">=0.0.0").AnyTimes().Return(false, nil)
})
It("returns true", func() {
Expect(supplier.NeedsNode()).To(BeTrue())
})
})
Context("execjs is installed", func() {
BeforeEach(func() {
mockVersions.EXPECT().HasGemVersion("execjs", ">=0.0.0").Return(true, nil)
mockVersions.EXPECT().HasGemVersion(gomock.Any(), ">=0.0.0").AnyTimes().Return(false, nil)
})
It("returns true", func() {
Expect(supplier.NeedsNode()).To(BeTrue())
})
})
Context("neither webpacker nor execjs are installed", func() {
BeforeEach(func() {
mockVersions.EXPECT().HasGemVersion(gomock.Any(), ">=0.0.0").AnyTimes().Return(false, nil)
})
It("returns false", func() {
Expect(supplier.NeedsNode()).To(BeFalse())
})
})
})
Context("node is already installed", func() {
BeforeEach(func() {
mockCommand.EXPECT().Output(buildDir, "node", "--version").AnyTimes().Return("v8.2.1", nil)
})
It("returns false", func() {
Expect(supplier.NeedsNode()).To(BeFalse())
})
It("informs the user that node is being skipped", func() {
supplier.NeedsNode()
Expect(buffer.String()).To(ContainSubstring("Skipping install of nodejs since it has been supplied"))
})
})
})
Describe("UpdateRubygems", func() {
BeforeEach(func() {
mockManifest.EXPECT().AllDependencyVersions("rubygems").AnyTimes().Return([]string{"2.6.13"})
})
Context("gem version is less than 2.6.13", func() {
BeforeEach(func() {
mockCommand.EXPECT().Output(gomock.Any(), "gem", "--version").AnyTimes().Return("2.6.12\n", nil)
mockVersions.EXPECT().VersionConstraint("2.6.12", ">= 2.6.13").AnyTimes().Return(false, nil)
})
It("updates rubygems", func() {
mockVersions.EXPECT().Engine().Return("ruby", nil)
mockInstaller.EXPECT().InstallDependency(gomock.Any(), gomock.Any()).Do(func(dep libbuildpack.Dependency, _ string) {
Expect(dep.Name).To(Equal("rubygems"))
Expect(dep.Version).To(Equal("2.6.13"))
})
mockCommand.EXPECT().Output(gomock.Any(), "ruby", "setup.rb")
Expect(supplier.UpdateRubygems()).To(Succeed())
})
Context("jruby", func() {
It("skips update of rubygems", func() {
mockVersions.EXPECT().Engine().Return("jruby", nil)
Expect(supplier.UpdateRubygems()).To(Succeed())
})
})
})
Context("gem version is equal to 2.6.13", func() {
BeforeEach(func() {
mockCommand.EXPECT().Output(gomock.Any(), "gem", "--version").AnyTimes().Return("2.6.13\n", nil)
mockVersions.EXPECT().VersionConstraint("2.6.13", ">= 2.6.13").AnyTimes().Return(true, nil)
})
It("does nothing", func() {
Expect(supplier.UpdateRubygems()).To(Succeed())
})
})
Context("gem version is greater than to 2.6.13", func() {
BeforeEach(func() {
mockCommand.EXPECT().Output(gomock.Any(), "gem", "--version").AnyTimes().Return("2.6.14\n", nil)
mockVersions.EXPECT().VersionConstraint("2.6.14", ">= 2.6.13").AnyTimes().Return(true, nil)
})
It("does nothing", func() {
Expect(supplier.UpdateRubygems()).To(Succeed())
})
})
})
Describe("RewriteShebangs", func() {
var depDir string
BeforeEach(func() {
depDir = filepath.Join(depsDir, depsIdx)
Expect(os.MkdirAll(filepath.Join(depDir, "bin"), 0755)).To(Succeed())
Expect(ioutil.WriteFile(filepath.Join(depDir, "bin", "somescript"), []byte("#!/usr/bin/ruby\n\n\n"), 0755)).To(Succeed())
Expect(ioutil.WriteFile(filepath.Join(depDir, "bin", "anotherscript"), []byte("#!//bin/ruby\n\n\n"), 0755)).To(Succeed())
Expect(os.MkdirAll(filepath.Join(depDir, "bin", "__ruby__"), 0755)).To(Succeed())
Expect(os.Symlink(filepath.Join(depDir, "bin", "__ruby__"), filepath.Join(depDir, "bin", "__ruby__SYMLINK"))).To(Succeed())
mockVersions.EXPECT().Engine().AnyTimes().Return("ruby", nil)
})
It("changes them to #!/usr/bin/env ruby", func() {
Expect(supplier.RewriteShebangs()).To(Succeed())
fileContents, err := ioutil.ReadFile(filepath.Join(depDir, "bin", "somescript"))
Expect(err).ToNot(HaveOccurred())
secondFileContents, err := ioutil.ReadFile(filepath.Join(depDir, "bin", "anotherscript"))
Expect(err).ToNot(HaveOccurred())
Expect(string(fileContents)).To(HavePrefix("#!/usr/bin/env ruby"))
Expect(string(secondFileContents)).To(HavePrefix("#!/usr/bin/env ruby"))
})
It(`also finds files in vendor_bundle/ruby/*/bin/*`, func() {
Expect(os.MkdirAll(filepath.Join(depDir, "vendor_bundle", "ruby", "2.4.0", "bin"), 0755)).To(Succeed())
Expect(ioutil.WriteFile(filepath.Join(depDir, "vendor_bundle", "ruby", "2.4.0", "bin", "somescript"), []byte("#!/usr/bin/ruby\n\n\n"), 0755)).To(Succeed())
Expect(supplier.RewriteShebangs()).To(Succeed())
fileContents, err := ioutil.ReadFile(filepath.Join(depDir, "vendor_bundle", "ruby", "2.4.0", "bin", "somescript"))
Expect(err).ToNot(HaveOccurred())
Expect(string(fileContents)).To(HavePrefix("#!/usr/bin/env ruby"))
})
})
Describe("SymlinkBundlerIntoRubygems", func() {
var depDir string
BeforeEach(func() {
depDir = filepath.Join(depsDir, depsIdx)
mockVersions.EXPECT().Engine().AnyTimes().Return("ruby", nil)
mockVersions.EXPECT().RubyEngineVersion().Return("2.3.4", nil)
Expect(os.MkdirAll(filepath.Join(depDir, "bundler", "gems", "bundler-1.17.2"), 0755)).To(Succeed())
Expect(ioutil.WriteFile(filepath.Join(depDir, "bundler", "gems", "bundler-1.17.2", "file"), []byte("my content"), 0644)).To(Succeed())
})
It("Creates a symlink from the installed ruby's gem directory to the installed bundler gem", func() {
Expect(supplier.SymlinkBundlerIntoRubygems()).To(Succeed())
fileContents, err := ioutil.ReadFile(filepath.Join(depDir, "ruby", "lib", "ruby", "gems", "2.3.4", "gems", "bundler-1.17.2", "file"))
Expect(err).ToNot(HaveOccurred())
Expect(string(fileContents)).To(HavePrefix("my content"))
})
})
})
| [
"\"LD_LIBRARY_PATH\"",
"\"LD_LIBRARY_PATH\"",
"\"LD_LIBRARY_PATH\"",
"\"LD_LIBRARY_PATH\"",
"\"RAILS_ENV\"",
"\"RAILS_GROUPS\"",
"\"RACK_ENV\"",
"\"RAILS_ENV\"",
"\"RAILS_GROUPS\"",
"\"RACK_ENV\""
]
| []
| [
"RACK_ENV",
"LD_LIBRARY_PATH",
"RAILS_GROUPS",
"RAILS_ENV"
]
| [] | ["RACK_ENV", "LD_LIBRARY_PATH", "RAILS_GROUPS", "RAILS_ENV"] | go | 4 | 0 | |
nowellpoint-aws-api/src/test/java/com/nowellpoint/api/idp/test/TestAuthentication.java | package com.nowellpoint.api.idp.test;
import java.util.Locale;
import org.junit.Test;
import com.nowellpoint.http.HttpResponse;
import com.nowellpoint.http.MediaType;
import com.nowellpoint.http.RestResource;
public class TestAuthentication {
@Test
public void testAuthentication() {
System.out.println(new Locale("en_us"));
HttpResponse httpResponse = RestResource.post(System.getenv("OKTA_AUTHORIZATION_SERVER"))
.basicAuthorization(System.getenv("OKTA_CLIENT_ID"), System.getenv("OKTA_CLIENT_SECRET"))
.accept(MediaType.APPLICATION_JSON)
.contentType(MediaType.APPLICATION_FORM_URLENCODED)
.header("Cache-Control", "no-cache")
.path("v1")
.path("token")
.parameter("grant_type", "password")
.parameter("username", System.getenv("NOWELLPOINT_USERNAME"))
.parameter("password", System.getenv("NOWELLPOINT_PASSWORD"))
.parameter("scope", "offline_access")
.execute();
System.out.println(httpResponse.getStatusCode());
System.out.println(httpResponse.getAsString());
}
} | [
"\"OKTA_AUTHORIZATION_SERVER\"",
"\"OKTA_CLIENT_ID\"",
"\"OKTA_CLIENT_SECRET\"",
"\"NOWELLPOINT_USERNAME\"",
"\"NOWELLPOINT_PASSWORD\""
]
| []
| [
"NOWELLPOINT_PASSWORD",
"NOWELLPOINT_USERNAME",
"OKTA_CLIENT_SECRET",
"OKTA_AUTHORIZATION_SERVER",
"OKTA_CLIENT_ID"
]
| [] | ["NOWELLPOINT_PASSWORD", "NOWELLPOINT_USERNAME", "OKTA_CLIENT_SECRET", "OKTA_AUTHORIZATION_SERVER", "OKTA_CLIENT_ID"] | java | 5 | 0 | |
client/pool_darwin.go | package client
import (
"crypto/x509"
"io/ioutil"
"os"
"path"
)
// systemCertPool circumvents the fact that Go on macOS does not support
// SSL_CERT_{DIR,FILE}.
func systemCertPool() (*x509.CertPool, error) {
var certPem []byte
if f := os.Getenv("SSL_CERT_FILE"); len(f) > 0 {
pem, err := ioutil.ReadFile(f)
if err != nil {
return nil, err
}
pem = append(pem, '\n')
certPem = append(certPem, pem...)
}
if d := os.Getenv("SSL_CERT_DIR"); len(d) > 0 {
entries, err := ioutil.ReadDir(d)
if err != nil {
return nil, err
}
for _, entry := range entries {
if entry.IsDir() {
continue
}
pem, err := ioutil.ReadFile(path.Join(d, entry.Name()))
if err != nil {
return nil, err
}
pem = append(pem, '\n')
certPem = append(certPem, pem...)
}
}
pool, err := x509.SystemCertPool()
if err != nil {
return nil, err
}
pool.AppendCertsFromPEM(certPem)
return pool, nil
}
| [
"\"SSL_CERT_FILE\"",
"\"SSL_CERT_DIR\""
]
| []
| [
"SSL_CERT_DIR",
"SSL_CERT_FILE"
]
| [] | ["SSL_CERT_DIR", "SSL_CERT_FILE"] | go | 2 | 0 | |
tests/test_sqsexec.py | # Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import random
import string
from concurrent.futures import as_completed
from c7n.sqsexec import SQSExecutor, MessageIterator
from c7n import utils
from common import BaseTest
TEST_SQS_PREFIX = os.environ.get("TEST_SQS_PREFIX", "cloud-c7n-test-sqsexec")
def int_processor(*args):
if not args:
return 1
return args[0] * 2
class TestSQSExec(BaseTest):
def test_sqsexec(self):
session_factory = self.replay_flight_data('test_sqs_exec')
client = session_factory().client('sqs')
map_queue = client.create_queue(
QueueName = "%s-map-%s" % (
                TEST_SQS_PREFIX, "".join(random.sample(string.ascii_letters, 3))))[
'QueueUrl']
self.addCleanup(client.delete_queue, QueueUrl=map_queue)
reduce_queue = client.create_queue(
QueueName = "%s-map-%s" % (
                TEST_SQS_PREFIX, "".join(random.sample(string.ascii_letters, 3))))[
'QueueUrl']
self.addCleanup(client.delete_queue, QueueUrl=reduce_queue)
with SQSExecutor(
session_factory, map_queue, reduce_queue) as w:
w.op_sequence_start = 699723
w.op_sequence = 699723
# Submit work
futures = []
for i in range(10):
futures.append(w.submit(int_processor, i))
# Manually process and send results
messages = MessageIterator(client, map_queue, limit=10)
for m in messages:
d = utils.loads(m['Body'])
self.assertEqual(
m['MessageAttributes']['op']['StringValue'],
'tests.test_sqsexec:int_processor')
client.send_message(
QueueUrl=reduce_queue,
MessageBody=utils.dumps([
d['args'], int_processor(*d['args'])]),
MessageAttributes=m['MessageAttributes'])
w.gather()
results = [json.loads(r.result()['Body'])
for r in list(as_completed(futures))]
self.assertEqual(
list(sorted(results))[-1],
[[9], 18])
| []
| []
| [
"TEST_SQS_PREFIX"
]
| [] | ["TEST_SQS_PREFIX"] | python | 1 | 0 | |
benwaonline_auth/config.py | import os
def get_pem(fname):
try:
with open(fname, "r") as f:
return f.read()
except FileNotFoundError:
return None
class Config(object):
DB_BASE_URI = "mysql+pymysql://{}:{}@{}:{}/".format(
os.getenv("MYSQL_USER"),
os.getenv("MYSQL_PASSWORD"),
os.getenv("MYSQL_HOST"),
os.getenv("MYSQL_PORT"),
)
DB_NAME = os.getenv("DB_NAME")
SQLALCHEMY_DATABASE_URI = DB_BASE_URI + DB_NAME
SQLALCHEMY_TRACK_MODIFICATIONS = False
SECRET_KEY = os.getenv("SECRET_KEY_AUTH")
ISSUER = "issuer"
API_AUDIENCE = "api audience"
REDIS_HOST = os.getenv("REDIS_HOST")
REDIS_PORT = os.getenv("REDIS_PORT")
FRONT_URL = os.getenv("FRONT_HOST")
AUTH_HOST = os.getenv("AUTH_HOST")
AUTH_URL = "{}:{}".format(os.getenv("AUTH_HOST"), os.getenv("AUTH_PORT", ""))
PRIVATE_KEY = get_pem(os.getenv("PRIVATE_KEY"))
PUBLIC_KEY = get_pem(os.getenv("PUBLIC_KEY"))
TWITTER = {
"consumer_key": os.getenv("TWITTER_CONSUMER_KEY"),
"consumer_secret": os.getenv("TWITTER_CONSUMER_SECRET"),
"base_url": "https://api.twitter.com",
"request_token_url": "oauth/request_token",
"access_token_url": "oauth/access_token",
"authorize_url": "oauth/authenticate",
}
class DevConfig(Config):
CLIENT_ID = "nice"
CLIENT_SECRET = "ok"
class TestConfig(Config):
DB_NAME = "benwaonlineauth_test"
SQLALCHEMY_DATABASE_URI = Config.DB_BASE_URI + DB_NAME
FRONT_URL = "mock://mock-front"
# AUTH_URL = os.getenv('AUTH_URL')
TESTING = True
WTF_CSRF_ENABLED = False
PRIVATE_KEY = get_pem("tests/data/benwaonline_auth_test_priv.pem")
PUBLIC_KEY = get_pem("tests/data/benwaonline_auth_test_pub.pem")
class ProdConfig(Config):
DB_BASE_URI = "mysql+pymysql://{}:{}@{}:{}/".format(
os.getenv("MYSQL_USER"),
os.getenv("MYSQL_PASSWORD"),
os.getenv("MYSQL_HOST"),
os.getenv("MYSQL_PORT"),
)
DB_NAME = os.getenv("DB_NAME")
SQLALCHEMY_DATABASE_URI = DB_BASE_URI + DB_NAME
ISSUER = "https://benwa.online"
API_AUDIENCE = "https://benwa.online/api"
app_config = {"development": DevConfig, "testing": TestConfig, "production": ProdConfig}
| []
| []
| [
"SECRET_KEY_AUTH",
"REDIS_PORT",
"MYSQL_PASSWORD",
"TWITTER_CONSUMER_KEY",
"MYSQL_USER",
"MYSQL_PORT",
"TWITTER_CONSUMER_SECRET",
"REDIS_HOST",
"DB_NAME",
"AUTH_HOST",
"PRIVATE_KEY",
"AUTH_PORT",
"FRONT_HOST",
"AUTH_URL",
"PUBLIC_KEY",
"MYSQL_HOST"
]
| [] | ["SECRET_KEY_AUTH", "REDIS_PORT", "MYSQL_PASSWORD", "TWITTER_CONSUMER_KEY", "MYSQL_USER", "MYSQL_PORT", "TWITTER_CONSUMER_SECRET", "REDIS_HOST", "DB_NAME", "AUTH_HOST", "PRIVATE_KEY", "AUTH_PORT", "FRONT_HOST", "AUTH_URL", "PUBLIC_KEY", "MYSQL_HOST"] | python | 16 | 0 | |
example/django_pytest/project/settings.py | """
https://docs.djangoproject.com/en/1.11/topics/settings/
https://docs.djangoproject.com/en/1.11/ref/settings/
https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
"""
import os
from collections import OrderedDict
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "yeicahshiquohc3Mae8ahl9yah7fa9eiCh6ooyiefaeheipiobe1fikohL6ahpho"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get("DJANGO_DEBUG", "false").lower() == "true"
if DEBUG:
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
ALLOWED_HOSTS = ["*"]
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.humanize",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"app",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "project.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.media",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "project.wsgi.application"
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(".", "db.sqlite3"),
}
}
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": (
"django.contrib.auth.password_validation."
"UserAttributeSimilarityValidator"
),
},
{
"NAME": (
"django.contrib.auth.password_validation." "MinimumLengthValidator"
),
},
{
"NAME": (
"django.contrib.auth.password_validation."
"CommonPasswordValidator"
),
},
{
"NAME": (
"django.contrib.auth.password_validation."
"NumericPasswordValidator"
),
},
]
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = "/static/"
STATIC_ROOT = "static"
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
#
# HERE STARTS DYNACONF EXTENSION LOAD
#
import dynaconf # noqa
from dynaconf import Validator # noqa
settings = dynaconf.DjangoDynaconf(
__name__,
settings_files=["settings.yaml"],
env_switcher="DJANGO_ENVIRONMENT",
load_dotenv=True,
validators=[
Validator("ENVIRONMENT", "TESTING", "FOO", must_exist=True),
],
) # noqa
#
# HERE ENDS DYNACONF EXTENSION LOAD (No more code below this line)
#
| []
| []
| [
"DJANGO_DEBUG"
]
| [] | ["DJANGO_DEBUG"] | python | 1 | 0 | |
fundamentals/mysql/readquery.py | #!/usr/local/bin/python
# encoding: utf-8
"""
*Given a mysql query, read the data from the database and return the results as a list of dictionaries (database rows)*
:Author:
David Young
:Date Created:
June 21, 2016
"""
################# GLOBAL IMPORTS ####################
import sys
import os
os.environ['TERM'] = 'vt100'
from fundamentals import tools
def readquery(
sqlQuery,
dbConn,
log,
quiet=False):
"""Given a mysql query, read the data from the database and return the results as a list of dictionaries (database rows)
**Key Arguments:**
- ``log`` -- the logger.
- ``sqlQuery`` -- the MySQL command to execute
- ``dbConn`` -- the db connection
- ``quiet`` -- ignore mysql warnings and errors and move on. Be careful when setting this to true - damaging errors can easily be missed. Default *False*.
**Return:**
- ``rows`` -- the rows returned by the sql query
**Usage:**
.. code-block:: python
from fundamentals.mysql import readquery
rows = readquery(
log=log,
sqlQuery=sqlQuery,
dbConn=dbConn,
quiet=False
)
"""
log.debug('starting the ``readquery`` function')
import pymysql
import warnings
warnings.filterwarnings('error', category=pymysql.Warning)
rows = []
try:
cursor = dbConn.cursor(pymysql.cursors.DictCursor)
except Exception as e:
log.error('could not create the database cursor: %s' % (e, ))
raise IOError('could not create the database cursor: %s' % (e, ))
# EXECUTE THE SQL COMMAND
try:
cursor.execute(sqlQuery)
rows = cursor.fetchall()
except Exception as e:
sqlQuery = sqlQuery[:1000]
if quiet == False:
log.warning(
'MySQL raised an error - read command not executed.\n' + str(e) + '\nHere is the sqlQuery\n\t%(sqlQuery)s' % locals())
raise e
# CLOSE THE CURSOR
try:
cursor.close()
except Exception as e:
log.warning('could not close the db cursor ' + str(e) + '\n')
log.debug('completed the ``readquery`` function')
return rows
| []
| []
| [
"TERM"
]
| [] | ["TERM"] | python | 1 | 0 | |
klo/klo.py | #!/usr/bin/env python3
from FeatureExamples import FeatureExamples
from FeatureHandler import FeatureHandler
from matrix_client.client import MatrixClient
import os
import time
from bs4 import BeautifulSoup
import requests
def find_room(sender, message, room):
html = requests.get("https://foss-ag.de/").text
soup = BeautifulSoup(html, 'html.parser')
date = soup.find(id="ag-termine").find_next_sibling("div").find("ul").find("li").get_text().strip()
room.sendText(date)
def main():
# connect to server and join room
client = MatrixClient("https://matrix.org")
token = client.login(username="foss-ag_klo", password=os.environ['KLO_PW'])
room = client.join_room("#klotest:matrix.org")
# create FeatureHandler
fh = FeatureHandler(room, client)
# add features to FeatureHandler that are called when the specified command is posted in the Matrix room
fh.add_feature("!echo", FeatureExamples.echo)
fh.add_feature("!room", find_room)
# run script until it's stopped manually
    while True:
        time.sleep(1)  # idle without pegging a CPU core
if __name__ == "__main__":
main()
| []
| []
| [
"KLO_PW"
]
| [] | ["KLO_PW"] | python | 1 | 0 | |
ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/service_check.py | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
import socket
import sys
import time
import subprocess
from hcat_service_check import hcat_service_check
from webhcat_service_check import webhcat_service_check
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
from resource_management.core import shell
from resource_management.core.logger import Logger
from resource_management.libraries.functions import get_unique_id_and_date
class HiveServiceCheck(Script):
pass
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class HiveServiceCheckWindows(HiveServiceCheck):
def service_check(self, env):
import params
env.set_params(params)
        smoke_cmd = os.path.join(params.stack_root, "Run-SmokeTests.cmd")
service = "HIVE"
Execute(format("cmd /C {smoke_cmd} {service}"), user=params.hive_user, logoutput=True)
hcat_service_check()
webhcat_service_check()
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class HiveServiceCheckDefault(HiveServiceCheck):
def __init__(self):
super(HiveServiceCheckDefault, self).__init__()
Logger.initialize_logger()
def service_check(self, env):
import params
env.set_params(params)
if params.security_enabled:
kinit_cmd = format(
"{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal}; ")
else:
kinit_cmd = ""
# Check HiveServer
Logger.info("Running Hive Server checks")
Logger.info("--------------------------\n")
self.check_hive_server(env, 'Hive Server', kinit_cmd, params.hive_server_hosts,
int(format("{hive_server_port}")))
if params.has_hive_interactive and params.hive_interactive_enabled:
Logger.info("Running Hive Server2 checks")
Logger.info("--------------------------\n")
self.check_hive_server(env, 'Hive Server2', kinit_cmd, params.hive_interactive_hosts,
int(format("{hive_server_interactive_port}")))
Logger.info("Running LLAP checks")
Logger.info("-------------------\n")
self.check_llap(env, kinit_cmd)
Logger.info("Running HCAT checks")
Logger.info("-------------------\n")
hcat_service_check()
Logger.info("Running WEBHCAT checks")
Logger.info("---------------------\n")
webhcat_service_check()
def check_hive_server(self, env, server_component_name, kinit_cmd, address_list, server_port):
import params
env.set_params(params)
Logger.info("Server Address List : {0}, Port : {1}".format(address_list, server_port))
if not address_list:
raise Fail("Can not find any "+server_component_name+" ,host. Please check configuration.")
SOCKET_WAIT_SECONDS = 290
start_time = time.time()
end_time = start_time + SOCKET_WAIT_SECONDS
Logger.info("Waiting for the {0} to start...".format(server_component_name))
workable_server_available = False
i = 0
while time.time() < end_time and not workable_server_available:
address = address_list[i]
try:
check_thrift_port_sasl(address, server_port, params.hive_server2_authentication,
params.hive_server_principal, kinit_cmd, params.smokeuser,
transport_mode=params.hive_transport_mode, http_endpoint=params.hive_http_endpoint,
ssl=params.hive_ssl, ssl_keystore=params.hive_ssl_keystore_path,
ssl_password=params.hive_ssl_keystore_password)
Logger.info("Successfully connected to {0} on port {1}".format(address, server_port))
workable_server_available = True
except:
Logger.info("Connection to {0} on port {1} failed".format(address, server_port))
time.sleep(5)
i += 1
if i == len(address_list):
i = 0
elapsed_time = time.time() - start_time
if not workable_server_available:
raise Fail("Connection to '{0}' on host: {1} and port {2} failed after {3} seconds"
.format(server_component_name, params.hostname, server_port, elapsed_time))
Logger.info("Successfully stayed connected to '{0}' on host: {1} and port {2} after {3} seconds"
.format(server_component_name, params.hostname, server_port, elapsed_time))
def check_llap(self, env, kinit_cmd):
import params
env.set_params(params)
File(format("{tmp_dir}/hiveLlapSmoke.sh"),
content=StaticFile("hiveLlapSmoke.sh"),
mode=0755
)
unique_id = get_unique_id_and_date()
llap_cmd = format("{kinit_cmd}env JAVA_HOME={java64_home} {tmp_dir}/hiveLlapSmoke.sh {stack_root} llap_smoke_{unique_id} prepare")
exec_path = params.execute_path
if params.version and params.stack_root:
upgrade_hive_bin = format("{stack_root}/{version}/hive2/bin")
exec_path = os.environ['PATH'] + os.pathsep + params.hadoop_bin_dir + os.pathsep + upgrade_hive_bin
Execute(llap_cmd,
user=params.hive_user,
path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin', exec_path],
tries=1,
try_sleep=5,
wait_for_finish=True,
stderr=subprocess.PIPE,
logoutput=True)
if __name__ == "__main__":
HiveServiceCheck().execute() | []
| []
| [
"PATH"
]
| [] | ["PATH"] | python | 1 | 0 | |
nodeup/pkg/model/protokube.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package model
import (
"bytes"
"fmt"
"os"
"path/filepath"
"strings"
kopsbase "k8s.io/kops"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/dns"
"k8s.io/kops/pkg/flagbuilder"
"k8s.io/kops/pkg/systemd"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
"github.com/blang/semver"
"github.com/golang/glog"
"k8s.io/kops/pkg/assets"
)
// ProtokubeBuilder configures protokube
type ProtokubeBuilder struct {
*NodeupModelContext
}
var _ fi.ModelBuilder = &ProtokubeBuilder{}
// Build is responsible for generating the options for protokube
func (t *ProtokubeBuilder) Build(c *fi.ModelBuilderContext) error {
useGossip := dns.IsGossipHostname(t.Cluster.Spec.MasterInternalName)
// check is not a master and we are not using gossip (https://github.com/kubernetes/kops/pull/3091)
if !t.IsMaster && !useGossip {
glog.V(2).Infof("skipping the provisioning of protokube on the nodes")
return nil
}
if t.IsMaster {
kubeconfig, err := t.buildPKIKubeconfig("kops")
if err != nil {
return err
}
c.AddTask(&nodetasks.File{
Path: "/var/lib/kops/kubeconfig",
Contents: fi.NewStringResource(kubeconfig),
Type: nodetasks.FileType_File,
Mode: s("0400"),
})
// retrieve the etcd peer certificates and private keys from the keystore
if t.UseEtcdTLS() {
for _, x := range []string{"etcd", "etcd-client"} {
if err := t.BuildCertificateTask(c, x, fmt.Sprintf("%s.pem", x)); err != nil {
return err
}
}
for _, x := range []string{"etcd", "etcd-client"} {
if err := t.BuildPrivateTask(c, x, fmt.Sprintf("%s-key.pem", x)); err != nil {
return err
}
}
}
}
service, err := t.buildSystemdService()
if err != nil {
return err
}
c.AddTask(service)
return nil
}
// buildSystemdService generates the manifest for the protokube service
func (t *ProtokubeBuilder) buildSystemdService() (*nodetasks.Service, error) {
k8sVersion, err := util.ParseKubernetesVersion(t.Cluster.Spec.KubernetesVersion)
if err != nil || k8sVersion == nil {
return nil, fmt.Errorf("unable to parse KubernetesVersion %q", t.Cluster.Spec.KubernetesVersion)
}
protokubeFlags, err := t.ProtokubeFlags(*k8sVersion)
if err != nil {
return nil, err
}
protokubeFlagsArgs, err := flagbuilder.BuildFlags(protokubeFlags)
if err != nil {
return nil, err
}
dockerArgs := []string{
"/usr/bin/docker", "run",
"-v", "/:/rootfs/",
"-v", "/var/run/dbus:/var/run/dbus",
"-v", "/run/systemd:/run/systemd",
}
// add kubectl only if a master
// path changes depending on distro, and always mount it on /opt/kops/bin
// kubectl is downloaded an installed by other tasks
if t.IsMaster {
dockerArgs = append(dockerArgs, []string{
"-v", t.KubectlPath() + ":/opt/kops/bin:ro",
"--env", "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/kops/bin",
}...)
}
dockerArgs = append(dockerArgs, []string{
"--net=host",
"--pid=host", // Needed for mounting in a container (when using systemd mounting?)
"--privileged", // We execute in the host namespace
"--env", "KUBECONFIG=/rootfs/var/lib/kops/kubeconfig",
t.ProtokubeEnvironmentVariables(),
t.ProtokubeImageName(),
"/usr/bin/protokube",
}...)
protokubeCommand := strings.Join(dockerArgs, " ") + " " + protokubeFlagsArgs
manifest := &systemd.Manifest{}
manifest.Set("Unit", "Description", "Kubernetes Protokube Service")
manifest.Set("Unit", "Documentation", "https://github.com/kubernetes/kops")
manifest.Set("Service", "ExecStartPre", t.ProtokubeImagePullCommand())
manifest.Set("Service", "ExecStart", protokubeCommand)
manifest.Set("Service", "Restart", "always")
manifest.Set("Service", "RestartSec", "2s")
manifest.Set("Service", "StartLimitInterval", "0")
manifest.Set("Install", "WantedBy", "multi-user.target")
manifestString := manifest.Render()
glog.V(8).Infof("Built service manifest %q\n%s", "protokube", manifestString)
service := &nodetasks.Service{
Name: "protokube.service",
Definition: s(manifestString),
}
service.InitDefaults()
return service, nil
}
// ProtokubeImageName returns the docker image for protokube
func (t *ProtokubeBuilder) ProtokubeImageName() string {
name := ""
if t.NodeupConfig.ProtokubeImage != nil && t.NodeupConfig.ProtokubeImage.Name != "" {
name = t.NodeupConfig.ProtokubeImage.Name
}
if name == "" {
// use current default corresponding to this version of nodeup
name = kopsbase.DefaultProtokubeImageName()
}
return name
}
// ProtokubeImagePullCommand returns the command to pull the image
func (t *ProtokubeBuilder) ProtokubeImagePullCommand() string {
source := ""
if t.NodeupConfig.ProtokubeImage != nil {
source = t.NodeupConfig.ProtokubeImage.Source
}
if source == "" {
// Nothing to pull; return dummy value
return "/bin/true"
}
if strings.HasPrefix(source, "http:") || strings.HasPrefix(source, "https:") || strings.HasPrefix(source, "s3:") {
// We preloaded the image; return a dummy value
return "/bin/true"
}
return "/usr/bin/docker pull " + t.NodeupConfig.ProtokubeImage.Source
}
// ProtokubeFlags are the flags for protokube
type ProtokubeFlags struct {
ApplyTaints *bool `json:"applyTaints,omitempty" flag:"apply-taints"`
Channels []string `json:"channels,omitempty" flag:"channels"`
Cloud *string `json:"cloud,omitempty" flag:"cloud"`
// ClusterID flag is required only for vSphere cloud type, to pass cluster id information to protokube. AWS and GCE workflows ignore this flag.
ClusterID *string `json:"cluster-id,omitempty" flag:"cluster-id"`
Containerized *bool `json:"containerized,omitempty" flag:"containerized"`
DNSInternalSuffix *string `json:"dnsInternalSuffix,omitempty" flag:"dns-internal-suffix"`
DNSProvider *string `json:"dnsProvider,omitempty" flag:"dns"`
DNSServer *string `json:"dns-server,omitempty" flag:"dns-server"`
EtcdBackupImage string `json:"etcd-backup-image,omitempty" flag:"etcd-backup-image"`
EtcdBackupStore string `json:"etcd-backup-store,omitempty" flag:"etcd-backup-store"`
EtcdImage *string `json:"etcd-image,omitempty" flag:"etcd-image"`
EtcdLeaderElectionTimeout *string `json:"etcd-election-timeout,omitempty" flag:"etcd-election-timeout"`
EtcdHearbeatInterval *string `json:"etcd-heartbeat-interval,omitempty" flag:"etcd-heartbeat-interval"`
InitializeRBAC *bool `json:"initializeRBAC,omitempty" flag:"initialize-rbac"`
LogLevel *int32 `json:"logLevel,omitempty" flag:"v"`
Master *bool `json:"master,omitempty" flag:"master"`
PeerTLSCaFile *string `json:"peer-ca,omitempty" flag:"peer-ca"`
PeerTLSCertFile *string `json:"peer-cert,omitempty" flag:"peer-cert"`
PeerTLSKeyFile *string `json:"peer-key,omitempty" flag:"peer-key"`
TLSAuth *bool `json:"tls-auth,omitempty" flag:"tls-auth"`
TLSCAFile *string `json:"tls-ca,omitempty" flag:"tls-ca"`
TLSCertFile *string `json:"tls-cert,omitempty" flag:"tls-cert"`
TLSKeyFile *string `json:"tls-key,omitempty" flag:"tls-key"`
Zone []string `json:"zone,omitempty" flag:"zone"`
}
// ProtokubeFlags is responsible for building the command line flags for protokube
func (t *ProtokubeBuilder) ProtokubeFlags(k8sVersion semver.Version) (*ProtokubeFlags, error) {
imageVersion := t.Cluster.Spec.EtcdClusters[0].Version
// overrides imageVersion if set
etcdContainerImage := t.Cluster.Spec.EtcdClusters[0].Image
var leaderElectionTimeout string
var heartbeatInterval string
if v := t.Cluster.Spec.EtcdClusters[0].LeaderElectionTimeout; v != nil {
leaderElectionTimeout = convEtcdSettingsToMs(v)
}
if v := t.Cluster.Spec.EtcdClusters[0].HeartbeatInterval; v != nil {
heartbeatInterval = convEtcdSettingsToMs(v)
}
f := &ProtokubeFlags{
Channels: t.NodeupConfig.Channels,
Containerized: fi.Bool(true),
EtcdLeaderElectionTimeout: s(leaderElectionTimeout),
EtcdHearbeatInterval: s(heartbeatInterval),
LogLevel: fi.Int32(4),
Master: b(t.IsMaster),
}
for _, e := range t.Cluster.Spec.EtcdClusters {
if e.Backups != nil {
if f.EtcdBackupImage == "" {
f.EtcdBackupImage = e.Backups.Image
}
if f.EtcdBackupStore == "" {
f.EtcdBackupStore = e.Backups.BackupStore
}
}
}
	// TODO this is duplicate code with etcd model
image := fmt.Sprintf("k8s.gcr.io/etcd:%s", imageVersion)
// override image if set as API value
if etcdContainerImage != "" {
image = etcdContainerImage
}
assets := assets.NewAssetBuilder(t.Cluster, "")
remapped, err := assets.RemapImage(image)
if err != nil {
return nil, fmt.Errorf("unable to remap container %q: %v", image, err)
} else {
image = remapped
}
f.EtcdImage = s(image)
// initialize rbac on Kubernetes >= 1.6 and master
if k8sVersion.Major == 1 && k8sVersion.Minor >= 6 {
f.InitializeRBAC = fi.Bool(true)
}
// check if we are using tls and add the options to protokube
if t.UseEtcdTLS() {
f.PeerTLSCaFile = s(filepath.Join(t.PathSrvKubernetes(), "ca.crt"))
f.PeerTLSCertFile = s(filepath.Join(t.PathSrvKubernetes(), "etcd.pem"))
f.PeerTLSKeyFile = s(filepath.Join(t.PathSrvKubernetes(), "etcd-key.pem"))
f.TLSCAFile = s(filepath.Join(t.PathSrvKubernetes(), "ca.crt"))
f.TLSCertFile = s(filepath.Join(t.PathSrvKubernetes(), "etcd.pem"))
f.TLSKeyFile = s(filepath.Join(t.PathSrvKubernetes(), "etcd-key.pem"))
}
if t.UseTLSAuth() {
enableAuth := true
f.TLSAuth = b(enableAuth)
}
zone := t.Cluster.Spec.DNSZone
if zone != "" {
if strings.Contains(zone, ".") {
// match by name
f.Zone = append(f.Zone, zone)
} else {
// match by id
f.Zone = append(f.Zone, "*/"+zone)
}
} else {
glog.Warningf("DNSZone not specified; protokube won't be able to update DNS")
// @TODO: Should we permit wildcard updates if zone is not specified?
//argv = append(argv, "--zone=*/*")
}
if dns.IsGossipHostname(t.Cluster.Spec.MasterInternalName) {
glog.Warningf("MasterInternalName %q implies gossip DNS", t.Cluster.Spec.MasterInternalName)
f.DNSProvider = fi.String("gossip")
// @TODO: This is hacky, but we want it so that we can have a different internal & external name
internalSuffix := t.Cluster.Spec.MasterInternalName
internalSuffix = strings.TrimPrefix(internalSuffix, "api.")
f.DNSInternalSuffix = fi.String(internalSuffix)
}
if t.Cluster.Spec.CloudProvider != "" {
f.Cloud = fi.String(t.Cluster.Spec.CloudProvider)
if f.DNSProvider == nil {
switch kops.CloudProviderID(t.Cluster.Spec.CloudProvider) {
case kops.CloudProviderAWS:
f.DNSProvider = fi.String("aws-route53")
case kops.CloudProviderGCE:
f.DNSProvider = fi.String("google-clouddns")
case kops.CloudProviderVSphere:
f.DNSProvider = fi.String("coredns")
f.ClusterID = fi.String(t.Cluster.ObjectMeta.Name)
f.DNSServer = fi.String(*t.Cluster.Spec.CloudConfig.VSphereCoreDNSServer)
default:
glog.Warningf("Unknown cloudprovider %q; won't set DNS provider", t.Cluster.Spec.CloudProvider)
}
}
}
if f.DNSInternalSuffix == nil {
f.DNSInternalSuffix = fi.String(".internal." + t.Cluster.ObjectMeta.Name)
}
if k8sVersion.Major == 1 && k8sVersion.Minor <= 5 {
f.ApplyTaints = fi.Bool(true)
}
return f, nil
}
// ProtokubeEnvironmentVariables generates the environments variables for docker
func (t *ProtokubeBuilder) ProtokubeEnvironmentVariables() string {
var buffer bytes.Buffer
// TODO write out an environments file for this. This is getting a tad long.
// Pass in required credentials when using user-defined s3 endpoint
if os.Getenv("AWS_REGION") != "" {
buffer.WriteString(" ")
buffer.WriteString("-e 'AWS_REGION=")
buffer.WriteString(os.Getenv("AWS_REGION"))
buffer.WriteString("'")
buffer.WriteString(" ")
}
if os.Getenv("S3_ENDPOINT") != "" {
buffer.WriteString(" ")
buffer.WriteString("-e S3_ENDPOINT=")
buffer.WriteString("'")
buffer.WriteString(os.Getenv("S3_ENDPOINT"))
buffer.WriteString("'")
buffer.WriteString(" -e S3_REGION=")
buffer.WriteString("'")
buffer.WriteString(os.Getenv("S3_REGION"))
buffer.WriteString("'")
buffer.WriteString(" -e S3_ACCESS_KEY_ID=")
buffer.WriteString("'")
buffer.WriteString(os.Getenv("S3_ACCESS_KEY_ID"))
buffer.WriteString("'")
buffer.WriteString(" -e S3_SECRET_ACCESS_KEY=")
buffer.WriteString("'")
buffer.WriteString(os.Getenv("S3_SECRET_ACCESS_KEY"))
buffer.WriteString("'")
buffer.WriteString(" ")
}
t.writeProxyEnvVars(&buffer)
return buffer.String()
}
func (t *ProtokubeBuilder) writeProxyEnvVars(buffer *bytes.Buffer) {
for _, envVar := range getProxyEnvVars(t.Cluster.Spec.EgressProxy) {
buffer.WriteString(" -e ")
buffer.WriteString(envVar.Name)
buffer.WriteString("=")
buffer.WriteString(envVar.Value)
buffer.WriteString(" ")
}
}
| [
"\"AWS_REGION\"",
"\"AWS_REGION\"",
"\"S3_ENDPOINT\"",
"\"S3_ENDPOINT\"",
"\"S3_REGION\"",
"\"S3_ACCESS_KEY_ID\"",
"\"S3_SECRET_ACCESS_KEY\""
]
| []
| [
"AWS_REGION",
"S3_SECRET_ACCESS_KEY",
"S3_ACCESS_KEY_ID",
"S3_REGION",
"S3_ENDPOINT"
]
| [] | ["AWS_REGION", "S3_SECRET_ACCESS_KEY", "S3_ACCESS_KEY_ID", "S3_REGION", "S3_ENDPOINT"] | go | 5 | 0 | |
image/distributionutil/login.go | // Copyright © 2021 Alibaba Group Holding Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package distributionutil
import (
"context"
"fmt"
"net/http"
"net/url"
"os"
"strings"
"time"
"github.com/pkg/errors"
"github.com/docker/distribution/registry/client/auth"
"github.com/docker/distribution/registry/client/transport"
"github.com/docker/docker/api/types"
"github.com/docker/docker/dockerversion"
dockerRegistry "github.com/docker/docker/registry"
)
func Login(ctx context.Context, authConfig *types.AuthConfig) error {
domain := authConfig.ServerAddress
if !strings.HasPrefix(domain, "http://") && !strings.HasPrefix(domain, "https://") {
domain = "https://" + domain
}
endpointURL, err := url.Parse(domain)
if err != nil {
return err
}
modifiers := dockerRegistry.Headers(dockerversion.DockerUserAgent(ctx), nil)
base := dockerRegistry.NewTransport(nil)
base.TLSClientConfig.InsecureSkipVerify = os.Getenv("SKIP_TLS_VERIFY") == "true"
authTransport := transport.NewTransport(base, modifiers...)
credentialAuthConfig := *authConfig
creds := loginCredentialStore{
authConfig: &credentialAuthConfig,
}
loginClient, err := authHTTPClient(endpointURL, authTransport, modifiers, creds, nil)
if err != nil {
return err
}
endpointStr := strings.TrimRight(endpointURL.String(), "/") + "/v2/"
req, err := http.NewRequest("GET", endpointStr, nil)
if err != nil {
return err
}
resp, err := loginClient.Do(req)
if err != nil {
if strings.Contains(err.Error(), "x509") {
return fmt.Errorf("%v, if you want to skip TLS verification, set the environment variable 'SKIP_TLS_VERIFY=true' ", err)
}
return err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
return nil
}
// TODO(dmcgowan): Attempt to further interpret result, status code and error code string
err = errors.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode))
return err
}
func authHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, error) {
challengeManager, _, err := dockerRegistry.PingV2Registry(endpoint, authTransport)
if err != nil {
return nil, err
}
tokenHandlerOptions := auth.TokenHandlerOptions{
Transport: authTransport,
Credentials: creds,
OfflineAccess: true,
ClientID: dockerRegistry.AuthClientID,
Scopes: scopes,
}
tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions)
basicHandler := auth.NewBasicHandler(creds)
modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))
tr := transport.NewTransport(authTransport, modifiers...)
return &http.Client{
Transport: tr,
Timeout: 15 * time.Second,
}, nil
}
type loginCredentialStore struct {
authConfig *types.AuthConfig
}
func (lcs loginCredentialStore) Basic(*url.URL) (string, string) {
return lcs.authConfig.Username, lcs.authConfig.Password
}
func (lcs loginCredentialStore) RefreshToken(*url.URL, string) string {
return lcs.authConfig.IdentityToken
}
func (lcs loginCredentialStore) SetRefreshToken(u *url.URL, service, token string) {
lcs.authConfig.IdentityToken = token
}
| [
"\"SKIP_TLS_VERIFY\""
]
| []
| [
"SKIP_TLS_VERIFY"
]
| [] | ["SKIP_TLS_VERIFY"] | go | 1 | 0 | |
appdata_test.go | // Copyright (c) 2013-2014 Conformal Systems LLC.
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package btcutil_test
import (
"os"
"os/user"
"path/filepath"
"runtime"
"testing"
"unicode"
"github.com/mably/btcutil"
)
// TestAppDataDir tests the API for AppDataDir to ensure it gives expected
// results for various operating systems.
func TestAppDataDir(t *testing.T) {
// App name plus upper and lowercase variants.
appName := "myapp"
appNameUpper := string(unicode.ToUpper(rune(appName[0]))) + appName[1:]
appNameLower := string(unicode.ToLower(rune(appName[0]))) + appName[1:]
// When we're on Windows, set the expected local and roaming directories
// per the environment vars. When we aren't on Windows, the function
// should return the current directory when forced to provide the
// Windows path since the environment variables won't exist.
winLocal := "."
winRoaming := "."
if runtime.GOOS == "windows" {
localAppData := os.Getenv("LOCALAPPDATA")
roamingAppData := os.Getenv("APPDATA")
if localAppData == "" {
localAppData = roamingAppData
}
winLocal = filepath.Join(localAppData, appNameUpper)
winRoaming = filepath.Join(roamingAppData, appNameUpper)
}
// Get the home directory to use for testing expected results.
var homeDir string
usr, err := user.Current()
if err != nil {
t.Errorf("user.Current: %v", err)
return
}
homeDir = usr.HomeDir
// Mac app data directory.
macAppData := filepath.Join(homeDir, "Library", "Application Support")
tests := []struct {
goos string
appName string
roaming bool
want string
}{
// Various combinations of application name casing, leading
// period, operating system, and roaming flags.
{"windows", appNameLower, false, winLocal},
{"windows", appNameUpper, false, winLocal},
{"windows", "." + appNameLower, false, winLocal},
{"windows", "." + appNameUpper, false, winLocal},
{"windows", appNameLower, true, winRoaming},
{"windows", appNameUpper, true, winRoaming},
{"windows", "." + appNameLower, true, winRoaming},
{"windows", "." + appNameUpper, true, winRoaming},
{"linux", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"darwin", appNameLower, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", appNameUpper, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", "." + appNameLower, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", "." + appNameUpper, false, filepath.Join(macAppData, appNameUpper)},
{"openbsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"plan9", appNameLower, false, filepath.Join(homeDir, appNameLower)},
{"plan9", appNameUpper, false, filepath.Join(homeDir, appNameLower)},
{"plan9", "." + appNameLower, false, filepath.Join(homeDir, appNameLower)},
{"plan9", "." + appNameUpper, false, filepath.Join(homeDir, appNameLower)},
{"unrecognized", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
// No application name provided, so expect current directory.
{"windows", "", false, "."},
{"windows", "", true, "."},
{"linux", "", false, "."},
{"darwin", "", false, "."},
{"openbsd", "", false, "."},
{"freebsd", "", false, "."},
{"netbsd", "", false, "."},
{"plan9", "", false, "."},
{"unrecognized", "", false, "."},
// Single dot provided for application name, so expect current
// directory.
{"windows", ".", false, "."},
{"windows", ".", true, "."},
{"linux", ".", false, "."},
{"darwin", ".", false, "."},
{"openbsd", ".", false, "."},
{"freebsd", ".", false, "."},
{"netbsd", ".", false, "."},
{"plan9", ".", false, "."},
{"unrecognized", ".", false, "."},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
ret := btcutil.TstAppDataDir(test.goos, test.appName, test.roaming)
if ret != test.want {
t.Errorf("appDataDir #%d (%s) does not match - "+
"expected got %s, want %s", i, test.goos, ret,
test.want)
continue
}
}
}
| [
"\"LOCALAPPDATA\"",
"\"APPDATA\""
]
| []
| [
"APPDATA",
"LOCALAPPDATA"
]
| [] | ["APPDATA", "LOCALAPPDATA"] | go | 2 | 0 | |
cmd/env_factory.go | package cmd
import (
"os"
gopath "path"
"time"
"github.com/cppforlife/go-patch/patch"
bihttpagent "github.com/cloudfoundry/bosh-agent/agentclient/http"
biblobstore "github.com/cloudfoundry/bosh-cli/blobstore"
bicloud "github.com/cloudfoundry/bosh-cli/cloud"
biconfig "github.com/cloudfoundry/bosh-cli/config"
bicpirel "github.com/cloudfoundry/bosh-cli/cpi/release"
bidepl "github.com/cloudfoundry/bosh-cli/deployment"
bidisk "github.com/cloudfoundry/bosh-cli/deployment/disk"
biinstance "github.com/cloudfoundry/bosh-cli/deployment/instance"
biinstancestate "github.com/cloudfoundry/bosh-cli/deployment/instance/state"
bideplmanifest "github.com/cloudfoundry/bosh-cli/deployment/manifest"
bideplrel "github.com/cloudfoundry/bosh-cli/deployment/release"
bisshtunnel "github.com/cloudfoundry/bosh-cli/deployment/sshtunnel"
bidepltpl "github.com/cloudfoundry/bosh-cli/deployment/template"
bivm "github.com/cloudfoundry/bosh-cli/deployment/vm"
boshtpl "github.com/cloudfoundry/bosh-cli/director/template"
biindex "github.com/cloudfoundry/bosh-cli/index"
boshinst "github.com/cloudfoundry/bosh-cli/installation"
boshinstmanifest "github.com/cloudfoundry/bosh-cli/installation/manifest"
bitarball "github.com/cloudfoundry/bosh-cli/installation/tarball"
biregistry "github.com/cloudfoundry/bosh-cli/registry"
boshrel "github.com/cloudfoundry/bosh-cli/release"
birelsetmanifest "github.com/cloudfoundry/bosh-cli/release/set/manifest"
bistatepkg "github.com/cloudfoundry/bosh-cli/state/pkg"
bistemcell "github.com/cloudfoundry/bosh-cli/stemcell"
bitemplate "github.com/cloudfoundry/bosh-cli/templatescompiler"
bitemplateerb "github.com/cloudfoundry/bosh-cli/templatescompiler/erbrenderer"
bihttpclient "github.com/cloudfoundry/bosh-utils/httpclient"
)
type envFactory struct {
deps BasicDeps
manifestPath string
manifestVars boshtpl.Variables
manifestOp patch.Op
deploymentStateService biconfig.DeploymentStateService
installationManifestParser ReleaseSetAndInstallationManifestParser
releaseManager boshinst.ReleaseManager
releaseFetcher boshinst.ReleaseFetcher
stemcellFetcher bistemcell.Fetcher
cpiInstaller bicpirel.CpiInstaller
targetProvider boshinst.TargetProvider
cloudFactory bicloud.Factory
diskManagerFactory bidisk.ManagerFactory
vmManagerFactory bivm.ManagerFactory
stemcellManagerFactory bistemcell.ManagerFactory
instanceManagerFactory biinstance.ManagerFactory
deploymentManagerFactory bidepl.ManagerFactory
agentClientFactory bihttpagent.AgentClientFactory
blobstoreFactory biblobstore.Factory
deploymentFactory bidepl.Factory
deploymentRecord bidepl.Record
}
func NewEnvFactory(deps BasicDeps, manifestPath string, statePath string, manifestVars boshtpl.Variables, manifestOp patch.Op) *envFactory {
f := envFactory{
deps: deps,
manifestPath: manifestPath,
manifestVars: manifestVars,
manifestOp: manifestOp,
}
f.releaseManager = boshinst.NewReleaseManager(deps.Logger)
releaseJobResolver := bideplrel.NewJobResolver(f.releaseManager)
// todo expand path?
workspaceRootPath := gopath.Join(os.Getenv("HOME"), ".bosh")
{
tarballCacheBasePath := gopath.Join(workspaceRootPath, "downloads")
tarballCache := bitarball.NewCache(tarballCacheBasePath, deps.FS, deps.Logger)
httpClient := bihttpclient.NewHTTPClient(bitarball.HTTPClient, deps.Logger)
tarballProvider := bitarball.NewProvider(
tarballCache, deps.FS, httpClient, 3, 500*time.Millisecond, deps.Logger)
releaseProvider := boshrel.NewProvider(
deps.CmdRunner, deps.Compressor, deps.DigestCalculator, deps.FS, deps.Logger)
f.releaseFetcher = boshinst.NewReleaseFetcher(
tarballProvider,
releaseProvider.NewExtractingArchiveReader(),
f.releaseManager,
)
stemcellReader := bistemcell.NewReader(deps.Compressor, deps.FS)
stemcellExtractor := bistemcell.NewExtractor(stemcellReader, deps.FS)
f.stemcellFetcher = bistemcell.Fetcher{
TarballProvider: tarballProvider,
StemcellExtractor: stemcellExtractor,
}
}
f.deploymentStateService = biconfig.NewFileSystemDeploymentStateService(
deps.FS, deps.UUIDGen, deps.Logger, biconfig.DeploymentStatePath(manifestPath, statePath))
{
registryServer := biregistry.NewServerManager(deps.Logger)
installerFactory := boshinst.NewInstallerFactory(
deps.UI, deps.CmdRunner, deps.Compressor, releaseJobResolver,
deps.UUIDGen, registryServer, deps.Logger, deps.FS, deps.DigestCreationAlgorithms)
f.cpiInstaller = bicpirel.CpiInstaller{
ReleaseManager: f.releaseManager,
InstallerFactory: installerFactory,
Validator: bicpirel.NewValidator(),
}
}
f.targetProvider = boshinst.NewTargetProvider(
f.deploymentStateService, deps.UUIDGen, gopath.Join(workspaceRootPath, "installations"))
{
diskRepo := biconfig.NewDiskRepo(f.deploymentStateService, deps.UUIDGen)
stemcellRepo := biconfig.NewStemcellRepo(f.deploymentStateService, deps.UUIDGen)
vmRepo := biconfig.NewVMRepo(f.deploymentStateService)
f.diskManagerFactory = bidisk.NewManagerFactory(diskRepo, deps.Logger)
diskDeployer := bivm.NewDiskDeployer(f.diskManagerFactory, diskRepo, deps.Logger)
f.stemcellManagerFactory = bistemcell.NewManagerFactory(stemcellRepo)
f.vmManagerFactory = bivm.NewManagerFactory(
vmRepo, stemcellRepo, diskDeployer, deps.UUIDGen, deps.FS, deps.Logger)
deploymentRepo := biconfig.NewDeploymentRepo(f.deploymentStateService)
releaseRepo := biconfig.NewReleaseRepo(f.deploymentStateService, deps.UUIDGen)
f.deploymentRecord = bidepl.NewRecord(deploymentRepo, releaseRepo, stemcellRepo)
}
{
f.blobstoreFactory = biblobstore.NewBlobstoreFactory(deps.UUIDGen, deps.FS, deps.Logger)
f.deploymentFactory = bidepl.NewFactory(10*time.Second, 500*time.Millisecond)
f.agentClientFactory = bihttpagent.NewAgentClientFactory(1*time.Second, deps.Logger)
f.cloudFactory = bicloud.NewFactory(deps.FS, deps.CmdRunner, deps.Logger)
}
{
erbRenderer := bitemplateerb.NewERBRenderer(deps.FS, deps.CmdRunner, deps.Logger)
jobRenderer := bitemplate.NewJobRenderer(erbRenderer, deps.FS, deps.UUIDGen, deps.Logger)
builderFactory := biinstancestate.NewBuilderFactory(
bistatepkg.NewCompiledPackageRepo(biindex.NewInMemoryIndex()),
releaseJobResolver,
bitemplate.NewJobListRenderer(jobRenderer, deps.Logger),
bitemplate.NewRenderedJobListCompressor(deps.FS, deps.Compressor, deps.DigestCalculator, deps.Logger),
deps.Logger,
)
sshTunnelFactory := bisshtunnel.NewFactory(deps.Logger)
instanceFactory := biinstance.NewFactory(builderFactory)
f.instanceManagerFactory = biinstance.NewManagerFactory(
sshTunnelFactory, instanceFactory, deps.Logger)
}
{
releaseSetValidator := birelsetmanifest.NewValidator(deps.Logger)
releaseSetParser := birelsetmanifest.NewParser(deps.FS, deps.Logger, releaseSetValidator)
installValidator := boshinstmanifest.NewValidator(deps.Logger)
installParser := boshinstmanifest.NewParser(deps.FS, deps.UUIDGen, deps.Logger, installValidator)
f.installationManifestParser = ReleaseSetAndInstallationManifestParser{
ReleaseSetParser: releaseSetParser,
InstallationParser: installParser,
}
}
return &f
}
func (f *envFactory) Preparer() DeploymentPreparer {
return NewDeploymentPreparer(
f.deps.UI,
f.deps.Logger,
"DeploymentPreparer",
f.deploymentStateService,
biconfig.NewLegacyDeploymentStateMigrator(
f.deploymentStateService,
f.deps.FS,
f.deps.UUIDGen,
f.deps.Logger,
),
f.releaseManager,
f.deploymentRecord,
f.cloudFactory,
f.stemcellManagerFactory,
f.agentClientFactory,
f.vmManagerFactory,
f.blobstoreFactory,
bidepl.NewDeployer(
f.vmManagerFactory,
f.instanceManagerFactory,
f.deploymentFactory,
f.deps.Logger,
),
f.manifestPath,
f.manifestVars,
f.manifestOp,
f.cpiInstaller,
f.releaseFetcher,
f.stemcellFetcher,
f.installationManifestParser,
NewDeploymentManifestParser(
bideplmanifest.NewParser(f.deps.FS, f.deps.Logger),
bideplmanifest.NewValidator(f.deps.Logger),
f.releaseManager,
bidepltpl.NewDeploymentTemplateFactory(f.deps.FS),
),
NewTempRootConfigurator(f.deps.FS),
f.targetProvider,
)
}
func (f *envFactory) Deleter() DeploymentDeleter {
return NewDeploymentDeleter(
f.deps.UI,
"DeploymentDeleter",
f.deps.Logger,
f.deploymentStateService,
f.releaseManager,
f.cloudFactory,
f.agentClientFactory,
f.blobstoreFactory,
bidepl.NewManagerFactory(
f.vmManagerFactory,
f.instanceManagerFactory,
f.diskManagerFactory,
f.stemcellManagerFactory,
f.deploymentFactory,
),
f.manifestPath,
f.manifestVars,
f.manifestOp,
f.cpiInstaller,
boshinst.NewUninstaller(f.deps.FS, f.deps.Logger),
f.releaseFetcher,
f.installationManifestParser,
NewTempRootConfigurator(f.deps.FS),
f.targetProvider,
)
}
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
main.py | from discord.ext import commands
import os
# Create the bot; cogs under ./cogs are loaded near the bottom of this file
client = commands.Bot(command_prefix="-")
client.remove_command('help')
@client.command(hidden=True)
@commands.is_owner()
async def load(ctx, extension):
client.load_extension(f'cogs.{extension}')
await ctx.send(f'Loaded: {extension}')
@client.command(hidden=True)
@commands.is_owner()
async def unload(ctx, extension):
client.unload_extension(f'cogs.{extension}')
await ctx.send(f'Unloaded: {extension}')
@client.command(hidden=True)
@commands.is_owner()
async def reload(ctx, extension):
client.unload_extension(f'cogs.{extension}')
client.load_extension(f'cogs.{extension}')
await ctx.send(f'Reloaded: {extension}')
for filename in os.listdir('./cogs'):
    if filename.endswith('.py'):
client.load_extension(f'cogs.{filename[:-3]}')
if __name__ == "__main__":
client.run(os.getenv('discord_token'))
| []
| []
| [
"discord_token"
]
| [] | ["discord_token"] | python | 1 | 0 | |
test/unit/test_transformation.py | from collections import OrderedDict
from ektelo.data import Relation
from ektelo.data import RelationHelper
import numpy as np
import os
from ektelo.client.mapper import Grid
from ektelo.private.transformation import Group
from ektelo.private.transformation import Null
from ektelo.private.transformation import ReduceByPartition
from ektelo.private.transformation import Reshape
from ektelo.private.transformation import Filter
from ektelo.private.transformation import Where
from ektelo.private.transformation import Project
from ektelo.private.transformation import Vectorize
import unittest
import yaml
CSV_PATH = os.environ['EKTELO_DATA']
CONFIG_PATH = os.path.join(os.environ['EKTELO_HOME'], 'resources', 'config')
class TestTransformation(unittest.TestCase):
def setUp(self):
self.n = 8
self.grid_shape = 2
self.idxs = [1,3,5]
self.X = np.random.rand(self.n)
delimiter = ','
self.reduced_domain = (10, 10, 7, 4, 2)
config = yaml.load(open(os.path.join(CONFIG_PATH, 'cps.yml'), 'r').read())
self.relation = RelationHelper('CPS').load()
def test_vectorize_operator(self):
vectorize = Vectorize('CPS-CSV', reduced_domain=self.reduced_domain)
transformation = vectorize.transform(self.relation)
X = transformation
self.assertEqual(np.prod(self.reduced_domain), len(X))
def test_where_operator(self):
where = Where('age >= 30')
X = where.transform(self.relation)
self.assertEqual(X._df.age.min(), 30)
def test_project_operator(self):
project = Project(['income'])
X = project.transform(self.relation)
np.testing.assert_array_equal(X.domains, [X.config['income']['domain']])
def test_group_operator(self):
group = Group(self.idxs)
transformation = group.transform(self.X)
self.assertEqual(transformation.shape, (3,))
np.testing.assert_array_equal(transformation, self.X[self.idxs])
def test_reduce_operator(self):
grid = Grid(self.n, self.grid_shape, canonical_order=False)
mapping = grid.mapping()
reduction = ReduceByPartition(mapping)
transformation = reduction.transform(self.X)
for i in range(4):
self.assertEqual(sum(self.X[2*i:2*i+2]), transformation[i])
def test_reshape_operator(self):
shape = (4, 2)
reshaper = Reshape(shape)
x_hat = reshaper.transform(self.X)
self.assertEqual(x_hat.shape, shape)
def test_filter_operator(self):
vectorize = Vectorize('CPS-CSV', reduced_domain=self.reduced_domain)
transformation = vectorize.transform(self.relation)
X = transformation
mask = np.ones(self.reduced_domain).flatten()
filterer = Filter(mask)
self.assertEqual(sum(filterer.transform(X)), sum(X))
filterer = Filter(1-mask)
self.assertEqual(sum(filterer.transform(X)), 0)
def test_null_operator(self):
null = Null()
transformation = null.transform(self.X)
np.testing.assert_array_equal(transformation, self.X)
| []
| []
| [
"EKTELO_DATA",
"EKTELO_HOME"
]
| [] | ["EKTELO_DATA", "EKTELO_HOME"] | python | 2 | 0 | |
pkg/utils/http_client.go | // Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package utils
import (
"crypto/tls"
"fmt"
"io"
"net"
"net/http"
"net/url"
"os"
"time"
)
// HTTPClient is a wrap of http.Client
type HTTPClient struct {
client *http.Client
}
// NewHTTPClient returns a new HTTP client with timeout and HTTPS support
func NewHTTPClient(timeout time.Duration, tlsConfig *tls.Config) *HTTPClient {
if timeout < time.Second {
timeout = 10 * time.Second // default timeout is 10s
}
tr := &http.Transport{
TLSClientConfig: tlsConfig,
Dial: (&net.Dialer{Timeout: 5 * time.Second}).Dial,
}
// prefer to use the inner http proxy
httpProxy := os.Getenv("TIUP_INNER_HTTP_PROXY")
if len(httpProxy) == 0 {
httpProxy = os.Getenv("HTTP_PROXY")
}
if len(httpProxy) > 0 {
if proxyURL, err := url.Parse(httpProxy); err == nil {
tr.Proxy = http.ProxyURL(proxyURL)
}
}
return &HTTPClient{
client: &http.Client{
Timeout: timeout,
Transport: tr,
},
}
}
// Get fetch an URL with GET method and returns the response
func (c *HTTPClient) Get(url string) ([]byte, error) {
res, err := c.client.Get(url)
if err != nil {
return nil, err
}
defer res.Body.Close()
return checkHTTPResponse(res)
}
// Post send a POST request to the url and returns the response
func (c *HTTPClient) Post(url string, body io.Reader) ([]byte, error) {
res, err := c.client.Post(url, "application/json", body)
if err != nil {
return nil, err
}
defer res.Body.Close()
return checkHTTPResponse(res)
}
// Delete send a DELETE request to the url and returns the response and status code.
func (c *HTTPClient) Delete(url string, body io.Reader) ([]byte, int, error) {
var statusCode int
req, err := http.NewRequest("DELETE", url, body)
if err != nil {
return nil, statusCode, err
}
res, err := c.client.Do(req)
if err != nil {
return nil, statusCode, err
}
defer res.Body.Close()
b, err := checkHTTPResponse(res)
statusCode = res.StatusCode
return b, statusCode, err
}
// Client returns the http.Client
func (c *HTTPClient) Client() *http.Client {
return c.client
}
// WithClient uses the specified HTTP client
func (c *HTTPClient) WithClient(client *http.Client) *HTTPClient {
c.client = client
return c
}
// checkHTTPResponse checks if an HTTP response is with normal status codes
func checkHTTPResponse(res *http.Response) ([]byte, error) {
body, err := io.ReadAll(res.Body)
if err != nil {
return nil, err
}
if res.StatusCode < 200 || res.StatusCode >= 400 {
return body, fmt.Errorf("error requesting %s, response: %s, code %d",
res.Request.URL, string(body), res.StatusCode)
}
return body, nil
}
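
A minimal usage sketch for the client above (illustrative only; the import path is an assumption based on the repository layout, and the URL is a placeholder):

package main

import (
	"fmt"
	"time"

	"github.com/pingcap/tiup/pkg/utils" // assumed import path for the package above
)

func main() {
	// Timeouts under 1s are bumped to the 10s default by NewHTTPClient;
	// a nil *tls.Config means plain HTTP or default TLS settings.
	client := utils.NewHTTPClient(5*time.Second, nil)
	body, err := client.Get("https://example.com/health") // placeholder URL
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	fmt.Printf("fetched %d bytes\n", len(body))
}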

src/main/java/com/github/gabrielpbzr/cookbook/utils/Configuration.java

package com.github.gabrielpbzr.cookbook.utils;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.HashMap;
import java.util.Map;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class Configuration {
    private static Configuration instance;
    private Map<String, String> properties;
    private final Logger logger = LoggerFactory.getLogger(Configuration.class);

    private Configuration() {
    }

    public static Configuration getInstance() {
        if (instance == null) {
            instance = new Configuration();
            instance.load();
        }
        return instance;
    }

    public String getValue(String key) {
        return properties.getOrDefault(key, "");
    }

    /**
     * Load configuration from the process environment, then override it with
     * entries from a .env file on the classpath (if present).
     */
    private void load() {
        this.properties = new HashMap<>(System.getenv());
        InputStream in = getClass().getClassLoader().getResourceAsStream(".env");
        if (in == null) {
            // No .env on the classpath; keep the plain environment variables.
            return;
        }
        try (BufferedReader br = new BufferedReader(new InputStreamReader(in))) {
            String line;
            while ((line = br.readLine()) != null) {
                if (line.startsWith("#")) {
                    continue;
                }
                int pos = line.indexOf("=");
                if (pos > -1) {
                    String key = line.substring(0, pos).trim();
                    String value = line.substring(pos + 1).trim();
                    properties.put(key, value);
                }
            }
        } catch (IOException ioe) {
            logger.error("Error loading environment variables", ioe);
        }
    }
}

weasyl/macro.py

from __future__ import absolute_import
import os
from libweasyl import ratings
MACRO_EMAIL_ADDRESS = "[email protected]"
MACRO_BCRYPT_ROUNDS = 13
# Example input (userid, "su")
MACRO_IGNOREUSER = " AND NOT EXISTS (SELECT 0 FROM ignoreuser WHERE (userid, otherid) = (%i, %s.userid))"
# Example input (userid, userid)
MACRO_BLOCKTAG_SUBMIT = (
" AND (su.userid = %i OR NOT EXISTS (SELECT 0 FROM searchmapsubmit"
" WHERE targetid = su.submitid"
" AND tagid IN (SELECT tagid FROM blocktag WHERE userid = %i AND rating <= su.rating)))")
# Example input (userid, userid)
MACRO_BLOCKTAG_CHAR = (
" AND (ch.userid = %i OR NOT EXISTS (SELECT 0 FROM searchmapchar"
" WHERE targetid = ch.charid AND tagid IN (SELECT tagid FROM blocktag WHERE userid = %i AND rating <= ch.rating)))")
# Example input (userid, userid)
MACRO_BLOCKTAG_JOURNAL = (
" AND (jo.userid = %i OR NOT EXISTS (SELECT 0 FROM searchmapjournal"
" WHERE targetid = jo.journalid AND tagid IN (SELECT tagid FROM blocktag"
" WHERE userid = %i AND rating <= jo.rating)))")
# Example input (userid, userid, userid)
MACRO_FRIENDUSER_SUBMIT = (
" AND (su.settings !~ 'f' OR su.userid = %i OR EXISTS (SELECT 0 FROM frienduser"
" WHERE ((userid, otherid) = (%i, su.userid) OR (userid, otherid) = (su.userid, %i)) AND settings !~ 'p'))")
# Example input (userid, userid, userid)
MACRO_FRIENDUSER_JOURNAL = (
" AND (jo.settings !~ 'f' OR jo.userid = %i OR EXISTS (SELECT 0 FROM frienduser"
" WHERE ((userid, otherid) = (%i, jo.userid) OR (userid, otherid) = (jo.userid, %i)) AND settings !~ 'p'))")
# Example input (userid, userid, userid)
MACRO_FRIENDUSER_CHARACTER = (
" AND (ch.settings !~ 'f' or ch.userid = %i OR EXISTS (SELECT 0 from frienduser"
" WHERE ((userid, otherid) = (%i, ch.userid) OR (userid, otherid) = (ch.userid, %i)) AND settings !~ 'p'))")
MACRO_SUBCAT_LIST = [
[1010, "Sketch"],
[1020, "Traditional"],
[1030, "Digital"],
[1040, "Animation"],
[1050, "Photography"],
[1060, "Design / Interface"],
[1070, "Modeling / Sculpture"],
[1075, "Crafts / Jewelry"],
[1078, "Sewing / Knitting"],
[1080, "Desktop / Wallpaper"],
[1999, "Other"],
[2010, "Story"],
[2020, "Poetry / Lyrics"],
[2030, "Script / Screenplay"],
[2999, "Other"],
[3010, "Original Music"],
[3020, "Cover Version"],
[3030, "Remix / Mashup"],
[3040, "Speech / Reading"],
[3500, "Embedded Video"],
[3999, "Other"],
]
# Mod actions which apply to all submissions
MACRO_MOD_ACTIONS = [
# Line below is so default mod action is 'nothing'. Intentional behavior.
('null', ''),
('hide', 'Hide'),
('show', 'Show'),
] + [('rate-%s' % (r.code,), 'Rate %s' % (r.name,)) for r in ratings.ALL_RATINGS] + [
('clearcritique', 'Remove critique-requested flag'),
('setcritique', 'Set critique-requested flag'),
]
def MACRO_MOD_ACTIONS_FOR_SETTINGS(settings, submission_type):
    # We start with the complete list of mod actions, then filter it based on submission_type
    valid_list = MACRO_MOD_ACTIONS

    # Journals and characters can't have the critique flag set
    if submission_type in ("journal", "character"):
        valid_list = [(a, b) for a, b in valid_list if not a.endswith('critique')]

    # Select whether we show 'Show' or 'Hide' depending on whether the
    # submission is hidden
    if 'h' in settings:
        valid_list = [(a, b) for a, b in valid_list if a != 'hide']
    else:
        valid_list = [(a, b) for a, b in valid_list if a != 'show']

    # Select whether we show 'Set Critique' or 'Clear Critique' depending on
    # whether the Critique Requested flag is set
    if 'q' in settings:
        valid_list = [(a, b) for a, b in valid_list if a != 'setcritique']
    else:
        valid_list = [(a, b) for a, b in valid_list if a != 'clearcritique']

    # Return our shiny, filtered list of mod actions
    return valid_list
MACRO_REPORT_URGENCY = [
(10, "Urgent"),
(20, "Normal"),
(30, "Trivial"),
]
MACRO_REPORT_VIOLATION = [
# fields: id, urgency, text, comment_required
# id must be unique
# urgency is no longer used, but refers to MACRO_REPORT_URGENCY
# text is the text which describes this type of violation
# comment_required: whether the user must submit a comment with this report type
# Mod comments
(0, 0, "Comment", False),
# Report user
(1010, 10, "Inappropriate avatar", False),
(1020, 10, "Inappropriate profile picture", False),
(1030, 20, "Spam or alternate account", False),
(1999, 20, "Other (please comment)", True),
# Report submission or character
(2010, 20, "Harassing content", False),
(2020, 30, "Tracing or plagiarism", True),
(2030, 10, "Rapidly flashing colors", False),
(2040, 10, "Incorrect content rating", False),
(2050, 20, "Perpetual incorrect tagging", False),
(2060, 30, "Low-quality photography", False),
(2065, 30, "Low-quality literature", False),
(2070, 20, "Spamming or flooding", False),
(2080, 20, "Meme or image macro", False),
(2090, 20, "Unacceptable screenshot", False),
(2110, 10, "Illegal content", False),
(2120, 10, "Photographic pornography", False),
(2130, 10, "Offensive content", False),
(2140, 10, "Misleading thumbnail", False),
(2999, 20, "Other (please comment)", True),
# Report journal
(3010, 20, "Harassing content", False),
(3020, 20, "Spamming or flooding", False),
(3030, 10, "Illegal activity", False),
(3040, 10, "Offensive content", False),
(3999, 20, "Other (please comment)", True),
# Report shout or comment
(4010, 20, "Harassing comment", False),
(4020, 20, "Spamming or flooding", False),
(4030, 20, "Inappropriate comment", False),
(4999, 20, "Other (please comment)", True),
]
MACRO_APP_ROOT = os.environ['WEASYL_APP_ROOT'] + "/"
MACRO_STORAGE_ROOT = os.environ['WEASYL_STORAGE_ROOT'] + "/"
MACRO_URL_CHAR_PATH = "static/character/"
MACRO_SYS_CHAR_PATH = os.path.join(MACRO_STORAGE_ROOT, MACRO_URL_CHAR_PATH)
MACRO_SYS_LOG_PATH = os.path.join(MACRO_STORAGE_ROOT, "log/")
MACRO_SYS_TEMP_PATH = os.path.join(MACRO_STORAGE_ROOT, "temp/")
MACRO_SYS_CONFIG_PATH = os.path.join(MACRO_APP_ROOT, "config/")
MACRO_SYS_STAFF_CONFIG_PATH = os.path.join(MACRO_SYS_CONFIG_PATH, "weasyl-staff.py")
MACRO_BLANK_THUMB = "/static/images/default-thumbs/visual.png"
MACRO_DEFAULT_SUBMISSION_THUMBNAIL = [
{
'display_url': MACRO_BLANK_THUMB,
'file_url': MACRO_BLANK_THUMB,
},
]
MACRO_BLANK_AVATAR = "/static/images/avatar_default.jpg"
MACRO_DEFAULT_AVATAR = [
{
'display_url': MACRO_BLANK_AVATAR,
'file_url': MACRO_BLANK_AVATAR,
},
]
MACRO_CFG_SITE_CONFIG = MACRO_SYS_CONFIG_PATH + "site.config.txt"
SOCIAL_SITES = {
"deviantart": {
"name": "deviantArt",
"url": "https://%s.deviantart.com/",
},
"facebook": {
"name": "Facebook",
"url": "https://www.facebook.com/%s",
},
"flickr": {
"name": "Flickr",
"url": "https://www.flickr.com/photos/%s",
},
"furaffinity": {
"name": "Fur Affinity",
"url": "https://www.furaffinity.net/user/%s",
},
"googleplus": {
"name": "Google+",
"url": "https://plus.google.com/+%s",
},
"inkbunny": {
"name": "Inkbunny",
"url": "https://inkbunny.net/%s",
},
"reddit": {
"name": "reddit",
"url": "https://www.reddit.com/user/%s",
},
"sofurry": {
"name": "SoFurry",
"url": "https://%s.sofurry.com/",
},
"steam": {
"name": "Steam",
"url": "https://steamcommunity.com/id/%s",
},
"tumblr": {
"name": "Tumblr",
"url": "https://%s.tumblr.com/",
},
"twitter": {
"name": "Twitter",
"url": "https://twitter.com/%s",
},
"youtube": {
"name": "YouTube",
"url": "https://www.youtube.com/user/%s",
},
"patreon": {
"name": "Patreon",
"url": "https://www.patreon.com/%s",
},
}
SOCIAL_SITES_BY_NAME = {v['name']: v for v in SOCIAL_SITES.itervalues()}
ART_SUBMISSION_CATEGORY = 1000
TEXT_SUBMISSION_CATEGORY = 2000
MULTIMEDIA_SUBMISSION_CATEGORY = 3000
ALL_SUBMISSION_CATEGORIES = [
ART_SUBMISSION_CATEGORY,
TEXT_SUBMISSION_CATEGORY,
MULTIMEDIA_SUBMISSION_CATEGORY,
]
CONTYPE_PARSABLE_MAP = {
10: 'submission',
20: 'character',
30: 'journal',
40: 'usercollect',
}
CATEGORY_PARSABLE_MAP = {
ART_SUBMISSION_CATEGORY: 'visual',
TEXT_SUBMISSION_CATEGORY: 'literary',
MULTIMEDIA_SUBMISSION_CATEGORY: 'multimedia',
}

main.go

package main

import (
	"log"
	"math/rand"
	"net/http"
	"os"
	"time"

	"github.com/Rocksus/joke-api/joke"
	"github.com/gorilla/mux"
	"github.com/joho/godotenv"
)

func init() {
	rand.Seed(time.Now().UTC().UnixNano())
}

func main() {
	r := mux.NewRouter()

	joke.Load("data/jokes.json")
	jokeHandler := joke.InitHandler()
	randomHandler := joke.InitRandomHandler()

	r.HandleFunc("/jokes", randomHandler).Methods("GET")
	r.HandleFunc("/jokes/{category}", randomHandler).Methods("GET")
	r.HandleFunc("/joke/{id:[0-9]+}", jokeHandler).Methods("GET")

	godotenv.Load()
	port := os.Getenv("PORT")
	if port == "" {
		port = "8000"
	}

	srv := &http.Server{
		Handler:      r,
		Addr:         ":" + port,
		ReadTimeout:  5 * time.Second,
		WriteTimeout: 5 * time.Second,
	}
	log.Printf("Server listening to port %s", port)
	log.Fatal(srv.ListenAndServe())
}
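
A quick way to exercise the routes above once the server is running (a sketch; it assumes the default port 8000 and a populated data/jokes.json):

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// The router above serves /jokes, /jokes/{category} and /joke/{id}.
	res, err := http.Get("http://localhost:8000/jokes")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer res.Body.Close()
	body, _ := io.ReadAll(res.Body)
	fmt.Println(string(body)) // a random joke, as returned by InitRandomHandler
}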

src/common/resource/resource.go

// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.

package resource

import (
	"os"
	"path"
	"strings"

	"github.com/golang/glog"
)

var (
	// ResourcePath is the default path to the resource directory.
	ResourcePath = "/github.com/facebookresearch/Clinical-Trial-Parser/src/resources"
	// DataPath is the default path to the data directory.
	DataPath = "/github.com/facebookresearch/Clinical-Trial-Parser/data"
)

// GetResourcePath returns the path to the project's resource directory.
func GetResourcePath() string {
	if env := os.Getenv("RESOURCE_PATH"); len(env) != 0 {
		return env
	}
	if env := os.Getenv("GOPATH"); env != "" {
		gopaths := strings.Split(env, ":")
		for _, gp := range gopaths {
			check := path.Join(gp, "src", ResourcePath)
			if _, err := os.Stat(check); err == nil {
				return check
			}
		}
	}
	glog.Fatalf("Cannot find resource path for %q\n", ResourcePath)
	return ""
}

// GetDataPath returns the path to the project's data directory.
func GetDataPath() string {
	if env := os.Getenv("DATA_PATH"); len(env) != 0 {
		return env
	}
	if env := os.Getenv("GOPATH"); env != "" {
		gopaths := strings.Split(env, ":")
		for _, gp := range gopaths {
			check := path.Join(gp, "src", DataPath)
			if _, err := os.Stat(check); err == nil {
				return check
			}
		}
	}
	glog.Fatalf("Cannot find data path for %q\n", DataPath)
	return ""
}
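
A minimal sketch of how the lookup order above plays out (the import path is an assumption based on the repository layout; the directory is a placeholder):

package main

import (
	"fmt"
	"os"

	"github.com/facebookresearch/Clinical-Trial-Parser/src/common/resource" // assumed import path
)

func main() {
	// RESOURCE_PATH short-circuits the GOPATH search entirely.
	os.Setenv("RESOURCE_PATH", "/opt/ctp/resources") // placeholder directory
	fmt.Println(resource.GetResourcePath())          // prints /opt/ctp/resources

	// Without DATA_PATH set, GetDataPath falls back to scanning each GOPATH
	// entry for src/<DataPath> and calls glog.Fatalf if none exists.
}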

src/hwc/integration/integration_suite_test.go

package integration_test

import (
	"encoding/json"
	"flag"
	"fmt"
	"net/url"
	"os"
	"os/exec"
	"path/filepath"
	"time"

	"github.com/blang/semver"
	"github.com/cloudfoundry/libbuildpack/cutlass"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"testing"
)

var bpDir string
var buildpackVersion string
var packagedBuildpack cutlass.VersionedBuildpackPackage
var token string
var platform string

var _ = func() bool {
	testing.Init()
	return true
}()

func init() {
	flag.StringVar(&buildpackVersion, "version", "", "version to use (builds if empty)")
	flag.BoolVar(&cutlass.Cached, "cached", true, "cached buildpack")
	flag.StringVar(&cutlass.DefaultMemory, "memory", "256M", "default memory for pushed apps")
	flag.StringVar(&cutlass.DefaultDisk, "disk", "384M", "default disk for pushed apps")
	flag.StringVar(&token, "github-token", "", "use the token to make GitHub API requests")
	flag.StringVar(&platform, "platform", "cf", "platform to run tests against")
	flag.Parse()
}

var _ = SynchronizedBeforeSuite(func() []byte {
	// Run once
	if buildpackVersion == "" {
		packagedBuildpack, err := cutlass.PackageUniquelyVersionedBuildpack(os.Getenv("CF_STACK"), ApiHasStackAssociation())
		Expect(err).NotTo(HaveOccurred())

		data, err := json.Marshal(packagedBuildpack)
		Expect(err).NotTo(HaveOccurred())

		return data
	}
	return []byte{}
}, func(data []byte) {
	// Run on all nodes
	var err error
	if len(data) > 0 {
		err = json.Unmarshal(data, &packagedBuildpack)
		Expect(err).NotTo(HaveOccurred())
		buildpackVersion = packagedBuildpack.Version
	}

	bpDir, err = cutlass.FindRoot()
	Expect(err).NotTo(HaveOccurred())

	Expect(cutlass.CopyCfHome()).To(Succeed())
	cutlass.SeedRandom()
	cutlass.DefaultStdoutStderr = GinkgoWriter
})

var _ = SynchronizedAfterSuite(func() {
	// Run on all nodes
}, func() {
	// Run once
	Expect(cutlass.RemovePackagedBuildpack(packagedBuildpack)).To(Succeed())
	Expect(cutlass.DeleteOrphanedRoutes()).To(Succeed())
})

func TestIntegration(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Integration Suite")
}

func PushAppAndConfirm(app *cutlass.App) {
	err := app.Push()
	ExpectWithOffset(1, err).ToNot(HaveOccurred())
	Eventually(func() ([]string, error) { return app.InstanceStates() }, 20*time.Second).Should(Equal([]string{"RUNNING"}))
	ExpectWithOffset(1, app.ConfirmBuildpack(buildpackVersion)).To(Succeed())
}

func Restart(app *cutlass.App) {
	Expect(app.Restart()).To(Succeed())
	Eventually(func() ([]string, error) { return app.InstanceStates() }, 20*time.Second).Should(Equal([]string{"RUNNING"}))
}

func ApiHasTask() bool {
	apiVersionString, err := cutlass.ApiVersion()
	Expect(err).To(BeNil())
	apiVersion, err := semver.Make(apiVersionString)
	Expect(err).To(BeNil())
	apiHasTask, err := semver.ParseRange(">= 2.75.0")
	Expect(err).To(BeNil())
	return apiHasTask(apiVersion)
}

func ApiHasMultiBuildpack() bool {
	supported, err := cutlass.ApiGreaterThan("2.90.0")
	Expect(err).NotTo(HaveOccurred())
	return supported
}

func ApiHasStackAssociation() bool {
	supported, err := cutlass.ApiGreaterThan("2.113.0")
	Expect(err).NotTo(HaveOccurred())
	return supported
}

func SkipUnlessUncached() {
	if cutlass.Cached {
		Skip("Running cached tests")
	}
}

func SkipUnlessCached() {
	if !cutlass.Cached {
		Skip("Running uncached tests")
	}
}

func DestroyApp(app *cutlass.App) *cutlass.App {
	if app != nil {
		app.Destroy()
	}
	return nil
}

func AssertUsesProxyDuringStagingIfPresent(fixtureName string) {
	Context("with an uncached buildpack", func() {
		BeforeEach(SkipUnlessUncached)

		It("uses a proxy during staging if present", func() {
			proxy, err := cutlass.NewProxy()
			Expect(err).To(BeNil())
			defer proxy.Close()

			bpFile := filepath.Join(bpDir, buildpackVersion+"tmp")
			cmd := exec.Command("cp", packagedBuildpack.File, bpFile)
			err = cmd.Run()
			Expect(err).To(BeNil())
			defer os.Remove(bpFile)

			traffic, built, _, err := cutlass.InternetTraffic(
				filepath.Join("fixtures", fixtureName),
				bpFile,
				[]string{"HTTP_PROXY=" + proxy.URL, "HTTPS_PROXY=" + proxy.URL},
			)
			Expect(err).To(BeNil())
			Expect(built).To(BeTrue())

			destUrl, err := url.Parse(proxy.URL)
			Expect(err).To(BeNil())

			Expect(cutlass.UniqueDestination(
				traffic, fmt.Sprintf("%s.%s", destUrl.Hostname(), destUrl.Port()),
			)).To(BeNil())
		})
	})
}

func AssertNoInternetTraffic(fixtureName string) {
	It("has no traffic", func() {
		SkipUnlessCached()

		bpFile := filepath.Join(bpDir, buildpackVersion+"tmp")
		cmd := exec.Command("cp", packagedBuildpack.File, bpFile)
		err := cmd.Run()
		Expect(err).To(BeNil())
		defer os.Remove(bpFile)

		traffic, built, _, err := cutlass.InternetTraffic(
			filepath.Join("fixtures", fixtureName),
			bpFile,
			[]string{},
		)
		Expect(err).To(BeNil())
		Expect(built).To(BeTrue())
		Expect(traffic).To(BeEmpty())
	})
}
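
A sketch of how the helpers above are typically combined in a spec (the fixture name and expected response body are assumptions, not part of this suite):

var _ = Describe("a simple app", func() {
	var app *cutlass.App

	AfterEach(func() { app = DestroyApp(app) })

	It("stages and serves requests", func() {
		app = cutlass.New(filepath.Join("fixtures", "windows_app")) // hypothetical fixture
		PushAppAndConfirm(app)
		Expect(app.GetBody("/")).To(ContainSubstring("hello world")) // assumed response body
	})
})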

superset/security/manager.py

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-few-public-methods
"""A set of constants and methods to manage permissions and security"""
import logging
import re
from typing import Any, Callable, cast, List, Optional, Set, Tuple, TYPE_CHECKING, Union

from flask import current_app, g
from flask_appbuilder import Model
from flask_appbuilder.security.sqla.manager import SecurityManager
from flask_appbuilder.security.sqla.models import (
    assoc_permissionview_role,
    assoc_user_role,
    PermissionView,
)
from flask_appbuilder.security.views import (
    PermissionModelView,
    PermissionViewModelView,
    RoleModelView,
    UserModelView,
    ViewMenuModelView,
)
from flask_appbuilder.widgets import ListWidget
from sqlalchemy import and_, or_
from sqlalchemy.engine.base import Connection
from sqlalchemy.orm.mapper import Mapper
from sqlalchemy.orm.query import Query as SqlaQuery

from superset import sql_parse
from superset.connectors.connector_registry import ConnectorRegistry
from superset.constants import RouteMethod
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.exceptions import SupersetSecurityException
from superset.utils.core import DatasourceName, RowLevelSecurityFilterType

if TYPE_CHECKING:
    from superset.common.query_context import QueryContext
    from superset.connectors.base.models import BaseDatasource
    from superset.connectors.druid.models import DruidCluster
    from superset.models.core import Database
    from superset.models.sql_lab import Query
    from superset.sql_parse import Table
    from superset.viz import BaseViz

logger = logging.getLogger(__name__)


class SupersetSecurityListWidget(ListWidget):
    """
    Redeclaring to avoid circular imports
    """

    template = "superset/fab_overrides/list.html"


class SupersetRoleListWidget(ListWidget):
    """
    Role model view from FAB already uses a custom list widget override
    So we override the override
    """

    template = "superset/fab_overrides/list_role.html"

    def __init__(self, **kwargs: Any) -> None:
        kwargs["appbuilder"] = current_app.appbuilder
        super().__init__(**kwargs)


UserModelView.list_widget = SupersetSecurityListWidget
RoleModelView.list_widget = SupersetRoleListWidget
PermissionViewModelView.list_widget = SupersetSecurityListWidget
PermissionModelView.list_widget = SupersetSecurityListWidget

# Limiting routes on FAB model views
UserModelView.include_route_methods = RouteMethod.CRUD_SET | {
    RouteMethod.ACTION,
    RouteMethod.API_READ,
    RouteMethod.ACTION_POST,
    "userinfo",
}
RoleModelView.include_route_methods = RouteMethod.CRUD_SET
PermissionViewModelView.include_route_methods = {RouteMethod.LIST}
PermissionModelView.include_route_methods = {RouteMethod.LIST}
ViewMenuModelView.include_route_methods = {RouteMethod.LIST}
RoleModelView.list_columns = ["name"]
RoleModelView.edit_columns = ["name", "permissions", "user"]
RoleModelView.related_views = []

class SupersetSecurityManager(  # pylint: disable=too-many-public-methods
    SecurityManager
):
    userstatschartview = None
    READ_ONLY_MODEL_VIEWS = {"DatabaseAsync", "DatabaseView", "DruidClusterModelView"}

    USER_MODEL_VIEWS = {
        "UserDBModelView",
        "UserLDAPModelView",
        "UserOAuthModelView",
        "UserOIDModelView",
        "UserRemoteUserModelView",
    }

    GAMMA_READ_ONLY_MODEL_VIEWS = {
        "SqlMetricInlineView",
        "TableColumnInlineView",
        "TableModelView",
        "DruidColumnInlineView",
        "DruidDatasourceModelView",
        "DruidMetricInlineView",
        "Datasource",
    } | READ_ONLY_MODEL_VIEWS

    ADMIN_ONLY_VIEW_MENUS = {
        "AccessRequestsModelView",
        "SQL Lab",
        "Refresh Druid Metadata",
        "ResetPasswordView",
        "RoleModelView",
        "LogModelView",
        "Security",
        "Row Level Security",
        "Row Level Security Filters",
        "RowLevelSecurityFiltersModelView",
    } | USER_MODEL_VIEWS

    ALPHA_ONLY_VIEW_MENUS = {
        "Manage",
        "CSS Templates",
        "Queries",
        "Import dashboards",
        "Upload a CSV",
    }

    ADMIN_ONLY_PERMISSIONS = {
        "can_sql_json",  # TODO: move can_sql_json to sql_lab role
        "can_override_role_permissions",
        "can_sync_druid_source",
        "can_approve",
        "can_update_role",
        "all_query_access",
    }

    READ_ONLY_PERMISSION = {"can_show", "can_list", "can_get", "can_external_metadata"}

    ALPHA_ONLY_PERMISSIONS = {
        "muldelete",
        "all_database_access",
        "all_datasource_access",
    }

    OBJECT_SPEC_PERMISSIONS = {
        "database_access",
        "schema_access",
        "datasource_access",
        "metric_access",
    }

    ACCESSIBLE_PERMS = {"can_userinfo"}

    data_access_permissions = (
        "database_access",
        "schema_access",
        "datasource_access",
        "all_datasource_access",
        "all_database_access",
        "all_query_access",
    )

    def get_schema_perm(  # pylint: disable=no-self-use
        self, database: Union["Database", str], schema: Optional[str] = None
    ) -> Optional[str]:
        """
        Return the database specific schema permission.

        :param database: The Superset database or database name
        :param schema: The Superset schema name
        :return: The database specific schema permission
        """

        if schema:
            return f"[{database}].[{schema}]"

        return None

    def unpack_schema_perm(  # pylint: disable=no-self-use
        self, schema_permission: str
    ) -> Tuple[str, str]:
        # [database_name].[schema_name]
        schema_name = schema_permission.split(".")[1][1:-1]
        database_name = schema_permission.split(".")[0][1:-1]
        return database_name, schema_name
    def can_access(self, permission_name: str, view_name: str) -> bool:
        """
        Return True if the user can access the FAB permission/view, False otherwise.

        Note this method adds protection from has_access failing from missing
        permission/view entries.

        :param permission_name: The FAB permission name
        :param view_name: The FAB view-menu name
        :returns: Whether the user can access the FAB permission/view
        """

        user = g.user
        if user.is_anonymous:
            return self.is_item_public(permission_name, view_name)
        return self._has_view_access(user, permission_name, view_name)

    def can_access_all_queries(self) -> bool:
        """
        Return True if the user can access all SQL Lab queries, False otherwise.

        :returns: Whether the user can access all queries
        """

        return self.can_access("all_query_access", "all_query_access")

    def can_access_all_datasources(self) -> bool:
        """
        Return True if the user can fully access all the Superset datasources, False
        otherwise.

        :returns: Whether the user can fully access all Superset datasources
        """

        return self.can_access("all_datasource_access", "all_datasource_access")

    def can_access_all_databases(self) -> bool:
        """
        Return True if the user can fully access all the Superset databases, False
        otherwise.

        :returns: Whether the user can fully access all Superset databases
        """

        return self.can_access("all_database_access", "all_database_access")

    def can_access_database(self, database: Union["Database", "DruidCluster"]) -> bool:
        """
        Return True if the user can fully access the Superset database, False otherwise.

        Note for Druid the database is akin to the Druid cluster.

        :param database: The Superset database
        :returns: Whether the user can fully access the Superset database
        """

        return (
            self.can_access_all_datasources()
            or self.can_access_all_databases()
            or self.can_access("database_access", database.perm)  # type: ignore
        )

    def can_access_schema(self, datasource: "BaseDatasource") -> bool:
        """
        Return True if the user can fully access the schema associated with the Superset
        datasource, False otherwise.

        Note for Druid datasources the database and schema are akin to the Druid cluster
        and datasource name prefix respectively, i.e., [schema.]datasource.

        :param datasource: The Superset datasource
        :returns: Whether the user can fully access the datasource's schema
        """

        return (
            self.can_access_all_datasources()
            or self.can_access_database(datasource.database)
            or self.can_access("schema_access", datasource.schema_perm or "")
        )

    def can_access_datasource(self, datasource: "BaseDatasource") -> bool:
        """
        Return True if the user can fully access the Superset datasource, False
        otherwise.

        :param datasource: The Superset datasource
        :returns: Whether the user can fully access the Superset datasource
        """

        try:
            self.raise_for_access(datasource=datasource)
        except SupersetSecurityException:
            return False

        return True

    @staticmethod
    def get_datasource_access_error_msg(datasource: "BaseDatasource") -> str:
        """
        Return the error message for the denied Superset datasource.

        :param datasource: The denied Superset datasource
        :returns: The error message
        """

        return f"""This endpoint requires the datasource {datasource.name}, database or
            `all_datasource_access` permission"""

    @staticmethod
    def get_datasource_access_link(  # pylint: disable=unused-argument
        datasource: "BaseDatasource",
    ) -> Optional[str]:
        """
        Return the link for the denied Superset datasource.

        :param datasource: The denied Superset datasource
        :returns: The access URL
        """

        from superset import conf

        return conf.get("PERMISSION_INSTRUCTIONS_LINK")

    def get_datasource_access_error_object(  # pylint: disable=invalid-name
        self, datasource: "BaseDatasource"
    ) -> SupersetError:
        """
        Return the error object for the denied Superset datasource.

        :param datasource: The denied Superset datasource
        :returns: The error object
        """
        return SupersetError(
            error_type=SupersetErrorType.DATASOURCE_SECURITY_ACCESS_ERROR,
            message=self.get_datasource_access_error_msg(datasource),
            level=ErrorLevel.ERROR,
            extra={
                "link": self.get_datasource_access_link(datasource),
                "datasource": datasource.name,
            },
        )

    def get_table_access_error_msg(  # pylint: disable=no-self-use
        self, tables: Set["Table"]
    ) -> str:
        """
        Return the error message for the denied SQL tables.

        :param tables: The set of denied SQL tables
        :returns: The error message
        """

        quoted_tables = [f"`{table}`" for table in tables]
        return f"""You need access to the following tables: {", ".join(quoted_tables)},
            `all_database_access` or `all_datasource_access` permission"""

    def get_table_access_error_object(self, tables: Set["Table"]) -> SupersetError:
        """
        Return the error object for the denied SQL tables.

        :param tables: The set of denied SQL tables
        :returns: The error object
        """
        return SupersetError(
            error_type=SupersetErrorType.TABLE_SECURITY_ACCESS_ERROR,
            message=self.get_table_access_error_msg(tables),
            level=ErrorLevel.ERROR,
            extra={
                "link": self.get_table_access_link(tables),
                "tables": [str(table) for table in tables],
            },
        )

    def get_table_access_link(  # pylint: disable=unused-argument,no-self-use
        self, tables: Set["Table"]
    ) -> Optional[str]:
        """
        Return the access link for the denied SQL tables.

        :param tables: The set of denied SQL tables
        :returns: The access URL
        """

        from superset import conf

        return conf.get("PERMISSION_INSTRUCTIONS_LINK")

    def can_access_table(self, database: "Database", table: "Table") -> bool:
        """
        Return True if the user can access the SQL table, False otherwise.

        :param database: The SQL database
        :param table: The SQL table
        :returns: Whether the user can access the SQL table
        """

        try:
            self.raise_for_access(database=database, table=table)
        except SupersetSecurityException:
            return False

        return True
    def user_view_menu_names(self, permission_name: str) -> Set[str]:
        base_query = (
            self.get_session.query(self.viewmenu_model.name)
            .join(self.permissionview_model)
            .join(self.permission_model)
            .join(assoc_permissionview_role)
            .join(self.role_model)
        )

        if not g.user.is_anonymous:
            # filter by user id
            view_menu_names = (
                base_query.join(assoc_user_role)
                .join(self.user_model)
                .filter(self.user_model.id == g.user.id)
                .filter(self.permission_model.name == permission_name)
            ).all()
            return {s.name for s in view_menu_names}

        # Properly treat anonymous user
        public_role = self.get_public_role()
        if public_role:
            # filter by public role
            view_menu_names = (
                base_query.filter(self.role_model.id == public_role.id).filter(
                    self.permission_model.name == permission_name
                )
            ).all()
            return {s.name for s in view_menu_names}
        return set()

    def get_schemas_accessible_by_user(
        self, database: "Database", schemas: List[str], hierarchical: bool = True
    ) -> List[str]:
        """
        Return the list of SQL schemas accessible by the user.

        :param database: The SQL database
        :param schemas: The list of eligible SQL schemas
        :param hierarchical: Whether to check using the hierarchical permission logic
        :returns: The list of accessible SQL schemas
        """

        from superset.connectors.sqla.models import SqlaTable

        if hierarchical and self.can_access_database(database):
            return schemas

        # schema_access
        accessible_schemas = {
            self.unpack_schema_perm(s)[1]
            for s in self.user_view_menu_names("schema_access")
            if s.startswith(f"[{database}].")
        }

        # datasource_access
        perms = self.user_view_menu_names("datasource_access")
        if perms:
            tables = (
                self.get_session.query(SqlaTable.schema)
                .filter(SqlaTable.database_id == database.id)
                .filter(SqlaTable.schema.isnot(None))
                .filter(SqlaTable.schema != "")
                .filter(or_(SqlaTable.perm.in_(perms)))
                .distinct()
            )
            accessible_schemas.update([table.schema for table in tables])

        return [s for s in schemas if s in accessible_schemas]

    def get_datasources_accessible_by_user(  # pylint: disable=invalid-name
        self,
        database: "Database",
        datasource_names: List[DatasourceName],
        schema: Optional[str] = None,
    ) -> List[DatasourceName]:
        """
        Return the list of SQL tables accessible by the user.

        :param database: The SQL database
        :param datasource_names: The list of eligible SQL tables w/ schema
        :param schema: The fallback SQL schema if not present in the table name
        :returns: The list of accessible SQL tables w/ schema
        """

        if self.can_access_database(database):
            return datasource_names

        if schema:
            schema_perm = self.get_schema_perm(database, schema)
            if schema_perm and self.can_access("schema_access", schema_perm):
                return datasource_names

        user_perms = self.user_view_menu_names("datasource_access")
        schema_perms = self.user_view_menu_names("schema_access")
        user_datasources = ConnectorRegistry.query_datasources_by_permissions(
            self.get_session, database, user_perms, schema_perms
        )
        if schema:
            names = {d.table_name for d in user_datasources if d.schema == schema}
            return [d for d in datasource_names if d in names]

        full_names = {d.full_name for d in user_datasources}
        return [d for d in datasource_names if f"[{database}].[{d}]" in full_names]
    def merge_perm(self, permission_name: str, view_menu_name: str) -> None:
        """
        Add the FAB permission/view-menu.

        :param permission_name: The FAB permission name
        :param view_menu_name: The FAB view-menu name
        :see: SecurityManager.add_permission_view_menu
        """

        logger.warning(
            "This method 'merge_perm' is deprecated use add_permission_view_menu"
        )
        self.add_permission_view_menu(permission_name, view_menu_name)

    def _is_user_defined_permission(self, perm: Model) -> bool:
        """
        Return True if the FAB permission is user defined, False otherwise.

        :param perm: The FAB permission
        :returns: Whether the FAB permission is user defined
        """

        return perm.permission.name in self.OBJECT_SPEC_PERMISSIONS

    def create_custom_permissions(self) -> None:
        """
        Create custom FAB permissions.
        """
        self.add_permission_view_menu("all_datasource_access", "all_datasource_access")
        self.add_permission_view_menu("all_database_access", "all_database_access")
        self.add_permission_view_menu("all_query_access", "all_query_access")

    def create_missing_perms(self) -> None:
        """
        Creates missing FAB permissions for datasources, schemas and metrics.
        """

        from superset.models import core as models

        logger.info("Fetching a set of all perms to lookup which ones are missing")
        all_pvs = set()
        for pv in self.get_session.query(self.permissionview_model).all():
            if pv.permission and pv.view_menu:
                all_pvs.add((pv.permission.name, pv.view_menu.name))

        def merge_pv(view_menu: str, perm: str) -> None:
            """Create permission view menu only if it doesn't exist"""
            if view_menu and perm and (view_menu, perm) not in all_pvs:
                self.add_permission_view_menu(view_menu, perm)

        logger.info("Creating missing datasource permissions.")
        datasources = ConnectorRegistry.get_all_datasources(self.get_session)
        for datasource in datasources:
            merge_pv("datasource_access", datasource.get_perm())
            merge_pv("schema_access", datasource.get_schema_perm())

        logger.info("Creating missing database permissions.")
        databases = self.get_session.query(models.Database).all()
        for database in databases:
            merge_pv("database_access", database.perm)

    def clean_perms(self) -> None:
        """
        Clean up the FAB faulty permissions.
        """
        logger.info("Cleaning faulty perms")
        sesh = self.get_session
        pvms = sesh.query(PermissionView).filter(
            or_(
                PermissionView.permission  # pylint: disable=singleton-comparison
                == None,
                PermissionView.view_menu  # pylint: disable=singleton-comparison
                == None,
            )
        )
        deleted_count = pvms.delete()
        sesh.commit()
        if deleted_count:
            logger.info("Deleted %i faulty permissions", deleted_count)

    def sync_role_definitions(self) -> None:
        """
        Initialize the Superset application with security roles and such.
        """

        from superset import conf

        logger.info("Syncing role definition")

        self.create_custom_permissions()

        # Creating default roles
        self.set_role("Admin", self._is_admin_pvm)
        self.set_role("Alpha", self._is_alpha_pvm)
        self.set_role("Gamma", self._is_gamma_pvm)
        self.set_role("granter", self._is_granter_pvm)
        self.set_role("sql_lab", self._is_sql_lab_pvm)

        # Configure public role
        if conf["PUBLIC_ROLE_LIKE"]:
            self.copy_role(conf["PUBLIC_ROLE_LIKE"], self.auth_role_public, merge=True)
        if conf.get("PUBLIC_ROLE_LIKE_GAMMA", False):
            logger.warning(
                "The config `PUBLIC_ROLE_LIKE_GAMMA` is deprecated and will be removed "
                "in Superset 1.0. Please use `PUBLIC_ROLE_LIKE` instead."
            )
            self.copy_role("Gamma", self.auth_role_public, merge=True)

        self.create_missing_perms()

        # commit role and view menu updates
        self.get_session.commit()
        self.clean_perms()
    def _get_pvms_from_builtin_role(self, role_name: str) -> List[PermissionView]:
        """
        Gets a list of model PermissionView permissions inferred from a builtin role
        definition
        """
        role_from_permissions_names = self.builtin_roles.get(role_name, [])
        all_pvms = self.get_session.query(PermissionView).all()
        role_from_permissions = []
        for pvm_regex in role_from_permissions_names:
            view_name_regex = pvm_regex[0]
            permission_name_regex = pvm_regex[1]
            for pvm in all_pvms:
                if re.match(view_name_regex, pvm.view_menu.name) and re.match(
                    permission_name_regex, pvm.permission.name
                ):
                    if pvm not in role_from_permissions:
                        role_from_permissions.append(pvm)
        return role_from_permissions

    def copy_role(
        self, role_from_name: str, role_to_name: str, merge: bool = True
    ) -> None:
        """
        Copies permissions from a role to another.

        Note: Supports regex defined builtin roles

        :param role_from_name: The FAB role name from where the permissions are taken
        :param role_to_name: The FAB role name to which the permissions are copied
        :param merge: If merge is true, keep data access permissions
            if they already exist on the target role
        """
        logger.info("Copy/Merge %s to %s", role_from_name, role_to_name)
        # If it's a builtin role extract permissions from it
        if role_from_name in self.builtin_roles:
            role_from_permissions = self._get_pvms_from_builtin_role(role_from_name)
        else:
            role_from_permissions = list(self.find_role(role_from_name).permissions)
        role_to = self.add_role(role_to_name)
        # If merge, recover existing data access permissions
        if merge:
            for permission_view in role_to.permissions:
                if (
                    permission_view not in role_from_permissions
                    and permission_view.permission.name in self.data_access_permissions
                ):
                    role_from_permissions.append(permission_view)
        role_to.permissions = role_from_permissions
        self.get_session.merge(role_to)
        self.get_session.commit()

    def set_role(
        self, role_name: str, pvm_check: Callable[[PermissionView], bool]
    ) -> None:
        """
        Set the FAB permission/views for the role.

        :param role_name: The FAB role name
        :param pvm_check: The FAB permission/view check
        """

        logger.info("Syncing %s perms", role_name)
        pvms = self.get_session.query(PermissionView).all()
        pvms = [p for p in pvms if p.permission and p.view_menu]
        role = self.add_role(role_name)
        role_pvms = [
            permission_view for permission_view in pvms if pvm_check(permission_view)
        ]
        role.permissions = role_pvms
        self.get_session.merge(role)
        self.get_session.commit()

    def _is_admin_only(self, pvm: Model) -> bool:
        """
        Return True if the FAB permission/view is accessible to only Admin users,
        False otherwise.

        Note readonly operations on read only model views are allowed only for admins.

        :param pvm: The FAB permission/view
        :returns: Whether the FAB object is accessible to only Admin users
        """

        if (
            pvm.view_menu.name in self.READ_ONLY_MODEL_VIEWS
            and pvm.permission.name not in self.READ_ONLY_PERMISSION
        ):
            return True
        return (
            pvm.view_menu.name in self.ADMIN_ONLY_VIEW_MENUS
            or pvm.permission.name in self.ADMIN_ONLY_PERMISSIONS
        )

    def _is_alpha_only(self, pvm: PermissionModelView) -> bool:
        """
        Return True if the FAB permission/view is accessible to only Alpha users,
        False otherwise.

        :param pvm: The FAB permission/view
        :returns: Whether the FAB object is accessible to only Alpha users
        """

        if (
            pvm.view_menu.name in self.GAMMA_READ_ONLY_MODEL_VIEWS
            and pvm.permission.name not in self.READ_ONLY_PERMISSION
        ):
            return True
        return (
            pvm.view_menu.name in self.ALPHA_ONLY_VIEW_MENUS
            or pvm.permission.name in self.ALPHA_ONLY_PERMISSIONS
        )

    def _is_accessible_to_all(self, pvm: PermissionModelView) -> bool:
        """
        Return True if the FAB permission/view is accessible to all, False
        otherwise.

        :param pvm: The FAB permission/view
        :returns: Whether the FAB object is accessible to all users
        """

        return pvm.permission.name in self.ACCESSIBLE_PERMS

    def _is_admin_pvm(self, pvm: PermissionModelView) -> bool:
        """
        Return True if the FAB permission/view is Admin user related, False
        otherwise.

        :param pvm: The FAB permission/view
        :returns: Whether the FAB object is Admin related
        """

        return not self._is_user_defined_permission(pvm)

    def _is_alpha_pvm(self, pvm: PermissionModelView) -> bool:
        """
        Return True if the FAB permission/view is Alpha user related, False
        otherwise.

        :param pvm: The FAB permission/view
        :returns: Whether the FAB object is Alpha related
        """

        return not (
            self._is_user_defined_permission(pvm) or self._is_admin_only(pvm)
        ) or self._is_accessible_to_all(pvm)

    def _is_gamma_pvm(self, pvm: PermissionModelView) -> bool:
        """
        Return True if the FAB permission/view is Gamma user related, False
        otherwise.

        :param pvm: The FAB permission/view
        :returns: Whether the FAB object is Gamma related
        """

        return not (
            self._is_user_defined_permission(pvm)
            or self._is_admin_only(pvm)
            or self._is_alpha_only(pvm)
        ) or self._is_accessible_to_all(pvm)

    def _is_sql_lab_pvm(self, pvm: PermissionModelView) -> bool:
        """
        Return True if the FAB permission/view is SQL Lab related, False
        otherwise.

        :param pvm: The FAB permission/view
        :returns: Whether the FAB object is SQL Lab related
        """

        return (
            pvm.view_menu.name
            in {"SQL Lab", "SQL Editor", "Query Search", "Saved Queries"}
            or pvm.permission.name
            in {
                "can_sql_json",
                "can_csv",
                "can_search_queries",
                "can_sqllab_viz",
                "can_sqllab_table_viz",
                "can_sqllab",
            }
            or (
                pvm.view_menu.name in self.USER_MODEL_VIEWS
                and pvm.permission.name == "can_list"
            )
        )

    def _is_granter_pvm(  # pylint: disable=no-self-use
        self, pvm: PermissionModelView
    ) -> bool:
        """
        Return True if the user can grant the FAB permission/view, False
        otherwise.

        :param pvm: The FAB permission/view
        :returns: Whether the user can grant the FAB permission/view
        """

        return pvm.permission.name in {"can_override_role_permissions", "can_approve"}
    def set_perm(  # pylint: disable=no-self-use,unused-argument
        self, mapper: Mapper, connection: Connection, target: "BaseDatasource"
    ) -> None:
        """
        Set the datasource permissions.

        :param mapper: The table mapper
        :param connection: The DB-API connection
        :param target: The mapped instance being persisted
        """
        link_table = target.__table__  # pylint: disable=no-member
        if target.perm != target.get_perm():
            connection.execute(
                link_table.update()
                .where(link_table.c.id == target.id)
                .values(perm=target.get_perm())
            )

        if (
            hasattr(target, "schema_perm")
            and target.schema_perm != target.get_schema_perm()
        ):
            connection.execute(
                link_table.update()
                .where(link_table.c.id == target.id)
                .values(schema_perm=target.get_schema_perm())
            )

        pvm_names = []
        if target.__tablename__ in {"dbs", "clusters"}:
            pvm_names.append(("database_access", target.get_perm()))
        else:
            pvm_names.append(("datasource_access", target.get_perm()))
            if target.schema:
                pvm_names.append(("schema_access", target.get_schema_perm()))

        # TODO(bogdan): modify slice permissions as well.
        for permission_name, view_menu_name in pvm_names:
            permission = self.find_permission(permission_name)
            view_menu = self.find_view_menu(view_menu_name)
            pv = None

            if not permission:
                permission_table = (
                    self.permission_model.__table__  # pylint: disable=no-member
                )
                connection.execute(
                    permission_table.insert().values(name=permission_name)
                )
                permission = self.find_permission(permission_name)
            if not view_menu:
                view_menu_table = (
                    self.viewmenu_model.__table__  # pylint: disable=no-member
                )
                connection.execute(view_menu_table.insert().values(name=view_menu_name))
                view_menu = self.find_view_menu(view_menu_name)

            if permission and view_menu:
                pv = (
                    self.get_session.query(self.permissionview_model)
                    .filter_by(permission=permission, view_menu=view_menu)
                    .first()
                )
            if not pv and permission and view_menu:
                permission_view_table = (
                    self.permissionview_model.__table__  # pylint: disable=no-member
                )
                connection.execute(
                    permission_view_table.insert().values(
                        permission_id=permission.id, view_menu_id=view_menu.id
                    )
                )
    def raise_for_access(  # pylint: disable=too-many-arguments,too-many-branches
        self,
        database: Optional["Database"] = None,
        datasource: Optional["BaseDatasource"] = None,
        query: Optional["Query"] = None,
        query_context: Optional["QueryContext"] = None,
        table: Optional["Table"] = None,
        viz: Optional["BaseViz"] = None,
    ) -> None:
        """
        Raise an exception if the user cannot access the resource.

        :param database: The Superset database
        :param datasource: The Superset datasource
        :param query: The SQL Lab query
        :param query_context: The query context
        :param table: The Superset table (requires database)
        :param viz: The visualization
        :raises SupersetSecurityException: If the user cannot access the resource
        """

        from superset.connectors.sqla.models import SqlaTable
        from superset.sql_parse import Table

        if (database and table) or query:
            if query:
                database = query.database

            database = cast("Database", database)

            if self.can_access_database(database):
                return

            if query:
                tables = {
                    Table(table_.table, table_.schema or query.schema)
                    for table_ in sql_parse.ParsedQuery(query.sql).tables
                }
            elif table:
                tables = {table}

            denied = set()

            for table_ in tables:
                schema_perm = self.get_schema_perm(database, schema=table_.schema)

                if not (schema_perm and self.can_access("schema_access", schema_perm)):
                    datasources = SqlaTable.query_datasources_by_name(
                        self.get_session, database, table_.table, schema=table_.schema
                    )

                    # Access to any datasource suffices.
                    for datasource_ in datasources:
                        if self.can_access("datasource_access", datasource_.perm):
                            break
                    else:
                        denied.add(table_)

            if denied:
                raise SupersetSecurityException(
                    self.get_table_access_error_object(denied)
                )

        if datasource or query_context or viz:
            if query_context:
                datasource = query_context.datasource
            elif viz:
                datasource = viz.datasource

            assert datasource

            if not (
                self.can_access_schema(datasource)
                or self.can_access("datasource_access", datasource.perm or "")
            ):
                raise SupersetSecurityException(
                    self.get_datasource_access_error_object(datasource)
                )
    def get_rls_filters(self, table: "BaseDatasource") -> List[SqlaQuery]:
        """
        Retrieves the appropriate row level security filters for the current user and
        the passed table.

        :param table: The table to check against
        :returns: A list of filters
        """

        if hasattr(g, "user") and hasattr(g.user, "id"):
            from superset.connectors.sqla.models import (
                RLSFilterRoles,
                RLSFilterTables,
                RowLevelSecurityFilter,
            )

            user_roles = (
                self.get_session.query(assoc_user_role.c.role_id)
                .filter(assoc_user_role.c.user_id == g.user.id)
                .subquery()
            )
            regular_filter_roles = (
                self.get_session.query(RLSFilterRoles.c.rls_filter_id)
                .join(RowLevelSecurityFilter)
                .filter(
                    RowLevelSecurityFilter.filter_type
                    == RowLevelSecurityFilterType.REGULAR
                )
                .filter(RLSFilterRoles.c.role_id.in_(user_roles))
                .subquery()
            )
            base_filter_roles = (
                self.get_session.query(RLSFilterRoles.c.rls_filter_id)
                .join(RowLevelSecurityFilter)
                .filter(
                    RowLevelSecurityFilter.filter_type
                    == RowLevelSecurityFilterType.BASE
                )
                .filter(RLSFilterRoles.c.role_id.in_(user_roles))
                .subquery()
            )
            filter_tables = (
                self.get_session.query(RLSFilterTables.c.rls_filter_id)
                .filter(RLSFilterTables.c.table_id == table.id)
                .subquery()
            )
            query = (
                self.get_session.query(
                    RowLevelSecurityFilter.id,
                    RowLevelSecurityFilter.group_key,
                    RowLevelSecurityFilter.clause,
                )
                .filter(RowLevelSecurityFilter.id.in_(filter_tables))
                .filter(
                    or_(
                        and_(
                            RowLevelSecurityFilter.filter_type
                            == RowLevelSecurityFilterType.REGULAR,
                            RowLevelSecurityFilter.id.in_(regular_filter_roles),
                        ),
                        and_(
                            RowLevelSecurityFilter.filter_type
                            == RowLevelSecurityFilterType.BASE,
                            RowLevelSecurityFilter.id.notin_(base_filter_roles),
                        ),
                    )
                )
            )
            return query.all()
        return []

    def get_rls_ids(self, table: "BaseDatasource") -> List[int]:
        """
        Retrieves the appropriate row level security filter IDs for the current user
        and the passed table.

        :param table: The table to check against
        :returns: A list of IDs
        """
        ids = [f.id for f in self.get_rls_filters(table)]
        ids.sort()  # Combinations rather than permutations
        return ids

clients/google-api-services-paymentsresellersubscription/v1/1.31.0/com/google/api/services/paymentsresellersubscription/v1/PaymentsResellerSubscription.java

/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.paymentsresellersubscription.v1;
/**
* Service definition for PaymentsResellerSubscription (v1).
*
* <p>
*
* </p>
*
* <p>
* For more information about this service, see the
* <a href="https://developers.google.com/payments/reseller/subscription/" target="_blank">API Documentation</a>
* </p>
*
* <p>
* This service uses {@link PaymentsResellerSubscriptionRequestInitializer} to initialize global parameters via its
* {@link Builder}.
* </p>
*
* @since 1.3
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public class PaymentsResellerSubscription extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient {
// Note: Leave this static initializer at the top of the file.
static {
com.google.api.client.util.Preconditions.checkState(
com.google.api.client.googleapis.GoogleUtils.MAJOR_VERSION == 1 &&
(com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION >= 32 ||
(com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION == 31 &&
com.google.api.client.googleapis.GoogleUtils.BUGFIX_VERSION >= 1)),
"You are currently running with version %s of google-api-client. " +
"You need at least version 1.31.1 of google-api-client to run version " +
"1.32.1 of the Payments Reseller Subscription API library.", com.google.api.client.googleapis.GoogleUtils.VERSION);
}
/**
* The default encoded root URL of the service. This is determined when the library is generated
* and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_ROOT_URL = "https://paymentsresellersubscription.googleapis.com/";
/**
* The default encoded mTLS root URL of the service. This is determined when the library is generated
* and normally should not be changed.
*
* @since 1.31
*/
public static final String DEFAULT_MTLS_ROOT_URL = "https://paymentsresellersubscription.mtls.googleapis.com/";
/**
* The default encoded service path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_SERVICE_PATH = "";
/**
* The default encoded batch path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.23
*/
public static final String DEFAULT_BATCH_PATH = "batch";
/**
* The default encoded base URL of the service. This is determined when the library is generated
* and normally should not be changed.
*/
public static final String DEFAULT_BASE_URL = DEFAULT_ROOT_URL + DEFAULT_SERVICE_PATH;
/**
* Constructor.
*
* <p>
* Use {@link Builder} if you need to specify any of the optional parameters.
* </p>
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public PaymentsResellerSubscription(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
this(new Builder(transport, jsonFactory, httpRequestInitializer));
}
/**
* @param builder builder
*/
PaymentsResellerSubscription(Builder builder) {
super(builder);
}
@Override
protected void initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest<?> httpClientRequest) throws java.io.IOException {
super.initialize(httpClientRequest);
}
/**
* An accessor for creating requests from the Partners collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code PaymentsResellerSubscription paymentsresellersubscription = new PaymentsResellerSubscription(...);}
* {@code PaymentsResellerSubscription.Partners.List request = paymentsresellersubscription.partners().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Partners partners() {
return new Partners();
}
/**
* The "partners" collection of methods.
*/
public class Partners {
/**
* An accessor for creating requests from the Products collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code PaymentsResellerSubscription paymentsresellersubscription = new PaymentsResellerSubscription(...);}
* {@code PaymentsResellerSubscription.Products.List request = paymentsresellersubscription.products().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Products products() {
return new Products();
}
/**
* The "products" collection of methods.
*/
public class Products {
/**
* Used by partners to list products that can be resold to their customers. It should be called
* directly by the partner using service accounts.
*
* Create a request for the method "products.list".
*
* This request holds the parameters needed by the paymentsresellersubscription server. After
* setting any optional parameters, call the {@link List#execute()} method to invoke the remote
* operation.
*
* @param parent Required. The parent, the partner that can resell. Format: partners/{partner}
* @return the request
*/
public List list(java.lang.String parent) throws java.io.IOException {
List result = new List(parent);
initialize(result);
return result;
}
public class List extends PaymentsResellerSubscriptionRequest<com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1ListProductsResponse> {
private static final String REST_PATH = "v1/{+parent}/products";
private final java.util.regex.Pattern PARENT_PATTERN =
java.util.regex.Pattern.compile("^partners/[^/]+$");
/**
* Used by partners to list products that can be resold to their customers. It should be called
* directly by the partner using service accounts.
*
* Create a request for the method "products.list".
*
* This request holds the parameters needed by the paymentsresellersubscription server. After
* setting any optional parameters, call the {@link List#execute()} method to invoke the remote
* operation. <p> {@link
* List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param parent Required. The parent, the partner that can resell. Format: partners/{partner}
* @since 1.13
*/
protected List(java.lang.String parent) {
super(PaymentsResellerSubscription.this, "GET", REST_PATH, null, com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1ListProductsResponse.class);
this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^partners/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** Required. The parent, the partner that can resell. Format: partners/{partner} */
@com.google.api.client.util.Key
private java.lang.String parent;
/** Required. The parent, the partner that can resell. Format: partners/{partner}
*/
public java.lang.String getParent() {
return parent;
}
/** Required. The parent, the partner that can resell. Format: partners/{partner} */
public List setParent(java.lang.String parent) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^partners/[^/]+$");
}
this.parent = parent;
return this;
}
/**
* Optional. The maximum number of products to return. The service may return fewer than
* this value. If unspecified, at most 50 products will be returned. The maximum value is
* 1000; values above 1000 will be coerced to 1000.
*/
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** Optional. The maximum number of products to return. The service may return fewer than this value.
If unspecified, at most 50 products will be returned. The maximum value is 1000; values above 1000
will be coerced to 1000.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/**
* Optional. The maximum number of products to return. The service may return fewer than
* this value. If unspecified, at most 50 products will be returned. The maximum value is
* 1000; values above 1000 will be coerced to 1000.
*/
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/**
* Optional. A page token, received from a previous `ListProducts` call. Provide this to
* retrieve the subsequent page. When paginating, all other parameters provided to
* `ListProducts` must match the call that provided the page token.
*/
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** Optional. A page token, received from a previous `ListProducts` call. Provide this to retrieve the
subsequent page. When paginating, all other parameters provided to `ListProducts` must match the
call that provided the page token.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/**
* Optional. A page token, received from a previous `ListProducts` call. Provide this to
* retrieve the subsequent page. When paginating, all other parameters provided to
* `ListProducts` must match the call that provided the page token.
*/
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
}
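  /*
   * Illustrative sketch (not part of the generated client): paginating through
   * all products with pageSize/pageToken. The response accessors getProducts()
   * and getNextPageToken() are assumed from the ListProductsResponse model
   * (not shown here), and "partners/example-partner" is a placeholder parent.
   *
   *   PaymentsResellerSubscription.Products.List request =
   *       paymentsresellersubscription.products().list("partners/example-partner").setPageSize(50);
   *   String pageToken;
   *   do {
   *     GoogleCloudPaymentsResellerSubscriptionV1ListProductsResponse response = request.execute();
   *     // consume response.getProducts() here
   *     pageToken = response.getNextPageToken();
   *     request.setPageToken(pageToken);
   *   } while (pageToken != null && !pageToken.isEmpty());
   */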
/**
* An accessor for creating requests from the Promotions collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code PaymentsResellerSubscription paymentsresellersubscription = new PaymentsResellerSubscription(...);}
* {@code PaymentsResellerSubscription.Promotions.List request = paymentsresellersubscription.promotions().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Promotions promotions() {
return new Promotions();
}
/**
* The "promotions" collection of methods.
*/
public class Promotions {
/**
   * Used by partners to list promotions, such as free trials, that can be applied to subscriptions. It
* should be called directly by the partner using service accounts.
*
* Create a request for the method "promotions.list".
*
* This request holds the parameters needed by the paymentsresellersubscription server. After
* setting any optional parameters, call the {@link List#execute()} method to invoke the remote
* operation.
*
* @param parent Required. The parent, the partner that can resell. Format: partners/{partner}
* @return the request
*/
public List list(java.lang.String parent) throws java.io.IOException {
List result = new List(parent);
initialize(result);
return result;
}
public class List extends PaymentsResellerSubscriptionRequest<com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1ListPromotionsResponse> {
private static final String REST_PATH = "v1/{+parent}/promotions";
private final java.util.regex.Pattern PARENT_PATTERN =
java.util.regex.Pattern.compile("^partners/[^/]+$");
/**
       * Used by partners to list promotions, such as free trials, that can be applied to subscriptions.
* It should be called directly by the partner using service accounts.
*
* Create a request for the method "promotions.list".
*
       * This request holds the parameters needed by the paymentsresellersubscription server. After
* setting any optional parameters, call the {@link List#execute()} method to invoke the remote
* operation. <p> {@link
* List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param parent Required. The parent, the partner that can resell. Format: partners/{partner}
* @since 1.13
*/
protected List(java.lang.String parent) {
super(PaymentsResellerSubscription.this, "GET", REST_PATH, null, com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1ListPromotionsResponse.class);
this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^partners/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** Required. The parent, the partner that can resell. Format: partners/{partner} */
@com.google.api.client.util.Key
private java.lang.String parent;
/** Required. The parent, the partner that can resell. Format: partners/{partner}
*/
public java.lang.String getParent() {
return parent;
}
/** Required. The parent, the partner that can resell. Format: partners/{partner} */
public List setParent(java.lang.String parent) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^partners/[^/]+$");
}
this.parent = parent;
return this;
}
/**
* Optional. Specifies the filters for the promotion results. The syntax defined in the EBNF
* grammar: https://google.aip.dev/assets/misc/ebnf-filtering.txt. Examples: -
* applicable_products: "sku1" - region_codes: "US" - applicable_products: "sku1" AND
* region_codes: "US"
*/
@com.google.api.client.util.Key
private java.lang.String filter;
/** Optional. Specifies the filters for the promotion results. The syntax defined in the EBNF grammar:
https://google.aip.dev/assets/misc/ebnf-filtering.txt. Examples: - applicable_products: "sku1" -
region_codes: "US" - applicable_products: "sku1" AND region_codes: "US"
*/
public java.lang.String getFilter() {
return filter;
}
/**
* Optional. Specifies the filters for the promotion results. The syntax defined in the EBNF
* grammar: https://google.aip.dev/assets/misc/ebnf-filtering.txt. Examples: -
* applicable_products: "sku1" - region_codes: "US" - applicable_products: "sku1" AND
* region_codes: "US"
*/
public List setFilter(java.lang.String filter) {
this.filter = filter;
return this;
}
/**
* Optional. The maximum number of promotions to return. The service may return fewer than
           * this value. If unspecified, at most 50 promotions will be returned. The maximum value is
* 1000; values above 1000 will be coerced to 1000.
*/
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** Optional. The maximum number of promotions to return. The service may return fewer than this value.
         If unspecified, at most 50 promotions will be returned. The maximum value is 1000; values above 1000
will be coerced to 1000.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/**
* Optional. The maximum number of promotions to return. The service may return fewer than
           * this value. If unspecified, at most 50 promotions will be returned. The maximum value is
* 1000; values above 1000 will be coerced to 1000.
*/
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/**
* Optional. A page token, received from a previous `ListPromotions` call. Provide this to
* retrieve the subsequent page. When paginating, all other parameters provided to
* `ListPromotions` must match the call that provided the page token.
*/
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** Optional. A page token, received from a previous `ListPromotions` call. Provide this to retrieve
the subsequent page. When paginating, all other parameters provided to `ListPromotions` must match
the call that provided the page token.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/**
* Optional. A page token, received from a previous `ListPromotions` call. Provide this to
* retrieve the subsequent page. When paginating, all other parameters provided to
* `ListPromotions` must match the call that provided the page token.
*/
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
}
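  /*
   * Illustrative sketch (not part of the generated client): listing promotions
   * with a filter. The filter strings follow the EBNF grammar referenced in the
   * setFilter(...) documentation; "partners/example-partner" is a placeholder.
   *
   *   GoogleCloudPaymentsResellerSubscriptionV1ListPromotionsResponse response =
   *       paymentsresellersubscription.promotions()
   *           .list("partners/example-partner")
   *           .setFilter("applicable_products: \"sku1\" AND region_codes: \"US\"")
   *           .execute();
   */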
/**
* An accessor for creating requests from the Subscriptions collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code PaymentsResellerSubscription paymentsresellersubscription = new PaymentsResellerSubscription(...);}
* {@code PaymentsResellerSubscription.Subscriptions.List request = paymentsresellersubscription.subscriptions().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Subscriptions subscriptions() {
return new Subscriptions();
}
/**
* The "subscriptions" collection of methods.
*/
public class Subscriptions {
/**
* Used by partners to cancel a subscription service either immediately or by the end of the current
* billing cycle for their customers. It should be called directly by the partner using service
* accounts.
*
* Create a request for the method "subscriptions.cancel".
*
* This request holds the parameters needed by the paymentsresellersubscription server. After
* setting any optional parameters, call the {@link Cancel#execute()} method to invoke the remote
* operation.
*
* @param name Required. The name of the subscription resource to be cancelled. It will have the format of
* "partners/{partner_id}/subscriptions/{subscription_id}"
* @param content the {@link com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1CancelSubscriptionRequest}
* @return the request
*/
public Cancel cancel(java.lang.String name, com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1CancelSubscriptionRequest content) throws java.io.IOException {
Cancel result = new Cancel(name, content);
initialize(result);
return result;
}
public class Cancel extends PaymentsResellerSubscriptionRequest<com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1CancelSubscriptionResponse> {
private static final String REST_PATH = "v1/{+name}:cancel";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^partners/[^/]+/subscriptions/[^/]+$");
/**
* Used by partners to cancel a subscription service either immediately or by the end of the
* current billing cycle for their customers. It should be called directly by the partner using
* service accounts.
*
* Create a request for the method "subscriptions.cancel".
*
       * This request holds the parameters needed by the paymentsresellersubscription server. After
* setting any optional parameters, call the {@link Cancel#execute()} method to invoke the remote
* operation. <p> {@link
* Cancel#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Required. The name of the subscription resource to be cancelled. It will have the format of
* "partners/{partner_id}/subscriptions/{subscription_id}"
* @param content the {@link com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1CancelSubscriptionRequest}
* @since 1.13
*/
protected Cancel(java.lang.String name, com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1CancelSubscriptionRequest content) {
super(PaymentsResellerSubscription.this, "POST", REST_PATH, content, com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1CancelSubscriptionResponse.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^partners/[^/]+/subscriptions/[^/]+$");
}
}
@Override
public Cancel set$Xgafv(java.lang.String $Xgafv) {
return (Cancel) super.set$Xgafv($Xgafv);
}
@Override
public Cancel setAccessToken(java.lang.String accessToken) {
return (Cancel) super.setAccessToken(accessToken);
}
@Override
public Cancel setAlt(java.lang.String alt) {
return (Cancel) super.setAlt(alt);
}
@Override
public Cancel setCallback(java.lang.String callback) {
return (Cancel) super.setCallback(callback);
}
@Override
public Cancel setFields(java.lang.String fields) {
return (Cancel) super.setFields(fields);
}
@Override
public Cancel setKey(java.lang.String key) {
return (Cancel) super.setKey(key);
}
@Override
public Cancel setOauthToken(java.lang.String oauthToken) {
return (Cancel) super.setOauthToken(oauthToken);
}
@Override
public Cancel setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Cancel) super.setPrettyPrint(prettyPrint);
}
@Override
public Cancel setQuotaUser(java.lang.String quotaUser) {
return (Cancel) super.setQuotaUser(quotaUser);
}
@Override
public Cancel setUploadType(java.lang.String uploadType) {
return (Cancel) super.setUploadType(uploadType);
}
@Override
public Cancel setUploadProtocol(java.lang.String uploadProtocol) {
return (Cancel) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. The name of the subscription resource to be cancelled. It will have the format
* of "partners/{partner_id}/subscriptions/{subscription_id}"
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. The name of the subscription resource to be cancelled. It will have the format of
"partners/{partner_id}/subscriptions/{subscription_id}"
*/
public java.lang.String getName() {
return name;
}
/**
* Required. The name of the subscription resource to be cancelled. It will have the format
* of "partners/{partner_id}/subscriptions/{subscription_id}"
*/
public Cancel setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^partners/[^/]+/subscriptions/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Cancel set(String parameterName, Object value) {
return (Cancel) super.set(parameterName, value);
}
}
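    /*
     * Illustrative sketch (not part of the generated client): cancelling a
     * subscription. The empty request body is a simplification - the real
     * request model (not shown here) carries the cancellation details - and the
     * resource name is a placeholder.
     *
     *   GoogleCloudPaymentsResellerSubscriptionV1CancelSubscriptionResponse response =
     *       paymentsresellersubscription.subscriptions()
     *           .cancel("partners/example-partner/subscriptions/example-subscription",
     *               new GoogleCloudPaymentsResellerSubscriptionV1CancelSubscriptionRequest())
     *           .execute();
     */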
/**
* Used by partners to create a subscription for their customers. The created subscription is
* associated with the end user inferred from the end user credentials. This API must be authorized
* by the end user using OAuth.
*
* Create a request for the method "subscriptions.create".
*
* This request holds the parameters needed by the paymentsresellersubscription server. After
* setting any optional parameters, call the {@link Create#execute()} method to invoke the remote
* operation.
*
* @param parent Required. The parent resource name, which is the identifier of the partner. It will have the format
* of "partners/{partner_id}".
* @param content the {@link com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1Subscription}
* @return the request
*/
public Create create(java.lang.String parent, com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1Subscription content) throws java.io.IOException {
Create result = new Create(parent, content);
initialize(result);
return result;
}
public class Create extends PaymentsResellerSubscriptionRequest<com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1Subscription> {
private static final String REST_PATH = "v1/{+parent}/subscriptions";
private final java.util.regex.Pattern PARENT_PATTERN =
java.util.regex.Pattern.compile("^partners/[^/]+$");
/**
* Used by partners to create a subscription for their customers. The created subscription is
* associated with the end user inferred from the end user credentials. This API must be
* authorized by the end user using OAuth.
*
* Create a request for the method "subscriptions.create".
*
       * This request holds the parameters needed by the paymentsresellersubscription server. After
* setting any optional parameters, call the {@link Create#execute()} method to invoke the remote
* operation. <p> {@link
* Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param parent Required. The parent resource name, which is the identifier of the partner. It will have the format
* of "partners/{partner_id}".
* @param content the {@link com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1Subscription}
* @since 1.13
*/
protected Create(java.lang.String parent, com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1Subscription content) {
super(PaymentsResellerSubscription.this, "POST", REST_PATH, content, com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1Subscription.class);
this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^partners/[^/]+$");
}
}
@Override
public Create set$Xgafv(java.lang.String $Xgafv) {
return (Create) super.set$Xgafv($Xgafv);
}
@Override
public Create setAccessToken(java.lang.String accessToken) {
return (Create) super.setAccessToken(accessToken);
}
@Override
public Create setAlt(java.lang.String alt) {
return (Create) super.setAlt(alt);
}
@Override
public Create setCallback(java.lang.String callback) {
return (Create) super.setCallback(callback);
}
@Override
public Create setFields(java.lang.String fields) {
return (Create) super.setFields(fields);
}
@Override
public Create setKey(java.lang.String key) {
return (Create) super.setKey(key);
}
@Override
public Create setOauthToken(java.lang.String oauthToken) {
return (Create) super.setOauthToken(oauthToken);
}
@Override
public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Create) super.setPrettyPrint(prettyPrint);
}
@Override
public Create setQuotaUser(java.lang.String quotaUser) {
return (Create) super.setQuotaUser(quotaUser);
}
@Override
public Create setUploadType(java.lang.String uploadType) {
return (Create) super.setUploadType(uploadType);
}
@Override
public Create setUploadProtocol(java.lang.String uploadProtocol) {
return (Create) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. The parent resource name, which is the identifier of the partner. It will have
* the format of "partners/{partner_id}".
*/
@com.google.api.client.util.Key
private java.lang.String parent;
/** Required. The parent resource name, which is the identifier of the partner. It will have the format
of "partners/{partner_id}".
*/
public java.lang.String getParent() {
return parent;
}
/**
* Required. The parent resource name, which is the identifier of the partner. It will have
* the format of "partners/{partner_id}".
*/
public Create setParent(java.lang.String parent) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^partners/[^/]+$");
}
this.parent = parent;
return this;
}
/**
* Required. Identifies the subscription resource on the Partner side. The value is
* restricted to 63 ASCII characters at the maximum. If a subscription was previously
* created with the same subscription_id, we will directly return that one.
*/
@com.google.api.client.util.Key
private java.lang.String subscriptionId;
/** Required. Identifies the subscription resource on the Partner side. The value is restricted to 63
ASCII characters at the maximum. If a subscription was previously created with the same
subscription_id, we will directly return that one.
*/
public java.lang.String getSubscriptionId() {
return subscriptionId;
}
/**
* Required. Identifies the subscription resource on the Partner side. The value is
* restricted to 63 ASCII characters at the maximum. If a subscription was previously
* created with the same subscription_id, we will directly return that one.
*/
public Create setSubscriptionId(java.lang.String subscriptionId) {
this.subscriptionId = subscriptionId;
return this;
}
@Override
public Create set(String parameterName, Object value) {
return (Create) super.set(parameterName, value);
}
}
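    /*
     * Illustrative sketch (not part of the generated client): creating a
     * subscription for the current end user. Per the method documentation the
     * call must be authorized with the end user's OAuth credential, and
     * subscriptionId is idempotent (re-using the same id returns the existing
     * subscription). The payload and names below are placeholders.
     *
     *   GoogleCloudPaymentsResellerSubscriptionV1Subscription created =
     *       paymentsresellersubscription.subscriptions()
     *           .create("partners/example-partner",
     *               new GoogleCloudPaymentsResellerSubscriptionV1Subscription())
     *           .setSubscriptionId("example-subscription")
     *           .execute();
     */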
/**
* Used by partners to entitle a previously provisioned subscription to the current end user. The
* end user identity is inferred from the authorized credential of the request. This API must be
* authorized by the end user using OAuth.
*
* Create a request for the method "subscriptions.entitle".
*
* This request holds the parameters needed by the paymentsresellersubscription server. After
* setting any optional parameters, call the {@link Entitle#execute()} method to invoke the remote
* operation.
*
* @param name Required. The name of the subscription resource that is entitled to the current end user. It will
* have the format of "partners/{partner_id}/subscriptions/{subscription_id}"
* @param content the {@link com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1EntitleSubscriptionRequest}
* @return the request
*/
public Entitle entitle(java.lang.String name, com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1EntitleSubscriptionRequest content) throws java.io.IOException {
Entitle result = new Entitle(name, content);
initialize(result);
return result;
}
public class Entitle extends PaymentsResellerSubscriptionRequest<com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1EntitleSubscriptionResponse> {
private static final String REST_PATH = "v1/{+name}:entitle";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^partners/[^/]+/subscriptions/[^/]+$");
/**
* Used by partners to entitle a previously provisioned subscription to the current end user. The
* end user identity is inferred from the authorized credential of the request. This API must be
* authorized by the end user using OAuth.
*
* Create a request for the method "subscriptions.entitle".
*
       * This request holds the parameters needed by the paymentsresellersubscription server. After
* setting any optional parameters, call the {@link Entitle#execute()} method to invoke the remote
* operation. <p> {@link
* Entitle#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Required. The name of the subscription resource that is entitled to the current end user. It will
* have the format of "partners/{partner_id}/subscriptions/{subscription_id}"
* @param content the {@link com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1EntitleSubscriptionRequest}
* @since 1.13
*/
protected Entitle(java.lang.String name, com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1EntitleSubscriptionRequest content) {
super(PaymentsResellerSubscription.this, "POST", REST_PATH, content, com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1EntitleSubscriptionResponse.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^partners/[^/]+/subscriptions/[^/]+$");
}
}
@Override
public Entitle set$Xgafv(java.lang.String $Xgafv) {
return (Entitle) super.set$Xgafv($Xgafv);
}
@Override
public Entitle setAccessToken(java.lang.String accessToken) {
return (Entitle) super.setAccessToken(accessToken);
}
@Override
public Entitle setAlt(java.lang.String alt) {
return (Entitle) super.setAlt(alt);
}
@Override
public Entitle setCallback(java.lang.String callback) {
return (Entitle) super.setCallback(callback);
}
@Override
public Entitle setFields(java.lang.String fields) {
return (Entitle) super.setFields(fields);
}
@Override
public Entitle setKey(java.lang.String key) {
return (Entitle) super.setKey(key);
}
@Override
public Entitle setOauthToken(java.lang.String oauthToken) {
return (Entitle) super.setOauthToken(oauthToken);
}
@Override
public Entitle setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Entitle) super.setPrettyPrint(prettyPrint);
}
@Override
public Entitle setQuotaUser(java.lang.String quotaUser) {
return (Entitle) super.setQuotaUser(quotaUser);
}
@Override
public Entitle setUploadType(java.lang.String uploadType) {
return (Entitle) super.setUploadType(uploadType);
}
@Override
public Entitle setUploadProtocol(java.lang.String uploadProtocol) {
return (Entitle) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. The name of the subscription resource that is entitled to the current end user.
* It will have the format of "partners/{partner_id}/subscriptions/{subscription_id}"
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. The name of the subscription resource that is entitled to the current end user. It will
have the format of "partners/{partner_id}/subscriptions/{subscription_id}"
*/
public java.lang.String getName() {
return name;
}
/**
* Required. The name of the subscription resource that is entitled to the current end user.
* It will have the format of "partners/{partner_id}/subscriptions/{subscription_id}"
*/
public Entitle setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^partners/[^/]+/subscriptions/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Entitle set(String parameterName, Object value) {
return (Entitle) super.set(parameterName, value);
}
}
/**
* Used by partners to extend a subscription service for their customers. It should be called
* directly by the partner using service accounts.
*
* Create a request for the method "subscriptions.extend".
*
* This request holds the parameters needed by the paymentsresellersubscription server. After
* setting any optional parameters, call the {@link Extend#execute()} method to invoke the remote
* operation.
*
* @param name Required. The name of the subscription resource to be extended. It will have the format of
* "partners/{partner_id}/subscriptions/{subscription_id}".
* @param content the {@link com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1ExtendSubscriptionRequest}
* @return the request
*/
public Extend extend(java.lang.String name, com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1ExtendSubscriptionRequest content) throws java.io.IOException {
Extend result = new Extend(name, content);
initialize(result);
return result;
}
public class Extend extends PaymentsResellerSubscriptionRequest<com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1ExtendSubscriptionResponse> {
private static final String REST_PATH = "v1/{+name}:extend";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^partners/[^/]+/subscriptions/[^/]+$");
/**
* Used by partners to extend a subscription service for their customers. It should be called
* directly by the partner using service accounts.
*
* Create a request for the method "subscriptions.extend".
*
       * This request holds the parameters needed by the paymentsresellersubscription server. After
* setting any optional parameters, call the {@link Extend#execute()} method to invoke the remote
* operation. <p> {@link
* Extend#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Required. The name of the subscription resource to be extended. It will have the format of
* "partners/{partner_id}/subscriptions/{subscription_id}".
* @param content the {@link com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1ExtendSubscriptionRequest}
* @since 1.13
*/
protected Extend(java.lang.String name, com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1ExtendSubscriptionRequest content) {
super(PaymentsResellerSubscription.this, "POST", REST_PATH, content, com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1ExtendSubscriptionResponse.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^partners/[^/]+/subscriptions/[^/]+$");
}
}
@Override
public Extend set$Xgafv(java.lang.String $Xgafv) {
return (Extend) super.set$Xgafv($Xgafv);
}
@Override
public Extend setAccessToken(java.lang.String accessToken) {
return (Extend) super.setAccessToken(accessToken);
}
@Override
public Extend setAlt(java.lang.String alt) {
return (Extend) super.setAlt(alt);
}
@Override
public Extend setCallback(java.lang.String callback) {
return (Extend) super.setCallback(callback);
}
@Override
public Extend setFields(java.lang.String fields) {
return (Extend) super.setFields(fields);
}
@Override
public Extend setKey(java.lang.String key) {
return (Extend) super.setKey(key);
}
@Override
public Extend setOauthToken(java.lang.String oauthToken) {
return (Extend) super.setOauthToken(oauthToken);
}
@Override
public Extend setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Extend) super.setPrettyPrint(prettyPrint);
}
@Override
public Extend setQuotaUser(java.lang.String quotaUser) {
return (Extend) super.setQuotaUser(quotaUser);
}
@Override
public Extend setUploadType(java.lang.String uploadType) {
return (Extend) super.setUploadType(uploadType);
}
@Override
public Extend setUploadProtocol(java.lang.String uploadProtocol) {
return (Extend) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. The name of the subscription resource to be extended. It will have the format
* of "partners/{partner_id}/subscriptions/{subscription_id}".
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. The name of the subscription resource to be extended. It will have the format of
"partners/{partner_id}/subscriptions/{subscription_id}".
*/
public java.lang.String getName() {
return name;
}
/**
* Required. The name of the subscription resource to be extended. It will have the format
* of "partners/{partner_id}/subscriptions/{subscription_id}".
*/
public Extend setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^partners/[^/]+/subscriptions/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Extend set(String parameterName, Object value) {
return (Extend) super.set(parameterName, value);
}
}
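    /*
     * Illustrative sketch (not part of the generated client): extending a
     * subscription via a service-account-authorized client. The extension
     * details carried by the request model are omitted (the model class is not
     * shown here); names are placeholders.
     *
     *   GoogleCloudPaymentsResellerSubscriptionV1ExtendSubscriptionResponse response =
     *       paymentsresellersubscription.subscriptions()
     *           .extend("partners/example-partner/subscriptions/example-subscription",
     *               new GoogleCloudPaymentsResellerSubscriptionV1ExtendSubscriptionRequest())
     *           .execute();
     */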
/**
* Used by partners to get a subscription by id. It should be called directly by the partner using
* service accounts.
*
* Create a request for the method "subscriptions.get".
*
* This request holds the parameters needed by the paymentsresellersubscription server. After
* setting any optional parameters, call the {@link Get#execute()} method to invoke the remote
* operation.
*
* @param name Required. The name of the subscription resource to retrieve. It will have the format of
* "partners/{partner_id}/subscriptions/{subscription_id}"
* @return the request
*/
public Get get(java.lang.String name) throws java.io.IOException {
Get result = new Get(name);
initialize(result);
return result;
}
public class Get extends PaymentsResellerSubscriptionRequest<com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1Subscription> {
private static final String REST_PATH = "v1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^partners/[^/]+/subscriptions/[^/]+$");
/**
* Used by partners to get a subscription by id. It should be called directly by the partner using
* service accounts.
*
* Create a request for the method "subscriptions.get".
*
       * This request holds the parameters needed by the paymentsresellersubscription server. After
* setting any optional parameters, call the {@link Get#execute()} method to invoke the remote
* operation. <p> {@link
* Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Required. The name of the subscription resource to retrieve. It will have the format of
* "partners/{partner_id}/subscriptions/{subscription_id}"
* @since 1.13
*/
protected Get(java.lang.String name) {
super(PaymentsResellerSubscription.this, "GET", REST_PATH, null, com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1Subscription.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^partners/[^/]+/subscriptions/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. The name of the subscription resource to retrieve. It will have the format of
* "partners/{partner_id}/subscriptions/{subscription_id}"
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. The name of the subscription resource to retrieve. It will have the format of
"partners/{partner_id}/subscriptions/{subscription_id}"
*/
public java.lang.String getName() {
return name;
}
/**
* Required. The name of the subscription resource to retrieve. It will have the format of
* "partners/{partner_id}/subscriptions/{subscription_id}"
*/
public Get setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^partners/[^/]+/subscriptions/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Used by partners to provision a subscription for their customers. This creates a subscription
* without associating it with the end user account. EntitleSubscription must be called separately
* using OAuth in order for the end user account to be associated with the subscription. It should
* be called directly by the partner using service accounts.
*
* Create a request for the method "subscriptions.provision".
*
* This request holds the parameters needed by the paymentsresellersubscription server. After
* setting any optional parameters, call the {@link Provision#execute()} method to invoke the remote
* operation.
*
* @param parent Required. The parent resource name, which is the identifier of the partner. It will have the format
* of "partners/{partner_id}".
* @param content the {@link com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1Subscription}
* @return the request
*/
public Provision provision(java.lang.String parent, com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1Subscription content) throws java.io.IOException {
Provision result = new Provision(parent, content);
initialize(result);
return result;
}
public class Provision extends PaymentsResellerSubscriptionRequest<com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1Subscription> {
private static final String REST_PATH = "v1/{+parent}/subscriptions:provision";
private final java.util.regex.Pattern PARENT_PATTERN =
java.util.regex.Pattern.compile("^partners/[^/]+$");
/**
* Used by partners to provision a subscription for their customers. This creates a subscription
* without associating it with the end user account. EntitleSubscription must be called separately
* using OAuth in order for the end user account to be associated with the subscription. It should
* be called directly by the partner using service accounts.
*
* Create a request for the method "subscriptions.provision".
*
       * This request holds the parameters needed by the paymentsresellersubscription server. After
* setting any optional parameters, call the {@link Provision#execute()} method to invoke the
* remote operation. <p> {@link
* Provision#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param parent Required. The parent resource name, which is the identifier of the partner. It will have the format
* of "partners/{partner_id}".
* @param content the {@link com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1Subscription}
* @since 1.13
*/
protected Provision(java.lang.String parent, com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1Subscription content) {
super(PaymentsResellerSubscription.this, "POST", REST_PATH, content, com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1Subscription.class);
this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^partners/[^/]+$");
}
}
@Override
public Provision set$Xgafv(java.lang.String $Xgafv) {
return (Provision) super.set$Xgafv($Xgafv);
}
@Override
public Provision setAccessToken(java.lang.String accessToken) {
return (Provision) super.setAccessToken(accessToken);
}
@Override
public Provision setAlt(java.lang.String alt) {
return (Provision) super.setAlt(alt);
}
@Override
public Provision setCallback(java.lang.String callback) {
return (Provision) super.setCallback(callback);
}
@Override
public Provision setFields(java.lang.String fields) {
return (Provision) super.setFields(fields);
}
@Override
public Provision setKey(java.lang.String key) {
return (Provision) super.setKey(key);
}
@Override
public Provision setOauthToken(java.lang.String oauthToken) {
return (Provision) super.setOauthToken(oauthToken);
}
@Override
public Provision setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Provision) super.setPrettyPrint(prettyPrint);
}
@Override
public Provision setQuotaUser(java.lang.String quotaUser) {
return (Provision) super.setQuotaUser(quotaUser);
}
@Override
public Provision setUploadType(java.lang.String uploadType) {
return (Provision) super.setUploadType(uploadType);
}
@Override
public Provision setUploadProtocol(java.lang.String uploadProtocol) {
return (Provision) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. The parent resource name, which is the identifier of the partner. It will have
* the format of "partners/{partner_id}".
*/
@com.google.api.client.util.Key
private java.lang.String parent;
/** Required. The parent resource name, which is the identifier of the partner. It will have the format
of "partners/{partner_id}".
*/
public java.lang.String getParent() {
return parent;
}
/**
* Required. The parent resource name, which is the identifier of the partner. It will have
* the format of "partners/{partner_id}".
*/
public Provision setParent(java.lang.String parent) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^partners/[^/]+$");
}
this.parent = parent;
return this;
}
/**
* Required. Identifies the subscription resource on the Partner side. The value is
* restricted to 63 ASCII characters at the maximum. If a subscription was previously
* created with the same subscription_id, we will directly return that one.
*/
@com.google.api.client.util.Key
private java.lang.String subscriptionId;
/** Required. Identifies the subscription resource on the Partner side. The value is restricted to 63
ASCII characters at the maximum. If a subscription was previously created with the same
subscription_id, we will directly return that one.
*/
public java.lang.String getSubscriptionId() {
return subscriptionId;
}
/**
* Required. Identifies the subscription resource on the Partner side. The value is
* restricted to 63 ASCII characters at the maximum. If a subscription was previously
* created with the same subscription_id, we will directly return that one.
*/
public Provision setSubscriptionId(java.lang.String subscriptionId) {
this.subscriptionId = subscriptionId;
return this;
}
@Override
public Provision set(String parameterName, Object value) {
return (Provision) super.set(parameterName, value);
}
}
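    /*
     * Illustrative sketch (not part of the generated client): the two-step
     * provision/entitle flow described above. Provision is called with a
     * service-account-authorized client, while Entitle must use a client
     * authorized by the end user via OAuth. serviceAccountClient, endUserClient
     * and all resource names below are placeholders.
     *
     *   // Step 1: the partner provisions the subscription (service account).
     *   GoogleCloudPaymentsResellerSubscriptionV1Subscription provisioned =
     *       serviceAccountClient.subscriptions()
     *           .provision("partners/example-partner",
     *               new GoogleCloudPaymentsResellerSubscriptionV1Subscription())
     *           .setSubscriptionId("example-subscription")
     *           .execute();
     *
     *   // Step 2: the end user entitles the provisioned subscription (OAuth).
     *   GoogleCloudPaymentsResellerSubscriptionV1EntitleSubscriptionResponse entitled =
     *       endUserClient.subscriptions()
     *           .entitle("partners/example-partner/subscriptions/example-subscription",
     *               new GoogleCloudPaymentsResellerSubscriptionV1EntitleSubscriptionRequest())
     *           .execute();
     */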
/**
* Used by partners to revoke the pending cancellation of a subscription, which is currently in
* `STATE_CANCEL_AT_END_OF_CYCLE` state. If the subscription is already cancelled, the request will
* fail. It should be called directly by the partner using service accounts.
*
* Create a request for the method "subscriptions.undoCancel".
*
* This request holds the parameters needed by the paymentsresellersubscription server. After
* setting any optional parameters, call the {@link UndoCancel#execute()} method to invoke the
* remote operation.
*
* @param name Required. The name of the subscription resource whose pending cancellation needs to be undone. It
* will have the format of "partners/{partner_id}/subscriptions/{subscription_id}"
* @param content the {@link com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1UndoCancelSubscriptionRequest}
* @return the request
*/
public UndoCancel undoCancel(java.lang.String name, com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1UndoCancelSubscriptionRequest content) throws java.io.IOException {
UndoCancel result = new UndoCancel(name, content);
initialize(result);
return result;
}
public class UndoCancel extends PaymentsResellerSubscriptionRequest<com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1UndoCancelSubscriptionResponse> {
private static final String REST_PATH = "v1/{+name}:undoCancel";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^partners/[^/]+/subscriptions/[^/]+$");
/**
* Used by partners to revoke the pending cancellation of a subscription, which is currently in
* `STATE_CANCEL_AT_END_OF_CYCLE` state. If the subscription is already cancelled, the request
* will fail. It should be called directly by the partner using service accounts.
*
* Create a request for the method "subscriptions.undoCancel".
*
       * This request holds the parameters needed by the paymentsresellersubscription server. After
* setting any optional parameters, call the {@link UndoCancel#execute()} method to invoke the
* remote operation. <p> {@link
* UndoCancel#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Required. The name of the subscription resource whose pending cancellation needs to be undone. It
* will have the format of "partners/{partner_id}/subscriptions/{subscription_id}"
* @param content the {@link com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1UndoCancelSubscriptionRequest}
* @since 1.13
*/
protected UndoCancel(java.lang.String name, com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1UndoCancelSubscriptionRequest content) {
super(PaymentsResellerSubscription.this, "POST", REST_PATH, content, com.google.api.services.paymentsresellersubscription.v1.model.GoogleCloudPaymentsResellerSubscriptionV1UndoCancelSubscriptionResponse.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^partners/[^/]+/subscriptions/[^/]+$");
}
}
@Override
public UndoCancel set$Xgafv(java.lang.String $Xgafv) {
return (UndoCancel) super.set$Xgafv($Xgafv);
}
@Override
public UndoCancel setAccessToken(java.lang.String accessToken) {
return (UndoCancel) super.setAccessToken(accessToken);
}
@Override
public UndoCancel setAlt(java.lang.String alt) {
return (UndoCancel) super.setAlt(alt);
}
@Override
public UndoCancel setCallback(java.lang.String callback) {
return (UndoCancel) super.setCallback(callback);
}
@Override
public UndoCancel setFields(java.lang.String fields) {
return (UndoCancel) super.setFields(fields);
}
@Override
public UndoCancel setKey(java.lang.String key) {
return (UndoCancel) super.setKey(key);
}
@Override
public UndoCancel setOauthToken(java.lang.String oauthToken) {
return (UndoCancel) super.setOauthToken(oauthToken);
}
@Override
public UndoCancel setPrettyPrint(java.lang.Boolean prettyPrint) {
return (UndoCancel) super.setPrettyPrint(prettyPrint);
}
@Override
public UndoCancel setQuotaUser(java.lang.String quotaUser) {
return (UndoCancel) super.setQuotaUser(quotaUser);
}
@Override
public UndoCancel setUploadType(java.lang.String uploadType) {
return (UndoCancel) super.setUploadType(uploadType);
}
@Override
public UndoCancel setUploadProtocol(java.lang.String uploadProtocol) {
return (UndoCancel) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. The name of the subscription resource whose pending cancellation needs to be
* undone. It will have the format of
* "partners/{partner_id}/subscriptions/{subscription_id}"
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. The name of the subscription resource whose pending cancellation needs to be undone. It
will have the format of "partners/{partner_id}/subscriptions/{subscription_id}"
*/
public java.lang.String getName() {
return name;
}
/**
* Required. The name of the subscription resource whose pending cancellation needs to be
* undone. It will have the format of
* "partners/{partner_id}/subscriptions/{subscription_id}"
*/
public UndoCancel setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^partners/[^/]+/subscriptions/[^/]+$");
}
this.name = name;
return this;
}
@Override
public UndoCancel set(String parameterName, Object value) {
return (UndoCancel) super.set(parameterName, value);
}
}
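    /*
     * Illustrative sketch (not part of the generated client): revoking a
     * pending cancellation. Per the method documentation this only succeeds
     * while the subscription is in STATE_CANCEL_AT_END_OF_CYCLE; the resource
     * name is a placeholder.
     *
     *   paymentsresellersubscription.subscriptions()
     *       .undoCancel("partners/example-partner/subscriptions/example-subscription",
     *           new GoogleCloudPaymentsResellerSubscriptionV1UndoCancelSubscriptionRequest())
     *       .execute();
     */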
}
}
/**
* Builder for {@link PaymentsResellerSubscription}.
*
* <p>
* Implementation is not thread-safe.
* </p>
*
* @since 1.3.0
*/
public static final class Builder extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient.Builder {
private static String chooseEndpoint(com.google.api.client.http.HttpTransport transport) {
// If the GOOGLE_API_USE_MTLS_ENDPOINT environment variable value is "always", use mTLS endpoint.
// If the env variable is "auto", use mTLS endpoint if and only if the transport is mTLS.
// Use the regular endpoint for all other cases.
String useMtlsEndpoint = System.getenv("GOOGLE_API_USE_MTLS_ENDPOINT");
useMtlsEndpoint = useMtlsEndpoint == null ? "auto" : useMtlsEndpoint;
if ("always".equals(useMtlsEndpoint) || ("auto".equals(useMtlsEndpoint) && transport != null && transport.isMtls())) {
return DEFAULT_MTLS_ROOT_URL;
}
return DEFAULT_ROOT_URL;
}
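    // Endpoint selection summary (restating the logic above):
    //   GOOGLE_API_USE_MTLS_ENDPOINT=always                  -> DEFAULT_MTLS_ROOT_URL
    //   GOOGLE_API_USE_MTLS_ENDPOINT=auto (or unset) with an
    //     mTLS-capable transport                             -> DEFAULT_MTLS_ROOT_URL
    //   any other combination (e.g. "never")                 -> DEFAULT_ROOT_URL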
/**
* Returns an instance of a new builder.
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public Builder(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
super(
transport,
jsonFactory,
Builder.chooseEndpoint(transport),
DEFAULT_SERVICE_PATH,
httpRequestInitializer,
false);
setBatchPath(DEFAULT_BATCH_PATH);
}
/** Builds a new instance of {@link PaymentsResellerSubscription}. */
@Override
public PaymentsResellerSubscription build() {
return new PaymentsResellerSubscription(this);
}
@Override
public Builder setRootUrl(String rootUrl) {
return (Builder) super.setRootUrl(rootUrl);
}
@Override
public Builder setServicePath(String servicePath) {
return (Builder) super.setServicePath(servicePath);
}
@Override
public Builder setBatchPath(String batchPath) {
return (Builder) super.setBatchPath(batchPath);
}
@Override
public Builder setHttpRequestInitializer(com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
return (Builder) super.setHttpRequestInitializer(httpRequestInitializer);
}
@Override
public Builder setApplicationName(String applicationName) {
return (Builder) super.setApplicationName(applicationName);
}
@Override
public Builder setSuppressPatternChecks(boolean suppressPatternChecks) {
return (Builder) super.setSuppressPatternChecks(suppressPatternChecks);
}
@Override
public Builder setSuppressRequiredParameterChecks(boolean suppressRequiredParameterChecks) {
return (Builder) super.setSuppressRequiredParameterChecks(suppressRequiredParameterChecks);
}
@Override
public Builder setSuppressAllChecks(boolean suppressAllChecks) {
return (Builder) super.setSuppressAllChecks(suppressAllChecks);
}
/**
* Set the {@link PaymentsResellerSubscriptionRequestInitializer}.
*
* @since 1.12
*/
public Builder setPaymentsResellerSubscriptionRequestInitializer(
PaymentsResellerSubscriptionRequestInitializer paymentsresellersubscriptionRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(paymentsresellersubscriptionRequestInitializer);
}
@Override
public Builder setGoogleClientRequestInitializer(
com.google.api.client.googleapis.services.GoogleClientRequestInitializer googleClientRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(googleClientRequestInitializer);
}
}
}
| [
"\"GOOGLE_API_USE_MTLS_ENDPOINT\""
]
| []
| [
"GOOGLE_API_USE_MTLS_ENDPOINT"
]
| [] | ["GOOGLE_API_USE_MTLS_ENDPOINT"] | java | 1 | 0 | |
sechub-integrationtest/src/main/java/com/mercedesbenz/sechub/integrationtest/api/LocalDeveloperFileSetupSupport.java | // SPDX-License-Identifier: MIT
package com.mercedesbenz.sechub.integrationtest.api;
import java.io.File;
import java.io.FileInputStream;
import java.util.Properties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A little helper class for developers: during local development in your IDE it
 * is inconvenient to have to enable every JUnit test launch with
 * -Dsechub.integrationtest.running=true (otherwise the tests are skipped). This
 * class simplifies that: just create the file
 * `~/.sechub/sechub-developer.properties` and put your settings there. During
 * Gradle builds the file is ignored.
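 *
 * Example file content (a minimal sketch; the key mirrors the -D system
 * property named above):
 *
 * <pre>
 * sechub.integrationtest.running=true
 * </pre>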
*
* @author Albert Tregnaghi
*
*/
public class LocalDeveloperFileSetupSupport {
public static LocalDeveloperFileSetupSupport INSTANCE = new LocalDeveloperFileSetupSupport();
private static final Logger LOG = LoggerFactory.getLogger(LocalDeveloperFileSetupSupport.class);
private boolean alwaysSecHubIntegrationTestRunning;
private LocalDeveloperFileSetupSupport() {
logInfo("Local developer support initializing");
File userHome = new File(System.getProperty("user.home"));
File sechubHidden = new File(userHome, ".sechub");
File sechubDevConfig = new File(sechubHidden, "sechub-developer.properties");
String buildGradleEnv = System.getenv("SECHUB_BUILD_GRADLE");
if (Boolean.parseBoolean(buildGradleEnv)) {
logInfo("Recognized gradle build, skip check for :" + sechubDevConfig.getAbsolutePath());
return;
}
if (!sechubDevConfig.exists()) {
return;
}
Properties properties = new Properties();
try (FileInputStream fis = new FileInputStream(sechubDevConfig)) {
properties.load(fis);
alwaysSecHubIntegrationTestRunning = Boolean.parseBoolean(properties.getProperty(IntegrationTestSetup.SECHUB_INTEGRATIONTEST_RUNNING, "false"));
logInfo("Local developer support has been initialized");
} catch (Exception e) {
logError("Was not able to load developer config file", e);
}
}
public boolean isAlwaysSecHubIntegrationTestRunning() {
return alwaysSecHubIntegrationTestRunning;
}
public static void main(String[] args) {
new LocalDeveloperFileSetupSupport().toString();
}
private void logInfo(String message) {
if (LOG == null) {
            // for unclear reasons LOG can be null in IDEs when executing JUnit tests -
            // so a fallback is necessary
System.out.println("NO_LOG (info):" + message);
return;
}
LOG.info(message);
}
private void logError(String message, Throwable t) {
if (LOG == null) {
            // for unclear reasons LOG can be null in IDEs when executing JUnit tests -
            // so a fallback is necessary
System.err.println("NO_LOG (error):" + message);
t.printStackTrace();
return;
}
LOG.error(message, t);
}
}
| [
"\"SECHUB_BUILD_GRADLE\""
]
| []
| [
"SECHUB_BUILD_GRADLE"
]
| [] | ["SECHUB_BUILD_GRADLE"] | java | 1 | 0 | |
weather-data/lambda/dataset_processor.py | import boto3
import rasterio
import os
import numpy as np
from osgeo import gdal
from botocore.handlers import disable_signing
from typing import List
from datetime import datetime
DATASET_TMP_PATH = "/tmp/tmp.grib2"
GDAL_TMP_FILE = "/tmp/temp.tiff"
FINAL_IMG = "/tmp/final.jpeg"
NOAA_BUCKET = 'noaa-gfs-bdp-pds'
PROCESSED_BUCKET_AP = 'arn:aws:s3:eu-west-1:168000702421:accesspoint/noaa-processing'
if ('CDK_NOAA_BUCKET_ID' in os.environ):
NOAA_BUCKET = os.environ['CDK_NOAA_BUCKET_ID']
if ('CDK_PROCESSED_BUCKET_AP' in os.environ):
PROCESSED_BUCKET_AP = os.environ['CDK_PROCESSED_BUCKET_AP']
def get_file_s3_unsigned(bucket: str, key: str, file_path: str, TEST_ENV=False) -> None:
s3 = boto3.resource('s3')
gfs_res = s3.Bucket(bucket)
# Enable unsigned s3 requests only while not testing
# as mocking them is difficult.
if (not TEST_ENV):
gfs_res.meta.client.meta.events.register('choose-signer.s3.*', disable_signing)
gfs_res.download_file(key, file_path)
def put_file_s3(bucket: str, key: str, file_path: str) -> None:
s3 = boto3.resource('s3')
s3.Bucket(bucket).upload_file(file_path, key)
# Processes the dataset and returns a dict with the status and the valid
# timestamp as a string.
def process_dataset(file_in: str, file_out: str, tmp_file: str, mode: str) -> dict:
ds = gdal.Open(file_in)
ds_time = datetime.utcfromtimestamp(
int(ds.GetRasterBand(1).GetMetadataItem('GRIB_VALID_TIME').replace("sec UTC", ""))
)
valid_timestring = ds_time.strftime('%Y-%m-%dT%H:%M:%S')
# if ds_time is more than 12 hours in the future, discard it
# this should be parsed directly from the filename, but this works out too
if (ds_time - datetime.utcnow()).total_seconds() > 12 * 60 * 60:
return {
"status": "noop",
"valid_timestring": valid_timestring,
}
ugrd = "UGRD"
vgrd = "VGRD"
bands = None
if (mode == "wind"):
bands = [
{ # U component of wind (m/s)
"GRIB_ELEMENT": ugrd,
"GRIB_SHORT_NAME": "10-HTGL"
},
{ # V component of wind (m/s)
"GRIB_ELEMENT": vgrd,
"GRIB_SHORT_NAME": "10-HTGL"
},
]
band_indexes = {}
for i in range(1, ds.RasterCount + 1):
band = ds.GetRasterBand(i)
grib_element = band.GetMetadata()['GRIB_ELEMENT']
grib_short_name = band.GetMetadata()['GRIB_SHORT_NAME']
        for band_dict in bands:
if (grib_element == band_dict['GRIB_ELEMENT'] and grib_short_name == band_dict['GRIB_SHORT_NAME']):
band_indexes[grib_element] = i
break
in_srs = "+proj=longlat +datum=WGS84 +lon_wrap=180"
out_srs = "EPSG:3857"
band_indexes_keys = band_indexes.values()
# pick the bands we want from grib file
translated = gdal.Translate("", ds, bandList=band_indexes_keys, format="VRT")
# reproject to epsg:3857 and cut to sensible bounds (taken manually from qgis osm layer)
bounds = [-20037508.3427892439067364,-20037508.3427892550826073,20037508.3427892439067364,20037508.3427892439067364]
# write reprojected file to tmp file so we can pick bands we want with rasterio
warped = gdal.Warp(tmp_file, translated, dstNodata=9999, srcSRS=in_srs, dstSRS=out_srs, outputBounds=bounds, creationOptions=["COMPRESS=LZW"])
# write dataset to disk
del warped
del translated
# only 512mb of disk space is available for lambda, so deletion here might be necessary
# os.unlink(file_in)
with rasterio.open(tmp_file) as src:
        # rasterio's .shape is (rows, cols), i.e. (height, width)
        with rasterio.open(file_out, "w", width=src.shape[1], height=src.shape[0], count=3, dtype='uint8') as dst:
if (mode == "wind"):
                # rasterio band indexing starts from 1, hence the +1 below
u_index = [index for index, value in enumerate(band_indexes) if value == ugrd][0] + 1
v_index = [index for index, value in enumerate(band_indexes) if value == vgrd][0] + 1
u_raw = src.read(u_index)
v_raw = src.read(v_index)
u_rescaled = np.interp(u_raw, (-50, 50), (0, 255)).astype(np.uint8)
v_rescaled = np.interp(v_raw, (-50, 50), (0, 255)).astype(np.uint8)
# In a sense this band three is completely redundant, but WebGL lookup from a picture
# like this was easier, so keeping it this way for now.
speed = np.sqrt(src.read(u_index)**2 + src.read(v_index)**2).astype(np.uint8)
dst.write(u_rescaled, 1)
dst.write(v_rescaled, 2)
dst.write(speed, 3)
else:
print("Mode not supported")
return {
"status": "update",
"valid_timestring": valid_timestring,
}
def delete_files_if_exists(files: List[str]) -> None:
for f in files:
if os.path.exists(f):
os.unlink(f)
def key_is_fresh_enough(key: str) -> bool:
# gfs.20210226/18/gfs.t18z.sfluxgrbf010.grib2
hours = int(key.split("sfluxgrbf")[1].split(".")[0])
return hours < 24
def handle_new_gfs(key: str):
if not key_is_fresh_enough(key):
print("Hour more than 24h in the future, skipping" + key)
return
    # If we get the same execution context as a previous lambda invocation,
    # we might have unnecessary files there filling up the 512M limit on /tmp.
delete_files_if_exists([DATASET_TMP_PATH, GDAL_TMP_FILE, FINAL_IMG])
get_file_s3_unsigned(NOAA_BUCKET, key, DATASET_TMP_PATH)
for mode in ["wind"]:
        result = process_dataset(DATASET_TMP_PATH, FINAL_IMG, GDAL_TMP_FILE, mode)
if (result["status"] == "update"):
output_key = f'{result["valid_timestring"]}_noaa_{mode}.jpeg'
put_file_s3(PROCESSED_BUCKET_AP, output_key, FINAL_IMG)
else:
print("Not updating, too far in the future")
| []
| []
| [
"CDK_NOAA_BUCKET_ID",
"CDK_PROCESSED_BUCKET_AP"
]
| [] | ["CDK_NOAA_BUCKET_ID", "CDK_PROCESSED_BUCKET_AP"] | python | 2 | 0 | |
scripts/workers/remote_submission_worker.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import contextlib
import importlib
import json
import logging
import os
import requests
import signal
import shutil
import sys
import tempfile
import time
import traceback
import zipfile
from os.path import join
# all challenge and submission will be stored in temp directory
BASE_TEMP_DIR = tempfile.mkdtemp()
COMPUTE_DIRECTORY_PATH = join(BASE_TEMP_DIR, "compute")
logger = logging.getLogger(__name__)
AUTH_TOKEN = os.environ.get("AUTH_TOKEN", "x")
DJANGO_SERVER = os.environ.get("DJANGO_SERVER", "django")
DJANGO_SERVER_PORT = os.environ.get("DJANGO_SERVER_PORT", "8000")
QUEUE_NAME = os.environ.get("QUEUE_NAME", "test-ai4h")
CHALLENGE_DATA_BASE_DIR = join(COMPUTE_DIRECTORY_PATH, "challenge_data")
SUBMISSION_DATA_BASE_DIR = join(COMPUTE_DIRECTORY_PATH, "submission_files")
CHALLENGE_DATA_DIR = join(CHALLENGE_DATA_BASE_DIR, "challenge_{challenge_id}")
PHASE_DATA_BASE_DIR = join(CHALLENGE_DATA_DIR, "phase_data")
PHASE_DATA_DIR = join(PHASE_DATA_BASE_DIR, "phase_{phase_id}")
PHASE_ANNOTATION_FILE_PATH = join(PHASE_DATA_DIR, "{annotation_file}")
SUBMISSION_DATA_DIR = join(
SUBMISSION_DATA_BASE_DIR, "submission_{submission_id}"
)
SUBMISSION_INPUT_FILE_PATH = join(SUBMISSION_DATA_DIR, "{input_file}")
CHALLENGE_IMPORT_STRING = "challenge_data.challenge_{challenge_id}"
EVALUATION_SCRIPTS = {}
URLS = {
"get_message_from_sqs_queue": "/api/jobs/challenge/queues/{}/",
"delete_message_from_sqs_queue": "/api/jobs/queues/{}/",
"get_submission_by_pk": "/api/jobs/submission/{}",
"get_challenge_phases_by_challenge_pk": "/api/challenges/{}/phases/",
"get_challenge_by_queue_name": "/api/challenges/challenge/queues/{}/",
"get_challenge_phase_by_pk": "/api/challenges/challenge/{}/challenge_phase/{}",
"update_submission_data": "/api/jobs/challenge/{}/update_submission/",
}
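# Example: URLS["get_submission_by_pk"].format(42) -> "/api/jobs/submission/42",
# which return_url_per_environment() below prefixes with the Django host/port.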
EVALAI_ERROR_CODES = [400, 401, 406]
# map of challenge id : phase id : phase annotation file name
# Use: On arrival of submission message, lookup here to fetch phase file name
# this saves db query just to fetch phase annotation file name
PHASE_ANNOTATION_FILE_NAME_MAP = {}
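# Shape example: {<challenge_pk>: {<phase_pk>: "<annotation file name>"}}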
class GracefulKiller:
kill_now = False
def __init__(self):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self, signum, frame):
self.kill_now = True
class ExecutionTimeLimitExceeded(Exception):
pass
@contextlib.contextmanager
def stdout_redirect(where):
sys.stdout = where
try:
yield where
finally:
sys.stdout = sys.__stdout__
@contextlib.contextmanager
def stderr_redirect(where):
sys.stderr = where
try:
yield where
finally:
sys.stderr = sys.__stderr__
def alarm_handler(signum, frame):
raise ExecutionTimeLimitExceeded
def download_and_extract_file(url, download_location):
"""
    * Function to download a file.
    * `download_location` should include the name of the file as well.
"""
try:
response = requests.get(url)
except Exception as e:
logger.error("Failed to fetch file from {}, error {}".format(url, e))
traceback.print_exc()
response = None
if response and response.status_code == 200:
with open(download_location, "wb") as f:
f.write(response.content)
def download_and_extract_zip_file(url, download_location, extract_location):
"""
    * Function to download a zip file, extract it and then remove the zip file.
    * `download_location` should include the name of the file as well.
"""
try:
response = requests.get(url)
except Exception as e:
logger.error("Failed to fetch file from {}, error {}".format(url, e))
response = None
if response and response.status_code == 200:
with open(download_location, "wb") as f:
f.write(response.content)
# extract zip file
zip_ref = zipfile.ZipFile(download_location, "r")
zip_ref.extractall(extract_location)
zip_ref.close()
# delete zip file
try:
os.remove(download_location)
except Exception as e:
logger.error(
"Failed to remove zip file {}, error {}".format(
download_location, e
)
)
traceback.print_exc()
def create_dir(directory):
"""
    Creates a directory if it does not exist
"""
if not os.path.exists(directory):
os.makedirs(directory)
def create_dir_as_python_package(directory):
"""
Create a directory and then makes it a python
package by creating `__init__.py` file.
"""
create_dir(directory)
init_file_path = join(directory, "__init__.py")
with open(init_file_path, "w") as init_file: # noqa
# to create empty file
pass
def return_url_per_environment(url):
base_url = "http://{0}:{1}".format(DJANGO_SERVER, DJANGO_SERVER_PORT)
url = "{0}{1}".format(base_url, url)
return url
def load_challenge():
"""
Creates python package for a challenge and extracts relevant data
"""
# make sure that the challenge base directory exists
create_dir_as_python_package(CHALLENGE_DATA_BASE_DIR)
try:
challenge = get_challenge_by_queue_name()
except Exception:
logger.exception(
"Challenge with queue name %s does not exists." % (QUEUE_NAME)
)
raise
challenge_pk = challenge.get("id")
phases = get_challenge_phases_by_challenge_pk(challenge_pk)
extract_challenge_data(challenge, phases)
def extract_challenge_data(challenge, phases):
"""
* Expects a challenge object and an array of phase object
* Extracts `evaluation_script` for challenge and `annotation_file` for each phase
"""
challenge_data_directory = CHALLENGE_DATA_DIR.format(
challenge_id=challenge.get("id")
)
evaluation_script_url = challenge.get("evaluation_script")
create_dir_as_python_package(challenge_data_directory)
# set entry in map
PHASE_ANNOTATION_FILE_NAME_MAP[challenge.get("id")] = {}
challenge_zip_file = join(
challenge_data_directory,
"challenge_{}.zip".format(challenge.get("id")),
)
download_and_extract_zip_file(
evaluation_script_url, challenge_zip_file, challenge_data_directory
)
phase_data_base_directory = PHASE_DATA_BASE_DIR.format(
challenge_id=challenge.get("id")
)
create_dir(phase_data_base_directory)
for phase in phases:
phase_data_directory = PHASE_DATA_DIR.format(
challenge_id=challenge.get("id"), phase_id=phase.get("id")
)
# create phase directory
create_dir(phase_data_directory)
annotation_file_url = phase.get("test_annotation")
annotation_file_name = os.path.basename(phase.get("test_annotation"))
PHASE_ANNOTATION_FILE_NAME_MAP[challenge.get("id")][
phase.get("id")
] = annotation_file_name
annotation_file_path = PHASE_ANNOTATION_FILE_PATH.format(
challenge_id=challenge.get("id"),
phase_id=phase.get("id"),
annotation_file=annotation_file_name,
)
download_and_extract_file(annotation_file_url, annotation_file_path)
try:
# import the challenge after everything is finished
challenge_module = importlib.import_module(
CHALLENGE_IMPORT_STRING.format(challenge_id=challenge.get("id"))
)
EVALUATION_SCRIPTS[challenge.get("id")] = challenge_module
except Exception:
logger.exception(
"Exception raised while creating Python module for challenge_id: %s"
% (challenge.get("id"))
)
raise
def process_submission_callback(body):
try:
logger.info("[x] Received submission message %s" % body)
process_submission_message(body)
except Exception as e:
logger.exception(
"Exception while processing message from submission queue with error {}".format(
e
)
)
def process_submission_message(message):
"""
Extracts the submission related metadata from the message
and send the submission object for evaluation
"""
challenge_pk = int(message.get("challenge_pk"))
phase_pk = message.get("phase_pk")
submission_pk = message.get("submission_pk")
submission_instance = extract_submission_data(submission_pk)
# so that the further execution does not happen
if not submission_instance:
return
challenge = get_challenge_by_queue_name()
remote_evaluation = challenge.get("remote_evaluation")
challenge_phase = get_challenge_phase_by_pk(challenge_pk, phase_pk)
if not challenge_phase:
        message = "Challenge Phase {} does not exist for queue {}".format(
            phase_pk, QUEUE_NAME
        )
        logger.error(message)
        # A bare `raise` is only valid inside an except block, so raise an
        # explicit exception here instead.
        raise Exception(message)
user_annotation_file_path = join(
SUBMISSION_DATA_DIR.format(submission_id=submission_pk),
os.path.basename(submission_instance.get("input_file")),
)
run_submission(
challenge_pk,
challenge_phase,
submission_instance,
user_annotation_file_path,
remote_evaluation,
)
def extract_submission_data(submission_pk):
"""
* Expects submission id and extracts input file for it.
"""
submission = get_submission_by_pk(submission_pk)
if not submission:
logger.critical("Submission {} does not exist".format(submission_pk))
traceback.print_exc()
# return from here so that the message can be acked
# This also indicates that we don't want to take action
# for message corresponding to which submission entry
# does not exist
return
submission_input_file = submission.get("input_file")
submission_data_directory = SUBMISSION_DATA_DIR.format(
submission_id=submission.get("id")
)
submission_input_file_name = os.path.basename(submission_input_file)
submission_input_file_path = SUBMISSION_INPUT_FILE_PATH.format(
submission_id=submission.get("id"),
input_file=submission_input_file_name,
)
create_dir_as_python_package(submission_data_directory)
download_and_extract_file(
submission_input_file, submission_input_file_path
)
return submission
def get_request_headers():
headers = {"Authorization": "Token {}".format(AUTH_TOKEN)}
return headers
def make_request(url, method, data=None):
headers = get_request_headers()
if method == "GET":
try:
response = requests.get(url=url, headers=headers, verify=False)
response.raise_for_status()
except requests.exceptions.RequestException:
logger.info(
"The worker is not able to establish connection with EvalAI"
)
raise
return response.json()
elif method == "PUT":
try:
response = requests.put(url=url, headers=headers, data=data)
response.raise_for_status()
except requests.exceptions.RequestException:
            # `response` may be unbound when the request itself fails, and `%`
            # cannot fill a `{}` placeholder, so log a plain message as the
            # other branches do.
            logger.exception(
                "The worker is not able to establish connection with EvalAI"
            )
raise
except requests.exceptions.HTTPError:
logger.exception(
f"The request to URL {url} is failed due to {response.json()}"
)
raise
return response.json()
elif method == "PATCH":
try:
response = requests.patch(url=url, headers=headers, data=data)
response.raise_for_status()
except requests.exceptions.RequestException:
logger.info(
"The worker is not able to establish connection with EvalAI"
)
raise
except requests.exceptions.HTTPError:
logger.info(
f"The request to URL {url} is failed due to {response.json()}"
)
raise
return response.json()
elif method == "POST":
try:
response = requests.post(url=url, headers=headers, data=data)
response.raise_for_status()
except requests.exceptions.RequestException:
logger.info(
"The worker is not able to establish connection with EvalAI"
)
raise
except requests.exceptions.HTTPError:
logger.info(
f"The request to URL {url} is failed due to {response.json()}"
)
raise
return response.json()
def get_message_from_sqs_queue():
url = URLS.get("get_message_from_sqs_queue").format(QUEUE_NAME)
url = return_url_per_environment(url)
response = make_request(url, "GET")
return response
def delete_message_from_sqs_queue(receipt_handle):
url = URLS.get("delete_message_from_sqs_queue").format(QUEUE_NAME)
url = return_url_per_environment(url)
response = make_request(
url, "POST", data={"receipt_handle": receipt_handle}
) # noqa
return response
def get_submission_by_pk(submission_pk):
url = URLS.get("get_submission_by_pk").format(submission_pk)
url = return_url_per_environment(url)
response = make_request(url, "GET")
return response
def get_challenge_phases_by_challenge_pk(challenge_pk):
url = URLS.get("get_challenge_phases_by_challenge_pk").format(challenge_pk)
url = return_url_per_environment(url)
response = make_request(url, "GET")
return response
def get_challenge_by_queue_name():
url = URLS.get("get_challenge_by_queue_name").format(QUEUE_NAME)
url = return_url_per_environment(url)
response = make_request(url, "GET")
return response
def get_challenge_phase_by_pk(challenge_pk, challenge_phase_pk):
url = URLS.get("get_challenge_phase_by_pk").format(
challenge_pk, challenge_phase_pk
)
url = return_url_per_environment(url)
response = make_request(url, "GET")
return response
def update_submission_data(data, challenge_pk, submission_pk):
url = URLS.get("update_submission_data").format(challenge_pk)
url = return_url_per_environment(url)
response = make_request(url, "PUT", data=data)
return response
def update_submission_status(data, challenge_pk):
url = "/api/jobs/challenge/{}/update_submission/".format(challenge_pk)
url = return_url_per_environment(url)
response = make_request(url, "PATCH", data=data)
return response
def read_file_content(file_path):
with open(file_path, "r") as obj:
file_content = obj.read()
if not file_content:
file_content = " "
return file_content
def run_submission(
challenge_pk,
challenge_phase,
submission,
user_annotation_file_path,
remote_evaluation,
):
"""
    * Checks whether the corresponding evaluation script and the annotation file for the challenge exist
    * Calls the evaluation script to evaluate the particular submission
    Arguments:
        challenge_pk -- challenge Id in which the submission is created
        challenge_phase -- challenge phase JSON object in which the submission is created
        submission -- JSON object for the submission
        user_annotation_file_path -- File submitted by user as a submission
        remote_evaluation -- when truthy, this function returns right after invoking the evaluation script, without recording results locally
"""
# Send the submission data to the evaluation script
# so that challenge hosts can use data for webhooks or any other service.
submission_output = None
phase_pk = challenge_phase.get("id")
submission_pk = submission.get("id")
annotation_file_name = PHASE_ANNOTATION_FILE_NAME_MAP[challenge_pk][
phase_pk
]
annotation_file_path = PHASE_ANNOTATION_FILE_PATH.format(
challenge_id=challenge_pk,
phase_id=phase_pk,
annotation_file=annotation_file_name,
)
submission_data_dir = SUBMISSION_DATA_DIR.format(
submission_id=submission.get("id")
)
submission_data = {
"submission_status": "running",
"submission": submission_pk,
}
update_submission_status(submission_data, challenge_pk)
status = "running"
# create a temporary run directory under submission directory, so that
# main directory does not gets polluted
temp_run_dir = join(submission_data_dir, "run")
create_dir(temp_run_dir)
stdout_file = join(temp_run_dir, "temp_stdout.txt")
stderr_file = join(temp_run_dir, "temp_stderr.txt")
stdout = open(stdout_file, "a+")
stderr = open(stderr_file, "a+")
try:
logger.info(
"Sending submission {} for evaluation".format(submission_pk)
)
with stdout_redirect(stdout), stderr_redirect(stderr):
submission_output = EVALUATION_SCRIPTS[challenge_pk].evaluate(
annotation_file_path,
user_annotation_file_path,
challenge_phase.get("codename"),
submission_metadata=submission,
)
if remote_evaluation:
return
except Exception:
status = "failed"
stderr.write(traceback.format_exc())
stdout.close()
stderr.close()
stdout_content = read_file_content(stdout_file)
stderr_content = read_file_content(stderr_file)
submission_data = {
"challenge_phase": phase_pk,
"submission": submission_pk,
"submission_status": status,
"stdout": stdout_content,
"stderr": stderr_content,
}
update_submission_data(submission_data, challenge_pk, submission_pk)
shutil.rmtree(temp_run_dir)
return
stdout.close()
stderr.close()
stdout_content = read_file_content(stdout_file)
stderr_content = read_file_content(stderr_file)
submission_data = {
"challenge_phase": phase_pk,
"submission": submission_pk,
"submission_status": status,
"stdout": stdout_content,
"stderr": stderr_content,
}
if "result" in submission_output:
status = "finished"
submission_data["result"] = json.dumps(submission_output.get("result"))
submission_data["metadata"] = json.dumps(
submission_output.get("submission_metadata")
)
submission_data["submission_status"] = status
else:
status = "failed"
submission_data["submission_status"] = status
update_submission_data(submission_data, challenge_pk, submission_pk)
shutil.rmtree(temp_run_dir)
return
def main():
killer = GracefulKiller()
logger.info(
"Using {0} as temp directory to store data".format(BASE_TEMP_DIR)
)
create_dir_as_python_package(COMPUTE_DIRECTORY_PATH)
sys.path.append(COMPUTE_DIRECTORY_PATH)
# create submission base data directory
create_dir_as_python_package(SUBMISSION_DATA_BASE_DIR)
load_challenge()
while True:
logger.info(
"Fetching new messages from the queue {}".format(QUEUE_NAME)
)
message = get_message_from_sqs_queue()
message_body = message.get("body")
if message_body:
submission_pk = message_body.get("submission_pk")
submission = get_submission_by_pk(submission_pk)
if submission:
if submission.get("status") == "finished":
message_receipt_handle = message.get("receipt_handle")
delete_message_from_sqs_queue(message_receipt_handle)
elif submission.get("status") == "running":
continue
else:
message_receipt_handle = message.get("receipt_handle")
logger.info(
"Processing message body: {}".format(message_body)
)
process_submission_callback(message_body)
# Let the queue know that the message is processed
delete_message_from_sqs_queue(message_receipt_handle)
time.sleep(5)
if killer.kill_now:
break
if __name__ == "__main__":
main()
logger.info("Quitting Submission Worker.")
| []
| []
| [
"QUEUE_NAME",
"DJANGO_SERVER_PORT",
"DJANGO_SERVER",
"AUTH_TOKEN"
]
| [] | ["QUEUE_NAME", "DJANGO_SERVER_PORT", "DJANGO_SERVER", "AUTH_TOKEN"] | python | 4 | 0 | |
stream_alert/alert_merger/main.py | """
Copyright 2017-present, Airbnb Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from datetime import datetime
import json
import os
from stream_alert.alert_merger import LOGGER
from stream_alert.shared.alert import Alert, AlertCreationError
from stream_alert.shared.alert_table import AlertTable
from stream_alert.shared.metrics import ALERT_MERGER_NAME, MetricLogger
import boto3
class AlertMergeGroup(object):
"""A list of alerts within a single merge window which match on their merge keys."""
# In order to limit the size of a merged alert, cap the maximum number that can be merged.
MAX_ALERTS_PER_GROUP = 50
def __init__(self, alert):
"""Initialize the group with the oldest alert remaining."""
self.alerts = [alert]
def add(self, alert):
"""Try adding an Alert to this merge group.
Returns:
True if the alert matches this group and was added, False otherwise.
"""
if len(self.alerts) >= self.MAX_ALERTS_PER_GROUP:
return False
if alert.can_merge(self.alerts[0]):
self.alerts.append(alert)
return True
return False
class AlertMerger(object):
"""Dispatch alerts to the alert processor."""
ALERT_MERGER = None # AlertMerger instance which can be re-used across Lambda invocations
# Async invocations of Lambda functions are capped at 128KB.
# Set the max payload size to slightly under that to account for the rest of the message.
MAX_LAMBDA_PAYLOAD_SIZE = 126000
@classmethod
def get_instance(cls):
"""Get an instance of the AlertMerger, using a cached version if possible."""
if not cls.ALERT_MERGER:
cls.ALERT_MERGER = AlertMerger()
return cls.ALERT_MERGER
def __init__(self):
self.table = AlertTable(os.environ['ALERTS_TABLE'])
self.alert_proc = os.environ['ALERT_PROCESSOR']
self.alert_proc_timeout = int(os.environ['ALERT_PROCESSOR_TIMEOUT_SEC'])
self.lambda_client = boto3.client('lambda')
def _get_alerts(self, rule_name):
"""Build a list of Alert instances triggered from the given rule name."""
alerts = []
for record in self.table.get_alert_records(rule_name, self.alert_proc_timeout):
try:
alerts.append(Alert.create_from_dynamo_record(record))
except AlertCreationError:
LOGGER.exception('Invalid alert record %s', record)
return alerts
@staticmethod
def _merge_groups(alerts):
"""Gather alerts into groupings which can be merged together and sent now.
Args:
alerts (list): List of Alert instances with defined merge configuration.
Returns:
list<AlertMergeGroup>: Each returned merge group has the following properties:
(1) The oldest alert is older than its merge window (i.e. should be sent now), AND
(2) All alerts in the merge group fit within a single merge window, AND
(3) All alerts in the merge group have the same values for all of their merge keys.
Alerts which are too recent to fit in any merge group are excluded from the results.
"""
merge_groups = []
for alert in sorted(alerts):
# Iterate over alerts (in order of creation) and try to add them to each merge group.
if not any(group.add(alert) for group in merge_groups):
# The alert doesn't fit in any merge group - try creating a new one.
if datetime.utcnow() < alert.created + alert.merge_window:
# This alert is too recent - no other alerts can be merged. Stop here.
break
merge_groups.append(AlertMergeGroup(alert))
return merge_groups
def _dispatch_alert(self, alert):
"""Dispatch a single alert to the alert processor."""
alert.attempts += 1
LOGGER.info('Dispatching %s to %s (attempt %d)', alert, self.alert_proc, alert.attempts)
MetricLogger.log_metric(ALERT_MERGER_NAME, MetricLogger.ALERT_ATTEMPTS, alert.attempts)
record_payload = json.dumps(
alert.dynamo_record(), cls=Alert.AlertEncoder, separators=(',', ':'))
if len(record_payload) <= self.MAX_LAMBDA_PAYLOAD_SIZE:
# The entire alert fits in the Lambda payload - send it all
payload = record_payload
else:
# The alert is too big - the alert processor will have to pull it from Dynamo
payload = json.dumps(alert.dynamo_key)
self.lambda_client.invoke(
FunctionName=self.alert_proc,
InvocationType='Event',
Payload=payload,
Qualifier='production'
)
alert.dispatched = datetime.utcnow()
self.table.mark_as_dispatched(alert)
def dispatch(self):
"""Find and dispatch all pending alerts to the alert processor."""
# To reduce the API calls to Dynamo, batch all additions and deletions until the end.
merged_alerts = [] # List of newly created merge alerts
alerts_to_delete = [] # List of alerts which can be deleted
# TODO: Find a way to avoid a full table scan just to get rule names
for rule_name in self.table.rule_names():
alerts = self._get_alerts(rule_name)
if not alerts:
continue
merge_enabled_alerts = []
for alert in alerts:
if alert.remaining_outputs:
# If an alert still has pending outputs, it needs to be sent immediately.
# For example, all alerts are sent to the default firehose now even if they will
# later be merged when sending to other outputs.
self._dispatch_alert(alert)
elif alert.merge_enabled:
# This alert has finished sending to non-merged outputs; it is now a candidate
# for alert merging.
merge_enabled_alerts.append(alert)
else:
# This alert has sent successfully but doesn't need to be merged.
# It should have been deleted by the alert processor, but we can do it now.
alerts_to_delete.append(alert)
for group in self._merge_groups(merge_enabled_alerts):
# Create a new merged Alert.
new_alert = Alert.merge(group.alerts)
LOGGER.info('Merged %d alerts into a new alert with ID %s',
len(group.alerts), new_alert.alert_id)
merged_alerts.append(new_alert)
# Since we already guaranteed that the original alerts have sent to the unmerged
# outputs (e.g. default firehose), they can be safely marked for deletion.
alerts_to_delete.extend(group.alerts)
if merged_alerts:
# Add new merged alerts to the alerts table and send them to the alert processor.
self.table.add_alerts(merged_alerts)
for alert in merged_alerts:
self._dispatch_alert(alert)
if alerts_to_delete:
self.table.delete_alerts([
(alert.rule_name, alert.alert_id) for alert in alerts_to_delete
])
def handler(event, context): # pylint: disable=unused-argument
"""Entry point for the alert merger."""
AlertMerger.get_instance().dispatch()
| []
| []
| [
"ALERTS_TABLE",
"ALERT_PROCESSOR_TIMEOUT_SEC",
"ALERT_PROCESSOR"
]
| [] | ["ALERTS_TABLE", "ALERT_PROCESSOR_TIMEOUT_SEC", "ALERT_PROCESSOR"] | python | 3 | 0 | |
sos/plugins/morpheus_rabbitmq.py | from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
import os
import yaml
from sos import utilities
class MorpheusRabbitMQ(Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin):
"""Morpheus Embedded RabbitMQ
"""
short_desc = 'Morpheus RabbitMQ Service'
plugin_name = 'morpheus_rabbitmq'
profiles = ('services',)
rmq_embedded = True
rmq_config_file = ""
morpheus_application_yml = "/opt/morpheus/conf/application.yml"
files = (morpheus_application_yml,)
def check_rmq_embedded(self):
rmq_status_local = self.get_command_output("morpheus-ctl status rabbitmq")
if not rmq_status_local['output']:
self.rmq_embedded = False
def get_remote_hostnames_ports(self):
if os.path.isfile(self.morpheus_application_yml):
with open(self.morpheus_application_yml) as appyml:
appyml_data = yaml.load(appyml, Loader=yaml.Loader)
rmq_details = []
rmq_config = appyml_data['environments']['production']['rabbitmq']['connectionFactories']
for factory in rmq_config:
rmq_details.append(factory)
return rmq_details
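    # Expected application.yml shape (illustrative, inferred from the lookup
    # path above):
    #
    # environments:
    #   production:
    #     rabbitmq:
    #       connectionFactories:
    #         - hostname: 127.0.0.1
    #           port: 5672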
def setup(self):
self.check_rmq_embedded()
if self.rmq_embedded:
            # take a copy -- os.environ.update() below mutates in place
            original_env = os.environ.copy()
envopts = {
# 'HOME': '/opt/morpheus/embedded/rabbitmq',
'PREPATH': '/opt/morpheus/bin:/opt/morpheus/embedded/bin:/opt/morpheus/embedded/sbin',
'ERL_EPMD_ADDRESS': '127.0.0.1',
'RABBITMQ_CONF_ENV_FILE': '/opt/morpheus/embedded/rabbitmq/etc/rabbitmq-env.conf',
'RABBITMQ_SYS_PREFIX': '/opt/morpheus/embedded/rabbitmq',
'RABBITMQ_CONFIG_FILE': '/opt/morpheus/embedded/rabbitmq/etc/rabbit',
'RABBITMQ_MNESIA_BASE': '/var/opt/morpheus/rabbitmq/db',
'RABBITMQ_SCHEMA_DIR': '/opt/morpheus/embedded/rabbitmq/priv/schema',
'RABBITMQ_ENABLED_PLUGINS_FILE': '/opt/morpheus/embedded/rabbitmq/etc/enabled_plugins',
'RABBITMQ_LOG_BASE': '/var/log/morpheus/rabbitmq',
'RABBITMQ_NODE_IP_ADDRESS': '127.0.0.1',
'RABBITMQ_NODE_PORT': '5672',
                'RABBITMQ_PID_FILE': '/var/run/morpheus/rabbitmq/rabbit@localhost.pid',
'RABBITMQ_NODENAME': 'rabbit@localhost'
}
envopts['PATH'] = envopts['PREPATH'] + ":" + os.environ['PATH']
os.environ.update(envopts)
#envopts = {'HOME': '/opt/morpheus/embedded/rabbitmq'}
#self.add_cmd_output("rabbitmqctl report")
out = utilities.sos_get_command_output("rabbitmqctl report")
self.add_string_as_file(out['output'], "rabbitmqctl_report_out")
            os.environ.clear()
            os.environ.update(original_env)
self.add_copy_spec([
"/opt/morpheus/embedded/rabbitmq/etc/*",
"/etc/security/limits.d/",
"/etc/systemd/"
])
self.add_copy_spec([
"/var/log/morpheus/rabbitmq/*",
])
# else:
# # sockettest = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# rmq_endpoints = self.get_remote_hostnames_ports()
# localhosts = ['127.0.0.1', 'localhost']
# if any(s in rmq_endpoints[0]['hostname'] for s in localhosts):
# out = utilities.sos_get_command_output("rabbitmqctl report")
# if out['status'] is not 0:
# for line in out['output']:
# if "mismatch" in line:
# quote_split = line.split('"')
# real_rabbitmq = quote_split[1]
# self.add_string_as_file(real_rabbitmq, "rabbit_nodename")
# out = utilities.sos_get_command_output("rabbitmqctl report",
# env={'RABBITMQ_NODENAME': real_rabbitmq})
# self.add_string_as_file(out['output'], "rabbitmqctl_report_out")
| []
| []
| [
"PATH"
]
| [] | ["PATH"] | python | 1 | 0 | |
mpi_test/test_pyss.py | import scipy.io
import numpy
from pyss.util.contour import Circle, Ellipse
A = scipy.io.mmread("matrix/cage4.mtx")
B = scipy.sparse.eye(9)
contour = Ellipse(real=200, imag=0.3, shift=900)
def create_source(x, y):
return numpy.eye(x, y)
option = {'l': 2, 'm': 1, 'n': 12, 'source': create_source}
| []
| []
| []
| [] | [] | python | null | null | null |
apiserver.go | package main
/*
Copyright 2017-2018 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import (
"crypto/tls"
"crypto/x509"
"io/ioutil"
"net/http"
"os"
"strconv"
log "github.com/Sirupsen/logrus"
"github.com/crunchydata/postgres-operator/apiserver"
"github.com/crunchydata/postgres-operator/apiserver/backrestservice"
"github.com/crunchydata/postgres-operator/apiserver/backupservice"
"github.com/crunchydata/postgres-operator/apiserver/clusterservice"
"github.com/crunchydata/postgres-operator/apiserver/configservice"
"github.com/crunchydata/postgres-operator/apiserver/dfservice"
"github.com/crunchydata/postgres-operator/apiserver/failoverservice"
"github.com/crunchydata/postgres-operator/apiserver/ingestservice"
"github.com/crunchydata/postgres-operator/apiserver/labelservice"
"github.com/crunchydata/postgres-operator/apiserver/loadservice"
"github.com/crunchydata/postgres-operator/apiserver/pgbouncerservice"
"github.com/crunchydata/postgres-operator/apiserver/pgpoolservice"
"github.com/crunchydata/postgres-operator/apiserver/policyservice"
"github.com/crunchydata/postgres-operator/apiserver/pvcservice"
"github.com/crunchydata/postgres-operator/apiserver/reloadservice"
"github.com/crunchydata/postgres-operator/apiserver/scheduleservice"
"github.com/crunchydata/postgres-operator/apiserver/statusservice"
"github.com/crunchydata/postgres-operator/apiserver/upgradeservice"
"github.com/crunchydata/postgres-operator/apiserver/userservice"
"github.com/crunchydata/postgres-operator/apiserver/versionservice"
"github.com/gorilla/mux"
)
const serverCert = "/config/server.crt"
const serverKey = "/config/server.key"
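// Both files are expected to be mounted into the container at /config (for
// example from a Kubernetes Secret); this is an assumption based on the
// hard-coded paths above, not something this program configures itself.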
func main() {
PORT := "8443"
tmp := os.Getenv("PORT")
if tmp != "" {
PORT = tmp
}
debugFlag := os.Getenv("CRUNCHY_DEBUG")
if debugFlag == "true" {
log.SetLevel(log.DebugLevel)
log.Debug("debug flag set to true")
} else {
log.Info("debug flag set to false")
}
tmp = os.Getenv("TLS_NO_VERIFY")
if tmp == "true" {
log.Debug("TLS_NO_VERIFY set to true")
} else {
tmp = "false"
log.Debug("TLS_NO_VERIFY set to false")
}
tlsNoVerify, _ := strconv.ParseBool(tmp)
log.Infoln("postgres-operator apiserver starts")
apiserver.Initialize()
r := mux.NewRouter()
r.HandleFunc("/version", versionservice.VersionHandler)
r.HandleFunc("/policies", policyservice.CreatePolicyHandler)
r.HandleFunc("/policies/{name}", policyservice.ShowPolicyHandler).Methods("GET")
	// delete exposed as a GET endpoint
r.HandleFunc("/policiesdelete/{name}", policyservice.DeletePolicyHandler).Methods("GET")
r.HandleFunc("/pvc/{pvcname}", pvcservice.ShowPVCHandler).Methods("GET")
r.HandleFunc("/policies/apply", policyservice.ApplyPolicyHandler).Methods("POST")
r.HandleFunc("/ingest", ingestservice.CreateIngestHandler).Methods("POST")
r.HandleFunc("/ingest/{name}", ingestservice.ShowIngestHandler).Methods("GET")
	// delete exposed as a GET endpoint
r.HandleFunc("/ingestdelete/{name}", ingestservice.DeleteIngestHandler).Methods("GET")
r.HandleFunc("/label", labelservice.LabelHandler).Methods("POST")
r.HandleFunc("/load", loadservice.LoadHandler).Methods("POST")
r.HandleFunc("/user", userservice.UserHandler).Methods("POST")
r.HandleFunc("/users", userservice.CreateUserHandler).Methods("POST")
r.HandleFunc("/users/{name}", userservice.ShowUserHandler).Methods("GET")
	// delete exposed as a GET endpoint
r.HandleFunc("/usersdelete/{name}", userservice.DeleteUserHandler).Methods("GET")
r.HandleFunc("/upgrades", upgradeservice.CreateUpgradeHandler).Methods("POST")
r.HandleFunc("/upgrades/{name}", upgradeservice.ShowUpgradeHandler).Methods("GET")
	// delete exposed as a GET endpoint
r.HandleFunc("/upgradesdelete/{name}", upgradeservice.DeleteUpgradeHandler).Methods("GET")
r.HandleFunc("/clusters", clusterservice.CreateClusterHandler).Methods("POST")
r.HandleFunc("/clusters/{name}", clusterservice.ShowClusterHandler).Methods("GET")
	// delete exposed as a GET endpoint
r.HandleFunc("/clustersdelete/{name}", clusterservice.DeleteClusterHandler).Methods("GET")
r.HandleFunc("/clusters/test/{name}", clusterservice.TestClusterHandler)
r.HandleFunc("/clusters/scale/{name}", clusterservice.ScaleClusterHandler)
r.HandleFunc("/scale/{name}", clusterservice.ScaleQueryHandler).Methods("GET")
r.HandleFunc("/scaledown/{name}", clusterservice.ScaleDownHandler).Methods("GET")
r.HandleFunc("/status", statusservice.StatusHandler)
r.HandleFunc("/df/{name}", dfservice.DfHandler)
r.HandleFunc("/config", configservice.ShowConfigHandler)
r.HandleFunc("/backups/{name}", backupservice.ShowBackupHandler).Methods("GET")
	// delete exposed as a GET endpoint
r.HandleFunc("/backupsdelete/{name}", backupservice.DeleteBackupHandler).Methods("GET")
r.HandleFunc("/backups", backupservice.CreateBackupHandler).Methods("POST")
r.HandleFunc("/backrestbackup", backrestservice.CreateBackupHandler).Methods("POST")
r.HandleFunc("/backrest/{name}", backrestservice.ShowBackrestHandler).Methods("GET")
r.HandleFunc("/restore", backrestservice.RestoreHandler).Methods("POST")
r.HandleFunc("/reload", reloadservice.ReloadHandler).Methods("POST")
r.HandleFunc("/failover", failoverservice.CreateFailoverHandler).Methods("POST")
r.HandleFunc("/failover/{name}", failoverservice.QueryFailoverHandler).Methods("GET")
r.HandleFunc("/pgbouncer", pgbouncerservice.CreatePgbouncerHandler).Methods("POST")
r.HandleFunc("/pgbouncer", pgbouncerservice.DeletePgbouncerHandler).Methods("DELETE")
r.HandleFunc("/pgbouncerdelete", pgbouncerservice.DeletePgbouncerHandler).Methods("POST")
r.HandleFunc("/pgpool", pgpoolservice.CreatePgpoolHandler).Methods("POST")
r.HandleFunc("/pgpooldelete", pgpoolservice.DeletePgpoolHandler).Methods("POST")
//schedule
r.HandleFunc("/schedule", scheduleservice.CreateScheduleHandler).Methods("POST")
r.HandleFunc("/scheduledelete", scheduleservice.DeleteScheduleHandler).Methods("POST")
r.HandleFunc("/scheduleshow", scheduleservice.ShowScheduleHandler).Methods("POST")
caCert, err := ioutil.ReadFile(serverCert)
if err != nil {
		log.Error(err)
		log.Error("could not read " + serverCert)
		os.Exit(2)
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
cfg := &tls.Config{
//ClientAuth: tls.RequireAndVerifyClientCert,
//specify pgo-apiserver in the CN....then, add ServerName: "pgo-apiserver",
ServerName: "pgo-apiserver",
InsecureSkipVerify: tlsNoVerify,
ClientCAs: caCertPool,
}
log.Info("listening on port " + PORT)
srv := &http.Server{
Addr: ":" + PORT,
Handler: r,
TLSConfig: cfg,
}
_, err = ioutil.ReadFile(serverKey)
if err != nil {
		log.Error(err)
		log.Error("could not read " + serverKey)
		os.Exit(2)
}
log.Fatal(srv.ListenAndServeTLS(serverCert, serverKey))
}
| [
"\"PORT\"",
"\"CRUNCHY_DEBUG\"",
"\"TLS_NO_VERIFY\""
]
| []
| [
"PORT",
"CRUNCHY_DEBUG",
"TLS_NO_VERIFY"
]
| [] | ["PORT", "CRUNCHY_DEBUG", "TLS_NO_VERIFY"] | go | 3 | 0 | |
src/test/java/com/shopify/ShopifyEndpointTest.java | package com.shopify;
import com.shopify.model.ShopifyPage;
import com.shopify.model.structs.ShopifyDeprecatedApiCall;
import com.shopify.model.structs.ShopifyProduct;
import org.glassfish.jersey.logging.LoggingFeature;
import org.junit.Before;
import org.junit.Test;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
public class ShopifyEndpointTest {
private static final String SHOP_SUBDOMAIN = System.getenv("SHOP_SUBDOMAIN");
private static final String ACCESS_TOKEN = System.getenv("ACCESS_TOKEN");
private ShopifySdk shopifySdk;
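    // These tests run against a live shop; export the two variables read above
    // before running, e.g. (tooling and token shape are illustrative only):
    //   SHOP_SUBDOMAIN=my-shop ACCESS_TOKEN=... mvn test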
@Before
public void setUp() {
shopifySdk = ShopifySdk.newBuilder()
.withSubdomain(SHOP_SUBDOMAIN)
.withAccessToken(ACCESS_TOKEN)
.withMaximumRequestRetryTimeout(5, TimeUnit.SECONDS)
.withMaximumRequestRetryRandomDelay(5, TimeUnit.SECONDS)
.withApiVersion("2021-04")
.build();
}
@Test
public void testShopifySdk() {
System.out.println(shopifySdk);
}
@Test
public void getShop() {
System.out.println(shopifySdk.getShop());
}
@Test
public void getProducts() {
final ShopifyPage<ShopifyProduct> shopifyProducts = shopifySdk.getProducts(null, 1);
System.out.println(shopifyProducts);
}
@Test
public void getProductCount() {
final int count = shopifySdk.getProductCount();
System.out.println(count);
}
@Test
public void getOrderCount() {
final int count = shopifySdk.getOrderCount();
System.out.println(count);
}
@Test
public void getDeprecatedApiCalls() {
final List<ShopifyDeprecatedApiCall> shopifyDeprecatedApiCalls = shopifySdk.getDeprecatedApiCalls();
System.out.println(shopifyDeprecatedApiCalls);
}
@Test
public void enableLogging() {
shopifySdk.getWebTarget().register(new LoggingFeature(java.util.logging.Logger.getLogger(ShopifySdk.class.getName()), Level.OFF, LoggingFeature.Verbosity.PAYLOAD_TEXT, 819200000));
System.out.println(shopifySdk.getShop());
}
}
| [
"\"SHOP_SUBDOMAIN\"",
"\"ACCESS_TOKEN\""
]
| []
| [
"ACCESS_TOKEN",
"SHOP_SUBDOMAIN"
]
| [] | ["ACCESS_TOKEN", "SHOP_SUBDOMAIN"] | java | 2 | 0 | |
contrib/pkg/testresource/command.go | package testresource
import (
"fmt"
"io/ioutil"
"os"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/types"
"github.com/openshift/hive/pkg/resource"
)
// NewTestResourceCommand returns a command to test resource functions
func NewTestResourceCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "resource SUB-COMMAND",
Short: "Utility to test resource commands (apply/patch)",
Long: "Contains utility commands to test the resource patch and apply functions",
Run: func(cmd *cobra.Command, args []string) {
cmd.Usage()
},
}
cmd.AddCommand(newApplyCommand())
cmd.AddCommand(newPatchCommand())
return cmd
}
func mustRead(file string) []byte {
content, err := ioutil.ReadFile(file)
if err != nil {
log.Fatalf("erorr reading file %s: %v", file, err)
}
return content
}
func newApplyCommand() *cobra.Command {
kubeconfigPath := ""
cmd := &cobra.Command{
Use: "apply RESOURCEFILE",
Short: "apply the given resource to the cluster",
Long: "Tests the Hive resource.Apply function",
Run: func(cmd *cobra.Command, args []string) {
if len(args) != 1 {
fmt.Printf("Please specify a file with a resource to apply\n")
cmd.Usage()
return
}
if len(kubeconfigPath) == 0 {
fmt.Printf("Please specify a Kubeconfig to use\n")
cmd.Usage()
return
}
content := mustRead(args[0])
kubeconfig := mustRead(kubeconfigPath)
helper := resource.NewHelper(kubeconfig, log.WithField("cmd", "apply"))
info, err := helper.Info(content)
if err != nil {
fmt.Printf("Error obtaining info: %v\n", err)
return
}
name := types.NamespacedName{Namespace: info.Namespace, Name: info.Name}
fmt.Printf("The resource is %s (Kind: %s, APIVersion: %s)", name.String(), info.Kind, info.APIVersion)
applyResult, err := helper.Apply(content)
if err != nil {
fmt.Printf("Error applying: %v\n", err)
return
}
fmt.Printf("The resource was applied successfully: %s\n", applyResult)
},
}
cmd.Flags().StringVarP(&kubeconfigPath, "kubeconfig", "k", os.Getenv("KUBECONFIG"), "Kubeconfig file to connect to target server")
return cmd
}
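// Illustrative invocation, assuming the command tree from NewTestResourceCommand
// is wired into a CLI unchanged:
//
//	resource apply --kubeconfig ~/.kube/config resource.yaml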
func newPatchCommand() *cobra.Command {
var (
kubeconfigPath,
patchTypeStr,
namespace,
name,
kind,
apiVersion string
patchTypes = map[string]types.PatchType{"json": types.JSONPatchType, "merge": types.MergePatchType, "strategic": types.StrategicMergePatchType}
)
cmd := &cobra.Command{
Use: "patch PATCHFILE",
Short: "apply the given patch to the cluster",
Long: "Tests the Hive resource.Patch function",
Run: func(cmd *cobra.Command, args []string) {
if len(args) != 1 {
fmt.Printf("Please specify a file with a patch to apply\n")
cmd.Usage()
return
}
if len(kubeconfigPath) == 0 {
fmt.Printf("Please specify a Kubeconfig to use\n")
cmd.Usage()
return
}
_, ok := patchTypes[patchTypeStr]
if !ok {
fmt.Printf("Invalid patch type %s\n", patchTypeStr)
cmd.Usage()
return
}
if len(name) == 0 {
fmt.Printf("name is required\n")
cmd.Usage()
return
}
if len(kind) == 0 {
fmt.Printf("kind is required\n")
cmd.Usage()
return
}
if len(apiVersion) == 0 {
fmt.Printf("apiVersion is required\n")
cmd.Usage()
return
}
content := mustRead(args[0])
kubeconfig := mustRead(kubeconfigPath)
helper := resource.NewHelper(kubeconfig, log.WithField("cmd", "patch"))
err := helper.Patch(types.NamespacedName{Name: name, Namespace: namespace}, kind, apiVersion, content, patchTypeStr)
if err != nil {
fmt.Printf("Error: %v\n", err)
return
}
fmt.Printf("The patch was applied successfully.\n")
},
}
cmd.Flags().StringVarP(&kubeconfigPath, "kubeconfig", "k", os.Getenv("KUBECONFIG"), "Kubeconfig file to connect to target server")
cmd.Flags().StringVar(&patchTypeStr, "type", "strategic", "Type of patch to apply. Available types are: json, merge, strategic")
cmd.Flags().StringVar(&namespace, "namespace", "", "Namespace of resource to patch")
cmd.Flags().StringVar(&name, "name", "", "Name of the resource to patch")
cmd.Flags().StringVar(&kind, "kind", "", "Kind of the resource to patch")
cmd.Flags().StringVar(&apiVersion, "api-version", "", "API version of the resource to patch (ie. hive.openshift.io/v1)")
return cmd
}
| [
"\"KUBECONFIG\"",
"\"KUBECONFIG\""
]
| []
| [
"KUBECONFIG"
]
| [] | ["KUBECONFIG"] | go | 1 | 0 | |
djweb/djweb/wsgi.py | """
WSGI config for djweb project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djweb.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
vendor/github.com/ethereum/go-ethereum/cmd/faucet/faucet.go | // Copyright 2017 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
// faucet is a Ether faucet backed by a light client.
package main
//go:generate go-bindata -nometadata -o website.go faucet.html
//go:generate gofmt -w -s website.go
import (
"bytes"
"context"
"encoding/json"
"errors"
"flag"
"fmt"
"html/template"
"io/ioutil"
"math"
"math/big"
"net/http"
"net/url"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethstats"
"github.com/ethereum/go-ethereum/les"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/nat"
"github.com/ethereum/go-ethereum/params"
"golang.org/x/net/websocket"
)
var (
genesisFlag = flag.String("genesis", "", "Genesis json file to seed the chain with")
apiPortFlag = flag.Int("apiport", 8080, "Listener port for the HTTP API connection")
ethPortFlag = flag.Int("ethport", 30303, "Listener port for the devp2p connection")
bootFlag = flag.String("bootnodes", "", "Comma separated bootnode enode URLs to seed with")
netFlag = flag.Uint64("network", 0, "Network ID to use for the Ethereum protocol")
statsFlag = flag.String("ethstats", "", "Ethstats network monitoring auth string")
netnameFlag = flag.String("faucet.name", "", "Network name to assign to the faucet")
payoutFlag = flag.Int("faucet.amount", 1, "Number of Ethers to pay out per user request")
minutesFlag = flag.Int("faucet.minutes", 1440, "Number of minutes to wait between funding rounds")
tiersFlag = flag.Int("faucet.tiers", 3, "Number of funding tiers to enable (x3 time, x2.5 funds)")
accJSONFlag = flag.String("account.json", "", "Key json file to fund user requests with")
accPassFlag = flag.String("account.pass", "", "Decryption password to access faucet funds")
captchaToken = flag.String("captcha.token", "", "Recaptcha site key to authenticate client side")
captchaSecret = flag.String("captcha.secret", "", "Recaptcha secret key to authenticate server side")
noauthFlag = flag.Bool("noauth", false, "Enables funding requests without authentication")
logFlag = flag.Int("loglevel", 3, "Log level to use for Ethereum and the faucet")
)
var (
ether = new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil)
)
var (
gitCommit = "" // Git SHA1 commit hash of the release (set via linker flags)
gitDate = "" // Git commit date YYYYMMDD of the release (set via linker flags)
)
func main() {
// Parse the flags and set up the logger to print everything requested
flag.Parse()
log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*logFlag), log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
// Construct the payout tiers
amounts := make([]string, *tiersFlag)
periods := make([]string, *tiersFlag)
for i := 0; i < *tiersFlag; i++ {
// Calculate the amount for the next tier and format it
amount := float64(*payoutFlag) * math.Pow(2.5, float64(i))
amounts[i] = fmt.Sprintf("%s Ethers", strconv.FormatFloat(amount, 'f', -1, 64))
if amount == 1 {
amounts[i] = strings.TrimSuffix(amounts[i], "s")
}
// Calculate the period for the next tier and format it
period := *minutesFlag * int(math.Pow(3, float64(i)))
periods[i] = fmt.Sprintf("%d mins", period)
if period%60 == 0 {
period /= 60
periods[i] = fmt.Sprintf("%d hours", period)
if period%24 == 0 {
period /= 24
periods[i] = fmt.Sprintf("%d days", period)
}
}
if period == 1 {
periods[i] = strings.TrimSuffix(periods[i], "s")
}
}
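	// Worked example with the flag defaults (-faucet.amount=1,
	// -faucet.minutes=1440, -faucet.tiers=3): amounts come out as 1, 2.5 and
	// 6.25 Ethers, with waiting periods of 1 day, 3 days and 9 days.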
// Load up and render the faucet website
tmpl, err := Asset("faucet.html")
if err != nil {
log.Crit("Failed to load the faucet template", "err", err)
}
website := new(bytes.Buffer)
err = template.Must(template.New("").Parse(string(tmpl))).Execute(website, map[string]interface{}{
"Network": *netnameFlag,
"Amounts": amounts,
"Periods": periods,
"Recaptcha": *captchaToken,
"NoAuth": *noauthFlag,
})
if err != nil {
log.Crit("Failed to render the faucet template", "err", err)
}
// Load and parse the genesis block requested by the user
blob, err := ioutil.ReadFile(*genesisFlag)
if err != nil {
log.Crit("Failed to read genesis block contents", "genesis", *genesisFlag, "err", err)
}
genesis := new(core.Genesis)
if err = json.Unmarshal(blob, genesis); err != nil {
log.Crit("Failed to parse genesis block json", "err", err)
}
// Convert the bootnodes to internal enode representations
var enodes []*discv5.Node
for _, boot := range strings.Split(*bootFlag, ",") {
if url, err := discv5.ParseNode(boot); err == nil {
enodes = append(enodes, url)
} else {
log.Error("Failed to parse bootnode URL", "url", boot, "err", err)
}
}
// Load up the account key and decrypt its password
if blob, err = ioutil.ReadFile(*accPassFlag); err != nil {
log.Crit("Failed to read account password contents", "file", *accPassFlag, "err", err)
}
// Delete trailing newline in password
pass := strings.TrimSuffix(string(blob), "\n")
ks := keystore.NewKeyStore(filepath.Join(os.Getenv("HOME"), ".faucet", "keys"), keystore.StandardScryptN, keystore.StandardScryptP)
if blob, err = ioutil.ReadFile(*accJSONFlag); err != nil {
log.Crit("Failed to read account key contents", "file", *accJSONFlag, "err", err)
}
acc, err := ks.Import(blob, pass, pass)
if err != nil {
log.Crit("Failed to import faucet signer account", "err", err)
}
ks.Unlock(acc, pass)
// Assemble and start the faucet light service
faucet, err := newFaucet(genesis, *ethPortFlag, enodes, *netFlag, *statsFlag, ks, website.Bytes())
if err != nil {
log.Crit("Failed to start faucet", "err", err)
}
defer faucet.close()
if err := faucet.listenAndServe(*apiPortFlag); err != nil {
log.Crit("Failed to launch faucet API", "err", err)
}
}
// request represents an accepted funding request.
type request struct {
Avatar string `json:"avatar"` // Avatar URL to make the UI nicer
Account common.Address `json:"account"` // Ethereum address being funded
Time time.Time `json:"time"` // Timestamp when the request was accepted
Tx *types.Transaction `json:"tx"` // Transaction funding the account
}
// faucet represents a crypto faucet backed by an Ethereum light client.
type faucet struct {
config *params.ChainConfig // Chain configurations for signing
stack *node.Node // Ethereum protocol stack
client *ethclient.Client // Client connection to the Ethereum chain
index []byte // Index page to serve up on the web
keystore *keystore.KeyStore // Keystore containing the single signer
account accounts.Account // Account funding user faucet requests
head *types.Header // Current head header of the faucet
balance *big.Int // Current balance of the faucet
nonce uint64 // Current pending nonce of the faucet
price *big.Int // Current gas price to issue funds with
conns []*websocket.Conn // Currently live websocket connections
timeouts map[string]time.Time // History of users and their funding timeouts
reqs []*request // Currently pending funding requests
update chan struct{} // Channel to signal request updates
lock sync.RWMutex // Lock protecting the faucet's internals
}
func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network uint64, stats string, ks *keystore.KeyStore, index []byte) (*faucet, error) {
// Assemble the raw devp2p protocol stack
stack, err := node.New(&node.Config{
Name: "geth",
Version: params.VersionWithCommit(gitCommit, gitDate),
DataDir: filepath.Join(os.Getenv("HOME"), ".faucet"),
P2P: p2p.Config{
NAT: nat.Any(),
NoDiscovery: true,
DiscoveryV5: true,
ListenAddr: fmt.Sprintf(":%d", port),
MaxPeers: 25,
BootstrapNodesV5: enodes,
},
})
if err != nil {
return nil, err
}
// Assemble the Ethereum light client protocol
if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
cfg := eth.DefaultConfig
cfg.SyncMode = downloader.LightSync
cfg.NetworkId = network
cfg.Genesis = genesis
return les.New(ctx, &cfg)
}); err != nil {
return nil, err
}
// Assemble the ethstats monitoring and reporting service
if stats != "" {
if err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
var serv *les.LightEthereum
ctx.Service(&serv)
return ethstats.New(stats, nil, serv)
}); err != nil {
return nil, err
}
}
// Boot up the client and ensure it connects to bootnodes
if err := stack.Start(); err != nil {
return nil, err
}
for _, boot := range enodes {
old, err := enode.ParseV4(boot.String())
if err == nil {
stack.Server().AddPeer(old)
}
}
// Attach to the client and retrieve any interesting metadata
api, err := stack.Attach()
if err != nil {
stack.Stop()
return nil, err
}
client := ethclient.NewClient(api)
return &faucet{
config: genesis.Config,
stack: stack,
client: client,
index: index,
keystore: ks,
account: ks.Accounts()[0],
timeouts: make(map[string]time.Time),
update: make(chan struct{}, 1),
}, nil
}
// close terminates the Ethereum connection and tears down the faucet.
func (f *faucet) close() error {
return f.stack.Close()
}
// listenAndServe registers the HTTP handlers for the faucet and boots it up
// for servicing user funding requests.
func (f *faucet) listenAndServe(port int) error {
go f.loop()
http.HandleFunc("/", f.webHandler)
http.Handle("/api", websocket.Handler(f.apiHandler))
return http.ListenAndServe(fmt.Sprintf(":%d", port), nil)
}
// webHandler handles all non-api requests, simply flattening and returning the
// faucet website.
func (f *faucet) webHandler(w http.ResponseWriter, r *http.Request) {
w.Write(f.index)
}
// apiHandler handles requests for Ether grants and transaction statuses.
func (f *faucet) apiHandler(conn *websocket.Conn) {
// Start tracking the connection and drop at the end
defer conn.Close()
f.lock.Lock()
f.conns = append(f.conns, conn)
f.lock.Unlock()
defer func() {
f.lock.Lock()
for i, c := range f.conns {
if c == conn {
f.conns = append(f.conns[:i], f.conns[i+1:]...)
break
}
}
f.lock.Unlock()
}()
// Gather the initial stats from the network to report
var (
head *types.Header
balance *big.Int
nonce uint64
err error
)
for head == nil || balance == nil {
// Retrieve the current stats cached by the faucet
f.lock.RLock()
if f.head != nil {
head = types.CopyHeader(f.head)
}
if f.balance != nil {
balance = new(big.Int).Set(f.balance)
}
nonce = f.nonce
f.lock.RUnlock()
if head == nil || balance == nil {
// Report the faucet offline until initial stats are ready
if err = sendError(conn, errors.New("Faucet offline")); err != nil {
log.Warn("Failed to send faucet error to client", "err", err)
return
}
time.Sleep(3 * time.Second)
}
}
// Send over the initial stats and the latest header
if err = send(conn, map[string]interface{}{
"funds": new(big.Int).Div(balance, ether),
"funded": nonce,
"peers": f.stack.Server().PeerCount(),
"requests": f.reqs,
}, 3*time.Second); err != nil {
log.Warn("Failed to send initial stats to client", "err", err)
return
}
if err = send(conn, head, 3*time.Second); err != nil {
log.Warn("Failed to send initial header to client", "err", err)
return
}
// Keep reading requests from the websocket until the connection breaks
for {
// Fetch the next funding request and validate against github
var msg struct {
URL string `json:"url"`
Tier uint `json:"tier"`
Captcha string `json:"captcha"`
}
if err = websocket.JSON.Receive(conn, &msg); err != nil {
return
}
if !*noauthFlag && !strings.HasPrefix(msg.URL, "https://gist.github.com/") && !strings.HasPrefix(msg.URL, "https://twitter.com/") &&
!strings.HasPrefix(msg.URL, "https://plus.google.com/") && !strings.HasPrefix(msg.URL, "https://www.facebook.com/") {
if err = sendError(conn, errors.New("URL doesn't link to supported services")); err != nil {
log.Warn("Failed to send URL error to client", "err", err)
return
}
continue
}
if msg.Tier >= uint(*tiersFlag) {
if err = sendError(conn, errors.New("Invalid funding tier requested")); err != nil {
log.Warn("Failed to send tier error to client", "err", err)
return
}
continue
}
log.Info("Faucet funds requested", "url", msg.URL, "tier", msg.Tier)
// If captcha verifications are enabled, make sure we're not dealing with a robot
if *captchaToken != "" {
form := url.Values{}
form.Add("secret", *captchaSecret)
form.Add("response", msg.Captcha)
res, err := http.PostForm("https://www.google.com/recaptcha/api/siteverify", form)
if err != nil {
if err = sendError(conn, err); err != nil {
log.Warn("Failed to send captcha post error to client", "err", err)
return
}
continue
}
var result struct {
Success bool `json:"success"`
Errors json.RawMessage `json:"error-codes"`
}
err = json.NewDecoder(res.Body).Decode(&result)
res.Body.Close()
if err != nil {
if err = sendError(conn, err); err != nil {
log.Warn("Failed to send captcha decode error to client", "err", err)
return
}
continue
}
if !result.Success {
log.Warn("Captcha verification failed", "err", string(result.Errors))
if err = sendError(conn, errors.New("Beep-bop, you're a robot!")); err != nil {
log.Warn("Failed to send captcha failure to client", "err", err)
return
}
continue
}
}
// Retrieve the Ethereum address to fund, the requesting user and a profile picture
var (
username string
avatar string
address common.Address
)
switch {
case strings.HasPrefix(msg.URL, "https://gist.github.com/"):
if err = sendError(conn, errors.New("GitHub authentication discontinued at the official request of GitHub")); err != nil {
log.Warn("Failed to send GitHub deprecation to client", "err", err)
return
}
continue
case strings.HasPrefix(msg.URL, "https://plus.google.com/"):
if err = sendError(conn, errors.New("Google+ authentication discontinued as the service was sunset")); err != nil {
log.Warn("Failed to send Google+ deprecation to client", "err", err)
return
}
continue
case strings.HasPrefix(msg.URL, "https://twitter.com/"):
username, avatar, address, err = authTwitter(msg.URL)
case strings.HasPrefix(msg.URL, "https://www.facebook.com/"):
username, avatar, address, err = authFacebook(msg.URL)
case *noauthFlag:
username, avatar, address, err = authNoAuth(msg.URL)
default:
err = errors.New("Something funky happened, please open an issue at https://github.com/ethereum/go-ethereum/issues")
}
if err != nil {
if err = sendError(conn, err); err != nil {
log.Warn("Failed to send prefix error to client", "err", err)
return
}
continue
}
log.Info("Faucet request valid", "url", msg.URL, "tier", msg.Tier, "user", username, "address", address)
// Ensure the user didn't request funds too recently
f.lock.Lock()
var (
fund bool
timeout time.Time
)
if timeout = f.timeouts[username]; time.Now().After(timeout) {
// User wasn't funded recently, create the funding transaction
amount := new(big.Int).Mul(big.NewInt(int64(*payoutFlag)), ether)
amount = new(big.Int).Mul(amount, new(big.Int).Exp(big.NewInt(5), big.NewInt(int64(msg.Tier)), nil))
amount = new(big.Int).Div(amount, new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(msg.Tier)), nil))
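// Note: multiplying by 5^tier and dividing by 2^tier reproduces the 2.5^tier
// scaling shown on the website, while staying in integer-only big.Int
// arithmetic so no precision is lost on the Wei amounts.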
tx := types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, amount, 21000, f.price, nil)
signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainID)
if err != nil {
f.lock.Unlock()
if err = sendError(conn, err); err != nil {
log.Warn("Failed to send transaction creation error to client", "err", err)
return
}
continue
}
// Submit the transaction and mark as funded if successful
if err := f.client.SendTransaction(context.Background(), signed); err != nil {
f.lock.Unlock()
if err = sendError(conn, err); err != nil {
log.Warn("Failed to send transaction transmission error to client", "err", err)
return
}
continue
}
f.reqs = append(f.reqs, &request{
Avatar: avatar,
Account: address,
Time: time.Now(),
Tx: signed,
})
f.timeouts[username] = time.Now().Add(time.Duration(*minutesFlag*int(math.Pow(3, float64(msg.Tier)))) * time.Minute)
fund = true
}
f.lock.Unlock()
// Send an error if funding came too frequently, otherwise a success
if !fund {
if err = sendError(conn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(timeout.Sub(time.Now())))); err != nil { // nolint: gosimple
log.Warn("Failed to send funding error to client", "err", err)
return
}
continue
}
if err = sendSuccess(conn, fmt.Sprintf("Funding request accepted for %s into %s", username, address.Hex())); err != nil {
log.Warn("Failed to send funding success to client", "err", err)
return
}
select {
case f.update <- struct{}{}:
default:
}
}
}
// refresh attempts to retrieve the latest header from the chain and extract the
// associated faucet balance and nonce for connectivity caching.
func (f *faucet) refresh(head *types.Header) error {
// Ensure a state update does not run for too long
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
// If no header was specified, use the current chain head
var err error
if head == nil {
if head, err = f.client.HeaderByNumber(ctx, nil); err != nil {
return err
}
}
// Retrieve the balance, nonce and gas price from the current head
var (
balance *big.Int
nonce uint64
price *big.Int
)
if balance, err = f.client.BalanceAt(ctx, f.account.Address, head.Number); err != nil {
return err
}
if nonce, err = f.client.NonceAt(ctx, f.account.Address, head.Number); err != nil {
return err
}
if price, err = f.client.SuggestGasPrice(ctx); err != nil {
return err
}
// Everything succeeded, update the cached stats and eject old requests
f.lock.Lock()
f.head, f.balance = head, balance
f.price, f.nonce = price, nonce
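// Requests whose transaction nonce is below the account's current nonce have
// already been mined, so they can be dropped from the pending list.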
for len(f.reqs) > 0 && f.reqs[0].Tx.Nonce() < f.nonce {
f.reqs = f.reqs[1:]
}
f.lock.Unlock()
return nil
}
// loop keeps waiting for interesting events and pushes them out to connected
// websockets.
func (f *faucet) loop() {
// Wait for chain events and push them to clients
heads := make(chan *types.Header, 16)
sub, err := f.client.SubscribeNewHead(context.Background(), heads)
if err != nil {
log.Crit("Failed to subscribe to head events", "err", err)
}
defer sub.Unsubscribe()
// Start a goroutine to update the state from head notifications in the background
update := make(chan *types.Header)
go func() {
for head := range update {
// New chain head arrived, query the current stats and stream to clients
timestamp := time.Unix(int64(head.Time), 0)
if time.Since(timestamp) > time.Hour {
log.Warn("Skipping faucet refresh, head too old", "number", head.Number, "hash", head.Hash(), "age", common.PrettyAge(timestamp))
continue
}
if err := f.refresh(head); err != nil {
log.Warn("Failed to update faucet state", "block", head.Number, "hash", head.Hash(), "err", err)
continue
}
// Faucet state retrieved, update locally and send to clients
f.lock.RLock()
log.Info("Updated faucet state", "number", head.Number, "hash", head.Hash(), "age", common.PrettyAge(timestamp), "balance", f.balance, "nonce", f.nonce, "price", f.price)
balance := new(big.Int).Div(f.balance, ether)
peers := f.stack.Server().PeerCount()
for _, conn := range f.conns {
if err := send(conn, map[string]interface{}{
"funds": balance,
"funded": f.nonce,
"peers": peers,
"requests": f.reqs,
}, time.Second); err != nil {
log.Warn("Failed to send stats to client", "err", err)
conn.Close()
continue
}
if err := send(conn, head, time.Second); err != nil {
log.Warn("Failed to send header to client", "err", err)
conn.Close()
}
}
f.lock.RUnlock()
}
}()
// Wait for various events and assign them to the appropriate background threads
for {
select {
case head := <-heads:
// New head arrived, send it for a state update if there's none running
select {
case update <- head:
default:
}
case <-f.update:
// Pending requests updated, stream to clients
f.lock.RLock()
for _, conn := range f.conns {
if err := send(conn, map[string]interface{}{"requests": f.reqs}, time.Second); err != nil {
log.Warn("Failed to send requests to client", "err", err)
conn.Close()
}
}
f.lock.RUnlock()
}
}
}
// send transmits a data packet to the remote end of the websocket, also
// setting a write deadline to prevent waiting forever on the node.
func send(conn *websocket.Conn, value interface{}, timeout time.Duration) error {
if timeout == 0 {
timeout = 60 * time.Second
}
conn.SetWriteDeadline(time.Now().Add(timeout))
return websocket.JSON.Send(conn, value)
}
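// Illustrative wire format (inferred from the helpers below; not a formal
// spec): errors and successes travel as single-field JSON objects, e.g.
// {"error": "Faucet offline"} or {"success": "Funding request accepted ..."}.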
// sendError transmits an error to the remote end of the websocket, also setting
// the write deadline to 1 second to prevent waiting forever.
func sendError(conn *websocket.Conn, err error) error {
return send(conn, map[string]string{"error": err.Error()}, time.Second)
}
// sendSuccess transmits a success message to the remote end of the websocket, also
// setting the write deadline to 1 second to prevent waiting forever.
func sendSuccess(conn *websocket.Conn, msg string) error {
return send(conn, map[string]string{"success": msg}, time.Second)
}
// authTwitter tries to authenticate a faucet request using Twitter posts, returning
// the username, avatar URL and Ethereum address to fund on success.
func authTwitter(url string) (string, string, common.Address, error) {
// Ensure the user specified a meaningful URL, no fancy nonsense
parts := strings.Split(url, "/")
if len(parts) < 4 || parts[len(parts)-2] != "status" {
return "", "", common.Address{}, errors.New("Invalid Twitter status URL")
}
// Twitter's API isn't really friendly with direct links. Still, we don't
// want to ask for read permissions from users, so just load the public posts
// and scrape them for the Ethereum address and profile URL.
res, err := http.Get(url)
if err != nil {
return "", "", common.Address{}, err
}
defer res.Body.Close()
// Resolve the username from the final redirect, no intermediate junk
parts = strings.Split(res.Request.URL.String(), "/")
if len(parts) < 4 || parts[len(parts)-2] != "status" {
return "", "", common.Address{}, errors.New("Invalid Twitter status URL")
}
username := parts[len(parts)-3]
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", common.Address{}, err
}
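// The first "0x" followed by 40 hex characters found in the page body is
// taken as the 20-byte address to fund; e.g. a tweet containing the
// (illustrative) string "0x0123456789abcdef0123456789abcdef01234567" matches.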
address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
if address == (common.Address{}) {
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
}
var avatar string
if parts = regexp.MustCompile("src=\"([^\"]+twimg.com/profile_images[^\"]+)\"").FindStringSubmatch(string(body)); len(parts) == 2 {
avatar = parts[1]
}
return username + "@twitter", avatar, address, nil
}
// authFacebook tries to authenticate a faucet request using Facebook posts,
// returning the username, avatar URL and Ethereum address to fund on success.
func authFacebook(url string) (string, string, common.Address, error) {
// Ensure the user specified a meaningful URL, no fancy nonsense
parts := strings.Split(url, "/")
if len(parts) < 4 || parts[len(parts)-2] != "posts" {
return "", "", common.Address{}, errors.New("Invalid Facebook post URL")
}
username := parts[len(parts)-3]
// Facebook's Graph API isn't really friendly with direct links. Still, we
// don't want to ask for read permissions from users, so just load the public
// posts and scrape them for the Ethereum address and profile URL.
res, err := http.Get(url)
if err != nil {
return "", "", common.Address{}, err
}
defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", common.Address{}, err
}
address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
if address == (common.Address{}) {
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
}
var avatar string
if parts = regexp.MustCompile("src=\"([^\"]+fbcdn.net[^\"]+)\"").FindStringSubmatch(string(body)); len(parts) == 2 {
avatar = parts[1]
}
return username + "@facebook", avatar, address, nil
}
// authNoAuth tries to interpret a faucet request as a plain Ethereum address,
// without actually performing any remote authentication. This mode is prone
// to Byzantine attacks, so only ever use it for truly private networks.
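// For example (illustrative): a request whose URL field is simply
// "0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef" gets funded directly, with the
// checksummed hex address plus an "@noauth" suffix used as the username.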
func authNoAuth(url string) (string, string, common.Address, error) {
address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(url))
if address == (common.Address{}) {
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
}
return address.Hex() + "@noauth", "", address, nil
}
| ["\"HOME\"", "\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 |
MaixPy/tools/kconfig/Kconfiglib/menuconfig.py | #!/usr/bin/env python
# Copyright (c) 2018-2019, Nordic Semiconductor ASA and Ulf Magnusson
# SPDX-License-Identifier: ISC
"""
Overview
========
A curses-based Python 2/3 menuconfig implementation. The interface should feel
familiar to people used to mconf ('make menuconfig').
Supports the same keys as mconf, and also supports a set of keybindings
inspired by Vi:
J/K : Down/Up
L : Enter menu/Toggle item
H : Leave menu
Ctrl-D/U: Page Down/Page Up
G/End : Jump to end of list
g/Home : Jump to beginning of list
[Space] toggles values if possible, and enters menus otherwise. [Enter] works
the other way around.
The mconf feature where pressing a key jumps to a menu entry with that
character in it in the current menu isn't supported. A jump-to feature for
jumping directly to any symbol (including invisible symbols), choice, menu or
comment (as in a Kconfig 'comment "Foo"') is available instead.
A few different modes are available:
F: Toggle show-help mode, which shows the help text of the currently selected
item in the window at the bottom of the menu display. This is handy when
browsing through options.
C: Toggle show-name mode, which shows the symbol name before each symbol menu
entry
A: Toggle show-all mode, which shows all items, including currently invisible
items and items that lack a prompt. Invisible items are drawn in a different
style to make them stand out.
Running
=======
menuconfig.py can be run either as a standalone executable or by calling the
menuconfig() function with an existing Kconfig instance. The second option is a
bit inflexible in that it will still load and save .config, etc.
When run in standalone mode, the top-level Kconfig file to load can be passed
as a command-line argument. With no argument, it defaults to "Kconfig".
The KCONFIG_CONFIG environment variable specifies the .config file to load (if
it exists) and save. If KCONFIG_CONFIG is unset, ".config" is used.
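For example (an illustrative invocation; the paths are placeholders):

  KCONFIG_CONFIG=build/.config python menuconfig.py path/to/Kconfig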
When overwriting a configuration file, the old version is saved to
<filename>.old (e.g. .config.old).
$srctree is supported through Kconfiglib.
Color schemes
=============
It is possible to customize the color scheme by setting the MENUCONFIG_STYLE
environment variable. For example, setting it to 'aquatic' will enable an
alternative, less yellow, more 'make menuconfig'-like color scheme, contributed
by Mitja Horvat (pinkfluid).
This is the current list of built-in styles:
- default classic Kconfiglib theme with a yellow accent
- monochrome colorless theme (uses only the bold and standout attributes);
this style is used if the terminal doesn't support colors
- aquatic blue tinted style loosely resembling the lxdialog theme
It is possible to customize the current style by changing colors of UI
elements on the screen. This is the list of elements that can be stylized:
- path Top row in the main display, with the menu path
- separator Separator lines between windows. Also used for the top line
in the symbol information display.
- list List of items, e.g. the main display
- selection Style for the selected item
- inv-list Like list, but for invisible items. Used in show-all mode.
- inv-selection Like selection, but for invisible items. Used in show-all
mode.
- help Help text windows at the bottom of various fullscreen
dialogs
- show-help Window showing the help text in show-help mode
- frame Frame around dialog boxes
- body Body of dialog boxes
- edit Edit box in pop-up dialogs
- jump-edit Edit box in jump-to dialog
- text Symbol information text
The color definition is a comma separated list of attributes:
- fg:COLOR Set the foreground/background colors. COLOR can be one of
* or * the basic 16 colors (black, red, green, yellow, blue,
- bg:COLOR magenta, cyan, white and brighter versions, for example,
brightred). On terminals that support more than 8 colors,
you can also directly put in a color number, e.g. fg:123
(hexadecimal and octal constants are accepted as well).
Colors outside the range -1..curses.COLORS-1 (which is
terminal-dependent) are ignored (with a warning). The COLOR
can be also specified using a RGB value in the HTML
notation, for example #RRGGBB. If the terminal supports
color changing, the color is rendered accurately.
Otherwise, the visually nearest color is used.
If the background or foreground color of an element is not
specified, it defaults to -1, representing the default
terminal foreground or background color.
Note: On some terminals a bright version of the color
implies bold.
- bold Use bold text
- underline Use underline text
- standout Standout text attribute (reverse color)
More often than not, some UI elements share the same color definition. In such
cases the right value may specify a UI element from which the color definition
will be copied. For example, "separator=help" will apply the current color
definition for "help" to "separator".
A keyword without the '=' is assumed to be a style template. The template name
is looked up in the built-in styles list and the style definition is expanded
in-place. With this, built-in styles can be used as basis for new styles.
For example, take the aquatic theme and give it a red selection bar:
MENUCONFIG_STYLE="aquatic selection=fg:white,bg:red"
If there's an error in the style definition or if a missing style is assigned
to, the assignment will be ignored, along with a warning being printed on
stderr.
The 'default' theme is always implicitly parsed first (or the 'monochrome'
theme if the terminal lacks colors), so the following two settings have the
same effect:
MENUCONFIG_STYLE="selection=fg:white,bg:red"
MENUCONFIG_STYLE="default selection=fg:white,bg:red"
Other features
==============
- Seamless terminal resizing
- No dependencies on *nix, as the 'curses' module is in the Python standard
library
- Unicode text entry
- Improved information screen compared to mconf:
* Expressions are split up by their top-level &&/|| operands to improve
readability
* Undefined symbols in expressions are pointed out
* Menus and comments have information displays
* Kconfig definitions are printed
* The include path is shown, listing the locations of the 'source'
statements that included the Kconfig file of the symbol (or other
item)
Limitations
===========
Doesn't work out of the box on Windows, but can be made to work with 'pip
install windows-curses'. See the
https://github.com/zephyrproject-rtos/windows-curses repository.
'pip install kconfiglib' on Windows automatically installs windows-curses
to make the menuconfig usable.
"""
from __future__ import print_function
import curses
import errno
import locale
import os
import re
import sys
import textwrap
from kconfiglib import Symbol, Choice, MENU, COMMENT, MenuNode, \
BOOL, TRISTATE, STRING, INT, HEX, \
AND, OR, \
expr_str, expr_value, split_expr, \
standard_sc_expr_str, \
TRI_TO_STR, TYPE_TO_STR, \
standard_kconfig, standard_config_filename
#
# Configuration variables
#
# If True, try to change LC_CTYPE to a UTF-8 locale if it is set to the C
# locale (which implies ASCII). This fixes curses Unicode I/O issues on systems
# with bad defaults. ncurses configures itself from the locale settings.
#
# Related PEP: https://www.python.org/dev/peps/pep-0538/
_CHANGE_C_LC_CTYPE_TO_UTF8 = True
# How many steps an implicit submenu will be indented. Implicit submenus are
# created when an item depends on the symbol before it. Note that symbols
# defined with 'menuconfig' create a separate menu instead of indenting.
_SUBMENU_INDENT = 4
# Number of steps for Page Up/Down to jump
_PG_JUMP = 6
# Height of the help window in show-help mode
_SHOW_HELP_HEIGHT = 8
# How far the cursor needs to be from the edge of the window before it starts
# to scroll. Used for the main menu display, the information display, the
# search display, and for text boxes.
_SCROLL_OFFSET = 5
# Minimum width of dialogs that ask for text input
_INPUT_DIALOG_MIN_WIDTH = 30
# Number of arrows pointing up/down to draw when a window is scrolled
_N_SCROLL_ARROWS = 14
# Lines of help text shown at the bottom of the "main" display
_MAIN_HELP_LINES = """
[Space/Enter] Toggle/enter [ESC] Leave menu [S] Save
[O] Load [?] Symbol info [/] Jump to symbol
[F] Toggle show-help mode [C] Toggle show-name mode [A] Toggle show-all mode
[Q] Quit (prompts for save) [D] Save minimal config (advanced)
"""[1:-1].split("\n")
# Lines of help text shown at the bottom of the information dialog
_INFO_HELP_LINES = """
[ESC/q] Return to menu [/] Jump to symbol
"""[1:-1].split("\n")
# Lines of help text shown at the bottom of the search dialog
_JUMP_TO_HELP_LINES = """
Type text to narrow the search. Regexes are supported (via Python's 're'
module). The up/down cursor keys step in the list. [Enter] jumps to the
selected symbol. [ESC] aborts the search. Type multiple space-separated
strings/regexes to find entries that match all of them. Type Ctrl-F to
view the help of the selected item without leaving the dialog.
"""[1:-1].split("\n")
#
# Styling
#
_STYLES = {
"default": """
path=fg:black,bg:white,bold
separator=fg:black,bg:yellow,bold
list=fg:black,bg:white
selection=fg:white,bg:blue,bold
inv-list=fg:red,bg:white
inv-selection=fg:red,bg:blue
help=path
show-help=list
frame=fg:black,bg:yellow,bold
body=fg:white,bg:black
edit=fg:white,bg:blue
jump-edit=edit
text=list
""",
# This style is forced on terminals that do not support colors
"monochrome": """
path=bold
separator=bold,standout
list=
selection=bold,standout
inv-list=bold
inv-selection=bold,standout
help=bold
show-help=
frame=bold,standout
body=
edit=standout
jump-edit=
text=
""",
# Blue tinted style loosely resembling lxdialog
"aquatic": """
path=fg:cyan,bg:blue,bold
separator=fg:white,bg:cyan,bold
help=path
frame=fg:white,bg:cyan,bold
body=fg:brightwhite,bg:blue
edit=fg:black,bg:white
"""
}
# Standard colors definition
_STYLE_STD_COLORS = {
# Basic colors
"black": curses.COLOR_BLACK,
"red": curses.COLOR_RED,
"green": curses.COLOR_GREEN,
"yellow": curses.COLOR_YELLOW,
"blue": curses.COLOR_BLUE,
"magenta": curses.COLOR_MAGENTA,
"cyan": curses.COLOR_CYAN,
"white": curses.COLOR_WHITE,
# Bright versions
"brightblack": curses.COLOR_BLACK + 8,
"brightred": curses.COLOR_RED + 8,
"brightgreen": curses.COLOR_GREEN + 8,
"brightyellow": curses.COLOR_YELLOW + 8,
"brightblue": curses.COLOR_BLUE + 8,
"brightmagenta": curses.COLOR_MAGENTA + 8,
"brightcyan": curses.COLOR_CYAN + 8,
"brightwhite": curses.COLOR_WHITE + 8,
# Aliases
"purple": curses.COLOR_MAGENTA,
"brightpurple": curses.COLOR_MAGENTA + 8,
}
def _rgb_to_6cube(rgb):
# Converts an 888 RGB color to a 3-tuple (nice in that it's hashable)
# representing the closest xterm 256-color 6x6x6 color cube color.
#
# The xterm 256-color extension uses a RGB color palette with components in
# the range 0-5 (a 6x6x6 cube). The catch is that the mapping is nonlinear.
# Index 0 in the 6x6x6 cube is mapped to 0, index 1 to 95, then 135, 175,
# etc., in increments of 40. See the links below:
#
# https://commons.wikimedia.org/wiki/File:Xterm_256color_chart.svg
# https://github.com/tmux/tmux/blob/master/colour.c
# 48 is the middle ground between 0 and 95.
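# Worked example (illustrative): rgb = (95, 135, 0) maps to (1, 2, 0), since
# (95 - 55)/40 == 1, (135 - 55)/40 == 2, and 0 < 48 gives 0.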
return tuple(0 if x < 48 else int(round(max(1, (x - 55)/40))) for x in rgb)
def _6cube_to_rgb(r6g6b6):
# Returns the 888 RGB color for a 666 xterm color cube index
return tuple(0 if x == 0 else 40*x + 55 for x in r6g6b6)
def _rgb_to_gray(rgb):
# Converts an 888 RGB color to the index of an xterm 256-color grayscale
# color with approx. the same perceived brightness
# Calculate the luminance (gray intensity) of the color. See
# https://stackoverflow.com/questions/596216/formula-to-determine-brightness-of-rgb-color
# and
# https://www.w3.org/TR/AERT/#color-contrast
luma = 0.299*rgb[0] + 0.587*rgb[1] + 0.114*rgb[2]
# Closest index in the grayscale palette, which starts at RGB 0x080808,
# with stepping 0x0A0A0A
index = int(round((luma - 8)/10))
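# Worked example (illustrative): a mid-gray (128, 128, 128) has luma 128 and
# index round((128 - 8)/10) == 12, i.e. xterm palette color 232 + 12 == 244.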
# Clamp the index to 0-23, corresponding to 232-255
return max(0, min(index, 23))
def _gray_to_rgb(index):
# Converts a grayscale index back to the 888 RGB gray it represents (all
# three components equal 10*index + 8)
return 3*(10*index + 8,) # Returns a 3-tuple
# Obscure Python: We never pass a value for rgb2index, and it keeps pointing to
# the same dict. This avoids a global.
def _alloc_rgb(rgb, rgb2index={}):
# Initialize a new entry in the xterm palette to the given RGB color,
# returning its index. If the color has already been initialized, the index
# of the existing entry is returned.
#
# ncurses is palette-based, so we need to overwrite palette entries to make
# new colors.
#
# The colors from 0 to 15 are user-defined, and there's no way to query
# their RGB values, so we better leave them untouched. Also leave any
# hypothetical colors above 255 untouched (though we're unlikely to
# allocate that many colors anyway).
if rgb in rgb2index:
return rgb2index[rgb]
# Many terminals allow the user to customize the first 16 colors. Avoid
# changing their values.
color_index = 16 + len(rgb2index)
if color_index >= 256:
_warn("Unable to allocate new RGB color ", rgb, ". Too many colors "
"allocated.")
return 0
# Map each RGB component from the range 0-255 to the range 0-1000, which is
# what curses uses
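# (e.g. an 888 component of 128 becomes round(1000*128/255) == 502)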
curses.init_color(color_index, *(int(round(1000*x/255)) for x in rgb))
rgb2index[rgb] = color_index
return color_index
def _color_from_num(num):
# Returns the index of a color that looks like color 'num' in the xterm
# 256-color palette (but that might not be 'num', if we're redefining
# colors)
# - _alloc_rgb() won't touch the first 16 colors or any (hypothetical)
# colors above 255, so we can always return them as-is
#
# - If the terminal doesn't support changing color definitions, or if
# curses.COLORS < 256, _alloc_rgb() won't touch any color, and all colors
# can be returned as-is
if num < 16 or num > 255 or not curses.can_change_color() or \
curses.COLORS < 256:
return num
# _alloc_rgb() might redefine colors, so emulate the xterm 256-color
# palette by allocating new colors instead of returning color numbers
# directly
if num < 232:
num -= 16
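# Worked example (illustrative): xterm color 196 gives num == 180 here,
# which decodes to the cube triplet (5, 0, 0), i.e. RGB (255, 0, 0)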
return _alloc_rgb(_6cube_to_rgb(((num//36)%6, (num//6)%6, num%6)))
return _alloc_rgb(_gray_to_rgb(num - 232))
def _color_from_rgb(rgb):
# Returns the index of a color matching the 888 RGB color 'rgb'. The
# returned color might be an ~exact match or an approximation, depending on
# terminal capabilities.
# Calculates the Euclidean distance between two RGB colors
def dist(r1, r2): return sum((x - y)**2 for x, y in zip(r1, r2))
if curses.COLORS >= 256:
# Assume we're dealing with xterm's 256-color extension
if curses.can_change_color():
# Best case -- the terminal supports changing palette entries via
# curses.init_color(). Initialize an unused palette entry and
# return it.
return _alloc_rgb(rgb)
# Second best case -- pick between the xterm 256-color extension colors
# Closest 6-cube "color" color
c6 = _rgb_to_6cube(rgb)
# Closest gray color
gray = _rgb_to_gray(rgb)
if dist(rgb, _6cube_to_rgb(c6)) < dist(rgb, _gray_to_rgb(gray)):
# Use the "color" color from the 6x6x6 color palette. Calculate the
# color number from the 6-cube index triplet.
return 16 + 36*c6[0] + 6*c6[1] + c6[2]
# Use the color from the gray palette
return 232 + gray
# Terminal not in xterm 256-color mode. This is probably the best we can
# do, or is it? Submit patches. :)
min_dist = float('inf')
best = -1
for color in range(curses.COLORS):
# ncurses uses the range 0..1000. Scale that down to 0..255.
d = dist(rgb, tuple(int(round(255*c/1000))
for c in curses.color_content(color)))
if d < min_dist:
min_dist = d
best = color
return best
def _parse_style(style_str, parsing_default):
# Parses a string with '<element>=<style>' assignments. Anything not
# containing '=' is assumed to be a reference to a built-in style, which is
# treated as if all the assignments from the style were inserted at that
# point in the string.
#
# The parsing_default flag is set to True when we're implicitly parsing the
# 'default'/'monochrome' style, to prevent warnings.
for sline in style_str.split():
# Words without a "=" character represent a style template
if "=" in sline:
key, data = sline.split("=", 1)
# The 'default' style template is assumed to define all keys. We
# run _style_to_curses() for non-existing keys as well, so that we
# print warnings for errors to the right of '=' for those too.
if key not in _style and not parsing_default:
_warn("Ignoring non-existent style", key)
# If data is a reference to another key, copy its style
if data in _style:
_style[key] = _style[data]
else:
_style[key] = _style_to_curses(data)
elif sline in _STYLES:
# Recursively parse style template. Ignore styles that don't exist,
# for backwards/forwards compatibility.
_parse_style(_STYLES[sline], parsing_default)
else:
_warn("Ignoring non-existent style template", sline)
# Dictionary mapping element types to the curses attributes used to display
# them
_style = {}
def _style_to_curses(style_def):
# Parses a style definition string (<element>=<style>), returning
# a (fg_color, bg_color, attributes) tuple.
def parse_color(color_def):
color_def = color_def.split(":", 1)[1]
if color_def in _STYLE_STD_COLORS:
return _color_from_num(_STYLE_STD_COLORS[color_def])
# HTML format, #RRGGBB
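# e.g. (illustrative) "#ff8800" parses to the RGB triple (255, 136, 0)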
if re.match("#[A-Fa-f0-9]{6}", color_def):
return _color_from_rgb((
int(color_def[1:3], 16),
int(color_def[3:5], 16),
int(color_def[5:7], 16)))
try:
color_num = _color_from_num(int(color_def, 0))
except ValueError:
_warn("Ignoring color ", color_def, "that's neither predefined "
"nor a number")
return -1
if not -1 <= color_num < curses.COLORS:
_warn("Ignoring color {}, which is outside the range "
"-1..curses.COLORS-1 (-1..{})"
.format(color_def, curses.COLORS - 1))
return -1
return color_num
fg_color = -1
bg_color = -1
attrs = 0
if style_def:
for field in style_def.split(","):
if field.startswith("fg:"):
fg_color = parse_color(field)
elif field.startswith("bg:"):
bg_color = parse_color(field)
elif field == "bold":
# A_BOLD tends to produce faint and hard-to-read text on the
# Windows console, especially with the old color scheme, before
# the introduction of
# https://blogs.msdn.microsoft.com/commandline/2017/08/02/updating-the-windows-console-colors/
attrs |= curses.A_NORMAL if _IS_WINDOWS else curses.A_BOLD
elif field == "standout":
attrs |= curses.A_STANDOUT
elif field == "underline":
attrs |= curses.A_UNDERLINE
else:
_warn("Ignoring unknown style attribute", field)
return _style_attr(fg_color, bg_color, attrs)
def _init_styles():
if curses.has_colors():
curses.use_default_colors()
# Use the 'monochrome' style template as the base on terminals without
# color
_parse_style("default" if curses.has_colors() else "monochrome", True)
# Add any user-defined style from the environment
if "MENUCONFIG_STYLE" in os.environ:
_parse_style(os.environ["MENUCONFIG_STYLE"], False)
# color_attribs holds the color pairs we've already created, indexed by a
# (<foreground color>, <background color>) tuple.
#
# Obscure Python: We never pass a value for color_attribs, and it keeps
# pointing to the same dict. This avoids a global.
def _style_attr(fg_color, bg_color, attribs, color_attribs={}):
# Returns an attribute with the specified foreground and background color
# and the attributes in 'attribs'. Reuses color pairs already created if
# possible, and creates a new color pair otherwise.
#
# Returns 'attribs' if colors aren't supported.
if not curses.has_colors():
return attribs
if (fg_color, bg_color) not in color_attribs:
# Create new color pair. Color pair number 0 is hardcoded and cannot be
# changed, hence the +1s.
curses.init_pair(len(color_attribs) + 1, fg_color, bg_color)
color_attribs[(fg_color, bg_color)] = \
curses.color_pair(len(color_attribs) + 1)
return color_attribs[(fg_color, bg_color)] | attribs
#
# Main application
#
def _main():
menuconfig(standard_kconfig())
def menuconfig(kconf):
"""
Launches the configuration interface, returning after the user exits.
kconf:
Kconfig instance to be configured
"""
global _kconf
global _conf_filename
global _conf_changed
global _minconf_filename
global _show_all
_kconf = kconf
# Filename to save configuration to
_conf_filename = standard_config_filename()
# Load existing configuration and set _conf_changed True if it is outdated
_conf_changed = _load_config()
# Filename to save minimal configuration to
_minconf_filename = "defconfig"
# Any visible items in the top menu?
_show_all = False
if not _shown_nodes(kconf.top_node):
# Nothing visible. Start in show-all mode and try again.
_show_all = True
if not _shown_nodes(kconf.top_node):
# Give up. The implementation relies on always having a selected
# node.
print("Empty configuration -- nothing to configure.\n"
"Check that environment variables are set properly.")
return
# Disable warnings. They get mangled in curses mode, and we deal with
# errors ourselves.
kconf.warn = False
# Make curses use the locale settings specified in the environment
locale.setlocale(locale.LC_ALL, "")
# Try to fix Unicode issues on systems with bad defaults
if _CHANGE_C_LC_CTYPE_TO_UTF8:
_change_c_lc_ctype_to_utf8()
# Get rid of the delay between pressing ESC and jumping to the parent menu,
# unless the user has set ESCDELAY (see ncurses(3)). This makes the UI much
# smoother to work with.
#
# Note: This is, strictly speaking, pretty iffy, since escape codes for e.g.
# cursor
# keys start with ESC, but I've never seen it cause problems in practice
# (probably because it's unlikely that the escape code for a key would get
# split up across read()s, at least with a terminal emulator). Please
# report if you run into issues. Some suitable small default value could be
# used here instead in that case. Maybe it's silly to not put in the
# smallest imperceptible delay here already, though I don't like guessing.
#
# (From a quick glance at the ncurses source code, ESCDELAY might only be
# relevant for mouse events there, so maybe escapes are assumed to arrive
# in one piece already...)
os.environ.setdefault("ESCDELAY", "0")
# Enter curses mode. _menuconfig() returns a string to print on exit, after
# curses has been de-initialized.
print(curses.wrapper(_menuconfig))
def _load_config():
# Loads any existing .config file. See the Kconfig.load_config() docstring.
#
# Returns True if .config is missing or outdated. We always prompt for
# saving the configuration in that case.
print(_kconf.load_config())
if not os.path.exists(_conf_filename):
# No .config
return True
return _needs_save()
def _needs_save():
# Returns True if a just-loaded .config file is outdated (would get
# modified when saving)
if _kconf.missing_syms:
# Assignments to undefined symbols in the .config
return True
for sym in _kconf.unique_defined_syms:
if sym.user_value is None:
if sym.config_string:
# Unwritten symbol
return True
elif sym.orig_type in (BOOL, TRISTATE):
if sym.tri_value != sym.user_value:
# Written bool/tristate symbol, new value
return True
elif sym.str_value != sym.user_value:
# Written string/int/hex symbol, new value
return True
# No need to prompt for save
return False
# Global variables used below:
#
# _stdscr:
# stdscr from curses
#
# _cur_menu:
# Menu node of the menu (or menuconfig symbol, or choice) currently being
# shown
#
# _shown:
# List of items in _cur_menu that are shown (ignoring scrolling). In
# show-all mode, this list contains all items in _cur_menu. Otherwise, it
# contains just the visible items.
#
# _sel_node_i:
# Index in _shown of the currently selected node
#
# _menu_scroll:
# Index in _shown of the top row of the main display
#
# _parent_screen_rows:
# List/stack of the row numbers that the selections in the parent menus
# appeared on. This is used to prevent the scrolling from jumping around
# when going in and out of menus.
#
# _show_help/_show_name/_show_all:
# If True, the corresponding mode is on. See the module docstring.
#
# _conf_filename:
# File to save the configuration to
#
# _minconf_filename:
# File to save minimal configurations to
#
# _conf_changed:
# True if the configuration has been changed. If False, we don't bother
# showing the save-and-quit dialog.
#
# We reset this to False whenever the configuration is saved explicitly
# from the save dialog.
def _menuconfig(stdscr):
# Logic for the main display, with the list of symbols, etc.
global _stdscr
global _conf_filename
global _conf_changed
global _minconf_filename
global _show_help
global _show_name
_stdscr = stdscr
_init()
while True:
_draw_main()
curses.doupdate()
c = _getch_compat(_menu_win)
if c == curses.KEY_RESIZE:
_resize_main()
elif c in (curses.KEY_DOWN, "j", "J"):
_select_next_menu_entry()
elif c in (curses.KEY_UP, "k", "K"):
_select_prev_menu_entry()
elif c in (curses.KEY_NPAGE, "\x04"): # Page Down/Ctrl-D
# Keep it simple. This way we get sane behavior for small windows,
# etc., for free.
for _ in range(_PG_JUMP):
_select_next_menu_entry()
elif c in (curses.KEY_PPAGE, "\x15"): # Page Up/Ctrl-U
for _ in range(_PG_JUMP):
_select_prev_menu_entry()
elif c in (curses.KEY_END, "G"):
_select_last_menu_entry()
elif c in (curses.KEY_HOME, "g"):
_select_first_menu_entry()
elif c == " ":
# Toggle the node if possible
sel_node = _shown[_sel_node_i]
if not _change_node(sel_node):
_enter_menu(sel_node)
elif c in (curses.KEY_RIGHT, "\n", "l", "L"):
# Enter the node if possible
sel_node = _shown[_sel_node_i]
if not _enter_menu(sel_node):
_change_node(sel_node)
elif c in ("n", "N"):
_set_sel_node_tri_val(0)
elif c in ("m", "M"):
_set_sel_node_tri_val(1)
elif c in ("y", "Y"):
_set_sel_node_tri_val(2)
elif c in (curses.KEY_LEFT, curses.KEY_BACKSPACE, _ERASE_CHAR,
"\x1B", "h", "H"): # \x1B = ESC
if c == "\x1B" and _cur_menu is _kconf.top_node:
res = _quit_dialog()
if res:
return res
else:
_leave_menu()
elif c in ("o", "O"):
_load_dialog()
elif c in ("s", "S"):
filename = _save_dialog(_kconf.write_config, _conf_filename,
"configuration")
if filename:
_conf_filename = filename
_conf_changed = False
elif c in ("d", "D"):
filename = _save_dialog(_kconf.write_min_config, _minconf_filename,
"minimal configuration")
if filename:
_minconf_filename = filename
elif c == "/":
_jump_to_dialog()
# The terminal might have been resized while the fullscreen jump-to
# dialog was open
_resize_main()
elif c == "?":
_info_dialog(_shown[_sel_node_i], False)
# The terminal might have been resized while the fullscreen info
# dialog was open
_resize_main()
elif c in ("f", "F"):
_show_help = not _show_help
_set_style(_help_win, "show-help" if _show_help else "help")
_resize_main()
elif c in ("c", "C"):
_show_name = not _show_name
elif c in ("a", "A"):
_toggle_show_all()
elif c in ("q", "Q"):
res = _quit_dialog()
if res:
return res
def _quit_dialog():
if not _conf_changed:
return "No changes to save (for '{}')".format(_conf_filename)
while True:
c = _key_dialog(
"Quit",
" Save configuration?\n"
"\n"
"(Y)es (N)o (C)ancel",
"ync")
if c is None or c == "c":
return None
if c == "y":
# Returns a message to print
msg = _try_save(_kconf.write_config, _conf_filename, "configuration")
if msg:
return msg
elif c == "n":
return "Configuration ({}) was not saved".format(_conf_filename)
def _init():
# Initializes the main display with the list of symbols, etc. Also does
# misc. global initialization that needs to happen after initializing
# curses.
global _ERASE_CHAR
global _path_win
global _top_sep_win
global _menu_win
global _bot_sep_win
global _help_win
global _parent_screen_rows
global _cur_menu
global _shown
global _sel_node_i
global _menu_scroll
global _show_help
global _show_name
# Looking for this in addition to KEY_BACKSPACE (which is unreliable) makes
# backspace work with TERM=vt100. That makes it likely to work in sane
# environments.
_ERASE_CHAR = curses.erasechar()
if sys.version_info[0] >= 3:
# erasechar() returns a one-byte bytes object on Python 3. This sets
# _ERASE_CHAR to a blank string if it can't be decoded, which should be
# harmless.
_ERASE_CHAR = _ERASE_CHAR.decode("utf-8", "ignore")
_init_styles()
# Hide the cursor
_safe_curs_set(0)
# Initialize windows
# Top row, with menu path
_path_win = _styled_win("path")
# Separator below menu path, with title and arrows pointing up
_top_sep_win = _styled_win("separator")
# List of menu entries with symbols, etc.
_menu_win = _styled_win("list")
_menu_win.keypad(True)
# Row below menu list, with arrows pointing down
_bot_sep_win = _styled_win("separator")
# Help window with keys at the bottom. Shows help texts in show-help mode.
_help_win = _styled_win("help")
# The rows we'd like the nodes in the parent menus to appear on. This
# prevents the scroll from jumping around when going in and out of menus.
_parent_screen_rows = []
# Initial state
_cur_menu = _kconf.top_node
_shown = _shown_nodes(_cur_menu)
_sel_node_i = _menu_scroll = 0
_show_help = _show_name = False
# Give windows their initial size
_resize_main()
def _resize_main():
# Resizes the main display, with the list of symbols, etc., to fill the
# terminal
global _menu_scroll
screen_height, screen_width = _stdscr.getmaxyx()
_path_win.resize(1, screen_width)
_top_sep_win.resize(1, screen_width)
_bot_sep_win.resize(1, screen_width)
help_win_height = _SHOW_HELP_HEIGHT if _show_help else \
len(_MAIN_HELP_LINES)
menu_win_height = screen_height - help_win_height - 3
if menu_win_height >= 1:
_menu_win.resize(menu_win_height, screen_width)
_help_win.resize(help_win_height, screen_width)
_top_sep_win.mvwin(1, 0)
_menu_win.mvwin(2, 0)
_bot_sep_win.mvwin(2 + menu_win_height, 0)
_help_win.mvwin(2 + menu_win_height + 1, 0)
else:
# Degenerate case. Give up on nice rendering and just prevent errors.
menu_win_height = 1
_menu_win.resize(1, screen_width)
_help_win.resize(1, screen_width)
for win in _top_sep_win, _menu_win, _bot_sep_win, _help_win:
win.mvwin(0, 0)
# Adjust the scroll so that the selected node is still within the window,
# if needed
if _sel_node_i - _menu_scroll >= menu_win_height:
_menu_scroll = _sel_node_i - menu_win_height + 1
def _height(win):
# Returns the height of 'win'
return win.getmaxyx()[0]
def _width(win):
# Returns the width of 'win'
return win.getmaxyx()[1]
def _enter_menu(menu):
# Makes 'menu' the currently displayed menu. In addition to actual 'menu's,
# "menu" here includes choices and symbols defined with the 'menuconfig'
# keyword.
#
# Returns False if 'menu' can't be entered.
global _cur_menu
global _shown
global _sel_node_i
global _menu_scroll
if not menu.is_menuconfig:
# Not a menu
return False
shown_sub = _shown_nodes(menu)
# Never enter empty menus. We depend on having a current node.
if not shown_sub:
return False
# Remember where the current node appears on the screen, so we can try
# to get it to appear in the same place when we leave the menu
_parent_screen_rows.append(_sel_node_i - _menu_scroll)
# Jump into menu
_cur_menu = menu
_shown = shown_sub
_sel_node_i = _menu_scroll = 0
if isinstance(menu.item, Choice):
_select_selected_choice_sym()
return True
def _select_selected_choice_sym():
# Puts the cursor on the currently selected (y-valued) choice symbol, if
# any. Does nothing if the choice has no selection (is not visible/in y
# mode).
global _sel_node_i
choice = _cur_menu.item
if choice.selection:
# Search through all menu nodes to handle choice symbols being defined
# in multiple locations
for node in choice.selection.nodes:
if node in _shown:
_sel_node_i = _shown.index(node)
_center_vertically()
return
def _jump_to(node):
# Jumps directly to the menu node 'node'
global _cur_menu
global _shown
global _sel_node_i
global _menu_scroll
global _show_all
global _parent_screen_rows
# Clear remembered menu locations. We might not even have been in the
# parent menus before.
_parent_screen_rows = []
old_show_all = _show_all
jump_into = (isinstance(node.item, Choice) or node.item == MENU) and \
node.list
# If we're jumping to a non-empty choice or menu, jump to the first entry
# in it instead of jumping to its menu node
if jump_into:
_cur_menu = node
node = node.list
else:
_cur_menu = _parent_menu(node)
_shown = _shown_nodes(_cur_menu)
if node not in _shown:
# The node wouldn't be shown. Turn on show-all to show it.
_show_all = True
_shown = _shown_nodes(_cur_menu)
_sel_node_i = _shown.index(node)
if jump_into and not old_show_all and _show_all:
# If we're jumping into a choice or menu and were forced to turn on
# show-all because the first entry wasn't visible, try turning it off.
# That will land us at the first visible node if there are visible
# nodes, and is a no-op otherwise.
_toggle_show_all()
_center_vertically()
# If we're jumping to a non-empty choice, jump to the selected symbol, if
# any
if jump_into and isinstance(_cur_menu.item, Choice):
_select_selected_choice_sym()
def _leave_menu():
# Jumps to the parent menu of the current menu. Does nothing if we're in
# the top menu.
global _cur_menu
global _shown
global _sel_node_i
global _menu_scroll
if _cur_menu is _kconf.top_node:
return
# Jump to parent menu
parent = _parent_menu(_cur_menu)
_shown = _shown_nodes(parent)
_sel_node_i = _shown.index(_cur_menu)
_cur_menu = parent
# Try to make the menu entry appear on the same row on the screen as it did
# before we entered the menu.
if _parent_screen_rows:
# The terminal might have shrunk since we were last in the parent menu
screen_row = min(_parent_screen_rows.pop(), _height(_menu_win) - 1)
_menu_scroll = max(_sel_node_i - screen_row, 0)
else:
# No saved parent menu locations, meaning we jumped directly to some
# node earlier
_center_vertically()
def _select_next_menu_entry():
# Selects the menu entry after the current one, adjusting the scroll if
# necessary. Does nothing if we're already at the last menu entry.
global _sel_node_i
global _menu_scroll
if _sel_node_i < len(_shown) - 1:
# Jump to the next node
_sel_node_i += 1
# If the new node is sufficiently close to the edge of the menu window
# (as determined by _SCROLL_OFFSET), increase the scroll by one. This
# gives nice and non-jumpy behavior even when
# _SCROLL_OFFSET >= _height(_menu_win).
if _sel_node_i >= _menu_scroll + _height(_menu_win) - _SCROLL_OFFSET \
and _menu_scroll < _max_scroll(_shown, _menu_win):
_menu_scroll += 1
def _select_prev_menu_entry():
# Selects the menu entry before the current one, adjusting the scroll if
# necessary. Does nothing if we're already at the first menu entry.
global _sel_node_i
global _menu_scroll
if _sel_node_i > 0:
# Jump to the previous node
_sel_node_i -= 1
# See _select_next_menu_entry()
if _sel_node_i < _menu_scroll + _SCROLL_OFFSET:
_menu_scroll = max(_menu_scroll - 1, 0)
def _select_last_menu_entry():
# Selects the last menu entry in the current menu
global _sel_node_i
global _menu_scroll
_sel_node_i = len(_shown) - 1
_menu_scroll = _max_scroll(_shown, _menu_win)
def _select_first_menu_entry():
# Selects the first menu entry in the current menu
global _sel_node_i
global _menu_scroll
_sel_node_i = _menu_scroll = 0
def _toggle_show_all():
# Toggles show-all mode on/off. If turning it off would give no visible
# items in the current menu, it is left on.
global _show_all
global _shown
global _sel_node_i
global _menu_scroll
# Row on the screen the cursor is on. Preferably we want the same row to
# stay highlighted.
old_row = _sel_node_i - _menu_scroll
_show_all = not _show_all
# List of new nodes to be shown after toggling _show_all
new_shown = _shown_nodes(_cur_menu)
# Find a good node to select. The selected node might disappear if show-all
# mode is turned off.
# Select the previously selected node itself if it is still visible. If
# there are visible nodes before it, select the closest one.
for node in _shown[_sel_node_i::-1]:
if node in new_shown:
_sel_node_i = new_shown.index(node)
break
else:
# No visible nodes before the previously selected node. Select the
# closest visible node after it instead.
for node in _shown[_sel_node_i + 1:]:
if node in new_shown:
_sel_node_i = new_shown.index(node)
break
else:
# No visible nodes at all, meaning show-all was turned off inside
# an invisible menu. Don't allow that, as the implementation relies
# on always having a selected node.
_show_all = True
return
_shown = new_shown
# Try to make the cursor stay on the same row in the menu window. This
# might be impossible if too many nodes have disappeared above the node.
_menu_scroll = max(_sel_node_i - old_row, 0)
def _center_vertically():
# Centers the selected node vertically, if possible
global _menu_scroll
_menu_scroll = min(max(_sel_node_i - _height(_menu_win)//2, 0),
_max_scroll(_shown, _menu_win))
def _draw_main():
# Draws the "main" display, with the list of symbols, the header, and the
# footer.
#
# This could be optimized to only update the windows that have actually
# changed, but keep it simple for now and let curses sort it out.
term_width = _width(_stdscr)
#
# Update the separator row below the menu path
#
_top_sep_win.erase()
# Draw arrows pointing up if the symbol window is scrolled down. Draw them
# before drawing the title, so the title ends up on top for small windows.
if _menu_scroll > 0:
_safe_hline(_top_sep_win, 0, 4, curses.ACS_UARROW, _N_SCROLL_ARROWS)
# Add the 'mainmenu' text as the title, centered at the top
_safe_addstr(_top_sep_win,
0, max((term_width - len(_kconf.mainmenu_text))//2, 0),
_kconf.mainmenu_text)
_top_sep_win.noutrefresh()
# Note: The menu path at the top is deliberately updated last. See below.
#
# Update the symbol window
#
_menu_win.erase()
# Draw the _shown nodes starting from index _menu_scroll up to either as
# many as fit in the window, or to the end of _shown
for i in range(_menu_scroll,
min(_menu_scroll + _height(_menu_win), len(_shown))):
node = _shown[i]
# The 'not _show_all' test avoids showing invisible items in red
# outside show-all mode, which could look confusing/broken. Invisible
# symbols show up outside show-all mode if an invisible symbol has
# visible children in an implicit (indented) menu.
if _visible(node) or not _show_all:
style = _style["selection" if i == _sel_node_i else "list"]
else:
style = _style["inv-selection" if i == _sel_node_i else "inv-list"]
_safe_addstr(_menu_win, i - _menu_scroll, 0, _node_str(node), style)
_menu_win.noutrefresh()
#
# Update the bottom separator window
#
_bot_sep_win.erase()
# Draw arrows pointing down if the symbol window is scrolled up
if _menu_scroll < _max_scroll(_shown, _menu_win):
_safe_hline(_bot_sep_win, 0, 4, curses.ACS_DARROW, _N_SCROLL_ARROWS)
# Indicate when show-name/show-help/show-all mode is enabled
enabled_modes = []
if _show_help:
enabled_modes.append("show-help (toggle with [F])")
if _show_name:
enabled_modes.append("show-name")
if _show_all:
enabled_modes.append("show-all")
if enabled_modes:
s = " and ".join(enabled_modes) + " mode enabled"
_safe_addstr(_bot_sep_win, 0, max(term_width - len(s) - 2, 0), s)
_bot_sep_win.noutrefresh()
#
# Update the help window, which shows either key bindings or help texts
#
_help_win.erase()
if _show_help:
node = _shown[_sel_node_i]
if isinstance(node.item, (Symbol, Choice)) and node.help:
help_lines = textwrap.wrap(node.help, _width(_help_win))
for i in range(min(_height(_help_win), len(help_lines))):
_safe_addstr(_help_win, i, 0, help_lines[i])
else:
_safe_addstr(_help_win, 0, 0, "(no help)")
else:
for i, line in enumerate(_MAIN_HELP_LINES):
_safe_addstr(_help_win, i, 0, line)
_help_win.noutrefresh()
#
# Update the top row with the menu path.
#
# Doing this last leaves the cursor on the top row, which avoids some minor
# annoying jumpiness in gnome-terminal when reducing the height of the
# terminal. It seems to happen whenever the row with the cursor on it
# disappears.
#
_path_win.erase()
# Draw the menu path ("(Top) -> Menu -> Submenu -> ...")
menu_prompts = []
menu = _cur_menu
while menu is not _kconf.top_node:
# Promptless choices can be entered in show-all mode. Use
# standard_sc_expr_str() for them, so they show up as
# '<choice (name if any)>'.
menu_prompts.append(menu.prompt[0] if menu.prompt else
standard_sc_expr_str(menu.item))
menu = menu.parent
menu_prompts.append("(Top)")
menu_prompts.reverse()
# Hack: We can't put ACS_RARROW directly in the string. Temporarily
# represent it with NULL.
menu_path_str = " \0 ".join(menu_prompts)
# Scroll the menu path to the right if needed to make the current menu's
# title visible
if len(menu_path_str) > term_width:
menu_path_str = menu_path_str[len(menu_path_str) - term_width:]
# Print the path with the arrows reinserted
split_path = menu_path_str.split("\0")
_safe_addstr(_path_win, split_path[0])
for s in split_path[1:]:
_safe_addch(_path_win, curses.ACS_RARROW)
_safe_addstr(_path_win, s)
_path_win.noutrefresh()
def _parent_menu(node):
# Returns the menu node of the menu that contains 'node'. In addition to
# proper 'menu's, this might also be a 'menuconfig' symbol or a 'choice'.
# "Menu" here means a menu in the interface.
menu = node.parent
while not menu.is_menuconfig:
menu = menu.parent
return menu
def _shown_nodes(menu):
# Returns the list of menu nodes from 'menu' (see _parent_menu()) that
# would be shown when entering it
def rec(node):
res = []
while node:
if _visible(node) or _show_all:
res.append(node)
if node.list and not node.is_menuconfig:
# Nodes from implicit menu created from dependencies. Will
# be shown indented. Note that is_menuconfig is True for
# menus and choices as well as 'menuconfig' symbols.
res += rec(node.list)
elif node.list and isinstance(node.item, Symbol):
# Show invisible symbols if they have visible children. This
# can happen for an m/y-valued symbol with an optional prompt
# ('prompt "foo" is COND') that is currently disabled. Note
# that it applies to both 'config' and 'menuconfig' symbols.
shown_children = rec(node.list)
if shown_children:
res.append(node)
if not node.is_menuconfig:
res += shown_children
node = node.next
return res
if isinstance(menu.item, Choice):
# For named choices defined in multiple locations, entering the choice
# at a particular menu node would normally only show the choice symbols
# defined there (because that's what the MenuNode tree looks like).
#
# That might look confusing, and makes extending choices by defining
# them in multiple locations less useful. Instead, gather all the child
# menu nodes for all the choices whenever a choice is entered. That
# makes all choice symbols visible at all locations.
#
# Choices can contain non-symbol items (people do all sorts of weird
# stuff with them), hence the generality here. We really need to
# preserve the menu tree at each choice location.
#
# Note: Named choices are pretty broken in the C tools, and this is
# super obscure, so you probably won't find much that relies on this.
# This whole 'if' could be deleted if you don't care about defining
# choices in multiple locations to add symbols (which will still work,
# just with things being displayed in a way that might be unexpected).
# Do some additional work to avoid listing choice symbols twice if all
# or part of the choice is copied in multiple locations (e.g. by
# including some Kconfig file multiple times). We give the prompts at
# the current location precedence.
seen_syms = {node.item for node in rec(menu.list)
if isinstance(node.item, Symbol)}
res = []
for choice_node in menu.item.nodes:
for node in rec(choice_node.list):
# 'choice_node is menu' checks if we're dealing with the
# current location
if node.item not in seen_syms or choice_node is menu:
res.append(node)
if isinstance(node.item, Symbol):
seen_syms.add(node.item)
return res
return rec(menu.list)
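# Example (editor's illustration, not from the original file): given the
# Kconfig fragment
#
#     config FOO
#         bool "foo"
#
#     config BAR
#         bool "bar"
#         depends on FOO
#
# BAR becomes a child of FOO in the menu node tree (an implicit menu), and
# rec() above lists it indented below FOO even though FOO is a plain
# 'config' and not a 'menuconfig' symbol.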
def _visible(node):
# Returns True if the node should appear in the menu (outside show-all
# mode)
return node.prompt and expr_value(node.prompt[1]) and not \
(node.item == MENU and not expr_value(node.visibility))
def _change_node(node):
# Changes the value of the menu node 'node' if it is a symbol. Bools and
# tristates are toggled, while other symbol types pop up a text entry
# dialog.
#
# Returns False if the value of 'node' can't be changed.
if not _changeable(node):
return False
# sc = symbol/choice
sc = node.item
if sc.orig_type in (INT, HEX, STRING):
s = sc.str_value
while True:
s = _input_dialog(
"{} ({})".format(node.prompt[0], TYPE_TO_STR[sc.orig_type]),
s, _range_info(sc))
if s is None:
break
if sc.orig_type in (INT, HEX):
s = s.strip()
# 'make menuconfig' does this too. Hex values not starting with
# '0x' are accepted when loading .config files though.
if sc.orig_type == HEX and not s.startswith(("0x", "0X")):
s = "0x" + s
if _check_valid(sc, s):
_set_val(sc, s)
break
elif len(sc.assignable) == 1:
# Handles choice symbols for choices in y mode, which are a special
# case: .assignable can be (2,) while .tri_value is 0.
_set_val(sc, sc.assignable[0])
else:
# Set the symbol to the value after the current value in
# sc.assignable, with wrapping
val_index = sc.assignable.index(sc.tri_value)
_set_val(sc, sc.assignable[(val_index + 1) % len(sc.assignable)])
if _is_y_mode_choice_sym(sc) and not node.list:
# Immediately jump to the parent menu after making a choice selection,
# like 'make menuconfig' does, except if the menu node has children
# (which can happen if a symbol 'depends on' a choice symbol that
# immediately precedes it).
_leave_menu()
return True
def _changeable(node):
# Returns True if the value of 'node' can be changed
sc = node.item
if not isinstance(sc, (Symbol, Choice)):
return False
# This will hit for invisible symbols, which appear in show-all mode and
# when an invisible symbol has visible children (which can happen e.g. for
# symbols with optional prompts)
if not (node.prompt and expr_value(node.prompt[1])):
return False
return sc.orig_type in (STRING, INT, HEX) or len(sc.assignable) > 1 \
or _is_y_mode_choice_sym(sc)
def _set_sel_node_tri_val(tri_val):
# Sets the value of the currently selected menu entry to 'tri_val', if that
# value can be assigned
sc = _shown[_sel_node_i].item
if isinstance(sc, (Symbol, Choice)) and tri_val in sc.assignable:
_set_val(sc, tri_val)
def _set_val(sc, val):
# Wrapper around Symbol/Choice.set_value() for updating the menu state and
# _conf_changed
global _conf_changed
# Use the string representation of tristate values. This makes the format
# consistent for all symbol types.
if val in TRI_TO_STR:
val = TRI_TO_STR[val]
if val != sc.str_value:
sc.set_value(val)
_conf_changed = True
# Changing the value of the symbol might have changed what items in the
# current menu are visible. Recalculate the state.
_update_menu()
def _update_menu():
# Updates the current menu after the value of a symbol or choice has been
# changed. Changing a value might change which items in the menu are
# visible.
#
# If possible, preserves the location of the cursor on the screen when
# items are added/removed above the selected item.
global _shown
global _sel_node_i
global _menu_scroll
# Row on the screen the cursor was on
old_row = _sel_node_i - _menu_scroll
sel_node = _shown[_sel_node_i]
# New visible nodes
_shown = _shown_nodes(_cur_menu)
# New index of selected node
_sel_node_i = _shown.index(sel_node)
# Try to make the cursor stay on the same row in the menu window. This
# might be impossible if too many nodes have disappeared above the node.
_menu_scroll = max(_sel_node_i - old_row, 0)
def _input_dialog(title, initial_text, info_text=None):
# Pops up a dialog that prompts the user for a string
#
# title:
# Title to display at the top of the dialog window's border
#
# initial_text:
# Initial text to prefill the input field with
#
# info_text:
# String to show next to the input field. If None, just the input field
# is shown.
win = _styled_win("body")
win.keypad(True)
info_lines = info_text.split("\n") if info_text else []
# Give the input dialog its initial size
_resize_input_dialog(win, title, info_lines)
_safe_curs_set(2)
# Input field text
s = initial_text
# Cursor position
i = len(initial_text)
def edit_width():
return _width(win) - 4
# Horizontal scroll offset
hscroll = max(i - edit_width() + 1, 0)
while True:
# Draw the "main" display with the menu, etc., so that resizing still
# works properly. This is like a stack of windows, only hardcoded for
# now.
_draw_main()
_draw_input_dialog(win, title, info_lines, s, i, hscroll)
curses.doupdate()
c = _getch_compat(win)
if c == curses.KEY_RESIZE:
# Resize the main display too. The dialog floats above it.
_resize_main()
_resize_input_dialog(win, title, info_lines)
elif c == "\n":
_safe_curs_set(0)
return s
elif c == "\x1B": # \x1B = ESC
_safe_curs_set(0)
return None
else:
s, i, hscroll = _edit_text(c, s, i, hscroll, edit_width())
def _resize_input_dialog(win, title, info_lines):
# Resizes the input dialog to a size appropriate for the terminal size
screen_height, screen_width = _stdscr.getmaxyx()
win_height = 5
if info_lines:
win_height += len(info_lines) + 1
win_height = min(win_height, screen_height)
win_width = max(_INPUT_DIALOG_MIN_WIDTH,
len(title) + 4,
*(len(line) + 4 for line in info_lines))
win_width = min(win_width, screen_width)
win.resize(win_height, win_width)
win.mvwin((screen_height - win_height)//2,
(screen_width - win_width)//2)
def _draw_input_dialog(win, title, info_lines, s, i, hscroll):
edit_width = _width(win) - 4
win.erase()
# Note: Perhaps having a separate window for the input field would be nicer
visible_s = s[hscroll:hscroll + edit_width]
_safe_addstr(win, 2, 2, visible_s + " "*(edit_width - len(visible_s)),
_style["edit"])
for linenr, line in enumerate(info_lines):
_safe_addstr(win, 4 + linenr, 2, line)
# Draw the frame last so that it overwrites the body text for small windows
_draw_frame(win, title)
_safe_move(win, 2, 2 + i - hscroll)
win.noutrefresh()
def _load_dialog():
# Dialog for loading a new configuration
global _conf_changed
global _conf_filename
global _show_all
if _conf_changed:
c = _key_dialog(
"Load",
"You have unsaved changes. Load new\n"
"configuration anyway?\n"
"\n"
" (O)K (C)ancel",
"oc")
if c is None or c == "c":
return
filename = _conf_filename
while True:
filename = _input_dialog("File to load", filename, _load_save_info())
if filename is None:
return
filename = os.path.expanduser(filename)
if _try_load(filename):
_conf_filename = filename
_conf_changed = _needs_save()
# Turn on show-all mode if the selected node is not visible after
# loading the new configuration. _shown still holds the old state.
if _shown[_sel_node_i] not in _shown_nodes(_cur_menu):
_show_all = True
_update_menu()
# The message dialog indirectly updates the menu display, so _msg()
# must be called after the new state has been initialized
_msg("Success", "Loaded " + filename)
return
def _try_load(filename):
# Tries to load a configuration file. Pops up an error and returns False on
# failure.
#
# filename:
# Configuration file to load
try:
_kconf.load_config(filename)
return True
except EnvironmentError as e:
_error("Error loading '{}'\n\n{} (errno: {})"
.format(filename, e.strerror, errno.errorcode[e.errno]))
return False
def _save_dialog(save_fn, default_filename, description):
# Dialog for saving the current configuration
#
# save_fn:
# Function to call with 'filename' to save the file
#
# default_filename:
# Prefilled filename in the input field
#
# description:
# String describing the thing being saved
#
# Return value:
# The path to the saved file, or None if no file was saved
filename = default_filename
while True:
filename = _input_dialog("Filename to save {} to".format(description),
filename, _load_save_info())
if filename is None:
return None
filename = os.path.expanduser(filename)
msg = _try_save(save_fn, filename, description)
if msg:
_msg("Success", msg)
return filename
def _try_save(save_fn, filename, description):
# Tries to save a configuration file. Returns a message to print on
# success.
#
# save_fn:
# Function to call with 'filename' to save the file
#
# description:
# String describing the thing being saved
#
# Return value:
# A message to print on success, and None on failure
try:
# save_fn() returns a message to print
return save_fn(filename)
except EnvironmentError as e:
_error("Error saving {} to '{}'\n\n{} (errno: {})"
.format(description, e.filename, e.strerror,
errno.errorcode[e.errno]))
return None
def _key_dialog(title, text, keys):
# Pops up a dialog that can be closed by pressing a key
#
# title:
# Title to display at the top of the dialog window's border
#
# text:
# Text to show in the dialog
#
# keys:
# List of keys that will close the dialog. Other keys (besides ESC) are
# ignored. The caller is responsible for providing a hint about which
# keys can be pressed in 'text'.
#
# Return value:
# The key that was pressed to close the dialog. Uppercase characters are
# converted to lowercase. ESC will always close the dialog, and returns
# None.
win = _styled_win("body")
win.keypad(True)
_resize_key_dialog(win, text)
while True:
# See _input_dialog()
_draw_main()
_draw_key_dialog(win, title, text)
curses.doupdate()
c = _getch_compat(win)
if c == curses.KEY_RESIZE:
# Resize the main display too. The dialog floats above it.
_resize_main()
_resize_key_dialog(win, text)
elif c == "\x1B": # \x1B = ESC
return None
elif isinstance(c, str):
c = c.lower()
if c in keys:
return c
def _resize_key_dialog(win, text):
# Resizes the key dialog to a size appropriate for the terminal size
screen_height, screen_width = _stdscr.getmaxyx()
lines = text.split("\n")
win_height = min(len(lines) + 4, screen_height)
win_width = min(max(len(line) for line in lines) + 4, screen_width)
win.resize(win_height, win_width)
win.mvwin((screen_height - win_height)//2,
(screen_width - win_width)//2)
def _draw_key_dialog(win, title, text):
win.erase()
for i, line in enumerate(text.split("\n")):
_safe_addstr(win, 2 + i, 2, line)
# Draw the frame last so that it overwrites the body text for small windows
_draw_frame(win, title)
win.noutrefresh()
def _draw_frame(win, title):
# Draw a frame around the inner edges of 'win', with 'title' at the top
win_height, win_width = win.getmaxyx()
win.attron(_style["frame"])
# Draw top/bottom edge
_safe_hline(win, 0, 0, " ", win_width)
_safe_hline(win, win_height - 1, 0, " ", win_width)
# Draw left/right edge
_safe_vline(win, 0, 0, " ", win_height)
_safe_vline(win, 0, win_width - 1, " ", win_height)
# Draw title
_safe_addstr(win, 0, max((win_width - len(title))//2, 0), title)
win.attroff(_style["frame"])
def _jump_to_dialog():
# Implements the jump-to dialog, where symbols can be looked up via
# incremental search and jumped to.
#
# Returns True if the user jumped to a symbol, and False if the dialog was
# canceled.
s = "" # Search text
prev_s = None # Previous search text
s_i = 0 # Search text cursor position
hscroll = 0 # Horizontal scroll offset
sel_node_i = 0 # Index of selected row
scroll = 0 # Index in 'matches' of the top row of the list
# Edit box at the top
edit_box = _styled_win("jump-edit")
edit_box.keypad(True)
# List of matches
matches_win = _styled_win("list")
# Bottom separator, with arrows pointing down
bot_sep_win = _styled_win("separator")
# Help window with instructions at the bottom
help_win = _styled_win("help")
# Give windows their initial size
_resize_jump_to_dialog(edit_box, matches_win, bot_sep_win, help_win,
sel_node_i, scroll)
_safe_curs_set(2)
# Logic duplication with _select_{next,prev}_menu_entry(), except we do a
# functional variant that returns the new (sel_node_i, scroll) values to
# avoid 'nonlocal'. TODO: Can this be factored out in some nice way?
def select_next_match():
if sel_node_i == len(matches) - 1:
return sel_node_i, scroll
if sel_node_i + 1 >= scroll + _height(matches_win) - _SCROLL_OFFSET \
and scroll < _max_scroll(matches, matches_win):
return sel_node_i + 1, scroll + 1
return sel_node_i + 1, scroll
def select_prev_match():
if sel_node_i == 0:
return sel_node_i, scroll
if sel_node_i - 1 < scroll + _SCROLL_OFFSET:
return sel_node_i - 1, max(scroll - 1, 0)
return sel_node_i - 1, scroll
while True:
if s != prev_s:
# The search text changed. Find new matching nodes.
prev_s = s
try:
# We could use re.IGNORECASE here instead of lower(), but this
# is noticeably less jerky while inputting regexes like
# '.*debug$' (though the '.*' is redundant there). Those
# probably have bad interactions with re.search(), which
# matches anywhere in the string.
#
# It's not horrible either way. Just a bit smoother.
regex_searches = [re.compile(regex).search
for regex in s.lower().split()]
# No exception thrown, so the regexes are okay
bad_re = None
# List of matching nodes
matches = []
add_match = matches.append
# Search symbols and choices
for node in _sorted_sc_nodes():
# Symbol/choice
sc = node.item
for search in regex_searches:
# Both the name and the prompt might be missing, since
# we're searching both symbols and choices
# Does the regex match either the symbol name or the
# prompt (if any)?
if not (sc.name and search(sc.name.lower()) or
node.prompt and search(node.prompt[0].lower())):
# Give up on the first regex that doesn't match, to
# speed things up a bit when multiple regexes are
# entered
break
else:
add_match(node)
# Search menus and comments
for node in _sorted_menu_comment_nodes():
for search in regex_searches:
if not search(node.prompt[0].lower()):
break
else:
add_match(node)
except re.error as e:
# Bad regex. Remember the error message so we can show it.
bad_re = "Bad regular expression"
# re.error.msg was added in Python 3.5
if hasattr(e, "msg"):
bad_re += ": " + e.msg
matches = []
# Reset scroll and jump to the top of the list of matches
sel_node_i = scroll = 0
_draw_jump_to_dialog(edit_box, matches_win, bot_sep_win, help_win,
s, s_i, hscroll,
bad_re, matches, sel_node_i, scroll)
curses.doupdate()
c = _getch_compat(edit_box)
if c == "\n":
if matches:
_jump_to(matches[sel_node_i])
_safe_curs_set(0)
return True
elif c == "\x1B": # \x1B = ESC
_safe_curs_set(0)
return False
elif c == curses.KEY_RESIZE:
# We adjust the scroll so that the selected node stays visible in
# the list when the terminal is resized, hence the 'scroll'
# assignment
scroll = _resize_jump_to_dialog(
edit_box, matches_win, bot_sep_win, help_win,
sel_node_i, scroll)
elif c == "\x06": # \x06 = Ctrl-F
if matches:
_safe_curs_set(0)
_info_dialog(matches[sel_node_i], True)
_safe_curs_set(2)
scroll = _resize_jump_to_dialog(
edit_box, matches_win, bot_sep_win, help_win,
sel_node_i, scroll)
elif c == curses.KEY_DOWN:
sel_node_i, scroll = select_next_match()
elif c == curses.KEY_UP:
sel_node_i, scroll = select_prev_match()
elif c in (curses.KEY_NPAGE, "\x04"): # Page Down/Ctrl-D
# Keep it simple. This way we get sane behavior for small windows,
# etc., for free.
for _ in range(_PG_JUMP):
sel_node_i, scroll = select_next_match()
# Page Up (no Ctrl-U, as it's already used by the edit box)
elif c == curses.KEY_PPAGE:
for _ in range(_PG_JUMP):
sel_node_i, scroll = select_prev_match()
elif c == curses.KEY_END:
sel_node_i = len(matches) - 1
scroll = _max_scroll(matches, matches_win)
elif c == curses.KEY_HOME:
sel_node_i = scroll = 0
else:
s, s_i, hscroll = _edit_text(c, s, s_i, hscroll,
_width(edit_box) - 2)
# Obscure Python: We never pass a value for cached_nodes, and it keeps pointing
# to the same list. This avoids a global.
def _sorted_sc_nodes(cached_nodes=[]):
# Returns a sorted list of symbol and choice nodes to search. The symbol
# nodes appear first, sorted by name, and then the choice nodes, sorted by
# prompt and (secondarily) name.
if not cached_nodes:
# Add symbol nodes
for sym in sorted(_kconf.unique_defined_syms,
key=lambda sym: sym.name):
# += is in-place for lists
cached_nodes += sym.nodes
# Add choice nodes
choices = sorted(_kconf.unique_choices,
key=lambda choice: choice.name or "")
cached_nodes += sorted(
[node
for choice in choices
for node in choice.nodes],
key=lambda node: node.prompt[0] if node.prompt else "")
return cached_nodes
def _sorted_menu_comment_nodes(cached_nodes=[]):
# Returns a list of menu and comment nodes to search, sorted by prompt,
# with the menus first
if not cached_nodes:
def prompt_text(mc):
return mc.prompt[0]
cached_nodes += sorted(_kconf.menus, key=prompt_text)
cached_nodes += sorted(_kconf.comments, key=prompt_text)
return cached_nodes
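# Illustrative sketch (editor's addition, not part of the original module):
# the 'mutable default argument' idiom used by _sorted_sc_nodes() and
# _sorted_menu_comment_nodes() above. The default list is created once, at
# function definition time, so it doubles as a cache that persists across
# calls without needing a global.
def _example_memoized(_cache=[]):
    # The first call fills the cache; later calls return the same object
    if not _cache:
        _cache.append("expensive result")  # stand-in for real work
    return _cache[0]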
def _resize_jump_to_dialog(edit_box, matches_win, bot_sep_win, help_win,
sel_node_i, scroll):
# Resizes the jump-to dialog to fill the terminal.
#
# Returns the new scroll index. We adjust the scroll if needed so that the
# selected node stays visible.
screen_height, screen_width = _stdscr.getmaxyx()
bot_sep_win.resize(1, screen_width)
help_win_height = len(_JUMP_TO_HELP_LINES)
matches_win_height = screen_height - help_win_height - 4
if matches_win_height >= 1:
edit_box.resize(3, screen_width)
matches_win.resize(matches_win_height, screen_width)
help_win.resize(help_win_height, screen_width)
matches_win.mvwin(3, 0)
bot_sep_win.mvwin(3 + matches_win_height, 0)
help_win.mvwin(3 + matches_win_height + 1, 0)
else:
# Degenerate case. Give up on nice rendering and just prevent errors.
matches_win_height = 1
edit_box.resize(screen_height, screen_width)
matches_win.resize(1, screen_width)
help_win.resize(1, screen_width)
for win in matches_win, bot_sep_win, help_win:
win.mvwin(0, 0)
# Adjust the scroll so that the selected row is still within the window, if
# needed
if sel_node_i - scroll >= matches_win_height:
return sel_node_i - matches_win_height + 1
return scroll
def _draw_jump_to_dialog(edit_box, matches_win, bot_sep_win, help_win,
s, s_i, hscroll,
bad_re, matches, sel_node_i, scroll):
edit_width = _width(edit_box) - 2
#
# Update list of matches
#
matches_win.erase()
if matches:
for i in range(scroll,
min(scroll + _height(matches_win), len(matches))):
node = matches[i]
if isinstance(node.item, (Symbol, Choice)):
node_str = _name_and_val_str(node.item)
if node.prompt:
node_str += ' "{}"'.format(node.prompt[0])
elif node.item == MENU:
node_str = 'menu "{}"'.format(node.prompt[0])
else: # node.item == COMMENT
node_str = 'comment "{}"'.format(node.prompt[0])
_safe_addstr(matches_win, i - scroll, 0, node_str,
_style["selection" if i == sel_node_i else "list"])
else:
# bad_re holds the error message from the re.error exception on errors
_safe_addstr(matches_win, 0, 0, bad_re or "No matches")
matches_win.noutrefresh()
#
# Update bottom separator line
#
bot_sep_win.erase()
# Draw arrows pointing down if the symbol list is scrolled up
if scroll < _max_scroll(matches, matches_win):
_safe_hline(bot_sep_win, 0, 4, curses.ACS_DARROW, _N_SCROLL_ARROWS)
bot_sep_win.noutrefresh()
#
# Update help window at bottom
#
help_win.erase()
for i, line in enumerate(_JUMP_TO_HELP_LINES):
_safe_addstr(help_win, i, 0, line)
help_win.noutrefresh()
#
# Update edit box. We do this last since it makes it handy to position the
# cursor.
#
edit_box.erase()
_draw_frame(edit_box, "Jump to symbol/choice/menu/comment")
# Draw arrows pointing up if the symbol list is scrolled down
if scroll > 0:
# TODO: Bit ugly that _style["frame"] is repeated here
_safe_hline(edit_box, 2, 4, curses.ACS_UARROW, _N_SCROLL_ARROWS,
_style["frame"])
visible_s = s[hscroll:hscroll + edit_width]
_safe_addstr(edit_box, 1, 1, visible_s)
_safe_move(edit_box, 1, 1 + s_i - hscroll)
edit_box.noutrefresh()
def _info_dialog(node, from_jump_to_dialog):
# Shows a fullscreen window with information about 'node'.
#
# If 'from_jump_to_dialog' is True, the information dialog was opened from
# within the jump-to-dialog. In this case, we make '/' from within the
# information dialog just return, to avoid a confusing recursive invocation
# of the jump-to-dialog.
# Top row, with the title and arrows pointing up
top_line_win = _styled_win("separator")
# Text display
text_win = _styled_win("text")
text_win.keypad(True)
# Bottom separator, with arrows pointing down
bot_sep_win = _styled_win("separator")
# Help window with keys at the bottom
help_win = _styled_win("help")
# Give windows their initial size
_resize_info_dialog(top_line_win, text_win, bot_sep_win, help_win)
# Get lines of help text
lines = _info_str(node).split("\n")
# Index of first row in 'lines' to show
scroll = 0
while True:
_draw_info_dialog(node, lines, scroll, top_line_win, text_win,
bot_sep_win, help_win)
curses.doupdate()
c = _getch_compat(text_win)
if c == curses.KEY_RESIZE:
_resize_info_dialog(top_line_win, text_win, bot_sep_win, help_win)
elif c in (curses.KEY_DOWN, "j", "J"):
if scroll < _max_scroll(lines, text_win):
scroll += 1
elif c in (curses.KEY_NPAGE, "\x04"): # Page Down/Ctrl-D
scroll = min(scroll + _PG_JUMP, _max_scroll(lines, text_win))
elif c in (curses.KEY_PPAGE, "\x15"): # Page Up/Ctrl-U
scroll = max(scroll - _PG_JUMP, 0)
elif c in (curses.KEY_END, "G"):
scroll = _max_scroll(lines, text_win)
elif c in (curses.KEY_HOME, "g"):
scroll = 0
elif c in (curses.KEY_UP, "k", "K"):
if scroll > 0:
scroll -= 1
elif c == "/":
# Support starting a search from within the information dialog
if from_jump_to_dialog:
# Avoid recursion
return
if _jump_to_dialog():
# Jumped to a symbol. Cancel the information dialog.
return
# Stay in the information dialog if the jump-to dialog was
# canceled. Resize it in case the terminal was resized while the
# fullscreen jump-to dialog was open.
_resize_info_dialog(top_line_win, text_win, bot_sep_win, help_win)
elif c in (curses.KEY_LEFT, curses.KEY_BACKSPACE, _ERASE_CHAR,
"\x1B", # \x1B = ESC
"q", "Q", "h", "H"):
return
def _resize_info_dialog(top_line_win, text_win, bot_sep_win, help_win):
# Resizes the info dialog to fill the terminal
screen_height, screen_width = _stdscr.getmaxyx()
top_line_win.resize(1, screen_width)
bot_sep_win.resize(1, screen_width)
help_win_height = len(_INFO_HELP_LINES)
text_win_height = screen_height - help_win_height - 2
if text_win_height >= 1:
text_win.resize(text_win_height, screen_width)
help_win.resize(help_win_height, screen_width)
text_win.mvwin(1, 0)
bot_sep_win.mvwin(1 + text_win_height, 0)
help_win.mvwin(1 + text_win_height + 1, 0)
else:
# Degenerate case. Give up on nice rendering and just prevent errors.
text_win.resize(1, screen_width)
help_win.resize(1, screen_width)
for win in text_win, bot_sep_win, help_win:
win.mvwin(0, 0)
def _draw_info_dialog(node, lines, scroll, top_line_win, text_win,
bot_sep_win, help_win):
text_win_height, text_win_width = text_win.getmaxyx()
# Note: The top row is deliberately updated last. See _draw_main().
#
# Update text display
#
text_win.erase()
for i, line in enumerate(lines[scroll:scroll + text_win_height]):
_safe_addstr(text_win, i, 0, line)
text_win.noutrefresh()
#
# Update bottom separator line
#
bot_sep_win.erase()
# Draw arrows pointing down if the symbol window is scrolled up
if scroll < _max_scroll(lines, text_win):
_safe_hline(bot_sep_win, 0, 4, curses.ACS_DARROW, _N_SCROLL_ARROWS)
bot_sep_win.noutrefresh()
#
# Update help window at bottom
#
help_win.erase()
for i, line in enumerate(_INFO_HELP_LINES):
_safe_addstr(help_win, i, 0, line)
help_win.noutrefresh()
#
# Update top row
#
top_line_win.erase()
# Draw arrows pointing up if the information window is scrolled down. Draw
# them before drawing the title, so the title ends up on top for small
# windows.
if scroll > 0:
_safe_hline(top_line_win, 0, 4, curses.ACS_UARROW, _N_SCROLL_ARROWS)
title = ("Symbol" if isinstance(node.item, Symbol) else
"Choice" if isinstance(node.item, Choice) else
"Menu" if node.item == MENU else
"Comment") + " information"
_safe_addstr(top_line_win, 0, max((text_win_width - len(title))//2, 0),
title)
top_line_win.noutrefresh()
def _info_str(node):
# Returns information about the menu node 'node' as a string.
#
# The helper functions are responsible for adding newlines. This allows
# them to return "" if they don't want to add any output.
if isinstance(node.item, Symbol):
sym = node.item
return (
_name_info(sym) +
_prompt_info(sym) +
"Type: {}\n".format(TYPE_TO_STR[sym.type]) +
_value_info(sym) +
_help_info(sym) +
_direct_dep_info(sym) +
_defaults_info(sym) +
_select_imply_info(sym) +
_kconfig_def_info(sym)
)
if isinstance(node.item, Choice):
choice = node.item
return (
_name_info(choice) +
_prompt_info(choice) +
"Type: {}\n".format(TYPE_TO_STR[choice.type]) +
'Mode: {}\n'.format(choice.str_value) +
_help_info(choice) +
_choice_syms_info(choice) +
_direct_dep_info(choice) +
_defaults_info(choice) +
_kconfig_def_info(choice)
)
# node.item in (MENU, COMMENT)
return _kconfig_def_info(node)
def _name_info(sc):
# Returns a string with the name of the symbol/choice. Names are optional
# for choices.
return "Name: {}\n".format(sc.name) if sc.name else ""
def _prompt_info(sc):
# Returns a string listing the prompts of 'sc' (Symbol or Choice)
s = ""
for node in sc.nodes:
if node.prompt:
s += "Prompt: {}\n".format(node.prompt[0])
return s
def _value_info(sym):
# Returns a string showing 'sym's value
# Only put quotes around the value for string symbols
return "Value: {}\n".format(
'"{}"'.format(sym.str_value)
if sym.orig_type == STRING
else sym.str_value)
def _choice_syms_info(choice):
# Returns a string listing the choice symbols in 'choice'. Adds
# "(selected)" next to the selected one.
s = "Choice symbols:\n"
for sym in choice.syms:
s += " - " + sym.name
if sym is choice.selection:
s += " (selected)"
s += "\n"
return s + "\n"
def _help_info(sc):
# Returns a string with the help text(s) of 'sc' (Symbol or Choice).
# Symbols and choices defined in multiple locations can have multiple help
# texts.
s = "\n"
for node in sc.nodes:
if node.help is not None:
s += "Help:\n\n{}\n\n".format(_indent(node.help, 2))
return s
def _direct_dep_info(sc):
# Returns a string describing the direct dependencies of 'sc' (Symbol or
# Choice). The direct dependencies are the OR of the dependencies from each
# definition location. The dependencies at each definition location come
# from 'depends on' and dependencies inherited from parent items.
return "" if sc.direct_dep is _kconf.y else \
'Direct dependencies (={}):\n{}\n' \
.format(TRI_TO_STR[expr_value(sc.direct_dep)],
_split_expr_info(sc.direct_dep, 2))
def _defaults_info(sc):
# Returns a string describing the defaults of 'sc' (Symbol or Choice)
if not sc.defaults:
return ""
s = "Defaults:\n"
for val, cond in sc.orig_defaults:
s += " - "
if isinstance(sc, Symbol):
s += _expr_str(val)
# Skip the tristate value hint if the expression is just a single
# symbol. _expr_str() already shows its value as a string.
#
# This also avoids showing the tristate value for string/int/hex
# defaults, which wouldn't make any sense.
if isinstance(val, tuple):
s += ' (={})'.format(TRI_TO_STR[expr_value(val)])
else:
# Don't print the value next to the symbol name for choice
# defaults, as it looks a bit confusing
s += val.name
s += "\n"
if cond is not _kconf.y:
s += " Condition (={}):\n{}" \
.format(TRI_TO_STR[expr_value(cond)],
_split_expr_info(cond, 4))
return s + "\n"
def _split_expr_info(expr, indent):
# Returns a string with 'expr' split into its top-level && or || operands,
# with one operand per line, together with the operand's value. This is
# usually enough to get something readable for long expressions. A fancier
# recursive thingy would be possible too.
#
# indent:
# Number of leading spaces to add before the split expression.
if len(split_expr(expr, AND)) > 1:
split_op = AND
op_str = "&&"
else:
split_op = OR
op_str = "||"
s = ""
for i, term in enumerate(split_expr(expr, split_op)):
s += "{}{} {}".format(indent*" ",
" " if i == 0 else op_str,
_expr_str(term))
# Don't bother showing the value hint if the expression is just a
# single symbol. _expr_str() already shows its value.
if isinstance(term, tuple):
s += " (={})".format(TRI_TO_STR[expr_value(term)])
s += "\n"
return s
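# Example (editor's illustration; exact spacing may differ): a dependency
# like FOO && !BAR splits into one operand per line, roughly
#
#     FOO(=y)
#   && !BAR(=y) (=n)
#
# Only composite terms like !BAR get the extra (=...) hint, since bare
# symbols already show their value via _expr_str().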
def _select_imply_info(sym):
# Returns a string with information about which symbols 'select' or 'imply'
# 'sym'. The selecting/implying symbols are grouped according to which
# value they select/imply 'sym' to (n/m/y).
def sis(expr, val, title):
# sis = selects/implies
sis = [si for si in split_expr(expr, OR) if expr_value(si) == val]
if not sis:
return ""
res = title
for si in sis:
res += " - {}\n".format(split_expr(si, AND)[0].name)
return res + "\n"
s = ""
if sym.rev_dep is not _kconf.n:
s += sis(sym.rev_dep, 2,
"Symbols currently y-selecting this symbol:\n")
s += sis(sym.rev_dep, 1,
"Symbols currently m-selecting this symbol:\n")
s += sis(sym.rev_dep, 0,
"Symbols currently n-selecting this symbol (no effect):\n")
if sym.weak_rev_dep is not _kconf.n:
s += sis(sym.weak_rev_dep, 2,
"Symbols currently y-implying this symbol:\n")
s += sis(sym.weak_rev_dep, 1,
"Symbols currently m-implying this symbol:\n")
s += sis(sym.weak_rev_dep, 0,
"Symbols currently n-implying this symbol (no effect):\n")
return s
def _kconfig_def_info(item):
# Returns a string with the definition of 'item' in Kconfig syntax,
# together with the definition location(s) and their include and menu paths
nodes = [item] if isinstance(item, MenuNode) else item.nodes
s = "Kconfig definition{}, with parent deps. propagated to 'depends on'\n" \
.format("s" if len(nodes) > 1 else "")
s += (len(s) - 1)*"="
for node in nodes:
s += "\n\n" \
"At {}:{}\n" \
"{}" \
"Menu path: {}\n\n" \
"{}" \
.format(node.filename, node.linenr,
_include_path_info(node),
_menu_path_info(node),
_indent(node.custom_str(_name_and_val_str), 2))
return s
def _include_path_info(node):
if not node.include_path:
# In the top-level Kconfig file
return ""
return "Included via {}\n".format(
" -> ".join("{}:{}".format(filename, linenr)
for filename, linenr in node.include_path))
def _menu_path_info(node):
# Returns a string describing the menu path leading up to 'node'
path = ""
while node.parent is not _kconf.top_node:
node = node.parent
# Promptless choices might appear among the parents. Use
# standard_sc_expr_str() for them, so that they show up as
# '<choice (name if any)>'.
path = " -> " + (node.prompt[0] if node.prompt else
standard_sc_expr_str(node.item)) + path
return "(Top)" + path
def _indent(s, n):
# Returns 's' with each line indented 'n' spaces. textwrap.indent() is not
# available in Python 2 (it's 3.3+).
return "\n".join(n*" " + line for line in s.split("\n"))
def _name_and_val_str(sc):
# Custom symbol/choice printer that shows symbol values after symbols
# Show the values of non-constant (non-quoted) symbols that don't look like
# numbers. Things like 123 are actually symbol references, and only work as
# expected due to undefined symbols getting their name as their value.
# Showing the symbol value for those isn't helpful though.
if isinstance(sc, Symbol) and not sc.is_constant and not _is_num(sc.name):
if not sc.nodes:
# Undefined symbol reference
return "{}(undefined/n)".format(sc.name)
return '{}(={})'.format(sc.name, sc.str_value)
# For other items, use the standard format
return standard_sc_expr_str(sc)
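# Examples (editor's illustration): a defined symbol renders as "FOO(=y)",
# an undefined reference as "FOO(undefined/n)", and constants or
# number-like names fall through to standard_sc_expr_str().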
def _expr_str(expr):
# Custom expression printer that shows symbol values
return expr_str(expr, _name_and_val_str)
def _styled_win(style):
# Returns a new curses window with style 'style' and space as the fill
# character. The initial dimensions are (1, 1), so the window needs to be
# sized and positioned separately.
win = curses.newwin(1, 1)
_set_style(win, style)
return win
def _set_style(win, style):
# Changes the style of an existing window
win.bkgdset(" ", _style[style])
def _max_scroll(lst, win):
# Assuming 'lst' is a list of items to be displayed in 'win',
# returns the maximum number of steps 'win' can be scrolled down.
# We stop scrolling when the bottom item is visible.
return max(0, len(lst) - _height(win))
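# Example (editor's illustration): 12 items in an 8-row window give
# max(0, 12 - 8) = 4 possible scroll steps before the last item reaches
# the bottom row.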
def _edit_text(c, s, i, hscroll, width):
# Implements text editing commands for edit boxes. Takes a character (which
# could also be e.g. curses.KEY_LEFT) and the edit box state, and returns
# the new state after the character has been processed.
#
# c:
# Character from user
#
# s:
# Current contents of string
#
# i:
# Current cursor index in string
#
# hscroll:
# Index in s of the leftmost character in the edit box, for horizontal
# scrolling
#
# width:
# Width in characters of the edit box
#
# Return value:
# An (s, i, hscroll) tuple for the new state
if c == curses.KEY_LEFT:
if i > 0:
i -= 1
elif c == curses.KEY_RIGHT:
if i < len(s):
i += 1
elif c in (curses.KEY_HOME, "\x01"): # \x01 = CTRL-A
i = 0
elif c in (curses.KEY_END, "\x05"): # \x05 = CTRL-E
i = len(s)
elif c in (curses.KEY_BACKSPACE, _ERASE_CHAR):
if i > 0:
s = s[:i-1] + s[i:]
i -= 1
elif c == curses.KEY_DC:
s = s[:i] + s[i+1:]
elif c == "\x17": # \x17 = CTRL-W
# The \W removes characters like ',' one at a time
new_i = re.search(r"(?:\w*|\W)\s*$", s[:i]).start()
s = s[:new_i] + s[i:]
i = new_i
elif c == "\x0B": # \x0B = CTRL-K
s = s[:i]
elif c == "\x15": # \x15 = CTRL-U
s = s[i:]
i = 0
elif isinstance(c, str):
# Insert character
s = s[:i] + c + s[i:]
i += 1
# Adjust the horizontal scroll so that the cursor never touches the left or
# right edges of the edit box, except when it's at the beginning or the end
# of the string
if i < hscroll + _SCROLL_OFFSET:
hscroll = max(i - _SCROLL_OFFSET, 0)
elif i >= hscroll + width - _SCROLL_OFFSET:
max_scroll = max(len(s) - width + 1, 0)
hscroll = min(i - width + _SCROLL_OFFSET + 1, max_scroll)
return s, i, hscroll
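# Illustrative sketch (editor's addition, not part of the original file):
# the Ctrl-W branch above, extracted as a standalone helper. It relies on
# the module's existing 're' import. Deleting backwards removes one word,
# or one non-word character, plus any whitespace between it and the cursor.
def _example_ctrl_w(s, i):
    # For s = "foo bar," and i = len(s), successive calls return
    # "foo bar", then "foo ", then "" (removing ",", "bar", and "foo ")
    new_i = re.search(r"(?:\w*|\W)\s*$", s[:i]).start()
    return s[:new_i] + s[i:], new_i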
def _load_save_info():
# Returns an information string for load/save dialog boxes
return "(Relative to {})\n\nRefer to your home directory with ~" \
.format(os.path.join(os.getcwd(), ""))
def _msg(title, text):
# Pops up a message dialog that can be dismissed with Space/Enter/ESC
_key_dialog(title, text, " \n")
def _error(text):
# Pops up an error dialog that can be dismissed with Space/Enter/ESC
_msg("Error", text)
def _node_str(node):
# Returns the complete menu entry text for a menu node.
#
# Example return value: "[*] Support for X"
# Calculate the indent to print the item with by checking how many levels
# above it the closest 'menuconfig' item is (this includes menus and
# choices as well as menuconfig symbols)
indent = 0
parent = node.parent
while not parent.is_menuconfig:
indent += _SUBMENU_INDENT
parent = parent.parent
# This approach gives nice alignment for empty string symbols ("() Foo")
s = "{:{}}".format(_value_str(node), 3 + indent)
if _should_show_name(node):
if isinstance(node.item, Symbol):
s += " <{}>".format(node.item.name)
else:
# For choices, use standard_sc_expr_str(). That way they show up as
# '<choice (name if any)>'.
s += " " + standard_sc_expr_str(node.item)
if node.prompt:
if node.item == COMMENT:
s += " *** {} ***".format(node.prompt[0])
else:
s += " " + node.prompt[0]
if isinstance(node.item, Symbol):
sym = node.item
# Print "(NEW)" next to symbols without a user value (from e.g. a
# .config), but skip it for choice symbols in choices in y mode,
# and for symbols of UNKNOWN type (which generate a warning though)
if sym.user_value is None and sym.orig_type and \
not (sym.choice and sym.choice.tri_value == 2):
s += " (NEW)"
if isinstance(node.item, Choice) and node.item.tri_value == 2:
# Print the prompt of the selected symbol after the choice for
# choices in y mode
sym = node.item.selection
if sym:
for sym_node in sym.nodes:
# Use the prompt used at this choice location, in case the
# choice symbol is defined in multiple locations
if sym_node.parent is node and sym_node.prompt:
s += " ({})".format(sym_node.prompt[0])
break
else:
# If the symbol isn't defined at this choice location, then
# just use whatever prompt we can find for it
for sym_node in sym.nodes:
if sym_node.prompt:
s += " ({})".format(sym_node.prompt[0])
break
# Print "--->" next to nodes that have menus that can potentially be
# entered. Print "----" if the menu is empty. We don't allow those to be
# entered.
if node.is_menuconfig:
s += " --->" if _shown_nodes(node) else " ----"
return s
def _should_show_name(node):
# Returns True if 'node' is a symbol or choice whose name should be shown
# (names are optional for choices)
# The 'not node.prompt' case only hits in show-all mode, for promptless
# symbols and choices
return not node.prompt or \
(_show_name and isinstance(node.item, (Symbol, Choice)))
def _value_str(node):
# Returns the value part ("[*]", "<M>", "(foo)" etc.) of a menu node
item = node.item
if item in (MENU, COMMENT):
return ""
# Wouldn't normally happen, and generates a warning
if not item.orig_type:
return ""
if item.orig_type in (STRING, INT, HEX):
return "({})".format(item.str_value)
# BOOL or TRISTATE
if _is_y_mode_choice_sym(item):
return "(X)" if item.choice.selection is item else "( )"
tri_val_str = (" ", "M", "*")[item.tri_value]
if len(item.assignable) <= 1:
# Pinned to a single value
return "" if isinstance(item, Choice) else "-{}-".format(tri_val_str)
if item.type == BOOL:
return "[{}]".format(tri_val_str)
# item.type == TRISTATE
if item.assignable == (1, 2):
return "{{{}}}".format(tri_val_str) # {M}/{*}
return "<{}>".format(tri_val_str)
def _is_y_mode_choice_sym(item):
# The choice mode is an upper bound on the visibility of choice symbols, so
# we can check the choice symbols' own visibility to see if the choice is
# in y mode
return isinstance(item, Symbol) and item.choice and item.visibility == 2
def _check_valid(sym, s):
# Returns True if the string 's' is a well-formed value for 'sym'.
# Otherwise, displays an error and returns False.
if sym.orig_type not in (INT, HEX):
# Anything goes for non-int/hex symbols
return True
base = 10 if sym.orig_type == INT else 16
try:
int(s, base)
except ValueError:
_error("'{}' is a malformed {} value"
.format(s, TYPE_TO_STR[sym.orig_type]))
return False
for low_sym, high_sym, cond in sym.ranges:
if expr_value(cond):
low_s = low_sym.str_value
high_s = high_sym.str_value
if not int(low_s, base) <= int(s, base) <= int(high_s, base):
_error("{} is outside the range {}-{}"
.format(s, low_s, high_s))
return False
break
return True
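# Example (editor's illustration): for a hex symbol with 'range 0x0 0xFF',
# entering "100" (normalized to "0x100" by _change_node()) parses as 256,
# falls outside 0-255, and is rejected with an error dialog.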
def _range_info(sym):
# Returns a string with information about the valid range for the symbol
# 'sym', or None if 'sym' doesn't have a range
if sym.orig_type in (INT, HEX):
for low, high, cond in sym.ranges:
if expr_value(cond):
return "Range: {}-{}".format(low.str_value, high.str_value)
return None
def _is_num(name):
# Heuristic to see if a symbol name looks like a number, for nicer output
# when printing expressions. Things like 16 are actually symbol names, only
# they get their name as their value when the symbol is undefined.
try:
int(name)
except ValueError:
if not name.startswith(("0x", "0X")):
return False
try:
int(name, 16)
except ValueError:
return False
return True
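# Examples (editor's illustration): _is_num("16") and _is_num("0x1F")
# return True, while _is_num("FOO") and _is_num("0x") return False.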
def _getch_compat(win):
# Uses get_wch() if available (Python 3.3+) and getch() otherwise. Also
# handles a PDCurses resizing quirk.
if hasattr(win, "get_wch"):
c = win.get_wch()
else:
c = win.getch()
if 0 <= c <= 255:
c = chr(c)
# Decent resizing behavior on PDCurses requires calling resize_term(0, 0)
# after receiving KEY_RESIZE, while ncurses (usually) handles terminal
# resizing automatically in get(_w)ch() (see the end of the
# resizeterm(3NCURSES) man page).
#
# resize_term(0, 0) reliably fails and does nothing on ncurses, so this
# hack gives ncurses/PDCurses compatibility for resizing. I don't know
# whether it would cause trouble for other implementations.
if c == curses.KEY_RESIZE:
try:
curses.resize_term(0, 0)
except curses.error:
pass
return c
def _warn(*args):
# Temporarily returns from curses to shell mode and prints a warning to
# stderr. The warning would get lost in curses mode.
curses.endwin()
print("menuconfig warning: ", end="", file=sys.stderr)
print(*args, file=sys.stderr)
curses.doupdate()
# Ignore exceptions from some functions that might fail, e.g. for small
# windows. They usually do reasonable things anyway.
def _safe_curs_set(visibility):
try:
curses.curs_set(visibility)
except curses.error:
pass
def _safe_addstr(win, *args):
# Clip the line to avoid wrapping to the next line, which looks glitchy.
# addchstr() would do it for us, but it's not available in the 'curses'
# module.
attr = None
if isinstance(args[0], str):
y, x = win.getyx()
s = args[0]
if len(args) == 2:
attr = args[1]
else:
y, x, s = args[:3]
if len(args) == 4:
attr = args[3]
maxlen = _width(win) - x
s = s.expandtabs()
try:
# The 'curses' module uses wattr_set() internally if you pass 'attr',
# overwriting the background style, so setting 'attr' to 0 in the first
# case won't do the right thing
if attr is None:
win.addnstr(y, x, s, maxlen)
else:
win.addnstr(y, x, s, maxlen, attr)
except curses.error:
pass
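# Examples (editor's illustration): _safe_addstr(win, "text") writes at the
# current cursor position; _safe_addstr(win, y, x, "text", attr) writes at
# (y, x) with attribute 'attr'. Both clip at the window edge instead of
# wrapping to the next line.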
def _safe_addch(win, *args):
try:
win.addch(*args)
except curses.error:
pass
def _safe_hline(win, *args):
try:
win.hline(*args)
except curses.error:
pass
def _safe_vline(win, *args):
try:
win.vline(*args)
except curses.error:
pass
def _safe_move(win, *args):
try:
win.move(*args)
except curses.error:
pass
def _change_c_lc_ctype_to_utf8():
# See _CHANGE_C_LC_CTYPE_TO_UTF8
if _IS_WINDOWS:
# Windows rarely has issues here, and the PEP 538 implementation avoids
# changing the locale on it. None of the UTF-8 locales below were
# supported from some quick testing either. Play it safe.
return
def try_set_locale(loc):
try:
locale.setlocale(locale.LC_CTYPE, loc)
return True
except locale.Error:
return False
# Is LC_CTYPE set to the C locale?
if locale.setlocale(locale.LC_CTYPE) == "C":
# This list was taken from the PEP 538 implementation in the CPython
# code, in Python/pylifecycle.c
for loc in "C.UTF-8", "C.utf8", "UTF-8":
if try_set_locale(loc):
# LC_CTYPE successfully changed
return
# Are we running on Windows?
_IS_WINDOWS = os.name == "nt"
if __name__ == "__main__":
_main()
| [] | [] | ["MENUCONFIG_STYLE"] | [] | ["MENUCONFIG_STYLE"] | python | 1 | 0 |
src/main/java/ui/voucher/saleandpurchase/PurchaseReturn.java | package ui.voucher.saleandpurchase;
import java.awt.Toolkit;
import java.awt.event.KeyEvent;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.Path;
import java.sql.Connection;
import java.sql.Timestamp;
import java.text.DecimalFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import javax.swing.DefaultCellEditor;
import javax.swing.JOptionPane;
import javax.swing.JTable;
import javax.swing.RowFilter;
import javax.swing.table.DefaultTableModel;
import javax.swing.table.TableModel;
import javax.swing.table.TableRowSorter;
import org.jdesktop.beansbinding.AbstractBindingListener;
import org.jdesktop.beansbinding.Binding;
import org.jdesktop.beansbinding.PropertyStateEvent;
import ui.feature.TableUI;
import ui.voucher.TableUtils;
import ui.main.MainFrame;
import ui.reports.print.PurchaseReturnNotesTaker;
import ui.reports.print.action.RegRetPrint;
import ui.voucher.FetchVoucherData;
import ui.voucher.ListOfData;
import ui.voucher.saleandpurchase.utils.PurchaseReturnHandler;
/**
*
* @author Vimal
*/
public class PurchaseReturn extends javax.swing.JPanel {
private ListOfData listOfData;
private int selectedRow;
private final MainFrame mainFrame;
private final String companyCode;
private String voucherNumber;
private final Connection conn;
private String primaryUnit;
private String itemCode;
private String billsundry;
private String billOverallSUndry;
private String unitName;
private String totalItemAmount;
private double itemTableTotalAmount = 0;
private double totAmount = 0;
private TableRowSorter<TableModel> sorter;
private int selectedRow1;
private String saveType;
private PurchaseReturnNotesTaker notesTaker;
private String notes;
private RegRetPrint print;
/**
* Creates new form PurchaseReturn.
*
* @param mainFrame   the application's main frame
* @param companyCode code of the company the voucher belongs to
* @param conn        open JDBC connection used for database access
*/
public PurchaseReturn(MainFrame mainFrame, String companyCode, Connection conn) {
initComponents();
this.mainFrame = mainFrame;
this.companyCode = companyCode;
this.conn = conn;
dateChooserCombo1.setFormat(1);
dateChooserCombo1.setFormat(2);
errorMessage.setOpaque(false);
defaultControlShow();
cashAccCB.setVisible(false);
bankAccCB.setVisible(false);
creditLimitCB.setVisible(false);
creditLimitDayTF.setVisible(false);
dateChooserCombo2.setVisible(false);
itemTable.setTableHeader(new TableUI().getTableHeaderUI(itemTable));
billSundryTable.setTableHeader(new TableUI().getTableHeaderUI(billSundryTable));
saleOrderListTable.setTableHeader(new TableUI().getTableHeaderUI(saleOrderListTable));
billSundryTable.getColumnModel().getColumn(2).setCellEditor(new DefaultCellEditor(taxTypeClassCBB));
billSundryTable.getColumnModel().getColumn(3).setCellEditor(new DefaultCellEditor(billSundryNatureCB));
itemTable.getColumnModel().getColumn(5).setCellEditor(new DefaultCellEditor(quantityTF));
itemTable.getColumnModel().getColumn(6).setCellEditor(new DefaultCellEditor(rateTF));
itemTable.getColumnModel().getColumn(9).setCellEditor(new DefaultCellEditor(taxTypeClassCB));
itemTable.getColumnModel().getColumn(11).setCellEditor(new DefaultCellEditor(natureCB));
itemTable.getColumnModel().getColumn(10).setCellEditor(new DefaultCellEditor(atTheRateTF));
itemTable.getColumnModel().getColumn(12).setCellEditor(new DefaultCellEditor(miscAmountTF));
itemTable.getColumnModel().getColumn(14).setCellEditor(new DefaultCellEditor(expiryDateTF));
billSundryTable.getColumnModel().getColumn(1).setCellEditor(new DefaultCellEditor(aRateTF));
billSundryTable.getColumnModel().getColumn(4).setCellEditor(new DefaultCellEditor(amountTF));
new FetchVoucherData().fetchAccountNoSale(mainFrame, accountNumberCB, companyCode, conn);
new FetchVoucherData().fetchAccountNameSale(mainFrame, accountNameCB, companyCode, conn);
new FetchVoucherData().fetchCashAccNo(mainFrame, cashAccCB, companyCode, conn);
new FetchVoucherData().fetchBankAccNo(mainFrame, bankAccCB, companyCode, conn);
quantityTF.setEnabled(false);
natureCB.setEnabled(false);
rateTF.setEnabled(false);
taxTypeClassCB.setEnabled(false);
atTheRateTF.setEnabled(false);
miscAmountTF.setEnabled(false);
bindingGroup.addBindingListener(new AbstractBindingListener() {
@Override
public void targetChanged(Binding binding, PropertyStateEvent event) {
super.targetChanged(binding, event);
errorMessage.setOpaque(false);
errorMessage.setText(null);
if (totalAmount.getText().isEmpty()) {
cashTF.setEditable(false);
bankTF.setEditable(false);
} else {
cashTF.setEditable(true);
bankTF.setEditable(true);
if (!cashTF.getText().isEmpty() || !bankTF.getText().isEmpty()) {
calculateTransactionValues();
}
}
}
});
}
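// Illustrative sketch (editor's addition, not part of the generated code):
// the DefaultCellEditor wiring in the constructor attaches a combo box (or
// text field) to a table column, e.g.
//
//     JComboBox natures = new JComboBox(
//             new String[] {"Additive", "Subtractive"});
//     itemTable.getColumnModel().getColumn(11)
//             .setCellEditor(new DefaultCellEditor(natures));
//
// Editing a cell in that column then shows the combo box in place of the
// plain cell.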
@SuppressWarnings("unchecked")
// <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents
private void initComponents() {
bindingGroup = new org.jdesktop.beansbinding.BindingGroup();
taxTypeClassCB = new javax.swing.JComboBox();
natureCB = new javax.swing.JComboBox();
billSundryNatureCB = new javax.swing.JComboBox();
taxTypeClassCBB = new javax.swing.JComboBox();
list = new javax.swing.JPanel();
jScrollPane6 = new javax.swing.JScrollPane();
saleOrderListTable = new javax.swing.JTable();
jLayeredPane5 = new javax.swing.JLayeredPane();
jLabel7 = new javax.swing.JLabel();
searchBy = new javax.swing.JComboBox();
searchKeyTF = new javax.swing.JTextField();
jLabel8 = new javax.swing.JLabel();
backButton = new javax.swing.JButton();
quantityTF = new javax.swing.JFormattedTextField();
rateTF = new javax.swing.JFormattedTextField();
atTheRateTF = new javax.swing.JFormattedTextField();
miscAmountTF = new javax.swing.JFormattedTextField();
aRateTF = new javax.swing.JFormattedTextField();
amountTF = new javax.swing.JFormattedTextField();
expiryDateTF = new javax.swing.JTextField();
container = new javax.swing.JPanel();
contentPanel = new javax.swing.JLayeredPane();
jLabel1 = new javax.swing.JLabel();
voucherNumberTF = new javax.swing.JTextField();
jLabel2 = new javax.swing.JLabel();
accountNumberCB = new javax.swing.JComboBox();
jLabel3 = new javax.swing.JLabel();
jLabel4 = new javax.swing.JLabel();
jScrollPane1 = new javax.swing.JScrollPane();
addressTA = new javax.swing.JTextArea();
jLabel6 = new javax.swing.JLabel();
jLayeredPane2 = new javax.swing.JLayeredPane();
jScrollPane3 = new javax.swing.JScrollPane();
billSundryTable = new javax.swing.JTable();
removeRowBillSundry = new javax.swing.JButton();
insertRowBillSundry = new javax.swing.JButton();
jLayeredPane6 = new javax.swing.JLayeredPane();
jLabel15 = new javax.swing.JLabel();
totalAmount = new javax.swing.JLabel();
jLabel17 = new javax.swing.JLabel();
jScrollPane4 = new javax.swing.JScrollPane();
otherDetailTA = new javax.swing.JTextArea();
dateChooserCombo1 = new datechooser.beans.DateChooserCombo();
itemTableScroll = new javax.swing.JScrollPane();
jLayeredPane1 = new javax.swing.JLayeredPane();
jScrollPane2 = new javax.swing.JScrollPane();
itemTable = new javax.swing.JTable();
jLayeredPane3 = new javax.swing.JLayeredPane();
jLabel11 = new javax.swing.JLabel();
amount = new javax.swing.JLabel();
insertRowItemTable = new javax.swing.JButton();
removeRowItemTable = new javax.swing.JButton();
errorMessage = new javax.swing.JLabel();
accountNameCB = new javax.swing.JComboBox();
quantityInfo = new javax.swing.JLabel();
jPanel1 = new javax.swing.JPanel();
jLabel5 = new javax.swing.JLabel();
cashTF = new javax.swing.JTextField();
cashAccCB = new javax.swing.JComboBox();
jLabel9 = new javax.swing.JLabel();
bankTF = new javax.swing.JTextField();
bankAccCB = new javax.swing.JComboBox();
jLabel10 = new javax.swing.JLabel();
creditTF = new javax.swing.JTextField();
debitTF = new javax.swing.JTextField();
jLabel12 = new javax.swing.JLabel();
creditLimitCB = new javax.swing.JComboBox();
creditLimitDayTF = new javax.swing.JTextField();
dateChooserCombo2 = new datechooser.beans.DateChooserCombo();
jLabel18 = new javax.swing.JLabel();
contolLP = new javax.swing.JLayeredPane();
mainControl = new javax.swing.JLayeredPane();
addButton = new javax.swing.JButton();
saveButton = new javax.swing.JButton();
updateButton = new javax.swing.JButton();
cancelButton = new javax.swing.JButton();
editButton = new javax.swing.JButton();
deleteButton = new javax.swing.JButton();
printButton = new javax.swing.JButton();
taxTypeClassCB.setFont(new java.awt.Font("Trebuchet MS", 0, 12)); // NOI18N
taxTypeClassCB.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "Fix Amount", "Percentage", "Quantity" }));
taxTypeClassCB.setBorder(null);
taxTypeClassCB.addMouseListener(new java.awt.event.MouseAdapter() {
public void mouseClicked(java.awt.event.MouseEvent evt) {
taxTypeClassCBMouseClicked(evt);
}
});
taxTypeClassCB.addItemListener(new java.awt.event.ItemListener() {
public void itemStateChanged(java.awt.event.ItemEvent evt) {
taxTypeClassCBItemStateChanged(evt);
}
});
taxTypeClassCB.addFocusListener(new java.awt.event.FocusAdapter() {
public void focusGained(java.awt.event.FocusEvent evt) {
taxTypeClassCBFocusGained(evt);
}
public void focusLost(java.awt.event.FocusEvent evt) {
taxTypeClassCBFocusLost(evt);
}
});
natureCB.setFont(new java.awt.Font("Trebuchet MS", 0, 12)); // NOI18N
natureCB.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "Additive", "Subtractive" }));
natureCB.setBorder(null);
natureCB.addMouseListener(new java.awt.event.MouseAdapter() {
public void mouseClicked(java.awt.event.MouseEvent evt) {
natureCBMouseClicked(evt);
}
});
natureCB.addFocusListener(new java.awt.event.FocusAdapter() {
public void focusGained(java.awt.event.FocusEvent evt) {
natureCBFocusGained(evt);
}
public void focusLost(java.awt.event.FocusEvent evt) {
natureCBFocusLost(evt);
}
});
billSundryNatureCB.setFont(new java.awt.Font("Trebuchet MS", 0, 12)); // NOI18N
billSundryNatureCB.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "Additive", "Subtractive" }));
billSundryNatureCB.setBorder(null);
billSundryNatureCB.addMouseListener(new java.awt.event.MouseAdapter() {
public void mouseClicked(java.awt.event.MouseEvent evt) {
billSundryNatureCBMouseClicked(evt);
}
});
billSundryNatureCB.addPopupMenuListener(new javax.swing.event.PopupMenuListener() {
public void popupMenuCanceled(javax.swing.event.PopupMenuEvent evt) {
}
public void popupMenuWillBecomeInvisible(javax.swing.event.PopupMenuEvent evt) {
billSundryNatureCBPopupMenuWillBecomeInvisible(evt);
}
public void popupMenuWillBecomeVisible(javax.swing.event.PopupMenuEvent evt) {
}
});
billSundryNatureCB.addFocusListener(new java.awt.event.FocusAdapter() {
public void focusGained(java.awt.event.FocusEvent evt) {
billSundryNatureCBFocusGained(evt);
}
public void focusLost(java.awt.event.FocusEvent evt) {
billSundryNatureCBFocusLost(evt);
}
});
taxTypeClassCBB.setFont(new java.awt.Font("Trebuchet MS", 0, 12)); // NOI18N
taxTypeClassCBB.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "Fix Amount", "Percentage" }));
taxTypeClassCBB.setBorder(null);
taxTypeClassCBB.addMouseListener(new java.awt.event.MouseAdapter() {
public void mouseClicked(java.awt.event.MouseEvent evt) {
taxTypeClassCBBMouseClicked(evt);
}
});
taxTypeClassCBB.addPopupMenuListener(new javax.swing.event.PopupMenuListener() {
public void popupMenuCanceled(javax.swing.event.PopupMenuEvent evt) {
}
public void popupMenuWillBecomeInvisible(javax.swing.event.PopupMenuEvent evt) {
taxTypeClassCBBPopupMenuWillBecomeInvisible(evt);
}
public void popupMenuWillBecomeVisible(javax.swing.event.PopupMenuEvent evt) {
}
});
taxTypeClassCBB.addItemListener(new java.awt.event.ItemListener() {
public void itemStateChanged(java.awt.event.ItemEvent evt) {
taxTypeClassCBBItemStateChanged(evt);
}
});
taxTypeClassCBB.addFocusListener(new java.awt.event.FocusAdapter() {
public void focusGained(java.awt.event.FocusEvent evt) {
taxTypeClassCBBFocusGained(evt);
}
public void focusLost(java.awt.event.FocusEvent evt) {
taxTypeClassCBBFocusLost(evt);
}
});
list.setBackground(new java.awt.Color(255, 255, 255));
jScrollPane6.setBorder(javax.swing.BorderFactory.createLineBorder(javax.swing.UIManager.getDefaults().getColor("Button.light")));
saleOrderListTable.setFont(new java.awt.Font("Trebuchet MS", 1, 12)); // NOI18N
saleOrderListTable.setModel(new javax.swing.table.DefaultTableModel(
new Object [][] {
},
new String [] {
"S. N.", "Voucher Number", "Date", "Account Number", "Customer Name"
}
) {
Class[] types = new Class [] {
java.lang.Integer.class, java.lang.Object.class, java.lang.Object.class, java.lang.Object.class, java.lang.Object.class
};
boolean[] canEdit = new boolean [] {
false, false, false, false, false
};
public Class getColumnClass(int columnIndex) {
return types [columnIndex];
}
public boolean isCellEditable(int rowIndex, int columnIndex) {
return canEdit [columnIndex];
}
});
saleOrderListTable.setFocusable(false);
saleOrderListTable.setRowHeight(23);
saleOrderListTable.setSelectionMode(javax.swing.ListSelectionModel.SINGLE_SELECTION);
saleOrderListTable.addMouseListener(new java.awt.event.MouseAdapter() {
public void mouseClicked(java.awt.event.MouseEvent evt) {
saleOrderListTableMouseClicked(evt);
}
});
jScrollPane6.setViewportView(saleOrderListTable);
if (saleOrderListTable.getColumnModel().getColumnCount() > 0) {
saleOrderListTable.getColumnModel().getColumn(0).setPreferredWidth(200);
saleOrderListTable.getColumnModel().getColumn(1).setPreferredWidth(300);
saleOrderListTable.getColumnModel().getColumn(2).setPreferredWidth(200);
saleOrderListTable.getColumnModel().getColumn(3).setPreferredWidth(300);
saleOrderListTable.getColumnModel().getColumn(4).setPreferredWidth(400);
}
jLayeredPane5.setBorder(javax.swing.BorderFactory.createLineBorder(javax.swing.UIManager.getDefaults().getColor("Button.light")));
jLabel7.setFont(new java.awt.Font("Trebuchet MS", 0, 12)); // NOI18N
jLabel7.setText("Search By :");
searchBy.setBackground(javax.swing.UIManager.getDefaults().getColor("Panel.background"));
searchBy.setFont(new java.awt.Font("Trebuchet MS", 0, 12)); // NOI18N
searchBy.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "Account Number", "Customer Name", "Date", "Voucher Number" }));
searchKeyTF.setFont(new java.awt.Font("Trebuchet MS", 0, 12)); // NOI18N
searchKeyTF.addCaretListener(new javax.swing.event.CaretListener() {
public void caretUpdate(javax.swing.event.CaretEvent evt) {
searchKeyTFCaretUpdate(evt);
}
});
jLabel8.setForeground(new java.awt.Color(255, 0, 0));
backButton.setText("Back");
backButton.setBorder(null);
backButton.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
backButtonActionPerformed(evt);
}
});
javax.swing.GroupLayout jLayeredPane5Layout = new javax.swing.GroupLayout(jLayeredPane5);
jLayeredPane5.setLayout(jLayeredPane5Layout);
jLayeredPane5Layout.setHorizontalGroup(
jLayeredPane5Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jLayeredPane5Layout.createSequentialGroup()
.addGap(30, 30, 30)
.addComponent(jLabel7)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(searchBy, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(searchKeyTF, javax.swing.GroupLayout.DEFAULT_SIZE, 333, Short.MAX_VALUE)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(jLabel8, javax.swing.GroupLayout.PREFERRED_SIZE, 281, javax.swing.GroupLayout.PREFERRED_SIZE)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
.addComponent(backButton, javax.swing.GroupLayout.PREFERRED_SIZE, 75, javax.swing.GroupLayout.PREFERRED_SIZE)
.addContainerGap())
);
jLayeredPane5Layout.setVerticalGroup(
jLayeredPane5Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jLayeredPane5Layout.createSequentialGroup()
.addGap(2, 2, 2)
.addGroup(jLayeredPane5Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel7)
.addComponent(searchBy, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(searchKeyTF, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(jLabel8)
.addComponent(backButton, javax.swing.GroupLayout.PREFERRED_SIZE, 22, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGap(2, 2, 2))
);
jLayeredPane5Layout.linkSize(javax.swing.SwingConstants.VERTICAL, new java.awt.Component[] {jLabel8, searchKeyTF});
jLayeredPane5.setLayer(jLabel7, javax.swing.JLayeredPane.DEFAULT_LAYER);
jLayeredPane5.setLayer(searchBy, javax.swing.JLayeredPane.DEFAULT_LAYER);
jLayeredPane5.setLayer(searchKeyTF, javax.swing.JLayeredPane.DEFAULT_LAYER);
jLayeredPane5.setLayer(jLabel8, javax.swing.JLayeredPane.DEFAULT_LAYER);
jLayeredPane5.setLayer(backButton, javax.swing.JLayeredPane.DEFAULT_LAYER);
javax.swing.GroupLayout listLayout = new javax.swing.GroupLayout(list);
list.setLayout(listLayout);
listLayout.setHorizontalGroup(
listLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(listLayout.createSequentialGroup()
.addContainerGap()
.addGroup(listLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jLayeredPane5)
.addComponent(jScrollPane6))
.addContainerGap())
);
listLayout.setVerticalGroup(
listLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(listLayout.createSequentialGroup()
.addGap(5, 5, 5)
.addComponent(jLayeredPane5, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(jScrollPane6, javax.swing.GroupLayout.DEFAULT_SIZE, 500, Short.MAX_VALUE)
.addContainerGap())
);
quantityTF.setFormatterFactory(new javax.swing.text.DefaultFormatterFactory(new javax.swing.text.NumberFormatter(new java.text.DecimalFormat("0.##########"))));
quantityTF.addFocusListener(new java.awt.event.FocusAdapter() {
public void focusGained(java.awt.event.FocusEvent evt) {
quantityTFFocusGained(evt);
}
public void focusLost(java.awt.event.FocusEvent evt) {
quantityTFFocusLost(evt);
}
});
quantityTF.addKeyListener(new java.awt.event.KeyAdapter() {
public void keyTyped(java.awt.event.KeyEvent evt) {
quantityTFKeyTyped(evt);
}
});
rateTF.setFormatterFactory(new javax.swing.text.DefaultFormatterFactory(new javax.swing.text.NumberFormatter(new java.text.DecimalFormat("0.##########"))));
rateTF.addFocusListener(new java.awt.event.FocusAdapter() {
public void focusGained(java.awt.event.FocusEvent evt) {
rateTFFocusGained(evt);
}
public void focusLost(java.awt.event.FocusEvent evt) {
rateTFFocusLost(evt);
}
});
rateTF.addKeyListener(new java.awt.event.KeyAdapter() {
public void keyTyped(java.awt.event.KeyEvent evt) {
rateTFKeyTyped(evt);
}
});
atTheRateTF.setFormatterFactory(new javax.swing.text.DefaultFormatterFactory(new javax.swing.text.NumberFormatter(new java.text.DecimalFormat("0.##########"))));
atTheRateTF.addFocusListener(new java.awt.event.FocusAdapter() {
public void focusGained(java.awt.event.FocusEvent evt) {
atTheRateTFFocusGained(evt);
}
public void focusLost(java.awt.event.FocusEvent evt) {
atTheRateTFFocusLost(evt);
}
});
atTheRateTF.addKeyListener(new java.awt.event.KeyAdapter() {
public void keyTyped(java.awt.event.KeyEvent evt) {
atTheRateTFKeyTyped(evt);
}
});
miscAmountTF.setFormatterFactory(new javax.swing.text.DefaultFormatterFactory(new javax.swing.text.NumberFormatter(new java.text.DecimalFormat("0.##########"))));
miscAmountTF.addFocusListener(new java.awt.event.FocusAdapter() {
public void focusGained(java.awt.event.FocusEvent evt) {
miscAmountTFFocusGained(evt);
}
public void focusLost(java.awt.event.FocusEvent evt) {
miscAmountTFFocusLost(evt);
}
});
miscAmountTF.addKeyListener(new java.awt.event.KeyAdapter() {
public void keyTyped(java.awt.event.KeyEvent evt) {
miscAmountTFKeyTyped(evt);
}
});
aRateTF.setFormatterFactory(new javax.swing.text.DefaultFormatterFactory(new javax.swing.text.NumberFormatter(new java.text.DecimalFormat("0.##########"))));
aRateTF.addFocusListener(new java.awt.event.FocusAdapter() {
public void focusGained(java.awt.event.FocusEvent evt) {
aRateTFFocusGained(evt);
}
public void focusLost(java.awt.event.FocusEvent evt) {
aRateTFFocusLost(evt);
}
});
aRateTF.addKeyListener(new java.awt.event.KeyAdapter() {
public void keyTyped(java.awt.event.KeyEvent evt) {
aRateTFKeyTyped(evt);
}
});
amountTF.setFormatterFactory(new javax.swing.text.DefaultFormatterFactory(new javax.swing.text.NumberFormatter(new java.text.DecimalFormat("0.##########"))));
amountTF.addFocusListener(new java.awt.event.FocusAdapter() {
public void focusGained(java.awt.event.FocusEvent evt) {
amountTFFocusGained(evt);
}
public void focusLost(java.awt.event.FocusEvent evt) {
amountTFFocusLost(evt);
}
});
amountTF.addKeyListener(new java.awt.event.KeyAdapter() {
public void keyTyped(java.awt.event.KeyEvent evt) {
amountTFKeyTyped(evt);
}
});
expiryDateTF.addFocusListener(new java.awt.event.FocusAdapter() {
public void focusGained(java.awt.event.FocusEvent evt) {
expiryDateTFFocusGained(evt);
}
public void focusLost(java.awt.event.FocusEvent evt) {
expiryDateTFFocusLost(evt);
}
});
expiryDateTF.addKeyListener(new java.awt.event.KeyAdapter() {
public void keyTyped(java.awt.event.KeyEvent evt) {
expiryDateTFKeyTyped(evt);
}
});
setBackground(new java.awt.Color(255, 255, 255));
contentPanel.setBackground(new java.awt.Color(255, 255, 255));
contentPanel.setBorder(javax.swing.BorderFactory.createLineBorder(javax.swing.UIManager.getDefaults().getColor("Button.light")));
contentPanel.setOpaque(true);
jLabel1.setFont(new java.awt.Font("Trebuchet MS", 0, 14)); // NOI18N
jLabel1.setText("Voucher Number :");
voucherNumberTF.setFont(new java.awt.Font("Trebuchet MS", 0, 12)); // NOI18N
org.jdesktop.beansbinding.Binding binding = org.jdesktop.beansbinding.Bindings.createAutoBinding(org.jdesktop.beansbinding.AutoBinding.UpdateStrategy.READ_WRITE, errorMessage, org.jdesktop.beansbinding.ELProperty.create("${labelFor.foreground}"), voucherNumberTF, org.jdesktop.beansbinding.BeanProperty.create("text"));
bindingGroup.addBinding(binding);
jLabel2.setFont(new java.awt.Font("Trebuchet MS", 0, 14)); // NOI18N
jLabel2.setText("Account Number :");
accountNumberCB.setFont(new java.awt.Font("Trebuchet MS", 0, 12)); // NOI18N
accountNumberCB.setBorder(null);
binding = org.jdesktop.beansbinding.Bindings.createAutoBinding(org.jdesktop.beansbinding.AutoBinding.UpdateStrategy.READ_WRITE, jLabel1, org.jdesktop.beansbinding.ELProperty.create("${labelFor.foreground}"), accountNumberCB, org.jdesktop.beansbinding.BeanProperty.create("selectedItem"));
bindingGroup.addBinding(binding);
accountNumberCB.addMouseListener(new java.awt.event.MouseAdapter() {
public void mouseEntered(java.awt.event.MouseEvent evt) {
accountNumberCBMouseEntered(evt);
}
});
accountNumberCB.addPopupMenuListener(new javax.swing.event.PopupMenuListener() {
public void popupMenuCanceled(javax.swing.event.PopupMenuEvent evt) {
}
public void popupMenuWillBecomeInvisible(javax.swing.event.PopupMenuEvent evt) {
accountNumberCBPopupMenuWillBecomeInvisible(evt);
}
public void popupMenuWillBecomeVisible(javax.swing.event.PopupMenuEvent evt) {
}
});
jLabel3.setFont(new java.awt.Font("Trebuchet MS", 0, 14)); // NOI18N
jLabel3.setText("Account Name :");
jLabel4.setFont(new java.awt.Font("Trebuchet MS", 0, 14)); // NOI18N
jLabel4.setText("Address :");
addressTA.setColumns(20);
addressTA.setFont(new java.awt.Font("Trebuchet MS", 0, 12)); // NOI18N
addressTA.setRows(5);
addressTA.setNextFocusableComponent(itemTable);
binding = org.jdesktop.beansbinding.Bindings.createAutoBinding(org.jdesktop.beansbinding.AutoBinding.UpdateStrategy.READ_WRITE, errorMessage, org.jdesktop.beansbinding.ELProperty.create("${labelFor.foreground}"), addressTA, org.jdesktop.beansbinding.BeanProperty.create("text"));
bindingGroup.addBinding(binding);
jScrollPane1.setViewportView(addressTA);
jLabel6.setFont(new java.awt.Font("Trebuchet MS", 0, 14)); // NOI18N
jLabel6.setText("Date :");
jLayeredPane2.setBorder(new javax.swing.border.LineBorder(javax.swing.UIManager.getDefaults().getColor("Button.light"), 1, true));
billSundryTable.setFont(new java.awt.Font("Tahoma", 1, 12)); // NOI18N
billSundryTable.setModel(new javax.swing.table.DefaultTableModel(
new Object [][] {
{null, null, null, null, null}
},
new String [] {
"Bill Sundry", "@", "Tax Class", "Nature", "Amount"
}
) {
boolean[] canEdit = new boolean [] {
false, true, true, true, true
};
public boolean isCellEditable(int rowIndex, int columnIndex) {
return canEdit [columnIndex];
}
});
billSundryTable.setColumnSelectionAllowed(true);
billSundryTable.setPreferredSize(new java.awt.Dimension(2000, 95));
billSundryTable.setRowHeight(28);
billSundryTable.setSelectionBackground(new java.awt.Color(51, 102, 255));
billSundryTable.setSelectionMode(javax.swing.ListSelectionModel.SINGLE_SELECTION);
billSundryTable.setSurrendersFocusOnKeystroke(true);
billSundryTable.getTableHeader().setReorderingAllowed(false);
binding = org.jdesktop.beansbinding.Bindings.createAutoBinding(org.jdesktop.beansbinding.AutoBinding.UpdateStrategy.READ_WRITE, errorMessage, org.jdesktop.beansbinding.ELProperty.create("${labelFor.foreground}"), billSundryTable, org.jdesktop.beansbinding.BeanProperty.create("selectedElement"));
bindingGroup.addBinding(binding);
billSundryTable.addMouseListener(new java.awt.event.MouseAdapter() {
public void mouseClicked(java.awt.event.MouseEvent evt) {
billSundryTableMouseClicked(evt);
}
});
billSundryTable.addFocusListener(new java.awt.event.FocusAdapter() {
public void focusGained(java.awt.event.FocusEvent evt) {
billSundryTableFocusGained(evt);
}
});
billSundryTable.addKeyListener(new java.awt.event.KeyAdapter() {
public void keyTyped(java.awt.event.KeyEvent evt) {
billSundryTableKeyTyped(evt);
}
});
jScrollPane3.setViewportView(billSundryTable);
billSundryTable.getColumnModel().getSelectionModel().setSelectionMode(javax.swing.ListSelectionModel.SINGLE_INTERVAL_SELECTION);
if (billSundryTable.getColumnModel().getColumnCount() > 0) {
billSundryTable.getColumnModel().getColumn(0).setPreferredWidth(500);
billSundryTable.getColumnModel().getColumn(1).setPreferredWidth(300);
billSundryTable.getColumnModel().getColumn(2).setPreferredWidth(400);
billSundryTable.getColumnModel().getColumn(3).setPreferredWidth(400);
billSundryTable.getColumnModel().getColumn(4).setPreferredWidth(400);
}
removeRowBillSundry.setBackground(new java.awt.Color(255, 255, 255));
removeRowBillSundry.setForeground(new java.awt.Color(255, 0, 0));
removeRowBillSundry.setText("Remove Row");
removeRowBillSundry.setFocusPainted(false);
removeRowBillSundry.setNextFocusableComponent(addButton);
removeRowBillSundry.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
removeRowBillSundryActionPerformed(evt);
}
});
insertRowBillSundry.setBackground(new java.awt.Color(255, 255, 255));
insertRowBillSundry.setText("Insert Row");
insertRowBillSundry.setFocusPainted(false);
insertRowBillSundry.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
insertRowBillSundryActionPerformed(evt);
}
});
jLayeredPane6.setBackground(javax.swing.UIManager.getDefaults().getColor("CheckBoxMenuItem.selectionBackground"));
jLayeredPane6.setOpaque(true);
jLabel15.setFont(new java.awt.Font("Trebuchet MS", 0, 14)); // NOI18N
jLabel15.setForeground(new java.awt.Color(255, 255, 255));
jLabel15.setHorizontalAlignment(javax.swing.SwingConstants.CENTER);
jLabel15.setText("Total Amount :");
totalAmount.setBackground(javax.swing.UIManager.getDefaults().getColor("CheckBoxMenuItem.selectionBackground"));
totalAmount.setFont(new java.awt.Font("Trebuchet MS", 1, 14)); // NOI18N
totalAmount.setForeground(new java.awt.Color(255, 255, 255));
totalAmount.setHorizontalAlignment(javax.swing.SwingConstants.RIGHT);
totalAmount.setOpaque(true);
binding = org.jdesktop.beansbinding.Bindings.createAutoBinding(org.jdesktop.beansbinding.AutoBinding.UpdateStrategy.READ_WRITE, errorMessage, org.jdesktop.beansbinding.ELProperty.create("${labelFor.foreground}"), totalAmount, org.jdesktop.beansbinding.BeanProperty.create("text"));
bindingGroup.addBinding(binding);
javax.swing.GroupLayout jLayeredPane6Layout = new javax.swing.GroupLayout(jLayeredPane6);
jLayeredPane6.setLayout(jLayeredPane6Layout);
jLayeredPane6Layout.setHorizontalGroup(
jLayeredPane6Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jLayeredPane6Layout.createSequentialGroup()
.addComponent(jLabel15, javax.swing.GroupLayout.PREFERRED_SIZE, 106, javax.swing.GroupLayout.PREFERRED_SIZE)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(totalAmount, javax.swing.GroupLayout.PREFERRED_SIZE, 201, javax.swing.GroupLayout.PREFERRED_SIZE)
.addContainerGap())
);
jLayeredPane6Layout.setVerticalGroup(
jLayeredPane6Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jLabel15, javax.swing.GroupLayout.Alignment.TRAILING, javax.swing.GroupLayout.PREFERRED_SIZE, 25, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(totalAmount, javax.swing.GroupLayout.Alignment.TRAILING, javax.swing.GroupLayout.PREFERRED_SIZE, 25, javax.swing.GroupLayout.PREFERRED_SIZE)
);
jLayeredPane6.setLayer(jLabel15, javax.swing.JLayeredPane.DEFAULT_LAYER);
jLayeredPane6.setLayer(totalAmount, javax.swing.JLayeredPane.DEFAULT_LAYER);
javax.swing.GroupLayout jLayeredPane2Layout = new javax.swing.GroupLayout(jLayeredPane2);
jLayeredPane2.setLayout(jLayeredPane2Layout);
jLayeredPane2Layout.setHorizontalGroup(
jLayeredPane2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jLayeredPane2Layout.createSequentialGroup()
.addGap(2, 2, 2)
.addGroup(jLayeredPane2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jScrollPane3)
.addGroup(jLayeredPane2Layout.createSequentialGroup()
.addComponent(insertRowBillSundry)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(removeRowBillSundry)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addComponent(jLayeredPane6, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))))
);
jLayeredPane2Layout.setVerticalGroup(
jLayeredPane2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jLayeredPane2Layout.createSequentialGroup()
.addGap(2, 2, 2)
.addComponent(jScrollPane3, javax.swing.GroupLayout.PREFERRED_SIZE, 124, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGap(0, 0, 0)
.addGroup(jLayeredPane2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)
.addGroup(jLayeredPane2Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(insertRowBillSundry)
.addComponent(removeRowBillSundry))
.addComponent(jLayeredPane6, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)))
);
jLayeredPane2.setLayer(jScrollPane3, javax.swing.JLayeredPane.DEFAULT_LAYER);
jLayeredPane2.setLayer(removeRowBillSundry, javax.swing.JLayeredPane.DEFAULT_LAYER);
jLayeredPane2.setLayer(insertRowBillSundry, javax.swing.JLayeredPane.DEFAULT_LAYER);
jLayeredPane2.setLayer(jLayeredPane6, javax.swing.JLayeredPane.DEFAULT_LAYER);
jLabel17.setFont(new java.awt.Font("Trebuchet MS", 0, 14)); // NOI18N
jLabel17.setText("Other Details :");
otherDetailTA.setColumns(20);
otherDetailTA.setRows(5);
binding = org.jdesktop.beansbinding.Bindings.createAutoBinding(org.jdesktop.beansbinding.AutoBinding.UpdateStrategy.READ_WRITE, errorMessage, org.jdesktop.beansbinding.ELProperty.create("${labelFor.foreground}"), otherDetailTA, org.jdesktop.beansbinding.BeanProperty.create("text"));
bindingGroup.addBinding(binding);
jScrollPane4.setViewportView(otherDetailTA);
dateChooserCombo1.setFieldFont(new java.awt.Font("Trebuchet MS", java.awt.Font.BOLD, 12));
dateChooserCombo1.setLocked(true);
itemTableScroll.setBackground(new java.awt.Color(255, 255, 255));
itemTableScroll.setBorder(javax.swing.BorderFactory.createLineBorder(javax.swing.UIManager.getDefaults().getColor("Button.light")));
itemTableScroll.setPreferredSize(new java.awt.Dimension(900, 100));
jLayeredPane1.setBackground(new java.awt.Color(255, 255, 255));
jLayeredPane1.setOpaque(true);
jLayeredPane1.setPreferredSize(new java.awt.Dimension(3010, 10000));
jScrollPane2.setBackground(new java.awt.Color(255, 255, 255));
jScrollPane2.setHorizontalScrollBarPolicy(javax.swing.ScrollPaneConstants.HORIZONTAL_SCROLLBAR_NEVER);
jScrollPane2.setVerticalScrollBarPolicy(javax.swing.ScrollPaneConstants.VERTICAL_SCROLLBAR_NEVER);
itemTable.setFont(new java.awt.Font("Trebuchet MS", 1, 14)); // NOI18N
itemTable.setModel(new javax.swing.table.DefaultTableModel(
new Object [][] {
{null, null, null, null, null, null, null, null, null, null, null, null, null, null, null}
},
new String [] {
"S. N.", "Item Code", "Item Name", "Store", "Unit", "Quantity", "Rate", "Item Amount", "Bill Sundry", "Tax Class", "@", "Nature", "Misc. Amount", "Total Row Amount", "Expiry Date"
}
) {
Class[] types = new Class [] {
java.lang.Integer.class, java.lang.Object.class, java.lang.Object.class, java.lang.Object.class, java.lang.Object.class, java.lang.Object.class, java.lang.Object.class, java.lang.Object.class, java.lang.Object.class, java.lang.Object.class, java.lang.Object.class, java.lang.Object.class, java.lang.Object.class, java.lang.Object.class, java.lang.Object.class
};
boolean[] canEdit = new boolean [] {
false, false, false, false, false, true, true, false, false, true, true, true, true, false, true
};
public Class getColumnClass(int columnIndex) {
return types [columnIndex];
}
public boolean isCellEditable(int rowIndex, int columnIndex) {
return canEdit [columnIndex];
}
});
itemTable.setCellSelectionEnabled(true);
itemTable.setPreferredSize(new java.awt.Dimension(3000, 10000));
itemTable.setRowHeight(30);
itemTable.setSelectionBackground(new java.awt.Color(0, 153, 153));
itemTable.setSelectionMode(javax.swing.ListSelectionModel.SINGLE_SELECTION);
itemTable.setSurrendersFocusOnKeystroke(true);
itemTable.getTableHeader().setReorderingAllowed(false);
itemTable.addMouseListener(new java.awt.event.MouseAdapter() {
public void mouseClicked(java.awt.event.MouseEvent evt) {
itemTableMouseClicked(evt);
}
});
itemTable.addFocusListener(new java.awt.event.FocusAdapter() {
public void focusGained(java.awt.event.FocusEvent evt) {
itemTableFocusGained(evt);
}
});
itemTable.addKeyListener(new java.awt.event.KeyAdapter() {
public void keyPressed(java.awt.event.KeyEvent evt) {
itemTableKeyPressed(evt);
}
public void keyTyped(java.awt.event.KeyEvent evt) {
itemTableKeyTyped(evt);
}
});
jScrollPane2.setViewportView(itemTable);
if (itemTable.getColumnModel().getColumnCount() > 0) {
itemTable.getColumnModel().getColumn(0).setPreferredWidth(200);
itemTable.getColumnModel().getColumn(1).setPreferredWidth(400);
itemTable.getColumnModel().getColumn(2).setPreferredWidth(500);
itemTable.getColumnModel().getColumn(3).setPreferredWidth(400);
itemTable.getColumnModel().getColumn(4).setPreferredWidth(400);
itemTable.getColumnModel().getColumn(5).setPreferredWidth(500);
itemTable.getColumnModel().getColumn(6).setPreferredWidth(300);
itemTable.getColumnModel().getColumn(7).setPreferredWidth(500);
itemTable.getColumnModel().getColumn(8).setPreferredWidth(500);
itemTable.getColumnModel().getColumn(9).setPreferredWidth(500);
itemTable.getColumnModel().getColumn(10).setPreferredWidth(300);
itemTable.getColumnModel().getColumn(11).setPreferredWidth(500);
itemTable.getColumnModel().getColumn(12).setPreferredWidth(400);
itemTable.getColumnModel().getColumn(13).setPreferredWidth(500);
itemTable.getColumnModel().getColumn(14).setPreferredWidth(400);
}
javax.swing.GroupLayout jLayeredPane1Layout = new javax.swing.GroupLayout(jLayeredPane1);
jLayeredPane1.setLayout(jLayeredPane1Layout);
jLayeredPane1Layout.setHorizontalGroup(
jLayeredPane1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jLayeredPane1Layout.createSequentialGroup()
.addContainerGap()
.addComponent(jScrollPane2, javax.swing.GroupLayout.PREFERRED_SIZE, 3000, javax.swing.GroupLayout.PREFERRED_SIZE)
.addContainerGap(javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE))
);
jLayeredPane1Layout.setVerticalGroup(
jLayeredPane1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jLayeredPane1Layout.createSequentialGroup()
.addContainerGap()
.addComponent(jScrollPane2, javax.swing.GroupLayout.PREFERRED_SIZE, 10000, javax.swing.GroupLayout.PREFERRED_SIZE)
.addContainerGap(javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE))
);
jLayeredPane1.setLayer(jScrollPane2, javax.swing.JLayeredPane.DEFAULT_LAYER);
itemTableScroll.setViewportView(jLayeredPane1);
jLayeredPane3.setBackground(javax.swing.UIManager.getDefaults().getColor("CheckBoxMenuItem.selectionBackground"));
jLayeredPane3.setOpaque(true);
jLabel11.setFont(new java.awt.Font("Trebuchet MS", 0, 14)); // NOI18N
jLabel11.setForeground(new java.awt.Color(255, 255, 255));
jLabel11.setHorizontalAlignment(javax.swing.SwingConstants.CENTER);
jLabel11.setText("Amount :");
amount.setBackground(javax.swing.UIManager.getDefaults().getColor("CheckBoxMenuItem.selectionBackground"));
amount.setFont(new java.awt.Font("Trebuchet MS", 1, 14)); // NOI18N
amount.setForeground(new java.awt.Color(255, 255, 255));
amount.setHorizontalAlignment(javax.swing.SwingConstants.RIGHT);
amount.setOpaque(true);
javax.swing.GroupLayout jLayeredPane3Layout = new javax.swing.GroupLayout(jLayeredPane3);
jLayeredPane3.setLayout(jLayeredPane3Layout);
jLayeredPane3Layout.setHorizontalGroup(
jLayeredPane3Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jLayeredPane3Layout.createSequentialGroup()
.addContainerGap()
.addComponent(jLabel11, javax.swing.GroupLayout.PREFERRED_SIZE, 76, javax.swing.GroupLayout.PREFERRED_SIZE)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(amount, javax.swing.GroupLayout.PREFERRED_SIZE, 221, javax.swing.GroupLayout.PREFERRED_SIZE)
.addContainerGap(javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE))
);
jLayeredPane3Layout.setVerticalGroup(
jLayeredPane3Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jLabel11, javax.swing.GroupLayout.Alignment.TRAILING, javax.swing.GroupLayout.PREFERRED_SIZE, 25, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(amount, javax.swing.GroupLayout.Alignment.TRAILING, javax.swing.GroupLayout.PREFERRED_SIZE, 25, javax.swing.GroupLayout.PREFERRED_SIZE)
);
jLayeredPane3.setLayer(jLabel11, javax.swing.JLayeredPane.DEFAULT_LAYER);
jLayeredPane3.setLayer(amount, javax.swing.JLayeredPane.DEFAULT_LAYER);
insertRowItemTable.setBackground(new java.awt.Color(255, 255, 255));
insertRowItemTable.setText("Insert Row");
insertRowItemTable.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
insertRowItemTableActionPerformed(evt);
}
});
removeRowItemTable.setBackground(new java.awt.Color(255, 255, 255));
removeRowItemTable.setForeground(new java.awt.Color(255, 0, 0));
removeRowItemTable.setText("Remove Row");
removeRowItemTable.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
removeRowItemTableActionPerformed(evt);
}
});
errorMessage.setFont(new java.awt.Font("Trebuchet MS", 1, 12)); // NOI18N
errorMessage.setForeground(new java.awt.Color(255, 0, 0));
accountNameCB.setFont(new java.awt.Font("Trebuchet MS", 0, 12)); // NOI18N
accountNameCB.setBorder(null);
accountNameCB.addMouseListener(new java.awt.event.MouseAdapter() {
public void mouseEntered(java.awt.event.MouseEvent evt) {
accountNameCBMouseEntered(evt);
}
});
accountNameCB.addPopupMenuListener(new javax.swing.event.PopupMenuListener() {
public void popupMenuCanceled(javax.swing.event.PopupMenuEvent evt) {
}
public void popupMenuWillBecomeInvisible(javax.swing.event.PopupMenuEvent evt) {
accountNameCBPopupMenuWillBecomeInvisible(evt);
}
public void popupMenuWillBecomeVisible(javax.swing.event.PopupMenuEvent evt) {
}
});
quantityInfo.setFont(new java.awt.Font("Trebuchet MS", 1, 14)); // NOI18N
quantityInfo.setForeground(new java.awt.Color(204, 0, 0));
quantityInfo.setHorizontalAlignment(javax.swing.SwingConstants.CENTER);
jPanel1.setBackground(new java.awt.Color(255, 255, 255));
jPanel1.setMinimumSize(new java.awt.Dimension(387, 100));
jLabel5.setFont(new java.awt.Font("Tahoma", 0, 12)); // NOI18N
jLabel5.setText("Cash :");
cashTF.addCaretListener(new javax.swing.event.CaretListener() {
public void caretUpdate(javax.swing.event.CaretEvent evt) {
cashTFCaretUpdate(evt);
}
});
cashTF.addKeyListener(new java.awt.event.KeyAdapter() {
public void keyTyped(java.awt.event.KeyEvent evt) {
cashTFKeyTyped(evt);
}
});
cashAccCB.addMouseListener(new java.awt.event.MouseAdapter() {
public void mouseEntered(java.awt.event.MouseEvent evt) {
cashAccCBMouseEntered(evt);
}
});
jLabel9.setFont(new java.awt.Font("Tahoma", 0, 12)); // NOI18N
jLabel9.setText("Bank :");
bankTF.addCaretListener(new javax.swing.event.CaretListener() {
public void caretUpdate(javax.swing.event.CaretEvent evt) {
bankTFCaretUpdate(evt);
}
});
bankTF.addKeyListener(new java.awt.event.KeyAdapter() {
public void keyTyped(java.awt.event.KeyEvent evt) {
bankTFKeyTyped(evt);
}
});
bankAccCB.addMouseListener(new java.awt.event.MouseAdapter() {
public void mouseEntered(java.awt.event.MouseEvent evt) {
bankAccCBMouseEntered(evt);
}
});
jLabel10.setFont(new java.awt.Font("Tahoma", 0, 12)); // NOI18N
jLabel10.setText("Credit :");
creditTF.setEditable(false);
creditTF.addCaretListener(new javax.swing.event.CaretListener() {
public void caretUpdate(javax.swing.event.CaretEvent evt) {
creditTFCaretUpdate(evt);
}
});
debitTF.setEditable(false);
jLabel12.setFont(new java.awt.Font("Tahoma", 0, 12)); // NOI18N
jLabel12.setText("Debit :");
creditLimitCB.setModel(new javax.swing.DefaultComboBoxModel(new String[] { "Day", "Date" }));
creditLimitCB.addItemListener(new java.awt.event.ItemListener() {
public void itemStateChanged(java.awt.event.ItemEvent evt) {
creditLimitCBItemStateChanged(evt);
}
});
creditLimitDayTF.addKeyListener(new java.awt.event.KeyAdapter() {
public void keyTyped(java.awt.event.KeyEvent evt) {
creditLimitDayTFKeyTyped(evt);
}
});
javax.swing.GroupLayout jPanel1Layout = new javax.swing.GroupLayout(jPanel1);
jPanel1.setLayout(jPanel1Layout);
jPanel1Layout.setHorizontalGroup(
jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jPanel1Layout.createSequentialGroup()
.addGap(11, 11, 11)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jPanel1Layout.createSequentialGroup()
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)
.addGroup(jPanel1Layout.createSequentialGroup()
.addComponent(jLabel10)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(creditTF, javax.swing.GroupLayout.DEFAULT_SIZE, 100, Short.MAX_VALUE))
.addGroup(jPanel1Layout.createSequentialGroup()
.addComponent(jLabel12)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(debitTF)))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)
.addGroup(jPanel1Layout.createSequentialGroup()
.addComponent(creditLimitCB, javax.swing.GroupLayout.PREFERRED_SIZE, 70, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGap(18, 18, 18)
.addComponent(creditLimitDayTF, javax.swing.GroupLayout.PREFERRED_SIZE, 126, javax.swing.GroupLayout.PREFERRED_SIZE))
.addComponent(dateChooserCombo2, javax.swing.GroupLayout.DEFAULT_SIZE, 215, Short.MAX_VALUE)))
.addGroup(jPanel1Layout.createSequentialGroup()
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)
.addGroup(jPanel1Layout.createSequentialGroup()
.addComponent(jLabel5)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(cashTF, javax.swing.GroupLayout.DEFAULT_SIZE, 190, Short.MAX_VALUE))
.addGroup(jPanel1Layout.createSequentialGroup()
.addComponent(jLabel9)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(bankTF)))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)
.addComponent(bankAccCB, javax.swing.GroupLayout.PREFERRED_SIZE, 126, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(cashAccCB, javax.swing.GroupLayout.PREFERRED_SIZE, 126, javax.swing.GroupLayout.PREFERRED_SIZE))))
.addContainerGap())
);
jPanel1Layout.linkSize(javax.swing.SwingConstants.HORIZONTAL, new java.awt.Component[] {jLabel10, jLabel12, jLabel5, jLabel9});
jPanel1Layout.setVerticalGroup(
jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(javax.swing.GroupLayout.Alignment.TRAILING, jPanel1Layout.createSequentialGroup()
.addGap(0, 0, 0)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(cashTF, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(cashAccCB, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addComponent(jLabel5))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)
.addComponent(bankAccCB, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(bankTF, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(jLabel9, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(creditTF, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(creditLimitCB, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(creditLimitDayTF, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addComponent(jLabel10, javax.swing.GroupLayout.PREFERRED_SIZE, 20, javax.swing.GroupLayout.PREFERRED_SIZE))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addGroup(jPanel1Layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(debitTF, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(jLabel12, javax.swing.GroupLayout.PREFERRED_SIZE, 20, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(dateChooserCombo2, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGap(0, 0, 0))
);
jPanel1Layout.linkSize(javax.swing.SwingConstants.VERTICAL, new java.awt.Component[] {bankAccCB, bankTF, cashAccCB, cashTF, creditLimitCB, creditLimitDayTF, creditTF, dateChooserCombo2, debitTF, jLabel10, jLabel12, jLabel5, jLabel9});
jLabel18.setBackground(javax.swing.UIManager.getDefaults().getColor("CheckBoxMenuItem.selectionBackground"));
jLabel18.setFont(new java.awt.Font("Trebuchet MS", 0, 14)); // NOI18N
jLabel18.setForeground(new java.awt.Color(255, 255, 255));
jLabel18.setHorizontalAlignment(javax.swing.SwingConstants.CENTER);
jLabel18.setText(" Transaction Modes: ");
jLabel18.setOpaque(true);
javax.swing.GroupLayout contentPanelLayout = new javax.swing.GroupLayout(contentPanel);
contentPanel.setLayout(contentPanelLayout);
contentPanelLayout.setHorizontalGroup(
contentPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(contentPanelLayout.createSequentialGroup()
.addGroup(contentPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(contentPanelLayout.createSequentialGroup()
.addContainerGap()
.addGroup(contentPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(itemTableScroll, javax.swing.GroupLayout.DEFAULT_SIZE, 919, Short.MAX_VALUE)
.addGroup(javax.swing.GroupLayout.Alignment.TRAILING, contentPanelLayout.createSequentialGroup()
.addComponent(insertRowItemTable)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(removeRowItemTable)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(quantityInfo, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(jLayeredPane3, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))))
.addGroup(contentPanelLayout.createSequentialGroup()
.addGap(60, 60, 60)
.addGroup(contentPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING, false)
.addGroup(contentPanelLayout.createSequentialGroup()
.addGroup(contentPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)
.addComponent(jLabel2)
.addComponent(jLabel4, javax.swing.GroupLayout.Alignment.LEADING))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
.addGroup(contentPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)
.addComponent(accountNumberCB, 0, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addComponent(jScrollPane1, javax.swing.GroupLayout.DEFAULT_SIZE, 280, Short.MAX_VALUE)))
.addGroup(javax.swing.GroupLayout.Alignment.LEADING, contentPanelLayout.createSequentialGroup()
.addComponent(jLabel1)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
.addComponent(voucherNumberTF)))
.addGroup(contentPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(contentPanelLayout.createSequentialGroup()
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addGroup(contentPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING)
.addComponent(jLabel3)
.addComponent(jLabel6, javax.swing.GroupLayout.PREFERRED_SIZE, 100, javax.swing.GroupLayout.PREFERRED_SIZE))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.UNRELATED)
.addGroup(contentPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)
.addComponent(accountNameCB, 0, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addComponent(dateChooserCombo1, javax.swing.GroupLayout.DEFAULT_SIZE, 280, Short.MAX_VALUE))
.addGap(0, 0, Short.MAX_VALUE))
.addGroup(contentPanelLayout.createSequentialGroup()
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addComponent(errorMessage, javax.swing.GroupLayout.PREFERRED_SIZE, 460, javax.swing.GroupLayout.PREFERRED_SIZE))))
.addGroup(javax.swing.GroupLayout.Alignment.TRAILING, contentPanelLayout.createSequentialGroup()
.addGroup(contentPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)
.addGroup(contentPanelLayout.createSequentialGroup()
.addContainerGap()
.addComponent(jLabel17, javax.swing.GroupLayout.PREFERRED_SIZE, 99, javax.swing.GroupLayout.PREFERRED_SIZE)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(jScrollPane4))
.addComponent(jPanel1, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addComponent(jLabel18, javax.swing.GroupLayout.Alignment.TRAILING, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE))
.addGap(0, 0, 0)
.addComponent(jLayeredPane2)))
.addContainerGap())
);
contentPanelLayout.setVerticalGroup(
contentPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(javax.swing.GroupLayout.Alignment.TRAILING, contentPanelLayout.createSequentialGroup()
.addContainerGap()
.addGroup(contentPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jLabel2, javax.swing.GroupLayout.PREFERRED_SIZE, 15, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(accountNumberCB, javax.swing.GroupLayout.PREFERRED_SIZE, 26, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGroup(javax.swing.GroupLayout.Alignment.TRAILING, contentPanelLayout.createSequentialGroup()
.addGroup(contentPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(jLabel3, javax.swing.GroupLayout.PREFERRED_SIZE, 15, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(accountNameCB, javax.swing.GroupLayout.PREFERRED_SIZE, 26, javax.swing.GroupLayout.PREFERRED_SIZE))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)))
.addGroup(contentPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(contentPanelLayout.createSequentialGroup()
.addGroup(contentPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(voucherNumberTF, javax.swing.GroupLayout.PREFERRED_SIZE, 22, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(jLabel1, javax.swing.GroupLayout.PREFERRED_SIZE, 19, javax.swing.GroupLayout.PREFERRED_SIZE))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addGroup(contentPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING, false)
.addComponent(jLabel4, javax.swing.GroupLayout.DEFAULT_SIZE, 40, Short.MAX_VALUE)
.addComponent(jScrollPane1, javax.swing.GroupLayout.PREFERRED_SIZE, 0, Short.MAX_VALUE)))
.addGroup(contentPanelLayout.createSequentialGroup()
.addGroup(contentPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jLabel6)
.addComponent(dateChooserCombo1, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(errorMessage, javax.swing.GroupLayout.PREFERRED_SIZE, 26, javax.swing.GroupLayout.PREFERRED_SIZE)))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(itemTableScroll, javax.swing.GroupLayout.DEFAULT_SIZE, 170, Short.MAX_VALUE)
.addGap(2, 2, 2)
.addGroup(contentPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jLayeredPane3, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGroup(contentPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(insertRowItemTable)
.addComponent(removeRowItemTable)
.addComponent(quantityInfo, javax.swing.GroupLayout.PREFERRED_SIZE, 23, javax.swing.GroupLayout.PREFERRED_SIZE)))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addGroup(contentPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.TRAILING, false)
.addGroup(contentPanelLayout.createSequentialGroup()
.addGroup(contentPanelLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addComponent(jScrollPane4, javax.swing.GroupLayout.PREFERRED_SIZE, 30, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(jLabel17, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE))
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(jLabel18, javax.swing.GroupLayout.PREFERRED_SIZE, 18, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGap(1, 1, 1)
.addComponent(jPanel1, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addComponent(jLayeredPane2, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGap(2, 2, 2))
);
contentPanelLayout.linkSize(javax.swing.SwingConstants.VERTICAL, new java.awt.Component[] {accountNumberCB, dateChooserCombo1, jLabel1, jLabel2, jLabel3, jLabel6, voucherNumberTF});
contentPanelLayout.linkSize(javax.swing.SwingConstants.VERTICAL, new java.awt.Component[] {insertRowItemTable, removeRowItemTable});
contentPanel.setLayer(jLabel1, javax.swing.JLayeredPane.DEFAULT_LAYER);
contentPanel.setLayer(voucherNumberTF, javax.swing.JLayeredPane.DEFAULT_LAYER);
contentPanel.setLayer(jLabel2, javax.swing.JLayeredPane.DEFAULT_LAYER);
contentPanel.setLayer(accountNumberCB, javax.swing.JLayeredPane.DEFAULT_LAYER);
contentPanel.setLayer(jLabel3, javax.swing.JLayeredPane.DEFAULT_LAYER);
contentPanel.setLayer(jLabel4, javax.swing.JLayeredPane.DEFAULT_LAYER);
contentPanel.setLayer(jScrollPane1, javax.swing.JLayeredPane.DEFAULT_LAYER);
contentPanel.setLayer(jLabel6, javax.swing.JLayeredPane.DEFAULT_LAYER);
contentPanel.setLayer(jLayeredPane2, javax.swing.JLayeredPane.DEFAULT_LAYER);
contentPanel.setLayer(jLabel17, javax.swing.JLayeredPane.DEFAULT_LAYER);
contentPanel.setLayer(jScrollPane4, javax.swing.JLayeredPane.DEFAULT_LAYER);
contentPanel.setLayer(dateChooserCombo1, javax.swing.JLayeredPane.DEFAULT_LAYER);
contentPanel.setLayer(itemTableScroll, javax.swing.JLayeredPane.DEFAULT_LAYER);
contentPanel.setLayer(jLayeredPane3, javax.swing.JLayeredPane.DEFAULT_LAYER);
contentPanel.setLayer(insertRowItemTable, javax.swing.JLayeredPane.DEFAULT_LAYER);
contentPanel.setLayer(removeRowItemTable, javax.swing.JLayeredPane.DEFAULT_LAYER);
contentPanel.setLayer(errorMessage, javax.swing.JLayeredPane.DEFAULT_LAYER);
contentPanel.setLayer(accountNameCB, javax.swing.JLayeredPane.DEFAULT_LAYER);
contentPanel.setLayer(quantityInfo, javax.swing.JLayeredPane.DEFAULT_LAYER);
contentPanel.setLayer(jPanel1, javax.swing.JLayeredPane.DEFAULT_LAYER);
contentPanel.setLayer(jLabel18, javax.swing.JLayeredPane.DEFAULT_LAYER);
javax.swing.GroupLayout containerLayout = new javax.swing.GroupLayout(container);
container.setLayout(containerLayout);
containerLayout.setHorizontalGroup(
containerLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(containerLayout.createSequentialGroup()
.addGap(0, 0, 0)
.addComponent(contentPanel)
.addGap(0, 0, 0))
);
containerLayout.setVerticalGroup(
containerLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(containerLayout.createSequentialGroup()
.addGap(0, 0, 0)
.addComponent(contentPanel)
.addGap(0, 0, 0))
);
contolLP.setBorder(javax.swing.BorderFactory.createLineBorder(javax.swing.UIManager.getDefaults().getColor("Button.light")));
addButton.setBackground(new java.awt.Color(255, 255, 255));
addButton.setText("Add");
addButton.setBorder(null);
addButton.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
addButtonActionPerformed(evt);
}
});
saveButton.setBackground(new java.awt.Color(255, 255, 255));
saveButton.setText("Save");
saveButton.setBorder(null);
saveButton.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
saveButtonActionPerformed(evt);
}
});
updateButton.setBackground(new java.awt.Color(255, 255, 255));
updateButton.setText("Update");
updateButton.setBorder(null);
updateButton.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
updateButtonActionPerformed(evt);
}
});
cancelButton.setBackground(new java.awt.Color(255, 255, 255));
cancelButton.setText("Cancel");
cancelButton.setBorder(null);
cancelButton.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
cancelButtonActionPerformed(evt);
}
});
editButton.setBackground(new java.awt.Color(255, 255, 255));
editButton.setText("Edit");
editButton.setBorder(null);
editButton.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
editButtonActionPerformed(evt);
}
});
deleteButton.setBackground(new java.awt.Color(255, 255, 255));
deleteButton.setText("Delete");
deleteButton.setBorder(null);
deleteButton.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
deleteButtonActionPerformed(evt);
}
});
printButton.setBackground(new java.awt.Color(255, 255, 255));
printButton.setText("Print");
printButton.setBorder(null);
printButton.addActionListener(new java.awt.event.ActionListener() {
public void actionPerformed(java.awt.event.ActionEvent evt) {
printButtonActionPerformed(evt);
}
});
javax.swing.GroupLayout mainControlLayout = new javax.swing.GroupLayout(mainControl);
mainControl.setLayout(mainControlLayout);
mainControlLayout.setHorizontalGroup(
mainControlLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(mainControlLayout.createSequentialGroup()
.addGap(1, 1, 1)
.addComponent(addButton, javax.swing.GroupLayout.PREFERRED_SIZE, 75, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGap(20, 20, 20)
.addComponent(saveButton, javax.swing.GroupLayout.PREFERRED_SIZE, 75, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGap(20, 20, 20)
.addComponent(updateButton, javax.swing.GroupLayout.PREFERRED_SIZE, 75, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGap(20, 20, 20)
.addComponent(cancelButton, javax.swing.GroupLayout.PREFERRED_SIZE, 75, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGap(20, 20, 20)
.addComponent(editButton, javax.swing.GroupLayout.PREFERRED_SIZE, 75, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGap(20, 20, 20)
.addComponent(deleteButton, javax.swing.GroupLayout.PREFERRED_SIZE, 75, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGap(20, 20, 20)
.addComponent(printButton, javax.swing.GroupLayout.PREFERRED_SIZE, 75, javax.swing.GroupLayout.PREFERRED_SIZE))
);
mainControlLayout.setVerticalGroup(
mainControlLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(javax.swing.GroupLayout.Alignment.TRAILING, mainControlLayout.createSequentialGroup()
.addGap(1, 1, 1)
.addGroup(mainControlLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.BASELINE)
.addComponent(addButton, javax.swing.GroupLayout.PREFERRED_SIZE, 30, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(saveButton, javax.swing.GroupLayout.PREFERRED_SIZE, 30, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(updateButton, javax.swing.GroupLayout.PREFERRED_SIZE, 30, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(cancelButton, javax.swing.GroupLayout.PREFERRED_SIZE, 30, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(editButton, javax.swing.GroupLayout.PREFERRED_SIZE, 30, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(deleteButton, javax.swing.GroupLayout.PREFERRED_SIZE, 30, javax.swing.GroupLayout.PREFERRED_SIZE)
.addComponent(printButton, javax.swing.GroupLayout.PREFERRED_SIZE, 30, javax.swing.GroupLayout.PREFERRED_SIZE))
.addGap(1, 1, 1))
);
mainControl.setLayer(addButton, javax.swing.JLayeredPane.DEFAULT_LAYER);
mainControl.setLayer(saveButton, javax.swing.JLayeredPane.DEFAULT_LAYER);
mainControl.setLayer(updateButton, javax.swing.JLayeredPane.DEFAULT_LAYER);
mainControl.setLayer(cancelButton, javax.swing.JLayeredPane.DEFAULT_LAYER);
mainControl.setLayer(editButton, javax.swing.JLayeredPane.DEFAULT_LAYER);
mainControl.setLayer(deleteButton, javax.swing.JLayeredPane.DEFAULT_LAYER);
mainControl.setLayer(printButton, javax.swing.JLayeredPane.DEFAULT_LAYER);
javax.swing.GroupLayout contolLPLayout = new javax.swing.GroupLayout(contolLP);
contolLP.setLayout(contolLPLayout);
contolLPLayout.setHorizontalGroup(
contolLPLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(javax.swing.GroupLayout.Alignment.TRAILING, contolLPLayout.createSequentialGroup()
.addContainerGap(javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addComponent(mainControl, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGap(211, 211, 211))
);
contolLPLayout.setVerticalGroup(
contolLPLayout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(contolLPLayout.createSequentialGroup()
.addGap(5, 5, 5)
.addComponent(mainControl, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE)
.addGap(5, 5, 5))
);
contolLP.setLayer(mainControl, javax.swing.JLayeredPane.DEFAULT_LAYER);
javax.swing.GroupLayout layout = new javax.swing.GroupLayout(this);
this.setLayout(layout);
layout.setHorizontalGroup(
layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addContainerGap()
.addComponent(container, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addContainerGap())
.addComponent(contolLP)
);
layout.setVerticalGroup(
layout.createParallelGroup(javax.swing.GroupLayout.Alignment.LEADING)
.addGroup(layout.createSequentialGroup()
.addContainerGap()
.addComponent(container, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, Short.MAX_VALUE)
.addPreferredGap(javax.swing.LayoutStyle.ComponentPlacement.RELATED)
.addComponent(contolLP, javax.swing.GroupLayout.PREFERRED_SIZE, javax.swing.GroupLayout.DEFAULT_SIZE, javax.swing.GroupLayout.PREFERRED_SIZE))
);
bindingGroup.bind();
}// </editor-fold>//GEN-END:initComponents
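    // The initComponents() body above is regenerated by the NetBeans GUI builder;
    // change it through the form designer rather than by hand. The event handlers
    // below are ordinary hand-written code.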
private void insertRowBillSundryActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_insertRowBillSundryActionPerformed
new TableUtils().addingBillSundryTableRow(billSundryTable);
}//GEN-LAST:event_insertRowBillSundryActionPerformed
private void removeRowBillSundryActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_removeRowBillSundryActionPerformed
new TableUtils().removingBillSundryTableRow(billSundryTable);
calculateTotalAmount();
}//GEN-LAST:event_removeRowBillSundryActionPerformed
private void billSundryTableFocusGained(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_billSundryTableFocusGained
errorMessage.setOpaque(false);
errorMessage.setText(null);
}//GEN-LAST:event_billSundryTableFocusGained
private void billSundryTableMouseClicked(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_billSundryTableMouseClicked
billSundryTableControl();
}//GEN-LAST:event_billSundryTableMouseClicked
private void removeRowItemTableActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_removeRowItemTableActionPerformed
int sr = itemTable.getSelectedRow();
if (sr > -1 && sr < itemTable.getRowCount()) {
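            // A logical item can span several physical rows: continuation rows carry
            // a null Item Code (column 1). When the selected row holds an Item Code,
            // remove it together with its contiguous continuation rows, deleting
            // bottom-up so the row indices stay valid.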
if (itemTable.getModel().getValueAt(sr, 1) != null) {
int count = 1;
for (int i = sr + 1; i < itemTable.getRowCount() - 1; i++) {
if (itemTable.getModel().getValueAt(i, 1) == null) {
count++;
} else {
break;
}
}
int[] rows = new int[count];
for (int i = 0; i < count; i++) {
rows[i] = sr + i;
}
for (int i = rows.length - 1; i >= 0; i--) {
new TableUtils().removingItemTableRows(itemTable, rows[i]);
}
updateTotalValueItemTable();
calculateAmount();
calculateTotalAmount();
} else {
new TableUtils().removingItemTableRows(itemTable, sr);
updateTotalValueItemTable();
calculateAmount();
calculateTotalAmount();
}
}
}//GEN-LAST:event_removeRowItemTableActionPerformed
private void insertRowItemTableActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_insertRowItemTableActionPerformed
new TableUtils().addingItemTable(itemTable);
}//GEN-LAST:event_insertRowItemTableActionPerformed
private void itemTableFocusGained(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_itemTableFocusGained
errorMessage.setOpaque(false);
errorMessage.setText(null);
}//GEN-LAST:event_itemTableFocusGained
private void backButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_backButtonActionPerformed
list.setVisible(false);
contolLP.setVisible(true);
container.setVisible(true);
}//GEN-LAST:event_backButtonActionPerformed
private void addButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_addButtonActionPerformed
this.clearAllField();
this.enableAllField(true);
setVoucherNumber(companyCode, conn);
this.addControlShow();
}//GEN-LAST:event_addButtonActionPerformed
private void saveButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_saveButtonActionPerformed
if (validateUserInput()) {
if (new PurchaseReturnHandler(mainFrame, this).checkVoucherNumberIsValidOrNot(companyCode, voucherNumberTF.getText(), conn)) {
if (new PurchaseReturnHandler(mainFrame, this).savePurchaseReturnInfo(this, conn, companyCode)) {
if (new PurchaseReturnHandler(mainFrame, this).saveItemOrderInfo(this, conn, companyCode, itemTable)) {
if (billSundryTable.getRowCount() > 1) {
if (new PurchaseReturnHandler(mainFrame, this).saveBillSundryInfo(this, conn, companyCode, billSundryTable)) {
this.enableAllField(false);
this.saveOrUpdateControlShow();
}
} else {
this.enableAllField(false);
this.saveOrUpdateControlShow();
}
}
}
} else {
errorMessage.setOpaque(true);
errorMessage.setText("Voucher number already exists.");
voucherNumberTF.requestFocusInWindow();
voucherNumberTF.selectAll();
}
}
}//GEN-LAST:event_saveButtonActionPerformed
private void updateButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_updateButtonActionPerformed
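// Update is implemented as delete-then-reinsert: the stored voucher is
// removed and the current form state is saved again. When the user has
// typed a new voucher number, it must first pass the duplicate check.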
if (validateUserInput()) {
if (voucherNumberTF.getText().equals(String.valueOf(voucherNumber))) {
if (new PurchaseReturnHandler(mainFrame, this).deleteVoucherNumber(companyCode, voucherNumber, conn)) {
if (new PurchaseReturnHandler(mainFrame, this).savePurchaseReturnInfo(this, conn, companyCode)) {
if (new PurchaseReturnHandler(mainFrame, this).saveItemOrderInfo(this, conn, companyCode, itemTable)) {
if (billSundryTable.getRowCount() > 1) {
if (new PurchaseReturnHandler(mainFrame, this).saveBillSundryInfo(this, conn, companyCode, billSundryTable)) {
this.enableAllField(false);
this.saveOrUpdateControlShow();
}
} else {
this.enableAllField(false);
this.saveOrUpdateControlShow();
}
}
}
}
} else {
if (new PurchaseReturnHandler(mainFrame, this).checkVoucherNumberIsValidOrNot(companyCode, voucherNumberTF.getText(), conn)) {
if (new PurchaseReturnHandler(mainFrame, this).deleteVoucherNumber(companyCode, voucherNumber, conn)) {
if (new PurchaseReturnHandler(mainFrame, this).savePurchaseReturnInfo(this, conn, companyCode)) {
if (new PurchaseReturnHandler(mainFrame, this).saveItemOrderInfo(this, conn, companyCode, itemTable)) {
if (billSundryTable.getRowCount() > 1) {
if (new PurchaseReturnHandler(mainFrame, this).saveBillSundryInfo(this, conn, companyCode, billSundryTable)) {
this.enableAllField(false);
this.saveOrUpdateControlShow();
}
} else {
this.enableAllField(false);
this.saveOrUpdateControlShow();
}
}
}
}
} else {
errorMessage.setOpaque(true);
errorMessage.setText("Voucher number already exists.");
voucherNumberTF.requestFocusInWindow();
voucherNumberTF.selectAll();
}
}
}
}//GEN-LAST:event_updateButtonActionPerformed
private void cancelButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_cancelButtonActionPerformed
this.clearAllField();
this.enableAllField(false);
showCancelControl();
}//GEN-LAST:event_cancelButtonActionPerformed
private void editButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_editButtonActionPerformed
if (new PurchaseReturnHandler(mainFrame, this).fetchPurchaseReturnInformation(companyCode, saleOrderListTable, conn)) {
this.add(list);
list.setBounds(0, 0, this.getWidth(), this.getHeight());
container.setVisible(false);
list.setVisible(true);
contolLP.setVisible(false);
backButton.setVisible(true);
}
}//GEN-LAST:event_editButtonActionPerformed
private void deleteButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_deleteButtonActionPerformed
if (new PurchaseReturnHandler(mainFrame, this).deleteVoucherNumber(companyCode, voucherNumberTF.getText(), conn)) {
this.clearAllField();
this.enableAllField(false);
this.deleteControlShow();
}
}//GEN-LAST:event_deleteButtonActionPerformed
private void printButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_printButtonActionPerformed
notesTaker = new PurchaseReturnNotesTaker(mainFrame, this, true);
notesTaker.setLocationRelativeTo(this);
notesTaker.setVisible(true);
}//GEN-LAST:event_printButtonActionPerformed
private void saleOrderListTableMouseClicked(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_saleOrderListTableMouseClicked
if (saleOrderListTable.getRowCount() > 0) {
if (evt.getClickCount() > 1 && saleOrderListTable.getSelectedRow() > -1) {
voucherNumber = saleOrderListTable.getValueAt(saleOrderListTable.getSelectedRow(), 1).toString();
if (new PurchaseReturnHandler(mainFrame, this).fetchDataOfPurchaseReturn(companyCode, voucherNumber, this, conn)) {
if (new PurchaseReturnHandler(mainFrame, this).fetchItemDetail(companyCode, voucherNumber, itemTable, this, conn)) {
if (new PurchaseReturnHandler(mainFrame, this).fetchBillSundryDetail(companyCode, voucherNumber, billSundryTable, this, conn)) {
list.setVisible(false);
container.setVisible(true);
contolLP.setVisible(true);
this.remove(list);
enableAllField(true);
showEditOrListControl();
}
}
}
}
} else {
evt.consume();
}
}//GEN-LAST:event_saleOrderListTableMouseClicked
private void taxTypeClassCBItemStateChanged(java.awt.event.ItemEvent evt) {//GEN-FIRST:event_taxTypeClassCBItemStateChanged
if (itemTable.getSelectedRow() > -1) {
itemTable.getModel().setValueAt(null, itemTable.getSelectedRow(), 10);
itemTable.getModel().setValueAt(null, itemTable.getSelectedRow(), 12);
if (itemTable.getModel().getValueAt(itemTable.getSelectedRow(), 8) != null) {
updateItemBillSundry(itemTable.getSelectedRow());
}
}
}//GEN-LAST:event_taxTypeClassCBItemStateChanged
private void taxTypeClassCBBItemStateChanged(java.awt.event.ItemEvent evt) {//GEN-FIRST:event_taxTypeClassCBBItemStateChanged
if (billSundryTable.getSelectedRow() > -1) {
billSundryTable.getModel().setValueAt(null, billSundryTable.getSelectedRow(), 1);
billSundryTable.getModel().setValueAt(null, billSundryTable.getSelectedRow(), 4);
updateTotalValueItemTable();
}
}//GEN-LAST:event_taxTypeClassCBBItemStateChanged
private void taxTypeClassCBBPopupMenuWillBecomeInvisible(javax.swing.event.PopupMenuEvent evt) {//GEN-FIRST:event_taxTypeClassCBBPopupMenuWillBecomeInvisible
}//GEN-LAST:event_taxTypeClassCBBPopupMenuWillBecomeInvisible
private void billSundryNatureCBPopupMenuWillBecomeInvisible(javax.swing.event.PopupMenuEvent evt) {//GEN-FIRST:event_billSundryNatureCBPopupMenuWillBecomeInvisible
}//GEN-LAST:event_billSundryNatureCBPopupMenuWillBecomeInvisible
private void quantityTFKeyTyped(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_quantityTFKeyTyped
if (itemTable.getModel().getValueAt(itemTable.getSelectedRow(), 1) == null || itemTable.getModel().getValueAt(itemTable.getSelectedRow(), 4) == null) {
evt.consume();
} else {
// getKeyCode() always returns VK_UNDEFINED for keyTyped events, so the
// backspace exemption must test the typed character instead.
if (evt.getKeyChar() != KeyEvent.VK_BACK_SPACE && evt.getKeyChar() != '.') {
if ((evt.getKeyChar() < '0') || (evt.getKeyChar() > '9')) {
Toolkit.getDefaultToolkit().beep();
evt.consume();
}
} else if (evt.getKeyChar() == '.') {
if (quantityTF.getText().isEmpty()) {
evt.consume();
} else if (quantityTF.getText().contains(".")) {
evt.consume();
}
}
}
}//GEN-LAST:event_quantityTFKeyTyped
private void atTheRateTFKeyTyped(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_atTheRateTFKeyTyped
if (itemTable.getModel().getValueAt(itemTable.getSelectedRow(), 8) == null || itemTable.getModel().getValueAt(itemTable.getSelectedRow(), 9) == null) {
evt.consume();
} else {
switch (itemTable.getModel().getValueAt(itemTable.getSelectedRow(), 9).toString()) {
case "Fix Amount":
evt.consume();
break;
default:
if (evt.getKeyChar() != KeyEvent.VK_BACK_SPACE && evt.getKeyChar() != '.') {
if ((evt.getKeyChar() < '0') || (evt.getKeyChar() > '9')) {
Toolkit.getDefaultToolkit().beep();
evt.consume();
}
} else if (evt.getKeyChar() == '.') {
if (atTheRateTF.getText().isEmpty()) {
evt.consume();
} else if (atTheRateTF.getText().contains(".")) {
evt.consume();
}
}
break;
}
}
}//GEN-LAST:event_atTheRateTFKeyTyped
private void miscAmountTFKeyTyped(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_miscAmountTFKeyTyped
if (itemTable.getModel().getValueAt(itemTable.getSelectedRow(), 8) == null || itemTable.getModel().getValueAt(itemTable.getSelectedRow(), 9) == null) {
evt.consume();
} else {
switch (itemTable.getModel().getValueAt(itemTable.getSelectedRow(), 9).toString()) {
case "Fix Amount":
if (evt.getKeyChar() != KeyEvent.VK_BACK_SPACE && evt.getKeyChar() != '.') {
if ((evt.getKeyChar() < '0') || (evt.getKeyChar() > '9')) {
Toolkit.getDefaultToolkit().beep();
evt.consume();
}
} else if (evt.getKeyChar() == '.') {
if (miscAmountTF.getText().isEmpty()) {
evt.consume();
} else if (miscAmountTF.getText().contains(".")) {
evt.consume();
}
}
break;
case "Percentage":
evt.consume();
break;
case "Quantity":
evt.consume();
break;
}
}
}//GEN-LAST:event_miscAmountTFKeyTyped
private void aRateTFKeyTyped(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_aRateTFKeyTyped
if (billSundryTable.getModel().getValueAt(billSundryTable.getSelectedRow(), 0) == null || billSundryTable.getModel().getValueAt(billSundryTable.getSelectedRow(), 2) == null) {
evt.consume();
} else {
switch (billSundryTable.getModel().getValueAt(billSundryTable.getSelectedRow(), 2).toString()) {
case "Fix Amount":
evt.consume();
break;
case "Percentage":
if (evt.getKeyChar() != KeyEvent.VK_BACK_SPACE && evt.getKeyChar() != '.') {
if ((evt.getKeyChar() < '0') || (evt.getKeyChar() > '9')) {
Toolkit.getDefaultToolkit().beep();
evt.consume();
}
} else if (evt.getKeyChar() == '.') {
if (aRateTF.getText().isEmpty()) {
evt.consume();
} else if (aRateTF.getText().contains(".")) {
evt.consume();
}
}
break;
}
}
}//GEN-LAST:event_aRateTFKeyTyped
private void amountTFKeyTyped(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_amountTFKeyTyped
if (billSundryTable.getModel().getValueAt(billSundryTable.getSelectedRow(), 0) == null || billSundryTable.getModel().getValueAt(billSundryTable.getSelectedRow(), 2) == null) {
evt.consume();
} else {
switch (billSundryTable.getModel().getValueAt(billSundryTable.getSelectedRow(), 2).toString()) {
case "Fix Amount":
if (evt.getKeyChar() != KeyEvent.VK_BACK_SPACE && evt.getKeyChar() != '.') {
if ((evt.getKeyChar() < '0') || (evt.getKeyChar() > '9')) {
Toolkit.getDefaultToolkit().beep();
evt.consume();
}
} else if (evt.getKeyChar() == '.') {
if (amountTF.getText().isEmpty()) {
evt.consume();
} else if (amountTF.getText().contains(".")) {
evt.consume();
}
}
break;
case "Percentage":
evt.consume();
break;
}
}
}//GEN-LAST:event_amountTFKeyTyped
private void searchKeyTFCaretUpdate(javax.swing.event.CaretEvent evt) {//GEN-FIRST:event_searchKeyTFCaretUpdate
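// Rebuilds the row sorter on every caret change and filters the list by
// the column matching the selected search field (1: voucher number,
// 2: date, 3: account number, 4: customer name).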
TableModel model = saleOrderListTable.getModel();
sorter = new TableRowSorter<>(model);
saleOrderListTable.setRowSorter(sorter);
String text = searchKeyTF.getText();
if (text.length() == 0) {
sorter.setRowFilter(null);
} else {
switch (searchBy.getSelectedItem().toString()) {
case "Account Number":
sorter.setRowFilter(RowFilter.regexFilter(text, 3));
break;
case "Customer Name":
sorter.setRowFilter(RowFilter.regexFilter(text, 4));
break;
case "Date":
sorter.setRowFilter(RowFilter.regexFilter(text, 2));
break;
case "Voucher Number":
sorter.setRowFilter(RowFilter.regexFilter(text, 1));
break;
}
}
}//GEN-LAST:event_searchKeyTFCaretUpdate
private void itemTableKeyTyped(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_itemTableKeyTyped
if (evt.getKeyChar() < '0' || evt.getKeyChar() > '9') {
Toolkit.getDefaultToolkit().beep();
evt.consume();
} else {
quantityTF.setEnabled(false);
rateTF.setEnabled(false);
taxTypeClassCB.setEnabled(false);
natureCB.setEnabled(false);
atTheRateTF.setEnabled(false);
miscAmountTF.setEnabled(false);
itemTableControl();
}
}//GEN-LAST:event_itemTableKeyTyped
private void accountNumberCBMouseEntered(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_accountNumberCBMouseEntered
new FetchVoucherData().fetchAccountNoSale(mainFrame, accountNumberCB, companyCode, conn);
}//GEN-LAST:event_accountNumberCBMouseEntered
private void accountNumberCBPopupMenuWillBecomeInvisible(javax.swing.event.PopupMenuEvent evt) {//GEN-FIRST:event_accountNumberCBPopupMenuWillBecomeInvisible
retriveAccountName();
}//GEN-LAST:event_accountNumberCBPopupMenuWillBecomeInvisible
private void accountNameCBMouseEntered(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_accountNameCBMouseEntered
new FetchVoucherData().fetchAccountNameSale(mainFrame, accountNameCB, companyCode, conn);
}//GEN-LAST:event_accountNameCBMouseEntered
private void accountNameCBPopupMenuWillBecomeInvisible(javax.swing.event.PopupMenuEvent evt) {//GEN-FIRST:event_accountNameCBPopupMenuWillBecomeInvisible
retriveAccountNumber();
}//GEN-LAST:event_accountNameCBPopupMenuWillBecomeInvisible
private void itemTableKeyPressed(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_itemTableKeyPressed
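// Arrow keys drive the scroll bars directly because the item table is
// wider and taller than its viewport: the horizontal offsets (0, 400,
// 500, 1000, 1600) jump between fixed column groups, and the vertical
// offset assumes a row height of 25 pixels.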
switch (evt.getKeyCode()) {
case KeyEvent.VK_LEFT:
if (itemTable.getSelectedColumn() == 4) {
itemTableScroll.getHorizontalScrollBar().setValue(0);
} else if (itemTable.getSelectedColumn() == 8) {
itemTableScroll.getHorizontalScrollBar().setValue(500);
} else if (itemTable.getSelectedColumn() == 12) {
itemTableScroll.getHorizontalScrollBar().setValue(1000);
}
break;
case KeyEvent.VK_RIGHT:
if (itemTable.getSelectedColumn() == 4) {
itemTableScroll.getHorizontalScrollBar().setValue(400);
} else if (itemTable.getSelectedColumn() == 8) {
itemTableScroll.getHorizontalScrollBar().setValue(1000);
} else if (itemTable.getSelectedColumn() == 12) {
itemTableScroll.getHorizontalScrollBar().setValue(1600);
}
break;
case KeyEvent.VK_UP:
if (itemTable.getSelectedRow() > -1) {
int i = itemTable.getSelectedRow();
itemTableScroll.getVerticalScrollBar().setValue(i * 25);
}
break;
case KeyEvent.VK_DOWN:
if (itemTable.getSelectedRow() > -1) {
int i = itemTable.getSelectedRow();
if (i != 0 && i % 5 == 0) {
itemTableScroll.getVerticalScrollBar().setValue(++i * 25);
}
}
break;
}
}//GEN-LAST:event_itemTableKeyPressed
private void itemTableMouseClicked(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_itemTableMouseClicked
if (itemTable.getSelectedColumn() == 5 || itemTable.getSelectedColumn() == 6 || itemTable.getSelectedColumn() == 9 || itemTable.getSelectedColumn() == 10 || itemTable.getSelectedColumn() == 11 || itemTable.getSelectedColumn() == 12 || itemTable.getSelectedColumn() == 14) {
itemTableControl();
} else {
if (evt.getClickCount() > 1) {
itemTableControl();
}
}
}//GEN-LAST:event_itemTableMouseClicked
private void quantityTFFocusLost(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_quantityTFFocusLost
int i = itemTable.getSelectedRow();
updateItemAmount(i);
updateTotalValueItemTable();
calculateAmount();
quantityTF.setEnabled(false);
quantityInfo.setText(null);
switch (saveType) {
case "save":
saveButton.setEnabled(true);
break;
case "update":
updateButton.setEnabled(true);
break;
}
}//GEN-LAST:event_quantityTFFocusLost
private void rateTFFocusLost(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_rateTFFocusLost
quantityTFFocusLost(evt);
rateTF.setEnabled(false);
switch (saveType) {
case "save":
saveButton.setEnabled(true);
break;
case "update":
updateButton.setEnabled(true);
break;
}
}//GEN-LAST:event_rateTFFocusLost
private void rateTFKeyTyped(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_rateTFKeyTyped
if (itemTable.getModel().getValueAt(itemTable.getSelectedRow(), 1) == null || itemTable.getModel().getValueAt(itemTable.getSelectedRow(), 4) == null) {
evt.consume();
} else {
if (evt.getKeyChar() != KeyEvent.VK_BACK_SPACE && evt.getKeyChar() != '.') {
if ((evt.getKeyChar() < '0') || (evt.getKeyChar() > '9')) {
Toolkit.getDefaultToolkit().beep();
evt.consume();
}
} else if (evt.getKeyChar() == '.') {
if (rateTF.getText().isEmpty()) {
evt.consume();
} else if (rateTF.getText().contains(".")) {
evt.consume();
}
}
}
}//GEN-LAST:event_rateTFKeyTyped
private void atTheRateTFFocusLost(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_atTheRateTFFocusLost
updateItemBillSundry(itemTable.getSelectedRow());
updateTotalValueItemTable();
calculateAmount();
atTheRateTF.setEnabled(false);
switch (saveType) {
case "save":
saveButton.setEnabled(true);
break;
case "update":
updateButton.setEnabled(true);
break;
}
}//GEN-LAST:event_atTheRateTFFocusLost
private void miscAmountTFFocusLost(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_miscAmountTFFocusLost
updateItemBillSundry(itemTable.getSelectedRow());
updateTotalValueItemTable();
calculateAmount();
miscAmountTF.setEnabled(false);
switch (saveType) {
case "save":
saveButton.setEnabled(true);
break;
case "update":
updateButton.setEnabled(true);
break;
}
}//GEN-LAST:event_miscAmountTFFocusLost
private void taxTypeClassCBFocusLost(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_taxTypeClassCBFocusLost
calculateAmount();
taxTypeClassCB.setEnabled(false);
}//GEN-LAST:event_taxTypeClassCBFocusLost
private void natureCBFocusLost(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_natureCBFocusLost
updateTotalValueItemTable();
calculateAmount();
natureCB.setEnabled(false);
}//GEN-LAST:event_natureCBFocusLost
private void taxTypeClassCBFocusGained(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_taxTypeClassCBFocusGained
if (itemTable.getModel().getValueAt(itemTable.getSelectedRow(), 8) != null) {
taxTypeClassCB.setEnabled(true);
} else {
taxTypeClassCB.setEnabled(false);
}
}//GEN-LAST:event_taxTypeClassCBFocusGained
private void taxTypeClassCBMouseClicked(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_taxTypeClassCBMouseClicked
if (itemTable.getModel().getValueAt(itemTable.getSelectedRow(), 8) != null) {
taxTypeClassCB.setEnabled(true);
} else {
taxTypeClassCB.setEnabled(false);
}
}//GEN-LAST:event_taxTypeClassCBMouseClicked
private void natureCBMouseClicked(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_natureCBMouseClicked
if (itemTable.getModel().getValueAt(itemTable.getSelectedRow(), 8) != null) {
natureCB.setEnabled(true);
} else {
natureCB.setEnabled(false);
}
}//GEN-LAST:event_natureCBMouseClicked
private void natureCBFocusGained(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_natureCBFocusGained
if (itemTable.getModel().getValueAt(itemTable.getSelectedRow(), 8) != null) {
natureCB.setEnabled(true);
} else {
natureCB.setEnabled(false);
}
}//GEN-LAST:event_natureCBFocusGained
private void billSundryTableKeyTyped(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_billSundryTableKeyTyped
if (evt.getKeyChar() < '0' || evt.getKeyChar() > '9') {
Toolkit.getDefaultToolkit().beep();
evt.consume();
} else {
aRateTF.setEnabled(false);
amountTF.setEnabled(false);
taxTypeClassCBB.setEnabled(false);
billSundryNatureCB.setEnabled(false);
billSundryTableControl();
}
}//GEN-LAST:event_billSundryTableKeyTyped
private void aRateTFFocusLost(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_aRateTFFocusLost
updateBillSundry(billSundryTable.getSelectedRow());
calculateTotalAmount();
switch (saveType) {
case "save":
saveButton.setEnabled(true);
break;
case "update":
updateButton.setEnabled(true);
break;
}
}//GEN-LAST:event_aRateTFFocusLost
private void amountTFFocusLost(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_amountTFFocusLost
updateBillSundry(billSundryTable.getSelectedRow());
calculateTotalAmount();
switch (saveType) {
case "save":
saveButton.setEnabled(true);
break;
case "update":
updateButton.setEnabled(true);
break;
}
}//GEN-LAST:event_amountTFFocusLost
private void billSundryNatureCBFocusGained(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_billSundryNatureCBFocusGained
if (billSundryTable.getModel().getValueAt(billSundryTable.getSelectedRow(), 0) != null) {
billSundryNatureCB.setEnabled(true);
} else {
billSundryNatureCB.setEnabled(false);
}
}//GEN-LAST:event_billSundryNatureCBFocusGained
private void billSundryNatureCBFocusLost(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_billSundryNatureCBFocusLost
updateBillSundry(billSundryTable.getSelectedRow());
calculateTotalAmount();
}//GEN-LAST:event_billSundryNatureCBFocusLost
private void billSundryNatureCBMouseClicked(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_billSundryNatureCBMouseClicked
if (billSundryTable.getModel().getValueAt(billSundryTable.getSelectedRow(), 0) != null) {
billSundryNatureCB.setEnabled(true);
} else {
billSundryNatureCB.setEnabled(false);
}
}//GEN-LAST:event_billSundryNatureCBMouseClicked
private void taxTypeClassCBBFocusGained(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_taxTypeClassCBBFocusGained
if (billSundryTable.getModel().getValueAt(billSundryTable.getSelectedRow(), 0) != null) {
taxTypeClassCBB.setEnabled(true);
} else {
taxTypeClassCBB.setEnabled(false);
}
}//GEN-LAST:event_taxTypeClassCBBFocusGained
private void taxTypeClassCBBFocusLost(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_taxTypeClassCBBFocusLost
updateBillSundry(billSundryTable.getSelectedRow());
calculateTotalAmount();
}//GEN-LAST:event_taxTypeClassCBBFocusLost
private void taxTypeClassCBBMouseClicked(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_taxTypeClassCBBMouseClicked
if (billSundryTable.getModel().getValueAt(billSundryTable.getSelectedRow(), 0) != null) {
taxTypeClassCBB.setEnabled(true);
} else {
taxTypeClassCBB.setEnabled(false);
}
}//GEN-LAST:event_taxTypeClassCBBMouseClicked
private void cashTFCaretUpdate(javax.swing.event.CaretEvent evt) {//GEN-FIRST:event_cashTFCaretUpdate
if (cashTF.getText().isEmpty()) {
cashAccCB.setSelectedIndex(-1);
cashAccCB.setVisible(false);
if (bankTF.getText().isEmpty()) {
creditTF.setText(null);
debitTF.setText(null);
} else {
calculateTransactionValues();
}
} else {
cashAccCB.setVisible(true);
calculateTransactionValues();
}
}//GEN-LAST:event_cashTFCaretUpdate
private void cashTFKeyTyped(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_cashTFKeyTyped
if (evt.getKeyChar() != KeyEvent.VK_BACK_SPACE && evt.getKeyChar() != '.') {
if ((evt.getKeyChar() < '0') || (evt.getKeyChar() > '9')) {
Toolkit.getDefaultToolkit().beep();
evt.consume();
}
} else if (evt.getKeyChar() == '.') {
if (cashTF.getText().isEmpty()) {
evt.consume();
} else if (cashTF.getText().contains(".")) {
evt.consume();
}
}
}//GEN-LAST:event_cashTFKeyTyped
private void bankTFCaretUpdate(javax.swing.event.CaretEvent evt) {//GEN-FIRST:event_bankTFCaretUpdate
if (bankTF.getText().isEmpty()) {
bankAccCB.setSelectedIndex(-1);
bankAccCB.setVisible(false);
if (cashTF.getText().isEmpty()) {
creditTF.setText(null);
debitTF.setText(null);
} else {
calculateTransactionValues();
}
} else {
bankAccCB.setVisible(true);
calculateTransactionValues();
}
}//GEN-LAST:event_bankTFCaretUpdate
private void bankTFKeyTyped(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_bankTFKeyTyped
if (evt.getKeyChar() != KeyEvent.VK_BACK_SPACE && evt.getKeyChar() != '.') {
if ((evt.getKeyChar() < '0') || (evt.getKeyChar() > '9')) {
Toolkit.getDefaultToolkit().beep();
evt.consume();
}
} else if (evt.getKeyChar() == '.') {
if (bankTF.getText().isEmpty()) {
evt.consume();
} else if (bankTF.getText().contains(".")) {
evt.consume();
}
}
}//GEN-LAST:event_bankTFKeyTyped
private void creditLimitCBItemStateChanged(java.awt.event.ItemEvent evt) {//GEN-FIRST:event_creditLimitCBItemStateChanged
if (creditLimitCB.getSelectedIndex() == 0) {
dateChooserCombo2.setVisible(false);
creditLimitDayTF.setVisible(true);
} else if (creditLimitCB.getSelectedIndex() == 1) {
creditLimitDayTF.setVisible(false);
dateChooserCombo2.setVisible(true);
} else {
dateChooserCombo2.setVisible(false);
creditLimitDayTF.setVisible(false);
}
}//GEN-LAST:event_creditLimitCBItemStateChanged
private void creditLimitDayTFKeyTyped(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_creditLimitDayTFKeyTyped
if (evt.getKeyChar() != KeyEvent.VK_BACK_SPACE) {
if ((evt.getKeyChar() < '0') || (evt.getKeyChar() > '9')) {
Toolkit.getDefaultToolkit().beep();
evt.consume();
}
}
}//GEN-LAST:event_creditLimitDayTFKeyTyped
private void quantityTFFocusGained(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_quantityTFFocusGained
if (itemTable.getModel().getValueAt(itemTable.getSelectedRow(), 1) != null) {
quantityInfo.setText("Quantity Available: " + listOfData.getMaxQuantity());
} else {
quantityInfo.setText(null);
}
if (saveButton.isEnabled()) {
saveType = "save";
saveButton.setEnabled(false);
} else if (updateButton.isEnabled()) {
saveType = "update";
updateButton.setEnabled(false);
}
}//GEN-LAST:event_quantityTFFocusGained
private void creditTFCaretUpdate(javax.swing.event.CaretEvent evt) {//GEN-FIRST:event_creditTFCaretUpdate
if (creditTF.getText().isEmpty()) {
creditLimitCB.setSelectedIndex(-1);
creditLimitCB.setVisible(false);
} else {
creditLimitCB.setVisible(true);
}
}//GEN-LAST:event_creditTFCaretUpdate
private void expiryDateTFFocusLost(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_expiryDateTFFocusLost
expiryDateTF.setEnabled(false);
switch (saveType) {
case "save":
saveButton.setEnabled(true);
break;
case "update":
updateButton.setEnabled(true);
break;
}
}//GEN-LAST:event_expiryDateTFFocusLost
private void expiryDateTFKeyTyped(java.awt.event.KeyEvent evt) {//GEN-FIRST:event_expiryDateTFKeyTyped
if (itemTable.getModel().getValueAt(itemTable.getSelectedRow(), 1) == null) {
evt.consume();
} else {
if (evt.getKeyChar() != KeyEvent.VK_BACK_SPACE && evt.getKeyChar() != '-') {
if ((evt.getKeyChar() < '0') || (evt.getKeyChar() > '9')) {
Toolkit.getDefaultToolkit().beep();
evt.consume();
} else {
if (expiryDateTF.getText().length() == 4 || expiryDateTF.getText().length() == 7 || expiryDateTF.getText().length() == 10) {
evt.consume();
}
}
} else if (evt.getKeyChar() == '-') {
if (expiryDateTF.getText().isEmpty()) {
evt.consume();
} else if (expiryDateTF.getText().length() == 4 || expiryDateTF.getText().length() == 7) {
} else {
evt.consume();
}
}
}
}//GEN-LAST:event_expiryDateTFKeyTyped
private void rateTFFocusGained(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_rateTFFocusGained
if (saveButton.isEnabled()) {
saveType = "save";
saveButton.setEnabled(false);
} else if (updateButton.isEnabled()) {
saveType = "update";
updateButton.setEnabled(false);
}
}//GEN-LAST:event_rateTFFocusGained
private void atTheRateTFFocusGained(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_atTheRateTFFocusGained
rateTFFocusGained(evt);
}//GEN-LAST:event_atTheRateTFFocusGained
private void miscAmountTFFocusGained(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_miscAmountTFFocusGained
rateTFFocusGained(evt);
}//GEN-LAST:event_miscAmountTFFocusGained
private void aRateTFFocusGained(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_aRateTFFocusGained
rateTFFocusGained(evt);
}//GEN-LAST:event_aRateTFFocusGained
private void amountTFFocusGained(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_amountTFFocusGained
rateTFFocusGained(evt);
}//GEN-LAST:event_amountTFFocusGained
private void expiryDateTFFocusGained(java.awt.event.FocusEvent evt) {//GEN-FIRST:event_expiryDateTFFocusGained
rateTFFocusGained(evt);
}//GEN-LAST:event_expiryDateTFFocusGained
private void cashAccCBMouseEntered(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_cashAccCBMouseEntered
new FetchVoucherData().fetchCashAccNo(mainFrame, cashAccCB, companyCode, conn);
}//GEN-LAST:event_cashAccCBMouseEntered
private void bankAccCBMouseEntered(java.awt.event.MouseEvent evt) {//GEN-FIRST:event_bankAccCBMouseEntered
new FetchVoucherData().fetchBankAccNo(mainFrame, bankAccCB, companyCode, conn);
}//GEN-LAST:event_bankAccCBMouseEntered
// Variables declaration - do not modify//GEN-BEGIN:variables
private javax.swing.JFormattedTextField aRateTF;
private javax.swing.JComboBox accountNameCB;
private javax.swing.JComboBox accountNumberCB;
private javax.swing.JButton addButton;
private javax.swing.JTextArea addressTA;
private javax.swing.JLabel amount;
private javax.swing.JFormattedTextField amountTF;
private javax.swing.JFormattedTextField atTheRateTF;
private javax.swing.JButton backButton;
private javax.swing.JComboBox bankAccCB;
private javax.swing.JTextField bankTF;
private javax.swing.JComboBox billSundryNatureCB;
private javax.swing.JTable billSundryTable;
private javax.swing.JButton cancelButton;
private javax.swing.JComboBox cashAccCB;
private javax.swing.JTextField cashTF;
private javax.swing.JPanel container;
private javax.swing.JLayeredPane contentPanel;
private javax.swing.JLayeredPane contolLP;
private javax.swing.JComboBox creditLimitCB;
private javax.swing.JTextField creditLimitDayTF;
private javax.swing.JTextField creditTF;
private datechooser.beans.DateChooserCombo dateChooserCombo1;
private datechooser.beans.DateChooserCombo dateChooserCombo2;
private javax.swing.JTextField debitTF;
private javax.swing.JButton deleteButton;
private javax.swing.JButton editButton;
private javax.swing.JLabel errorMessage;
private javax.swing.JTextField expiryDateTF;
private javax.swing.JButton insertRowBillSundry;
private javax.swing.JButton insertRowItemTable;
private javax.swing.JTable itemTable;
private javax.swing.JScrollPane itemTableScroll;
private javax.swing.JLabel jLabel1;
private javax.swing.JLabel jLabel10;
private javax.swing.JLabel jLabel11;
private javax.swing.JLabel jLabel12;
private javax.swing.JLabel jLabel15;
private javax.swing.JLabel jLabel17;
private javax.swing.JLabel jLabel18;
private javax.swing.JLabel jLabel2;
private javax.swing.JLabel jLabel3;
private javax.swing.JLabel jLabel4;
private javax.swing.JLabel jLabel5;
private javax.swing.JLabel jLabel6;
private javax.swing.JLabel jLabel7;
private javax.swing.JLabel jLabel8;
private javax.swing.JLabel jLabel9;
private javax.swing.JLayeredPane jLayeredPane1;
private javax.swing.JLayeredPane jLayeredPane2;
private javax.swing.JLayeredPane jLayeredPane3;
private javax.swing.JLayeredPane jLayeredPane5;
private javax.swing.JLayeredPane jLayeredPane6;
private javax.swing.JPanel jPanel1;
private javax.swing.JScrollPane jScrollPane1;
private javax.swing.JScrollPane jScrollPane2;
private javax.swing.JScrollPane jScrollPane3;
private javax.swing.JScrollPane jScrollPane4;
private javax.swing.JScrollPane jScrollPane6;
private javax.swing.JPanel list;
private javax.swing.JLayeredPane mainControl;
private javax.swing.JFormattedTextField miscAmountTF;
private javax.swing.JComboBox natureCB;
private javax.swing.JTextArea otherDetailTA;
private javax.swing.JButton printButton;
private javax.swing.JLabel quantityInfo;
private javax.swing.JFormattedTextField quantityTF;
private javax.swing.JFormattedTextField rateTF;
private javax.swing.JButton removeRowBillSundry;
private javax.swing.JButton removeRowItemTable;
private javax.swing.JTable saleOrderListTable;
private javax.swing.JButton saveButton;
private javax.swing.JComboBox searchBy;
private javax.swing.JTextField searchKeyTF;
private javax.swing.JComboBox taxTypeClassCB;
private javax.swing.JComboBox taxTypeClassCBB;
private javax.swing.JLabel totalAmount;
private javax.swing.JButton updateButton;
private javax.swing.JTextField voucherNumberTF;
private org.jdesktop.beansbinding.BindingGroup bindingGroup;
// End of variables declaration//GEN-END:variables
private void showListOfData(String s, JTable table, String companyCode) {
listOfData = new ListOfData(mainFrame, true, table, companyCode);
if (s.equalsIgnoreCase("UnitName")) {
listOfData.setUnitName(primaryUnit);
}
listOfData.setTypeOfRate("purchase");
listOfData.addData(s);
listOfData.setLocationRelativeTo(mainFrame);
listOfData.setVisible(true);
}
/**
 * Sets the voucher-number field to the next available purchase-return
 * voucher number for the given company.
 *
 * @param companyCode code of the company whose vouchers are numbered
 * @param conn open database connection
 */
public void setVoucherNumber(String companyCode, Connection conn) {
voucherNumberTF.setText(new FetchVoucherData().getNextPurchaseReturnVoucherNo(companyCode, conn));
}
private void clearAllField() {
accountNumberCB.setSelectedIndex(-1);
accountNameCB.setSelectedIndex(-1);
addressTA.setText(null);
voucherNumberTF.setText(null);
dateChooserCombo1.setSelectedDate(Calendar.getInstance());
DefaultTableModel model = (DefaultTableModel) itemTable.getModel();
model.setRowCount(0);
Object[] data = {null, null, null, null, null, null, null, null, null, null, null, null, null, null, null};
model.addRow(data);
itemTable.setModel(model);
otherDetailTA.setText(null);
DefaultTableModel model1 = (DefaultTableModel) billSundryTable.getModel();
model1.setRowCount(0);
Object[] data1 = {null, null, null, null};
model1.addRow(data1);
billSundryTable.setModel(model1);
cashAccCB.setSelectedIndex(-1);
cashTF.setText(null);
bankTF.setText(null);
bankAccCB.setSelectedIndex(-1);
debitTF.setText(null);
creditLimitCB.setSelectedIndex(-1);
creditTF.setText(null);
}
private void enableAllField(boolean b) {
errorMessage.setEnabled(b);
jLabel2.setEnabled(b);
accountNumberCB.setEnabled(b);
jLabel3.setEnabled(b);
accountNameCB.setEnabled(b);
jLabel4.setEnabled(b);
jScrollPane1.setEnabled(b);
addressTA.setEnabled(b);
jLabel1.setEnabled(b);
voucherNumberTF.setEnabled(b);
jLabel6.setEnabled(b);
dateChooserCombo1.setEnabled(b);
jScrollPane4.setEnabled(b);
otherDetailTA.setEnabled(b);
itemTableScroll.setEnabled(b);
jLayeredPane1.setEnabled(b);
jScrollPane2.setEnabled(b);
itemTable.setEnabled(b);
jLayeredPane3.setEnabled(b);
jLabel11.setEnabled(b);
amount.setEnabled(b);
insertRowItemTable.setEnabled(b);
removeRowItemTable.setEnabled(b);
jLayeredPane2.setEnabled(b);
jScrollPane3.setEnabled(b);
billSundryTable.setEnabled(b);
removeRowBillSundry.setEnabled(b);
insertRowBillSundry.setEnabled(b);
jLayeredPane6.setEnabled(b);
jLabel15.setEnabled(b);
totalAmount.setEnabled(b);
cashTF.setEnabled(b);
cashAccCB.setEnabled(b);
bankTF.setEnabled(b);
bankAccCB.setEnabled(b);
debitTF.setEnabled(b);
creditTF.setEnabled(b);
creditLimitCB.setEnabled(b);
creditLimitDayTF.setEnabled(b);
dateChooserCombo2.setEnabled(b);
}
private boolean validateUserInput() {
boolean status = false;
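// Validation proceeds in three passes: (1) account number, voucher
// number and every item row (unit, quantity, rate, and any per-item
// bill sundry); (2) the standalone bill sundry rows; (3) the cash/bank
// payment fields, each of which needs an account number when non-empty.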
if (accountNumberCB.getSelectedIndex() > -1) {
if (!voucherNumberTF.getText().isEmpty()) {
if (itemTable.getRowCount() > 1) {
int rowCount = itemTable.getRowCount();
for (int row = 0; row < (rowCount - 1); row++) {
if (itemTable.getModel().getValueAt(row, 1) != null) {
if (itemTable.getModel().getValueAt(row, 4) != null) {
if (itemTable.getModel().getValueAt(row, 5) != null) {
if (itemTable.getModel().getValueAt(row, 6) != null) {
if (itemTable.getModel().getValueAt(row, 8) != null) {
if (checkBillSundryValidation(row)) {
status = true;
}
} else {
status = true;
}
} else {
status = false;
errorMessage.setOpaque(true);
errorMessage.setText("Please select the rate for row " + itemTable.getModel().getValueAt(row, 0).toString() + ".");
break;
}
} else {
status = false;
errorMessage.setOpaque(true);
errorMessage.setText("Please select the quantity for row " + itemTable.getModel().getValueAt(row, 0).toString() + ".");
break;
}
} else {
status = false;
errorMessage.setOpaque(true);
errorMessage.setText("Please select the unit for row " + itemTable.getModel().getValueAt(row, 0).toString() + ".");
break;
}
} else if (itemTable.getModel().getValueAt(row, 8) != null) {
boolean check = false;
for (int i = row; i >= 0; i--) {
if (itemTable.getModel().getValueAt(i, 1) != null) {
check = true;
break;
}
}
if (!check) {
status = false;
errorMessage.setOpaque(true);
errorMessage.setText("Please select the item code for row " + itemTable.getModel().getValueAt(row, 0).toString() + ".");
break;
} else if (checkBillSundryValidation(row)) {
status = true;
}
} else {
status = false;
errorMessage.setOpaque(true);
errorMessage.setText("Please select the item code for row " + itemTable.getModel().getValueAt(row, 0).toString() + ".");
break;
}
}
} else {
status = false;
errorMessage.setOpaque(true);
errorMessage.setText("Please enter item information.");
}
} else {
status = false;
errorMessage.setOpaque(true);
errorMessage.setText("Please enter a voucher number.");
}
} else {
status = false;
errorMessage.setOpaque(true);
errorMessage.setText("Please select an account number.");
}
if (status) {
if (billSundryTable.getRowCount() > 1) {
int rowCount = billSundryTable.getRowCount();
for (int row = 0; row < rowCount - 1; row++) {
if (billSundryTable.getModel().getValueAt(row, 0) != null) {
if (billSundryTable.getModel().getValueAt(row, 2) != null) {
switch (billSundryTable.getModel().getValueAt(row, 2).toString()) {
case "Fix Amount":
if (billSundryTable.getModel().getValueAt(row, 4) != null) {
if (billSundryTable.getModel().getValueAt(row, 3) != null) {
status = true;
} else {
status = false;
errorMessage.setOpaque(true);
errorMessage.setText("Please select the nature for row " + (row + 1) + ".");
}
} else {
status = false;
errorMessage.setOpaque(true);
errorMessage.setText("Misc. amount can't be left empty for row " + (row + 1) + ".");
}
break;
case "Percentage":
if (billSundryTable.getModel().getValueAt(row, 1) != null) {
if (billSundryTable.getModel().getValueAt(row, 3) != null) {
status = true;
} else {
status = false;
errorMessage.setOpaque(true);
errorMessage.setText("Please select the nature for row " + (row + 1) + ".");
}
} else {
status = false;
errorMessage.setOpaque(true);
errorMessage.setText("@ can't be left empty for row " + (row + 1) + ".");
}
break;
}
} else {
status = false;
errorMessage.setOpaque(true);
errorMessage.setText("Please select the tax class for row " + (row + 1) + ".");
break;
}
} else {
status = true;
}
}
}
}
if (status) {
if (cashTF.getText().isEmpty() && bankTF.getText().isEmpty()) {
debitTF.setText(null);
creditTF.setText(null);
} else {
status = false;
if (!cashTF.getText().isEmpty()) {
if (cashAccCB.getSelectedIndex() > -1) {
if (!bankTF.getText().isEmpty()) {
if (bankAccCB.getSelectedIndex() > -1) {
status = true;
} else {
status = false;
errorMessage.setOpaque(true);
errorMessage.setText("Please select the bank account number.");
}
} else {
status = true;
}
} else {
status = false;
errorMessage.setOpaque(true);
errorMessage.setText("Please select the cash account number.");
}
} else if (!bankTF.getText().isEmpty()) {
if (bankAccCB.getSelectedIndex() > -1) {
status = true;
} else {
status = false;
errorMessage.setOpaque(true);
errorMessage.setText("Please select the bank account number.");
}
}
}
}
return status;
}
private void defaultControlShow() {
addButton.setEnabled(false);
saveButton.setEnabled(true);
updateButton.setEnabled(false);
cancelButton.setEnabled(true);
deleteButton.setEnabled(false);
editButton.setEnabled(false);
}
private void addControlShow() {
addButton.setEnabled(false);
saveButton.setEnabled(true);
updateButton.setEnabled(false);
cancelButton.setEnabled(true);
editButton.setEnabled(false);
deleteButton.setEnabled(false);
}
private void saveOrUpdateControlShow() {
addButton.setEnabled(true);
saveButton.setEnabled(false);
updateButton.setEnabled(false);
cancelButton.setEnabled(true);
editButton.setEnabled(true);
deleteButton.setEnabled(true);
}
private void deleteControlShow() {
addButton.setEnabled(true);
saveButton.setEnabled(false);
updateButton.setEnabled(false);
cancelButton.setEnabled(false);
editButton.setEnabled(true);
deleteButton.setEnabled(false);
}
private void showEditOrListControl() {
addButton.setEnabled(false);
saveButton.setEnabled(false);
updateButton.setEnabled(true);
cancelButton.setEnabled(true);
editButton.setEnabled(false);
deleteButton.setEnabled(true);
}
private void showCancelControl() {
addButton.setEnabled(true);
saveButton.setEnabled(false);
updateButton.setEnabled(false);
cancelButton.setEnabled(false);
editButton.setEnabled(true);
deleteButton.setEnabled(false);
}
/**
 * @return the account number selected in the account-number combo box
 */
public String getAccountNumber() {
return accountNumberCB.getSelectedItem().toString();
}
/**
 * @return the customer (account) name selected in the account-name combo box
 */
public String getCustomerName() {
return accountNameCB.getSelectedItem().toString();
}
/**
 * @return the address entered in the address text area
 */
public String getAddress() {
return addressTA.getText();
}
/**
 * @return the voucher date selected in the date chooser, as a Timestamp
 */
public Timestamp getDateT() {
Calendar cal = dateChooserCombo1.getSelectedDate();
Date date = cal.getTime();
return new Timestamp(date.getTime());
}
/**
 * @return the text of the other-details area
 */
public String getOtherDetails() {
return otherDetailTA.getText();
}
/**
 * @return the voucher number entered in the voucher-number field
 */
public String getVoucherNumber() {
return voucherNumberTF.getText();
}
/**
 * @return the voucher total shown in the total-amount label
 */
public String getTotalAmount() {
return totalAmount.getText();
}
/**
 * @return the sum of the item-row amounts shown in the amount label
 */
public String getSumOfRows() {
return amount.getText();
}
/**
 * @param i row index in the item table
 * @param c column index in the item table
 * @return the cell value as a string, or null when the cell is empty
 */
public String getItemTableValue(int i, int c) {
if (itemTable.getModel().getValueAt(i, c) != null) {
return itemTable.getModel().getValueAt(i, c).toString();
} else {
return null;
}
}
/**
 * @param i row index in the bill sundry table
 * @param c column index in the bill sundry table
 * @return the cell value as a string, or null when the cell is empty
 */
public String getBillSundryTableValue(int i, int c) {
if (billSundryTable.getModel().getValueAt(i, c) != null) {
return billSundryTable.getModel().getValueAt(i, c).toString();
} else {
return null;
}
}
/**
 * @param aLong account number to select
 */
public void setAccountNumber(String aLong) {
accountNumberCB.setSelectedItem(aLong);
}
/**
 * @param string customer name to select
 */
public void setCustomerName(String string) {
accountNameCB.setSelectedItem(string);
}
/**
 * @param string address text to display
 */
public void setAddress(String string) {
addressTA.setText(string);
}
/**
 * @param aLong voucher number to display
 */
public void setVoucherNu(String aLong) {
voucherNumberTF.setText(String.valueOf(aLong));
}
/**
 * @param timestamp voucher date to select in the date chooser
 */
public void setDatee(Timestamp timestamp) {
Date date = new Date(timestamp.getTime());
Calendar calendar = Calendar.getInstance();
calendar.setTime(date);
dateChooserCombo1.setSelectedDate(calendar);
}
/**
 * @param string other-details text to display
 */
public void setOtherDetails(String string) {
otherDetailTA.setText(string);
}
/**
 * @param aDouble item-row total, formatted before display
 */
public void setSumOfRows(String aDouble) {
amount.setText(roundTwoDecimals(Double.valueOf(aDouble)));
}
/**
 * @param aDouble voucher total, formatted before display
 */
public void setTotalAmount(String aDouble) {
totalAmount.setText(roundTwoDecimals(Double.valueOf(aDouble)));
}
/**
 * Recomputes the bill sundries that depend on the item amount in row i;
 * for a continuation row, the nearest item row above it supplies the amount.
 *
 * @param i row index in the item table
 */
public void updateItemBillSundry(int i) {
double itemAmount = 0.0;
int rootRow = 0;
if (i > -1) {
if (itemTable.getModel().getValueAt(i, 1) != null) {
if (itemTable.getModel().getValueAt(i, 7) != null) {
itemAmount = Double.valueOf(itemTable.getModel().getValueAt(i, 7).toString());
rootRow = i;
}
} else {
for (int j = i; j >= 0; j--) {
if (itemTable.getModel().getValueAt(j, 1) != null) {
if (itemTable.getModel().getValueAt(j, 7) != null) {
itemAmount = Double.valueOf(itemTable.getModel().getValueAt(j, 7).toString());
rootRow = j;
}
break;
}
}
}
}
calculateBillSundry(rootRow, itemAmount);
}
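// Walks the table from the item row that owns the amount ("rootRow")
// through its continuation rows, recomputing every percentage-type
// sundry from that amount; stops at the next real item row.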
private void calculateBillSundry(int rootRow, double itemAmount) {
for (int i = rootRow; i < itemTable.getRowCount(); i++) {
if (i == rootRow) {
if (itemTable.getModel().getValueAt(i, 8) != null) {
Object taxClass = itemTable.getModel().getValueAt(i, 9);
if (taxClass != null) {
switch (taxClass.toString()) {
case "Percentage":
double sundryValue = 0;
if (itemTable.getModel().getValueAt(i, 10) != null) {
sundryValue = itemAmount * Double.valueOf(itemTable.getModel().getValueAt(i, 10).toString()) / 100;
}
itemTable.getModel().setValueAt(roundTwoDecimals(sundryValue), i, 12);
break;
}
}
}
} else if (itemTable.getModel().getValueAt(i, 1) == null) {
if (itemTable.getModel().getValueAt(i, 8) != null) {
Object taxClass = itemTable.getModel().getValueAt(i, 9);
if (taxClass != null) {
switch (taxClass.toString()) {
case "Percentage":
double sundryValue = 0;
if (itemTable.getModel().getValueAt(i, 10) != null) {
sundryValue = itemAmount * Double.valueOf(itemTable.getModel().getValueAt(i, 10).toString()) / 100;
}
itemTable.getModel().setValueAt(roundTwoDecimals(sundryValue), i, 12);
break;
}
}
}
} else {
break;
}
}
}
private void resetAllRespectiveBillSundry(int seRow) {
for (int i = seRow; i < itemTable.getRowCount(); i++) {
if (i == seRow) {
itemTable.getModel().setValueAt(null, i, 9);
itemTable.getModel().setValueAt(null, i, 10);
itemTable.getModel().setValueAt(null, i, 11);
itemTable.getModel().setValueAt(null, i, 12);
} else {
if (itemTable.getModel().getValueAt(i, 1) == null) {
itemTable.getModel().setValueAt(null, i, 9);
itemTable.getModel().setValueAt(null, i, 10);
itemTable.getModel().setValueAt(null, i, 11);
itemTable.getModel().setValueAt(null, i, 12);
} else {
break;
}
}
}
updateTotalValueItemTable();
calculateAmount();
}
/**
 * Recomputes the per-row total (column 13) for every item row, applying
 * the additive or subtractive bill sundries listed beneath each item.
 */
public void updateTotalValueItemTable() {
int count = 0;
double rowAmount = 0;
for (int i = 0; i < itemTable.getRowCount(); i++) {
if (itemTable.getModel().getValueAt(i, 1) != null) {
rowAmount = 0;
count = i;
if (itemTable.getModel().getValueAt(i, 7) != null) {
rowAmount = Double.valueOf(itemTable.getModel().getValueAt(i, 7).toString());
}
if (itemTable.getModel().getValueAt(i, 8) != null) {
if (itemTable.getModel().getValueAt(i, 11) != null) {
if (itemTable.getModel().getValueAt(i, 12) != null) {
if (itemTable.getModel().getValueAt(i, 9) != null) {
if (!itemTable.getModel().getValueAt(i, 9).toString().equalsIgnoreCase("Quantity")) {
switch (itemTable.getModel().getValueAt(i, 11).toString()) {
case "Additive":
String temp = itemTable.getModel().getValueAt(i, 12).toString();
if (!temp.isEmpty()) {
rowAmount = rowAmount + Double.valueOf(temp);
}
break;
case "Subtractive":
String temp1 = itemTable.getModel().getValueAt(i, 12).toString();
if (!temp1.isEmpty()) {
rowAmount = rowAmount - Double.valueOf(temp1);
}
break;
}
}
}
}
}
}
} else {
if (itemTable.getModel().getValueAt(i, 8) != null) {
if (itemTable.getModel().getValueAt(i, 11) != null) {
if (itemTable.getModel().getValueAt(i, 12) != null) {
if (itemTable.getModel().getValueAt(i, 9) != null) {
if (!itemTable.getModel().getValueAt(i, 9).toString().equalsIgnoreCase("Quantity")) {
switch (itemTable.getModel().getValueAt(i, 11).toString()) {
case "Additive":
String temp = itemTable.getModel().getValueAt(i, 12).toString();
if (!temp.isEmpty()) {
rowAmount = rowAmount + Double.valueOf(temp);
}
break;
case "Subtractive":
String temp1 = itemTable.getModel().getValueAt(i, 12).toString();
if (!temp1.isEmpty()) {
rowAmount = rowAmount - Double.valueOf(temp1);
}
break;
}
}
}
}
}
}
}
itemTable.getModel().setValueAt(roundTwoDecimals(rowAmount), count, 13);
}
}
/**
 * Sums the per-row totals (column 13) into the amount label and then
 * refreshes the voucher total.
 */
public void calculateAmount() {
itemTableTotalAmount = 0;
for (int i = 0; i < itemTable.getRowCount(); i++) {
if (itemTable.getModel().getValueAt(i, 13) != null) {
itemTableTotalAmount = itemTableTotalAmount + Double.valueOf(itemTable.getModel().getValueAt(i, 13).toString());
}
}
amount.setText(roundTwoDecimals(itemTableTotalAmount));
calculateTotalAmount();
}
/**
 * Recomputes the value of a percentage-type bill sundry row from the
 * current item-table total.
 *
 * @param i row index in the bill sundry table
 */
public void updateBillSundry(int i) {
if (billSundryTable.getModel().getValueAt(i, 0) != null) {
Object taxClass = billSundryTable.getModel().getValueAt(i, 2);
if (taxClass != null) {
switch (taxClass.toString()) {
case "Percentage":
double sundryValue = 0;
double total = 0;
if (!amount.getText().isEmpty()) {
total = Double.valueOf(amount.getText());
}
if (billSundryTable.getModel().getValueAt(i, 1) != null) {
sundryValue = total * Double.valueOf(billSundryTable.getModel().getValueAt(i, 1).toString()) / 100;
}
billSundryTable.getModel().setValueAt(roundTwoDecimals(sundryValue), i, 4);
break;
}
}
}
}
/**
 * Applies the additive and subtractive bill sundries to the item total
 * and shows the result in the total-amount label.
 */
public void calculateTotalAmount() {
totAmount = 0; // reset so an empty amount field doesn't reuse a stale total
if (!amount.getText().isEmpty()) {
totAmount = Double.valueOf(amount.getText());
}
for (int i = 0; i < billSundryTable.getRowCount(); i++) {
if (billSundryTable.getModel().getValueAt(i, 0) != null) {
if (billSundryTable.getModel().getValueAt(i, 4) != null) {
if (billSundryTable.getModel().getValueAt(i, 3) != null) {
switch (billSundryTable.getModel().getValueAt(i, 3).toString()) {
case "Additive":
String temp = billSundryTable.getModel().getValueAt(i, 4).toString();
if (!temp.isEmpty()) {
totAmount = totAmount + Double.valueOf(temp);
}
break;
case "Subtractive":
String temp1 = billSundryTable.getModel().getValueAt(i, 4).toString();
if (!temp1.isEmpty()) {
totAmount = totAmount - Double.valueOf(temp1);
}
}
}
}
}
}
totalAmount.setText(roundTwoDecimals(totAmount));
}
private boolean checkBillSundryValidation(int row) {
boolean status = false;
if (itemTable.getModel().getValueAt(row, 9) != null) {
switch (itemTable.getModel().getValueAt(row, 9).toString()) {
case "Fix Amount":
if (itemTable.getModel().getValueAt(row, 11) != null) {
if (itemTable.getModel().getValueAt(row, 12) != null) {
status = true;
} else {
errorMessage.setOpaque(true);
errorMessage.setText("Please select the misc. amount for row " + itemTable.getModel().getValueAt(row, 0).toString() + ".");
}
} else {
errorMessage.setOpaque(true);
errorMessage.setText("Please select the nature for row " + itemTable.getModel().getValueAt(row, 0).toString() + ".");
}
break;
case "Percentage":
if (itemTable.getModel().getValueAt(row, 10) != null) {
if (itemTable.getModel().getValueAt(row, 11) != null) {
status = true;
} else {
errorMessage.setOpaque(true);
errorMessage.setText("Please select the nature for row " + itemTable.getModel().getValueAt(row, 0).toString() + ".");
}
} else {
errorMessage.setOpaque(true);
errorMessage.setText("Please select the @ for row " + itemTable.getModel().getValueAt(row, 0).toString() + ".");
}
break;
case "Quantity":
if (itemTable.getModel().getValueAt(row, 10) != null) {
if (itemTable.getModel().getValueAt(row, 11) != null) {
status = true;
} else {
errorMessage.setOpaque(true);
errorMessage.setText("Please select the nature for row " + itemTable.getModel().getValueAt(row, 0).toString() + ".");
}
} else {
errorMessage.setOpaque(true);
errorMessage.setText("Please select the @ for row " + itemTable.getModel().getValueAt(row, 0).toString() + ".");
}
break;
}
} else {
errorMessage.setOpaque(true);
errorMessage.setText("Please select the tax class for row " + itemTable.getModel().getValueAt(row, 0).toString() + ".");
}
return status;
}
/**
 * Recomputes the amount (column 7) for item row i from its quantity and
 * rate, resolving compound units down to their primary unit.
 *
 * @param i row index in the item table
 */
public void updateItemAmount(int i) {
totalItemAmount = null; // forget the previous call's amount so the change check below is per-call
if (i > -1) {
if (itemTable.getModel().getValueAt(i, 1) != null) {
if (itemTable.getModel().getValueAt(i, 4) != null) {
String uName = itemTable.getModel().getValueAt(i, 4).toString();
if (itemTable.getModel().getValueAt(i, 5) != null) {
double quantity = Double.valueOf(itemTable.getModel().getValueAt(i, 5).toString());
if (itemTable.getValueAt(i, 7) != null) {
totalItemAmount = itemTable.getValueAt(i, 7).toString();
}
if (itemTable.getValueAt(i, 6) != null) {
double price = Double.valueOf(itemTable.getValueAt(i, 6).toString());
if (getUnitType(uName, conn).equalsIgnoreCase("Single")) {
itemTable.getModel().setValueAt(roundTwoDecimals(quantity * price), i, 7);
} else {
int j = 0;
double itemAmount = quantity * price;
String perUnit = new FetchVoucherData().getPerUnit(uName, conn);
itemAmount = itemAmount * Double.valueOf(perUnit);
String primaryU = new FetchVoucherData().getPrimaryUnit(uName, conn);
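// Compound units are resolved by walking the unit chain down to the
// primary (single) unit, multiplying by each per-unit factor; the
// counter j caps the walk at 20 hops to guard against a cyclic unit
// definition in the master data.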
for (;;) {
if (getUnitType(primaryU, conn).equalsIgnoreCase("Single")) {
break;
} else {
perUnit = new FetchVoucherData().getPerUnit(primaryU, conn);
itemAmount = itemAmount * Double.valueOf(perUnit);
primaryU = new FetchVoucherData().getPrimaryUnit(primaryU, conn);
}
j++;
if (j > 20) {
break;
}
}
itemTable.getModel().setValueAt(roundTwoDecimals(itemAmount), i, 7);
}
}
}
}
}
if (totalItemAmount != null) {
if (itemTable.getValueAt(i, 7) != null) {
if (!totalItemAmount.equalsIgnoreCase(itemTable.getValueAt(i, 7).toString())) {
updateItemBillSundry(i);
}
}
}
}
}
/**
 * Formats a value for display. Note: despite the method name, the
 * pattern "#.######" keeps up to six decimal places, not two.
 *
 * @param d value to format
 * @return the formatted value as a string
 */
public String roundTwoDecimals(double d) {
DecimalFormat twoDForm = new DecimalFormat("#.######");
return twoDForm.format(d);
}
private void retriveAccountNumber() {
if (accountNameCB.getSelectedIndex() > -1) {
accountNumberCB.setSelectedItem(new FetchVoucherData().getAccountNo(accountNameCB.getSelectedItem().toString(), companyCode, conn));
} else {
accountNumberCB.setSelectedIndex(-1);
}
}
private void retriveAccountName() {
if (accountNumberCB.getSelectedIndex() > -1) {
accountNameCB.setSelectedItem(new FetchVoucherData().getAccountName(accountNumberCB.getSelectedItem().toString(), companyCode, conn));
} else {
accountNameCB.setSelectedIndex(-1);
}
}
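// Dispatches on the clicked column: columns 1/2/4/8 open a picker
// dialog (item code, item name, unit, bill sundry) and reset dependent
// cells when the selection changes; columns 5/6/10/12/14 enable the
// matching inline editor field instead.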
private void itemTableControl() {
errorMessage.setText(null);
errorMessage.setOpaque(false);
selectedRow = itemTable.getSelectedRow();
if (selectedRow > -1) {
if (itemTable.getSelectedRow() == itemTable.getRowCount() - 1) {
new TableUtils().addingItemTable(itemTable);
} else if (itemTable.getSelectedColumn() == 1) {
updateTotalValueItemTable();
calculateAmount();
if (itemTable.getModel().getValueAt(selectedRow, 1) != null) {
itemCode = itemTable.getModel().getValueAt(selectedRow, 1).toString();
}
showListOfData("ItemCode", itemTable, companyCode);
if (itemCode != null) {
if (!itemCode.equalsIgnoreCase(itemTable.getModel().getValueAt(selectedRow, 1).toString())) {
itemTable.getModel().setValueAt(null, itemTable.getSelectedRow(), 4);
itemTable.getModel().setValueAt(null, itemTable.getSelectedRow(), 5);
itemTable.getModel().setValueAt(null, itemTable.getSelectedRow(), 7);
resetAllRespectiveBillSundry(selectedRow);
quantityInfo.setText(null);
}
} else if (itemTable.getModel().getValueAt(selectedRow, 8) != null) {
if (itemTable.getModel().getValueAt(selectedRow, 1) != null) {
resetAllRespectiveBillSundry(selectedRow);
}
}
} else if (itemTable.getSelectedColumn() == 2) {
updateTotalValueItemTable();
calculateAmount();
if (itemTable.getModel().getValueAt(selectedRow, 1) != null) {
itemCode = itemTable.getModel().getValueAt(selectedRow, 1).toString();
}
showListOfData("ItemName", itemTable, companyCode);
if (itemCode != null) {
if (!itemCode.equalsIgnoreCase(itemTable.getModel().getValueAt(selectedRow, 1).toString())) {
itemTable.getModel().setValueAt(null, itemTable.getSelectedRow(), 4);
itemTable.getModel().setValueAt(null, itemTable.getSelectedRow(), 5);
itemTable.getModel().setValueAt(null, itemTable.getSelectedRow(), 7);
resetAllRespectiveBillSundry(selectedRow);
quantityInfo.setText(null);
}
} else if (itemTable.getModel().getValueAt(selectedRow, 8) != null) {
if (itemTable.getModel().getValueAt(selectedRow, 1) != null) {
resetAllRespectiveBillSundry(selectedRow);
}
}
} else if (itemTable.getSelectedColumn() == 4) {
updateTotalValueItemTable();
calculateAmount();
if (itemTable.getModel().getValueAt(itemTable.getSelectedRow(), 1) != null) {
primaryUnit = new PurchaseReturnHandler(mainFrame, this).getUnitName(itemTable.getModel().getValueAt(itemTable.getSelectedRow(), 1).toString(), companyCode, conn);
if (itemTable.getModel().getValueAt(selectedRow, 4) != null) {
unitName = itemTable.getModel().getValueAt(selectedRow, 4).toString();
}
showListOfData("UnitName", itemTable, companyCode);
if (unitName != null) {
if (!unitName.equalsIgnoreCase(itemTable.getModel().getValueAt(selectedRow, 4).toString())) {
itemTable.getModel().setValueAt(null, itemTable.getSelectedRow(), 5);
itemTable.getModel().setValueAt(null, itemTable.getSelectedRow(), 7);
resetAllRespectiveBillSundry(selectedRow);
}
}
}
} else if (itemTable.getSelectedColumn() == 8) {
updateTotalValueItemTable();
calculateAmount();
if (itemTable.getModel().getValueAt(selectedRow, 8) != null) {
billsundry = itemTable.getModel().getValueAt(selectedRow, 8).toString();
}
showListOfData("BillSundry", itemTable, companyCode);
if (billsundry != null) {
if (!billsundry.equalsIgnoreCase(itemTable.getModel().getValueAt(selectedRow, 8).toString())) {
itemTable.getModel().setValueAt(null, itemTable.getSelectedRow(), 10);
itemTable.getModel().setValueAt(null, itemTable.getSelectedRow(), 12);
}
}
} else if (itemTable.getSelectedColumn() == 5) {
if (itemTable.getModel().getValueAt(selectedRow, 1) != null) {
if (itemTable.getModel().getValueAt(selectedRow, 4) != null) {
quantityTF.setEnabled(true);
}
}
} else if (itemTable.getSelectedColumn() == 6) {
if (itemTable.getModel().getValueAt(selectedRow, 1) != null) {
rateTF.setEnabled(true);
}
} else if (itemTable.getSelectedColumn() == 10) {
if (itemTable.getModel().getValueAt(selectedRow, 8) != null) {
if (itemTable.getModel().getValueAt(selectedRow, 9) != null) {
if (!itemTable.getModel().getValueAt(selectedRow, 9).toString().equalsIgnoreCase("Fix Amount")) {
atTheRateTF.setEnabled(true);
}
}
}
} else if (itemTable.getSelectedColumn() == 12) {
if (itemTable.getModel().getValueAt(selectedRow, 8) != null) {
if (itemTable.getModel().getValueAt(selectedRow, 9) != null) {
if (itemTable.getModel().getValueAt(selectedRow, 9).toString().equalsIgnoreCase("Fix Amount")) {
miscAmountTF.setEnabled(true);
}
}
}
} else if (itemTable.getSelectedColumn() == 14) {
if (itemTable.getModel().getValueAt(selectedRow, 1) != null) {
expiryDateTF.setEnabled(true);
}
}
}
}
private void billSundryTableControl() {
errorMessage.setText(null);
errorMessage.setOpaque(false);
selectedRow1 = billSundryTable.getSelectedRow();
if (selectedRow1 > -1) {
if (selectedRow1 == billSundryTable.getRowCount() - 1) {
new TableUtils().addingBillSundryTableRow(billSundryTable);
} else if (billSundryTable.getSelectedRow() > -1) {
if (billSundryTable.getSelectedColumn() == 0) {
if (billSundryTable.getModel().getValueAt(billSundryTable.getSelectedRow(), 0) != null) {
billOverallSUndry = billSundryTable.getModel().getValueAt(billSundryTable.getSelectedRow(), 0).toString();
}
showListOfData("BillSundry", billSundryTable, companyCode);
if (billOverallSUndry != null) {
if (!billOverallSUndry.equalsIgnoreCase(billSundryTable.getModel().getValueAt(billSundryTable.getSelectedRow(), 0).toString())) {
billSundryTable.getModel().setValueAt(null, billSundryTable.getSelectedRow(), 1);
billSundryTable.getModel().setValueAt(null, billSundryTable.getSelectedRow(), 4);
}
}
updateBillSundry(billSundryTable.getSelectedRow());
calculateTotalAmount();
} else if (billSundryTable.getSelectedColumn() == 1) {
if (billSundryTable.getModel().getValueAt(selectedRow1, 0) != null) {
if (billSundryTable.getModel().getValueAt(selectedRow1, 2) != null) {
if (!billSundryTable.getModel().getValueAt(selectedRow1, 2).toString().equalsIgnoreCase("Fix Amount")) {
aRateTF.setEnabled(true);
}
}
}
} else if (billSundryTable.getSelectedColumn() == 4) {
if (billSundryTable.getModel().getValueAt(selectedRow1, 0) != null) {
if (billSundryTable.getModel().getValueAt(selectedRow1, 2) != null) {
if (billSundryTable.getModel().getValueAt(selectedRow1, 2).toString().equalsIgnoreCase("Fix Amount")) {
amountTF.setEnabled(true);
}
}
}
}
}
}
}
private String getUnitType(String unitName, Connection conn) {
return new FetchVoucherData().getUnitType(unitName, conn);
}
public String getIsCash() {
if (cashTF.getText().isEmpty()) {
return "N";
} else {
return "Y";
}
}
public String getIsBank() {
if (bankTF.getText().isEmpty()) {
return "N";
} else {
return "Y";
}
}
public String getCreditAmount() {
return creditTF.getText();
}
public String getCreditType() {
if (creditLimitCB.getSelectedIndex() > -1) {
return creditLimitCB.getSelectedItem().toString();
} else {
return null;
}
}
public String getCreditDay() {
if (creditLimitCB.getSelectedIndex() == 0) {
return creditLimitDayTF.getText();
} else {
return null;
}
}
public Timestamp getCreditDate() {
if (creditLimitCB.getSelectedIndex() == 1) {
return null;
} else {
Calendar cal = dateChooserCombo2.getSelectedDate();
Date date = cal.getTime();
return new Timestamp(date.getTime());
}
}
public String getDebitAmount() {
return debitTF.getText();
}
public String getCashAmount() {
return cashTF.getText();
}
public String getCashAccountNumber() {
if (cashTF.getText().isEmpty()) {
return null;
} else {
return cashAccCB.getSelectedItem().toString();
}
}
public String getBankAmount() {
return bankTF.getText();
}
public String getBankAccountNumber() {
if (bankTF.getText().isEmpty()) {
return null;
} else {
return bankAccCB.getSelectedItem().toString();
}
}
public Timestamp getItemTableTimeValue(int row, int col) {
if (itemTable.getModel().getValueAt(row, col) != null) {
try {
SimpleDateFormat format = new SimpleDateFormat("#yyyy-MM-dd#");
Date date = format.parse(itemTable.getModel().getValueAt(row, col).toString());
return new Timestamp(date.getTime());
} catch (ParseException ex) {
return null;
}
} else {
return null;
}
}
public void setCreditAmount(String CrA, String crType, String crDay, Timestamp timestamp) {
creditTF.setText(CrA);
creditLimitCB.setSelectedItem(crType);
creditLimitDayTF.setText(crDay);
if (timestamp != null) {
Date date = new Date(timestamp.getTime());
Calendar calendar = Calendar.getInstance();
calendar.setTime(date);
dateChooserCombo2.setSelectedDate(calendar);
}
}
public void setDebitAmount(String string) {
debitTF.setText(string);
}
public void setCashAmount(String string) {
cashTF.setText(string);
}
public void setCashAccNo(String string) {
cashAccCB.setSelectedItem(string);
}
public void setBankAmount(String string) {
bankTF.setText(string);
}
public void setBankAccNo(String string) {
bankAccCB.setSelectedItem(string);
}
private void calculateTransactionValues() {
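// Split the bill total across payment modes: any part of the total not
// covered by the cash and bank amounts becomes credit; an overpayment
// (cash + bank > total) becomes debit.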
double total = 0;
double bank = 0;
double cash = 0;
if (!totalAmount.getText().isEmpty()) {
total = Double.valueOf(totalAmount.getText());
}
if (!cashTF.getText().isEmpty()) {
cash = Double.valueOf(cashTF.getText());
}
if (!bankTF.getText().isEmpty()) {
bank = Double.valueOf(bankTF.getText());
}
double amt = (total - bank - cash);
if (amt > 0) {
creditTF.setText(String.valueOf(roundTwoDecimals(amt)));
debitTF.setText(null);
} else if (amt < 0) {
double temp = bank + cash - total;
String s = String.valueOf(roundTwoDecimals(temp));
debitTF.setText(s);
creditTF.setText(null);
} else if (amt == 0) {
creditTF.setText(null);
debitTF.setText(null);
}
}
public void printTable(String notes, String reportName) {
String envDirectory = System.getenv("ProgramFiles");
Path path = FileSystems.getDefault().getPath(envDirectory + "/Business Manager/reports/" + companyCode + "/");
Path pathForReport = FileSystems.getDefault().getPath(envDirectory + "/Business Manager/reports/" + companyCode + "/", reportName + ".pdf");
if (Files.exists(path)) {
if (Files.exists(pathForReport)) {
notesTaker.setVisible(false);
int optionPane = JOptionPane.showConfirmDialog(null, "Report already exists.\nDo you want to override the file?", "Warning", JOptionPane.YES_NO_OPTION, JOptionPane.WARNING_MESSAGE);
if (optionPane == JOptionPane.YES_OPTION) {
printV(notes, reportName);
notesTaker.dispose();
} else {
notesTaker.setVisible(true);
}
} else {
printV(notes, reportName);
notesTaker.dispose();
}
} else {
printV(notes, reportName);
notesTaker.dispose();
}
}
private void printV(String notes, String reportName) {
this.notes = notes;
Object rN = "PURCHASE RETURN";
Object vN = voucherNumberTF.getText();
Calendar cal = dateChooserCombo1.getSelectedDate();
Object cN = accountNumberCB.getSelectedItem();
Object cA = cashTF.getText();
Object cAcc = cashAccCB.getSelectedItem();
Object bA = bankTF.getText();
Object bAcc = bankAccCB.getSelectedItem();
Object debit = debitTF.getText();
Object credit = creditTF.getText();
Object totA = amount.getText();
double pos = 0;
double neg = 0;
for (int i = 0; i < billSundryTable.getRowCount(); i++) {
try {
switch (billSundryTable.getValueAt(i, 3).toString()) {
case "Additive":
try {
pos = pos + Double.valueOf(billSundryTable.getValueAt(i, 4).toString());
} catch (NullPointerException | NumberFormatException ex) {
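// amount cell is empty or non-numeric; skip this additive row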
}
break;
case "Subtractive":
try {
neg = neg + Double.valueOf(billSundryTable.getValueAt(i, 4).toString());
} catch (NullPointerException | NumberFormatException ex) {
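// amount cell is empty or non-numeric; skip this subtractive row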
}
break;
}
} catch (NullPointerException ex) {
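// row has no nature (Additive/Subtractive) set; skip it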
}
}
Object posOtherB = pos;
Object negOtherB = neg;
Object gt = totalAmount.getText();
print = new RegRetPrint(mainFrame, companyCode, itemTable, reportName);
print.prepareModel();
// rN,vN,cal,notes,cN,cA,cAcc, bA,bAcc,debit,credit,totA,posOtherB,negOtherB,gt
print.prepareParams(rN, vN, cal, notes, cN, cA, cAcc, bA, bAcc, debit, credit, totA, posOtherB, negOtherB, gt);
print.print();
notesTaker.dispose();
}
}
| ["\"ProgramFiles\""] | [] | ["ProgramFiles"] | [] | ["ProgramFiles"] | java | 1 | 0 |
internal/pkg/database/connect.go | package database
import (
"encoding/json"
"github.com/NTNU-sondrbaa-2019/CLOUD-O1/pkg/CO1Cache"
_ "github.com/go-sql-driver/mysql"
"github.com/jmoiron/sqlx"
"log"
"os"
)
var connection *sqlx.DB
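// Connect opens the shared database handle. It first tries the DB-* environment
// variables and, if that fails, falls back to the cached db-config.json file
// (written with defaults on first run).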
func Connect() {
var db Connection
var err error
db.Host = os.Getenv("DB-HOST")
db.Port = os.Getenv("DB-PORT")
db.Username = os.Getenv("DB-USERNAME")
db.Password = os.Getenv("DB-PASSWORD")
db.Database = os.Getenv("DB-DATABASE")
dsn := db.Username + ":" + db.Password + "@tcp(" + db.Host + ":" + db.Port + ")/" + db.Database + "?parseTime=true"
connection, err = sqlx.Open("mysql", dsn)
var pingErr error
if err == nil {
pingErr = connection.Ping()
}
if err != nil || pingErr != nil {
log.Println("Couldn't connect to database using environment variables:", err, pingErr)
if !CO1Cache.Verify("db-config") {
log.Println("Writing default database configuration...")
CO1Cache.WriteJSON("db-config", DEFAULT_CONNECTION)
log.Fatalln("To test the AWS database from localhost, please insert password into the generated ./cache/db-config.json file!")
} else {
err = json.Unmarshal(CO1Cache.Read("db-config"), &db)
if err != nil {
log.Fatalln("Couldn't read database config file: ", err)
}
dsn := db.Username + ":" + db.Password + "@tcp(" + db.Host + ":" + db.Port + ")/" + db.Database + "?parseTime=true"
connection, err = sqlx.Open("mysql", dsn)
if err != nil {
log.Fatalln("Couldn't connect to database using .cache/db-config.json configuration: ", err)
}
err = connection.Ping()
if err != nil {
log.Fatalln("Couldn't connect to database using .cache/db-config.json configuration: ", err)
}
}
}
log.Println("Successfully connected to database!")
}
func GetConnection() *sqlx.DB {
return connection
}
| ["\"DB-HOST\"", "\"DB-PORT\"", "\"DB-USERNAME\"", "\"DB-PASSWORD\"", "\"DB-DATABASE\""] | [] | ["DB-DATABASE", "DB-PORT", "DB-PASSWORD", "DB-HOST", "DB-USERNAME"] | [] | ["DB-DATABASE", "DB-PORT", "DB-PASSWORD", "DB-HOST", "DB-USERNAME"] | go | 5 | 0 |
conanfile.py | # Copyright (c) 2020 The Orbit Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import os
import shutil
from io import StringIO
class OrbitConan(ConanFile):
name = "OrbitProfiler"
license = "BSD-2-Clause"
url = "https://github.com/pierricgimmig/orbitprofiler.git"
description = "C/C++ Performance Profiler"
settings = "os", "compiler", "build_type", "arch"
generators = ["cmake_find_package_multi", "cmake"]
options = {"system_mesa": [True, False],
"system_qt": [True, False], "with_gui": [True, False],
"debian_packaging": [True, False],
"fPIC": [True, False],
"crashdump_server": "ANY",
"with_crash_handling": [True, False]}
default_options = {"system_mesa": True,
"system_qt": True, "with_gui": True,
"debian_packaging": False,
"fPIC": True,
"crashdump_server": "",
"with_crash_handling": True}
_orbit_channel = "orbitdeps/stable"
exports_sources = "CMakeLists.txt", "Orbit*", "bin/*", "cmake/*", "third_party/*", "LICENSE"
def _version(self):
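# Resolve the package version lazily from `git describe`, stripping a
# leading "v" so tags like "v1.2.3" become "1.2.3".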
if not self.version:
buf = StringIO()
self.run("git describe --always --tags", output=buf)
self.version = buf.getvalue().strip()
if self.version[0] == 'v':
self.version = self.version[1:]
return self.version
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
if not self.options.with_gui:
del self.options.with_crash_handling
del self.options.crashdump_server
elif not self.options.with_crash_handling:
del self.options.crashdump_server
def build_requirements(self):
self.build_requires('protoc_installer/3.9.1@bincrafters/stable#0')
self.build_requires('grpc_codegen/1.27.3@orbitdeps/stable#ec39b3cf6031361be942257523c1839a')
self.build_requires('gtest/1.10.0#ef88ba8e54f5ffad7d706062d0731a40', force_host_context=True)
def requirements(self):
if self.settings.os != "Windows" and self.options.with_gui and not self.options.system_qt and self.options.system_mesa:
raise ConanInvalidConfiguration("When disabling system_qt, you also have to "
"disable system mesa.")
self.requires("abseil/20190808@{}#0".format(self._orbit_channel))
self.requires("bzip2/1.0.8@conan/stable#0")
self.requires("capstone/4.0.1@{}#0".format(self._orbit_channel))
self.requires(
"grpc/1.27.3@{}#dc2368a2df63276188566e36a6b7868a".format(self._orbit_channel))
self.requires("llvm_object/9.0.1-2@orbitdeps/stable#9fbb81e87811594e3ed6316e97675b86")
self.requires("lzma_sdk/19.00@orbitdeps/stable#a7bc173325d7463a0757dee5b08bf7fd")
self.requires("openssl/1.1.1d@{}#0".format(self._orbit_channel))
self.requires("Outcome/3dae433e@orbitdeps/stable#0")
self.requires(
"libprotobuf-mutator/20200506@{}#4ed8fc67624c9a35b7b0227e93c9d3c4".format(self._orbit_channel))
if self.settings.os != "Windows":
self.requires(
"libunwindstack/80a734f14@{}#0".format(self._orbit_channel))
self.requires("zlib/1.2.11#9e0c292b60ce77402bd9be60dd68266f")
if self.options.with_gui and self.options.with_crash_handling:
self.requires(
"crashpad/20200624@{}#8c19cb575eb819de0b050cf7d1f317b6".format(self._orbit_channel))
if self.options.with_gui:
self.requires("freetype/2.10.0@bincrafters/stable#0")
self.requires(
"freetype-gl/8d9a97a@{}#2836d28f3d91c308ec9652c2054015db".format(self._orbit_channel))
self.requires("glew/2.1.0@{}#0".format(self._orbit_channel))
self.requires("libssh2/1.9.0#df2b6034da12cc5cb68bd3c5c22601bf")
self.requires("imgui/1.69@bincrafters/stable#0")
self.requires("libpng/1.6.37@bincrafters/stable#0")
if self.options.with_gui:
if not self.options.system_mesa:
self.requires("libxi/1.7.10@bincrafters/stable#0")
if not self.options.system_qt:
self.requires("qt/5.14.1@bincrafters/stable#0")
def configure(self):
if self.options.debian_packaging and (self.settings.get_safe("os.platform") != "GGP" or tools.detected_os() != "Linux"):
raise ConanInvalidConfiguration(
"Debian packaging is only supported for GGP builds!")
if self.settings.os != "Windows" and not self.options.fPIC:
raise ConanInvalidConfiguration(
"We only support compiling with fPIC enabled!")
if self.options.with_gui and self.settings.arch == "x86":
raise ConanInvalidConfiguration(
"We don't actively support building the UI for 32bit platforms. Please remove this check in conanfile.py if you still want to do so!")
self.options["abseil"].cxx_standard = 17
self.options["gtest"].no_main = True
if self.options.with_gui:
self.options["glew"].system_mesa = self.options.system_mesa
if not self.options.system_qt:
self.options["qt"].shared = True
self.options["qt"].with_sqlite3 = False
self.options["qt"].with_mysql = False
self.options["qt"].with_pq = False
self.options["qt"].with_odbc = False
self.options["qt"].with_sdl2 = False
self.options["qt"].with_openal = False
if self.settings.os == "Windows":
self.options["qt"].qttools = True
self.options["qt"].with_glib = False
self.options["qt"].with_harfbuzz = False
self.options["qt"].opengl = "dynamic"
def build(self):
cmake = CMake(self)
cmake.definitions["WITH_GUI"] = "ON" if self.options.with_gui else "OFF"
if self.options.with_gui:
if self.options.with_crash_handling:
cmake.definitions["WITH_CRASH_HANDLING"] = "ON"
cmake.definitions["CRASHDUMP_SERVER"] = self.options.crashdump_server
else:
cmake.definitions["WITH_CRASH_HANDLING"] = "OFF"
cmake.configure()
cmake.build()
if not tools.cross_building(self.settings, skip_x64_x86=True) and self.settings.get_safe("os.platform") != "GGP":
cmake.test(output_on_failure=True)
def imports(self):
dest = os.getenv("CONAN_IMPORT_PATH", "bin")
self.copy("crashpad_handler*", src="@bindirs",
dst=dest, root_package="crashpad")
if self.options.with_gui:
for path in self.deps_cpp_info["freetype-gl"].resdirs:
self.copy("Vera.ttf", src=path, dst="{}/fonts/".format(dest))
self.copy("Vera.ttf", src=path,
dst="{}/fonts/".format("OrbitQt/"))
self.copy("v3f-t2f-c4f.*", src=path,
dst="{}/shaders/".format(dest))
self.copy("v3f-t2f-c4f.*", src=path,
dst="{}/shaders/".format("OrbitQt/"))
self.copy("license*", dst="licenses", folder=True, ignore_case=True)
self.copy("licence*", dst="licenses", folder=True, ignore_case=True)
def package(self):
if self.options.debian_packaging:
shutil.rmtree(self.package_folder)
self.copy("*.so*", src="bin/", dst="{}-{}/usr/lib/x86_64-linux-gnu/".format(
self.name, self._version()), symlinks=True)
self.copy("OrbitService", src="bin/",
dst="{}-{}/opt/developer/tools/".format(self.name, self._version()))
self.copy("NOTICE",
dst="{}-{}/usr/share/doc/{}/".format(self.name, self._version(), self.name))
self.copy("LICENSE",
dst="{}-{}/usr/share/doc/{}/".format(self.name, self._version(), self.name))
basedir = "{}/{}-{}".format(self.package_folder,
self.name, self._version())
os.makedirs("{}/DEBIAN".format(basedir), exist_ok=True)
tools.save("{}/DEBIAN/control".format(basedir), """Package: orbitprofiler
Version: {}
Section: development
Priority: optional
Architecture: amd64
Maintainer: Google, Inc <[email protected]>
Description: Orbit is a C/C++ profiler for Windows, Linux and the Stadia Platform.
Homepage: https://github.com/google/orbit
Installed-Size: `du -ks usr/ | cut -f 1`
""".format(self._version()))
tools.save("{}/DEBIAN/postinst".format(basedir), """
#!/bin/bash
# Setting the setuid-bit for OrbitService
chmod -v 4775 /opt/developer/tools/OrbitService
""")
self.run("chmod +x {}/DEBIAN/postinst".format(basedir))
self.run("chmod g-s {}/DEBIAN".format(basedir))
self.run("chmod g-s {}/".format(basedir))
self.run("dpkg-deb -b --root-owner-group {}".format(basedir))
self.run("dpkg --contents {}.deb".format(basedir))
shutil.rmtree(basedir)
self.copy("*", src="bin/dri", dst="bin/dri", symlinks=True)
self.copy("*", src="bin/fonts", dst="bin/fonts", symlinks=True)
self.copy("*", src="bin/shaders", dst="bin/shaders", symlinks=True)
self.copy("*", src="bin/icons", dst="bin/icons", symlinks=True)
self.copy("*.so*", src="bin/", dst="bin", symlinks=True)
self.copy("*.dll", src="bin/", dst="bin", symlinks=True)
self.copy("*.pdb", src="bin/", dst="bin")
self.copy("Orbit", src="bin/", dst="bin")
self.copy("Orbit.exe", src="bin/", dst="bin")
self.copy("Orbit.debug", src="bin/", dst="bin")
self.copy("OrbitService", src="bin/", dst="bin")
self.copy("OrbitService.exe", src="bin/", dst="bin")
self.copy("OrbitService.debug", src="bin/", dst="bin")
self.copy("crashpad_handler.exe", src="bin/", dst="bin")
self.copy("NOTICE")
self.copy("LICENSE")
def deploy(self):
self.copy("*", src="bin", dst="bin")
| [] | [] | ["CONAN_IMPORT_PATH"] | [] | ["CONAN_IMPORT_PATH"] | python | 1 | 0 |
examples/cnn_bert/two_stream_bert2.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 4 10:42:00 2019
@author: esat
"""
import os
import time
import argparse
import shutil
import numpy as np
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
from tensorboardX import SummaryWriter
from torch.optim import lr_scheduler
import video_transforms
import models
import datasets
import swats
from opt.AdamW import AdamW
from utils.model_path import rgb_3d_model_path_selection
model_names = sorted(name for name in models.__dict__
if not name.startswith("__")
and callable(models.__dict__[name]))
dataset_names = sorted(name for name in datasets.__all__)
parser = argparse.ArgumentParser(description='PyTorch Two-Stream Action Recognition')
# parser.add_argument('--data', metavar='DIR', default='./datasets/ucf101_frames',
# help='path to dataset')
parser.add_argument('--settings', metavar='DIR', default='./datasets/settings',
help='path to datset setting files')
# parser.add_argument('--modality', '-m', metavar='MODALITY', default='rgb',
# choices=["rgb", "flow"],
# help='modality: rgb | flow')
parser.add_argument('--dataset', '-d', default='hmdb51',
choices=["ucf101", "hmdb51", "smtV2", "window"],
help='dataset: ucf101 | hmdb51 | smtV2')
parser.add_argument('--arch', '-a', default='rgb_resneXt3D64f101_bert10_FRMB',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: rgb_resneXt3D64f101_bert10_FRMB)')
parser.add_argument('-s', '--split', default=1, type=int, metavar='S',
help='which split of data to work on (default: 1)')
parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
help='number of data loading workers (default: 2)')
parser.add_argument('--epochs', default=200, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('-b', '--batch-size', default=8, type=int,
metavar='N', help='mini-batch size (default: 8)')
parser.add_argument('--iter-size', default=16, type=int,
metavar='I', help='iter size to reduce memory usage (default: 16)')
parser.add_argument('--lr', '--learning-rate', default=1e-5, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum (default: 0.9)')
parser.add_argument('--weight-decay', '--wd', default=1e-3, type=float,
metavar='W', help='weight decay (default: 1e-3)')
parser.add_argument('--print-freq', default=400, type=int,
metavar='N', help='print frequency (default: 400)')
parser.add_argument('--save-freq', default=1, type=int,
metavar='N', help='save frequency (default: 1)')
parser.add_argument('--num-seg', default=1, type=int,
metavar='N', help='Number of segments in dataloader (default: 1)')
# parser.add_argument('--resume', default='./dene4', type=str, metavar='PATH',
# help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('-c', '--continue', dest='continue_training', action='store_true',
help='continue training from the latest checkpoint')
best_prec1 = 0
best_loss = 30
warmUpEpoch = 5
smt_pretrained = False
HALF = False
training_continue = False
def main():
global args, best_prec1, model, writer, best_loss, length, width, height, input_size, scheduler
args = parser.parse_args()
training_continue = args.continue_training
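# Derive the crop scale from the architecture name: '112' and default 3D
# variants (and r2plus1d) train on half-resolution crops, '224' and I3D
# variants on full resolution; 2D models use full resolution.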
if '3D' in args.arch:
if 'I3D' in args.arch or 'MFNET3D' in args.arch:
if '112' in args.arch:
scale = 0.5
else:
scale = 1
else:
if '224' in args.arch:
scale = 1
else:
scale = 0.5
elif 'r2plus1d' in args.arch:
scale = 0.5
else:
scale = 1
print('scale: %.1f' % (scale))
input_size = int(224 * scale)
width = int(340 * scale)
height = int(256 * scale)
saveLocation = "./checkpoint/" + args.dataset + "_" + args.arch + "_split" + str(args.split)
if not os.path.exists(saveLocation):
os.makedirs(saveLocation)
writer = SummaryWriter(saveLocation)
# create model
if args.evaluate:
print("Building validation model ... ")
model = build_model_validate()
optimizer = AdamW(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
elif training_continue:
model, startEpoch, optimizer, best_prec1 = build_model_continue()
for param_group in optimizer.param_groups:
lr = param_group['lr']
# param_group['lr'] = lr
print("Continuing with best precision: %.3f and start epoch %d and lr: %f" % (best_prec1, startEpoch, lr))
else:
print("Building model with ADAMW... ")
model = build_model()
optimizer = AdamW(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
startEpoch = 0
if HALF:
model.half() # convert to half precision
for layer in model.modules():
if isinstance(layer, nn.BatchNorm2d):
layer.float()
print("Model %s is loaded. " % (args.arch))
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
criterion2 = nn.MSELoss().cuda()
scheduler = lr_scheduler.ReduceLROnPlateau(
optimizer, 'min', patience=5, verbose=True)
print("Saving everything to directory %s." % (saveLocation))
if args.dataset == 'ucf101':
dataset = './datasets/ucf101_frames'
elif args.dataset == 'hmdb51':
dataset = './datasets/hmdb51_frames'
elif args.dataset == 'smtV2':
dataset = './datasets/smtV2_frames'
elif args.dataset == 'window':
dataset = './datasets/window_frames'
else:
print("No convenient dataset entered, exiting....")
return 0
cudnn.benchmark = True
modality = args.arch.split('_')[0]
if "3D" in args.arch or 'tsm' in args.arch or 'slowfast' in args.arch or 'r2plus1d' in args.arch:
if '64f' in args.arch:
length = 64
elif '32f' in args.arch:
length = 32
else:
length = 16
else:
length = 1
# Data transforming
if modality == "rgb" or modality == "pose":
is_color = True
scale_ratios = [1.0, 0.875, 0.75, 0.66]
if 'I3D' in args.arch:
if 'resnet' in args.arch:
clip_mean = [0.45, 0.45, 0.45] * args.num_seg * length
clip_std = [0.225, 0.225, 0.225] * args.num_seg * length
else:
clip_mean = [0.5, 0.5, 0.5] * args.num_seg * length
clip_std = [0.5, 0.5, 0.5] * args.num_seg * length
# clip_std = [0.25, 0.25, 0.25] * args.num_seg * length
elif 'MFNET3D' in args.arch:
clip_mean = [0.48627451, 0.45882353, 0.40784314] * args.num_seg * length
clip_std = [0.234, 0.234, 0.234] * args.num_seg * length
elif "3D" in args.arch:
clip_mean = [114.7748, 107.7354, 99.4750] * args.num_seg * length
clip_std = [1, 1, 1] * args.num_seg * length
elif "r2plus1d" in args.arch:
clip_mean = [0.43216, 0.394666, 0.37645] * args.num_seg * length
clip_std = [0.22803, 0.22145, 0.216989] * args.num_seg * length
elif "rep_flow" in args.arch:
clip_mean = [0.5, 0.5, 0.5] * args.num_seg * length
clip_std = [0.5, 0.5, 0.5] * args.num_seg * length
elif "slowfast" in args.arch:
clip_mean = [0.45, 0.45, 0.45] * args.num_seg * length
clip_std = [0.225, 0.225, 0.225] * args.num_seg * length
else:
clip_mean = [0.485, 0.456, 0.406] * args.num_seg * length
clip_std = [0.229, 0.224, 0.225] * args.num_seg * length
elif modality == "pose":
is_color = True
scale_ratios = [1.0, 0.875, 0.75, 0.66]
clip_mean = [0.485, 0.456, 0.406] * args.num_seg
clip_std = [0.229, 0.224, 0.225] * args.num_seg
elif modality == "flow":
is_color = False
scale_ratios = [1.0, 0.875, 0.75, 0.66]
if 'I3D' in args.arch:
clip_mean = [0.5, 0.5] * args.num_seg * length
clip_std = [0.5, 0.5] * args.num_seg * length
elif "3D" in args.arch:
clip_mean = [127.5, 127.5] * args.num_seg * length
clip_std = [1, 1] * args.num_seg * length
else:
clip_mean = [0.5, 0.5] * args.num_seg * length
clip_std = [0.226, 0.226] * args.num_seg * length
elif modality == "both":
is_color = True
scale_ratios = [1.0, 0.875, 0.75, 0.66]
clip_mean = [0.485, 0.456, 0.406, 0.5, 0.5] * args.num_seg * length
clip_std = [0.229, 0.224, 0.225, 0.226, 0.226] * args.num_seg * length
else:
print("No such modality. Only rgb and flow supported.")
normalize = video_transforms.Normalize(mean=clip_mean,
std=clip_std)
if "3D" in args.arch and not ('I3D' in args.arch):
train_transform = video_transforms.Compose([
video_transforms.MultiScaleCrop((input_size, input_size), scale_ratios),
video_transforms.RandomHorizontalFlip(),
video_transforms.ToTensor2(),
normalize,
])
val_transform = video_transforms.Compose([
video_transforms.CenterCrop((input_size)),
video_transforms.ToTensor2(),
normalize,
])
else:
train_transform = video_transforms.Compose([
video_transforms.MultiScaleCrop((input_size, input_size), scale_ratios),
video_transforms.RandomHorizontalFlip(),
video_transforms.ToTensor(),
normalize,
])
val_transform = video_transforms.Compose([
video_transforms.CenterCrop((input_size)),
video_transforms.ToTensor(),
normalize,
])
# data loading
train_setting_file = "train_%s_split%d.txt" % (modality, args.split)
train_split_file = os.path.join(args.settings, args.dataset, train_setting_file)
val_setting_file = "val_%s_split%d.txt" % (modality, args.split)
val_split_file = os.path.join(args.settings, args.dataset, val_setting_file)
if not os.path.exists(train_split_file) or not os.path.exists(val_split_file):
print("No split file exists in %s directory. Preprocess the dataset first" % (args.settings))
train_dataset = datasets.__dict__[args.dataset](root=dataset,
source=train_split_file,
phase="train",
modality=modality,
is_color=is_color,
new_length=length,
new_width=width,
new_height=height,
video_transform=train_transform,
num_segments=args.num_seg)
val_dataset = datasets.__dict__[args.dataset](root=dataset,
source=val_split_file,
phase="val",
modality=modality,
is_color=is_color,
new_length=length,
new_width=width,
new_height=height,
video_transform=val_transform,
num_segments=args.num_seg)
print('{} samples found, {} train samples and {} test samples.'.format(len(val_dataset) + len(train_dataset),
len(train_dataset),
len(val_dataset)))
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
prec1, prec3, lossClassification = validate(val_loader, model, criterion, criterion2, modality)
return
for epoch in range(startEpoch, args.epochs):
# if learning_rate_index > max_learning_rate_decay_count:
# break
# adjust_learning_rate(optimizer, epoch)
train(train_loader, model, criterion, criterion2, optimizer, epoch, modality)
# evaluate on validation set
prec1 = 0.0
lossClassification = 0
if (epoch + 1) % args.save_freq == 0:
prec1, prec3, lossClassification = validate(val_loader, model, criterion, criterion2, modality)
writer.add_scalar('data/top1_validation', prec1, epoch)
writer.add_scalar('data/top3_validation', prec3, epoch)
writer.add_scalar('data/classification_loss_validation', lossClassification, epoch)
scheduler.step(lossClassification)
# remember best prec@1 and save checkpoint
is_best = prec1 >= best_prec1
best_prec1 = max(prec1, best_prec1)
# best_in_existing_learning_rate = max(prec1, best_in_existing_learning_rate)
#
# if best_in_existing_learning_rate > prec1 + 1:
# learning_rate_index = learning_rate_index
# best_in_existing_learning_rate = 0
if (epoch + 1) % args.save_freq == 0:
checkpoint_name = "%03d_%s" % (epoch + 1, "checkpoint.pth.tar")
if is_best:
print("Model son iyi olarak kaydedildi")
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'best_loss': best_loss,
'optimizer': optimizer.state_dict(),
}, is_best, checkpoint_name, saveLocation)
checkpoint_name = "%03d_%s" % (epoch + 1, "checkpoint.pth.tar")
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'best_loss': best_loss,
'optimizer': optimizer.state_dict(),
}, is_best, checkpoint_name, saveLocation)
writer.export_scalars_to_json("./all_scalars.json")
writer.close()
def build_model():
modality = args.arch.split('_')[0]
if modality == "rgb":
model_path = rgb_3d_model_path_selection(args.arch)
# model_path = os.path.join(modelLocation,'model_best.pth.tar')
elif modality == "flow":
model_path = ''
if "3D" in args.arch:
if 'I3D' in args.arch:
model_path = './weights/flow_imagenet.pth'
elif '3D' in args.arch:
model_path = './weights/Flow_Kinetics_64f.pth'
# model_path = os.path.join(modelLocation,'model_best.pth.tar')
elif modality == "both":
model_path = ''
if args.dataset == 'ucf101':
print('model path is: %s' % (model_path))
model = models.__dict__[args.arch](modelPath=model_path, num_classes=101, length=args.num_seg)
elif args.dataset == 'hmdb51':
print('model path is: %s' % (model_path))
model = models.__dict__[args.arch](modelPath=model_path, num_classes=51, length=args.num_seg)
elif args.dataset == 'smtV2':
print('model path is: %s' % (model_path))
model = models.__dict__[args.arch](modelPath=model_path, num_classes=174, length=args.num_seg)
elif args.dataset == 'window':
print('model path is: %s' % (model_path))
model = models.__dict__[args.arch](modelPath=model_path, num_classes=3, length=args.num_seg)
if torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
model = model.cuda()
return model
def build_model_validate():
modelLocation = "./checkpoint/" + args.dataset + "_" + args.arch + "_split" + str(args.split)
model_path = os.path.join(modelLocation, 'model_best.pth.tar')
params = torch.load(model_path)
print(modelLocation)
if args.dataset == 'ucf101':
model = models.__dict__[args.arch](modelPath='', num_classes=101, length=args.num_seg)
elif args.dataset == 'hmdb51':
model = models.__dict__[args.arch](modelPath='', num_classes=51, length=args.num_seg)
if torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
model.load_state_dict(params['state_dict'])
model.cuda()
model.eval()
return model
def build_model_continue():
modelLocation = "./checkpoint/" + args.dataset + "_" + args.arch + "_split" + str(args.split)
model_path = os.path.join(modelLocation, 'model_best.pth.tar')
params = torch.load(model_path)
print(modelLocation)
if args.dataset == 'ucf101':
model = models.__dict__[args.arch](modelPath='', num_classes=101, length=args.num_seg)
elif args.dataset == 'hmdb51':
model = models.__dict__[args.arch](modelPath='', num_classes=51, length=args.num_seg)
if torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
model.load_state_dict(params['state_dict'])
model = model.cuda()
optimizer = AdamW(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
optimizer.load_state_dict(params['optimizer'])
startEpoch = params['epoch']
best_prec = params['best_prec1']
return model, startEpoch, optimizer, best_prec
def train(train_loader, model, criterion, criterion2, optimizer, epoch, modality):
batch_time = AverageMeter()
lossesClassification = AverageMeter()
top1 = AverageMeter()
top3 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
optimizer.zero_grad()
loss_mini_batch_classification = 0.0
acc_mini_batch = 0.0
acc_mini_batch_top3 = 0.0
totalSamplePerIter = 0
for i, (inputs, targets) in enumerate(train_loader):
if modality == "rgb" or modality == "pose":
if "3D" in args.arch or "r2plus1d" in args.arch or 'slowfast' in args.arch:
inputs = inputs.view(-1, length, 3, input_size, input_size).transpose(1, 2)
elif modality == "flow":
if "3D" in args.arch or "r2plus1d" in args.arch:
inputs = inputs.view(-1, length, 2, input_size, input_size).transpose(1, 2)
else:
inputs = inputs.view(-1, 2 * length, input_size, input_size)
elif modality == "both":
inputs = inputs.view(-1, 5 * length, input_size, input_size)
if HALF:
inputs = inputs.cuda().half()
else:
inputs = inputs.cuda()
targets = targets.cuda()
output, input_vectors, sequenceOut, maskSample = model(inputs)
# maskSample=maskSample.cuda()
# input_vectors=(1-maskSample[:,1:]).unsqueeze(2)*input_vectors
# sequenceOut=(1-maskSample[:,1:]).unsqueeze(2)*sequenceOut
# measure accuracy and record loss
# input_vectors_rank=input_vectors.view(-1,input_vectors.shape[-1])
# targetRank=torch.tensor(range(args.num_seg)).repeat(input_vectors.shape[0]).cuda()
# rankingFC = nn.Linear(input_vectors.shape[-1], args.num_seg).cuda()
# out_rank = rankingFC(input_vectors_rank)
prec1, prec3 = accuracy(output.data, targets, topk=(1, 3))
acc_mini_batch += prec1.item()
acc_mini_batch_top3 += prec3.item()
lossClassification = criterion(output, targets)
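# divide by iter_size so gradients accumulated over iter_size mini-batches
# are equivalent to one optimizer step on a batch of batch_size * iter_size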
lossClassification = lossClassification / args.iter_size
# totalLoss=lossMSE
totalLoss = lossClassification
# totalLoss = lossMSE + lossClassification
loss_mini_batch_classification += lossClassification.data.item()
totalLoss.backward()
totalSamplePerIter += output.size(0)
if (i + 1) % args.iter_size == 0:
# compute gradient and do SGD step
optimizer.step()
optimizer.zero_grad()
lossesClassification.update(loss_mini_batch_classification, totalSamplePerIter)
top1.update(acc_mini_batch / args.iter_size, totalSamplePerIter)
top3.update(acc_mini_batch_top3 / args.iter_size, totalSamplePerIter)
batch_time.update(time.time() - end)
end = time.time()
loss_mini_batch_classification = 0
acc_mini_batch = 0
acc_mini_batch_top3 = 0.0
totalSamplePerIter = 0.0
# scheduler.step()
if (i + 1) % args.print_freq == 0:
print('[%d] time: %.3f loss: %.4f' % (i, batch_time.avg, lossesClassification.avg))
print(
' * Epoch: {epoch} Prec@1 {top1.avg:.3f} Prec@3 {top3.avg:.3f} Classification Loss {lossClassification.avg:.4f}\n'
.format(epoch=epoch, top1=top1, top3=top3, lossClassification=lossesClassification))
writer.add_scalar('data/classification_loss_training', lossesClassification.avg, epoch)
writer.add_scalar('data/top1_training', top1.avg, epoch)
writer.add_scalar('data/top3_training', top3.avg, epoch)
def validate(val_loader, model, criterion, criterion2, modality):
batch_time = AverageMeter()
lossesClassification = AverageMeter()
top1 = AverageMeter()
top3 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
with torch.no_grad():
for i, (inputs, targets) in enumerate(val_loader):
if modality == "rgb" or modality == "pose":
if "3D" in args.arch or "r2plus1d" in args.arch or 'slowfast' in args.arch:
inputs = inputs.view(-1, length, 3, input_size, input_size).transpose(1, 2)
elif modality == "flow":
if "3D" in args.arch or "r2plus1d" in args.arch:
inputs = inputs.view(-1, length, 2, input_size, input_size).transpose(1, 2)
else:
inputs = inputs.view(-1, 2 * length, input_size, input_size)
elif modality == "both":
inputs = inputs.view(-1, 5 * length, input_size, input_size)
if HALF:
inputs = inputs.cuda().half()
else:
inputs = inputs.cuda()
targets = targets.cuda()
# compute output
output, input_vectors, sequenceOut, _ = model(inputs)
lossClassification = criterion(output, targets)
# measure accuracy and record loss
prec1, prec3 = accuracy(output.data, targets, topk=(1, 3))
lossesClassification.update(lossClassification.data.item(), output.size(0))
top1.update(prec1.item(), output.size(0))
top3.update(prec3.item(), output.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
print(' * * Prec@1 {top1.avg:.3f} Prec@3 {top3.avg:.3f} Classification Loss {lossClassification.avg:.4f}\n'
.format(top1=top1, top3=top3, lossClassification=lossesClassification))
return top1.avg, top3.avg, lossesClassification.avg
def save_checkpoint(state, is_best, filename, resume_path):
cur_path = os.path.join(resume_path, filename)
torch.save(state, cur_path)
best_path = os.path.join(resume_path, 'model_best.pth.tar')
if is_best:
shutil.copyfile(cur_path, best_path)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 150 epochs"""
decay = 0.1 ** (sum(epoch >= np.array(args.lr_steps)))  # note: expects an --lr-steps argument that is not defined in this script
lr = args.lr * decay
print("Current learning rate is %4.6f:" % lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def adjust_learning_rate2(optimizer, epoch):
isWarmUp = epoch < warmUpEpoch
decayRate = 0.2
if isWarmUp:
lr = args.lr * (epoch + 1) / warmUpEpoch
else:
lr = args.lr * (1 / (1 + (epoch + 1 - warmUpEpoch) * decayRate))
# decay = 0.1 ** (sum(epoch >= np.array(args.lr_steps)))
print("Current learning rate is %4.6f:" % lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def adjust_learning_rate3(optimizer, epoch):
isWarmUp = epoch < warmUpEpoch
decayRate = 0.97
if isWarmUp:
lr = args.lr * (epoch + 1) / warmUpEpoch
else:
lr = args.lr * decayRate ** (epoch + 1 - warmUpEpoch)
# decay = 0.1 ** (sum(epoch >= np.array(args.lr_steps)))
print("Current learning rate is %4.6f:" % lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def adjust_learning_rate4(optimizer, learning_rate_index):
"""Sets the learning rate to the initial LR decayed by 10 every 150 epochs"""
decay = 0.1 ** learning_rate_index
lr = args.lr * decay
print("Current learning rate is %4.8f:" % lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
| [] | [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 |
experiments/digit/unsupervised_digit_inspect.py | import argparse
import os, sys
import os.path as osp
import torchvision
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
import network, loss
from torch.utils.data import DataLoader
import random, pdb, math, copy
from tqdm import tqdm
from scipy.spatial.distance import cdist
import pickle
from data_load import mnist, svhn, usps
# inverse_transform = None
# class InverseTransform(torchvision.transforms.Normalize):
# """
# Undoes the normalization and returns the reconstructed images in the input domain.
# """
# def __init__(self, mean, std):
# mean = torch.as_tensor(mean)
# std = torch.as_tensor(std)
# std_inv = 1 / (std + 1e-7)
# mean_inv = -mean * std_inv
# super().__init__(mean=mean_inv, std=std_inv)
# def __call__(self, tensor):
# t = super().__call__(tensor.clone())
# # return transforms.ToPILImage()(t)
# return t
def digit_load(args):
global inverse_transform
train_bs = args.batch_size
if args.dset == 's':
test_source = svhn.SVHN('./data/svhn/', split='test', download=True,
transform=transforms.Compose([
transforms.Resize(32),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]))
# assert inverse_transform == None
# inverse_transform = InverseTransform((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
elif args.dset == 'u':
test_source = usps.USPS('./data/usps/', train=False, download=True,
transform=transforms.Compose([
transforms.RandomCrop(28, padding=4),
transforms.RandomRotation(10),
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))
]))
# assert inverse_transform == None
# inverse_transform = InverseTransform((0.5,), (0.5,))
elif args.dset == 'm':
test_source = mnist.MNIST('./data/mnist/', train=False, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))
]))
# assert inverse_transform == None
# inverse_transform = InverseTransform((0.5,), (0.5,))
dset_loaders = {}
dset_loaders["test"] = DataLoader(test_source, batch_size=train_bs*2, shuffle=False,
num_workers=args.worker, drop_last=False)
return dset_loaders
def cal_acc(loader, netF, netB, netC):
k = 0
start_test = True
with torch.no_grad():
iter_test = iter(loader)
for i in range(len(loader)):
data = next(iter_test)
input_images = []
inputs = data[0]
inputs_clone = inputs.clone()
for j in range(inputs_clone.size(0)):
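# invert Normalize((0.5,), (0.5,)): Normalize(mean=-1, std=2) maps y back to (y + 1) / 2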
x = transforms.Normalize((-1,), (2,))(inputs_clone[j])
input_images.append(transforms.ToPILImage()(x))
labels = data[1]
outputs = netC(netB(netF(inputs)))
#
_, predict = torch.max(outputs.float().cpu(), 1)
for j in range(inputs.size(0)):
folder = args.output_dir + '/inspect/label-{}'.format(labels[j])
if not osp.exists(folder):
os.makedirs(folder)
subfolder = folder + '/pred-{}'.format(predict[j])
if not osp.exists(subfolder):
os.makedirs(subfolder)
input_images[j].save(subfolder + '/{}.jpg'.format(k))
k += 1
#
if start_test:
all_output = outputs.float().cpu()
all_label = labels.float()
start_test = False
else:
all_output = torch.cat((all_output, outputs.float().cpu()), 0)
all_label = torch.cat((all_label, labels.float()), 0)
_, predict = torch.max(all_output, 1)
accuracy = torch.sum(torch.squeeze(predict).float() == all_label).item() / float(all_label.size()[0])
mean_ent = torch.mean(loss.Entropy(nn.Softmax(dim=1)(all_output))).cpu().data.item()
return accuracy*100, mean_ent
def test(args):
dset_loaders = digit_load(args)
## set base network
if args.dset == 'u':
netF = network.LeNetBase()#.cuda()
elif args.dset == 'm':
netF = network.LeNetBase()#.cuda()
elif args.dset == 's':
netF = network.DTNBase()#.cuda()
netB = network.feat_bootleneck(type=args.classifier, feature_dim=netF.in_features, bottleneck_dim=args.bottleneck)#.cuda()
netC = network.feat_classifier(type=args.layer, class_num = args.class_num, bottleneck_dim=args.bottleneck)#.cuda()
args.modelpath = args.output_dir + '/F.pt'
netF.load_state_dict(torch.load(args.modelpath))
args.modelpath = args.output_dir + '/B.pt'
netB.load_state_dict(torch.load(args.modelpath))
args.modelpath = args.output_dir + '/C.pt'
netC.load_state_dict(torch.load(args.modelpath))
netF.eval()
netB.eval()
netC.eval()
acc, _ = cal_acc(dset_loaders['test'], netF, netB, netC)
log_str = 'Task: {}, Accuracy = {:.2f}%'.format(args.dset, acc)
try:
args.out_file.write(log_str + '\n')
args.out_file.flush()
except:
pass
print(log_str+'\n')
def print_args(args):
s = "==========================================\n"
for arg, content in args.__dict__.items():
s += "{}:{}\n".format(arg, content)
return s
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='SHOT')
parser.add_argument('--gpu_id', type=str, nargs='?', default='0', help="device id to run")
parser.add_argument('--s', type=int, default=0, help="source")
parser.add_argument('--t', type=int, default=1, help="target")
parser.add_argument('--max_epoch', type=int, default=30, help="maximum epoch")
parser.add_argument('--batch_size', type=int, default=64, help="batch_size")
parser.add_argument('--worker', type=int, default=4, help="number of workers")
parser.add_argument('--dset', type=str, default='s', choices=['u', 'm','s'])
parser.add_argument('--lr', type=float, default=0.01, help="learning rate")
parser.add_argument('--seed', type=int, default=2020, help="random seed")
parser.add_argument('--bottleneck', type=int, default=256)
parser.add_argument('--layer', type=str, default="wn", choices=["linear", "wn"])
parser.add_argument('--classifier', type=str, default="bn", choices=["ori", "bn"])
parser.add_argument('--output', type=str, default='')
parser.add_argument('--issave', type=bool, default=True)
args = parser.parse_args()
args.class_num = 10
# os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
SEED = args.seed
torch.manual_seed(SEED)
# torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)
# torch.backends.cudnn.deterministic = True
args.output_dir = osp.join(args.output, 'seed' + str(args.seed), args.dset)
test(args)
# python unsupervised_digit.py --dset m --gpu_id 0 --output ckps_unsupervised_digit
# python unsupervised_digit.py --dset m --gpu_id 0 --ent --output ckps_unsupervised_digit_ent
# python unsupervised_digit.py --dset m --gpu_id 0 --gent --output ckps_unsupervised_digit_gent
# python unsupervised_digit.py --dset m --gpu_id 0 --ent --gent --output ckps_unsupervised_digit_ent_gent
# in fact there is no way to know which class will come out... ideally inspect with t-SNE? or show the classifications first?
# show classification + gradcam (faster version)
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 |
project/wsgi.py | """
Copyright 2016 Michael Spiegel, Wilhelm Kleiminger
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
WSGI config for tutorial project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 |
code/test_LA.py | import os
import argparse
import torch
from networks.vnet import VNet
from test_util import test_all_case
parser = argparse.ArgumentParser()
parser.add_argument('--root_path', type=str, default='../data/2018LA_Seg_Training Set/', help='Name of Experiment')
parser.add_argument('--model', type=str, default='vnet_supervisedonly_dp', help='model_name')
parser.add_argument('--gpu', type=str, default='0', help='GPU to use')
parser.add_argument('--epoch_num', type=int, default=6000, help='checkpoint to use')
FLAGS = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
snapshot_path = "../model_la/"+FLAGS.model+"/"
test_save_path = "../model_la/prediction/"+FLAGS.model+"_post/"
if not os.path.exists(test_save_path):
os.makedirs(test_save_path)
num_classes = 2
with open(FLAGS.root_path + '/../test.list', 'r') as f:
image_list = f.readlines()
image_list = [FLAGS.root_path +item.replace('\n', '')+"/mri_norm2.h5" for item in image_list]
# print(image_list)
def test_calculate_metric(epoch_num):
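# Rebuild the V-Net, load the requested checkpoint, and run patch-based
# sliding-window inference (112x112x80 patches) over the test volumes.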
net = VNet(n_channels=1, n_classes=num_classes, normalization='batchnorm', has_dropout=False).cuda()
save_mode_path = os.path.join(snapshot_path, 'iter_' + str(epoch_num) + '.pth')
net.load_state_dict(torch.load(save_mode_path))
print("init weight from {}".format(save_mode_path))
net.eval()
avg_metric = test_all_case(net, image_list, num_classes=num_classes,
patch_size=(112, 112, 80), stride_xy=18, stride_z=4,
save_result=True, test_save_path=test_save_path)
return avg_metric
if __name__ == '__main__':
metric = test_calculate_metric(FLAGS.epoch_num)
# print(metric)
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 |
modules/pymol/Qt/__init__.py | """
Wrapper for PyMOL scripts to get PySide or PyQt
Useful link for PySide/PyQt4 differences:
https://deptinfo-ensip.univ-poitiers.fr/ENS/pyside-docs/pysideapi2.html
PyQt5/PyQt4 differences:
http://pyqt.sourceforge.net/Docs/PyQt5/pyqt4_differences.html
"""
from __future__ import absolute_import as _
DEBUG = False
PYQT_NAME = None
QtWidgets = None
try:
from pymol._Qt_pre import *
except ImportError:
if DEBUG:
print('import _Qt_pre failed')
if not PYQT_NAME:
try:
from PyQt5 import QtGui, QtCore, QtOpenGL, QtWidgets
PYQT_NAME = 'PyQt5'
except ImportError:
if DEBUG:
print('import PyQt5 failed')
if not PYQT_NAME:
try:
from PySide2 import QtGui, QtCore, QtOpenGL, QtWidgets
PYQT_NAME = 'PySide2'
except ImportError:
if DEBUG:
print('import PySide2 failed')
if not PYQT_NAME:
try:
try:
import PyQt4.sip as sip
except ImportError:
import sip
sip.setapi("QString", 2)
from PyQt4 import QtGui, QtCore, QtOpenGL
PYQT_NAME = 'PyQt4'
except ImportError:
if DEBUG:
print('import PyQt4 failed')
if not PYQT_NAME:
try:
from PySide import QtGui, QtCore, QtOpenGL
PYQT_NAME = 'PySide'
except ImportError:
if DEBUG:
print('import PySide failed')
if not PYQT_NAME:
raise ImportError(__name__)
import os
# qtpy compatibility
os.environ['QT_API'] = PYQT_NAME.lower()
if QtWidgets is None:
QtWidgets = QtGui
if hasattr(QtCore, 'QAbstractProxyModel'):
QtCoreModels = QtCore
else:
QtCoreModels = QtGui
if PYQT_NAME == 'PyQt4':
QFileDialog = QtWidgets.QFileDialog
QFileDialog.getOpenFileName = QFileDialog.getOpenFileNameAndFilter
QFileDialog.getOpenFileNames = QFileDialog.getOpenFileNamesAndFilter
QFileDialog.getSaveFileName = QFileDialog.getSaveFileNameAndFilter
del QFileDialog
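# Unify the signal/slot API: PyQt exposes pyqtSignal/pyqtSlot, while PySide
# already provides Signal/Slot; PySide in turn lacks the QT_VERSION constants.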
if PYQT_NAME[:4] == 'PyQt':
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
else:
QtCore.QT_VERSION_STR = QtCore.__version__
QtCore.QT_VERSION = (
0x10000 * QtCore.__version_info__[0] +
0x00100 * QtCore.__version_info__[1] +
0x00001 * QtCore.__version_info__[2])
del os
| [] | [] | ["QT_API"] | [] | ["QT_API"] | python | 1 | 0 |
main.go | package main
import (
"os"
"os/signal"
"syscall"
log "github.com/Sirupsen/logrus"
api_v1 "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/workqueue"
)
// retrieve the Kubernetes cluster client; with an empty kubeconfig path this
// falls back to the in-cluster configuration
func getKubernetesClient() kubernetes.Interface {
// path to a kubeconfig such as os.Getenv("HOME") + "/.kube/config"; leave
// empty to use the in-cluster config
kubeConfigPath := "" // os.Getenv("HOME") + "/.kube/config"
// create the config from the path
config, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath)
if err != nil {
log.Fatalf("getClusterConfig: %v", err)
}
// generate the client based off of the config
client, err := kubernetes.NewForConfig(config)
if err != nil {
log.Fatalf("getClusterConfig: %v", err)
}
log.Debugf("Successfully constructed k8s client")
return client
}
// main code path
func main() {
// get the Kubernetes client for connectivity
client := getKubernetesClient()
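// NamespaceAll ("") makes the informer list and watch pods in every namespace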
namespace := meta_v1.NamespaceAll
// create the informer so that we can not only list resources
// but also watch them, for all pods across all namespaces
informer := cache.NewSharedIndexInformer(
// the ListWatch contains two different functions that our
// informer requires: ListFunc to take care of listing and watching
// the resources we want to handle
&cache.ListWatch{
ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) {
// list all of the pods (core resource) across all namespaces
return client.CoreV1().Pods(namespace).List(options)
},
WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) {
// watch all of the pods (core resource) across all namespaces
return client.CoreV1().Pods(namespace).Watch(options)
},
},
&api_v1.Pod{}, // the target type (Pod)
0, // no resync (period of 0)
cache.Indexers{},
)
// create a new queue so that when the informer gets a resource that is either
// a result of listing or watching, we can add an identifying key to the queue
// so that it can be handled in the handler
queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
// add event handlers to handle the three types of events for resources:
// - adding new resources
// - updating existing resources
// - deleting resources
informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
// convert the resource object into a key (in this case
// we are just doing it in the format of 'namespace/name')
key, err := cache.MetaNamespaceKeyFunc(obj)
log.Debugf("Add pod: %s", key)
if err == nil {
// add the key to the queue for the handler to get
queue.Add(key)
log.Debugf(" Queue len: %d", queue.Len())
}
},
UpdateFunc: func(oldObj, newObj interface{}) {
key, err := cache.MetaNamespaceKeyFunc(newObj)
log.Debugf("Update pod: %s", key)
if err == nil {
queue.Add(key)
log.Debugf(" Queue len: %d", queue.Len())
}
},
DeleteFunc: func(obj interface{}) {
// DeletionHandlingMetaNamespaceKeyFunc is a helper function that allows
// us to check the DeletedFinalStateUnknown existence in the event that
// a resource was deleted but it is still contained in the index
//
// this then in turn calls MetaNamespaceKeyFunc
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
log.Infof("Delete pod: %s", key)
if err == nil {
queue.Add(key)
log.Debugf(" Queue len: %d", queue.Len())
}
},
})
// construct the Controller object which has all of the necessary components to
// handle logging, connections, informing (listing and watching), the queue,
// and the handler
controller := Controller{
logger: log.NewEntry(log.New()),
clientset: client,
informer: informer,
queue: queue,
handler: &SidecarShutdownHandler{},
}
// use a channel to synchronize the finalization for a graceful shutdown
stopCh := make(chan struct{})
defer close(stopCh)
// run the controller loop to process items
go controller.Run(stopCh)
// use a channel to handle OS signals to terminate and gracefully shut
// down processing
sigTerm := make(chan os.Signal, 1)
signal.Notify(sigTerm, syscall.SIGTERM)
signal.Notify(sigTerm, syscall.SIGINT)
<-sigTerm
log.Info("Shutting down....")
}
| ["\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 |