filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence |
---|---|---|---|---|---|---|---|---|---|---|
test/e2e/deploy/volumeleaks.go | /*
Copyright 2020 Intel Corporation.
SPDX-License-Identifier: Apache-2.0
*/
package deploy
import (
"encoding/json"
"fmt"
"os"
"os/exec"
"regexp"
"strings"
"time"
api "github.com/intel/pmem-csi/pkg/apis/pmemcsi/v1beta1"
"github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/stretchr/testify/assert"
)
type Volumes struct {
d *Deployment
output []string
}
// GetHostVolumes lists all volumes (LVM and namespaces) on all nodes.
func GetHostVolumes(d *Deployment) Volumes {
var output []string
// Instead of trying to find out the number of hosts, we trust that the set of
// ssh.N helper scripts matches the running hosts, which should be the case in
// a correctly running test system. We run ssh.N commands until an ssh.N
// script turns out not to exist ("no such file").
for host := 0; ; host++ {
sshcmd := fmt.Sprintf("%s/_work/%s/ssh.%d", os.Getenv("REPO_ROOT"), os.Getenv("CLUSTER"), host)
if _, err := os.Stat(sshcmd); err == nil {
output = append(output,
listVolumes(sshcmd, fmt.Sprintf("host #%d, LVM: ", host), "sudo lvs --foreign --noheadings")...)
// On the master node we never expect to leak
// namespaces. On workers it is a bit more
// tricky: when starting in LVM mode, the
// namespace created for that by the driver is
// left behind, which is okay.
//
// Detecting that particular namespace is
// tricky, so for the sake of simplicity we
// skip leak detection of namespaces unless we
// know for sure that the test doesn't use LVM
// mode. Unfortunately, that is currently only
// the case when the device mode is explicitly
// set to direct mode. For operator tests that
// mode is unset and thus namespace leaks are
// not detected.
if host == 0 || d.Mode == api.DeviceModeDirect {
output = append(output,
listVolumes(sshcmd, fmt.Sprintf("host #%d, direct: ", host), "sudo ndctl list")...)
}
} else {
// ssh wrapper does not exist: all nodes handled.
break
}
}
return Volumes{
d: d,
output: output,
}
}
// Some lines are allowed to change, for example the enumeration of
// namespaces and devices because those change when rebooting a
// node. We filter out those lines.
var ignored = regexp.MustCompile(`direct: "dev":"namespace|direct: "blockdev":"pmem`)
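// For illustration (hypothetical sample lines, not from a real run), the
// filter above would drop entries such as:
//
//	host #1, direct: "dev":"namespace0.0", ...
//	host #1, direct: "blockdev":"pmem0", ...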
func listVolumes(sshcmd, prefix, cmd string) []string {
for i := 0; ; i++ {
ssh := exec.Command(sshcmd, cmd)
// Intentionally use Output instead of CombinedOutput so that warnings on stderr are discarded.
// lvs may emit lvmetad-related WARNING messages which can't be silenced with the -q option.
out, err := ssh.Output()
if err != nil {
if i >= 3 {
ginkgo.Fail(fmt.Sprintf("%s: repeated ssh attempts failed: %v", sshcmd, err))
}
ginkgo.By(fmt.Sprintf("%s: attempt #%d failed, retry: %v", sshcmd, i, err))
time.Sleep(10 * time.Second)
} else {
var lines []string
for _, line := range strings.Split(string(out), "\n") {
volumeLine := prefix + strings.TrimSpace(line)
if !ignored.MatchString(volumeLine) {
lines = append(lines, volumeLine)
}
}
return lines
}
}
return nil
}
// CheckForLeaks lists the volumes again after the test; any difference indicates leaked volumes.
func (v Volumes) CheckForLeaks() {
volNow := GetHostVolumes(v.d)
if !assert.Equal(ginkgo.GinkgoT(), v, volNow) {
ginkgo.Fail("volume leak")
}
}
type NdctlOutput struct {
Regions []Region `json:"regions"`
}
type Region struct {
Size int64 `json:"size"`
AvailableSize int64 `json:"available_size"`
Namespaces []Namespace `json:"namespaces"`
}
type Namespace struct {
Mode string `json:"mode"`
}
func ParseNdctlOutput(out []byte) (NdctlOutput, error) {
var parsed NdctlOutput
// `ndctl list` output is inconsistent:
// [ { "dev":"region0", ...
// vs.
// { regions: [ {"dev": "region0" ...
var err error
if strings.HasPrefix(string(out), "[") {
err = json.Unmarshal(out, &parsed.Regions)
} else {
err = json.Unmarshal(out, &parsed)
}
return parsed, err
}
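// Illustrative sketch (not part of the original file): both output shapes
// decode to the same result. The region sizes below are made-up values.
//
//	a, _ := ParseNdctlOutput([]byte(`[{"size":128,"available_size":64,"namespaces":[]}]`))
//	b, _ := ParseNdctlOutput([]byte(`{"regions":[{"size":128,"available_size":64,"namespaces":[]}]}`))
//	// a.Regions and b.Regions describe the same single region.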
// CheckPMEM ensures that a test does not permanently use more than
// half of the available PMEM in each region, so that tests in direct
// mode still have space to work with.
//
// Volume leaks (in direct mode) or allocating all space for a volume group (in LVM mode)
// trigger this check.
func CheckPMEM() {
for worker := 1; ; worker++ {
sshcmd := fmt.Sprintf("%s/_work/%s/ssh.%d", os.Getenv("REPO_ROOT"), os.Getenv("CLUSTER"), worker)
ssh := exec.Command(sshcmd, "sudo ndctl list -Rv")
out, err := ssh.CombinedOutput()
if err != nil && os.IsNotExist(err) {
break
}
Expect(err).ShouldNot(HaveOccurred(), "unexpected output for `ndctl list` on on host #%d:\n%s", worker, string(out))
parsed, err := ParseNdctlOutput(out)
Expect(err).ShouldNot(HaveOccurred(), "unexpected error parsing the ndctl output %q: %v", string(out), err)
regions := parsed.Regions
Expect(regions).ShouldNot(BeEmpty(), "unexpected `ndctl list` output on host #%d, no regions: %s", worker, string(out))
Expect(err).ShouldNot(HaveOccurred(), "unexpected JSON parsing error for `ndctl list` output on on host #%d:\n%s", worker, string(out))
for i, region := range regions {
if region.AvailableSize < region.Size/2 {
ginkgo.Fail(fmt.Sprintf("more than half of region #%d is in use:\n%s", i, string(out)))
}
}
}
}
| ["\"REPO_ROOT\"", "\"CLUSTER\"", "\"REPO_ROOT\"", "\"CLUSTER\""] | [] | ["CLUSTER", "REPO_ROOT"] | [] | ["CLUSTER", "REPO_ROOT"] | go | 2 | 0 | |
test/e2e/framework/repository.go | package framework
import (
"math"
"os"
"strconv"
api "github.com/appscode/stash/apis/stash/v1alpha1"
. "github.com/onsi/gomega"
apps "k8s.io/api/apps/v1beta1"
core "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type KindMetaReplicas struct {
Kind string
Meta metav1.ObjectMeta
Replicas int
}
func (f *Framework) EventuallyRepository(workload interface{}) GomegaAsyncAssertion {
return Eventually(func() []*api.Repository {
switch workload.(type) {
case *extensions.DaemonSet:
return f.DaemonSetRepos(workload.(*extensions.DaemonSet))
case *apps.Deployment:
return f.DeploymentRepos(workload.(*apps.Deployment))
case *core.ReplicationController:
return f.ReplicationControllerRepos(workload.(*core.ReplicationController))
case *extensions.ReplicaSet:
return f.ReplicaSetRepos(workload.(*extensions.ReplicaSet))
case *apps.StatefulSet:
return f.StatefulSetRepos(workload.(*apps.StatefulSet))
default:
return nil
}
})
}
func (f *Framework) GetRepositories(kmr KindMetaReplicas) []*api.Repository {
repoNames := make([]string, 0)
nodeName := os.Getenv("NODE_NAME")
if nodeName == "" {
nodeName = "minikube"
}
workload := api.LocalTypedReference{Name: kmr.Meta.Name, Kind: kmr.Kind}
switch kmr.Kind {
case api.KindDeployment, api.KindReplicationController, api.KindReplicaSet, api.KindDaemonSet:
repoNames = append(repoNames, workload.GetRepositoryCRDName("", nodeName))
case api.KindStatefulSet:
for i := 0; i < kmr.Replicas; i++ {
repoNames = append(repoNames, workload.GetRepositoryCRDName(kmr.Meta.Name+"-"+strconv.Itoa(i), nodeName))
}
}
repositories := make([]*api.Repository, 0)
for _, repoName := range repoNames {
obj, err := f.StashClient.StashV1alpha1().Repositories(kmr.Meta.Namespace).Get(repoName, metav1.GetOptions{})
if err == nil {
repositories = append(repositories, obj)
}
}
return repositories
}
func (f *Framework) DeleteRepositories(repositories []*api.Repository) {
for _, repo := range repositories {
err := f.StashClient.StashV1alpha1().Repositories(repo.Namespace).Delete(repo.Name, deleteInForeground())
Expect(err).NotTo(HaveOccurred())
}
}
func (f *Framework) BackupCountInRepositoriesStatus(repos []*api.Repository) int64 {
var backupCount int64 = math.MaxInt64
// use minimum backupCount among all repos
for _, repo := range repos {
if repo.Status.BackupCount < backupCount {
backupCount = repo.Status.BackupCount
}
}
return backupCount
}
| ["\"NODE_NAME\""] | [] | ["NODE_NAME"] | [] | ["NODE_NAME"] | go | 1 | 0 | |
12-junit5/src/test/java/course/java/simple/AssumptionDemo.java | package course.java.simple;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import java.math.BigInteger;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assumptions.assumeTrue;
import static org.junit.jupiter.api.Assumptions.assumingThat;
@Tag("fast")
@Tag("service")
public class AssumptionDemo {
private final Calculator calculator = new Calculator();
// @Test
// void testOnlyInCIServer() {
// assumeTrue("CI".equals(System.getenv("ENV")));
// assertEquals(42, calculator.multiply(6, 7));
// }
@Test
void testOnlyOnDeveloperWorkstation() {
assumeTrue("DEV".equals(System.getenv("ENV")));
assertEquals(BigInteger.valueOf(1000000007), calculator.generateNextPrime(BigInteger.valueOf(1000000000)));
}
@Test
void testInAllEnvironments() {
assumingThat("CI".equals(System.getenv("ENV")),
() -> assertEquals(42, calculator.multiply(6, 7)));
assumingThat("DEV".equals(System.getenv("ENV")),
() -> assertEquals(BigInteger.valueOf(1000000007), calculator.generateNextPrime(BigInteger.valueOf(1000000000))));
}
}
| ["\"ENV\"", "\"ENV\"", "\"ENV\"", "\"ENV\""] | [] | ["ENV"] | [] | ["ENV"] | java | 1 | 0 | |
test/recover.go | // run
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Test of basic recover functionality.
package main
import (
"os"
"reflect"
"runtime"
)
func main() {
// go.tools/ssa/interp still has:
// - some lesser bugs in recover()
// - incomplete support for reflection
interp := os.Getenv("GOSSAINTERP") != ""
test1()
test1WithClosures()
test2()
test3()
if !interp {
test4()
}
test5()
test6()
test6WithClosures()
test7()
test8()
test9()
if !interp {
test9reflect1()
test9reflect2()
}
test10()
if !interp {
test10reflect1()
test10reflect2()
}
test11()
if !interp {
test11reflect1()
test11reflect2()
}
test111()
test12()
if !interp {
test12reflect1()
test12reflect2()
}
test13()
if !interp {
test13reflect1()
test13reflect2()
}
test14()
if !interp {
test14reflect1()
test14reflect2()
test15()
}
}
func die() {
runtime.Breakpoint() // can't depend on panic
}
func mustRecoverBody(v1, v2, v3, x interface{}) {
v := v1
if v != nil {
println("spurious recover", v)
die()
}
v = v2
if v == nil {
println("missing recover", x.(int))
die() // panic is useless here
}
if v != x {
println("wrong value", v, x)
die()
}
// the value should be gone now regardless
v = v3
if v != nil {
println("recover didn't recover")
die()
}
}
func doubleRecover() interface{} {
return recover()
}
func mustRecover(x interface{}) {
mustRecoverBody(doubleRecover(), recover(), recover(), x)
}
func mustNotRecover() {
v := recover()
if v != nil {
println("spurious recover", v)
die()
}
}
func withoutRecover() {
mustNotRecover() // because it's a sub-call
}
func test1() {
defer mustNotRecover() // because mustRecover will squelch it
defer mustRecover(1) // because of panic below
defer withoutRecover() // should be no-op, leaving for mustRecover to find
panic(1)
}
// Repeat test1 with closures instead of standard function.
// Interesting because recover bases its decision
// on the frame pointer of its caller, and a closure's
// frame pointer is in the middle of its actual arguments
// (after the hidden ones for the closed-over variables).
func test1WithClosures() {
defer func() {
v := recover()
if v != nil {
println("spurious recover in closure")
die()
}
}()
defer func(x interface{}) {
mustNotRecover()
v := recover()
if v == nil {
println("missing recover", x.(int))
die()
}
if v != x {
println("wrong value", v, x)
die()
}
}(1)
defer func() {
mustNotRecover()
}()
panic(1)
}
func test2() {
// Recover only sees the panic argument
// if it is called from a deferred call.
// It does not see the panic when called from a call within a deferred call (too late)
// nor does it see the panic when it *is* the deferred call (too early).
defer mustRecover(2)
defer recover() // should be no-op
panic(2)
}
func test3() {
defer mustNotRecover()
defer func() {
recover() // should squelch
}()
panic(3)
}
func test4() {
// Equivalent to test3 but using defer to make the call.
defer mustNotRecover()
defer func() {
defer recover() // should squelch
}()
panic(4)
}
// Check that closures can set output arguments.
// Run g(). If it panics, return x; else return deflt.
func try(g func(), deflt interface{}) (x interface{}) {
defer func() {
if v := recover(); v != nil {
x = v
}
}()
defer g()
return deflt
}
// Check that closures can set output arguments.
// Run g(). If it panics, return x; else return deflt.
func try1(g func(), deflt interface{}) (x interface{}) {
defer func() {
if v := recover(); v != nil {
x = v
}
}()
defer g()
x = deflt
return
}
func test5() {
v := try(func() { panic(5) }, 55).(int)
if v != 5 {
println("wrong value", v, 5)
die()
}
s := try(func() {}, "hi").(string)
if s != "hi" {
println("wrong value", s, "hi")
die()
}
v = try1(func() { panic(5) }, 55).(int)
if v != 5 {
println("try1 wrong value", v, 5)
die()
}
s = try1(func() {}, "hi").(string)
if s != "hi" {
println("try1 wrong value", s, "hi")
die()
}
}
// When a deferred big call starts, it must first
// create yet another stack segment to hold the
// giant frame for x. Make sure that doesn't
// confuse recover.
func big(mustRecover bool) {
var x [100000]int
x[0] = 1
x[99999] = 1
_ = x
v := recover()
if mustRecover {
if v == nil {
println("missing big recover")
die()
}
} else {
if v != nil {
println("spurious big recover")
die()
}
}
}
func test6() {
defer big(false)
defer big(true)
panic(6)
}
func test6WithClosures() {
defer func() {
var x [100000]int
x[0] = 1
x[99999] = 1
_ = x
if recover() != nil {
println("spurious big closure recover")
die()
}
}()
defer func() {
var x [100000]int
x[0] = 1
x[99999] = 1
_ = x
if recover() == nil {
println("missing big closure recover")
die()
}
}()
panic("6WithClosures")
}
func test7() {
ok := false
func() {
// should panic, then call mustRecover 7, which stops the panic.
// then should keep processing ordinary defers earlier than that one
// before returning.
// this test checks that the defer func on the next line actually runs.
defer func() { ok = true }()
defer mustRecover(7)
panic(7)
}()
if !ok {
println("did not run ok func")
die()
}
}
func varargs(s *int, a ...int) {
*s = 0
for _, v := range a {
*s += v
}
if recover() != nil {
*s += 100
}
}
func test8a() (r int) {
defer varargs(&r, 1, 2, 3)
panic(0)
}
func test8b() (r int) {
defer varargs(&r, 4, 5, 6)
return
}
func test8() {
if test8a() != 106 || test8b() != 15 {
println("wrong value")
die()
}
}
type I interface {
M()
}
// pointer receiver, so no wrapper in i.M()
type T1 struct{}
func (*T1) M() {
mustRecoverBody(doubleRecover(), recover(), recover(), 9)
}
func test9() {
var i I = &T1{}
defer i.M()
panic(9)
}
func test9reflect1() {
f := reflect.ValueOf(&T1{}).Method(0).Interface().(func())
defer f()
panic(9)
}
func test9reflect2() {
f := reflect.TypeOf(&T1{}).Method(0).Func.Interface().(func(*T1))
defer f(&T1{})
panic(9)
}
// word-sized value receiver, so no wrapper in i.M()
type T2 uintptr
func (T2) M() {
mustRecoverBody(doubleRecover(), recover(), recover(), 10)
}
func test10() {
var i I = T2(0)
defer i.M()
panic(10)
}
func test10reflect1() {
f := reflect.ValueOf(T2(0)).Method(0).Interface().(func())
defer f()
panic(10)
}
func test10reflect2() {
f := reflect.TypeOf(T2(0)).Method(0).Func.Interface().(func(T2))
defer f(T2(0))
panic(10)
}
// tiny receiver, so basic wrapper in i.M()
type T3 struct{}
func (T3) M() {
mustRecoverBody(doubleRecover(), recover(), recover(), 11)
}
func test11() {
var i I = T3{}
defer i.M()
panic(11)
}
func test11reflect1() {
f := reflect.ValueOf(T3{}).Method(0).Interface().(func())
defer f()
panic(11)
}
func test11reflect2() {
f := reflect.TypeOf(T3{}).Method(0).Func.Interface().(func(T3))
defer f(T3{})
panic(11)
}
// tiny receiver, so basic wrapper in i.M()
type T3deeper struct{}
func (T3deeper) M() {
badstate() // difference from T3
mustRecoverBody(doubleRecover(), recover(), recover(), 111)
}
func test111() {
var i I = T3deeper{}
defer i.M()
panic(111)
}
type Tiny struct{}
func (Tiny) M() {
panic(112)
}
// i.M is a wrapper, and i.M panics.
//
// This is a torture test for an old implementation of recover that
// tried to deal with wrapper functions by doing some argument
// positioning math on both entry and exit. Doing anything on exit
// is a problem because sometimes functions exit via panic instead
// of an ordinary return, so panic would have to know to do the
// same math when unwinding the stack. It gets complicated fast.
// This particular test never worked with the old scheme, because
// panic never did the right unwinding math.
//
// The new scheme adjusts Panic.argp on entry to a wrapper.
// It has no exit work, so if a wrapper is interrupted by a panic,
// there's no cleanup that panic itself must do.
// This test just works now.
func badstate() {
defer func() {
recover()
}()
var i I = Tiny{}
i.M()
}
// large receiver, so basic wrapper in i.M()
type T4 [2]string
func (T4) M() {
mustRecoverBody(doubleRecover(), recover(), recover(), 12)
}
func test12() {
var i I = T4{}
defer i.M()
panic(12)
}
func test12reflect1() {
f := reflect.ValueOf(T4{}).Method(0).Interface().(func())
defer f()
panic(12)
}
func test12reflect2() {
f := reflect.TypeOf(T4{}).Method(0).Func.Interface().(func(T4))
defer f(T4{})
panic(12)
}
// enormous receiver, so wrapper splits stack to call M
type T5 [8192]byte
func (T5) M() {
mustRecoverBody(doubleRecover(), recover(), recover(), 13)
}
func test13() {
var i I = T5{}
defer i.M()
panic(13)
}
func test13reflect1() {
f := reflect.ValueOf(T5{}).Method(0).Interface().(func())
defer f()
panic(13)
}
func test13reflect2() {
f := reflect.TypeOf(T5{}).Method(0).Func.Interface().(func(T5))
defer f(T5{})
panic(13)
}
// enormous receiver + enormous method frame, so wrapper splits stack to call M,
// and then M splits stack to allocate its frame.
// recover must look back two frames to find the panic.
type T6 [8192]byte
var global byte
func (T6) M() {
var x [8192]byte
x[0] = 1
x[1] = 2
for i := range x {
global += x[i]
}
mustRecoverBody(doubleRecover(), recover(), recover(), 14)
}
func test14() {
var i I = T6{}
defer i.M()
panic(14)
}
func test14reflect1() {
f := reflect.ValueOf(T6{}).Method(0).Interface().(func())
defer f()
panic(14)
}
func test14reflect2() {
f := reflect.TypeOf(T6{}).Method(0).Func.Interface().(func(T6))
defer f(T6{})
panic(14)
}
// function created by reflect.MakeFunc
func reflectFunc(args []reflect.Value) (results []reflect.Value) {
mustRecoverBody(doubleRecover(), recover(), recover(), 15)
return nil
}
func test15() {
f := reflect.MakeFunc(reflect.TypeOf((func())(nil)), reflectFunc).Interface().(func())
defer f()
panic(15)
}
| ["\"GOSSAINTERP\""] | [] | ["GOSSAINTERP"] | [] | ["GOSSAINTERP"] | go | 1 | 0 | |
cache/main.go | package cache
import (
"goVideo/util"
"os"
"strconv"
"github.com/go-redis/redis"
)
// RedisClient is the singleton Redis cache client
var RedisClient *redis.Client
// Redis initializes the Redis connection, called from middleware
func Redis() {
db, _ := strconv.ParseUint(os.Getenv("REDIS_DB"), 10, 64)
client := redis.NewClient(&redis.Options{
Addr: os.Getenv("REDIS_ADDR"),
Password: os.Getenv("REDIS_PW"),
DB: int(db),
MaxRetries: 1,
})
_, err := client.Ping().Result()
if err != nil {
util.Log().Panic("连接Redis不成功", err)
}
RedisClient = client
}
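// Illustrative usage sketch (not part of the original package): the three
// environment variables read above must be set before calling Redis(), e.g.
//
//	os.Setenv("REDIS_ADDR", "127.0.0.1:6379") // hypothetical local instance
//	os.Setenv("REDIS_PW", "")
//	os.Setenv("REDIS_DB", "0")
//	cache.Redis()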
| ["\"REDIS_DB\"", "\"REDIS_ADDR\"", "\"REDIS_PW\""] | [] | ["REDIS_DB", "REDIS_ADDR", "REDIS_PW"] | [] | ["REDIS_DB", "REDIS_ADDR", "REDIS_PW"] | go | 3 | 0 | |
renderer_blender_src.py | import argparse
import re
####
# # Box 1
####
import sys,os,imageio,lpips
root = '/home/youngsun/documents/mvs/mvsnerf_timing'
os.chdir(root)
sys.path.append(root)
from opt_src import config_parser
from data import dataset_dict
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
# models
from models_src import *
from renderer_src import *
from data.ray_utils import get_rays
from tqdm import tqdm
from skimage.metrics import structural_similarity
# pytorch-lightning
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning import LightningModule, Trainer, loggers
from data.ray_utils import ray_marcher
import torch
torch.cuda.set_device(0)
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
device = torch.device('cuda') # referenced throughout the rendering code below
####
# # Box 2
####
def decode_batch(batch):
rays = batch['rays'] # (B, 8)
rgbs = batch['rgbs'] # (B, 3)
return rays, rgbs
def unpreprocess(data, shape=(1,1,3,1,1)):
# to unnormalize image for visualization
# data N V C H W
device = data.device
mean = torch.tensor([-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225]).view(*shape).to(device)
std = torch.tensor([1 / 0.229, 1 / 0.224, 1 / 0.225]).view(*shape).to(device)
return (data - mean) / std
def read_depth(filename):
depth_h = np.array(read_pfm(filename)[0], dtype=np.float32) # (800, 800)
depth_h = cv2.resize(depth_h, None, fx=0.5, fy=0.5,
interpolation=cv2.INTER_NEAREST) # (600, 800)
depth_h = depth_h[44:556, 80:720] # (512, 640)
# depth = cv2.resize(depth_h, None, fx=0.5, fy=0.5,interpolation=cv2.INTER_NEAREST)#!!!!!!!!!!!!!!!!!!!!!!!!!
mask = depth_h > 0
return depth_h,mask
loss_fn_vgg = lpips.LPIPS(net='vgg')
mse2psnr = lambda x : -10. * np.log(x) / np.log(10.)
####
# # Box 3
####
# create function for returning dense, sparse, far views
def get_source_imgs(source_dataset, target_position, N_views, device, view_type='nearest',
fixed_idxs=None,
is_source_target_overlap=False):
pair_idx = get_pair_idx(source_dataset, target_position, N_views, view_type, fixed_idxs, is_source_target_overlap)
imgs_source, proj_mats, near_far_source, pose_source = source_dataset.read_source_views(pair_idx=pair_idx,device=device)
return imgs_source, proj_mats, near_far_source, pose_source
def get_pair_idx(source_dataset, target_position, N_views, view_type='nearest',
fixed_idxs=None,
is_source_target_overlap=False):
positions = source_dataset.poses[:,:3,3]
dis = np.sum(np.abs(positions - target_position), axis=-1)
dis_sort = np.argsort(dis)
if is_source_target_overlap:
dis_sort = dis_sort[1:]
if view_type == 'nearest': # or "as dense as possible"
pair_idx = dis_sort[:N_views]
pair_idx = [source_dataset.img_idx[item] for item in pair_idx]
if view_type == 'dense':
idxs = torch.randperm(int(np.rint(N_views*1.5)))[:N_views].sort()[0]
pair_idx = dis_sort[idxs]
pair_idx = [source_dataset.img_idx[item] for item in pair_idx]
if view_type == 'random': # I know it's unnecessarily long...
idxs = torch.randperm(len(dis_sort))[:N_views]
pair_idx = dis_sort[idxs]
pair_idx = [source_dataset.img_idx[item] for item in pair_idx]
if view_type == 'sparse':
idxs = torch.linspace(0, len(dis_sort), steps=N_views+1).round()
idxs = [np.random.choice(range(int(idxs[i]), int(idxs[i+1]))) for i in range(len(idxs)-1)]
pair_idx = dis_sort[idxs]
pair_idx = [source_dataset.img_idx[item] for item in pair_idx]
if view_type == 'far':
idxs = torch.randperm(int(np.rint(N_views*1.5)))[:N_views].sort(descending=True)[0]
pair_idx = dis_sort[::-1][idxs]
pair_idx = [source_dataset.img_idx[item] for item in pair_idx]
if view_type == 'farthest':
pair_idx = dis_sort[::-1][:N_views]
pair_idx = [source_dataset.img_idx[item] for item in pair_idx]
# return index for the case of 'fixed'
if view_type == 'fixed':
pair_idx = fixed_idxs
return pair_idx
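# Illustrative usage sketch (not part of the original script): select the 3
# source views closest to the first target camera; the arguments mirror the
# call sites in render_blender() below.
#
#   idx = get_pair_idx(dataset_source,
#                      target_position=dataset_target.poses[[0], :3, 3],
#                      N_views=3, view_type='nearest')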
####
# # Box 4
####
def render_blender(view_type='nearest',
scenes=['ficus'],
num_src_views=3,
ckpt='base-3src-dense.tar',
source_split='train',
target_split='val',
select_index=None,
is_fixed=False,
is_source_target_overlap=False
):
psnr_all,ssim_all,LPIPS_vgg_all = [],[],[]
# for i_scene, scene in enumerate(['ship','mic','chair','lego','drums','ficus','materials','hotdog']):#
for i_scene, scene in enumerate(scenes):#
psnr,ssim,LPIPS_vgg = [],[],[]
cmd = f'--datadir /mnt/hdd/mvsnerf_data/nerf_synthetic/{scene} \
--dataset_name blender_src --white_bkgd \
--net_type v0 --ckpt ./ckpts/{ckpt} --num_src_views {num_src_views}'
save_dir = f'/mnt/hdd/youngsun/mvsnerf_timing/results/{ckpt[:-4]}/blender-{num_src_views}-'
if is_fixed:
save_dir += 'fixed-'
save_dir += f'{view_type}-'
save_dir += f'{source_split}-{target_split}/{scene}'
args = config_parser(cmd.split())
args.use_viewdirs = True
args.N_samples = 128
# args.feat_dim = 8+12
args.feat_dim = 8+4*num_src_views
# create models
if 0==i_scene:
render_kwargs_train, render_kwargs_test, start, grad_vars = create_nerf_mvs(args, use_mvs=True, dir_embedder=False, pts_embedder=True)
filter_keys(render_kwargs_train)
MVSNet = render_kwargs_train['network_mvs']
render_kwargs_train.pop('network_mvs')
datadir = args.datadir
datatype = 'train'
pad = 16
args.chunk = 5120
print('============> rendering dataset <===================')
dataset_source = dataset_dict[args.dataset_name](args, split=source_split)
dataset_target = dataset_dict[args.dataset_name](args, split=target_split, select_index=select_index)
target_idx = dataset_target.img_idx
save_as_image = True
rgbs = [] # collects frames for the video when save_as_image is False
os.makedirs(save_dir, exist_ok=True)
MVSNet.train()
MVSNet = MVSNet.cuda()
with torch.no_grad():
try:
tqdm._instances.clear()
except Exception:
pass
for i, batch in enumerate(tqdm(dataset_target)):
torch.cuda.empty_cache()
rays, img = decode_batch(batch)
rays = rays.squeeze().to(device) # (H*W, 3)
img = img.squeeze().cpu().numpy() # (H, W, 3)
if is_fixed:
if i == 0:
if select_index is not None:
pair_idx = get_pair_idx(source_dataset=dataset_source,
target_position=dataset_target.poses[[len(select_index)//2],:3,3],
N_views=args.num_src_views,
view_type=view_type)
else:
pair_idx = get_pair_idx(source_dataset=dataset_source,
target_position=dataset_target.poses[[50],:3,3],
N_views=args.num_src_views,
view_type=view_type)
imgs_source, proj_mats, near_far_source, pose_source = dataset_source.read_source_views(pair_idx=pair_idx,
device=device)
else:
# created fixed image_source
imgs_source, proj_mats, near_far_source, pose_source = get_source_imgs(source_dataset=dataset_source,
target_position=dataset_target.poses[[i],:3,3],
N_views=args.num_src_views, device=device,
view_type=view_type)
volume_feature, _, _ = MVSNet(imgs_source, proj_mats, near_far_source, pad=pad)
imgs_source = unpreprocess(imgs_source)
N_rays_all = rays.shape[0]
rgb_rays, depth_rays_preds = [],[]
for chunk_idx in range(N_rays_all//args.chunk + int(N_rays_all%args.chunk>0)):
xyz_coarse_sampled, rays_o, rays_d, z_vals = ray_marcher(rays[chunk_idx*args.chunk:(chunk_idx+1)*args.chunk],
N_samples=args.N_samples)
# Converting world coordinate to ndc coordinate
H, W = img.shape[:2]
inv_scale = torch.tensor([W - 1, H - 1]).to(device)
w2c_ref, intrinsic_ref = pose_source['w2cs'][0], pose_source['intrinsics'][0].clone()
intrinsic_ref[:2] *= args.imgScale_test/args.imgScale_train
xyz_NDC = get_ndc_coordinate(w2c_ref, intrinsic_ref, xyz_coarse_sampled, inv_scale,
near=near_far_source[0], far=near_far_source[1], pad=pad*args.imgScale_test)
# rendering
rgb, disp, acc, depth_pred, alpha, extras = rendering(args, pose_source, xyz_coarse_sampled,
xyz_NDC, z_vals, rays_o, rays_d,
volume_feature,imgs_source, **render_kwargs_train)
rgb, depth_pred = torch.clamp(rgb.cpu(),0,1.0).numpy(), depth_pred.cpu().numpy()
rgb_rays.append(rgb)
depth_rays_preds.append(depth_pred)
depth_rays_preds = np.concatenate(depth_rays_preds).reshape(H, W)
depth_rays_preds, _ = visualize_depth_numpy(depth_rays_preds, near_far_source)
rgb_rays = np.concatenate(rgb_rays).reshape(H, W, 3)
img_vis = np.concatenate((img*255,rgb_rays*255,depth_rays_preds),axis=1)
img_vis = np.concatenate((torch.cat(torch.split(imgs_source*255, [1]*num_src_views, dim=1),-1).squeeze().permute(1,2,0).cpu().numpy(),img_vis),axis=1)
if save_as_image:
imageio.imwrite(f'{save_dir}/{scene}_{target_idx[i]:03d}.png', img_vis.astype('uint8'))
else:
rgbs.append(img_vis.astype('uint8'))
# quantity
# center crop 0.8 ratio
H_crop, W_crop = np.array(rgb_rays.shape[:2])//10
img = img[H_crop:-H_crop,W_crop:-W_crop]
rgb_rays = rgb_rays[H_crop:-H_crop,W_crop:-W_crop]
psnr.append( mse2psnr(np.mean((rgb_rays-img)**2)))
ssim.append( structural_similarity(rgb_rays, img, multichannel=True))
img_tensor = torch.from_numpy(rgb_rays)[None].permute(0,3,1,2).float()*2-1.0 # image should be RGB, IMPORTANT: normalized to [-1,1]
img_gt_tensor = torch.from_numpy(img)[None].permute(0,3,1,2).float()*2-1.0
LPIPS_vgg.append( loss_fn_vgg(img_tensor, img_gt_tensor).item())
print(f'=====> scene: {scene} mean psnr {np.mean(psnr)} ssim: {np.mean(ssim)} lpips: {np.mean(LPIPS_vgg)}')
psnr_all.append(psnr);ssim_all.append(ssim);LPIPS_vgg_all.append(LPIPS_vgg)
if not save_as_image:
imageio.mimwrite(f'{save_dir}/{scene}_spiral.mp4', np.stack(rgbs), fps=20, quality=10)
print(f'=====> all mean psnr {np.mean(psnr_all)} ssim: {np.mean(ssim_all)} lpips: {np.mean(LPIPS_vgg_all)}')
####
# # Box 5
####
def render_blender_all_settings(scenes=['lego'], num_src_views=3, ckpt='base-3src-dense.tar',source_split='train', target_split='val', select_index=[30,60,90], view_types=[1]):
if 1 in view_types:
render_blender('nearest', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None)
if 2 in view_types:
render_blender('dense', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None)
if 3 in view_types:
render_blender('sparse', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None)
if 4 in view_types:
render_blender('far', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None)
if 5 in view_types:
render_blender('random', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None)
if 6 in view_types:
render_blender('nearest', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=True)
if 7 in view_types:
render_blender('sparse', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=True)
if 8 in view_types:
render_blender('nearest', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None, is_source_target_overlap=True)
if 9 in view_types:
render_blender('sparse', scenes, num_src_views, ckpt, source_split, target_split, select_index, is_fixed=None, is_source_target_overlap=True)
return None
####
# # Box 6
####
####
# # Box 7
####
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--view_types', nargs="+", type=int,
help= 'Enter list of view types to render:' \
' 1 - nearest, 2 - dense, 3 - sparse, 4 - far, 5 - random, ' \
'6 - fixed nearest, 7 - fixed sparse, 8 - unseen nearest, 9 - unseen sparse')
parser.add_argument('--view_indexes', nargs="+", type=int, const=None, default=None,
help= 'default - all views (100)')
parser.add_argument('--scenes', nargs='+', default=[])
parser.add_argument('--ckpts', nargs='+', default=[])
parser.add_argument('--source', type=str, default='train')
parser.add_argument('--target', type=str, default='val')
args = parser.parse_args()
for ckpt in args.ckpts:
num_src_views = int(re.findall('[0-9]+', ckpt)[0])
render_blender_all_settings(scenes=args.scenes,
num_src_views=num_src_views,
ckpt=ckpt,
source_split=args.source,
target_split=args.target,
select_index=args.view_indexes,
view_types=args.view_types)
torch.cuda.empty_cache()
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
cmd/cql-minerd/integration_test.go | // +build !testbinary
/*
* Copyright 2018 The CovenantSQL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"context"
"database/sql"
"flag"
"fmt"
"io/ioutil"
"math/rand"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"sync"
"sync/atomic"
"syscall"
"testing"
"time"
sqlite3 "github.com/CovenantSQL/go-sqlite3-encrypt"
. "github.com/smartystreets/goconvey/convey"
"github.com/CovenantSQL/CovenantSQL/client"
"github.com/CovenantSQL/CovenantSQL/conf"
"github.com/CovenantSQL/CovenantSQL/crypto"
"github.com/CovenantSQL/CovenantSQL/crypto/asymmetric"
"github.com/CovenantSQL/CovenantSQL/crypto/kms"
"github.com/CovenantSQL/CovenantSQL/naconn"
"github.com/CovenantSQL/CovenantSQL/proto"
"github.com/CovenantSQL/CovenantSQL/route"
rpc "github.com/CovenantSQL/CovenantSQL/rpc/mux"
"github.com/CovenantSQL/CovenantSQL/test"
"github.com/CovenantSQL/CovenantSQL/types"
"github.com/CovenantSQL/CovenantSQL/utils"
"github.com/CovenantSQL/CovenantSQL/utils/log"
"github.com/CovenantSQL/CovenantSQL/utils/trace"
)
var (
baseDir = utils.GetProjectSrcDir()
testWorkingDir = FJ(baseDir, "./test/")
gnteConfDir = FJ(testWorkingDir, "./GNTE/conf/node_c/")
testnetConfDir = FJ(baseDir, "./conf/testnet/")
logDir = FJ(testWorkingDir, "./log/")
testGasPrice uint64 = 1
testAdvancePayment uint64 = 20000000
nodeCmds []*utils.CMD
FJ = filepath.Join
// Benchmark flags
benchMinerCount int
benchBypassSignature bool
benchEventualConsistency bool
benchMinerDirectRPC bool
benchMinerConfigDir string
)
func init() {
flag.IntVar(&benchMinerCount, "bench-miner-count", 1,
"Benchmark miner count.")
flag.BoolVar(&benchBypassSignature, "bench-bypass-signature", false,
"Benchmark bypassing signature.")
flag.BoolVar(&benchEventualConsistency, "bench-eventual-consistency", false,
"Benchmark with eventaul consistency.")
flag.BoolVar(&benchMinerDirectRPC, "bench-direct-rpc", false,
"Benchmark with with direct RPC protocol.")
flag.StringVar(&benchMinerConfigDir, "bench-miner-config-dir", "",
"Benchmark custome miner config directory.")
}
func TestMain(m *testing.M) {
flag.Parse()
if benchMinerDirectRPC {
naconn.RegisterResolver(rpc.NewDirectResolver())
}
os.Exit(m.Run())
}
func startNodes() {
ctx := context.Background()
// wait for ports to be available
var err error
err = utils.WaitForPorts(ctx, "127.0.0.1", []int{
3122,
3121,
3120,
}, time.Millisecond*200)
if err != nil {
log.Fatalf("wait for port ready timeout: %v", err)
}
// start 3bps
var cmd *utils.CMD
if cmd, err = utils.RunCommandNB(
FJ(baseDir, "./bin/cqld.test"),
[]string{"-config", FJ(testWorkingDir, "./integration/node_0/config.yaml"),
"-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/leader.cover.out"),
"-metric-web", "0.0.0.0:13122",
},
"leader", testWorkingDir, logDir, true,
); err == nil {
nodeCmds = append(nodeCmds, cmd)
} else {
log.Errorf("start node failed: %v", err)
}
if cmd, err = utils.RunCommandNB(
FJ(baseDir, "./bin/cqld.test"),
[]string{"-config", FJ(testWorkingDir, "./integration/node_1/config.yaml"),
"-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/follower1.cover.out"),
"-metric-web", "0.0.0.0:13121",
},
"follower1", testWorkingDir, logDir, false,
); err == nil {
nodeCmds = append(nodeCmds, cmd)
} else {
log.Errorf("start node failed: %v", err)
}
if cmd, err = utils.RunCommandNB(
FJ(baseDir, "./bin/cqld.test"),
[]string{"-config", FJ(testWorkingDir, "./integration/node_2/config.yaml"),
"-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/follower2.cover.out"),
"-metric-web", "0.0.0.0:13120",
},
"follower2", testWorkingDir, logDir, false,
); err == nil {
nodeCmds = append(nodeCmds, cmd)
} else {
log.Errorf("start node failed: %v", err)
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel()
err = utils.WaitToConnect(ctx, "127.0.0.1", []int{
3122,
3121,
3120,
}, time.Second)
if err != nil {
log.Fatalf("wait for port ready timeout: %v", err)
}
ctx, cancel = context.WithTimeout(context.Background(), time.Second*30)
defer cancel()
err = utils.WaitForPorts(ctx, "127.0.0.1", []int{
2144,
2145,
2146,
}, time.Millisecond*200)
if err != nil {
log.Fatalf("wait for port ready timeout: %v", err)
}
time.Sleep(10 * time.Second)
// start 3miners
os.RemoveAll(FJ(testWorkingDir, "./integration/node_miner_0/data"))
if cmd, err = utils.RunCommandNB(
FJ(baseDir, "./bin/cql-minerd.test"),
[]string{"-config", FJ(testWorkingDir, "./integration/node_miner_0/config.yaml"),
"-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/miner0.cover.out"),
"-metric-web", "0.0.0.0:12144",
},
"miner0", testWorkingDir, logDir, true,
); err == nil {
nodeCmds = append(nodeCmds, cmd)
} else {
log.Errorf("start node failed: %v", err)
}
os.RemoveAll(FJ(testWorkingDir, "./integration/node_miner_1/data"))
if cmd, err = utils.RunCommandNB(
FJ(baseDir, "./bin/cql-minerd.test"),
[]string{"-config", FJ(testWorkingDir, "./integration/node_miner_1/config.yaml"),
"-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/miner1.cover.out"),
"-metric-web", "0.0.0.0:12145",
},
"miner1", testWorkingDir, logDir, false,
); err == nil {
nodeCmds = append(nodeCmds, cmd)
} else {
log.Errorf("start node failed: %v", err)
}
os.RemoveAll(FJ(testWorkingDir, "./integration/node_miner_2/data"))
if cmd, err = utils.RunCommandNB(
FJ(baseDir, "./bin/cql-minerd.test"),
[]string{"-config", FJ(testWorkingDir, "./integration/node_miner_2/config.yaml"),
"-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/miner2.cover.out"),
"-metric-web", "0.0.0.0:12146",
},
"miner2", testWorkingDir, logDir, false,
); err == nil {
nodeCmds = append(nodeCmds, cmd)
} else {
log.Errorf("start node failed: %v", err)
}
}
func startNodesProfile(bypassSign bool) {
ctx := context.Background()
bypassArg := ""
if bypassSign {
bypassArg = "-bypass-signature"
}
// wait for ports to be available
var err error
err = utils.WaitForPorts(ctx, "127.0.0.1", []int{
2144,
2145,
2146,
}, time.Millisecond*200)
if err != nil {
log.Fatalf("wait for port ready timeout: %v", err)
}
err = utils.WaitForPorts(ctx, "127.0.0.1", []int{
3122,
3121,
3120,
}, time.Millisecond*200)
if err != nil {
log.Fatalf("wait for port ready timeout: %v", err)
}
// start 3bps
var cmd *utils.CMD
if cmd, err = utils.RunCommandNB(
FJ(baseDir, "./bin/cqld"),
[]string{"-config", FJ(testWorkingDir, "./integration/node_0/config.yaml"),
bypassArg,
},
"leader", testWorkingDir, logDir, false,
); err == nil {
nodeCmds = append(nodeCmds, cmd)
} else {
log.Errorf("start node failed: %v", err)
}
if cmd, err = utils.RunCommandNB(
FJ(baseDir, "./bin/cqld"),
[]string{"-config", FJ(testWorkingDir, "./integration/node_1/config.yaml"),
bypassArg,
},
"follower1", testWorkingDir, logDir, false,
); err == nil {
nodeCmds = append(nodeCmds, cmd)
} else {
log.Errorf("start node failed: %v", err)
}
if cmd, err = utils.RunCommandNB(
FJ(baseDir, "./bin/cqld"),
[]string{"-config", FJ(testWorkingDir, "./integration/node_2/config.yaml"),
bypassArg,
},
"follower2", testWorkingDir, logDir, false,
); err == nil {
nodeCmds = append(nodeCmds, cmd)
} else {
log.Errorf("start node failed: %v", err)
}
time.Sleep(time.Second * 3)
// start 3miners
os.RemoveAll(FJ(testWorkingDir, "./integration/node_miner_0/data"))
if cmd, err = utils.RunCommandNB(
FJ(baseDir, "./bin/cql-minerd"),
[]string{"-config", FJ(testWorkingDir, "./integration/node_miner_0/config.yaml"),
"-cpu-profile", FJ(baseDir, "./cmd/cql-minerd/miner0.profile"),
//"-trace-file", FJ(baseDir, "./cmd/cql-minerd/miner0.trace"),
"-metric-graphite-server", "192.168.2.100:2003",
"-profile-server", "0.0.0.0:8080",
"-metric-log",
bypassArg,
},
"miner0", testWorkingDir, logDir, false,
); err == nil {
nodeCmds = append(nodeCmds, cmd)
} else {
log.Errorf("start node failed: %v", err)
}
os.RemoveAll(FJ(testWorkingDir, "./integration/node_miner_1/data"))
if cmd, err = utils.RunCommandNB(
FJ(baseDir, "./bin/cql-minerd"),
[]string{"-config", FJ(testWorkingDir, "./integration/node_miner_1/config.yaml"),
"-cpu-profile", FJ(baseDir, "./cmd/cql-minerd/miner1.profile"),
//"-trace-file", FJ(baseDir, "./cmd/cql-minerd/miner1.trace"),
"-metric-graphite-server", "192.168.2.100:2003",
"-profile-server", "0.0.0.0:8081",
"-metric-log",
bypassArg,
},
"miner1", testWorkingDir, logDir, false,
); err == nil {
nodeCmds = append(nodeCmds, cmd)
} else {
log.Errorf("start node failed: %v", err)
}
os.RemoveAll(FJ(testWorkingDir, "./integration/node_miner_2/data"))
if cmd, err = utils.RunCommandNB(
FJ(baseDir, "./bin/cql-minerd"),
[]string{"-config", FJ(testWorkingDir, "./integration/node_miner_2/config.yaml"),
"-cpu-profile", FJ(baseDir, "./cmd/cql-minerd/miner2.profile"),
//"-trace-file", FJ(baseDir, "./cmd/cql-minerd/miner2.trace"),
"-metric-graphite-server", "192.168.2.100:2003",
"-profile-server", "0.0.0.0:8082",
"-metric-log",
bypassArg,
},
"miner2", testWorkingDir, logDir, false,
); err == nil {
nodeCmds = append(nodeCmds, cmd)
} else {
log.Errorf("start node failed: %v", err)
}
}
func stopNodes() {
var wg sync.WaitGroup
for _, nodeCmd := range nodeCmds {
wg.Add(1)
go func(thisCmd *utils.CMD) {
defer wg.Done()
thisCmd.Cmd.Process.Signal(syscall.SIGTERM)
thisCmd.Cmd.Wait()
grepRace := exec.Command("/bin/sh", "-c", "grep -a -A 50 'DATA RACE' "+thisCmd.LogPath)
out, _ := grepRace.Output()
if len(out) > 2 {
log.Fatalf("DATA RACE in %s :\n%s", thisCmd.Cmd.Path, string(out))
}
}(nodeCmd)
}
wg.Wait()
}
func TestFullProcess(t *testing.T) {
log.SetLevel(log.DebugLevel)
Convey("test full process", t, func(c C) {
startNodes()
defer stopNodes()
var err error
time.Sleep(10 * time.Second)
So(err, ShouldBeNil)
err = client.Init(FJ(testWorkingDir, "./integration/node_c/config.yaml"), []byte(""))
So(err, ShouldBeNil)
var (
clientPrivKey *asymmetric.PrivateKey
clientAddr proto.AccountAddress
minersPrivKeys = make([]*asymmetric.PrivateKey, 3)
minersAddrs = make([]proto.AccountAddress, 3)
)
// get miners' private keys
minersPrivKeys[0], err = kms.LoadPrivateKey(FJ(testWorkingDir, "./integration/node_miner_0/private.key"), []byte{})
So(err, ShouldBeNil)
minersPrivKeys[1], err = kms.LoadPrivateKey(FJ(testWorkingDir, "./integration/node_miner_1/private.key"), []byte{})
So(err, ShouldBeNil)
minersPrivKeys[2], err = kms.LoadPrivateKey(FJ(testWorkingDir, "./integration/node_miner_2/private.key"), []byte{})
So(err, ShouldBeNil)
clientPrivKey, err = kms.LoadPrivateKey(FJ(testWorkingDir, "./integration/node_c/private.key"), []byte{})
So(err, ShouldBeNil)
// get miners' addr
minersAddrs[0], err = crypto.PubKeyHash(minersPrivKeys[0].PubKey())
So(err, ShouldBeNil)
minersAddrs[1], err = crypto.PubKeyHash(minersPrivKeys[1].PubKey())
So(err, ShouldBeNil)
minersAddrs[2], err = crypto.PubKeyHash(minersPrivKeys[2].PubKey())
So(err, ShouldBeNil)
clientAddr, err = crypto.PubKeyHash(clientPrivKey.PubKey())
So(err, ShouldBeNil)
// client send create database transaction
meta := client.ResourceMeta{
TargetMiners: minersAddrs,
Node: uint16(len(minersAddrs)),
IsolationLevel: int(sql.LevelReadUncommitted),
GasPrice: testGasPrice,
AdvancePayment: testAdvancePayment,
}
// wait for chain service
var ctx1, cancel1 = context.WithTimeout(context.Background(), 1*time.Minute)
defer cancel1()
err = test.WaitBPChainService(ctx1, 3*time.Second)
if err != nil {
t.Fatalf("wait for chain service failed: %v", err)
}
_, dsn, err := client.Create(meta)
So(err, ShouldBeNil)
dsnCfg, err := client.ParseDSN(dsn)
So(err, ShouldBeNil)
// create dsn
log.Infof("the created database dsn is %v", dsn)
db, err := sql.Open("covenantsql", dsn)
So(err, ShouldBeNil)
// wait for creation
var ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
err = client.WaitDBCreation(ctx, dsn)
So(err, ShouldBeNil)
// check sqlchain profile exist
dbID := proto.DatabaseID(dsnCfg.DatabaseID)
profileReq := &types.QuerySQLChainProfileReq{}
profileResp := &types.QuerySQLChainProfileResp{}
profileReq.DBID = dbID
err = rpc.RequestBP(route.MCCQuerySQLChainProfile.String(), profileReq, profileResp)
So(err, ShouldBeNil)
profile := profileResp.Profile
So(profile.Address.DatabaseID(), ShouldEqual, dbID)
So(profile.Owner.String(), ShouldEqual, clientAddr.String())
So(profile.TokenType, ShouldEqual, types.Particle)
minersMap := make(map[proto.AccountAddress]bool)
for _, miner := range profile.Miners {
minersMap[miner.Address] = true
}
for _, miner := range minersAddrs {
So(minersMap[miner], ShouldBeTrue)
}
usersMap := make(map[proto.AccountAddress]types.PermStat)
for _, user := range profile.Users {
usersMap[user.Address] = types.PermStat{
Permission: user.Permission,
Status: user.Status,
}
}
permStat, ok := usersMap[clientAddr]
So(ok, ShouldBeTrue)
So(permStat.Permission, ShouldNotBeNil)
So(permStat.Permission.Role, ShouldEqual, types.Admin)
So(permStat.Status, ShouldEqual, types.Normal)
_, err = db.Exec("CREATE TABLE test (test int)")
So(err, ShouldBeNil)
_, err = db.Exec("INSERT INTO test VALUES(?)", 4)
So(err, ShouldBeNil)
row := db.QueryRow("SELECT * FROM test LIMIT 1")
var result int
err = row.Scan(&result)
So(err, ShouldBeNil)
So(result, ShouldEqual, 4)
// test timestamp fields
_, err = db.Exec("CREATE TABLE test_time (test timestamp)")
So(err, ShouldBeNil)
_, err = db.Exec("INSERT INTO test_time VALUES(DATE('NOW'))")
So(err, ShouldBeNil)
row = db.QueryRow("SELECT * FROM test_time LIMIT 1")
var tmResult time.Time
err = row.Scan(&tmResult)
So(err, ShouldBeNil)
So(tmResult, ShouldHappenBefore, time.Now())
// test string fields
row = db.QueryRow("SELECT name FROM sqlite_master WHERE type = ? LIMIT 1", "table")
var resultString string
err = row.Scan(&resultString)
So(err, ShouldBeNil)
So(resultString, ShouldBeIn, []string{"test", "test_time"})
// try raw bytes
_, err = db.Exec("CREATE TABLE test_raw (test blob)")
So(err, ShouldBeNil)
_, err = db.Exec("INSERT INTO test_raw VALUES(?)", []byte("ha\001ppy"))
So(err, ShouldBeNil)
row = db.QueryRow("SELECT * FROM test_raw LIMIT 1")
var resultBytes []byte
err = row.Scan(&resultBytes)
So(err, ShouldBeNil)
So(resultBytes, ShouldResemble, []byte("ha\001ppy"))
SkipConvey("test query cancel", FailureContinues, func(c C) {
/* test cancel write query */
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
defer wg.Done()
db.Exec("INSERT INTO test VALUES(sleep(10000000000))")
}()
time.Sleep(time.Second)
wg.Add(1)
go func() {
defer wg.Done()
var err error
_, err = db.Exec("UPDATE test SET test = 100;")
// should be canceled
c.So(err, ShouldNotBeNil)
}()
time.Sleep(time.Second)
for _, n := range conf.GConf.KnownNodes {
if n.Role == proto.Miner {
rpc.GetSessionPoolInstance().Remove(n.ID)
}
}
time.Sleep(time.Second)
// ensure connection
db.Query("SELECT 1")
// test before write operation complete
var result int
err = db.QueryRow("SELECT * FROM test WHERE test = 4 LIMIT 1").Scan(&result)
c.So(err, ShouldBeNil)
c.So(result, ShouldEqual, 4)
wg.Wait()
/* test cancel read query */
go func() {
_, err = db.Query("SELECT * FROM test WHERE test = sleep(10000000000)")
// call write query using read query interface
//_, err = db.Query("INSERT INTO test VALUES(sleep(10000000000))")
c.So(err, ShouldNotBeNil)
}()
time.Sleep(time.Second)
for _, n := range conf.GConf.KnownNodes {
if n.Role == proto.Miner {
rpc.GetSessionPoolInstance().Remove(n.ID)
}
}
time.Sleep(time.Second)
// ensure connection
db.Query("SELECT 1")
/* test long running write query */
row = db.QueryRow("SELECT * FROM test WHERE test = 10000000000 LIMIT 1")
err = row.Scan(&result)
c.So(err, ShouldBeNil)
c.So(result, ShouldEqual, 10000000000)
})
ctx2, ccl2 := context.WithTimeout(context.Background(), 3*time.Minute)
defer ccl2()
err = waitProfileChecking(ctx2, 3*time.Second, dbID, func(profile *types.SQLChainProfile) bool {
for _, user := range profile.Users {
if user.AdvancePayment != testAdvancePayment {
return true
}
}
return false
})
So(err, ShouldBeNil)
ctx3, ccl3 := context.WithTimeout(context.Background(), 1*time.Minute)
defer ccl3()
err = waitProfileChecking(ctx3, 3*time.Second, dbID, func(profile *types.SQLChainProfile) bool {
getIncome := false
for _, miner := range profile.Miners {
getIncome = getIncome || (miner.PendingIncome != 0 || miner.ReceivedIncome != 0)
}
return getIncome
})
So(err, ShouldBeNil)
err = db.Close()
So(err, ShouldBeNil)
// test query from follower node
dsnCfgMix := *dsnCfg
dsnCfgMix.UseLeader = true
dsnCfgMix.UseFollower = true
dbMix, err := sql.Open("covenantsql", dsnCfgMix.FormatDSN())
So(err, ShouldBeNil)
defer dbMix.Close()
result = 0
err = dbMix.QueryRow("SELECT * FROM test LIMIT 1").Scan(&result)
So(err, ShouldBeNil)
So(result, ShouldEqual, 4)
_, err = dbMix.Exec("INSERT INTO test VALUES(2)")
So(err, ShouldBeNil)
// test query from follower only
dsnCfgFollower := *dsnCfg
dsnCfgFollower.UseLeader = false
dsnCfgFollower.UseFollower = true
dbFollower, err := sql.Open("covenantsql", dsnCfgFollower.FormatDSN())
So(err, ShouldBeNil)
defer dbFollower.Close()
err = dbFollower.QueryRow("SELECT * FROM test LIMIT 1").Scan(&result)
So(err, ShouldBeNil)
So(result, ShouldEqual, 4)
_, err = dbFollower.Exec("INSERT INTO test VALUES(2)")
So(err, ShouldNotBeNil)
// TODO(lambda): Drop database
})
}
func waitProfileChecking(ctx context.Context, period time.Duration, dbID proto.DatabaseID,
checkFunc func(profile *types.SQLChainProfile) bool) (err error) {
var (
ticker = time.NewTicker(period)
req = &types.QuerySQLChainProfileReq{}
resp = &types.QuerySQLChainProfileResp{}
)
defer ticker.Stop()
req.DBID = dbID
for {
select {
case <-ticker.C:
err = rpc.RequestBP(route.MCCQuerySQLChainProfile.String(), req, resp)
if err == nil {
if checkFunc(&resp.Profile) {
return
}
log.WithFields(log.Fields{
"dbID": resp.Profile.Address,
"num_of_user": len(resp.Profile.Users),
}).Debugf("get profile but failed to check in waitProfileChecking")
}
case <-ctx.Done():
err = ctx.Err()
return
}
}
}
const ROWSTART = 1000000
const TABLENAME = "insert_table0"
func prepareBenchTable(db *sql.DB) {
_, err := db.Exec("DROP TABLE IF EXISTS " + TABLENAME + ";")
So(err, ShouldBeNil)
_, err = db.Exec(`CREATE TABLE ` + TABLENAME + ` ("k" INT, "v1" TEXT, PRIMARY KEY("k"))`)
So(err, ShouldBeNil)
_, err = db.Exec("REPLACE INTO "+TABLENAME+" VALUES(?, ?)", ROWSTART-1, "test")
So(err, ShouldBeNil)
}
func cleanBenchTable(db *sql.DB) {
_, err := db.Exec("DELETE FROM "+TABLENAME+" WHERE k >= ?", ROWSTART)
So(err, ShouldBeNil)
}
func makeBenchName(trailings ...string) string {
var parts = make([]string, 0, 3+len(trailings))
parts = append(parts, fmt.Sprintf("%d-Miner", benchMinerCount))
if benchBypassSignature {
parts = append(parts, "BypassSignature")
}
if benchEventualConsistency {
parts = append(parts, "EventualConsistency")
}
parts = append(parts, trailings...)
return strings.Join(parts, "-")
}
func benchDB(b *testing.B, db *sql.DB, createDB bool) {
var err error
if createDB {
prepareBenchTable(db)
}
cleanBenchTable(db)
var i int64
i = -1
db.SetMaxIdleConns(256)
b.Run(makeBenchName("INSERT"), func(b *testing.B) {
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
ii := atomic.AddInt64(&i, 1)
index := ROWSTART + ii
//start := time.Now()
ctx, task := trace.NewTask(context.Background(), "BenchInsert")
_, err = db.ExecContext(ctx, "INSERT INTO "+TABLENAME+" ( k, v1 ) VALUES"+
"(?, ?)", index, ii,
)
//log.Warnf("insert index = %d %v", index, time.Since(start))
for err != nil && err.Error() == sqlite3.ErrBusy.Error() {
// retry forever
log.Warnf("index = %d retried", index)
_, err = db.ExecContext(ctx, "INSERT INTO "+TABLENAME+" ( k, v1 ) VALUES"+
"(?, ?)", index, ii,
)
}
if err != nil {
b.Fatal(err)
}
task.End()
}
})
})
rowCount := db.QueryRow("SELECT COUNT(1) FROM " + TABLENAME)
var count int64
err = rowCount.Scan(&count)
if err != nil {
b.Fatal(err)
}
log.Warnf("row Count: %v", count)
b.Run(makeBenchName("SELECT"), func(b *testing.B) {
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
var index int64
if createDB { //only data by insert
index = rand.Int63n(count-1) + ROWSTART
} else { //has data before ROWSTART
index = rand.Int63n(count - 1)
}
ctx, task := trace.NewTask(context.Background(), "BenchSelect")
//log.Debugf("index = %d", index)
//start := time.Now()
row := db.QueryRowContext(ctx, "SELECT v1 FROM "+TABLENAME+" WHERE k = ? LIMIT 1", index)
//log.Warnf("select index = %d %v", index, time.Since(start))
var result []byte
err = row.Scan(&result)
if err != nil || (len(result) == 0) {
log.Errorf("index = %d", index)
b.Fatal(err)
}
task.End()
}
})
})
//row := db.QueryRow("SELECT nonIndexedColumn FROM test LIMIT 1")
//var result int
//err = row.Scan(&result)
//So(err, ShouldBeNil)
//So(result, ShouldEqual, 4)
err = db.Close()
So(err, ShouldBeNil)
routineCount := runtime.NumGoroutine()
if routineCount > 500 {
b.Errorf("go routine count: %d", routineCount)
} else {
log.Infof("go routine count: %d", routineCount)
}
}
func benchMiner(b *testing.B, minerCount uint16) {
log.Warnf("benchmark for %d Miners, BypassSignature: %v", minerCount, benchBypassSignature)
asymmetric.BypassSignature = benchBypassSignature
if minerCount > 0 {
startNodesProfile(benchBypassSignature)
utils.WaitToConnect(context.Background(), "127.0.0.1", []int{
2144,
2145,
2146,
3122,
3121,
3120,
}, 2*time.Second)
time.Sleep(time.Second)
}
// Create temp directory
testDataDir, err := ioutil.TempDir(testWorkingDir, "covenantsql")
if err != nil {
panic(err)
}
defer os.RemoveAll(testDataDir)
clientConf := FJ(testWorkingDir, "./integration/node_c/config.yaml")
tempConf := FJ(testDataDir, "config.yaml")
clientKey := FJ(testWorkingDir, "./integration/node_c/private.key")
tempKey := FJ(testDataDir, "private.key")
utils.CopyFile(clientConf, tempConf)
utils.CopyFile(clientKey, tempKey)
err = client.Init(tempConf, []byte(""))
So(err, ShouldBeNil)
dsnFile := FJ(baseDir, "./cmd/cql-minerd/.dsn")
var dsn string
if minerCount > 0 {
// create
meta := client.ResourceMeta{
Node: minerCount,
UseEventualConsistency: benchEventualConsistency,
IsolationLevel: int(sql.LevelReadUncommitted),
}
// wait for chain service
var ctx1, cancel1 = context.WithTimeout(context.Background(), 1*time.Minute)
defer cancel1()
err = test.WaitBPChainService(ctx1, 3*time.Second)
if err != nil {
b.Fatalf("wait for chain service failed: %v", err)
}
_, dsn, err = client.Create(meta)
So(err, ShouldBeNil)
log.Infof("the created database dsn is %v", dsn)
err = ioutil.WriteFile(dsnFile, []byte(dsn), 0666)
if err != nil {
log.Errorf("write .dsn failed: %v", err)
}
defer os.Remove(dsnFile)
} else {
dsn = os.Getenv("DSN")
}
if benchMinerDirectRPC {
dsnCfg, err := client.ParseDSN(dsn)
So(err, ShouldBeNil)
dsnCfg.UseDirectRPC = true
dsn = dsnCfg.FormatDSN()
}
db, err := sql.Open("covenantsql", dsn)
So(err, ShouldBeNil)
// wait for creation
var ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
err = client.WaitDBCreation(ctx, dsn)
So(err, ShouldBeNil)
benchDB(b, db, minerCount > 0)
_, err = client.Drop(dsn)
So(err, ShouldBeNil)
time.Sleep(5 * time.Second)
stopNodes()
}
func BenchmarkSQLite(b *testing.B) {
var db *sql.DB
var createDB bool
millionFile := fmt.Sprintf("/data/sqlite_bigdata/insert_multi_sqlitedb0_1_%v", ROWSTART)
f, err := os.Open(millionFile)
if err != nil && os.IsNotExist(err) {
os.Remove("./foo.db")
defer os.Remove("./foo.db")
db, err = sql.Open("sqlite3", "./foo.db?_journal_mode=WAL&_synchronous=NORMAL&cache=shared")
if err != nil {
log.Fatal(err)
}
createDB = true
defer db.Close()
} else {
f.Close()
db, err = sql.Open("sqlite3", millionFile+"?_journal_mode=WAL&_synchronous=NORMAL&cache=shared")
log.Infof("testing sqlite3 million data exist file %v", millionFile)
if err != nil {
log.Fatal(err)
}
createDB = false
defer db.Close()
}
Convey("bench SQLite", b, func() {
benchDB(b, db, createDB)
})
}
func benchOutsideMiner(b *testing.B, minerCount uint16, confDir string) {
benchOutsideMinerWithTargetMinerList(b, minerCount, nil, confDir)
}
func benchOutsideMinerWithTargetMinerList(
b *testing.B, minerCount uint16, targetMiners []proto.AccountAddress, confDir string,
) {
log.Warnf("benchmark %v for %d Miners:", confDir, minerCount)
// Create temp directory
testDataDir, err := ioutil.TempDir(testWorkingDir, "covenantsql")
if err != nil {
panic(err)
}
defer os.RemoveAll(testDataDir)
clientConf := FJ(confDir, "config.yaml")
tempConf := FJ(testDataDir, "config.yaml")
clientKey := FJ(confDir, "private.key")
tempKey := FJ(testDataDir, "private.key")
utils.CopyFile(clientConf, tempConf)
utils.CopyFile(clientKey, tempKey)
err = client.Init(tempConf, []byte(""))
So(err, ShouldBeNil)
for _, node := range conf.GConf.KnownNodes {
if node.Role == proto.Leader {
log.Infof("Benching started on bp addr: %v", node.Addr)
break
}
}
dsnFile := FJ(baseDir, "./cmd/cql-minerd/.dsn")
var dsn string
if minerCount > 0 {
// create
meta := client.ResourceMeta{
TargetMiners: targetMiners,
Node: minerCount,
UseEventualConsistency: benchEventualConsistency,
IsolationLevel: int(sql.LevelReadUncommitted),
AdvancePayment: testAdvancePayment,
}
// wait for chain service
var ctx1, cancel1 = context.WithTimeout(context.Background(), 1*time.Minute)
defer cancel1()
err = test.WaitBPChainService(ctx1, 3*time.Second)
if err != nil {
b.Fatalf("wait for chain service failed: %v", err)
}
_, dsn, err = client.Create(meta)
So(err, ShouldBeNil)
log.Infof("the created database dsn is %v", dsn)
// wait for creation
var ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute)
defer cancel()
err = client.WaitDBCreation(ctx, dsn)
So(err, ShouldBeNil)
err = ioutil.WriteFile(dsnFile, []byte(dsn), 0666)
if err != nil {
log.Errorf("write .dsn failed: %v", err)
}
defer os.Remove(dsnFile)
defer client.Drop(dsn)
} else {
dsn = os.Getenv("DSN")
}
db, err := sql.Open("covenantsql", dsn)
So(err, ShouldBeNil)
benchDB(b, db, minerCount > 0)
}
func BenchmarkClientOnly(b *testing.B) {
Convey("bench three node", b, func() {
benchMiner(b, 0)
})
}
func BenchmarkMiner(b *testing.B) {
Convey(fmt.Sprintf("bench %d node(s)", benchMinerCount), b, func() {
benchMiner(b, uint16(benchMinerCount))
})
}
func BenchmarkMinerGNTE(b *testing.B) {
Convey(fmt.Sprintf("bench GNTE %d node(s)", benchMinerCount), b, func() {
benchOutsideMiner(b, uint16(benchMinerCount), gnteConfDir)
})
}
func BenchmarkTestnetMiner(b *testing.B) {
Convey(fmt.Sprintf("bench testnet %d node(s)", benchMinerCount), b, func() {
benchOutsideMiner(b, uint16(benchMinerCount), testnetConfDir)
})
}
func BenchmarkCustomMiner(b *testing.B) {
Convey(fmt.Sprintf("bench custom %d node(s)", benchMinerCount), b, func() {
benchOutsideMiner(b, uint16(benchMinerCount), benchMinerConfigDir)
})
}
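// Hypothetical invocation sketch (the benchmark names come from this file; the
// DSN value, -benchtime choice, and package path are placeholders, not taken
// from this repo's docs):
//   DSN="covenantsql://<database-id>" go test -bench=BenchmarkClientOnly -benchtime=10x ./cmd/cql-minerd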
| [
"\"DSN\"",
"\"DSN\""
]
| []
| [
"DSN"
]
| [] | ["DSN"] | go | 1 | 0 | |
docs/python_docs/python/scripts/conf.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
import sys, os, re, subprocess
from recommonmark.parser import CommonMarkParser
from recommonmark.transform import AutoStructify
# -- mock out modules
MOCK_MODULES = ['scipy', 'scipy.sparse', 'sklearn']
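# Sketch of how MOCK_MODULES is conventionally consumed (this file only
# declares the list; the hook below is an assumption, not active code here):
# from unittest.mock import MagicMock
# sys.modules.update((mod_name, MagicMock()) for mod_name in MOCK_MODULES)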
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.5.6'
# General information about the project.
project = u'Apache MXNet'
author = u'%s developers' % project
copyright = u'2015-2019, %s' % author
github_doc_root = 'https://github.com/apache/incubator-mxnet/tree/master/docs/'
doc_root = 'https://mxnet.apache.org/'
# add markdown parser
source_parsers = {
'.md': CommonMarkParser,
}
# Version information.
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
# 'sphinxcontrib.fulltoc',
'nbsphinx',
'IPython.sphinxext.ipython_console_highlighting',
# 'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
# 'sphinx.ext.mathjax',
# 'sphinx.ext.viewcode',
'breathe',
# 'mxdoc'
]
doctest_global_setup = '''
import mxnet as mx
'''
autodoc_member_order = 'bysource'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = ['.rst', '.ipynb', '.md', '.Rmd']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Version and release are passed from CMake.
#version = None
# The full version, including alpha/beta/rc tags.
#release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['templates',
# 'api',
'guide/modules/others', 'guide/guide', 'blog']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
suppress_warnings = [
'image.nonlocal_uri',
]
# -- Options for HTML output ---------------------------------------------------
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../../themes/mx-theme']
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'mxtheme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'primary_color': 'blue',
'accent_color': 'deep_orange',
'show_footer': True,
'relative_url': os.environ.get('SPHINX_RELATIVE_URL', '/')
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '../../_static/mxnet_logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '../../_static/mxnet-icon.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['../../_static']
html_css_files = [
'mxnet.css',
]
html_js_files = [
]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': 'relations.html'
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'formatdoc'
nbsphinx_execute = 'never'
# let the source file format to be xxx.ipynb instead of xxx.ipynb.txt
html_sourcelink_suffix = ''
def setup(app):
app.add_transform(AutoStructify)
app.add_config_value('recommonmark_config', {
}, True)
app.add_javascript('google_analytics.js')
import mxtheme
app.add_directive('card', mxtheme.CardDirective)
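# Typical build invocation against this configuration (a sketch; the actual
# wrapper scripts or Makefile targets are not part of this file):
#   sphinx-build -b html <dir-containing-this-conf.py> <output-dir>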
| []
| []
| [
"SPHINX_RELATIVE_URL"
]
| [] | ["SPHINX_RELATIVE_URL"] | python | 1 | 0 | |
cmd/mavenExecute_generated.go | // Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/spf13/cobra"
)
type mavenExecuteOptions struct {
PomPath string `json:"pomPath,omitempty"`
Goals []string `json:"goals,omitempty"`
Defines []string `json:"defines,omitempty"`
Flags []string `json:"flags,omitempty"`
ReturnStdout bool `json:"returnStdout,omitempty"`
ProjectSettingsFile string `json:"projectSettingsFile,omitempty"`
GlobalSettingsFile string `json:"globalSettingsFile,omitempty"`
M2Path string `json:"m2Path,omitempty"`
LogSuccessfulMavenTransfers bool `json:"logSuccessfulMavenTransfers,omitempty"`
}
// MavenExecuteCommand This step allows to run maven commands
func MavenExecuteCommand() *cobra.Command {
metadata := mavenExecuteMetadata()
var stepConfig mavenExecuteOptions
var startTime time.Time
var createMavenExecuteCmd = &cobra.Command{
Use: "mavenExecute",
Short: "This step allows to run maven commands",
Long: `This step runs a maven command based on the parameters provided to the step.`,
PreRunE: func(cmd *cobra.Command, args []string) error {
startTime = time.Now()
log.SetStepName("mavenExecute")
log.SetVerbose(GeneralConfig.Verbose)
err := PrepareConfig(cmd, &metadata, "mavenExecute", &stepConfig, config.OpenPiperFile)
if err != nil {
return err
}
return nil
},
Run: func(cmd *cobra.Command, args []string) {
telemetryData := telemetry.CustomData{}
telemetryData.ErrorCode = "1"
handler := func() {
telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
telemetry.Send(&telemetryData)
}
log.DeferExitHandler(handler)
defer handler()
telemetry.Initialize(GeneralConfig.NoTelemetry, "mavenExecute")
mavenExecute(stepConfig, &telemetryData)
telemetryData.ErrorCode = "0"
},
}
addMavenExecuteFlags(createMavenExecuteCmd, &stepConfig)
return createMavenExecuteCmd
}
func addMavenExecuteFlags(cmd *cobra.Command, stepConfig *mavenExecuteOptions) {
cmd.Flags().StringVar(&stepConfig.PomPath, "pomPath", os.Getenv("PIPER_pomPath"), "Path to the pom file that should be used.")
cmd.Flags().StringSliceVar(&stepConfig.Goals, "goals", []string{}, "Maven goals that should be executed.")
cmd.Flags().StringSliceVar(&stepConfig.Defines, "defines", []string{}, "Additional properties in form of -Dkey=value.")
cmd.Flags().StringSliceVar(&stepConfig.Flags, "flags", []string{}, "Flags to provide when running mvn.")
cmd.Flags().BoolVar(&stepConfig.ReturnStdout, "returnStdout", false, "Returns the output of the maven command for further processing.")
cmd.Flags().StringVar(&stepConfig.ProjectSettingsFile, "projectSettingsFile", os.Getenv("PIPER_projectSettingsFile"), "Path to the mvn settings file that should be used as project settings file.")
cmd.Flags().StringVar(&stepConfig.GlobalSettingsFile, "globalSettingsFile", os.Getenv("PIPER_globalSettingsFile"), "Path to the mvn settings file that should be used as global settings file.")
cmd.Flags().StringVar(&stepConfig.M2Path, "m2Path", os.Getenv("PIPER_m2Path"), "Path to the location of the local repository that should be used.")
cmd.Flags().BoolVar(&stepConfig.LogSuccessfulMavenTransfers, "logSuccessfulMavenTransfers", false, "Configures maven to log successful downloads. This is set to `false` by default to reduce the noise in build logs.")
cmd.MarkFlagRequired("goals")
}
// retrieve step metadata
func mavenExecuteMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "mavenExecute",
Aliases: []config.Alias{},
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Parameters: []config.StepParameters{
{
Name: "pomPath",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STEPS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "goals",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS"},
Type: "[]string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "defines",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "flags",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "returnStdout",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "projectSettingsFile",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "STEPS", "STAGES", "PARAMETERS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "maven/projectSettingsFile"}},
},
{
Name: "globalSettingsFile",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "STEPS", "STAGES", "PARAMETERS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "maven/globalSettingsFile"}},
},
{
Name: "m2Path",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "STEPS", "STAGES", "PARAMETERS"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "maven/m2Path"}},
},
{
Name: "logSuccessfulMavenTransfers",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "STEPS", "STAGES", "PARAMETERS"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{{Name: "maven/logSuccessfulMavenTransfers"}},
},
},
},
},
}
return theMetaData
}
| [
"\"PIPER_pomPath\"",
"\"PIPER_projectSettingsFile\"",
"\"PIPER_globalSettingsFile\"",
"\"PIPER_m2Path\""
]
| []
| [
"PIPER_globalSettingsFile",
"PIPER_m2Path",
"PIPER_projectSettingsFile",
"PIPER_pomPath"
]
| [] | ["PIPER_globalSettingsFile", "PIPER_m2Path", "PIPER_projectSettingsFile", "PIPER_pomPath"] | go | 4 | 0 | |
webapp/python/python/asgi.py | """
ASGI config for python project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'python.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
weblog/wsgi.py | """
WSGI config for weblog project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "weblog.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
containers/api/api/config.py | import os
class BaseConfig:
TESTING = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
SECRET_KEY = os.environ.get("SECRET_KEY")
BCRYPT_LOG_ROUNDS = 4
class DevelopmentConfig(BaseConfig):
pass
class TestingConfig(BaseConfig):
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_TEST_URL")
class ProductionConfig(BaseConfig):
BCRYPT_LOG_ROUNDS = 13
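# Sketch of how these classes are typically selected in an app factory
# ("APP_SETTINGS" and the factory itself are assumptions, not defined here):
#   app.config.from_object(
#       os.environ.get("APP_SETTINGS", "api.config.DevelopmentConfig"))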
| []
| []
| [
"SECRET_KEY",
"DATABASE_URL",
"DATABASE_TEST_URL"
]
| [] | ["SECRET_KEY", "DATABASE_URL", "DATABASE_TEST_URL"] | python | 3 | 0 | |
mbhd-core/src/main/java/org/multibit/hd/core/managers/InstallationManager.java | package org.multibit.hd.core.managers;
import com.google.common.base.Preconditions;
import com.google.common.io.ByteStreams;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.multibit.hd.core.events.ShutdownEvent;
import org.multibit.hd.core.files.SecureFiles;
import org.multibit.hd.core.utils.OSUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Field;
import java.net.URI;
import java.security.Permission;
import java.security.PermissionCollection;
import java.util.Map;
/**
* <p>Manager to provide the following to other core classes:</p>
* <ul>
* <li>Location of the installation directory</li>
* <li>Access the configuration file</li>
* <li>Utility methods eg copying checkpoint files from installation directory</li>
* </ul>
*/
public class InstallationManager {
private static final Logger log = LoggerFactory.getLogger(InstallationManager.class);
/**
* The main MultiBit download site (HTTPS)
*/
public static final URI MBHD_WEBSITE_URI = URI.create("https://multibit.org");
/**
* The main MultiBit help site (HTTPS to allow secure connection without redirect, with fall back to local help on failure)
*/
public static final String MBHD_WEBSITE_HELP_DOMAIN = "https://beta.multibit.org"; // TODO remove beta when release-4.0.0 website pushed to multibit.org
public static final String MBHD_WEBSITE_HELP_BASE = MBHD_WEBSITE_HELP_DOMAIN + "/hd0.1";
public static final String MBHD_APP_NAME = "MultiBitLTC";
public static final String MBHD_PREFIX = "mbhd";
public static final String MBHD_CONFIGURATION_FILE = MBHD_PREFIX + ".yaml";
public static final String SPV_BLOCKCHAIN_SUFFIX = ".spvchain";
public static final String CHECKPOINTS_SUFFIX = ".checkpoints";
public static final String CA_CERTS_NAME = MBHD_PREFIX + "-cacerts";
/**
* The current application data directory
*/
private static File currentApplicationDataDirectory = null;
/**
* A test flag to allow FEST tests to run efficiently
*/
// This arises from the global nature of the flag - consider deriving it from test classpath presence
@SuppressFBWarnings({"MS_SHOULD_BE_FINAL"})
public static boolean unrestricted = false;
/**
* <p>Handle any shutdown code</p>
*
* @param shutdownType The shutdown type
*/
public static void shutdownNow(ShutdownEvent.ShutdownType shutdownType) {
log.debug("Received shutdown: {}", shutdownType.name());
switch (shutdownType) {
case HARD:
case SOFT:
// Force a reset of the application directory (useful for persistence tests)
currentApplicationDataDirectory = null;
break;
case SWITCH:
// Reset of the current application directory causes problems during
// switch and is not required in normal operation
break;
}
// Reset of the unrestricted field causes problems during FEST tests
}
/**
* @return A reference to where the configuration file should be located
*/
public static File getConfigurationFile() {
return new File(getOrCreateApplicationDataDirectory().getAbsolutePath() + File.separator + MBHD_CONFIGURATION_FILE);
}
/**
* <p>Get the directory for the user's application data, creating if not present</p>
* <p>Checks a few OS-dependent locations first</p>
* <p>For tests (unrestricted mode) this will create a long-lived temporary directory - use reset() to clear in the tearDown() phase</p>
*
* @return A suitable application directory for the OS and if running unit tests (unrestricted mode)
*/
public static File getOrCreateApplicationDataDirectory() {
if (currentApplicationDataDirectory != null) {
return currentApplicationDataDirectory;
}
if (unrestricted) {
try {
log.debug("Unrestricted mode requires a temporary application directory");
// In order to preserve the same behaviour between the test and production environments
// this must be maintained throughout the lifetime of a unit test
// At tearDown() use reset() to clear
currentApplicationDataDirectory = SecureFiles.createTemporaryDirectory();
return currentApplicationDataDirectory;
} catch (IOException e) {
log.error("Failed to create temporary directory", e);
return null;
}
} else {
// Fail safe check for unit tests to avoid overwriting existing configuration file
try {
Class.forName("org.multibit.hd.core.managers.InstallationManagerTest");
throw new IllegalStateException("Cannot run without unrestricted when unit tests are present. You could overwrite live configuration.");
} catch (ClassNotFoundException e) {
// We have passed the fail safe check
}
}
// Check the current working directory for the configuration file
File multibitPropertiesFile = new File(MBHD_CONFIGURATION_FILE);
if (multibitPropertiesFile.exists()) {
return new File(".");
}
final String applicationDataDirectoryName;
// Locations are OS-dependent
if (OSUtils.isWindows()) {
// Windows
applicationDataDirectoryName = System.getenv("APPDATA") + File.separator + MBHD_APP_NAME;
} else if (OSUtils.isMac()) {
// OSX
if ((new File("../../../../" + MBHD_CONFIGURATION_FILE)).exists()) {
applicationDataDirectoryName = new File("../../../..").getAbsolutePath();
} else {
applicationDataDirectoryName = System.getProperty("user.home") + "/Library/Application Support/" + MBHD_APP_NAME;
}
} else {
// Other (probably a Unix variant)
// Keep a clean home directory by prefixing with "."
applicationDataDirectoryName = System.getProperty("user.home") + "/." + MBHD_APP_NAME;
}
log.debug("Application data directory is\n'{}'", applicationDataDirectoryName);
// Create the application data directory if it does not exist
File applicationDataDirectory = new File(applicationDataDirectoryName);
SecureFiles.verifyOrCreateDirectory(applicationDataDirectory);
// Must be OK to be here so set this as the current
currentApplicationDataDirectory = applicationDataDirectory;
return applicationDataDirectory;
}
/**
* Copy the checkpoints file from the MultiBitHD installation to the specified filename
*
* @param destinationCheckpointsFile The sink to receive the source checkpoints file
*/
public static void copyCheckpointsTo(File destinationCheckpointsFile) throws IOException {
Preconditions.checkNotNull(destinationCheckpointsFile, "'checkpointsFile' must be present");
// TODO overwrite if larger/ newer
if (!destinationCheckpointsFile.exists() || destinationCheckpointsFile.length() == 0) {
log.debug("Copying checkpoints to '{}'", destinationCheckpointsFile);
// Work out the source checkpoints (put into the program installation directory by the installer)
File currentWorkingDirectory = new File(".");
File sourceBlockCheckpointsFile = new File(currentWorkingDirectory.getAbsolutePath() + File.separator + MBHD_PREFIX + CHECKPOINTS_SUFFIX);
// Prepare an input stream to the checkpoints
final InputStream sourceCheckpointsStream;
if (sourceBlockCheckpointsFile.exists()) {
// Use the file system
log.debug("Using source checkpoints from working directory.");
sourceCheckpointsStream = new FileInputStream(sourceBlockCheckpointsFile);
} else {
// Use the classpath
log.debug("Using source checkpoints from classpath.");
sourceCheckpointsStream = InstallationManager.class.getResourceAsStream("/mbhd.checkpoints");
}
// Create the output stream
long bytes;
try (FileOutputStream sinkCheckpointsStream = new FileOutputStream(destinationCheckpointsFile)) {
// Copy the checkpoints
bytes = ByteStreams.copy(sourceCheckpointsStream, sinkCheckpointsStream);
// Clean up
sourceCheckpointsStream.close();
sinkCheckpointsStream.flush();
sinkCheckpointsStream.close();
} finally {
if (sourceCheckpointsStream != null) {
sourceCheckpointsStream.close();
}
}
log.debug("New checkpoints are {} bytes in length.", bytes);
if (bytes < 13_000) {
log.warn("Checkpoints are short.");
}
} else {
log.debug("Checkpoints already exist.");
}
}
/**
* Use for testing only (several different test packages use this)
*
* @param currentApplicationDataDirectory the application data directory to use
*/
public static void setCurrentApplicationDataDirectory(File currentApplicationDataDirectory) {
InstallationManager.currentApplicationDataDirectory = currentApplicationDataDirectory;
}
/**
* Do the following, but with reflection to bypass access checks:
*
* JceSecurity.isRestricted = false;
* JceSecurity.defaultPolicy.perms.clear();
* JceSecurity.defaultPolicy.add(CryptoAllPermission.INSTANCE);
*/
public static void removeCryptographyRestrictions() {
if (!isRestrictedCryptography()) {
log.debug("Cryptography restrictions removal not needed");
return;
}
try {
final Class<?> jceSecurity = Class.forName("javax.crypto.JceSecurity");
final Class<?> cryptoPermissions = Class.forName("javax.crypto.CryptoPermissions");
final Class<?> cryptoAllPermission = Class.forName("javax.crypto.CryptoAllPermission");
final Field isRestrictedField = jceSecurity.getDeclaredField("isRestricted");
isRestrictedField.setAccessible(true);
isRestrictedField.set(null, false);
final Field defaultPolicyField = jceSecurity.getDeclaredField("defaultPolicy");
defaultPolicyField.setAccessible(true);
final PermissionCollection defaultPolicy = (PermissionCollection) defaultPolicyField.get(null);
final Field perms = cryptoPermissions.getDeclaredField("perms");
perms.setAccessible(true);
((Map<?, ?>) perms.get(defaultPolicy)).clear();
final Field instance = cryptoAllPermission.getDeclaredField("INSTANCE");
instance.setAccessible(true);
defaultPolicy.add((Permission) instance.get(null));
log.debug("Successfully removed cryptography restrictions");
} catch (final Exception e) {
log.warn("Failed to remove cryptography restrictions", e);
}
}
private static boolean isRestrictedCryptography() {
// This simply matches the Oracle JRE, but not OpenJDK
return "Java(TM) SE Runtime Environment".equals(System.getProperty("java.runtime.name"));
}
}
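// Illustrative startup sequence (a sketch only; the real bootstrap class is not
// part of this file, so this ordering is an assumption):
//   InstallationManager.removeCryptographyRestrictions();
//   File appDir = InstallationManager.getOrCreateApplicationDataDirectory();
//   File configFile = InstallationManager.getConfigurationFile();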
| [
"\"APPDATA\""
]
| []
| [
"APPDATA"
]
| [] | ["APPDATA"] | java | 1 | 0 | |
daemon/daemon.go | package daemon
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"strings"
"sync"
"time"
"github.com/AliyunContainerService/terway/pkg/aliyun"
"github.com/AliyunContainerService/terway/pkg/metric"
"github.com/AliyunContainerService/terway/pkg/pool"
"github.com/AliyunContainerService/terway/pkg/storage"
"github.com/AliyunContainerService/terway/pkg/tracing"
"github.com/AliyunContainerService/terway/rpc"
"github.com/AliyunContainerService/terway/types"
"github.com/containernetworking/cni/libcni"
containertypes "github.com/containernetworking/cni/pkg/types"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
)
const (
daemonModeVPC = "VPC"
daemonModeENIMultiIP = "ENIMultiIP"
daemonModeENIOnly = "ENIOnly"
gcPeriod = 5 * time.Minute
poolCheckPeriod = 10 * time.Minute
conditionFalse = "false"
conditionTrue = "true"
networkServiceName = "default"
tracingKeyName = "name"
tracingKeyDaemonMode = "daemon_mode"
tracingKeyConfigFilePath = "config_file_path"
tracingKeyKubeConfig = "kubeconfig"
tracingKeyMaster = "master"
tracingKeyPendingPodsCount = "pending_pods_count"
commandMapping = "mapping"
cniDefaultPath = "/opt/cni/bin"
// this file is generated from configmap
terwayCNIConf = "/etc/eni/10-terway.conf"
cniExecTimeout = 10 * time.Second
)
type networkService struct {
daemonMode string
configFilePath string
kubeConfig string
master string
k8s Kubernetes
resourceDB storage.Storage
vethResMgr ResourceManager
eniResMgr ResourceManager
eniIPResMgr ResourceManager
eipResMgr ResourceManager
//networkResourceMgr ResourceManager
mgrForResource map[string]ResourceManager
pendingPods map[string]interface{}
pendingPodsLock sync.RWMutex
sync.RWMutex
cniBinPath string
rpc.UnimplementedTerwayBackendServer
}
var _ rpc.TerwayBackendServer = (*networkService)(nil)
func (networkService *networkService) getResourceManagerForRes(resType string) ResourceManager {
return networkService.mgrForResource[resType]
}
// getPodResource returns the pod's resource relation from the db, or an empty PodResources if none is stored.
func (networkService *networkService) getPodResource(info *podInfo) (PodResources, error) {
obj, err := networkService.resourceDB.Get(podInfoKey(info.Namespace, info.Name))
if err == nil {
return obj.(PodResources), nil
}
if err == storage.ErrNotFound {
return PodResources{}, nil
}
return PodResources{}, err
}
func (networkService *networkService) deletePodResource(info *podInfo) error {
key := podInfoKey(info.Namespace, info.Name)
return networkService.resourceDB.Delete(key)
}
func (networkService *networkService) allocateVeth(ctx *networkContext, old *PodResources) (*types.Veth, error) {
oldVethRes := old.GetResourceItemByType(types.ResourceTypeVeth)
oldVethID := ""
if old.PodInfo != nil {
if len(oldVethRes) == 0 {
ctx.Log().Debugf("veth for pod %s is zero", podInfoKey(old.PodInfo.Namespace, old.PodInfo.Name))
} else if len(oldVethRes) > 1 {
ctx.Log().Warnf("veth for pod %s is zero", podInfoKey(old.PodInfo.Namespace, old.PodInfo.Name))
} else {
oldVethID = oldVethRes[0].ID
}
}
res, err := networkService.vethResMgr.Allocate(ctx, oldVethID)
if err != nil {
return nil, err
}
return res.(*types.Veth), nil
}
func (networkService *networkService) allocateENI(ctx *networkContext, old *PodResources) (*types.ENI, error) {
oldENIRes := old.GetResourceItemByType(types.ResourceTypeENI)
oldENIID := ""
if old.PodInfo != nil {
if len(oldENIRes) == 0 {
ctx.Log().Debugf("eniip for pod %s is zero", podInfoKey(old.PodInfo.Namespace, old.PodInfo.Name))
} else if len(oldENIRes) > 1 {
ctx.Log().Warnf("eniip for pod %s more than one", podInfoKey(old.PodInfo.Namespace, old.PodInfo.Name))
} else {
oldENIID = oldENIRes[0].ID
}
}
res, err := networkService.eniResMgr.Allocate(ctx, oldENIID)
if err != nil {
return nil, err
}
return res.(*types.ENI), nil
}
func (networkService *networkService) allocateENIMultiIP(ctx *networkContext, old *PodResources) (*types.ENIIP, error) {
oldENIIPRes := old.GetResourceItemByType(types.ResourceTypeENIIP)
oldENIIPID := ""
if old.PodInfo != nil {
if len(oldENIIPRes) == 0 {
ctx.Log().Debugf("eniip for pod %s is zero", podInfoKey(old.PodInfo.Namespace, old.PodInfo.Name))
} else if len(oldENIIPRes) > 1 {
ctx.Log().Warnf("eniip for pod %s more than one", podInfoKey(old.PodInfo.Namespace, old.PodInfo.Name))
} else {
oldENIIPID = oldENIIPRes[0].ID
}
}
res, err := networkService.eniIPResMgr.Allocate(ctx, oldENIIPID)
if err != nil {
return nil, err
}
return res.(*types.ENIIP), nil
}
func (networkService *networkService) allocateEIP(ctx *networkContext, old *PodResources) (*types.EIP, error) {
oldEIPRes := old.GetResourceItemByType(types.ResourceTypeEIP)
oldEIPID := ""
if old.PodInfo != nil {
if len(oldEIPRes) == 0 {
ctx.Log().Debugf("eip for pod %s is zero", podInfoKey(old.PodInfo.Namespace, old.PodInfo.Name))
} else if len(oldEIPRes) > 1 {
ctx.Log().Warnf("eip for pod %s more than one", podInfoKey(old.PodInfo.Namespace, old.PodInfo.Name))
} else {
oldEIPID = oldEIPRes[0].ID
}
}
res, err := networkService.eipResMgr.Allocate(ctx, oldEIPID)
if err != nil {
return nil, err
}
return res.(*types.EIP), nil
}
func (networkService *networkService) AllocIP(ctx context.Context, r *rpc.AllocIPRequest) (*rpc.AllocIPReply, error) {
log.Infof("alloc ip request: %+v", r)
networkService.pendingPodsLock.Lock()
_, ok := networkService.pendingPods[podInfoKey(r.K8SPodNamespace, r.K8SPodName)]
if !ok {
networkService.pendingPods[podInfoKey(r.K8SPodNamespace, r.K8SPodName)] = struct{}{}
networkService.pendingPodsLock.Unlock()
defer func() {
networkService.pendingPodsLock.Lock()
delete(networkService.pendingPods, podInfoKey(r.K8SPodNamespace, r.K8SPodName))
networkService.pendingPodsLock.Unlock()
}()
} else {
networkService.pendingPodsLock.Unlock()
return nil, fmt.Errorf("pod %s/%s resource processing", r.K8SPodNamespace, r.K8SPodName)
}
networkService.RLock()
defer networkService.RUnlock()
var (
start = time.Now()
err error
)
defer func() {
metric.RPCLatency.WithLabelValues("AllocIP", fmt.Sprint(err != nil)).Observe(metric.MsSince(start))
}()
// 0. Get pod Info
podinfo, err := networkService.k8s.GetPod(r.K8SPodNamespace, r.K8SPodName)
if err != nil {
return nil, errors.Wrapf(err, "error get pod info for: %+v", r)
}
// 1. Init Context
networkContext := &networkContext{
Context: ctx,
resources: []ResourceItem{},
pod: podinfo,
k8sService: networkService.k8s,
}
allocIPReply := &rpc.AllocIPReply{}
defer func() {
// roll back allocated resource when error
if err != nil {
networkContext.Log().Errorf("alloc result with error, %+v", err)
for _, res := range networkContext.resources {
err = networkService.deletePodResource(podinfo)
networkContext.Log().Errorf("rollback res[%v] with error, %+v", res, err)
mgr := networkService.getResourceManagerForRes(res.Type)
if mgr == nil {
networkContext.Log().Warnf("error cleanup allocated network resource %s, %s: %v", res.ID, res.Type, err)
continue
}
err = mgr.Release(networkContext, res)
if err != nil {
networkContext.Log().Infof("rollback res error: %+v", err)
}
}
} else {
networkContext.Log().Infof("alloc result: %+v", allocIPReply)
}
}()
// 2. Find old resource info
oldRes, err := networkService.getPodResource(podinfo)
if err != nil {
return nil, errors.Wrapf(err, "error get pod resources from db for pod %+v", podinfo)
}
if !networkService.verifyPodNetworkType(podinfo.PodNetworkType) {
return nil, fmt.Errorf("unexpect pod network type allocate, maybe daemon mode changed: %+v", podinfo.PodNetworkType)
}
// 3. Allocate network resource for pod
switch podinfo.PodNetworkType {
case podNetworkTypeENIMultiIP:
var eniMultiIP *types.ENIIP
eniMultiIP, err = networkService.allocateENIMultiIP(networkContext, &oldRes)
if err != nil {
return nil, fmt.Errorf("error get allocated eniip ip for: %+v, result: %+v", podinfo, err)
}
newRes := PodResources{
PodInfo: podinfo,
Resources: []ResourceItem{
{
ID: eniMultiIP.GetResourceID(),
Type: eniMultiIP.GetType(),
},
},
NetNs: func(s string) *string {
return &s
}(r.Netns),
}
networkContext.resources = append(networkContext.resources, newRes.Resources...)
if networkService.eipResMgr != nil && podinfo.EipInfo.PodEip {
podinfo.PodIP = eniMultiIP.SecAddress.String()
var eipRes *types.EIP
eipRes, err = networkService.allocateEIP(networkContext, &oldRes)
if err != nil {
return nil, fmt.Errorf("error get allocated eip for: %+v, result: %+v", podinfo, err)
}
eipResItem := ResourceItem{
Type: eipRes.GetType(),
ID: eipRes.GetResourceID(),
ExtraEipInfo: &ExtraEipInfo{
Delete: eipRes.Delete,
AssociateENI: eipRes.AssociateENI,
AssociateENIIP: eipRes.AssociateENIIP,
},
}
newRes.Resources = append(newRes.Resources, eipResItem)
networkContext.resources = append(networkContext.resources, eipResItem)
}
err = networkService.resourceDB.Put(podInfoKey(podinfo.Namespace, podinfo.Name), newRes)
if err != nil {
return nil, errors.Wrapf(err, "error put resource into store")
}
allocIPReply.IPType = rpc.IPType_TypeENIMultiIP
allocIPReply.Success = true
allocIPReply.NetworkInfo = &rpc.AllocIPReply_ENIMultiIP{
ENIMultiIP: &rpc.ENIMultiIP{
EniConfig: &rpc.ENI{
IPv4Addr: eniMultiIP.SecAddress.String(),
IPv4Subnet: eniMultiIP.Eni.Address.String(),
MacAddr: eniMultiIP.Eni.MAC,
Gateway: eniMultiIP.Eni.Gateway.String(),
DeviceNumber: eniMultiIP.Eni.DeviceNumber,
PrimaryIPv4Addr: eniMultiIP.PrimaryIP.String(),
},
PodConfig: &rpc.Pod{
Ingress: podinfo.TcIngress,
Egress: podinfo.TcEgress,
},
ServiceCidr: networkService.k8s.GetServiceCidr().String(),
},
}
case podNetworkTypeVPCENI:
var vpcEni *types.ENI
vpcEni, err = networkService.allocateENI(networkContext, &oldRes)
if err != nil {
return nil, fmt.Errorf("error get allocated vpc ENI ip for: %+v, result: %+v", podinfo, err)
}
newRes := PodResources{
PodInfo: podinfo,
Resources: []ResourceItem{
{
ID: vpcEni.GetResourceID(),
Type: vpcEni.GetType(),
},
},
NetNs: func(s string) *string {
return &s
}(r.Netns),
}
networkContext.resources = append(networkContext.resources, newRes.Resources...)
if networkService.eipResMgr != nil && podinfo.EipInfo.PodEip {
podinfo.PodIP = vpcEni.Address.IP.String()
var eipRes *types.EIP
eipRes, err = networkService.allocateEIP(networkContext, &oldRes)
if err != nil {
return nil, fmt.Errorf("error get allocated eip for: %+v, result: %+v", podinfo, err)
}
eipResItem := ResourceItem{
Type: eipRes.GetType(),
ID:   eipRes.GetResourceID(),
ExtraEipInfo: &ExtraEipInfo{
Delete:         eipRes.Delete,
AssociateENI:   eipRes.AssociateENI,
AssociateENIIP: eipRes.AssociateENIIP,
},
}
newRes.Resources = append(newRes.Resources, eipResItem)
networkContext.resources = append(networkContext.resources, eipResItem)
}
err = networkService.resourceDB.Put(podInfoKey(podinfo.Namespace, podinfo.Name), newRes)
if err != nil {
return nil, errors.Wrapf(err, "error put resource into store")
}
allocIPReply.IPType = rpc.IPType_TypeVPCENI
allocIPReply.Success = true
allocIPReply.NetworkInfo = &rpc.AllocIPReply_VpcEni{
VpcEni: &rpc.VPCENI{
EniConfig: &rpc.ENI{
IPv4Addr: vpcEni.Address.IP.String(),
IPv4Subnet: vpcEni.Address.String(),
MacAddr: vpcEni.MAC,
Gateway: vpcEni.Gateway.String(),
DeviceNumber: vpcEni.DeviceNumber,
PrimaryIPv4Addr: vpcEni.Address.IP.String(),
},
PodConfig: &rpc.Pod{
Ingress: podinfo.TcIngress,
Egress: podinfo.TcEgress,
},
ServiceCidr: networkService.k8s.GetServiceCidr().String(),
},
}
case podNetworkTypeVPCIP:
var vpcVeth *types.Veth
vpcVeth, err = networkService.allocateVeth(networkContext, &oldRes)
if err != nil {
return nil, fmt.Errorf("error get allocated vpc ip for: %+v, result: %+v", podinfo, err)
}
newRes := PodResources{
PodInfo: podinfo,
Resources: []ResourceItem{
{
ID: vpcVeth.GetResourceID(),
Type: vpcVeth.GetType(),
},
},
NetNs: func(s string) *string {
return &s
}(r.Netns),
}
networkContext.resources = append(networkContext.resources, newRes.Resources...)
err = networkService.resourceDB.Put(podInfoKey(podinfo.Namespace, podinfo.Name), newRes)
if err != nil {
return nil, errors.Wrapf(err, "error put resource into store")
}
allocIPReply.IPType = rpc.IPType_TypeVPCIP
allocIPReply.Success = true
allocIPReply.NetworkInfo = &rpc.AllocIPReply_VpcIp{
VpcIp: &rpc.VPCIP{
PodConfig: &rpc.Pod{
Ingress: podinfo.TcIngress,
Egress: podinfo.TcEgress,
},
NodeCidr: networkService.k8s.GetNodeCidr().String(),
},
}
default:
return nil, fmt.Errorf("not support pod network type")
}
// 4. Check grpc connection
if ctx.Err() != nil {
err = ctx.Err()
return nil, errors.Wrapf(err, "error on grpc connection")
}
// 5. Return allocate result
return allocIPReply, err
}
func (networkService *networkService) ReleaseIP(ctx context.Context, r *rpc.ReleaseIPRequest) (*rpc.ReleaseIPReply, error) {
log.Infof("release ip request: %+v", r)
networkService.RLock()
defer networkService.RUnlock()
var (
start = time.Now()
err error
)
defer func() {
metric.RPCLatency.WithLabelValues("ReleaseIP", fmt.Sprint(err != nil)).Observe(metric.MsSince(start))
}()
// 0. Get pod Info
podinfo, err := networkService.k8s.GetPod(r.K8SPodNamespace, r.K8SPodName)
if err != nil {
return nil, errors.Wrapf(err, "error get pod info for: %+v", r)
}
// 1. Init Context
networkContext := &networkContext{
Context: ctx,
resources: []ResourceItem{},
pod: podinfo,
k8sService: networkService.k8s,
}
releaseReply := &rpc.ReleaseIPReply{
Success: true,
}
defer func() {
if err != nil {
networkContext.Log().Errorf("release result with error, %+v", err)
} else {
networkContext.Log().Infof("release result: %+v", releaseReply)
}
}()
oldRes, err := networkService.getPodResource(podinfo)
if err != nil {
return nil, err
}
if !networkService.verifyPodNetworkType(podinfo.PodNetworkType) {
networkContext.Log().Warnf("unexpect pod network type release, maybe daemon mode changed: %+v", podinfo.PodNetworkType)
return releaseReply, nil
}
for _, res := range oldRes.Resources {
//record old resource for pod
networkContext.resources = append(networkContext.resources, res)
mgr := networkService.getResourceManagerForRes(res.Type)
if mgr == nil {
networkContext.Log().Warnf("error cleanup allocated network resource %s, %s: %v", res.ID, res.Type, err)
continue
}
if podinfo.IPStickTime == 0 {
if err = mgr.Release(networkContext, res); err != nil && err != pool.ErrInvalidState {
return nil, errors.Wrapf(err, "error release request network resource for: %+v", r)
}
if err = networkService.deletePodResource(podinfo); err != nil {
return nil, errors.Wrapf(err, "error delete resource from db: %+v", r)
}
}
}
if networkContext.Err() != nil {
err = ctx.Err()
return nil, errors.Wrapf(err, "error on grpc connection")
}
return releaseReply, nil
}
func (networkService *networkService) GetIPInfo(ctx context.Context, r *rpc.GetInfoRequest) (*rpc.GetInfoReply, error) {
log.Infof("GetIPInfo request: %+v", r)
// 0. Get pod Info
podinfo, err := networkService.k8s.GetPod(r.K8SPodNamespace, r.K8SPodName)
if err != nil {
return nil, errors.Wrapf(err, "error get pod info for: %+v", r)
}
if !networkService.verifyPodNetworkType(podinfo.PodNetworkType) {
return nil, fmt.Errorf("unexpect pod network type get info, maybe daemon mode changed: %+v", podinfo.PodNetworkType)
}
// 1. Init Context
networkContext := &networkContext{
Context: ctx,
resources: []ResourceItem{},
pod: podinfo,
k8sService: networkService.k8s,
}
var getIPInfoResult *rpc.GetInfoReply
defer func() {
networkContext.Log().Infof("getIpInfo result: %+v", getIPInfoResult)
}()
// 2. return network info for pod
switch podinfo.PodNetworkType {
case podNetworkTypeENIMultiIP:
getIPInfoResult = &rpc.GetInfoReply{
IPType: rpc.IPType_TypeENIMultiIP,
PodConfig: &rpc.Pod{
Ingress: podinfo.TcIngress,
Egress: podinfo.TcEgress,
},
PodIP: podinfo.PodIP,
}
return getIPInfoResult, nil
case podNetworkTypeVPCIP:
getIPInfoResult = &rpc.GetInfoReply{
IPType: rpc.IPType_TypeVPCIP,
PodConfig: &rpc.Pod{
Ingress: podinfo.TcIngress,
Egress: podinfo.TcEgress,
},
NodeCidr: networkService.k8s.GetNodeCidr().String(),
}
return getIPInfoResult, nil
case podNetworkTypeVPCENI:
getIPInfoResult = &rpc.GetInfoReply{
IPType: rpc.IPType_TypeVPCENI,
PodConfig: &rpc.Pod{
Ingress: podinfo.TcIngress,
Egress: podinfo.TcEgress,
},
}
return getIPInfoResult, nil
default:
return getIPInfoResult, errors.Errorf("unknown or unsupport network type for: %v", r)
}
}
func (networkService *networkService) RecordEvent(_ context.Context, r *rpc.EventRequest) (*rpc.EventReply, error) {
eventType := eventTypeNormal
if r.EventType == rpc.EventType_EventTypeWarning {
eventType = eventTypeWarning
}
reply := &rpc.EventReply{
Succeed: true,
Error: "",
}
if r.EventTarget == rpc.EventTarget_EventTargetNode { // Node
networkService.k8s.RecordNodeEvent(eventType, r.Reason, r.Message)
return reply, nil
}
// Pod
err := networkService.k8s.RecordPodEvent(r.K8SPodName, r.K8SPodNamespace, eventType, r.Reason, r.Message)
if err != nil {
reply.Succeed = false
reply.Error = err.Error()
return reply, err
}
return reply, nil
}
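// verifyPodNetworkType checks that the pod's network mode is one the current
// daemon mode can serve: VPC mode serves both vpc-eni and vpc-ip pods,
// ENIMultiIP mode serves eni-multi-ip pods, and ENIOnly mode serves vpc-eni pods.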
func (networkService *networkService) verifyPodNetworkType(podNetworkMode string) bool {
return (networkService.daemonMode == daemonModeVPC && //vpc
(podNetworkMode == podNetworkTypeVPCENI || podNetworkMode == podNetworkTypeVPCIP)) ||
// eni-multi-ip
(networkService.daemonMode == daemonModeENIMultiIP && podNetworkMode == podNetworkTypeENIMultiIP) ||
// eni-only
(networkService.daemonMode == daemonModeENIOnly && podNetworkMode == podNetworkTypeVPCENI)
}
func (networkService *networkService) startGarbageCollectionLoop() {
// periodically garbage collect network resources
gcTicker := time.NewTicker(gcPeriod)
go func() {
for range gcTicker.C {
log.Debugf("do resource gc on node")
networkService.Lock()
pods, err := networkService.k8s.GetLocalPods()
if err != nil {
log.Warnf("error get local pods for gc")
networkService.Unlock()
continue
}
podKeyMap := make(map[string]bool)
for _, pod := range pods {
if !pod.SandboxExited {
podKeyMap[podInfoKey(pod.Namespace, pod.Name)] = true
}
}
var (
inUseSet = make(map[string]map[string]ResourceItem)
expireSet = make(map[string]map[string]ResourceItem)
relateExpireList = make([]string, 0)
)
resRelateList, err := networkService.resourceDB.List()
if err != nil {
log.Warnf("error list resource db for gc")
networkService.Unlock()
continue
}
for _, resRelateObj := range resRelateList {
resRelate := resRelateObj.(PodResources)
_, podExist := podKeyMap[podInfoKey(resRelate.PodInfo.Namespace, resRelate.PodInfo.Name)]
if !podExist {
if resRelate.PodInfo.IPStickTime != 0 {
// delay resource garbage collection for sticky ip
resRelate.PodInfo.IPStickTime = 0
if err = networkService.resourceDB.Put(podInfoKey(resRelate.PodInfo.Namespace, resRelate.PodInfo.Name),
resRelate); err != nil {
log.Warnf("error store pod info to resource db")
}
} else {
relateExpireList = append(relateExpireList, podInfoKey(resRelate.PodInfo.Namespace, resRelate.PodInfo.Name))
}
}
for _, res := range resRelate.Resources {
if _, ok := inUseSet[res.Type]; !ok {
inUseSet[res.Type] = make(map[string]ResourceItem)
expireSet[res.Type] = make(map[string]ResourceItem)
}
// already in use by others
if _, ok := inUseSet[res.Type][res.ID]; ok {
continue
}
if podExist {
// remove resource from expire list
delete(expireSet[res.Type], res.ID)
inUseSet[res.Type][res.ID] = res
} else {
if _, ok := inUseSet[res.Type][res.ID]; !ok {
expireSet[res.Type][res.ID] = res
}
}
}
}
gcDone := true
for mgrType := range inUseSet {
mgr, ok := networkService.mgrForResource[mgrType]
if ok {
log.Debugf("start garbage collection for %v, list: %+v, %+v", mgrType, inUseSet[mgrType], expireSet[mgrType])
err = mgr.GarbageCollection(inUseSet[mgrType], expireSet[mgrType])
if err != nil {
log.Warnf("error do garbage collection for %+v, inuse: %v, expire: %v, err: %v", mgrType, inUseSet[mgrType], expireSet[mgrType], err)
gcDone = false
}
}
}
if gcDone {
for _, relate := range relateExpireList {
err = networkService.resourceDB.Delete(relate)
if err != nil {
log.Warnf("error delete resource db relation: %v", err)
}
}
}
networkService.Unlock()
}
}()
}
func (networkService *networkService) startPeriodCheck() {
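// two passes: (1) cross-check the pod->resource mapping against pool/metadata
// state and emit warning events for invalid entries, (2) run CNI CHECK against
// every stored netns to verify the datapath is still wired up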
// check pool
func() {
log.Debugf("compare poll with metadata")
podMapping, err := networkService.GetResourceMapping()
if err != nil {
log.Error(err)
return
}
for _, res := range podMapping {
if res.Valid {
continue
}
_ = tracing.RecordPodEvent(res.Name, res.Namespace, corev1.EventTypeWarning, "ResourceInvalid", fmt.Sprintf("resource %s", res.LocalResID))
}
}()
// call CNI CHECK to make sure all devices are ok
func() {
log.Debugf("call CNI CHECK")
defer func() {
log.Debugf("call CNI CHECK end")
}()
networkService.RLock()
podResList, err := networkService.resourceDB.List()
networkService.RUnlock()
if err != nil {
log.Error(err)
return
}
ff, err := ioutil.ReadFile(terwayCNIConf)
if err != nil {
log.Error(err)
return
}
for _, v := range podResList {
res := v.(PodResources)
if res.NetNs == nil {
continue
}
log.Debugf("checking pod name %s", res.PodInfo.Name)
cniCfg := libcni.NewCNIConfig([]string{networkService.cniBinPath}, nil)
func() {
ctx, cancel := context.WithTimeout(context.Background(), cniExecTimeout)
defer cancel()
err := cniCfg.CheckNetwork(ctx, &libcni.NetworkConfig{
Network: &containertypes.NetConf{
CNIVersion: "0.4.0",
Name: "terway",
Type: "terway",
},
Bytes: ff,
}, &libcni.RuntimeConf{
ContainerID: "fake", // must provide
NetNS: *res.NetNs,
IfName: "eth0",
Args: [][2]string{
{"K8S_POD_NAME", res.PodInfo.Name},
{"K8S_POD_NAMESPACE", res.PodInfo.Namespace},
},
})
if err != nil {
log.Error(err)
return
}
}()
}
}()
}
// tracing
func (networkService *networkService) Config() []tracing.MapKeyValueEntry {
// name, daemon_mode, configFilePath, kubeconfig, master
config := []tracing.MapKeyValueEntry{
{Key: tracingKeyName, Value: networkServiceName}, // use a unique name?
{Key: tracingKeyDaemonMode, Value: networkService.daemonMode},
{Key: tracingKeyConfigFilePath, Value: networkService.configFilePath},
{Key: tracingKeyKubeConfig, Value: networkService.kubeConfig},
{Key: tracingKeyMaster, Value: networkService.master},
}
return config
}
func (networkService *networkService) Trace() []tracing.MapKeyValueEntry {
trace := []tracing.MapKeyValueEntry{
{Key: tracingKeyPendingPodsCount, Value: fmt.Sprint(len(networkService.pendingPods))},
}
resList, err := networkService.resourceDB.List()
if err != nil {
trace = append(trace, tracing.MapKeyValueEntry{Key: "error", Value: err.Error()})
return trace
}
for _, v := range resList {
res := v.(PodResources)
var resources []string
for _, v := range res.Resources {
resource := fmt.Sprintf("(%s)%s", v.Type, v.ID)
resources = append(resources, resource)
}
key := fmt.Sprintf("pods/%s/%s/resources", res.PodInfo.Namespace, res.PodInfo.Name)
trace = append(trace, tracing.MapKeyValueEntry{Key: key, Value: strings.Join(resources, " ")})
}
return trace
}
func (networkService *networkService) Execute(cmd string, _ []string, message chan<- string) {
switch cmd {
case commandMapping:
mapping, err := networkService.GetResourceMapping()
message <- fmt.Sprintf("mapping: %v, err: %s\n", mapping, err)
default:
message <- "can't recognize command\n"
}
close(message)
}
func (networkService *networkService) GetResourceMapping() ([]tracing.PodMapping, error) {
var poolStats tracing.ResourcePoolStats
var err error
networkService.RLock()
// get []ResourceMapping
switch networkService.daemonMode {
case daemonModeENIMultiIP:
poolStats, err = networkService.eniIPResMgr.GetResourceMapping()
case daemonModeVPC:
poolStats, err = networkService.eniResMgr.GetResourceMapping()
case daemonModeENIOnly:
poolStats, err = networkService.eniResMgr.GetResourceMapping()
}
if err != nil {
networkService.RUnlock()
return nil, err
}
// pod related res
pods, err := networkService.resourceDB.List()
networkService.RUnlock()
if err != nil {
return nil, err
}
mapping := make([]tracing.PodMapping, 0, len(pods))
// three-way compare across pod records, the local pool state, and the remote (metadata) state
for _, pod := range pods {
p := pod.(PodResources)
for _, res := range p.Resources {
loID := ""
RemoteID := ""
lo, ok1 := poolStats.GetLocal()[res.ID]
if ok1 {
loID = lo.GetID()
}
remote, ok2 := poolStats.GetRemote()[res.ID]
if ok2 {
RemoteID = remote.GetID()
}
m := tracing.PodMapping{
Name: p.PodInfo.Name,
Namespace: p.PodInfo.Namespace,
Valid: ok1 && ok2,
PodBindResID: res.ID,
LocalResID: loID,
RemoteResID: RemoteID,
}
mapping = append(mapping, m)
}
}
return mapping, nil
}
func newNetworkService(configFilePath, kubeconfig, master, daemonMode string) (rpc.TerwayBackendServer, error) {
log.Debugf("start network service with: %s, %s", configFilePath, daemonMode)
cniBinPath := os.Getenv("CNI_PATH")
if cniBinPath == "" {
cniBinPath = cniDefaultPath
}
netSrv := &networkService{
configFilePath: configFilePath,
kubeConfig: kubeconfig,
master: master,
pendingPods: map[string]interface{}{},
pendingPodsLock: sync.RWMutex{},
cniBinPath: cniBinPath,
}
if daemonMode == daemonModeENIMultiIP || daemonMode == daemonModeVPC || daemonMode == daemonModeENIOnly {
netSrv.daemonMode = daemonMode
} else {
return nil, fmt.Errorf("unsupport daemon mode")
}
var err error
netSrv.k8s, err = newK8S(master, kubeconfig, daemonMode)
if err != nil {
return nil, errors.Wrapf(err, "error init k8s service")
}
// load default config
f, err := os.Open(configFilePath)
if err != nil {
return nil, errors.Wrapf(err, "failed open config file")
}
data, err := ioutil.ReadAll(f)
if err != nil {
return nil, fmt.Errorf("failed read file %s: %v", configFilePath, err)
}
// load dynamic config
dynamicCfg, nodeLabel, err := getDynamicConfig(netSrv.k8s)
if err != nil {
log.Warnf("get dynamic config error: %s. fallback to default config", err.Error())
dynamicCfg = ""
}
config, err := mergeConfigAndUnmarshal([]byte(dynamicCfg), data)
if err != nil {
return nil, fmt.Errorf("failed parse config: %v", err)
}
if len(dynamicCfg) == 0 {
log.Infof("got config: %+v from: %+v", config, configFilePath)
} else {
log.Infof("got config: %+v from %+v, with dynamic config %+v", config, configFilePath, nodeLabel)
}
if err := validateConfig(config); err != nil {
return nil, err
}
if err := setDefault(config); err != nil {
return nil, err
}
regionID, err := aliyun.GetLocalRegion()
if err != nil {
return nil, errors.Wrapf(err, "error get region-id")
}
ignoreLinkNotExist := false
if daemonMode == daemonModeENIOnly {
ignoreLinkNotExist = true
}
ecs, err := aliyun.NewECS(config.AccessID, config.AccessSecret, config.CredentialPath, regionID, ignoreLinkNotExist)
if err != nil {
return nil, errors.Wrapf(err, "error get aliyun client")
}
var ipnet *net.IPNet
if config.ServiceCIDR != "" {
_, ipnet, err = net.ParseCIDR(config.ServiceCIDR)
if err != nil {
return nil, errors.Wrapf(err, "error parse service cidr: %s", config.ServiceCIDR)
}
}
err = netSrv.k8s.SetSvcCidr(ipnet)
if err != nil {
return nil, errors.Wrapf(err, "error set k8s svcCidr")
}
netSrv.resourceDB, err = storage.NewDiskStorage(
resDBName, resDBPath, json.Marshal, func(bytes []byte) (interface{}, error) {
resourceRel := &PodResources{}
err = json.Unmarshal(bytes, resourceRel)
if err != nil {
return nil, errors.Wrapf(err, "error unmarshal pod relate resource")
}
return *resourceRel, nil
})
if err != nil {
return nil, errors.Wrapf(err, "error init resource manager storage")
}
// get pool config
poolConfig, err := getPoolConfig(config, ecs)
if err != nil {
return nil, errors.Wrapf(err, "error get pool config")
}
log.Infof("init pool config: %+v", poolConfig)
err = restoreLocalENIRes(ecs, poolConfig, netSrv.k8s, netSrv.resourceDB)
if err != nil {
return nil, errors.Wrapf(err, "error restore local eni resources")
}
localResource := make(map[string][]resourceManagerInitItem)
resObjList, err := netSrv.resourceDB.List()
if err != nil {
return nil, errors.Wrapf(err, "error list resource relation db")
}
for _, resObj := range resObjList {
podRes := resObj.(PodResources)
for _, res := range podRes.Resources {
if localResource[res.Type] == nil {
localResource[res.Type] = make([]resourceManagerInitItem, 0)
}
localResource[res.Type] = append(localResource[res.Type], resourceManagerInitItem{resourceID: res.ID, podInfo: podRes.PodInfo})
}
}
log.Debugf("local resources to restore: %+v", localResource)
switch daemonMode {
case daemonModeVPC:
//init ENI
netSrv.eniResMgr, err = newENIResourceManager(poolConfig, ecs, localResource[types.ResourceTypeENI])
if err != nil {
return nil, errors.Wrapf(err, "error init ENI resource manager")
}
netSrv.vethResMgr, err = newVPCResourceManager()
if err != nil {
return nil, errors.Wrapf(err, "error init vpc resource manager")
}
netSrv.mgrForResource = map[string]ResourceManager{
types.ResourceTypeENI: netSrv.eniResMgr,
types.ResourceTypeVeth: netSrv.vethResMgr,
}
case daemonModeENIMultiIP:
//init ENI multi ip
netSrv.eniIPResMgr, err = newENIIPResourceManager(poolConfig, ecs, localResource[types.ResourceTypeENIIP])
if err != nil {
return nil, errors.Wrapf(err, "error init ENI ip resource manager")
}
if config.EnableEIPPool == conditionTrue {
netSrv.eipResMgr = newEipResourceManager(ecs, netSrv.k8s, config.AllowEIPRob == conditionTrue)
}
netSrv.mgrForResource = map[string]ResourceManager{
types.ResourceTypeENIIP: netSrv.eniIPResMgr,
types.ResourceTypeEIP: netSrv.eipResMgr,
}
case daemonModeENIOnly:
//init eni
netSrv.eniResMgr, err = newENIResourceManager(poolConfig, ecs, localResource[types.ResourceTypeENI])
if err != nil {
return nil, errors.Wrapf(err, "error init eni resource manager")
}
if config.EnableEIPPool == conditionTrue {
netSrv.eipResMgr = newEipResourceManager(ecs, netSrv.k8s, config.AllowEIPRob == conditionTrue)
}
netSrv.mgrForResource = map[string]ResourceManager{
types.ResourceTypeENI: netSrv.eniResMgr,
types.ResourceTypeEIP: netSrv.eipResMgr,
}
default:
panic("unsupported daemon mode" + daemonMode)
}
//start gc loop
netSrv.startGarbageCollectionLoop()
go wait.JitterUntil(netSrv.startPeriodCheck, poolCheckPeriod, 1, true, wait.NeverStop)
// register for tracing
_ = tracing.Register(tracing.ResourceTypeNetworkService, "default", netSrv)
tracing.RegisterResourceMapping(netSrv)
tracing.RegisterEventRecorder(netSrv.k8s.RecordNodeEvent, netSrv.k8s.RecordPodEvent)
return netSrv, nil
}
// restore local eni resources for old terway migration
func restoreLocalENIRes(ecs aliyun.ECS, pc *types.PoolConfig, k8s Kubernetes, resourceDB storage.Storage) error {
resList, err := resourceDB.List()
if err != nil {
return errors.Wrapf(err, "error list resourceDB storage")
}
if len(resList) != 0 {
log.Debugf("skip restore for upgraded")
return nil
}
eniList, err := ecs.GetAttachedENIs(pc.InstanceID, false)
if err != nil {
return errors.Wrapf(err, "error get attached eni for restore")
}
ipEniMap := map[string]*types.ENI{}
for _, eni := range eniList {
ipEniMap[eni.Address.IP.String()] = eni
}
podList, err := k8s.GetLocalPods()
if err != nil {
return errors.Wrapf(err, "error get local pod for restore")
}
for _, pod := range podList {
if pod.PodNetworkType == podNetworkTypeVPCENI {
log.Debugf("restore for local pod: %+v, enis: %+v", pod, ipEniMap)
eni, ok := ipEniMap[pod.PodIP]
if ok {
err = resourceDB.Put(podInfoKey(pod.Namespace, pod.Name), PodResources{
PodInfo: pod,
Resources: []ResourceItem{
{
ID: eni.GetResourceID(),
Type: eni.GetType(),
},
},
})
if err != nil {
return errors.Wrapf(err, "error put resource into store")
}
} else {
log.Warnf("error found pod relate eni, pod: %+v", pod)
}
}
}
return nil
}
// setDefault fills in default values for unset configuration fields.
func setDefault(cfg *types.Configure) error {
if cfg.EniCapRatio == 0 {
cfg.EniCapRatio = 1
}
if cfg.HotPlug == "" {
cfg.HotPlug = conditionTrue
}
if cfg.HotPlug == conditionFalse || cfg.HotPlug == "0" {
cfg.HotPlug = conditionFalse
}
// Default policy for vswitch selection is random.
if cfg.VSwitchSelectionPolicy == "" {
cfg.VSwitchSelectionPolicy = types.VSwitchSelectionPolicyRandom
}
return nil
}
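// exampleSetDefault is an editor-added sketch (not part of the original
// daemon code) showing the normalization above; the literal values are
// illustrative only.
func exampleSetDefault() *types.Configure {
	cfg := &types.Configure{HotPlug: "0"} // "0" is folded into conditionFalse
	_ = setDefault(cfg)
	// Now: cfg.EniCapRatio == 1, cfg.HotPlug == conditionFalse,
	// cfg.VSwitchSelectionPolicy == types.VSwitchSelectionPolicyRandom.
	return cfg
}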
// validateConfig validates the daemon configuration; currently a no-op.
func validateConfig(cfg *types.Configure) error {
return nil
}
func getPoolConfig(cfg *types.Configure, ecs aliyun.ECS) (*types.PoolConfig, error) {
poolConfig := &types.PoolConfig{
MaxPoolSize: cfg.MaxPoolSize,
MinPoolSize: cfg.MinPoolSize,
MaxENI: cfg.MaxENI,
MinENI: cfg.MinENI,
AccessID: cfg.AccessID,
AccessSecret: cfg.AccessSecret,
HotPlug: cfg.HotPlug == conditionTrue,
EniCapRatio: cfg.EniCapRatio,
EniCapShift: cfg.EniCapShift,
SecurityGroup: cfg.SecurityGroup,
VSwitchSelectionPolicy: cfg.VSwitchSelectionPolicy,
}
zone, err := aliyun.GetLocalZone()
if err != nil {
return nil, err
}
if cfg.VSwitches != nil {
zoneVswitchs, ok := cfg.VSwitches[zone]
if ok && len(zoneVswitchs) > 0 {
			poolConfig.VSwitch = zoneVswitchs
}
}
if len(poolConfig.VSwitch) == 0 {
vSwitch, err := aliyun.GetLocalVswitch()
if err != nil {
return nil, err
}
poolConfig.VSwitch = []string{vSwitch}
}
poolConfig.ENITags = cfg.ENITags
if poolConfig.Region, err = aliyun.GetLocalRegion(); err != nil {
return nil, err
}
if poolConfig.VPC, err = aliyun.GetLocalVPC(); err != nil {
return nil, err
}
if poolConfig.InstanceID, err = aliyun.GetLocalInstanceID(); err != nil {
return nil, err
}
return poolConfig, nil
}
| ["\"CNI_PATH\""] | [] | ["CNI_PATH"] | [] | ["CNI_PATH"] | go | 1 | 0 |
JYTools/demo/pbs_agent_worker.py | #! /usr/bin/env python
# coding: utf-8
import sys
import os
import time
import json
import uuid
import tempfile
import ConfigParser
from JYTools import StringTool
from JYTools.JYWorker import RedisWorker, worker_run
__author__ = '鹛桑够'
sys_tmp_dir = tempfile.gettempdir()
log_dir = os.environ.get("JINGD_LOG_DIR", sys_tmp_dir)
agent_dir = StringTool.path_join(log_dir, "pbs_agent")
if os.path.isdir(agent_dir) is False:
os.mkdir(agent_dir)
example_dir = StringTool.path_join(agent_dir, "example")
pbs_task_dir = StringTool.path_join(agent_dir, "pbs")
pbs_log_dir = StringTool.path_join(agent_dir, "log")
if os.path.isdir(example_dir) is False:
os.mkdir(example_dir)
if os.path.isdir(pbs_task_dir) is False:
os.mkdir(pbs_task_dir)
if os.path.isdir(pbs_log_dir) is False:
os.mkdir(pbs_log_dir)
pbs_template = """#PBS -S /bin/bash
#PBS -m n
#PBS -M <[email protected]>
"""
pw_info = os.environ.get("JY_PBS_WORKER_INFO", "pbs_worker.info")
with open(pw_info) as pwr:
c_info = pwr.read()
nc_info = c_info % os.environ
info_dir, info_name = os.path.split(pw_info)
temp_info_name = StringTool.join_encode([".", uuid.uuid4().hex, info_name], join_str="")
temp_info_path = StringTool.path_join(info_dir, temp_info_name)
with open(temp_info_path, "w") as tiw:
tiw.write(StringTool.encode(nc_info))
pbs_worker_config = ConfigParser.ConfigParser()
pbs_worker_config.read(temp_info_path)
os.remove(temp_info_path)
class PBSAgentWorker(RedisWorker):
expect_params_type = dict
def write_pbs_task(self, work_tag, cmd):
save_name = StringTool.join_decode([self.current_task.task_key, self.current_task.task_sub_key,
int(time.time()), "pbs"], join_str=".")
save_dir = StringTool.path_join(pbs_task_dir, work_tag)
if os.path.isdir(save_dir) is False:
os.mkdir(save_dir)
save_path = StringTool.path_join(save_dir, save_name)
with open(save_path, "w") as wp:
cmd = StringTool.join_encode(cmd, join_str=" ")
s = StringTool.join_encode([pbs_template, cmd], join_str="\n")
wp.write(StringTool.encode(s))
return save_path
def write_example(self, work_tag, params):
save_name = StringTool.join_decode([self.current_task.task_key, self.current_task.task_sub_key,
int(time.time()), "json"], join_str=".")
save_dir = StringTool.path_join(example_dir, work_tag)
if os.path.isdir(save_dir) is False:
os.mkdir(save_dir)
save_path = StringTool.path_join(save_dir, save_name)
with open(save_path, "w") as wp:
wp.write(StringTool.encode(json.dumps(params)))
return save_path
def package_docker_cmd(self, image, volumes):
cmd = ["docker", "run"]
if isinstance(volumes, dict) is True:
for key in volumes.keys():
cmd.extend(["-v", "%s:%s" % (key, volumes[key])])
cmd.append(image)
return cmd
def package_cmd(self, work_tag, report_tag, example_path):
py_path = pbs_worker_config.get(work_tag, "file")
key = self.current_task.task_key
cmd = ["python", py_path, "-c", self.conf_path, "-l", self.log_dir, "-w", work_tag, "-e",
example_path, "-k", key]
sub_key = self.current_task.task_sub_key
if sub_key is not None:
cmd.extend(["-s", sub_key])
if report_tag is not None:
cmd.extend(["-r", report_tag])
cmd.append(pbs_worker_config.get(work_tag, "cmd"))
return cmd
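    # Editor's note (illustrative, not part of the original worker): given a
    # hypothetical pbs_worker.info section [Demo] with file=/opt/demo.py and
    # cmd=run, package_cmd assembles roughly:
    #   python /opt/demo.py -c <conf_path> -l <log_dir> -w Demo -e <example.json> -k <key> [-s <sub_key>] [-r <report_tag>] run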
def handle_task(self, key, params):
report_tag = self.current_task.task_report_tag
work_tag = params["work_tag"]
n_params = params["params"]
if pbs_worker_config.has_section(work_tag) is True:
example_path = self.write_example(work_tag, n_params)
exec_cmd = self.package_cmd(work_tag, report_tag, example_path)
print(exec_cmd)
# self.execute_subprocess(exec_cmd)
pbs_path = self.write_pbs_task(work_tag, exec_cmd)
self.execute_subprocess(["qsub", pbs_path])
else:
self.push_task(key, n_params, work_tag=work_tag, sub_key=self.current_task.task_sub_key,
report_tag=report_tag)
self.current_task.task_report_tag = None
if __name__ == "__main__":
os.chdir(pbs_log_dir)
sys.exit(worker_run(PBSAgentWorker, default_work_tag="PBSAgent"))
| [] | [] | ["JY_PBS_WORKER_INFO", "JINGD_LOG_DIR"] | [] | ["JY_PBS_WORKER_INFO", "JINGD_LOG_DIR"] | python | 2 | 0 |
tests/layers/test_layers_convolution.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import *
from tensorlayer.models import *
from tests.utils import CustomTestCase
class Layer_Convolution_1D_Test(CustomTestCase):
@classmethod
def setUpClass(cls):
print("\n#################################")
cls.batch_size = 8
cls.inputs_shape = [cls.batch_size, 100, 1]
cls.input_layer = Input(cls.inputs_shape, name='input_layer')
cls.n1 = tl.layers.Conv1dLayer(
shape=(5, 1, 32), stride=2
)(cls.input_layer)
cls.n2 = tl.layers.Conv1d(
n_filter=32, filter_size=5, stride=2
)(cls.n1)
cls.n3 = tl.layers.DeConv1dLayer(
shape=(5, 64, 32), outputs_shape=(cls.batch_size, 50, 64), strides=(1, 2, 1), name='deconv1dlayer'
)(cls.n2)
cls.n4 = tl.layers.SeparableConv1d(
n_filter=32, filter_size=3, strides=2, padding='SAME', act=tf.nn.relu, name='separable_1d'
)(cls.n3)
cls.n5 = tl.layers.SubpixelConv1d(
scale=2, act=tf.nn.relu, in_channels=32, name='subpixel_1d'
)(cls.n4)
cls.model = Model(
inputs=cls.input_layer, outputs=cls.n5
)
print("Testing Conv1d model: \n", cls.model)
@classmethod
def tearDownClass(cls):
pass
# tf.reset_default_graph()
def test_layer_n1(self):
# self.assertEqual(len(self.n1.all_layers), 2)
# self.assertEqual(len(self.n1.all_params), 2)
# self.assertEqual(self.n1.count_params(), 192)
self.assertEqual(len(self.n1._info[0].layer.weights), 2)
self.assertEqual(self.n1.get_shape().as_list()[1:], [50, 32])
def test_layer_n2(self):
# self.assertEqual(len(self.n2.all_layers), 3)
# self.assertEqual(len(self.n2.all_params), 4)
# self.assertEqual(self.n2.count_params(), 5344)
self.assertEqual(len(self.n2._info[0].layer.weights), 2)
self.assertEqual(self.n2.get_shape().as_list()[1:], [25, 32])
def test_layer_n3(self):
# self.assertEqual(len(self.n2.all_layers), 3)
# self.assertEqual(len(self.n2.all_params), 4)
# self.assertEqual(self.n2.count_params(), 5344)
self.assertEqual(len(self.n3._info[0].layer.weights), 2)
self.assertEqual(self.n3.get_shape().as_list()[1:], [50, 64])
def test_layer_n4(self):
# self.assertEqual(len(self.n2.all_layers), 3)
# self.assertEqual(len(self.n2.all_params), 4)
# self.assertEqual(self.n2.count_params(), 5344)
self.assertEqual(len(self.n4._info[0].layer.weights), 3)
self.assertEqual(self.n4.get_shape().as_list()[1:], [25, 32])
def test_layer_n5(self):
# self.assertEqual(len(self.n2.all_layers), 3)
# self.assertEqual(len(self.n2.all_params), 4)
# self.assertEqual(self.n2.count_params(), 5344)
self.assertEqual(self.n5.get_shape().as_list()[1:], [50, 16])
# def test_layer_n3(self):
#
# self.assertEqual(len(self.n3.all_layers), 4)
# self.assertEqual(len(self.n3.all_params), 7)
# self.assertEqual(self.n3.count_params(), 6496)
# self.assertEqual(self.n3.outputs.get_shape().as_list()[1:], [23, 32])
# FIXME: TF2.0 only supports NHWC now
# class Layer_Convolution_1D_NCW_Test(CustomTestCase):
#
# @classmethod
# def setUpClass(cls):
# print("\n#################################")
#
# cls.batch_size = 8
# cls.inputs_shape = [cls.batch_size, 1, 100]
# cls.input_layer = Input(cls.inputs_shape, name='input_layer')
#
# cls.n1 = tl.layers.Conv1dLayer(
# shape=(5, 1, 32), stride=2, data_format="NCW"
# )(cls.input_layer)
# cls.n2 = tl.layers.Conv1d(
# n_filter=32, filter_size=5, stride=2, data_format='channels_first'
# )(cls.n1)
# cls.model = Model(inputs=cls.input_layer, outputs=cls.n2)
# print("Testing Conv1d model: \n", cls.model)
#
# # cls.n3 = tl.layers.SeparableConv1d(
# # cls.n2, n_filter=32, filter_size=3, strides=1, padding='VALID', act=tf.nn.relu, name='separable_1d'
# # )
#
# @classmethod
# def tearDownClass(cls):
# pass
# # tf.reset_default_graph()
#
# def test_layer_n1(self):
#
# # self.assertEqual(len(self.n1.all_layers), 2)
# # self.assertEqual(len(self.n1.all_params), 2)
# # self.assertEqual(self.n1.count_params(), 192)
# self.assertEqual(len(self.n1._info[0].layer.weights), 2)
# self.assertEqual(self.n1.get_shape().as_list()[1:], [50, 32])
#
# def test_layer_n2(self):
#
# # self.assertEqual(len(self.n2.all_layers), 3)
# # self.assertEqual(len(self.n2.all_params), 4)
# # self.assertEqual(self.n2.count_params(), 5344)
# self.assertEqual(len(self.n2._info[0].layer.weights), 2)
# self.assertEqual(self.n2.get_shape().as_list()[1:], [25, 32])
#
# # def test_layer_n3(self):
# #
# # self.assertEqual(len(self.n3.all_layers), 4)
# # self.assertEqual(len(self.n3.all_params), 7)
# # self.assertEqual(self.n3.count_params(), 6496)
# # self.assertEqual(self.n3.outputs.get_shape().as_list()[1:], [23, 32])
class Layer_Convolution_2D_Test(CustomTestCase):
@classmethod
def setUpClass(cls):
print("\n#################################")
cls.batch_size = 5
cls.inputs_shape = [cls.batch_size, 400, 400, 3]
cls.input_layer = Input(cls.inputs_shape, name='input_layer')
cls.n1 = tl.layers.Conv2dLayer(
act=tf.nn.relu, shape=(5, 5, 3, 32), strides=(1, 2, 2, 1), padding='SAME',
b_init=tf.constant_initializer(value=0.0),
name='conv2dlayer'
)(cls.input_layer)
cls.n2 = tl.layers.Conv2d(
n_filter=32, filter_size=(3, 3), strides=(2, 2), act=None, name='conv2d'
)(cls.n1)
cls.n3 = tl.layers.Conv2d(
n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, b_init=None, name='conv2d_no_bias'
)(cls.n2)
cls.n4 = tl.layers.DeConv2dLayer(
shape=(5, 5, 32, 32), outputs_shape=(cls.batch_size, 100, 100, 32), strides=(1, 2, 2, 1), name='deconv2dlayer'
)(cls.n3)
cls.n5 = tl.layers.DeConv2d(
n_filter=32, filter_size=(3, 3), strides=(2, 2), name='DeConv2d'
)(cls.n4)
cls.n6 = tl.layers.DepthwiseConv2d(
filter_size=(3, 3), strides=(1, 1), dilation_rate=(2, 2), act=tf.nn.relu, depth_multiplier=2, name='depthwise'
)(cls.n5)
cls.n7 = tl.layers.Conv2d(
n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, in_channels=64, name='conv2d2'
)(cls.n6)
cls.n8 = tl.layers.BinaryConv2d(
n_filter=64, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, in_channels=32, name='binaryconv2d'
)(cls.n7)
cls.n9 = tl.layers.SeparableConv2d(
n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, name='separableconv2d'
)(cls.n8)
cls.n10 = tl.layers.GroupConv2d(
n_filter=64, filter_size=(3, 3), strides=(2, 2), n_group=2, name='group'
)(cls.n9)
cls.n11 = tl.layers.DorefaConv2d(
n_filter=32, filter_size=(5, 5), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='dorefaconv2d'
)(cls.n10)
cls.n12 = tl.layers.TernaryConv2d(
n_filter=64, filter_size=(5, 5), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='ternaryconv2d'
)(cls.n11)
cls.n13 = tl.layers.QuanConv2d(
n_filter=32, filter_size=(5, 5), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='quancnn2d'
)(cls.n12)
cls.n14 = tl.layers.SubpixelConv2d(
scale=2, act=tf.nn.relu, name='subpixelconv2d'
)(cls.n13)
cls.model = Model(cls.input_layer, cls.n14)
print("Testing Conv2d model: \n", cls.model)
# cls.n12 = tl.layers.QuanConv2d(cls.n11, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', name='quancnn')
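    # Editor's note (illustrative, not part of the original test): with 'SAME'
    # padding, each strided conv is expected to produce ceil(input / stride)
    # along every spatial axis, which is how the assertions below arrive at
    # 400 -> 200 -> 100 -> 50 for strides of 2:
    #
    #   import math
    #   def same_pad_out(size, stride):
    #       return math.ceil(size / stride)
    #   assert same_pad_out(400, 2) == 200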
@classmethod
def tearDownClass(cls):
pass
# tf.reset_default_graph()
def test_layer_n1(self):
# self.assertEqual(len(self.n1.all_layers), 2)
# self.assertEqual(len(self.n1.all_params), 2)
# self.assertEqual(self.n1.count_params(), 2432)
self.assertEqual(len(self.n1._info[0].layer.weights), 2)
self.assertEqual(self.n1.get_shape().as_list()[1:], [200, 200, 32])
def test_layer_n2(self):
# self.assertEqual(len(self.n2.all_layers), 3)
# self.assertEqual(len(self.n2.all_params), 4)
# self.assertEqual(self.n2.count_params(), 11680)
self.assertEqual(len(self.n2._info[0].layer.weights), 2)
self.assertEqual(self.n2.get_shape().as_list()[1:], [100, 100, 32])
def test_layer_n3(self):
# self.assertEqual(len(self.n3.all_layers), 4)
# self.assertEqual(len(self.n3.all_params), 5)
# self.assertEqual(self.n3.count_params(), 20896)
self.assertEqual(len(self.n3._info[0].layer.weights), 1) # b_init is None
self.assertEqual(self.n3.get_shape().as_list()[1:], [50, 50, 32])
def test_layer_n4(self):
# self.assertEqual(len(self.n4.all_layers), 5)
# self.assertEqual(len(self.n4.all_params), 7)
# self.assertEqual(self.n4.count_params(), 46528)
self.assertEqual(len(self.n4._info[0].layer.weights), 2)
self.assertEqual(self.n4.get_shape().as_list()[1:], [100, 100, 32])
def test_layer_n5(self):
# self.assertEqual(len(self.n5.all_layers), 6)
# self.assertEqual(len(self.n5.all_params), 9)
# self.assertEqual(self.n5.count_params(), 55776)
self.assertEqual(len(self.n5._info[0].layer.weights), 2)
self.assertEqual(self.n5.get_shape().as_list()[1:], [200, 200, 32])
def test_layer_n6(self):
# self.assertEqual(len(self.n6.all_layers), 7)
# self.assertEqual(len(self.n6.all_params), 11)
# self.assertEqual(self.n6.count_params(), 56416)
self.assertEqual(len(self.n6._info[0].layer.weights), 2)
self.assertEqual(self.n6.get_shape().as_list()[1:], [200, 200, 64])
def test_layer_n7(self):
# self.assertEqual(len(self.n7.all_layers), 8)
# self.assertEqual(len(self.n7.all_params), 13)
# self.assertEqual(self.n7.count_params(), 74880)
self.assertEqual(len(self.n7._info[0].layer.weights), 2)
self.assertEqual(self.n7.get_shape().as_list()[1:], [100, 100, 32])
def test_layer_n8(self):
# self.assertEqual(len(self.n7.all_layers), 8)
# self.assertEqual(len(self.n7.all_params), 13)
# self.assertEqual(self.n7.count_params(), 74880)
self.assertEqual(len(self.n8._info[0].layer.weights), 2)
self.assertEqual(self.n8.get_shape().as_list()[1:], [50, 50, 64])
def test_layer_n9(self):
# self.assertEqual(len(self.n7.all_layers), 8)
# self.assertEqual(len(self.n7.all_params), 13)
# self.assertEqual(self.n7.count_params(), 74880)
self.assertEqual(len(self.n9._info[0].layer.weights), 3)
self.assertEqual(self.n9.get_shape().as_list()[1:], [24, 24, 32])
def test_layer_n10(self):
# self.assertEqual(len(self.n7.all_layers), 8)
# self.assertEqual(len(self.n7.all_params), 13)
# self.assertEqual(self.n7.count_params(), 74880)
self.assertEqual(len(self.n10._info[0].layer.weights), 2)
self.assertEqual(self.n10.get_shape().as_list()[1:], [12, 12, 64])
def test_layer_n11(self):
# self.assertEqual(len(self.n7.all_layers), 8)
# self.assertEqual(len(self.n7.all_params), 13)
# self.assertEqual(self.n7.count_params(), 74880)
self.assertEqual(len(self.n11._info[0].layer.weights), 2)
self.assertEqual(self.n11.get_shape().as_list()[1:], [12, 12, 32])
def test_layer_n12(self):
# self.assertEqual(len(self.n7.all_layers), 8)
# self.assertEqual(len(self.n7.all_params), 13)
# self.assertEqual(self.n7.count_params(), 74880)
self.assertEqual(len(self.n12._info[0].layer.weights), 2)
self.assertEqual(self.n12.get_shape().as_list()[1:], [12, 12, 64])
def test_layer_n13(self):
# self.assertEqual(len(self.n7.all_layers), 8)
# self.assertEqual(len(self.n7.all_params), 13)
# self.assertEqual(self.n7.count_params(), 74880)
self.assertEqual(len(self.n13._info[0].layer.weights), 2)
self.assertEqual(self.n13.get_shape().as_list()[1:], [12, 12, 32])
def test_layer_n14(self):
self.assertEqual(self.n14.get_shape().as_list()[1:], [24, 24, 8])
# def test_layer_n8(self):
#
# self.assertEqual(len(self.n8.all_layers), 9)
# self.assertEqual(len(self.n8.all_params), 15)
# self.assertEqual(self.n8.count_params(), 79520)
# self.assertEqual(self.n8.outputs.get_shape().as_list()[1:], [50, 50, 32])
#
# def test_layer_n9(self):
#
# self.assertEqual(len(self.n9.all_layers), 10)
# self.assertEqual(len(self.n9.all_params), 18)
# self.assertEqual(self.n9.count_params(), 80864)
# self.assertEqual(self.n9.outputs.get_shape().as_list()[1:], [48, 48, 32])
#
# def test_layer_n10(self):
#
# self.assertEqual(len(self.n10.all_layers), 11)
# self.assertEqual(len(self.n10.all_params), 20)
# self.assertEqual(self.n10.count_params(), 132128)
# self.assertEqual(self.n10.outputs.get_shape().as_list()[1:], [48, 48, 64])
#
# def test_layer_n11(self):
#
# self.assertEqual(len(self.n11.all_layers), 12)
# self.assertEqual(len(self.n11.all_params), 22)
# self.assertEqual(self.n11.count_params(), 150592)
# self.assertEqual(self.n11.outputs.get_shape().as_list()[1:], [96, 96, 32])
#
# def test_layer_n12(self):
#
# self.assertEqual(len(self.n12.all_layers), 13)
# self.assertEqual(len(self.n12.all_params), 24)
# self.assertEqual(self.n12.count_params(), 201856)
# self.assertEqual(self.n12.outputs.get_shape().as_list()[1:], [96, 96, 64])
class Layer_Convolution_3D_Test(CustomTestCase):
@classmethod
def setUpClass(cls):
print("\n#################################")
cls.batch_size = 5
cls.inputs_shape = [cls.batch_size, 20, 20, 20, 3]
cls.input_layer = Input(cls.inputs_shape, name='input_layer')
cls.n1 = tl.layers.Conv3dLayer(
shape=(2, 2, 2, 3, 32), strides=(1, 2, 2, 2, 1)
)(cls.input_layer)
cls.n2 = tl.layers.DeConv3dLayer(
shape=(2, 2, 2, 128, 32), outputs_shape=(cls.batch_size, 20, 20, 20, 128), strides=(1, 2, 2, 2, 1)
)(cls.n1)
cls.n3 = tl.layers.Conv3d(
n_filter=64, filter_size=(3, 3, 3), strides=(3, 3, 3), act=tf.nn.relu, b_init=None, in_channels=128, name='conv3d_no_bias'
)(cls.n2)
cls.n4 = tl.layers.DeConv3d(
n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2)
)(cls.n3)
cls.model = Model(inputs=cls.input_layer, outputs=cls.n4)
print("Testing Conv3d model: \n", cls.model)
@classmethod
def tearDownClass(cls):
pass
# tf.reset_default_graph()
def test_layer_n1(self):
# self.assertEqual(len(self.n1.all_layers), 2)
# self.assertEqual(len(self.n1.all_params), 2)
# self.assertEqual(self.n1.count_params(), 800)
self.assertEqual(len(self.n1._info[0].layer.weights), 2)
self.assertEqual(self.n1.get_shape().as_list()[1:], [10, 10, 10, 32])
def test_layer_n2(self):
# self.assertEqual(len(self.n2.all_layers), 3)
# self.assertEqual(len(self.n2.all_params), 4)
# self.assertEqual(self.n2.count_params(), 33696)
self.assertEqual(len(self.n2._info[0].layer.weights), 2)
self.assertEqual(self.n2.get_shape().as_list()[1:], [20, 20, 20, 128])
def test_layer_n3(self):
# self.assertEqual(len(self.n3.all_layers), 4)
# self.assertEqual(len(self.n3.all_params), 6)
# self.assertEqual(self.n3.count_params(), 144320)
self.assertEqual(len(self.n3._info[0].layer.weights), 1) # b_init is None
self.assertEqual(self.n3.get_shape().as_list()[1:], [7, 7, 7, 64])
def test_layer_n4(self):
# self.assertEqual(len(self.n3.all_layers), 4)
# self.assertEqual(len(self.n3.all_params), 6)
# self.assertEqual(self.n3.count_params(), 144320)
self.assertEqual(len(self.n4._info[0].layer.weights), 2)
self.assertEqual(self.n4.get_shape().as_list()[1:], [14, 14, 14, 32])
# class Layer_DeformableConvolution_Test(CustomTestCase):
#
# @classmethod
# def setUpClass(cls):
#
# cls.batch_size = 5
# cls.inputs_shape = [cls.batch_size, 299, 299, 3]
# cls.input_layer = Input(cls.inputs_shape, name='input_layer')
#
# offset1 = tl.layers.Conv2d(
# 18, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', name='offset1'
# )(cls.input_layer)
# cls.net1 = tl.layers.DeformableConv2d(
# offset1, 32, (3, 3), act=tf.nn.relu, name='deformable1'
# )(cls.input_layer)
#
# offset2 = tl.layers.Conv2d(
# 18, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', name='offset2'
# )(cls.net1)
# cls.net2 = tl.layers.DeformableConv2d(
# offset2, 64, (3, 3), act=tf.nn.relu, name='deformable2'
# )(cls.net1)
#
# @classmethod
# def tearDownClass(cls):
# pass
#
# def test_layer_n1(self):
#
# self.assertEqual(len(self.net1.all_layers), 2)
# self.assertEqual(len(self.net1.all_params), 2)
# self.assertEqual(self.net1.count_params(), 896)
# self.assertEqual(self.net1.outputs.get_shape().as_list()[1:], [299, 299, 32])
#
# def test_layer_n2(self):
#
# self.assertEqual(len(self.net2.all_layers), 3)
# self.assertEqual(len(self.net2.all_params), 4)
# self.assertEqual(self.net2.count_params(), 19392)
# self.assertEqual(self.net2.outputs.get_shape().as_list()[1:], [299, 299, 64])
if __name__ == '__main__':
tl.logging.set_verbosity(tl.logging.DEBUG)
unittest.main()
| [] | [] | ["TF_CPP_MIN_LOG_LEVEL"] | [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 |
containers/tensorflow-training/train.py | from __future__ import absolute_import, division, print_function
import os
import math
import hmac
import json
import hashlib
import argparse
from random import shuffle, random
from pathlib2 import Path
import numpy as np
import tensorflow as tf
from tensorflow.data import Dataset
from tensorflow.python.lib.io import file_io
import pandas as pd
import mlflow
import mlflow.tensorflow
# Initially derived from https://github.com/kaizentm/kubemlops
def info(msg, char="#", width=75):
print("")
print(char * width)
print(char + " %0*s" % ((-1 * width) + 5, msg) + char)
print(char * width)
def check_dir(path):
if not os.path.exists(path):
os.makedirs(path)
return Path(path).resolve(strict=False)
def process_image(path, label, img_size):
img_raw = tf.io.read_file(path)
img_tensor = tf.image.decode_jpeg(img_raw, channels=3)
img_final = tf.image.resize(img_tensor, [img_size, img_size]) / 255
return img_final, label
def load_dataset(base_path, dset, split=None):
if split is None:
split = [8, 1, 1]
splits = np.array(split) / np.sum(np.array(split))
labels = {}
for (_, dirs, _) in os.walk(base_path):
print('found {}'.format(dirs))
labels = {k: v for (v, k) in enumerate(dirs)}
print('using {}'.format(labels))
break
print('loading dataset from {}'.format(dset))
with open(dset, 'r') as d:
data = [(str(Path(line.strip()).absolute()),
labels[Path(line.strip()).parent.name]) for line in d.readlines()] # noqa: E501
print('dataset size: {}\nsuffling data...'.format(len(data)))
shuffle(data)
print('splitting data...')
train_idx = int(len(data) * splits[0])
return data[:train_idx]
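# Editor's note (illustrative, not part of the original script): with the
# default split of [8, 1, 1], `splits` becomes [0.8, 0.1, 0.1] and only the
# first 80% of the shuffled rows are returned as the training slice:
#
#   data = list(range(10))
#   train_idx = int(len(data) * 0.8)  # -> 8
#   train = data[:train_idx]          # first 8 shuffled items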
# @print_info
def run(
dpath,
img_size=160,
epochs=10,
batch_size=32,
learning_rate=0.0001,
output='model',
dset=None):
global g_image_size
g_image_size = img_size
img_shape = (img_size, img_size, 3)
info('Loading Data Set')
train = load_dataset(dpath, dset)
train_data, train_labels = zip(*train)
train_ds = Dataset.zip((Dataset.from_tensor_slices(list(train_data)),
Dataset.from_tensor_slices(list(train_labels)),
Dataset.from_tensor_slices([img_size]*len(train_data)))) # noqa: E501
print(train_ds)
train_ds = train_ds.map(map_func=process_image,
num_parallel_calls=5)
train_ds = train_ds.apply(tf.data.experimental.ignore_errors())
train_ds = train_ds.batch(batch_size)
train_ds = train_ds.prefetch(buffer_size=5)
train_ds = train_ds.repeat()
info('Creating Model')
base_model = tf.keras.applications.MobileNetV2(input_shape=img_shape,
include_top=False,
weights='imagenet')
base_model.trainable = True
model = tf.keras.Sequential([
base_model,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(optimizer=tf.keras.optimizers.Adam(lr=learning_rate),
loss='binary_crossentropy',
metrics=['accuracy'])
model.summary()
info('Training')
steps_per_epoch = math.ceil(len(train) / batch_size)
mlflow.tensorflow.autolog()
model.fit(train_ds, epochs=epochs, steps_per_epoch=steps_per_epoch)
# Log metric
accuracy = random() # dummy score
metric = {
'name': 'accuracy-score',
'numberValue': accuracy,
'format': "PERCENTAGE",
}
metrics = {
'metrics': [metric]}
mlflow.log_metrics({"accuracy": accuracy})
info('Writing Pipeline Metric')
with file_io.FileIO('/mlpipeline-metrics.json', 'w') as f:
json.dump(metrics, f)
info('Saving Model')
output = check_dir(output)
print('Serializing into saved_model format')
tf.saved_model.save(model, str(output))
print('Done!')
file_output = str(Path(output).joinpath('latest.h5'))
print('Serializing h5 model to:\n{}'.format(file_output))
model.save(file_output)
return generate_hash(file_output, 'kf_pipeline')
def generate_hash(dfile, key):
print('Generating hash for {}'.format(dfile))
m = hmac.new(str.encode(key), digestmod=hashlib.sha256)
BUF_SIZE = 65536
with open(str(dfile), 'rb') as myfile:
while True:
data = myfile.read(BUF_SIZE)
if not data:
break
m.update(data)
return m.hexdigest()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='transfer learning for binary image task')
parser.add_argument('-s', '--base_path',
help='directory to base data', default='../../data')
parser.add_argument(
'-d', '--data', help='directory to training and test data', default='train') # noqa: E501
parser.add_argument(
'-e', '--epochs', help='number of epochs', default=10, type=int)
parser.add_argument('-b', '--batch', help='batch size',
default=32, type=int)
parser.add_argument('-i', '--image_size',
help='image size', default=160, type=int)
parser.add_argument('-l', '--lr', help='learning rate',
default=0.0001, type=float)
parser.add_argument('-o', '--outputs',
help='output directory', default='model')
parser.add_argument('-f', '--dataset', help='cleaned data listing')
args = parser.parse_args()
info('Using TensorFlow v.{}'.format(tf.__version__))
data_path = Path(args.base_path).joinpath(args.data).resolve(strict=False)
target_path = Path(args.base_path).resolve(
strict=False).joinpath(args.outputs)
dataset = Path(args.base_path).joinpath(args.dataset)
image_size = args.image_size
params = Path(args.base_path).joinpath('params.json')
args = {
"dpath": str(data_path),
"img_size": image_size,
"epochs": args.epochs,
"batch_size": args.batch,
"learning_rate": args.lr,
"output": str(target_path),
"dset": str(dataset)
}
dataset_signature = generate_hash(dataset, 'kf_pipeline')
for i in args:
print('{} => {}'.format(i, args[i]))
mlflow.set_experiment("kubeflow-mlops")
mlflow.set_tag("external_run_id", os.getenv("RUN_ID"))
model_signature = run(**args)
args['dataset_signature'] = dataset_signature.upper()
args['model_signature'] = model_signature.upper()
args['model_type'] = 'tfv2-MobileNetV2'
print('Writing out params...', end='')
with open(str(params), 'w') as f:
json.dump(args, f)
print(' Saved to {}'.format(str(params)))
info('Log Training Parameters')
    parameters = pd.read_json(str(params), typ='series')
metadata = {
'outputs': [{
'type': 'table',
'storage': 'inline',
'format': 'csv',
'header': ['Name', 'Value'],
            'source': parameters.to_csv()
}]
}
with open('/mlpipeline-ui-metadata.json', 'w') as f:
json.dump(metadata, f)
| [] | [] | ["RUN_ID"] | [] | ["RUN_ID"] | python | 1 | 0 |
taskmanager/migrations/0001_initial.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-29 02:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('cmdb', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Cron_Config',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cron_minute', models.CharField(default=None, max_length=10, verbose_name='分')),
('cron_hour', models.CharField(default=None, max_length=10, verbose_name='时')),
('cron_day', models.CharField(default=None, max_length=10, verbose_name='天')),
('cron_week', models.CharField(default=None, max_length=10, verbose_name='周')),
('cron_month', models.CharField(default=None, max_length=10, verbose_name='月')),
('cron_user', models.CharField(default=None, max_length=50, verbose_name='任务用户')),
('cron_name', models.CharField(default=None, max_length=100, verbose_name='任务名称')),
('cron_desc', models.CharField(blank=True, default=None, max_length=200, null=True, verbose_name='任务描述')),
('cron_command', models.CharField(default=None, max_length=200, verbose_name='任务参数')),
('cron_script', models.FileField(blank=True, default=None, null=True, upload_to='./upload/cron/', verbose_name='脚本路径')),
('cron_script_path', models.CharField(blank=True, default=None, max_length=100, null=True, verbose_name='脚本路径')),
('cron_status', models.SmallIntegerField(default=None, verbose_name='任务状态')),
('cron_server', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cmdb.Server_Assets')),
],
options={
'verbose_name': '任务配置表',
'db_table': 'opssystem_cron_config',
'permissions': (('can_read_cron_config', '读取任务配置权限'), ('can_change_cron_config', '更改任务配置权限'), ('can_add_cron_config', '添加任务配置权限'), ('can_delete_cron_config', '删除任务配置权限')),
'verbose_name_plural': '任务配置表',
},
),
migrations.CreateModel(
name='Log_Cron_Config',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cron_id', models.IntegerField(blank=True, default=None, null=True, verbose_name='id')),
('cron_user', models.CharField(default=None, max_length=50, verbose_name='操作用户')),
('cron_name', models.CharField(default=None, max_length=100, verbose_name='名称')),
('cron_content', models.CharField(default=None, max_length=100)),
('cron_server', models.CharField(default=None, max_length=100)),
('create_time', models.DateTimeField(auto_now_add=True, null=True, verbose_name='执行时间')),
],
options={
'verbose_name': '任务配置操作记录表',
'db_table': 'opssystem_log_cron_config',
'verbose_name_plural': '任务配置操作记录表',
},
),
migrations.AlterUniqueTogether(
name='cron_config',
unique_together=set([('cron_name', 'cron_server', 'cron_user')]),
),
]
| [] | [] | [] | [] | [] | python | null | null |
pkg/scalers/azure_servicebus_scaler_test.go | package scalers
import (
"context"
"os"
"testing"
)
const (
topicName = "testtopic"
subscriptionName = "testsubscription"
queueName = "testqueue"
connectionSetting = "none"
namespaceName = "ns"
)
type parseServiceBusMetadataTestData struct {
metadata map[string]string
isError bool
entityType EntityType
authParams map[string]string
podIdentity string
}
// not testing connections so it doesn't matter what the resolved env value is for this
var sampleResolvedEnv = map[string]string{
connectionSetting: "none",
}
var parseServiceBusMetadataDataset = []parseServiceBusMetadataTestData{
{map[string]string{}, true, None, map[string]string{}, ""},
// properly formed queue
{map[string]string{"queueName": queueName, "connection": connectionSetting}, false, Queue, map[string]string{}, ""},
// properly formed topic & subscription
{map[string]string{"topicName": topicName, "subscriptionName": subscriptionName, "connection": connectionSetting}, false, Subscription, map[string]string{}, ""},
// queue and topic specified
{map[string]string{"queueName": queueName, "topicName": topicName, "connection": connectionSetting}, true, None, map[string]string{}, ""},
// queue and subscription specified
{map[string]string{"queueName": queueName, "subscriptionName": subscriptionName, "connection": connectionSetting}, true, None, map[string]string{}, ""},
// topic but no subscription specified
{map[string]string{"topicName": topicName, "connection": connectionSetting}, true, None, map[string]string{}, ""},
// subscription but no topic specified
{map[string]string{"subscriptionName": subscriptionName, "connection": connectionSetting}, true, None, map[string]string{}, ""},
// connection not set
{map[string]string{"queueName": queueName}, true, Queue, map[string]string{}, ""},
// connection set in auth params
{map[string]string{"queueName": queueName}, false, Queue, map[string]string{"connection": connectionSetting}, ""},
// pod identity but missing namespace
{map[string]string{"queueName": queueName}, true, Queue, map[string]string{}, "azure"},
// correct pod identity
{map[string]string{"queueName": queueName, "namespace": namespaceName}, false, Queue, map[string]string{}, "azure"},
}
var getServiceBusLengthTestScalers = []azureServiceBusScaler{
{metadata: &azureServiceBusMetadata{
entityType: Queue,
queueName: queueName,
}},
{metadata: &azureServiceBusMetadata{
entityType: Subscription,
topicName: topicName,
subscriptionName: subscriptionName,
}},
}
func TestParseServiceBusMetadata(t *testing.T) {
for _, testData := range parseServiceBusMetadataDataset {
meta, err := parseAzureServiceBusMetadata(sampleResolvedEnv, testData.metadata, testData.authParams, testData.podIdentity)
if err != nil && !testData.isError {
t.Error("Expected success but got error", err)
}
if testData.isError && err == nil {
t.Error("Expected error but got success")
}
if meta != nil && meta.entityType != testData.entityType {
t.Errorf("Expected entity type %v but got %v\n", testData.entityType, meta.entityType)
}
}
}
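// Editor's note (illustrative, not part of the upstream tests): the length
// test below is gated on an environment variable, so a live run against a
// real namespace looks roughly like:
//
//	SERVICEBUS_CONNECTION_STRING='Endpoint=sb://...' go test -run TestGetServiceBusLength ./pkg/scalers
//
// Without the variable set, only the error path is exercised.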
func TestGetServiceBusLength(t *testing.T) {
t.Log("This test will use the environment variable SERVICEBUS_CONNECTION_STRING if it is set")
t.Log("If set, it will connect to the servicebus namespace specified by the connection string & check:")
t.Logf("\tQueue '%s' has 1 message\n", queueName)
t.Logf("\tTopic '%s' with subscription '%s' has 1 message\n", topicName, subscriptionName)
connection_string := os.Getenv("SERVICEBUS_CONNECTION_STRING")
for _, scaler := range getServiceBusLengthTestScalers {
if connection_string != "" {
// Can actually test that numbers return
scaler.metadata.connection = connection_string
length, err := scaler.GetAzureServiceBusLength(context.TODO())
if err != nil {
t.Errorf("Expected success but got error: %s", err)
}
if length != 1 {
t.Errorf("Expected 1 message, got %d", length)
}
} else {
// Just test error message
length, err := scaler.GetAzureServiceBusLength(context.TODO())
if length != -1 || err == nil {
t.Errorf("Expected error but got success")
}
}
}
}
| ["\"SERVICEBUS_CONNECTION_STRING\""] | [] | ["SERVICEBUS_CONNECTION_STRING"] | [] | ["SERVICEBUS_CONNECTION_STRING"] | go | 1 | 0 |
cli/internal/nodeprovider/local/createNodeIfNotExists_unix.go | // +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris
package local
import (
"context"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"syscall"
"github.com/opctl/opctl/sdks/go/node"
"github.com/pkg/errors"
)
func (np nodeProvider) CreateNodeIfNotExists(ctx context.Context) (node.Node, error) {
nodes, err := np.ListNodes()
if err != nil {
return nil, err
}
apiClientNode, err := newAPIClientNode(np.listenAddress)
if err != nil {
return nil, err
}
if len(nodes) > 0 {
return apiClientNode, nil
}
pathToOpctlBin, err := os.Executable()
if err != nil {
return nil, err
}
pathToOpctlBin, err = filepath.EvalSymlinks(pathToOpctlBin)
if err != nil {
return nil, err
}
nodeCmd := exec.Command(
pathToOpctlBin,
"--data-dir",
np.dataDir.Path(),
"--listen-address",
np.listenAddress,
"node",
"create",
)
// don't inherit env; some things like jenkins track and kill processes via injecting env vars
nodeCmd.Env = []string{
fmt.Sprintf("HOME=%s", os.Getenv("HOME")),
}
// ensure node gets it's own process group
nodeCmd.SysProcAttr = &syscall.SysProcAttr{
Setpgid: true,
}
nodeLogFilePath := filepath.Join(np.dataDir.Path(), "node.log")
nodeLogFile, err := os.Create(nodeLogFilePath)
if err != nil {
return nil, err
}
nodeCmd.Stderr = nodeLogFile
nodeCmd.Stdout = nodeLogFile
if err := nodeCmd.Start(); err != nil {
return nil, err
}
err = apiClientNode.Liveness(ctx)
nodeLogBytes, _ := ioutil.ReadFile(nodeLogFilePath)
fmt.Println(string(nodeLogBytes))
if err != nil {
return nil, errors.Wrap(err, "failed to create daemonized opctl node")
}
return apiClientNode, nil
}
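// exampleIsolatedCommand is an editor-added sketch (not part of the original
// provider) of the two isolation tricks above in miniature: the child gets a
// minimal environment, so supervisors such as Jenkins cannot track it via
// injected env vars, and its own process group, so signals aimed at the
// parent's group do not kill it.
func exampleIsolatedCommand(path string) *exec.Cmd {
	cmd := exec.Command(path)
	cmd.Env = []string{fmt.Sprintf("HOME=%s", os.Getenv("HOME"))} // minimal env only
	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}         // new process group
	return cmd
}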
| ["\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 |
commands/root.go | package commands
import (
"io/ioutil"
"os"
"path/filepath"
"time"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"gopkg.in/yaml.v2"
)
const (
updateLink = "https://github.com/eugene-babichenko/zpm"
configKeyPlugins = "plugins"
configKeyLoggingLevel = "logging_level"
configKeyOnLoadInstallMissingPlugins = "on_load.install_missing_plugins"
configKeyOnLoadCheckForUpdates = "on_load.check_for_updates"
configKeyOnLoadUpdateCheckPeriod = "on_load.update_check_period"
)
var (
Version string
appConfigFile string
rootDir string
pluginsSpecs []string
updateCheckPeriod time.Duration
RootCmd = &cobra.Command{
Use: "zpm [command]",
Short: "A simple zsh plugin manager",
}
)
func Execute() {
if err := RootCmd.Execute(); err != nil {
log.Fatalf("failed to execute the command: %s", err)
}
}
func init() {
cobra.OnInitialize(initConfig)
RootCmd.PersistentFlags().StringVar(
&appConfigFile,
"config",
"",
"Config file location (default: $HOME/.zpm.yaml)",
)
}
// prefixedWriter prefixes each log line with "zpm: "
type prefixedWriter struct{}
func (prefixedWriter) Write(p []byte) (n int, err error) {
// Writing logs to stderr is workaround. In `source <(zpm load)` the
// `<(...)` consumes only what is written to stdout. Thus, writing logs to
// stderr allows us to have nice logs while loading plugins.
nPrefix, err := os.Stderr.Write([]byte("zpm: "))
if err != nil {
return nPrefix, err
}
np, err := os.Stderr.Write(p)
return nPrefix + np, err
}
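// exampleStderrLogging is an editor-added, hypothetical snippet (not part of
// the original zpm source) demonstrating the writer above: log lines land on
// stderr with a "zpm: " prefix, leaving stdout free for the zsh code that
// `source <(zpm load)` consumes.
func exampleStderrLogging() {
	log.SetOutput(prefixedWriter{})
	log.Info("plugins loaded")                    // stderr: "zpm: ... plugins loaded"
	os.Stdout.WriteString("echo plugins ready\n") // stdout: consumed by <(...)
}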
func getHomeDir() (string, error) {
// check if we are running inside of a snapcraft package
if _, isSnap := os.LookupEnv("SNAP_NAME"); isSnap {
// $HOME for snapcraft packages is not equal to the user homedir path,
// so we build a path according to filesystem hierarchy standard (FHS)
username := os.Getenv("USER")
if username == "root" {
return "/root", nil
}
return filepath.Join("/home", username), nil
}
home, err := os.UserHomeDir()
if err != nil {
return "", err
}
return home, nil
}
func initConfig() {
formatter := &log.TextFormatter{}
formatter.DisableLevelTruncation = true
formatter.DisableTimestamp = true
// this is required to have colored output with a custom writer
formatter.ForceColors = true
log.SetFormatter(formatter)
log.SetOutput(prefixedWriter{})
viper.SetConfigName(".zpm")
viper.AddConfigPath("$HOME")
viper.SetDefault(configKeyPlugins, []string{})
viper.SetDefault(configKeyLoggingLevel, "info")
viper.SetDefault(configKeyOnLoadInstallMissingPlugins, true)
viper.SetDefault(configKeyOnLoadCheckForUpdates, true)
viper.SetDefault(configKeyOnLoadUpdateCheckPeriod, "24h")
	home, err := getHomeDir()
	if err != nil {
		log.Fatalf("failed to locate the user home directory: %s", err)
	}
	rootDir = filepath.Join(home, ".zpm_plugins")
	if err := os.MkdirAll(rootDir, os.ModePerm); err != nil && !os.IsExist(err) {
		log.Fatalf("while creating the plugin storage directory: %s", err)
	}
if err := viper.ReadInConfig(); err != nil {
if _, ok := err.(viper.ConfigFileNotFoundError); !ok {
log.Fatalf("failed to read configuration: %s", err)
}
// write defaults
configFilePath := filepath.Join(home, ".zpm.yaml")
allSettings := viper.AllSettings()
allSettingsBytes, err := yaml.Marshal(allSettings)
if err != nil {
log.Fatalf("failed to serialize settings: %s", err)
}
if err := ioutil.WriteFile(configFilePath, allSettingsBytes, os.ModePerm); err != nil {
log.Fatalf("failed to write the default config to the drive: %s", err)
}
}
pluginsSpecs = viper.GetStringSlice(configKeyPlugins)
level, err := log.ParseLevel(viper.GetString(configKeyLoggingLevel))
if err != nil {
log.Errorf("failed to set the logging level: %s", err)
}
log.SetLevel(level)
updateCheckPeriod, err = time.ParseDuration(viper.GetString(configKeyOnLoadUpdateCheckPeriod))
if err != nil {
log.Fatalf("failed to parse OnLoad.UpdateCheckPeriod")
}
}
| ["\"USER\""] | [] | ["USER"] | [] | ["USER"] | go | 1 | 0 |
integration_test.go | // Copyright (C) 2015 The GoHBase Authors. All rights reserved.
// This file is part of GoHBase.
// Use of this source code is governed by the Apache License 2.0
// that can be found in the COPYING file.
// +build integration
package gohbase_test
import (
"bytes"
"context"
"flag"
"fmt"
"io"
"os"
"os/exec"
"reflect"
"strconv"
"strings"
"sync"
"testing"
"time"
"math"
log "github.com/sirupsen/logrus"
"github.com/tsuna/gohbase"
"github.com/tsuna/gohbase/filter"
"github.com/tsuna/gohbase/hrpc"
"github.com/tsuna/gohbase/pb"
"google.golang.org/protobuf/proto"
)
var host = flag.String("host", "localhost", "The location where HBase is running")
var table string
func init() {
table = fmt.Sprintf("gohbase_test_%d", time.Now().UnixNano())
}
// CreateTable creates the given table with the given families
func CreateTable(client gohbase.AdminClient, table string, cFamilies []string) error {
// If the table exists, delete it
DeleteTable(client, table)
// Don't check the error, since one will be returned if the table doesn't
// exist
cf := make(map[string]map[string]string, len(cFamilies))
for _, f := range cFamilies {
cf[f] = nil
}
// pre-split table for reverse scan test of region changes
keySplits := [][]byte{[]byte("REVTEST-100"), []byte("REVTEST-200"), []byte("REVTEST-300")}
ct := hrpc.NewCreateTable(context.Background(), []byte(table), cf, hrpc.SplitKeys(keySplits))
if err := client.CreateTable(ct); err != nil {
return err
}
return nil
}
// DeleteTable disables and drops the given table using admin RPCs.
func DeleteTable(client gohbase.AdminClient, table string) error {
dit := hrpc.NewDisableTable(context.Background(), []byte(table))
err := client.DisableTable(dit)
if err != nil {
if !strings.Contains(err.Error(), "TableNotEnabledException") {
return err
}
}
det := hrpc.NewDeleteTable(context.Background(), []byte(table))
err = client.DeleteTable(det)
if err != nil {
return err
}
return nil
}
// LaunchRegionServers uses the script local-regionservers.sh to create new
// RegionServers. Fails silently if server already exists.
// Ex. LaunchRegionServers([]string{"2", "3"}) launches two servers with id=2,3
func LaunchRegionServers(servers []string) {
hh := os.Getenv("HBASE_HOME")
servers = append([]string{"start"}, servers...)
exec.Command(hh+"/bin/local-regionservers.sh", servers...).Run()
}
// StopRegionServers uses the script local-regionservers.sh to stop existing
// RegionServers. Fails silently if server isn't running.
func StopRegionServers(servers []string) {
hh := os.Getenv("HBASE_HOME")
servers = append([]string{"stop"}, servers...)
exec.Command(hh+"/bin/local-regionservers.sh", servers...).Run()
}
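// exampleBounceRegionServer is an editor-added sketch (not part of the
// original tests) of a typical use of the two helpers above around a
// region-movement scenario; the server id "2" is illustrative.
func exampleBounceRegionServer() {
	LaunchRegionServers([]string{"2"})
	defer StopRegionServers([]string{"2"})
	// ... issue requests while regions are reassigned ...
}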
func TestMain(m *testing.M) {
flag.Parse()
if host == nil {
panic("Host is not set!")
}
log.SetLevel(log.DebugLevel)
ac := gohbase.NewAdminClient(*host)
var err error
for {
err = CreateTable(ac, table, []string{"cf", "cf1", "cf2"})
if err != nil &&
(strings.Contains(err.Error(), "org.apache.hadoop.hbase.PleaseHoldException") ||
strings.Contains(err.Error(),
"org.apache.hadoop.hbase.ipc.ServerNotRunningYetException")) {
time.Sleep(time.Second)
continue
} else if err != nil {
panic(err)
} else {
break
}
}
res := m.Run()
err = DeleteTable(ac, table)
if err != nil {
panic(err)
}
os.Exit(res)
}
// TestClusterStatus verifies retrieval of the cluster status.
func TestClusterStatus(t *testing.T) {
ac := gohbase.NewAdminClient(*host)
stats, err := ac.ClusterStatus()
if err != nil {
t.Fatal(err)
}
	// Sanity check the data coming back
if len(stats.GetMaster().GetHostName()) == 0 {
t.Fatal("Master hostname is empty in ClusterStatus")
}
}
func TestGet(t *testing.T) {
key := "row1"
val := []byte("1")
headers := map[string][]string{"cf": nil}
if host == nil {
t.Fatal("Host is not set!")
}
c := gohbase.NewClient(*host)
defer c.Close()
err := insertKeyValue(c, key, "cf", val)
if err != nil {
t.Errorf("Put returned an error: %v", err)
}
get, err := hrpc.NewGetStr(context.Background(), table, key, hrpc.Families(headers))
if err != nil {
t.Fatalf("Failed to create Get request: %s", err)
}
rsp, err := c.Get(get)
if err != nil {
t.Errorf("Get returned an error: %v", err)
}
rsp_value := rsp.Cells[0].Value
if !bytes.Equal(rsp_value, val) {
t.Errorf("Get returned an incorrect result. Expected: %v, Got: %v",
val, rsp_value)
}
get.ExistsOnly()
rsp, err = c.Get(get)
if err != nil {
t.Errorf("Get returned an error: %v", err)
} else if !*rsp.Exists {
t.Error("Get claimed that our row didn't exist")
}
ctx, _ := context.WithTimeout(context.Background(), 0)
get, err = hrpc.NewGetStr(ctx, table, key, hrpc.Families(headers))
if err != nil {
t.Fatalf("Failed to create Get request: %s", err)
}
_, err = c.Get(get)
if err != context.DeadlineExceeded {
t.Errorf("Get ignored the deadline")
}
}
func TestGetDoesntExist(t *testing.T) {
key := "row1.5"
c := gohbase.NewClient(*host)
defer c.Close()
headers := map[string][]string{"cf": nil}
	get, err := hrpc.NewGetStr(context.Background(), table, key, hrpc.Families(headers))
	if err != nil {
		t.Fatalf("Failed to create Get request: %s", err)
	}
	rsp, err := c.Get(get)
if err != nil {
t.Errorf("Get returned an error: %v", err)
} else if results := len(rsp.Cells); results != 0 {
t.Errorf("Get expected 0 cells. Received: %d", results)
}
get.ExistsOnly()
rsp, err = c.Get(get)
if err != nil {
t.Errorf("Get returned an error: %v", err)
} else if *rsp.Exists {
t.Error("Get claimed that our non-existent row exists")
}
}
func TestMutateGetTableNotFound(t *testing.T) {
c := gohbase.NewClient(*host)
defer c.Close()
key := "whatever"
table := "NonExistentTable"
headers := map[string][]string{"cf": nil}
get, err := hrpc.NewGetStr(context.Background(),
table, key, hrpc.Families(headers))
if err != nil {
t.Fatalf("NewGetStr returned an error: %v", err)
}
_, err = c.Get(get)
if err != gohbase.TableNotFound {
t.Errorf("Get returned unexpected error: %v", err)
}
values := map[string]map[string][]byte{"cf": map[string][]byte{"a": []byte("1")}}
putRequest, err := hrpc.NewPutStr(context.Background(), table, key, values)
if err != nil {
t.Fatalf("NewPutStr returned an error: %v", err)
}
_, err = c.Put(putRequest)
if err != gohbase.TableNotFound {
t.Errorf("Put returned an unexpected error: %v", err)
}
}
func TestGetBadColumnFamily(t *testing.T) {
key := "row1.625"
c := gohbase.NewClient(*host)
defer c.Close()
err := insertKeyValue(c, key, "cf", []byte("Bad!"))
if err != nil {
t.Errorf("Put returned an error: %v", err)
}
families := map[string][]string{"badcf": nil}
	get, err := hrpc.NewGetStr(context.Background(), table, key, hrpc.Families(families))
	if err != nil {
		t.Fatalf("Failed to create Get request: %s", err)
	}
	rsp, err := c.Get(get)
if err == nil {
t.Errorf("Get didn't return an error! (It should have)")
}
if rsp != nil {
t.Errorf("Get expected no result. Received: %v", rsp)
}
}
func TestGetMultipleCells(t *testing.T) {
key := "row1.75"
c := gohbase.NewClient(*host, gohbase.FlushInterval(time.Millisecond*2))
defer c.Close()
err := insertKeyValue(c, key, "cf", []byte("cf"))
if err != nil {
t.Errorf("Put returned an error: %v", err)
}
err = insertKeyValue(c, key, "cf2", []byte("cf2"))
if err != nil {
t.Errorf("Put returned an error: %v", err)
}
families := map[string][]string{"cf": nil, "cf2": nil}
	get, err := hrpc.NewGetStr(context.Background(), table, key, hrpc.Families(families))
	if err != nil {
		t.Fatalf("Failed to create Get request: %s", err)
	}
	rsp, err := c.Get(get)
	if err != nil {
		t.Fatalf("Get returned an error: %v", err)
	}
	cells := rsp.Cells
num_results := len(cells)
if num_results != 2 {
t.Errorf("Get expected 2 cells. Received: %d", num_results)
}
for _, cell := range cells {
if !bytes.Equal(cell.Family, cell.Value) {
t.Errorf("Get returned an incorrect result. Expected: %v, Received: %v",
cell.Family, cell.Value)
}
}
}
func TestGetNonDefaultNamespace(t *testing.T) {
c := gohbase.NewClient(*host)
defer c.Close()
get, err := hrpc.NewGetStr(context.Background(), "hbase:namespace", "default")
if err != nil {
t.Fatalf("Failed to create Get request: %s", err)
}
rsp, err := c.Get(get)
if err != nil {
t.Fatalf("Get returned an error: %v", err)
}
if !bytes.Equal(rsp.Cells[0].Family, []byte("info")) {
t.Errorf("Got unexpected column family: %q", rsp.Cells[0].Family)
}
}
func TestPut(t *testing.T) {
key := "row2"
values := map[string]map[string][]byte{"cf": map[string][]byte{"a": []byte("1")}}
if host == nil {
t.Fatal("Host is not set!")
}
c := gohbase.NewClient(*host)
defer c.Close()
putRequest, err := hrpc.NewPutStr(context.Background(), table, key, values)
if err != nil {
t.Errorf("NewPutStr returned an error: %v", err)
}
_, err = c.Put(putRequest)
if err != nil {
t.Errorf("Put returned an error: %v", err)
}
ctx, _ := context.WithTimeout(context.Background(), 0)
	putRequest, err = hrpc.NewPutStr(ctx, table, key, values)
	if err != nil {
		t.Fatalf("NewPutStr returned an error: %v", err)
	}
	_, err = c.Put(putRequest)
if err != context.DeadlineExceeded {
t.Errorf("Put ignored the deadline")
}
}
func TestPutMultipleCells(t *testing.T) {
key := "row2.5"
values := map[string]map[string][]byte{"cf": map[string][]byte{}, "cf2": map[string][]byte{}}
values["cf"]["a"] = []byte("a")
values["cf"]["b"] = []byte("b")
values["cf2"]["a"] = []byte("a")
c := gohbase.NewClient(*host)
defer c.Close()
	putRequest, err := hrpc.NewPutStr(context.Background(), table, key, values)
	if err != nil {
		t.Fatalf("NewPutStr returned an error: %v", err)
	}
	_, err = c.Put(putRequest)
if err != nil {
t.Errorf("Put returned an error: %v", err)
}
families := map[string][]string{"cf": nil, "cf2": nil}
	get, err := hrpc.NewGetStr(context.Background(), table, key, hrpc.Families(families))
	if err != nil {
		t.Fatalf("Failed to create Get request: %s", err)
	}
	rsp, err := c.Get(get)
if err != nil {
t.Errorf("Get returned an error: %v", err)
}
cells := rsp.Cells
if len(cells) != 3 {
t.Errorf("Get expected 3 cells. Received: %d", len(cells))
}
for _, cell := range cells {
if !bytes.Equal(cell.Qualifier, cell.Value) {
t.Errorf("Get returned an incorrect result. Expected: %v, Received: %v",
cell.Qualifier, cell.Value)
}
}
}
func TestMultiplePutsGetsSequentially(t *testing.T) {
const num_ops = 100
keyPrefix := "row3"
headers := map[string][]string{"cf": nil}
c := gohbase.NewClient(*host, gohbase.FlushInterval(time.Millisecond))
defer c.Close()
err := performNPuts(keyPrefix, num_ops)
if err != nil {
t.Errorf("Put returned an error: %v", err)
}
for i := num_ops - 1; i >= 0; i-- {
key := keyPrefix + fmt.Sprintf("%d", i)
		get, err := hrpc.NewGetStr(context.Background(), table, key, hrpc.Families(headers))
		if err != nil {
			t.Fatalf("Failed to create Get request: %s", err)
		}
		rsp, err := c.Get(get)
if err != nil {
t.Errorf("Get returned an error: %v", err)
}
if len(rsp.Cells) != 1 {
t.Errorf("Incorrect number of cells returned by Get: %d", len(rsp.Cells))
}
rsp_value := rsp.Cells[0].Value
if !bytes.Equal(rsp_value, []byte(fmt.Sprintf("%d", i))) {
t.Errorf("Get returned an incorrect result. Expected: %v, Got: %v",
[]byte(fmt.Sprintf("%d", i)), rsp_value)
}
}
}
func TestMultiplePutsGetsParallel(t *testing.T) {
c := gohbase.NewClient(*host)
defer c.Close()
const n = 1000
var wg sync.WaitGroup
for i := 0; i < n; i++ {
key := fmt.Sprintf("%s_%d", t.Name(), i)
wg.Add(1)
go func() {
if err := insertKeyValue(c, key, "cf", []byte(key)); err != nil {
t.Error(key, err)
}
wg.Done()
}()
}
wg.Wait()
// All puts are complete. Now do the same for gets.
headers := map[string][]string{"cf": []string{"a"}}
for i := n - 1; i >= 0; i-- {
key := fmt.Sprintf("%s_%d", t.Name(), i)
wg.Add(1)
go func() {
defer wg.Done()
get, err := hrpc.NewGetStr(context.Background(), table, key, hrpc.Families(headers))
if err != nil {
t.Error(key, err)
return
}
rsp, err := c.Get(get)
if err != nil {
t.Error(key, err)
return
}
if len(rsp.Cells) == 0 {
t.Error(key, " got zero cells")
return
}
rsp_value := rsp.Cells[0].Value
if !bytes.Equal(rsp_value, []byte(key)) {
t.Errorf("expected %q, got %q", key, rsp_value)
}
}()
}
wg.Wait()
}
func TestTimestampIncreasing(t *testing.T) {
key := "row4"
c := gohbase.NewClient(*host)
defer c.Close()
var oldTime uint64 = 0
headers := map[string][]string{"cf": nil}
for i := 0; i < 10; i++ {
		if err := insertKeyValue(c, key, "cf", []byte("1")); err != nil {
			t.Fatalf("Put returned an error: %v", err)
		}
		get, err := hrpc.NewGetStr(context.Background(), table, key, hrpc.Families(headers))
		if err != nil {
			t.Fatalf("Failed to create Get request: %s", err)
		}
		rsp, err := c.Get(get)
if err != nil {
t.Errorf("Get returned an error: %v", err)
break
}
newTime := *rsp.Cells[0].Timestamp
if newTime <= oldTime {
t.Errorf("Timestamps are not increasing. Old Time: %v, New Time: %v",
oldTime, newTime)
}
oldTime = newTime
time.Sleep(time.Millisecond)
}
}
func TestPutTimestamp(t *testing.T) {
key := "TestPutTimestamp"
c := gohbase.NewClient(*host)
defer c.Close()
var putTs uint64 = 50
timestamp := time.Unix(0, int64(putTs*1e6))
err := insertKeyValue(c, key, "cf", []byte("1"), hrpc.Timestamp(timestamp))
if err != nil {
t.Fatalf("Put failed: %s", err)
}
	get, err := hrpc.NewGetStr(context.Background(), table, key,
		hrpc.Families(map[string][]string{"cf": nil}))
	if err != nil {
		t.Fatalf("Failed to create Get request: %s", err)
	}
	rsp, err := c.Get(get)
if err != nil {
t.Fatalf("Get failed: %s", err)
}
getTs := *rsp.Cells[0].Timestamp
if getTs != putTs {
t.Errorf("Timestamps are not the same. Put Time: %v, Get Time: %v",
putTs, getTs)
}
}
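// Editor's note (illustrative, not part of the original tests): HBase cell
// timestamps are milliseconds since the epoch. The tests therefore build a
// time.Time from a millisecond value via time.Unix(0, ms*1e6), e.g. ms=50
// becomes 50ms after the epoch, and expect the same millisecond value back
// in Cell.Timestamp.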
// TestDelete preps state with two column families, cf1 and cf2,
// each having 3 versions at timestamps 50, 51, 52
func TestDelete(t *testing.T) {
c := gohbase.NewClient(*host)
defer c.Close()
ts := uint64(50)
tests := []struct {
in func(string) (*hrpc.Mutate, error)
out []*hrpc.Cell
}{
{
// delete at the second version
in: func(key string) (*hrpc.Mutate, error) {
return hrpc.NewDelStr(context.Background(), table, key,
map[string]map[string][]byte{"cf1": map[string][]byte{"a": nil}},
hrpc.TimestampUint64(ts+1))
},
// should delete everything at and before the delete timestamp
out: []*hrpc.Cell{
&hrpc.Cell{
Family: []byte("cf1"),
Qualifier: []byte("a"),
Timestamp: proto.Uint64(ts + 2),
Value: []byte("v3"),
},
&hrpc.Cell{
Family: []byte("cf1"),
Qualifier: []byte("b"),
Timestamp: proto.Uint64(ts),
Value: []byte("v1"),
},
&hrpc.Cell{
Family: []byte("cf2"),
Qualifier: []byte("a"),
Timestamp: proto.Uint64(ts + 2),
Value: []byte("v3"),
},
&hrpc.Cell{
Family: []byte("cf2"),
Qualifier: []byte("a"),
Timestamp: proto.Uint64(ts + 1),
Value: []byte("v2"),
},
&hrpc.Cell{
Family: []byte("cf2"),
Qualifier: []byte("a"),
Timestamp: proto.Uint64(ts),
Value: []byte("v1"),
},
&hrpc.Cell{
Family: []byte("cf2"),
Qualifier: []byte("b"),
Timestamp: proto.Uint64(ts),
Value: []byte("v1"),
},
},
},
{
// delete at the second version
in: func(key string) (*hrpc.Mutate, error) {
return hrpc.NewDelStr(context.Background(), table, key,
map[string]map[string][]byte{"cf1": map[string][]byte{"a": nil}},
hrpc.TimestampUint64(ts+1), hrpc.DeleteOneVersion())
},
// should delete only the second version
out: []*hrpc.Cell{
&hrpc.Cell{
Family: []byte("cf1"),
Qualifier: []byte("a"),
Timestamp: proto.Uint64(ts + 2),
Value: []byte("v3"),
},
&hrpc.Cell{
Family: []byte("cf1"),
Qualifier: []byte("a"),
Timestamp: proto.Uint64(ts),
Value: []byte("v1"),
},
&hrpc.Cell{
Family: []byte("cf1"),
Qualifier: []byte("b"),
Timestamp: proto.Uint64(ts),
Value: []byte("v1"),
},
&hrpc.Cell{
Family: []byte("cf2"),
Qualifier: []byte("a"),
Timestamp: proto.Uint64(ts + 2),
Value: []byte("v3"),
},
&hrpc.Cell{
Family: []byte("cf2"),
Qualifier: []byte("a"),
Timestamp: proto.Uint64(ts + 1),
Value: []byte("v2"),
},
&hrpc.Cell{
Family: []byte("cf2"),
Qualifier: []byte("a"),
Timestamp: proto.Uint64(ts),
Value: []byte("v1"),
},
&hrpc.Cell{
Family: []byte("cf2"),
Qualifier: []byte("b"),
Timestamp: proto.Uint64(ts),
Value: []byte("v1"),
},
},
},
{
// delete the cf1 at and before ts + 1
in: func(key string) (*hrpc.Mutate, error) {
return hrpc.NewDelStr(context.Background(), table, key,
map[string]map[string][]byte{"cf1": nil},
hrpc.TimestampUint64(ts+1))
},
// should leave cf2 untouched
out: []*hrpc.Cell{
&hrpc.Cell{
Family: []byte("cf1"),
Qualifier: []byte("a"),
Timestamp: proto.Uint64(ts + 2),
Value: []byte("v3"),
},
&hrpc.Cell{
Family: []byte("cf2"),
Qualifier: []byte("a"),
Timestamp: proto.Uint64(ts + 2),
Value: []byte("v3"),
},
&hrpc.Cell{
Family: []byte("cf2"),
Qualifier: []byte("a"),
Timestamp: proto.Uint64(ts + 1),
Value: []byte("v2"),
},
&hrpc.Cell{
Family: []byte("cf2"),
Qualifier: []byte("a"),
Timestamp: proto.Uint64(ts),
Value: []byte("v1"),
},
&hrpc.Cell{
Family: []byte("cf2"),
Qualifier: []byte("b"),
Timestamp: proto.Uint64(ts),
Value: []byte("v1"),
},
},
},
{
// delete the whole cf1 and qualifer a in cf2
in: func(key string) (*hrpc.Mutate, error) {
return hrpc.NewDelStr(context.Background(), table, key,
map[string]map[string][]byte{
"cf1": nil,
"cf2": map[string][]byte{
"a": nil,
},
})
},
out: []*hrpc.Cell{
&hrpc.Cell{
Family: []byte("cf2"),
Qualifier: []byte("b"),
Timestamp: proto.Uint64(ts),
Value: []byte("v1"),
},
},
},
{
// delete only version at ts for all qualifiers of cf1
in: func(key string) (*hrpc.Mutate, error) {
return hrpc.NewDelStr(context.Background(), table, key,
map[string]map[string][]byte{
"cf1": nil,
}, hrpc.TimestampUint64(ts), hrpc.DeleteOneVersion())
},
out: []*hrpc.Cell{
&hrpc.Cell{
Family: []byte("cf1"),
Qualifier: []byte("a"),
Timestamp: proto.Uint64(ts + 2),
Value: []byte("v3"),
},
&hrpc.Cell{
Family: []byte("cf1"),
Qualifier: []byte("a"),
Timestamp: proto.Uint64(ts + 1),
Value: []byte("v2"),
},
&hrpc.Cell{
Family: []byte("cf2"),
Qualifier: []byte("a"),
Timestamp: proto.Uint64(ts + 2),
Value: []byte("v3"),
},
&hrpc.Cell{
Family: []byte("cf2"),
Qualifier: []byte("a"),
Timestamp: proto.Uint64(ts + 1),
Value: []byte("v2"),
},
&hrpc.Cell{
Family: []byte("cf2"),
Qualifier: []byte("a"),
Timestamp: proto.Uint64(ts),
Value: []byte("v1"),
},
&hrpc.Cell{
Family: []byte("cf2"),
Qualifier: []byte("b"),
Timestamp: proto.Uint64(ts),
Value: []byte("v1"),
},
},
},
{
// delete the whole row
in: func(key string) (*hrpc.Mutate, error) {
return hrpc.NewDelStr(context.Background(), table, key, nil)
},
out: nil,
},
{
// delete the whole row at and before ts + 1
in: func(key string) (*hrpc.Mutate, error) {
return hrpc.NewDelStr(context.Background(), table, key, nil,
hrpc.TimestampUint64(ts+1))
},
out: []*hrpc.Cell{
&hrpc.Cell{
Family: []byte("cf1"),
Qualifier: []byte("a"),
Timestamp: proto.Uint64(ts + 2),
Value: []byte("v3"),
},
&hrpc.Cell{
Family: []byte("cf2"),
Qualifier: []byte("a"),
Timestamp: proto.Uint64(ts + 2),
Value: []byte("v3"),
},
},
},
}
for i, tcase := range tests {
t.Run(strconv.Itoa(i), func(t *testing.T) {
key := t.Name()
// insert three versions
prep := func(cf string) {
if err := insertKeyValue(c, key, cf, []byte("v1"),
hrpc.TimestampUint64(ts)); err != nil {
t.Fatal(err)
}
if err := insertKeyValue(c, key, cf, []byte("v2"),
hrpc.TimestampUint64(ts+1)); err != nil {
t.Fatal(err)
}
if err := insertKeyValue(c, key, cf, []byte("v3"),
hrpc.TimestampUint64(ts+2)); err != nil {
t.Fatal(err)
}
// insert b
values := map[string]map[string][]byte{cf: map[string][]byte{
"b": []byte("v1"),
}}
put, err := hrpc.NewPutStr(context.Background(), table, key, values,
hrpc.TimestampUint64(ts))
if err != nil {
t.Fatal(err)
}
if _, err = c.Put(put); err != nil {
t.Fatal(err)
}
}
prep("cf1")
prep("cf2")
delReq, err := tcase.in(key)
if err != nil {
t.Fatal(err)
}
_, err = c.Delete(delReq)
if err != nil {
t.Fatal(err)
}
get, err := hrpc.NewGetStr(context.Background(), table, key,
hrpc.MaxVersions(math.MaxInt32))
if err != nil {
t.Fatal(err)
}
rsp, err := c.Get(get)
if err != nil {
t.Fatal(err)
}
for _, cell := range tcase.out {
cell.Row = []byte(t.Name())
cell.CellType = pb.CellType_PUT.Enum()
}
if !reflect.DeepEqual(tcase.out, rsp.Cells) {
t.Fatalf("expected %v, got %v", tcase.out, rsp.Cells)
}
})
}
}
func TestGetTimeRangeVersions(t *testing.T) {
key := "TestGetTimeRangeVersions"
c := gohbase.NewClient(*host)
defer c.Close()
err := insertKeyValue(c, key, "cf", []byte("1"), hrpc.Timestamp(time.Unix(0, 50*1e6)))
if err != nil {
t.Fatalf("Put failed: %s", err)
}
err = insertKeyValue(c, key, "cf", []byte("1"), hrpc.Timestamp(time.Unix(0, 51*1e6)))
if err != nil {
t.Fatalf("Put failed: %s", err)
}
err = insertKeyValue(c, key, "cf", []byte("1"), hrpc.Timestamp(time.Unix(0, 49*1e6)))
if err != nil {
t.Fatalf("Put failed: %s", err)
}
var maxVersions uint32 = 2
get, err := hrpc.NewGetStr(context.Background(), table, key,
hrpc.Families(map[string][]string{"cf": nil}), hrpc.TimeRange(time.Unix(0, 0),
time.Unix(0, 51*1e6)), hrpc.MaxVersions(maxVersions))
if err != nil {
t.Fatalf("Get req failed: %s", err)
}
rsp, err := c.Get(get)
if err != nil {
t.Fatalf("Get failed: %s", err)
}
if uint32(len(rsp.Cells)) != maxVersions {
t.Fatalf("Expected versions: %d, Got versions: %d", maxVersions, len(rsp.Cells))
}
getTs1 := *rsp.Cells[0].Timestamp
if getTs1 != 50 {
t.Errorf("Timestamps are not the same. Expected Time: %v, Got Time: %v",
50, getTs1)
}
getTs2 := *rsp.Cells[1].Timestamp
if getTs2 != 49 {
t.Errorf("Timestamps are not the same. Expected Time: %v, Got Time: %v",
49, getTs2)
}
// get with no versions set
get, err = hrpc.NewGetStr(context.Background(), table, key,
hrpc.Families(map[string][]string{"cf": nil}), hrpc.TimeRange(time.Unix(0, 0),
time.Unix(0, 51*1e6)))
if err != nil {
t.Fatalf("Get req failed: %s", err)
}
rsp, err = c.Get(get)
if err != nil {
t.Fatalf("Get failed: %s", err)
}
if uint32(len(rsp.Cells)) != 1 {
t.Fatalf("Expected versions: %d, Got versions: %d", 1, len(rsp.Cells))
}
getTs1 = *rsp.Cells[0].Timestamp
if getTs1 != 50 {
t.Errorf("Timestamps are not the same. Expected Time: %v, Got Time: %v",
50, getTs1)
}
}
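// Illustrative note, inferred from the assertions above rather than from
// gohbase documentation: hrpc.TimeRange appears to treat the upper bound as
// exclusive, e.g.
//
//	hrpc.TimeRange(time.Unix(0, 0), time.Unix(0, 51*1e6))
//
// matches cells with timestamps 0..50ms, which is why the put at 51 is never
// returned above.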
func TestScanTimeRangeVersions(t *testing.T) {
key := "TestScanTimeRangeVersions"
c := gohbase.NewClient(*host)
defer c.Close()
err := insertKeyValue(c, key+"1", "cf", []byte("1"), hrpc.Timestamp(time.Unix(0, 50*1e6)))
if err != nil {
t.Fatalf("Put failed: %s", err)
}
err = insertKeyValue(c, key+"1", "cf", []byte("1"), hrpc.Timestamp(time.Unix(0, 51*1e6)))
if err != nil {
t.Fatalf("Put failed: %s", err)
}
err = insertKeyValue(c, key+"2", "cf", []byte("1"), hrpc.Timestamp(time.Unix(0, 51*1e6)))
if err != nil {
t.Fatalf("Put failed: %s", err)
}
err = insertKeyValue(c, key+"2", "cf", []byte("1"), hrpc.Timestamp(time.Unix(0, 52*1e6)))
if err != nil {
t.Fatalf("Put failed: %s", err)
}
var maxVersions uint32 = 2
scan, err := hrpc.NewScanRangeStr(context.Background(), table,
"TestScanTimeRangeVersions1", "TestScanTimeRangeVersions3",
hrpc.Families(map[string][]string{"cf": nil}), hrpc.TimeRange(time.Unix(0, 50*1e6),
time.Unix(0, 53*1e6)), hrpc.MaxVersions(maxVersions))
if err != nil {
t.Fatalf("Scan req failed: %s", err)
}
var rsp []*hrpc.Result
scanner := c.Scan(scan)
for {
res, err := scanner.Next()
if err == io.EOF {
break
}
if err != nil {
t.Fatal(err)
}
rsp = append(rsp, res)
}
if len(rsp) != 2 {
t.Fatalf("Expected rows: %d, Got rows: %d", maxVersions, len(rsp))
}
if uint32(len(rsp[0].Cells)) != maxVersions {
t.Fatalf("Expected versions: %d, Got versions: %d", maxVersions, len(rsp[0].Cells))
}
scan1 := *rsp[0].Cells[0]
if string(scan1.Row) != "TestScanTimeRangeVersions1" || *scan1.Timestamp != 51 {
t.Errorf("Timestamps are not the same. Expected Time: %v, Got Time: %v",
51, *scan1.Timestamp)
}
scan2 := *rsp[0].Cells[1]
if string(scan2.Row) != "TestScanTimeRangeVersions1" || *scan2.Timestamp != 50 {
t.Errorf("Timestamps are not the same. Expected Time: %v, Got Time: %v",
50, *scan2.Timestamp)
}
if uint32(len(rsp[1].Cells)) != maxVersions {
t.Fatalf("Expected versions: %d, Got versions: %d", maxVersions, len(rsp[1].Cells))
}
scan3 := *rsp[1].Cells[0]
if string(scan3.Row) != "TestScanTimeRangeVersions2" || *scan3.Timestamp != 52 {
t.Errorf("Timestamps are not the same. Expected Time: %v, Got Time: %v",
52, *scan3.Timestamp)
}
scan4 := *rsp[1].Cells[1]
if string(scan4.Row) != "TestScanTimeRangeVersions2" || *scan4.Timestamp != 51 {
t.Errorf("Timestamps are not the same. Expected Time: %v, Got Time: %v",
51, *scan4.Timestamp)
}
// scan with no versions set
scan, err = hrpc.NewScanRangeStr(context.Background(), table,
"TestScanTimeRangeVersions1", "TestScanTimeRangeVersions3",
hrpc.Families(map[string][]string{"cf": nil}), hrpc.TimeRange(time.Unix(0, 50*1e6),
time.Unix(0, 53*1e6)),
hrpc.NumberOfRows(1)) // set number of rows to 1 to also check that we are doing fetches
if err != nil {
t.Fatalf("Scan req failed: %s", err)
}
rsp = nil
scanner = c.Scan(scan)
for {
res, err := scanner.Next()
if err == io.EOF {
break
}
if err != nil {
t.Fatal(err)
}
rsp = append(rsp, res)
}
if len(rsp) != 2 {
t.Fatalf("Expected rows: %d, Got rows: %d", 2, len(rsp))
}
if len(rsp[0].Cells) != 1 {
t.Fatalf("Expected versions: %d, Got versions: %d", 1, len(rsp[0].Cells))
}
if len(rsp[1].Cells) != 1 {
t.Fatalf("Expected versions: %d, Got versions: %d", 1, len(rsp[1].Cells))
}
}
func TestPutTTL(t *testing.T) {
key := "TestPutTTL"
c := gohbase.NewClient(*host)
defer c.Close()
var ttl = 2 * time.Second
err := insertKeyValue(c, key, "cf", []byte("1"), hrpc.TTL(ttl))
if err != nil {
t.Fatalf("Put failed: %s", err)
}
// Wait out the TTL and then try to get the value
time.Sleep(ttl)
get, err := hrpc.NewGetStr(context.Background(), table, key,
hrpc.Families(map[string][]string{"cf": nil}))
if err != nil {
t.Fatalf("Get req failed: %s", err)
}
// Make sure we don't get a result back
res, err := c.Get(get)
if err != nil {
t.Fatalf("Get failed: %s", err)
}
if len(res.Cells) > 0 {
t.Errorf("TTL did not expire row. Expected 0 cells, got: %d", len(res.Cells))
}
}
func checkResultRow(t *testing.T, res *hrpc.Result, expectedRow string, err, expectedErr error) {
if err != expectedErr {
t.Fatalf("Expected error %v, got error %v", expectedErr, err)
}
if len(expectedRow) > 0 && res != nil && len(res.Cells) > 0 {
got := string(res.Cells[0].Row)
if got != expectedRow {
t.Fatalf("Expected row %s, got row %s", expectedRow, got)
}
} else if len(expectedRow) == 0 && res != nil {
t.Fatalf("Expected no result, got %+v", *res)
}
}
func TestScannerClose(t *testing.T) {
key := t.Name()
c := gohbase.NewClient(*host)
defer c.Close()
err := insertKeyValue(c, key+"1", "cf", []byte("1"))
if err != nil {
t.Fatalf("Put failed: %s", err)
}
err = insertKeyValue(c, key+"2", "cf", []byte("1"))
if err != nil {
t.Fatalf("Put failed: %s", err)
}
err = insertKeyValue(c, key+"3", "cf", []byte("1"))
if err != nil {
t.Fatalf("Put failed: %s", err)
}
scan, err := hrpc.NewScanRangeStr(context.Background(), table,
key+"1", key+"4",
hrpc.Families(map[string][]string{"cf": nil}),
hrpc.NumberOfRows(1)) // fetch only one row at a time
if err != nil {
t.Fatalf("Scan req failed: %s", err)
}
scanner := c.Scan(scan)
res, err := scanner.Next()
checkResultRow(t, res, key+"1", err, nil)
res, err = scanner.Next()
checkResultRow(t, res, key+"2", err, nil)
scanner.Close()
// make sure we get io.EOF eventually
for {
if _, err = scanner.Next(); err == io.EOF {
break
}
}
}
func TestScannerContextCancel(t *testing.T) {
key := t.Name()
c := gohbase.NewClient(*host)
defer c.Close()
err := insertKeyValue(c, key+"1", "cf", []byte("1"))
if err != nil {
t.Fatalf("Put failed: %s", err)
}
err = insertKeyValue(c, key+"2", "cf", []byte("1"))
if err != nil {
t.Fatalf("Put failed: %s", err)
}
err = insertKeyValue(c, key+"3", "cf", []byte("1"))
if err != nil {
t.Fatalf("Put failed: %s", err)
}
ctx, cancel := context.WithCancel(context.Background())
scan, err := hrpc.NewScanRangeStr(ctx, table,
key+"1", key+"4",
hrpc.Families(map[string][]string{"cf": nil}),
hrpc.NumberOfRows(1)) // fetch only one row at a time
if err != nil {
t.Fatalf("Scan req failed: %s", err)
}
scanner := c.Scan(scan)
res, err := scanner.Next()
checkResultRow(t, res, key+"1", err, nil)
cancel()
if _, err = scanner.Next(); err != context.Canceled {
t.Fatalf("unexpected error %v, expected %v", err, context.Canceled)
}
}
func TestAppend(t *testing.T) {
key := "row7"
c := gohbase.NewClient(*host)
defer c.Close()
// Inserting "Hello"
insertErr := insertKeyValue(c, key, "cf", []byte("Hello"))
if insertErr != nil {
t.Errorf("Put returned an error: %v", insertErr)
}
// Appending " my name is Dog."
values := map[string]map[string][]byte{"cf": map[string][]byte{}}
values["cf"]["a"] = []byte(" my name is Dog.")
appRequest, err := hrpc.NewAppStr(context.Background(), table, key, values)
if err != nil {
t.Fatalf("NewAppStr returned an error: %v", err)
}
appRsp, err := c.Append(appRequest)
if err != nil {
t.Errorf("Append returned an error: %v", err)
}
if appRsp == nil {
t.Errorf("Append doesn't return updated value.")
}
// Verifying new result is "Hello my name is Dog."
result := appRsp.Cells[0].Value
if !bytes.Equal([]byte("Hello my name is Dog."), result) {
t.Errorf("Append returned an incorrect result. Expected: %v, Receieved: %v",
[]byte("Hello my name is Dog."), result)
}
// Make sure the change was actually committed.
headers := map[string][]string{"cf": nil}
get, err := hrpc.NewGetStr(context.Background(), table, key, hrpc.Families(headers))
if err != nil {
t.Fatalf("NewGetStr returned an error: %v", err)
}
rsp, err := c.Get(get)
if err != nil {
t.Fatalf("Get returned an error: %v", err)
}
cells := rsp.Cells
if len(cells) != 1 {
t.Errorf("Get expected 1 cells. Received: %d", len(cells))
}
result = cells[0].Value
if !bytes.Equal([]byte("Hello my name is Dog."), result) {
t.Errorf("Append returned an incorrect result. Expected: %v, Receieved: %v",
[]byte("Hello my name is Dog."), result)
}
}
func TestIncrement(t *testing.T) {
c := gohbase.NewClient(*host)
defer c.Close()
key := "row102"
// test increment
incRequest, err := hrpc.NewIncStrSingle(context.Background(), table, key, "cf", "a", 1)
if err != nil {
t.Fatalf("NewIncStrSingle returned an error: %v", err)
}
result, err := c.Increment(incRequest)
if err != nil {
t.Fatalf("Increment returned an error: %v", err)
}
if result != 1 {
t.Fatalf("Increment's result is %d, want 1", result)
}
incRequest, err = hrpc.NewIncStrSingle(context.Background(), table, key, "cf", "a", 5)
if err != nil {
t.Fatalf("NewIncStrSingle returned an error: %v", err)
}
result, err = c.Increment(incRequest)
if err != nil {
t.Fatalf("Increment returned an error: %v", err)
}
if result != 6 {
t.Fatalf("Increment's result is %d, want 6", result)
}
}
func TestIncrementParallel(t *testing.T) {
c := gohbase.NewClient(*host)
defer c.Close()
key := "row102.5"
numParallel := 10
// test increment
var wg sync.WaitGroup
for i := 0; i < numParallel; i++ {
wg.Add(1)
go func() {
defer wg.Done()
incRequest, err := hrpc.NewIncStrSingle(context.Background(), table, key, "cf", "a", 1)
if err != nil {
t.Errorf("NewIncStrSingle returned an error: %v", err)
return
}
_, err = c.Increment(incRequest)
if err != nil {
t.Errorf("Increment returned an error: %v", err)
}
}()
}
wg.Wait()
// do one more to check if there's a correct value
incRequest, err := hrpc.NewIncStrSingle(context.Background(), table, key, "cf", "a", 1)
if err != nil {
t.Fatalf("NewIncStrSingle returned an error: %v", err)
}
result, err := c.Increment(incRequest)
if err != nil {
t.Fatalf("Increment returned an error: %v", err)
}
if result != int64(numParallel+1) {
t.Fatalf("Increment's result is %d, want %d", result, numParallel+1)
}
}
func TestCheckAndPut(t *testing.T) {
c := gohbase.NewClient(*host)
defer c.Close()
key := "row100"
ef := "cf"
eq := "a"
var castests = []struct {
inValues map[string]map[string][]byte
inExpectedValue []byte
out bool
}{
{map[string]map[string][]byte{"cf": map[string][]byte{"b": []byte("2")}},
nil, true}, // nil instead of empty byte array
{map[string]map[string][]byte{"cf": map[string][]byte{"a": []byte("1")}},
[]byte{}, true},
{map[string]map[string][]byte{"cf": map[string][]byte{"a": []byte("1")}},
[]byte{}, false},
{map[string]map[string][]byte{"cf": map[string][]byte{"a": []byte("2")}},
[]byte("1"), true},
{map[string]map[string][]byte{"cf": map[string][]byte{"b": []byte("2")}},
[]byte("2"), true}, // put diff column
{map[string]map[string][]byte{"cf": map[string][]byte{"b": []byte("2")}},
[]byte{}, false}, // diff column
{map[string]map[string][]byte{"cf": map[string][]byte{
"b": []byte("100"),
"a": []byte("100"),
}}, []byte("2"), true}, // multiple values
}
for _, tt := range castests {
putRequest, err := hrpc.NewPutStr(context.Background(), table, key, tt.inValues)
if err != nil {
t.Fatalf("NewPutStr returned an error: %v", err)
}
casRes, err := c.CheckAndPut(putRequest, ef, eq, tt.inExpectedValue)
if err != nil {
t.Fatalf("CheckAndPut error: %s", err)
}
if casRes != tt.out {
t.Errorf("CheckAndPut with put values=%q and expectedValue=%q returned %v, want %v",
tt.inValues, tt.inExpectedValue, casRes, tt.out)
}
}
// TODO: check the resulting state by performing a Get request
}
func TestCheckAndPutNotPut(t *testing.T) {
key := "row101"
c := gohbase.NewClient(*host)
defer c.Close()
values := map[string]map[string][]byte{"cf": map[string][]byte{"a": []byte("lol")}}
appRequest, err := hrpc.NewAppStr(context.Background(), table, key, values)
if err != nil {
t.Fatalf("NewAppStr returned an error: %v", err)
}
_, err = c.CheckAndPut(appRequest, "cf", "a", []byte{})
if err == nil {
t.Error("CheckAndPut: should not allow anything but Put request")
}
}
func TestCheckAndPutParallel(t *testing.T) {
c := gohbase.NewClient(*host)
defer c.Close()
keyPrefix := "row100.5"
values := map[string]map[string][]byte{"cf": map[string][]byte{"a": []byte("1")}}
capTestFunc := func(p *hrpc.Mutate, ch chan bool) {
casRes, err := c.CheckAndPut(p, "cf", "a", []byte{})
if err != nil {
t.Errorf("CheckAndPut error: %s", err)
}
ch <- casRes
}
// make 10 pairs of CheckAndPut requests
for i := 0; i < 10; i++ {
ch := make(chan bool, 2)
putRequest1, err := hrpc.NewPutStr(context.Background(), table, keyPrefix+fmt.Sprint(i), values)
if err != nil {
t.Fatalf("NewPutStr returned an error: %v", err)
}
putRequest2, err := hrpc.NewPutStr(context.Background(), table, keyPrefix+fmt.Sprint(i), values)
if err != nil {
t.Fatalf("NewPutStr returned an error: %v", err)
}
go capTestFunc(putRequest1, ch)
go capTestFunc(putRequest2, ch)
first := <-ch
second := <-ch
if first && second {
t.Error("CheckAndPut: both requests cannot succeed")
}
if !first && !second {
t.Error("CheckAndPut: both requests cannot fail")
}
}
}
func TestClose(t *testing.T) {
c := gohbase.NewClient(*host)
values := map[string]map[string][]byte{"cf": map[string][]byte{"a": []byte("1")}}
r, err := hrpc.NewPutStr(context.Background(), table, t.Name(), values)
if err != nil {
t.Fatal(err)
}
_, err = c.Put(r)
if err != nil {
t.Fatal(err)
}
c.Close()
_, err = c.Put(r)
if err != gohbase.ErrClientClosed {
t.Fatalf("unexpected error: %v", err)
}
}
func TestCloseWithoutMeta(t *testing.T) {
c := gohbase.NewClient(*host)
c.Close()
values := map[string]map[string][]byte{"cf": map[string][]byte{"a": []byte("1")}}
r, err := hrpc.NewPutStr(context.Background(), table, t.Name(), values)
if err != nil {
t.Fatal(err)
}
_, err = c.Put(r)
if err != gohbase.ErrClientClosed {
t.Fatalf("unexpected error: %v", err)
}
}
// Note: This function currently causes an infinite loop in the client, throwing the error:
// 2015/06/19 14:34:11 Encountered an error while reading: Failed to read from the RS: EOF
func TestChangingRegionServers(t *testing.T) {
key := "row8"
val := []byte("1")
headers := map[string][]string{"cf": nil}
if host == nil {
t.Fatal("Host is not set!")
}
c := gohbase.NewClient(*host)
defer c.Close()
err := insertKeyValue(c, key, "cf", val)
if err != nil {
t.Errorf("Put returned an error: %v", err)
}
// RegionServer 1 hosts all the current regions.
// Now launch servers 2,3
LaunchRegionServers([]string{"2", "3"})
// Now (gracefully) stop servers 1,2.
// All regions should now be on server 3.
StopRegionServers([]string{"1", "2"})
get, err := hrpc.NewGetStr(context.Background(), table, key, hrpc.Families(headers))
if err != nil {
t.Fatalf("NewGetStr returned an error: %v", err)
}
rsp, err := c.Get(get)
if err != nil {
t.Errorf("Get returned an error: %v", err)
}
rspValue := rsp.Cells[0].Value
if !bytes.Equal(rspValue, val) {
t.Errorf("Get returned an incorrect result. Expected: %v, Received: %v",
val, rspValue)
}
// Clean up by re-launching RS1 and closing RS3
LaunchRegionServers([]string{"1"})
StopRegionServers([]string{"3"})
}
func BenchmarkPut(b *testing.B) {
b.ReportAllocs()
keyPrefix := "row9"
err := performNPuts(keyPrefix, b.N)
if err != nil {
b.Errorf("Put returned an error: %v", err)
}
}
func BenchmarkGet(b *testing.B) {
b.ReportAllocs()
keyPrefix := "row10"
err := performNPuts(keyPrefix, b.N)
if err != nil {
b.Errorf("Put returned an error: %v", err)
}
c := gohbase.NewClient(*host)
defer c.Close()
b.ResetTimer()
headers := map[string][]string{"cf": nil}
for i := 0; i < b.N; i++ {
key := keyPrefix + fmt.Sprintf("%d", i)
get, _ := hrpc.NewGetStr(context.Background(), table, key, hrpc.Families(headers))
c.Get(get)
}
}
// Helper function. Given a key prefix and an operation count, performs that many puts.
func performNPuts(keyPrefix string, numOps int) error {
c := gohbase.NewClient(*host)
defer c.Close()
for i := 0; i < numOps; i++ {
key := keyPrefix + fmt.Sprintf("%d", i)
err := insertKeyValue(c, key, "cf", []byte(fmt.Sprintf("%d", i)))
if err != nil {
return err
}
}
return nil
}
// Helper function. Given a client, key, column family, and value, inserts the value into the table under column 'a'
func insertKeyValue(c gohbase.Client, key, columnFamily string, value []byte,
options ...func(hrpc.Call) error) error {
values := map[string]map[string][]byte{columnFamily: map[string][]byte{}}
values[columnFamily]["a"] = value
putRequest, err := hrpc.NewPutStr(context.Background(), table, key, values, options...)
if err != nil {
return err
}
_, err = c.Put(putRequest)
return err
}
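// Illustrative usage of insertKeyValue ("exampleRow" is a made-up key; any
// hrpc.Call option such as a TTL or an explicit timestamp can be forwarded):
//
//	err := insertKeyValue(c, "exampleRow", "cf", []byte("v"),
//		hrpc.TimestampUint64(42))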
func deleteKeyValue(c gohbase.Client, key, columnFamily string, value []byte,
options ...func(hrpc.Call) error) error {
values := map[string]map[string][]byte{columnFamily: map[string][]byte{}}
values[columnFamily]["a"] = value
d, err := hrpc.NewDel(context.Background(), []byte(table), []byte(key), values)
if err != nil {
return err
}
_, err = c.Delete(d)
return err
}
func TestMaxResultsPerColumnFamilyGet(t *testing.T) {
c := gohbase.NewClient(*host)
defer c.Close()
key := "variablecolumnrow"
baseErr := "MaxResultsPerColumnFamilyGet error "
values := make(map[string]map[string][]byte)
values["cf"] = map[string][]byte{}
// Save a row with 20 columns
for i := 0; i < 20; i++ {
colKey := fmt.Sprintf("%02d", i)
values["cf"][colKey] = []byte(fmt.Sprintf("value %d", i))
}
// First test that the function can't be used on types other than get or scan
putRequest, err := hrpc.NewPutStr(context.Background(),
table,
key,
values,
hrpc.MaxResultsPerColumnFamily(5),
)
if err == nil {
t.Errorf(baseErr+"- Option allowed to be used with incorrect type: %s", err)
}
putRequest, err = hrpc.NewPutStr(context.Background(),
table,
key,
values,
hrpc.ResultOffset(5),
)
if err == nil {
t.Errorf(baseErr+"- Option allowed to be used with incorrect type: %s", err)
}
// Now actually save the values
putRequest, err = hrpc.NewPutStr(context.Background(), table, key, values)
if err != nil {
t.Errorf(baseErr+"building put string: %s", err)
}
_, err = c.Put(putRequest)
if err != nil {
t.Errorf(baseErr+"saving row: %s", err)
}
family := hrpc.Families(map[string][]string{"cf": nil})
// Do we get the correct number of cells without qualifier
getRequest, err := hrpc.NewGetStr(context.Background(),
table,
key,
family,
hrpc.MaxVersions(1),
)
if err != nil {
t.Errorf(baseErr+"building get request: %s", err)
}
result, err := c.Get(getRequest)
if err != nil {
t.Fatalf(baseErr+"getting row: %s", err)
}
if len(result.Cells) != 20 {
t.Errorf(baseErr+"- expecting %d results with parameters; received %d",
20,
len(result.Cells),
)
}
// Simple test for max columns per column family. Return the first n columns in order
for testCnt := 1; testCnt <= 20; testCnt++ {
// Get the first n columns
getRequest, err := hrpc.NewGetStr(context.Background(),
table,
key,
family,
hrpc.MaxVersions(1),
hrpc.MaxResultsPerColumnFamily(uint32(testCnt)),
)
if err != nil {
t.Errorf(baseErr+"building get request: %s", err)
}
result, err := c.Get(getRequest)
if err != nil {
t.Fatalf(baseErr+"getting row: %s", err)
}
if len(result.Cells) != testCnt {
t.Errorf(baseErr+"- expecting %d results; received %d", testCnt, len(result.Cells))
}
for i, x := range result.Cells {
// Make sure the column name and value are what is expected and in correct sequence
if string(x.Qualifier) != fmt.Sprintf("%02d", i) ||
string(x.Value) != fmt.Sprintf("value %d", i) {
t.Errorf(baseErr+"- unexpected return value. Expecting %s received %s",
fmt.Sprintf("value %d", i),
string(x.Value),
)
}
}
// Get with out of range values
getRequest, err = hrpc.NewGetStr(context.Background(),
table,
key,
family,
hrpc.MaxVersions(1),
hrpc.MaxResultsPerColumnFamily(math.MaxUint32),
)
if err == nil {
t.Error(baseErr + "- out of range column result parameter accepted")
}
// Get with out of range values
getRequest, err = hrpc.NewGetStr(context.Background(),
table,
key,
family,
hrpc.MaxVersions(1),
hrpc.ResultOffset(math.MaxUint32),
)
if err == nil {
t.Error(baseErr + "- out of range column offset parameter accepted")
}
}
// Max columns per column family. Return first n cells in order with offset.
for offset := 0; offset < 20; offset++ {
for maxResults := 1; maxResults <= 20-offset; maxResults++ {
getRequest, err := hrpc.NewGetStr(context.Background(),
table,
key,
family,
hrpc.MaxVersions(1),
hrpc.MaxResultsPerColumnFamily(uint32(maxResults)),
hrpc.ResultOffset(uint32(offset)),
)
if err != nil {
t.Errorf(baseErr+"building get request testing offset: %s", err)
}
result, err := c.Get(getRequest)
if err != nil {
t.Fatalf(baseErr+"getting row with offset: %s", err)
}
// Make sure number of cells returned is still correct
if len(result.Cells) != maxResults {
t.Errorf(baseErr+"with offset - expecting %d results; received %d",
maxResults,
len(result.Cells),
)
}
// make sure the cells returned are what is expected and in correct sequence
for i := range result.Cells {
if string(result.Cells[i].Value) != fmt.Sprintf("value %d", offset+i) {
t.Errorf(baseErr+"with offset - Expected value %s but received %s",
fmt.Sprintf("value %d", offset+i),
string(result.Cells[i].Value),
)
}
}
}
}
}
func TestMaxResultsPerColumnFamilyScan(t *testing.T) {
c := gohbase.NewClient(*host)
defer c.Close()
baseErr := "MaxResultsPerColumnFamilyScan error "
key := "variablecolumnrow_1"
values := make(map[string]map[string][]byte)
values["cf"] = map[string][]byte{}
// Save a row with 20 columns
for i := 0; i < 20; i++ {
colKey := fmt.Sprintf("%02d", i)
values["cf"][colKey] = []byte(fmt.Sprintf("value %d", i))
}
putRequest, err := hrpc.NewPutStr(context.Background(), table, key, values)
if err != nil {
t.Errorf(baseErr+"building put string: %s", err)
}
_, err = c.Put(putRequest)
if err != nil {
t.Errorf(baseErr+"saving row: %s", err)
}
// Save another row with 20 columns
key = "variablecolumnrow_2"
putRequest, err = hrpc.NewPutStr(context.Background(), table, key, values)
if err != nil {
t.Errorf(baseErr+"building put string: %s", err)
}
_, err = c.Put(putRequest)
if err != nil {
t.Errorf(baseErr+"saving row: %s", err)
}
family := hrpc.Families(map[string][]string{"cf": nil})
pFilter := filter.NewPrefixFilter([]byte("variablecolumnrow_"))
// Do we get the correct number of cells without qualifier
scanRequest, err := hrpc.NewScanStr(context.Background(),
table,
family,
hrpc.Filters(pFilter),
hrpc.MaxVersions(1),
)
if err != nil {
t.Errorf(baseErr+"building scan request: %s", err)
}
result := c.Scan(scanRequest)
resultCnt := 0
for {
rRow, err := result.Next()
if err == io.EOF {
break
}
if err != nil {
t.Errorf(baseErr+"scanning result: %s", err)
}
if len(rRow.Cells) != 20 {
t.Errorf(baseErr+"- expected all 20 columns but received %d", len(rRow.Cells))
}
resultCnt++
}
if resultCnt != 2 {
t.Errorf(baseErr+"- expected 2 rows; received %d", resultCnt)
}
// Do we get a limited number of columns per row
baseErr = "MaxResultsPerColumnFamilyScan with limited columns error "
scanRequest, err = hrpc.NewScanStr(context.Background(),
table,
family,
hrpc.Filters(pFilter),
hrpc.MaxVersions(1),
hrpc.MaxResultsPerColumnFamily(15),
)
if err != nil {
t.Errorf(baseErr+"building scan request: %s", err)
}
result = c.Scan(scanRequest)
resultCnt = 0
for {
rRow, err := result.Next()
if err == io.EOF {
break
}
if err != nil {
t.Errorf(baseErr+"scanning result: %s", err)
}
if len(rRow.Cells) != 15 {
t.Errorf(baseErr+"- expected 15 columns but received %d", len(rRow.Cells))
}
resultCnt++
}
if resultCnt != 2 {
t.Errorf(baseErr+"- expected 2 rows; received %d", resultCnt)
}
// Do we get a limited number of columns per row and are they correctly offset
baseErr = "MaxResultsPerColumnFamilyScan with limited columns and offset error "
scanRequest, err = hrpc.NewScanStr(context.Background(),
table,
family,
hrpc.Filters(pFilter),
hrpc.MaxVersions(1),
hrpc.MaxResultSize(1),
hrpc.MaxResultsPerColumnFamily(2),
hrpc.ResultOffset(10),
)
if err != nil {
t.Errorf(baseErr+"building scan request: %s", err)
}
result = c.Scan(scanRequest)
resultCnt = 0
for {
rRow, err := result.Next()
if err == io.EOF {
break
}
if err != nil {
t.Errorf(baseErr+"scanning result: %s", err)
}
if len(rRow.Cells) != 2 {
t.Errorf(baseErr+"- expected 2 columns but received %d", len(rRow.Cells))
}
if string(rRow.Cells[0].Value) != "value 10" || string(rRow.Cells[1].Value) != "value 11" {
t.Errorf(baseErr+"- unexpected cells values. "+
"Expected 'value 10' and 'value 11' - received %s and %s",
string(rRow.Cells[0].Value),
string(rRow.Cells[1].Value),
)
}
resultCnt++
}
if resultCnt != 1 {
t.Errorf(baseErr+"- expected 1 row; received %d", resultCnt)
}
// Test with out of range values
scanRequest, err = hrpc.NewScanStr(context.Background(),
table,
family,
hrpc.Filters(pFilter),
hrpc.MaxVersions(1),
hrpc.MaxResultsPerColumnFamily(math.MaxUint32),
)
if err == nil {
t.Error(baseErr + "- out of range column result parameter accepted")
}
scanRequest, err = hrpc.NewScanStr(context.Background(),
table,
family,
hrpc.Filters(pFilter),
hrpc.MaxVersions(1),
hrpc.ResultOffset(math.MaxUint32),
)
if err == nil {
t.Error(baseErr + "- out of range column result parameter accepted")
}
}
func TestMultiRequest(t *testing.T) {
// pre-populate the table
var (
getKey = t.Name() + "_Get"
putKey = t.Name() + "_Put"
deleteKey = t.Name() + "_Delete"
appendKey = t.Name() + "_Append"
incrementKey = t.Name() + "_Increment"
)
c := gohbase.NewClient(*host, gohbase.RpcQueueSize(1))
if err := insertKeyValue(c, getKey, "cf", []byte{1}); err != nil {
t.Fatal(err)
}
if err := insertKeyValue(c, deleteKey, "cf", []byte{3}); err != nil {
t.Fatal(err)
}
if err := insertKeyValue(c, appendKey, "cf", []byte{4}); err != nil {
t.Fatal(err)
}
i, err := hrpc.NewIncStrSingle(context.Background(), table, incrementKey, "cf", "a", 5)
if err != nil {
t.Fatal(err)
}
_, err = c.Increment(i)
if err != nil {
t.Fatal(err)
}
c.Close()
c = gohbase.NewClient(*host, gohbase.FlushInterval(1000*time.Hour), gohbase.RpcQueueSize(5))
defer c.Close()
var wg sync.WaitGroup
wg.Add(5)
go func() {
g, err := hrpc.NewGetStr(context.Background(), table, getKey)
if err != nil {
t.Error(err)
}
r, err := c.Get(g)
if err != nil {
t.Error(err)
}
expV := []byte{1}
if !bytes.Equal(r.Cells[0].Value, expV) {
t.Errorf("expected %v, got %v:", expV, r.Cells[0].Value)
}
wg.Done()
}()
go func() {
v := map[string]map[string][]byte{"cf": map[string][]byte{"a": []byte{2}}}
p, err := hrpc.NewPutStr(context.Background(), table, putKey, v)
if err != nil {
t.Error(err)
}
r, err := c.Put(p)
if err != nil {
t.Error(err)
}
if len(r.Cells) != 0 {
t.Errorf("expected no cells, got %d", len(r.Cells))
}
wg.Done()
}()
go func() {
d, err := hrpc.NewDelStr(context.Background(), table, deleteKey, nil)
if err != nil {
t.Error(err)
}
r, err := c.Delete(d)
if err != nil {
t.Error(err)
}
if len(r.Cells) != 0 {
t.Errorf("expected no cells, got %d", len(r.Cells))
}
wg.Done()
}()
go func() {
v := map[string]map[string][]byte{"cf": map[string][]byte{"a": []byte{4}}}
a, err := hrpc.NewAppStr(context.Background(), table, appendKey, v)
if err != nil {
t.Error(err)
}
r, err := c.Append(a)
if err != nil {
t.Error(err)
}
expV := []byte{4, 4}
if !bytes.Equal(r.Cells[0].Value, expV) {
t.Errorf("expected %v, got %v:", expV, r.Cells[0].Value)
}
wg.Done()
}()
go func() {
i, err := hrpc.NewIncStrSingle(context.Background(), table, incrementKey, "cf", "a", 1)
if err != nil {
t.Error(err)
}
r, err := c.Increment(i)
if err != nil {
t.Error(err)
}
if r != 6 {
t.Errorf("expected %d, got %d:", 6, r)
}
wg.Done()
}()
wg.Wait()
}
func TestReverseScan(t *testing.T) {
c := gohbase.NewClient(*host)
defer c.Close()
baseErr := "Reverse Scan error "
values := make(map[string]map[string][]byte)
values["cf"] = map[string][]byte{}
// Save 500 rows
for i := 0; i < 500; i++ {
key := fmt.Sprintf("REVTEST-%03d", i)
values["cf"]["reversetest"] = []byte(fmt.Sprintf("%d", i))
putRequest, err := hrpc.NewPutStr(context.Background(), table, key, values)
if err != nil {
t.Errorf(baseErr+"building put string: %s", err)
}
_, err = c.Put(putRequest)
if err != nil {
t.Errorf(baseErr+"saving row: %s", err)
}
}
// Read them back in reverse order
scanRequest, err := hrpc.NewScanRangeStr(context.Background(),
table,
"REVTEST-999",
"REVTEST-",
hrpc.Families(map[string][]string{"cf": []string{"reversetest"}}),
hrpc.Reversed(),
)
if err != nil {
t.Errorf(baseErr+"setting up reverse scan: %s", err)
}
i := 0
results := c.Scan(scanRequest)
for {
r, err := results.Next()
if err != nil {
if err == io.EOF {
break
}
t.Errorf(baseErr+"scanning results: %s", err)
}
i++
expected := fmt.Sprintf("%d", 500-i)
if string(r.Cells[0].Value) != expected {
t.Errorf(baseErr + "- unexpected rowkey returned")
}
}
if i != 500 {
t.Errorf(baseErr+" expected 500 rows returned; found %d", i)
}
results.Close()
// Read part of them back in reverse order. Stoprow should be exclusive just like forward scan
scanRequest, err = hrpc.NewScanRangeStr(context.Background(),
table,
"REVTEST-250",
"REVTEST-150",
hrpc.Families(map[string][]string{"cf": []string{"reversetest"}}),
hrpc.Reversed(),
)
if err != nil {
t.Errorf(baseErr+"setting up reverse scan: %s", err)
}
i = 0
results = c.Scan(scanRequest)
for {
r, err := results.Next()
if err != nil {
if err == io.EOF {
break
}
t.Errorf(baseErr+"scanning results: %s", err)
}
i++
expected := fmt.Sprintf("%d", 251-i)
if string(r.Cells[0].Value) != expected {
t.Errorf(baseErr + "- unexpected rowkey returned when doing partial reverse scan")
}
}
if i != 100 {
t.Errorf(baseErr+" expected 100 rows returned; found %d", i)
}
results.Close()
}
func TestListTableNames(t *testing.T) {
// Initialize our tables
ac := gohbase.NewAdminClient(*host)
tables := []string{
table + "_MATCH1",
table + "_MATCH2",
table + "nomatch",
}
for _, tn := range tables {
// Since this test is called by TestMain which waits for hbase init
// there is no need to wait here.
err := CreateTable(ac, tn, []string{"cf"})
if err != nil {
panic(err)
}
}
defer func() {
for _, tn := range tables {
err := DeleteTable(ac, tn)
if err != nil {
panic(err)
}
}
}()
m1 := []byte(table + "_MATCH1")
m2 := []byte(table + "_MATCH2")
tcases := []struct {
desc string
regex string
namespace string
sys bool
match []*pb.TableName
}{
{
desc: "match all",
regex: ".*",
match: []*pb.TableName{
&pb.TableName{Qualifier: []byte(table)},
&pb.TableName{Qualifier: m1},
&pb.TableName{Qualifier: m2},
&pb.TableName{Qualifier: []byte(table + "nomatch")},
},
},
{
desc: "match_some",
regex: ".*_MATCH.*",
match: []*pb.TableName{
&pb.TableName{Qualifier: m1},
&pb.TableName{Qualifier: m2},
},
},
{
desc: "match_none",
},
{
desc: "match meta",
regex: ".*meta.*",
namespace: "hbase",
sys: true,
match: []*pb.TableName{
&pb.TableName{Qualifier: []byte("meta")},
},
},
}
for _, tcase := range tcases {
t.Run(tcase.desc, func(t *testing.T) {
tn, err := hrpc.NewListTableNames(
context.Background(),
hrpc.ListRegex(tcase.regex),
hrpc.ListSysTables(tcase.sys),
hrpc.ListNamespace(tcase.namespace),
)
if err != nil {
t.Fatal(err)
}
names, err := ac.ListTableNames(tn)
if err != nil {
t.Error(err)
}
// filter to have only tables that we've created
var got []*pb.TableName
for _, m := range names {
if strings.HasPrefix(string(m.Qualifier), table) ||
string(m.Namespace) == "hbase" {
got = append(got, m)
}
}
if len(got) != len(tcase.match) {
t.Fatalf("expected %v, got %v", tcase.match, got)
}
for i, m := range tcase.match {
want := string(m.Qualifier)
gotName := string(got[i].Qualifier)
if want != gotName {
t.Errorf("index %d: expected: %v, got %v", i, want, gotName)
}
}
})
}
}
// Test snapshot creation
func TestSnapshot(t *testing.T) {
ac := gohbase.NewAdminClient(*host)
name := "snapshot-" + table
sn, err := hrpc.NewSnapshot(context.Background(), name, table)
if err != nil {
t.Fatal(err)
}
if err = ac.CreateSnapshot(sn); err != nil {
t.Error(err)
}
defer func() {
if err = ac.DeleteSnapshot(sn); err != nil {
t.Error(err)
}
}()
ls := hrpc.NewListSnapshots(context.Background())
snaps, err := ac.ListSnapshots(ls)
if err != nil {
t.Error(err)
}
if len(snaps) != 1 {
t.Errorf("expection 1 snapshot, got %v", len(snaps))
}
gotName := snaps[0].GetName()
if gotName != name {
t.Errorf("expection snapshot name to be %v got %v", name, gotName)
}
}
// Test snapshot restoration
func TestRestoreSnapshot(t *testing.T) {
// Procedure for this test is roughly:
// - Create some data in a table.
// - Create a snapshot.
// - Remove all data.
// - Restore snapshot.
// - Ensure data is there.
var (
key = t.Name() + "_Get"
name = "snapshot-" + table
)
c := gohbase.NewClient(*host, gohbase.RpcQueueSize(1))
if err := insertKeyValue(c, key, "cf", []byte{1}); err != nil {
t.Fatal(err)
}
ac := gohbase.NewAdminClient(*host)
sn, err := hrpc.NewSnapshot(context.Background(), name, table)
if err != nil {
t.Fatal(err)
}
if err := ac.CreateSnapshot(sn); err != nil {
t.Error(err)
}
defer func() {
err = ac.DeleteSnapshot(sn)
if err != nil {
t.Error(err)
}
}()
if err := deleteKeyValue(c, key, "cf", []byte{1}); err != nil {
t.Error(err)
}
g, err := hrpc.NewGetStr(context.Background(), table, key)
if err != nil {
t.Error(err)
}
r, err := c.Get(g)
if err != nil {
t.Error(err)
}
if len(r.Cells) != 0 {
t.Fatalf("expected no cells in table %s key %s", table, key)
}
c.Close()
td := hrpc.NewDisableTable(context.Background(), []byte(table))
if err := ac.DisableTable(td); err != nil {
t.Error(err)
}
if err = ac.RestoreSnapshot(sn); err != nil {
t.Error(err)
}
te := hrpc.NewEnableTable(context.Background(), []byte(table))
if err := ac.EnableTable(te); err != nil {
t.Error(err)
}
c = gohbase.NewClient(*host, gohbase.RpcQueueSize(1))
r, err = c.Get(g)
if err != nil {
t.Error(err)
}
expV := []byte{1}
if !bytes.Equal(r.Cells[0].Value, expV) {
t.Errorf("expected %v, got %v:", expV, r.Cells[0].Value)
}
}
func TestSetBalancer(t *testing.T) {
ac := gohbase.NewAdminClient(*host)
sb, err := hrpc.NewSetBalancer(context.Background(), false)
if err != nil {
t.Fatal(err)
}
prevState, err := ac.SetBalancer(sb)
if err != nil {
t.Fatal(err)
}
if !prevState {
t.Fatal("expected balancer to be previously enabled")
}
sb, err = hrpc.NewSetBalancer(context.Background(), true)
if err != nil {
t.Fatal(err)
}
prevState, err = ac.SetBalancer(sb)
if err != nil {
t.Fatal(err)
}
if prevState {
t.Fatal("expected balancer to be previously disabled")
}
}
func TestMoveRegion(t *testing.T) {
c := gohbase.NewClient(*host)
ac := gohbase.NewAdminClient(*host)
// scan meta to get a region to move
scan, err := hrpc.NewScan(context.Background(),
[]byte("hbase:meta"),
hrpc.Families(map[string][]string{"info": []string{"regioninfo"}}))
if err != nil {
t.Fatal(err)
}
var rsp []*hrpc.Result
scanner := c.Scan(scan)
for {
res, err := scanner.Next()
if err == io.EOF {
break
}
if err != nil {
t.Fatal(err)
}
rsp = append(rsp, res)
}
// use the first region
if len(rsp) == 0 {
t.Fatal("got 0 results")
}
if len(rsp[0].Cells) == 0 {
t.Fatal("got 0 cells")
}
regionName := rsp[0].Cells[0].Row
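// hbase:meta row keys end with ".<encoded name>." where the encoded region
// name is a 32-character hash; the slice below keeps exactly those 32
// characters between the final two dots.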
regionName = regionName[len(regionName)-33 : len(regionName)-1]
mr, err := hrpc.NewMoveRegion(context.Background(), regionName)
if err != nil {
t.Fatal(err)
}
if err := ac.MoveRegion(mr); err != nil {
t.Fatal(err)
}
}
| [
"\"HBASE_HOME\"",
"\"HBASE_HOME\""
]
| []
| [
"HBASE_HOME"
]
| [] | ["HBASE_HOME"] | go | 1 | 0 | |
url_condenser/url_condenser/wsgi.py | """
WSGI config for url_condenser project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "url_condenser.settings")
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
ediaristas/ediaristas/asgi.py | """
ASGI config for ediaristas project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ediaristas.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
swat/clib.py | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
SWAT C library functions
'''
from __future__ import print_function, division, absolute_import, unicode_literals
import glob
import os
import itertools
import platform
import struct
import sys
from .utils.compat import PY3, WIDE_CHARS, a2u
from .exceptions import SWATError
# pylint: disable=E1101
_pyswat = None
def _import_pyswat():
''' Import version-specific _pyswat package '''
global _pyswat
import importlib
plat = 'linux'
if sys.platform.lower().startswith('win'):
plat = 'win'
elif sys.platform.lower().startswith('darwin'):
plat = 'mac'
if PY3:
libname = '_py%s%sswat' % (sys.version_info[0], sys.version_info[1])
elif WIDE_CHARS:
libname = '_pyswatw'
else:
libname = '_pyswat'
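# e.g. '_py38swat' on CPython 3.8, '_pyswatw' on wide-unicode Python 2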
# Bail out if we aren't on Linux
# if plat != 'linux':
# raise ValueError('Currently, Linux is the only platform with support '
# 'for the binary protocol. You must connect to CAS '
# 'using the REST interface on this platform.')
# Bail out if the C extension doesn't exist
if not glob.glob(os.path.join(os.path.dirname(__file__), 'lib',
plat, libname + '.*')):
raise ValueError('The extensions for the binary protocol have not been '
'installed. You can either install them using the full '
'platform-dependent install file, or use the REST interface '
'as an alternative.')
libssl_locs = [
'/usr/lib64/libssl.so.10',
'/usr/lib64/libssl.so.1.0*',
'/usr/lib/x86_64-linux-gnu/libssl.so.1.0*',
os.path.join(sys.prefix, 'lib', 'libssl.so.10'),
os.path.join(sys.prefix, 'lib', 'libssl.so.1.0*'),
'/usr/lib64/libssl.so.1.1*',
'/usr/lib/x86_64-linux-gnu/libssl.so.1.1*',
os.path.join(sys.prefix, 'lib', 'libssl.so.1.1*'),
]
libcrypto_locs = [
'/usr/lib64/libcrypto.so*',
'/usr/lib/x86_64-linux-gnu/libcrypto.so*',
os.path.join(sys.prefix, 'lib', 'libcrypto.so*'),
]
if not os.environ.get('TKESSL_OPENSSL_LIB', '').strip():
# Make sure the correct libssl.so is used
libssl = list(itertools.chain(*[list(sorted(glob.glob(x)))
for x in libssl_locs]))
if libssl:
os.environ['TKESSL_OPENSSL_LIB'] = libssl[-1]
if not os.environ.get('TKERSA2_OPENSSL_LIB', '').strip():
# Make sure the correct libssl.so is used
libssl = list(itertools.chain(*[list(sorted(glob.glob(x)))
for x in libssl_locs]))
if libssl:
os.environ['TKERSA2_OPENSSL_LIB'] = libssl[-1]
if not os.environ.get('TKECERT_CRYPTO_LIB', '').strip():
# Make sure the correct libcrypto.so is used
libcrypto = list(itertools.chain(*[list(sorted(glob.glob(x)))
for x in libcrypto_locs]))
if libcrypto:
os.environ['TKECERT_CRYPTO_LIB'] = libcrypto[-1]
if struct.calcsize('P') < 8:
raise RuntimeError('A 64-bit build of Python is required for the '
'binary protocol. You can either install a 64-bit '
'version of Python, or use the REST interface as '
'an alternative.')
# Try to import the C extension
try:
_pyswat = importlib.import_module('.lib.%s.%s' % (plat, libname),
package='swat')
except ImportError:
raise ValueError(('Could not import %s. This is likely due to an
'incorrect SAS TK path or an error while loading the SAS TK '
'subsystem. You can try using the REST interface '
'as an alternative.') % libname)
def SW_CASConnection(*args, **kwargs):
''' Return a CASConnection (importing _pyswat as needed) '''
if _pyswat is None:
_import_pyswat()
return _pyswat.SW_CASConnection(*args, **kwargs)
def SW_CASValueList(*args, **kwargs):
''' Return a CASValueList (importing _pyswat as needed) '''
if _pyswat is None:
_import_pyswat()
return _pyswat.SW_CASValueList(*args, **kwargs)
def SW_CASFormatter(*args, **kwargs):
''' Return a CASFormatter (importing _pyswat as needed) '''
if _pyswat is None:
_import_pyswat()
return _pyswat.SW_CASFormatter(*args, **kwargs)
def SW_CASConnectionEventWatcher(*args, **kwargs):
''' Return a CASConnectionEventWatcher (importing _pyswat as needed) '''
if _pyswat is None:
_import_pyswat()
return _pyswat.SW_CASConnectionEventWatcher(*args, **kwargs)
def SW_CASDataBuffer(*args, **kwargs):
''' Return a CASDataBuffer (importing _pyswat as needed) '''
if _pyswat is None:
_import_pyswat()
return _pyswat.SW_CASDataBuffer(*args, **kwargs)
def SW_CASError(*args, **kwargs):
''' Return a CASError (importing _pyswat as needed) '''
if _pyswat is None:
_import_pyswat()
return _pyswat.SW_CASError(*args, **kwargs)
def InitializeTK(*args, **kwargs):
''' Initialize the TK subsystem (importing _pyswat as needed) '''
if _pyswat is None:
_import_pyswat()
# Patch ppc linux path
set_tkpath_env = 'ppc' in platform.machine() and 'TKPATH' not in os.environ
if set_tkpath_env and args:
os.environ['TKPATH'] = args[0]
try:
out = _pyswat.InitializeTK(*args, **kwargs)
finally:
if set_tkpath_env:
del os.environ['TKPATH']
# Override TKPATH after initialization so that other TK applications
# won't be affected (Windows only).
if sys.platform.lower().startswith('win') and 'TKPATH' not in os.environ:
os.environ['TKPATH'] = os.pathsep
return out
def TKVersion():
''' Return the TK subsystem version '''
try:
if _pyswat is None:
_import_pyswat()
except (ValueError, RuntimeError):
return 'none'
try:
return _pyswat.TKVersion()
except AttributeError:
return 'vb025'
def errorcheck(expr, obj):
'''
Check for generated error message
Parameters
----------
expr : any
Result to return if no error happens
obj : SWIG-based class
Object to check for messages
Raises
------
SWATError
If error message exists
Returns
-------
`expr` argument
The result of `expr`
'''
if obj is not None:
msg = obj.getLastErrorMessage()
if msg:
raise SWATError(a2u(msg, 'utf-8'))
return expr
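# Illustrative usage sketch for errorcheck; `sw_values` below is a
# hypothetical SWIG object exposing getLastErrorMessage():
#
# result = errorcheck(sw_values.getInt64(0), sw_values)
#
# If the wrapped call recorded an error message, SWATError is raised instead
# of returning the (possibly bogus) value.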
| []
| []
| [
"TKPATH",
"TKESSL_OPENSSL_LIB",
"TKERSA2_OPENSSL_LIB",
"TKECERT_CRYPTO_LIB"
]
| [] | ["TKPATH", "TKESSL_OPENSSL_LIB", "TKERSA2_OPENSSL_LIB", "TKECERT_CRYPTO_LIB"] | python | 4 | 0 | |
GetAuth/__init__.py | import logging
import os
import json
import azure.functions as func
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Received auth request.')
validLogin = False
try:
req_body = req.get_json()
except ValueError:
logging.info("Could not get json body")
pass
else:
user = req_body.get('user')
password = req_body.get('password')
# simulate logging into an auth provider
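# (Demo-only credential check; a real deployment would call an identity
# provider here instead of comparing hard-coded values.)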
if (user == "user1" and password == "pass123"):
validLogin = True
else:
logging.info("Invalid username or password")
if not validLogin:
return func.HttpResponse(
"Unauthorized",
status_code=403
)
# Valid - return auth code to use for subsequent requests
authCode = os.environ["AuthCode"]
return func.HttpResponse(json.dumps({'AuthCode': authCode}))
# To test: curl --header "Content-Type: application/json" --request POST --data '{"user":"user1","password":"pass123"}' http://localhost:7071/api/GetAuth | []
| []
| [
"AuthCode"
]
| [] | ["AuthCode"] | python | 1 | 0 | |
deepimpute/multinet.py | import os
import warnings
import tempfile
import pandas as pd
import numpy as np
from scipy.stats import pearsonr
import tensorflow.keras as keras
from keras import backend as K
from keras.models import Model,model_from_json
from keras.layers import Dense,Dropout,Input
from keras.callbacks import EarlyStopping
import keras.losses
import tensorflow as tf
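# Note: TF_CPP_MIN_LOG_LEVEL is normally read when TensorFlow initializes its
# C++ logging; since tensorflow is already imported above, setting it below
# may not silence all startup messages (assumption, not verified here).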
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def get_distance_matrix(raw, n_pred=None):
VMR = raw.std() / raw.mean()
VMR[np.isinf(VMR)] = 0
if n_pred is None:
potential_pred = raw.columns[VMR > 0]
else:
print("Using {} predictors".format(n_pred))
potential_pred = VMR.sort_values(ascending=False).index[:n_pred]
covariance_matrix = pd.DataFrame(np.abs(np.corrcoef(raw.T.loc[potential_pred])),
index=potential_pred,
columns=potential_pred).fillna(0)
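# Note: despite the variable name, this holds absolute Pearson correlations
# (np.corrcoef), not covariances.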
return covariance_matrix
def wMSE(y_true, y_pred, binary=False):
if binary:
weights = tf.cast(y_true>0, tf.float32)
else:
weights = y_true
return tf.reduce_mean(weights*tf.square(y_true-y_pred))
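# Worked example of the weighting above (assumed values):
# y_true = [0., 2.], y_pred = [1., 1.], binary=False -> weights = [0., 2.]
# wMSE = mean([0*(0-1)^2, 2*(2-1)^2]) = mean([0., 2.]) = 1.0
# Zero entries in y_true (likely dropouts) contribute nothing to the loss.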
def inspect_data(data):
# Check if there area any duplicated cell/gene labels
if sum(data.index.duplicated()):
print("ERROR: duplicated cell labels. Please provide unique cell labels.")
exit(1)
if sum(data.columns.duplicated()):
print("ERROR: duplicated gene labels. Please provide unique gene labels.")
exit(1)
max_value = np.max(data.values)
if max_value < 10:
print("ERROR: max value = {}. Is your data log-transformed? Please provide raw counts"
.format(max_value))
exit(1)
print("Input dataset is {} cells (rows) and {} genes (columns)"
.format(*data.shape))
print("First 3 rows and columns:")
print(data.iloc[:3,:3])
class MultiNet:
def __init__(self,
learning_rate=1e-4,
batch_size=64,
max_epochs=500,
patience=5,
ncores=-1,
loss="wMSE",
output_prefix=tempfile.mkdtemp(),
sub_outputdim=512,
verbose=1,
seed=1234,
architecture=None
):
self.NN_parameters = {"learning_rate": learning_rate,
"batch_size": batch_size,
"loss": loss,
"architecture": architecture,
"max_epochs": max_epochs,
"patience": patience}
self.sub_outputdim = sub_outputdim
self.outputdir = output_prefix
self.verbose = verbose
self.seed = seed
self.setCores(ncores)
def setCores(self, ncores):
if ncores > 0:
self.ncores = ncores
else:
self.ncores = os.cpu_count()
print("Using all the cores ({})".format(self.ncores))
def loadDefaultArchitecture(self):
self.NN_parameters['architecture'] = [
{"type": "dense", "neurons": self.sub_outputdim//2, "activation": "relu"},
{"type": "dropout", "rate": 0.2},
]
def save(self, model):
os.system("mkdir -p {}".format(self.outputdir))
model_json = model.to_json()
with open("{}/model.json".format(self.outputdir), "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("{}/model.h5".format(self.outputdir))
print("Saved model to disk in {}".format(self.outputdir))
def load(self):
json_file = open('{}/model.json'.format(self.outputdir), 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
model.load_weights('{}/model.h5'.format(self.outputdir))
return model
def build(self, inputdims):
if self.NN_parameters['architecture'] is None:
self.loadDefaultArchitecture()
print(self.NN_parameters['architecture'])
inputs = [ Input(shape=(inputdim,)) for inputdim in inputdims ]
outputs = inputs
for layer in self.NN_parameters['architecture']:
if layer['type'].lower() == 'dense':
outputs = [ Dense(layer['neurons'], activation=layer['activation'])(output)
for output in outputs ]
elif layer['type'].lower() == 'dropout':
outputs = [ Dropout(layer['rate'], seed=self.seed)(output)
for output in outputs]
else:
print("Unknown layer type.")
outputs = [Dense(self.sub_outputdim, activation="softplus")(output)
for output in outputs]
model = Model(inputs=inputs, outputs=outputs)
loss = self.NN_parameters['loss']
if loss in [k for k, v in globals().items() if callable(v)]:
# if loss is a defined function
loss = eval(self.NN_parameters['loss'])
if not callable(loss):
# it is defined in Keras
if hasattr(keras.losses, loss):
loss = getattr(keras.losses, loss)
else:
print('Unknown loss: {}. Aborting.'.format(loss))
exit(1)
model.compile(optimizer=keras.optimizer_v2.adam.Adam(lr=self.NN_parameters['learning_rate']),
loss=loss)
return model
def fit(self,
raw,
cell_subset=1,
NN_lim=None,
genes_to_impute=None,
n_pred=None,
ntop=5,
minVMR=0.5,
mode='random',
):
inspect_data(raw)
if self.seed is not None:
np.random.seed(self.seed)
if cell_subset != 1:
if cell_subset < 1:
raw = raw.sample(frac=cell_subset)
else:
raw = raw.sample(cell_subset)
gene_metric = (raw.var()/(1+raw.mean())).sort_values(ascending=False)
gene_metric = gene_metric[gene_metric > 0]
if genes_to_impute is None:
genes_to_impute = self.filter_genes(gene_metric, minVMR, NN_lim=NN_lim)
else:
# Make the number of genes to impute a multiple of the network output dim
n_genes = len(genes_to_impute)
if n_genes % self.sub_outputdim != 0:
print("The number of input genes is not a multiple of {}. Filling with other genes.".format(n_genes))
fill_genes = gene_metric.index[:self.sub_outputdim-n_genes]
if len(fill_genes) < self.sub_outputdim-n_genes:
# Not enough genes in gene_metric. Sample with replacement
rest = self.sub_outputdim - n_genes - len(fill_genes)
fill_genes = np.concatenate([fill_genes,
np.random.choice(gene_metric.index, rest, replace=True)])
genes_to_impute = np.concatenate([genes_to_impute, fill_genes])
covariance_matrix = get_distance_matrix(raw, n_pred=n_pred)
self.setTargets(raw.reindex(columns=genes_to_impute), mode=mode)
self.setPredictors(covariance_matrix, ntop=ntop)
print("Normalization")
norm_data = np.log1p(raw).astype(np.float32) # normalizer.transform(raw)
np.random.seed(self.seed)
tf.random.set_seed(self.seed)
tf.config.threading.set_inter_op_parallelism_threads(self.ncores)
tf.config.threading.set_intra_op_parallelism_threads(self.ncores)
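# Pin TF's inter- and intra-op thread pools to the requested core count so
# CPU usage matches the ncores setting.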
print("Building network")
model = self.build([len(genes) for genes in self.predictors])
test_cells = np.random.choice(norm_data.index, int(0.05 * norm_data.shape[0]), replace=False)
train_cells = np.setdiff1d(norm_data.index, test_cells)
X_train = [norm_data.loc[train_cells, inputgenes].values for inputgenes in self.predictors]
Y_train = [norm_data.loc[train_cells, targetgenes].values for targetgenes in self.targets]
X_test = [norm_data.loc[test_cells, inputgenes].values for inputgenes in self.predictors]
Y_test = [norm_data.loc[test_cells, targetgenes].values for targetgenes in self.targets]
print("Fitting with {} cells".format(norm_data.shape[0]))
result = model.fit(X_train, Y_train,
validation_data=(X_test,Y_test),
epochs=self.NN_parameters["max_epochs"],
batch_size=self.NN_parameters["batch_size"],
callbacks=[EarlyStopping(monitor='val_loss',
patience=self.NN_parameters["patience"])],
verbose=self.verbose)
self.trained_epochs = len(result.history['loss'])
print("Stopped fitting after {} epochs".format(self.trained_epochs))
self.save(model)
# Save some metrics on test data
Y_test_raw = np.hstack(Y_test).flatten()
Y_test_imputed = np.hstack(model.predict(X_test)).flatten()
# Keep only positive values (since negative values could be dropouts)
Y_test_imputed = Y_test_imputed[Y_test_raw>0]
Y_test_raw = Y_test_raw[Y_test_raw>0]
self.test_metrics = {
'correlation': pearsonr(Y_test_raw,Y_test_imputed)[0],
'MSE': np.sum((Y_test_raw-Y_test_imputed)**2)/len(Y_test_raw)
}
return self
def predict(self,
raw,
imputed_only=False,
policy="restore"):
norm_raw = np.log1p(raw)
inputs = [ norm_raw.loc[:,predictors].values.astype(np.float32)
for predictors in self.predictors ]
model = self.load()
predicted = model.predict(inputs)
if len(inputs)>1:
predicted = np.hstack(predicted)
predicted = pd.DataFrame(predicted, index=raw.index, columns=self.targets.flatten())
predicted = predicted.groupby(by=predicted.columns, axis=1).mean()
not_predicted = norm_raw.drop(self.targets.flatten(), axis=1)
imputed = (pd.concat([predicted,not_predicted],axis=1)
.loc[raw.index, raw.columns]
.values)
# To prevent overflow
imputed[ (imputed > 2*norm_raw.values.max()) | (np.isnan(imputed)) ] = 0
# Convert back to counts
imputed = np.expm1(imputed)
if policy == "restore":
print("Filling zeros")
mask = (raw.values > 0)
imputed[mask] = raw.values[mask]
elif policy == "max":
print("Imputing data with 'max' policy")
mask = (raw.values > imputed)
imputed[mask] = raw.values[mask]
imputed = pd.DataFrame(imputed, index=raw.index, columns=raw.columns)
if imputed_only:
return imputed.loc[:, predicted.columns]
else:
return imputed
def filter_genes(self,
gene_metric, # assumes gene_metric is sorted
threshold,
NN_lim=None
):
if not str(NN_lim).isdigit():
NN_lim = (gene_metric > threshold).sum()
n_subsets = int(np.ceil(NN_lim / self.sub_outputdim))
genes_to_impute = gene_metric.index[:n_subsets*self.sub_outputdim]
rest = len(genes_to_impute) % self.sub_outputdim
if rest > 0:
fill_genes = np.random.choice(gene_metric.index, self.sub_outputdim - rest)
genes_to_impute = np.concatenate([genes_to_impute, fill_genes])
print("{} genes selected for imputation".format(len(genes_to_impute)))
return genes_to_impute
def setTargets(self,data, mode='random'):
n_subsets = int(data.shape[1]/self.sub_outputdim)
if mode == 'progressive':
self.targets = data.columns.values.reshape([n_subsets, self.sub_outputdim])
else:
self.targets = np.random.choice(data.columns,
[n_subsets, self.sub_outputdim],
replace=False)
def setPredictors(self, covariance_matrix, ntop=5):
self.predictors = []
for i,targets in enumerate(self.targets):
genes_not_in_target = np.setdiff1d(covariance_matrix.columns, targets)
if genes_not_in_target.size == 0:
warnings.warn('Warning: number of target genes lower than output dim. Consider lowering down the sub_outputdim parameter',
UserWarning)
genes_not_in_target = covariance_matrix.columns
subMatrix = ( covariance_matrix
.loc[targets, genes_not_in_target]
)
sorted_idx = np.argsort(-subMatrix.values, axis=1)
predictors = subMatrix.columns[sorted_idx[:,:ntop].flatten()]
self.predictors.append(predictors.unique())
print("Net {}: {} predictors, {} targets"
.format(i,len(np.unique(predictors)),len(targets)))
def score(self, data, policy=None):
warnings.warn(
"This method is deprecated. Please use model.test_metrics to measure model accuracy instead",
DeprecationWarning)
Y_hat = self.predict(data, policy=policy)
Y = data.loc[Y_hat.index, Y_hat.columns]
return pearsonr(Y_hat.values.reshape(-1), Y.values.reshape(-1))
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
cmd/githubCreatePullRequest_generated.go | // Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/splunk"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/SAP/jenkins-library/pkg/validation"
"github.com/spf13/cobra"
)
type githubCreatePullRequestOptions struct {
Assignees []string `json:"assignees,omitempty"`
Base string `json:"base,omitempty"`
Body string `json:"body,omitempty"`
APIURL string `json:"apiUrl,omitempty"`
Head string `json:"head,omitempty"`
Owner string `json:"owner,omitempty"`
Repository string `json:"repository,omitempty"`
ServerURL string `json:"serverUrl,omitempty"`
Title string `json:"title,omitempty"`
Token string `json:"token,omitempty"`
Labels []string `json:"labels,omitempty"`
}
// GithubCreatePullRequestCommand Create a pull request on GitHub
func GithubCreatePullRequestCommand() *cobra.Command {
const STEP_NAME = "githubCreatePullRequest"
metadata := githubCreatePullRequestMetadata()
var stepConfig githubCreatePullRequestOptions
var startTime time.Time
var logCollector *log.CollectorHook
var createGithubCreatePullRequestCmd = &cobra.Command{
Use: STEP_NAME,
Short: "Create a pull request on GitHub",
Long: `This step allows you to create a pull request on Github.
It can for example be used for GitOps scenarios or for scenarios where you want to have a manual confirmation step which is delegated to a GitHub pull request.`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
GeneralConfig.GitHubAccessTokens = ResolveAccessTokens(GeneralConfig.GitHubTokens)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.Token)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
logCollector = &log.CollectorHook{CorrelationID: GeneralConfig.CorrelationID}
log.RegisterHook(logCollector)
}
validation, err := validation.New(validation.WithJSONNamesForStructFields(), validation.WithPredefinedErrorMessages())
if err != nil {
return err
}
if err = validation.ValidateStruct(stepConfig); err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
telemetryData := telemetry.CustomData{}
telemetryData.ErrorCode = "1"
handler := func() {
config.RemoveVaultSecretFiles()
telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
telemetryData.ErrorCategory = log.GetErrorCategory().String()
telemetry.Send(&telemetryData)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunk.Send(&telemetryData, logCollector)
}
}
log.DeferExitHandler(handler)
defer handler()
telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunk.Initialize(GeneralConfig.CorrelationID,
GeneralConfig.HookConfig.SplunkConfig.Dsn,
GeneralConfig.HookConfig.SplunkConfig.Token,
GeneralConfig.HookConfig.SplunkConfig.Index,
GeneralConfig.HookConfig.SplunkConfig.SendLogs)
}
githubCreatePullRequest(stepConfig, &telemetryData)
telemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addGithubCreatePullRequestFlags(createGithubCreatePullRequestCmd, &stepConfig)
return createGithubCreatePullRequestCmd
}
func addGithubCreatePullRequestFlags(cmd *cobra.Command, stepConfig *githubCreatePullRequestOptions) {
cmd.Flags().StringSliceVar(&stepConfig.Assignees, "assignees", []string{}, "Login names of users to which the PR should be assigned to.")
cmd.Flags().StringVar(&stepConfig.Base, "base", os.Getenv("PIPER_base"), "The name of the branch you want the changes pulled into.")
cmd.Flags().StringVar(&stepConfig.Body, "body", os.Getenv("PIPER_body"), "The description text of the pull request in markdown format.")
cmd.Flags().StringVar(&stepConfig.APIURL, "apiUrl", `https://api.github.com`, "Set the GitHub API url.")
cmd.Flags().StringVar(&stepConfig.Head, "head", os.Getenv("PIPER_head"), "The name of the branch where your changes are implemented.")
cmd.Flags().StringVar(&stepConfig.Owner, "owner", os.Getenv("PIPER_owner"), "Name of the GitHub organization.")
cmd.Flags().StringVar(&stepConfig.Repository, "repository", os.Getenv("PIPER_repository"), "Name of the GitHub repository.")
cmd.Flags().StringVar(&stepConfig.ServerURL, "serverUrl", `https://github.com`, "GitHub server url for end-user access.")
cmd.Flags().StringVar(&stepConfig.Title, "title", os.Getenv("PIPER_title"), "Title of the pull request.")
cmd.Flags().StringVar(&stepConfig.Token, "token", os.Getenv("PIPER_token"), "GitHub personal access token as per https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line")
cmd.Flags().StringSliceVar(&stepConfig.Labels, "labels", []string{}, "Labels to be added to the pull request.")
cmd.MarkFlagRequired("base")
cmd.MarkFlagRequired("body")
cmd.MarkFlagRequired("apiUrl")
cmd.MarkFlagRequired("head")
cmd.MarkFlagRequired("owner")
cmd.MarkFlagRequired("repository")
cmd.MarkFlagRequired("serverUrl")
cmd.MarkFlagRequired("title")
cmd.MarkFlagRequired("token")
}
// retrieve step metadata
func githubCreatePullRequestMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "githubCreatePullRequest",
Aliases: []config.Alias{},
Description: "Create a pull request on GitHub",
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Secrets: []config.StepSecrets{
{Name: "githubTokenCredentialsId", Description: "Jenkins 'Secret text' credentials ID containing token to authenticate to GitHub.", Type: "jenkins"},
},
Parameters: []config.StepParameters{
{
Name: "assignees",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{},
Default: []string{},
},
{
Name: "base",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_base"),
},
{
Name: "body",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_body"),
},
{
Name: "apiUrl",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "githubApiUrl"}},
Default: `https://api.github.com`,
},
{
Name: "head",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_head"),
},
{
Name: "owner",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "github/owner",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "githubOrg"}},
Default: os.Getenv("PIPER_owner"),
},
{
Name: "repository",
ResourceRef: []config.ResourceReference{
{
Name: "commonPipelineEnvironment",
Param: "github/repository",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "githubRepo"}},
Default: os.Getenv("PIPER_repository"),
},
{
Name: "serverUrl",
ResourceRef: []config.ResourceReference{},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "githubServerUrl"}},
Default: `https://github.com`,
},
{
Name: "title",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_title"),
},
{
Name: "token",
ResourceRef: []config.ResourceReference{
{
Name: "githubTokenCredentialsId",
Type: "secret",
},
{
Name: "githubVaultSecretName",
Type: "vaultSecret",
Default: "github",
},
},
Scope: []string{"GENERAL", "PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "githubToken"}, {Name: "access_token"}},
Default: os.Getenv("PIPER_token"),
},
{
Name: "labels",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "[]string",
Mandatory: false,
Aliases: []config.Alias{},
Default: []string{},
},
},
},
},
}
return theMetaData
}
| [
"\"PIPER_base\"",
"\"PIPER_body\"",
"\"PIPER_head\"",
"\"PIPER_owner\"",
"\"PIPER_repository\"",
"\"PIPER_title\"",
"\"PIPER_token\"",
"\"PIPER_base\"",
"\"PIPER_body\"",
"\"PIPER_head\"",
"\"PIPER_owner\"",
"\"PIPER_repository\"",
"\"PIPER_title\"",
"\"PIPER_token\""
]
| []
| [
"PIPER_title",
"PIPER_body",
"PIPER_token",
"PIPER_head",
"PIPER_base",
"PIPER_repository",
"PIPER_owner"
]
| [] | ["PIPER_title", "PIPER_body", "PIPER_token", "PIPER_head", "PIPER_base", "PIPER_repository", "PIPER_owner"] | go | 7 | 0 | |
example/manage.py | #!/usr/bin/env python
import os
import sys
import warnings
if __name__ == "__main__":
here = os.path.dirname(__file__)
there = os.path.join(here, '..')
there = os.path.abspath(there)
sys.path.insert(0, there)
print "NOTE Using jingo_offline_compressor from %s" % there
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_04_01/aio/_configuration.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ContainerRegistryManagementClientConfiguration(Configuration):
"""Configuration for ContainerRegistryManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The Microsoft Azure subscription ID.
:type subscription_id: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(ContainerRegistryManagementClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2019-04-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-containerregistry/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| []
| []
| []
| [] | [] | python | null | null | null |
practical/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'practical.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
test_video_stream.py | from imutils.video import VideoStream
import imutils
import time
import cv2
import os
import mxnet as mx
from mxnet.gluon.data.vision import transforms
import numpy as np
from tinydb import TinyDB, Query
import vptree
db = TinyDB('./d_single_image_ids.json')
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '2'
sample_size = 2
factor = 0.02 * 512 # loosely crop face
output = "output.avi"
writer = None
ctx = [mx.gpu(0)]
# load face detection, face feature extraction and classification model
det_net = mx.gluon.nn.SymbolBlock.imports("./models/face_detection/center_net_resnet18_v1b_face_best-symbol.json", [
'data'], "./models/face_detection/center_net_resnet18_v1b_face_best-0113.params", ctx=ctx)
features_net = mx.gluon.nn.SymbolBlock.imports(
"./models/face_feature_extraction/mobilefacenet-symbol.json", ['data'], "./models/face_feature_extraction/mobilefacenet-0000.params", ctx=ctx)
mlp_net = mx.gluon.nn.SymbolBlock.imports(
"./models/mlp-symbol.json", ['data'], "./models/mlp-0029.params", ctx=ctx)
face_transforms = transforms.Compose([
transforms.Resize((128, 128)),
transforms.ToTensor(),
])
vs = VideoStream(src=0).start()
enrolledIds = db.all()
synsets = enrolledIds[0]['labels']
while True:
frame = vs.read()
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
rgb = cv2.resize(rgb, (512, 512))
yr = frame.shape[0] / float(rgb.shape[0])
xr = frame.shape[1] / float(rgb.shape[1])
x = mx.nd.image.normalize(mx.nd.image.to_tensor(mx.nd.array(rgb)), mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225)).as_in_context(ctx[0]).expand_dims(0).astype('float16', copy=False)
_, scores, bboxes = det_net(x)
names = []
for (s, (left, top, right, bottom)) in zip(scores.asnumpy()[0], bboxes.asnumpy()[0]):
if s < 0.4:
continue
top = int(max(top-factor, 0) * yr)
right = int(min(right+factor, 512) * xr)
bottom = int(min(bottom+factor, 512) * yr)
left = int(max(left-factor, 0) * xr)
face_image = frame[top:bottom, left:right]
color = (0, 0, 255)
if face_image.shape[0] >= 128 and face_image.shape[1] >= 128:
rgb = cv2.cvtColor(
face_image, cv2.COLOR_BGR2RGB)
face = mx.nd.array(rgb)
encodings = []
xface = face_transforms(face).expand_dims(0)
y = features_net(xface.as_in_context(ctx[0]))
predictions = mlp_net(y)[0]
p = mx.nd.argmax(predictions)
y = top - 15 if top - 15 > 15 else top + 15
cv2.putText(frame, synsets[int(p.asscalar())] + '\n' + str(predictions[p].asscalar()), (left, y), cv2.FONT_HERSHEY_SIMPLEX,
0.5, color, 1)
cv2.rectangle(frame, (left, top), (right, bottom),
color, 2)
frame = imutils.resize(frame, width=1024)
if writer is None and output:  # create the writer lazily, only when an output path is set
fourcc = cv2.VideoWriter_fourcc(*"MJPG")
writer = cv2.VideoWriter(output, fourcc, 20,
(frame.shape[1], frame.shape[0]), True)
if writer is not None:
writer.write(frame)
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# do a bit of cleanup
vs.stop()
cv2.destroyAllWindows()
| []
| []
| [
"MXNET_CUDNN_AUTOTUNE_DEFAULT"
]
| [] | ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] | python | 1 | 0 | |
components/lwip/weekend_test/net_suite_test.py | import os
import re
import socket
import subprocess
import time
from shutil import copyfile
from threading import Event, Thread
import ttfw_idf
from tiny_test_fw import DUT, Utility
stop_sock_listener = Event()
stop_io_listener = Event()
sock = None
client_address = None
manual_test = False
def io_listener(dut1):
global sock
global client_address
data = b''
while not stop_io_listener.is_set():
try:
data = dut1.expect(re.compile(r'PacketOut:\[([a-fA-F0-9]+)\]'), timeout=5)
except DUT.ExpectTimeout:
continue
if data != () and data[0] != b'':
packet_data = data[0]
print('Packet_data>{}<'.format(packet_data))
response = bytearray.fromhex(packet_data.decode())
print('Sending to socket:')
packet = ' '.join(format(x, '02x') for x in bytearray(response))
print('Packet>{}<'.format(packet))
if client_address is not None:
sock.sendto(response, ('127.0.0.1', 7777))
def sock_listener(dut1):
global sock
global client_address
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(5)
server_address = '0.0.0.0'
server_port = 7771
server = (server_address, server_port)
sock.bind(server)
try:
while not stop_sock_listener.is_set():
try:
payload, client_address = sock.recvfrom(1024)
packet = ' '.join(format(x, '02x') for x in bytearray(payload))
print('Received from address {}, data {}'.format(client_address, packet))
dut1.write(str.encode(packet))
except socket.timeout:
pass
finally:
sock.close()
sock = None
@ttfw_idf.idf_example_test(env_tag='Example_WIFI')
def lwip_test_suite(env, extra_data):
global stop_io_listener
global stop_sock_listener
"""
steps: |
1. Rebuilds test suite with esp32_netsuite.ttcn
2. Starts listeners on stdout and socket
3. Execute ttcn3 test suite
4. Collect result from ttcn3
"""
dut1 = env.get_dut('net_suite', 'examples/system/network_tests', dut_class=ttfw_idf.ESP32DUT)
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, 'net_suite.bin')
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('net_suite', '{}KB'.format(bin_size // 1024))
ttfw_idf.check_performance('net_suite', bin_size // 1024, dut1.TARGET)
dut1.start_app()
thread1 = Thread(target=sock_listener, args=(dut1, ))
thread2 = Thread(target=io_listener, args=(dut1, ))
if not manual_test:
# Variables referring to the esp32 ttcn test suite
TTCN_SRC = 'esp32_netsuite.ttcn'
TTCN_CFG = 'esp32_netsuite.cfg'
# System Paths
netsuite_path = os.getenv('NETSUITE_PATH')
netsuite_src_path = os.path.join(netsuite_path, 'src')
test_dir = os.path.dirname(os.path.realpath(__file__))
# Building the suite
print('Rebuilding the test suite')
print('-------------------------')
# copy esp32 specific files to ttcn net-suite dir
copyfile(os.path.join(test_dir, TTCN_SRC), os.path.join(netsuite_src_path, TTCN_SRC))
copyfile(os.path.join(test_dir, TTCN_CFG), os.path.join(netsuite_src_path, TTCN_CFG))
proc = subprocess.Popen(['bash', '-c', 'cd ' + netsuite_src_path + ' && source make.sh'],
cwd=netsuite_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = proc.stdout.read()
print('Note: the first build step is expected to fail (the titan/net_suite build system is not suitable for multijob make)')
print(output)
proc = subprocess.Popen(['bash', '-c', 'cd ' + netsuite_src_path + ' && make'],
cwd=netsuite_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print('Note: This time all dependencies shall be generated -- multijob make shall pass')
output = proc.stdout.read()
print(output)
# Executing the test suite
thread1.start()
thread2.start()
time.sleep(2)
print('Executing the test suite')
print('------------------------')
proc = subprocess.Popen(['ttcn3_start', os.path.join(netsuite_src_path,'test_suite'), os.path.join(netsuite_src_path, TTCN_CFG)],
stdout=subprocess.PIPE)
output = proc.stdout.read()
print(output)
print('Collecting results')
print('------------------')
verdict_stats = re.search(b'(Verdict statistics:.*)', output)  # ttcn3 output is bytes
if verdict_stats:
verdict_stats = verdict_stats.group(1)
else:
verdict_stats = b''
verdict = re.search(b'Overall verdict: pass', output)
if verdict:
print('Test passed!')
Utility.console_log(verdict_stats, 'green')
else:
Utility.console_log(verdict_stats, 'red')
raise ValueError('Test failed with: {}'.format(verdict_stats))
else:
try:
# Executing the test suite
thread1.start()
thread2.start()
time.sleep(2)
while True:
time.sleep(0.5)
except KeyboardInterrupt:
pass
print('Executing done, waiting for tests to finish')
print('-------------------------------------------')
stop_io_listener.set()
stop_sock_listener.set()
thread1.join()
thread2.join()
if __name__ == '__main__':
print('Manual execution, please build and start ttcn in a separate console')
manual_test = True
lwip_test_suite()
| []
| []
| [
"NETSUITE_PATH"
]
| [] | ["NETSUITE_PATH"] | python | 1 | 0 | |
cmd/kubeturbo/kubeturbo.go | /*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
goflag "flag"
"os"
"runtime"
"time"
"github.com/golang/glog"
"github.com/spf13/pflag"
"github.com/turbonomic/kubeturbo/cmd/kubeturbo/app"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/util/flag"
"k8s.io/klog"
)
// Initialize logs with the following steps:
// - Merge glog and klog flags into goflag FlagSet
// - Add the above merged goflag set into pflag CommandLine FlagSet
// - Add kubeturbo flags into pflag CommandLine FlagSet
// - Parse pflag FlagSet:
// - goflag FlagSet will be parsed first
// - pflag FlagSet will be parsed next
// - Sync those glog flags that also appear in klog flags
//
// Return log flush frequency
func initLogs(s *app.VMTServer) *time.Duration {
// Change default behavior: log to both stderr and /var/log/
// These arguments can be overwritten from the command-line args
_ = goflag.Set("alsologtostderr", "true")
_ = goflag.Set("log_dir", "/var/log")
// Initialize klog specific flags into a new FlagSet
klogFlags := goflag.NewFlagSet("klog", goflag.ExitOnError)
klog.InitFlags(klogFlags)
// Add klog specific flags into goflag
klogFlags.VisitAll(func(klogFlag *goflag.Flag) {
if goflag.CommandLine.Lookup(klogFlag.Name) == nil {
// This is a klog specific flag
goflag.CommandLine.Var(klogFlag.Value, klogFlag.Name, klogFlag.Usage)
}
})
// Add log flush frequency
logFlushFreq := goflag.Duration("log-flush-frequency", 5*time.Second,
"Maximum number of seconds between log flushes")
// Add goflag to pflag
// During pflag.Parse(), all goflag will be parsed using goflag.Parse()
pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
// Add kubeturbo specific flags to pflag
s.AddFlags(pflag.CommandLine)
// We have all the defined flags, now parse it
pflag.CommandLine.SetNormalizeFunc(flag.WordSepNormalizeFunc)
pflag.Parse()
// Sync the glog and klog flags
pflag.CommandLine.VisitAll(func(glogFlag *pflag.Flag) {
klogFlag := klogFlags.Lookup(glogFlag.Name)
if klogFlag != nil {
value := glogFlag.Value.String()
_ = klogFlag.Value.Set(value)
}
})
// Print out all parsed flags
pflag.VisitAll(func(flag *pflag.Flag) {
glog.V(2).Infof("FLAG: --%s=%q", flag.Name, flag.Value)
})
return logFlushFreq
}
func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
s := app.NewVMTServer()
logFlushFreq := initLogs(s)
// Launch separate goroutines to flush glog and klog
go wait.Forever(klog.Flush, *logFlushFreq)
go wait.Forever(glog.Flush, *logFlushFreq)
defer klog.Flush()
defer glog.Flush()
glog.Infof("Run Kubeturbo service (GIT_COMMIT: %s)", os.Getenv("GIT_COMMIT"))
s.Run()
}
| [
"\"GIT_COMMIT\""
]
| []
| [
"GIT_COMMIT"
]
| [] | ["GIT_COMMIT"] | go | 1 | 0 | |
setup.py | #!/usr/bin/env python
import os
import re
from setuptools import setup
from setuptools import find_packages
from setuptools.command.sdist import sdist as _sdist
from setuptools.extension import Extension as _Extension
cmdclass = {}
ext_modules = []
cython_modules = [
'thriftrw._buffer',
'thriftrw._cython',
'thriftrw._runtime',
'thriftrw.protocol.core',
'thriftrw.protocol.binary',
'thriftrw.spec.base',
'thriftrw.spec.check',
'thriftrw.spec.common',
'thriftrw.spec.enum',
'thriftrw.spec.exc',
'thriftrw.spec.field',
'thriftrw.spec.list',
'thriftrw.spec.map',
'thriftrw.spec.reference',
'thriftrw.spec.primitive',
'thriftrw.spec.service',
'thriftrw.spec.set',
'thriftrw.spec.spec_mapper',
'thriftrw.spec.struct',
'thriftrw.spec.typedef',
'thriftrw.spec.union',
'thriftrw.wire.message',
'thriftrw.wire.mtype',
'thriftrw.wire.ttype',
'thriftrw.wire.value',
]
extension_extras = {}
# If Cython is available we will re-cythonize the pyx files, otherwise we just
# compile the packaged C files.
extension_filetype = '.c'
Extension = None
try:
import Cython.Distutils
cmdclass.update(build_ext=Cython.Distutils.build_ext)
# Check if we forgot to add something to cython_modules.
for root, _, files in os.walk('thriftrw'):
for name in files:
if not name.endswith('.pyx'):
continue
path = os.path.join(root, name)
module = path.replace('/', '.')[:-4]
if module not in cython_modules:
raise Exception(
'Module "%s" (%s) is not present in the '
'"cython_modules" list.'
% (module, path)
)
Extension = Cython.Distutils.Extension
extension_filetype = '.pyx'
cython_directives = {
'embedsignature': True,
}
if os.getenv('THRIFTRW_PROFILE'):
# Add hooks for the profiler in the generated C code.
cython_directives['profile'] = True
if os.getenv('THRIFTRW_COVERAGE'):
# Add line tracing hooks to the generated C code. The hooks aren't
# actually enabled unless the CYTHON_TRACE macro is also set. This
# affects performance negatively and should only be used during
# testing.
extension_extras['define_macros'] = [('CYTHON_TRACE', '1')]
cython_directives['linetrace'] = True
if cython_directives:
extension_extras['cython_directives'] = cython_directives
except ImportError:
pass
if Extension is None:
Extension = _Extension
for module in cython_modules:
ext_modules.append(
Extension(
module,
[module.replace('.', '/') + extension_filetype],
**extension_extras
)
)
class sdist(_sdist):
"""This forces us to always re-compile extensions before releasing."""
def run(self):
try:
from Cython.Build import cythonize
cythonize([
module.replace('.', '/') + '.pyx' for module in cython_modules
])
except ImportError:
pass
_sdist.run(self)
cmdclass['sdist'] = sdist
version = None
with open('thriftrw/__init__.py', 'r') as f:
for line in f:
m = re.match(r'^__version__\s*=\s*(["\'])([^"\']+)\1', line)
if m:
version = m.group(2)
break
if not version:
raise Exception(
'Could not determine version number from thriftrw/__init__.py'
)
with open('README.rst') as f:
long_description = f.read()
setup(
name='thriftrw',
version=version,
description=(
'A library to serialize and deserialize Thrift values.'
),
long_description=long_description,
author='Abhinav Gupta',
author_email='[email protected]',
url='https://github.com/thriftrw/thriftrw-python',
packages=find_packages(exclude=('tests', 'tests.*')),
license='MIT',
install_requires=['ply'],
tests_require=['pytest', 'mock'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules',
],
ext_modules=ext_modules,
cmdclass=cmdclass,
)
| []
| []
| [
"THRIFTRW_PROFILE",
"THRIFTRW_COVERAGE"
]
| [] | ["THRIFTRW_PROFILE", "THRIFTRW_COVERAGE"] | python | 2 | 0 | |
pedantic/examples/validate.py | import os
from dataclasses import dataclass
from pedantic import validate, ExternalParameter, overrides, Validator, Parameter, Min, ReturnAs
@dataclass(frozen=True)
class Configuration:
iterations: int
max_error: float
class ConfigurationValidator(Validator):
@overrides(Validator)
def validate(self, value: Configuration) -> Configuration:
if value.iterations < 1 or value.max_error < 0:
self.raise_exception(msg=f'Invalid configuration: {value}', value=value)
return value
class ConfigFromEnvVar(ExternalParameter):
""" Reads the configuration from environment variables. """
@overrides(ExternalParameter)
def has_value(self) -> bool:
return 'iterations' in os.environ and 'max_error' in os.environ
@overrides(ExternalParameter)
def load_value(self) -> Configuration:
return Configuration(
iterations=int(os.environ['iterations']),
max_error=float(os.environ['max_error']),
)
class ConfigFromFile(ExternalParameter):
""" Reads the configuration from a config file. """
@overrides(ExternalParameter)
def has_value(self) -> bool:
return os.path.isfile('config.csv')
@overrides(ExternalParameter)
def load_value(self) -> Configuration:
with open(file='config.csv', mode='r') as file:
content = file.readlines()
return Configuration(
iterations=int(content[0].strip('\n')),
max_error=float(content[1]),
)
# choose your configuration source here:
@validate(ConfigFromEnvVar(name='config', validators=[ConfigurationValidator()]), strict=False, return_as=ReturnAs.KWARGS_WITH_NONE)
# @validate(ConfigFromFile(name='config', validators=[ConfigurationValidator()]), strict=False)
# with strict_mode = True (which is the default)
# you need to pass a Parameter for each parameter of the decorated function
# @validate(
# Parameter(name='value', validators=[Min(5, include_boundary=False)]),
# ConfigFromFile(name='config', validators=[ConfigurationValidator()]),
# )
def my_algorithm(value: float, config: Configuration) -> float:
"""
This method calculates something that depends on the given value while taking the configuration into account.
Note how well this small piece of code is designed:
- The function my_algorithm() needs a Configuration but has no knowledge of where it comes from.
- Furthermore, it does not need to care about parameter validation.
- The ConfigurationValidator doesn't know anything about the creation of the data.
- The @validate decorator is the only thing you need to change if you want a different configuration source.
"""
print(value)
print(config)
return value
if __name__ == '__main__':
# we can call the function with a config as if there were no decorator.
# This makes testing extremely easy: no config files, no environment variables or anything like that
print(my_algorithm(value=2, config=Configuration(iterations=3, max_error=4.4)))
os.environ['iterations'] = '12'
os.environ['max_error'] = '3.1415'
# but we can also omit the config and load it implicitly via our custom Parameters
print(my_algorithm(value=42.0))
| []
| []
| [
"iterations",
"max_error"
]
| [] | ["iterations", "max_error"] | python | 2 | 0 | |
mypy/util.py | """Utility functions with no non-trivial dependencies."""
import os
import pathlib
import re
import subprocess
import sys
import hashlib
import io
import shutil
import time
from typing import (
TypeVar, List, Tuple, Optional, Dict, Sequence, Iterable, Container, IO, Callable, Union, Sized
)
from typing_extensions import Final, Type, Literal
try:
import curses
import _curses # noqa
CURSES_ENABLED = True
except ImportError:
CURSES_ENABLED = False
T = TypeVar('T')
ENCODING_RE: Final = re.compile(br"([ \t\v]*#.*(\r\n?|\n))??[ \t\v]*#.*coding[:=][ \t]*([-\w.]+)")
DEFAULT_SOURCE_OFFSET: Final = 4
DEFAULT_COLUMNS: Final = 80
# At least this number of columns will be shown on each side of
# error location when printing source code snippet.
MINIMUM_WIDTH: Final = 20
# VT100 color code processing was added in Windows 10, but only the second major update,
# Threshold 2. Fortunately, everyone (even on LTSB, Long Term Support Branch) should
# have a version of Windows 10 newer than this. Note that Windows 8 and below are not
# supported, but are either going out of support, or make up only a few % of the market.
MINIMUM_WINDOWS_MAJOR_VT100: Final = 10
MINIMUM_WINDOWS_BUILD_VT100: Final = 10586
default_python2_interpreter: Final = [
"python2",
"python",
"/usr/bin/python",
"C:\\Python27\\python.exe",
]
SPECIAL_DUNDERS: Final = frozenset((
"__init__", "__new__", "__call__", "__init_subclass__", "__class_getitem__",
))
def is_dunder(name: str, exclude_special: bool = False) -> bool:
"""Returns whether name is a dunder name.
Args:
exclude_special: Whether to return False for a couple special dunder methods.
"""
if exclude_special and name in SPECIAL_DUNDERS:
return False
return name.startswith("__") and name.endswith("__")
def is_sunder(name: str) -> bool:
return not is_dunder(name) and name.startswith('_') and name.endswith('_')
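# Quick illustrations of the two predicates above:
#   is_dunder('__init__')                        -> True
#   is_dunder('__init__', exclude_special=True)  -> False  (special dunder)
#   is_sunder('_missing_')                       -> True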
def split_module_names(mod_name: str) -> List[str]:
"""Return the module and all parent module names.
So, if `mod_name` is 'a.b.c', this function will return
['a.b.c', 'a.b', and 'a'].
"""
out = [mod_name]
while '.' in mod_name:
mod_name = mod_name.rsplit('.', 1)[0]
out.append(mod_name)
return out
def module_prefix(modules: Iterable[str], target: str) -> Optional[str]:
result = split_target(modules, target)
if result is None:
return None
return result[0]
def split_target(modules: Iterable[str], target: str) -> Optional[Tuple[str, str]]:
remaining: List[str] = []
while True:
if target in modules:
return target, '.'.join(remaining)
components = target.rsplit('.', 1)
if len(components) == 1:
return None
target = components[0]
remaining.insert(0, components[1])
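# Example for the two helpers above: given modules {'a.b'} and target
# 'a.b.c.d', split_target peels components off the right until it hits a
# known module, returning ('a.b', 'c.d'); module_prefix then yields 'a.b'.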
def short_type(obj: object) -> str:
"""Return the last component of the type name of an object.
If obj is None, return 'nil'. For example, if obj is 1, return 'int'.
"""
if obj is None:
return 'nil'
t = str(type(obj))
return t.split('.')[-1].rstrip("'>")
def find_python_encoding(text: bytes, pyversion: Tuple[int, int]) -> Tuple[str, int]:
"""PEP-263 for detecting Python file encoding"""
result = ENCODING_RE.match(text)
if result:
line = 2 if result.group(1) else 1
encoding = result.group(3).decode('ascii')
# Handle some aliases that Python is happy to accept and that are used in the wild.
if encoding.startswith(('iso-latin-1-', 'latin-1-')) or encoding == 'iso-latin-1':
encoding = 'latin-1'
return encoding, line
else:
default_encoding = 'utf8' if pyversion[0] >= 3 else 'ascii'
return default_encoding, -1
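# For instance, find_python_encoding(b'# -*- coding: latin-1 -*-\n', (3, 8))
# returns ('latin-1', 1); without a coding comment the result falls back to
# ('utf8', -1) on Python 3 and ('ascii', -1) on Python 2.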
def bytes_to_human_readable_repr(b: bytes) -> str:
"""Converts bytes into some human-readable representation. Unprintable
bytes such as the nul byte are escaped. For example:
>>> b = bytes([102, 111, 111, 10, 0])
>>> s = bytes_to_human_readable_repr(b)
>>> print(s)
foo\n\x00
>>> print(repr(s))
'foo\\n\\x00'
"""
return repr(b)[2:-1]
class DecodeError(Exception):
"""Exception raised when a file cannot be decoded due to an unknown encoding type.
Essentially a wrapper for the LookupError raised by `bytearray.decode`
"""
def decode_python_encoding(source: bytes, pyversion: Tuple[int, int]) -> str:
"""Read the Python file with while obeying PEP-263 encoding detection.
Returns the source as a string.
"""
# check for BOM UTF-8 encoding and strip it out if present
if source.startswith(b'\xef\xbb\xbf'):
encoding = 'utf8'
source = source[3:]
else:
# look at first two lines and check if PEP-263 coding is present
encoding, _ = find_python_encoding(source, pyversion)
try:
source_text = source.decode(encoding)
except LookupError as lookuperr:
raise DecodeError(str(lookuperr)) from lookuperr
return source_text
def read_py_file(path: str, read: Callable[[str], bytes],
pyversion: Tuple[int, int]) -> Optional[List[str]]:
"""Try reading a Python file as list of source lines.
Return None if something goes wrong.
"""
try:
source = read(path)
except OSError:
return None
else:
try:
source_lines = decode_python_encoding(source, pyversion).splitlines()
except DecodeError:
return None
return source_lines
def trim_source_line(line: str, max_len: int, col: int, min_width: int) -> Tuple[str, int]:
"""Trim a line of source code to fit into max_len.
Show 'min_width' characters on each side of 'col' (an error location). If either
start or end is trimmed, this is indicated by adding '...' there.
A typical result looks like this:
...some_variable = function_to_call(one_arg, other_arg) or...
Return the trimmed string and the column offset to adjust the error location.
"""
if max_len < 2 * min_width + 1:
# In case the window is too tiny it is better to still show something.
max_len = 2 * min_width + 1
# Trivial case: line already fits in.
if len(line) <= max_len:
return line, 0
# If column is not too large so that there is still min_width after it,
# the line doesn't need to be trimmed at the start.
if col + min_width < max_len:
return line[:max_len] + '...', 0
# Otherwise, if the column is not too close to the end, trim both sides.
if col < len(line) - min_width - 1:
offset = col - max_len + min_width + 1
return '...' + line[offset:col + min_width + 1] + '...', offset - 3
# Finally, if the column is near the end, just trim the start.
return '...' + line[-max_len:], len(line) - max_len - 3
def get_mypy_comments(source: str) -> List[Tuple[int, str]]:
PREFIX = '# mypy: '
# Don't bother splitting up the lines unless we know it is useful
if PREFIX not in source:
return []
lines = source.split('\n')
results = []
for i, line in enumerate(lines):
if line.startswith(PREFIX):
results.append((i + 1, line[len(PREFIX):]))
return results
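# e.g. get_mypy_comments('x = 1\n# mypy: ignore-errors\n') returns
# [(2, 'ignore-errors')] -- reported line numbers are 1-based.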
_python2_interpreter: Optional[str] = None
def try_find_python2_interpreter() -> Optional[str]:
global _python2_interpreter
if _python2_interpreter:
return _python2_interpreter
for interpreter in default_python2_interpreter:
try:
retcode = subprocess.Popen([
interpreter, '-c',
'import sys, typing; assert sys.version_info[:2] == (2, 7)'
]).wait()
if not retcode:
_python2_interpreter = interpreter
return interpreter
except OSError:
pass
return None
PASS_TEMPLATE: Final = """<?xml version="1.0" encoding="utf-8"?>
<testsuite errors="0" failures="0" name="mypy" skips="0" tests="1" time="{time:.3f}">
<testcase classname="mypy" file="mypy" line="1" name="mypy-py{ver}-{platform}" time="{time:.3f}">
</testcase>
</testsuite>
"""
FAIL_TEMPLATE: Final = """<?xml version="1.0" encoding="utf-8"?>
<testsuite errors="0" failures="1" name="mypy" skips="0" tests="1" time="{time:.3f}">
<testcase classname="mypy" file="mypy" line="1" name="mypy-py{ver}-{platform}" time="{time:.3f}">
<failure message="mypy produced messages">{text}</failure>
</testcase>
</testsuite>
"""
ERROR_TEMPLATE: Final = """<?xml version="1.0" encoding="utf-8"?>
<testsuite errors="1" failures="0" name="mypy" skips="0" tests="1" time="{time:.3f}">
<testcase classname="mypy" file="mypy" line="1" name="mypy-py{ver}-{platform}" time="{time:.3f}">
<error message="mypy produced errors">{text}</error>
</testcase>
</testsuite>
"""
def write_junit_xml(dt: float, serious: bool, messages: List[str], path: str,
version: str, platform: str) -> None:
from xml.sax.saxutils import escape
if not messages and not serious:
xml = PASS_TEMPLATE.format(time=dt, ver=version, platform=platform)
elif not serious:
xml = FAIL_TEMPLATE.format(text=escape('\n'.join(messages)), time=dt,
ver=version, platform=platform)
else:
xml = ERROR_TEMPLATE.format(text=escape('\n'.join(messages)), time=dt,
ver=version, platform=platform)
# checks for a directory structure in path and creates folders if needed
xml_dirs = os.path.dirname(os.path.abspath(path))
if not os.path.isdir(xml_dirs):
os.makedirs(xml_dirs)
with open(path, 'wb') as f:
f.write(xml.encode('utf-8'))
class IdMapper:
"""Generate integer ids for objects.
Unlike id(), these start from 0 and increment by 1, and ids won't
get reused across the life-time of IdMapper.
Assume objects don't redefine __eq__ or __hash__.
"""
def __init__(self) -> None:
self.id_map: Dict[object, int] = {}
self.next_id = 0
def id(self, o: object) -> int:
if o not in self.id_map:
self.id_map[o] = self.next_id
self.next_id += 1
return self.id_map[o]
def get_prefix(fullname: str) -> str:
"""Drop the final component of a qualified name (e.g. ('x.y' -> 'x')."""
return fullname.rsplit('.', 1)[0]
def get_top_two_prefixes(fullname: str) -> Tuple[str, str]:
"""Return one and two component prefixes of a fully qualified name.
Given 'a.b.c.d', return ('a', 'a.b').
If fullname has only one component, return (fullname, fullname).
"""
components = fullname.split('.', 3)
return components[0], '.'.join(components[:2])
def correct_relative_import(cur_mod_id: str,
relative: int,
target: str,
is_cur_package_init_file: bool) -> Tuple[str, bool]:
if relative == 0:
return target, True
parts = cur_mod_id.split(".")
rel = relative
if is_cur_package_init_file:
rel -= 1
ok = len(parts) >= rel
if rel != 0:
cur_mod_id = ".".join(parts[:-rel])
return cur_mod_id + (("." + target) if target else ""), ok
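# Example: inside module 'pkg.sub.mod', the statement 'from .. import x'
# arrives here as relative=2, so correct_relative_import('pkg.sub.mod', 2,
# 'x', False) strips two components and returns ('pkg.x', True); the bool
# becomes False when the import tries to climb above the top-level package.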
fields_cache: Final[Dict[Type[object], List[str]]] = {}
def get_class_descriptors(cls: 'Type[object]') -> Sequence[str]:
import inspect # Lazy import for minor startup speed win
# Maintain a cache of type -> attributes defined by descriptors in the class
# (that is, attributes from __slots__ and C extension classes)
if cls not in fields_cache:
members = inspect.getmembers(
cls,
lambda o: inspect.isgetsetdescriptor(o) or inspect.ismemberdescriptor(o))
fields_cache[cls] = [x for x, y in members if x != '__weakref__' and x != '__dict__']
return fields_cache[cls]
def replace_object_state(new: object, old: object, copy_dict: bool = False) -> None:
"""Copy state of old node to the new node.
This handles cases where there is __dict__ and/or attribute descriptors
(either from slots or because the type is defined in a C extension module).
Assume that both objects have the same __class__.
"""
if hasattr(old, '__dict__'):
if copy_dict:
new.__dict__ = dict(old.__dict__)
else:
new.__dict__ = old.__dict__
for attr in get_class_descriptors(old.__class__):
try:
if hasattr(old, attr):
setattr(new, attr, getattr(old, attr))
elif hasattr(new, attr):
delattr(new, attr)
# There is no way to distinguish getsetdescriptors that allow
# writes from ones that don't (I think?), so we just ignore
# AttributeErrors if we need to.
# TODO: What about getsetdescriptors that act like properties???
except AttributeError:
pass
def is_sub_path(path1: str, path2: str) -> bool:
"""Given two paths, return if path1 is a sub-path of path2."""
return pathlib.Path(path2) in pathlib.Path(path1).parents
def hard_exit(status: int = 0) -> None:
"""Kill the current process without fully cleaning up.
This can be quite a bit faster than a normal exit() since objects are not freed.
"""
sys.stdout.flush()
sys.stderr.flush()
os._exit(status)
def unmangle(name: str) -> str:
"""Remove internal suffixes from a short name."""
return name.rstrip("'")
def get_unique_redefinition_name(name: str, existing: Container[str]) -> str:
"""Get a simple redefinition name not present among existing.
For example, for name 'foo' we try 'foo-redefinition', 'foo-redefinition2',
'foo-redefinition3', etc. until we find one that is not in existing.
"""
r_name = name + '-redefinition'
if r_name not in existing:
return r_name
i = 2
while r_name + str(i) in existing:
i += 1
return r_name + str(i)
def check_python_version(program: str) -> None:
"""Report issues with the Python used to run mypy, dmypy, or stubgen"""
# Check for known bad Python versions.
if sys.version_info[:2] < (3, 6):
sys.exit("Running {name} with Python 3.5 or lower is not supported; "
"please upgrade to 3.6 or newer".format(name=program))
def count_stats(messages: List[str]) -> Tuple[int, int, int]:
"""Count total number of errors, notes and error_files in message list."""
errors = [e for e in messages if ': error:' in e]
error_files = {e.split(':')[0] for e in errors}
notes = [e for e in messages if ': note:' in e]
return len(errors), len(notes), len(error_files)
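# e.g. count_stats(['m.py:1: error: boom', 'm.py:1: note: hint']) returns
# (1, 1, 1): one error, one note, and one distinct file containing errors.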
def split_words(msg: str) -> List[str]:
"""Split line of text into words (but not within quoted groups)."""
next_word = ''
res: List[str] = []
allow_break = True
for c in msg:
if c == ' ' and allow_break:
res.append(next_word)
next_word = ''
continue
if c == '"':
allow_break = not allow_break
next_word += c
res.append(next_word)
return res
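# e.g. split_words('Expected "Union[int, str]" here') keeps the quoted group
# intact: ['Expected', '"Union[int, str]"', 'here'].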
def get_terminal_width() -> int:
"""Get current terminal width if possible, otherwise return the default one."""
return (int(os.getenv('MYPY_FORCE_TERMINAL_WIDTH', '0'))
or shutil.get_terminal_size().columns
or DEFAULT_COLUMNS)
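# The MYPY_FORCE_TERMINAL_WIDTH environment variable takes precedence over
# the detected size, e.g. `MYPY_FORCE_TERMINAL_WIDTH=100 mypy prog.py` wraps
# error messages to 100 columns even when output is piped.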
def soft_wrap(msg: str, max_len: int, first_offset: int,
num_indent: int = 0) -> str:
"""Wrap a long error message into few lines.
Breaks will only happen between words, and never inside a quoted group
(to avoid breaking types such as "Union[int, str]"). The 'first_offset' is
the width before the start of first line.
Pad every next line with 'num_indent' spaces. Every line will be at most 'max_len'
characters, except if it is a single word or quoted group.
For example:
first_offset
------------------------
path/to/file: error: 58: Some very long error message
that needs to be split in separate lines.
"Long[Type, Names]" are never split.
^^^^--------------------------------------------------
num_indent max_len
"""
words = split_words(msg)
next_line = words.pop(0)
lines: List[str] = []
while words:
next_word = words.pop(0)
max_line_len = max_len - num_indent if lines else max_len - first_offset
# Add 1 to account for space between words.
if len(next_line) + len(next_word) + 1 <= max_line_len:
next_line += ' ' + next_word
else:
lines.append(next_line)
next_line = next_word
lines.append(next_line)
padding = '\n' + ' ' * num_indent
return padding.join(lines)
def hash_digest(data: bytes) -> str:
"""Compute a hash digest of some data.
We use a cryptographic hash because we want a low probability of
accidental collision, but we don't really care about any of the
cryptographic properties.
"""
# Once we drop Python 3.5 support, we should consider using
# blake2b, which is faster.
return hashlib.sha256(data).hexdigest()
def parse_gray_color(cup: bytes) -> str:
"""Reproduce a gray color in ANSI escape sequence"""
if sys.platform == "win32":
assert False, "curses is not available on Windows"
set_color = ''.join([cup[:-1].decode(), 'm'])
gray = curses.tparm(set_color.encode('utf-8'), 1, 89).decode()
return gray
class FancyFormatter:
"""Apply color and bold font to terminal output.
This currently only works on Linux and Mac.
"""
def __init__(self, f_out: IO[str], f_err: IO[str], show_error_codes: bool) -> None:
self.show_error_codes = show_error_codes
# Check if we are in a human-facing terminal on a supported platform.
if sys.platform not in ('linux', 'darwin', 'win32'):
self.dummy_term = True
return
force_color = int(os.getenv('MYPY_FORCE_COLOR', '0'))
if not force_color and (not f_out.isatty() or not f_err.isatty()):
self.dummy_term = True
return
if sys.platform == 'win32':
self.dummy_term = not self.initialize_win_colors()
else:
self.dummy_term = not self.initialize_unix_colors()
if not self.dummy_term:
self.colors = {'red': self.RED, 'green': self.GREEN,
'blue': self.BLUE, 'yellow': self.YELLOW,
'none': ''}
def initialize_win_colors(self) -> bool:
"""Return True if initialization was successful and we can use colors, False otherwise"""
# Windows ANSI escape sequences are only supported on Threshold 2 and above.
# we check with an assert at runtime and an if check for mypy, as asserts do not
# yet narrow platform
assert sys.platform == 'win32'
if sys.platform == 'win32':
winver = sys.getwindowsversion()
if (winver.major < MINIMUM_WINDOWS_MAJOR_VT100
or winver.build < MINIMUM_WINDOWS_BUILD_VT100):
return False
import ctypes
kernel32 = ctypes.windll.kernel32
ENABLE_PROCESSED_OUTPUT = 0x1
ENABLE_WRAP_AT_EOL_OUTPUT = 0x2
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4
STD_OUTPUT_HANDLE = -11
kernel32.SetConsoleMode(kernel32.GetStdHandle(STD_OUTPUT_HANDLE),
ENABLE_PROCESSED_OUTPUT
| ENABLE_WRAP_AT_EOL_OUTPUT
| ENABLE_VIRTUAL_TERMINAL_PROCESSING)
self.BOLD = '\033[1m'
self.UNDER = '\033[4m'
self.BLUE = '\033[94m'
self.GREEN = '\033[92m'
self.RED = '\033[91m'
self.YELLOW = '\033[93m'
self.NORMAL = '\033[0m'
self.DIM = '\033[2m'
return True
return False
def initialize_unix_colors(self) -> bool:
"""Return True if initialization was successful and we can use colors, False otherwise"""
if sys.platform == "win32" or not CURSES_ENABLED:
return False
try:
# setupterm wants a fd to potentially write an "initialization sequence".
# We override sys.stdout for the daemon API so if stdout doesn't have an fd,
# just give it /dev/null.
try:
fd = sys.stdout.fileno()
except io.UnsupportedOperation:
with open("/dev/null", "rb") as f:
curses.setupterm(fd=f.fileno())
else:
curses.setupterm(fd=fd)
except curses.error:
# Most likely terminfo not found.
return False
bold = curses.tigetstr('bold')
under = curses.tigetstr('smul')
set_color = curses.tigetstr('setaf')
set_eseq = curses.tigetstr('cup')
normal = curses.tigetstr('sgr0')
if not (bold and under and set_color and set_eseq and normal):
return False
self.NORMAL = normal.decode()
self.BOLD = bold.decode()
self.UNDER = under.decode()
self.DIM = parse_gray_color(set_eseq)
self.BLUE = curses.tparm(set_color, curses.COLOR_BLUE).decode()
self.GREEN = curses.tparm(set_color, curses.COLOR_GREEN).decode()
self.RED = curses.tparm(set_color, curses.COLOR_RED).decode()
self.YELLOW = curses.tparm(set_color, curses.COLOR_YELLOW).decode()
return True
def style(self, text: str, color: Literal['red', 'green', 'blue', 'yellow', 'none'],
bold: bool = False, underline: bool = False, dim: bool = False) -> str:
"""Apply simple color and style (underlined or bold)."""
if self.dummy_term:
return text
if bold:
start = self.BOLD
else:
start = ''
if underline:
start += self.UNDER
if dim:
start += self.DIM
return start + self.colors[color] + text + self.NORMAL
def fit_in_terminal(self, messages: List[str],
fixed_terminal_width: Optional[int] = None) -> List[str]:
"""Improve readability by wrapping error messages and trimming source code."""
width = fixed_terminal_width or get_terminal_width()
new_messages = messages.copy()
for i, error in enumerate(messages):
if ': error:' in error:
loc, msg = error.split('error:', maxsplit=1)
msg = soft_wrap(msg, width, first_offset=len(loc) + len('error: '))
new_messages[i] = loc + 'error:' + msg
if error.startswith(' ' * DEFAULT_SOURCE_OFFSET) and '^' not in error:
# TODO: detecting source code highlights through an indent can be surprising.
# Restore original error message and error location.
error = error[DEFAULT_SOURCE_OFFSET:]
column = messages[i+1].index('^') - DEFAULT_SOURCE_OFFSET
# Let source have some space also on the right side, plus 6
# to accommodate ... on each side.
max_len = width - DEFAULT_SOURCE_OFFSET - 6
source_line, offset = trim_source_line(error, max_len, column, MINIMUM_WIDTH)
new_messages[i] = ' ' * DEFAULT_SOURCE_OFFSET + source_line
# Also adjust the error marker position.
new_messages[i+1] = ' ' * (DEFAULT_SOURCE_OFFSET + column - offset) + '^'
return new_messages
def colorize(self, error: str) -> str:
"""Colorize an output line by highlighting the status and error code."""
if ': error:' in error:
loc, msg = error.split('error:', maxsplit=1)
if not self.show_error_codes:
return (loc + self.style('error:', 'red', bold=True) +
self.highlight_quote_groups(msg))
codepos = msg.rfind('[')
if codepos != -1:
code = msg[codepos:]
msg = msg[:codepos]
else:
code = "" # no error code specified
return (loc + self.style('error:', 'red', bold=True) +
self.highlight_quote_groups(msg) + self.style(code, 'yellow'))
elif ': note:' in error:
loc, msg = error.split('note:', maxsplit=1)
formatted = self.highlight_quote_groups(self.underline_link(msg))
return loc + self.style('note:', 'blue') + formatted
elif error.startswith(' ' * DEFAULT_SOURCE_OFFSET):
# TODO: detecting source code highlights through an indent can be surprising.
if '^' not in error:
return self.style(error, 'none', dim=True)
return self.style(error, 'red')
else:
return error
def highlight_quote_groups(self, msg: str) -> str:
"""Make groups quoted with double quotes bold (including quotes).
This is used to highlight types, attribute names etc.
"""
if msg.count('"') % 2:
# Broken error message, don't do any formatting.
return msg
parts = msg.split('"')
out = ''
for i, part in enumerate(parts):
if i % 2 == 0:
out += self.style(part, 'none')
else:
out += self.style('"' + part + '"', 'none', bold=True)
return out
def underline_link(self, note: str) -> str:
"""Underline a link in a note message (if any).
This assumes there is at most one link in the message.
"""
match = re.search(r'https?://\S*', note)
if not match:
return note
start = match.start()
end = match.end()
return (note[:start] +
self.style(note[start:end], 'none', underline=True) +
note[end:])
def format_success(self, n_sources: int, use_color: bool = True) -> str:
"""Format short summary in case of success.
n_sources is total number of files passed directly on command line,
i.e. excluding stubs and followed imports.
"""
msg = f'Success: no issues found in {n_sources} source file{plural_s(n_sources)}'
if not use_color:
return msg
return self.style(msg, 'green', bold=True)
def format_error(
self, n_errors: int, n_files: int, n_sources: int, *,
blockers: bool = False, use_color: bool = True
) -> str:
"""Format a short summary in case of errors."""
msg = f'Found {n_errors} error{plural_s(n_errors)} in {n_files} file{plural_s(n_files)}'
if blockers:
msg += ' (errors prevented further checking)'
else:
msg += f" (checked {n_sources} source file{plural_s(n_sources)})"
if not use_color:
return msg
return self.style(msg, 'red', bold=True)
def is_typeshed_file(file: str) -> bool:
# gross, but no other clear way to tell
return 'typeshed' in os.path.abspath(file).split(os.sep)
def is_stub_package_file(file: str) -> bool:
# Use hacky heuristics to check whether file is part of a PEP 561 stub package.
if not file.endswith('.pyi'):
return False
return any(component.endswith('-stubs')
for component in os.path.abspath(file).split(os.sep))
def unnamed_function(name: Optional[str]) -> bool:
return name is not None and name == "_"
# TODO: replace with uses of perf_counter_ns when support for py3.6 is dropped
# (or when mypy properly handles alternate definitions based on python version checks)
time_ref = time.perf_counter
def time_spent_us(t0: float) -> int:
return int((time.perf_counter() - t0) * 1e6)
def plural_s(s: Union[int, Sized]) -> str:
count = s if isinstance(s, int) else len(s)
if count > 1:
return 's'
else:
return ''
| []
| []
| [
"MYPY_FORCE_TERMINAL_WIDTH",
"MYPY_FORCE_COLOR"
]
| [] | ["MYPY_FORCE_TERMINAL_WIDTH", "MYPY_FORCE_COLOR"] | python | 2 | 0 | |
lib-python/3/test/test_readline.py | """
Very minimal unittests for parts of the readline module.
"""
from contextlib import ExitStack
from errno import EIO
import locale
import os
import selectors
import subprocess
import sys
import tempfile
import unittest
from test.support import import_module, unlink, temp_dir, TESTFN, verbose
from test.support.script_helper import assert_python_ok
# Skip tests if there is no readline module
readline = import_module('readline')
if hasattr(readline, "_READLINE_LIBRARY_VERSION"):
is_editline = ("EditLine wrapper" in readline._READLINE_LIBRARY_VERSION)
else:
is_editline = (readline.__doc__ and "libedit" in readline.__doc__)
def setUpModule():
if verbose:
# Python implementations other than CPython may not have
# these private attributes
if hasattr(readline, "_READLINE_VERSION"):
print(f"readline version: {readline._READLINE_VERSION:#x}")
print(f"readline runtime version: {readline._READLINE_RUNTIME_VERSION:#x}")
if hasattr(readline, "_READLINE_LIBRARY_VERSION"):
print(f"readline library version: {readline._READLINE_LIBRARY_VERSION!r}")
print(f"use libedit emulation? {is_editline}")
@unittest.skipUnless(hasattr(readline, "clear_history"),
"The history update test cannot be run because the "
"clear_history method is not available.")
class TestHistoryManipulation(unittest.TestCase):
"""
These tests were added to check that the libedit emulation on OSX and the
"real" readline have the same interface for history manipulation. That's
why the tests cover only a small subset of the interface.
"""
def testHistoryUpdates(self):
readline.clear_history()
readline.add_history("first line")
readline.add_history("second line")
self.assertEqual(readline.get_history_item(0), None)
self.assertEqual(readline.get_history_item(1), "first line")
self.assertEqual(readline.get_history_item(2), "second line")
readline.replace_history_item(0, "replaced line")
self.assertEqual(readline.get_history_item(0), None)
self.assertEqual(readline.get_history_item(1), "replaced line")
self.assertEqual(readline.get_history_item(2), "second line")
self.assertEqual(readline.get_current_history_length(), 2)
readline.remove_history_item(0)
self.assertEqual(readline.get_history_item(0), None)
self.assertEqual(readline.get_history_item(1), "second line")
self.assertEqual(readline.get_current_history_length(), 1)
@unittest.skipUnless(hasattr(readline, "append_history_file"),
"append_history not available")
def test_write_read_append(self):
hfile = tempfile.NamedTemporaryFile(delete=False)
hfile.close()
hfilename = hfile.name
self.addCleanup(unlink, hfilename)
# test write-clear-read == nop
readline.clear_history()
readline.add_history("first line")
readline.add_history("second line")
readline.write_history_file(hfilename)
readline.clear_history()
self.assertEqual(readline.get_current_history_length(), 0)
readline.read_history_file(hfilename)
self.assertEqual(readline.get_current_history_length(), 2)
self.assertEqual(readline.get_history_item(1), "first line")
self.assertEqual(readline.get_history_item(2), "second line")
# test append
readline.append_history_file(1, hfilename)
readline.clear_history()
readline.read_history_file(hfilename)
self.assertEqual(readline.get_current_history_length(), 3)
self.assertEqual(readline.get_history_item(1), "first line")
self.assertEqual(readline.get_history_item(2), "second line")
self.assertEqual(readline.get_history_item(3), "second line")
# test 'no such file' behaviour
os.unlink(hfilename)
with self.assertRaises(FileNotFoundError):
readline.append_history_file(1, hfilename)
# write_history_file can create the target
readline.write_history_file(hfilename)
def test_nonascii_history(self):
readline.clear_history()
try:
readline.add_history("entrée 1")
except UnicodeEncodeError as err:
self.skipTest("Locale cannot encode test data: " + format(err))
readline.add_history("entrée 2")
readline.replace_history_item(1, "entrée 22")
readline.write_history_file(TESTFN)
self.addCleanup(os.remove, TESTFN)
readline.clear_history()
readline.read_history_file(TESTFN)
if is_editline:
# An add_history() call seems to be required for get_history_item()
# to register items from the file
readline.add_history("dummy")
self.assertEqual(readline.get_history_item(1), "entrée 1")
self.assertEqual(readline.get_history_item(2), "entrée 22")
class TestReadline(unittest.TestCase):
@unittest.skipIf(getattr(readline, '_READLINE_VERSION', 0x0601) < 0x0601
and not is_editline,
"not supported in this library version")
def test_init(self):
# Issue #19884: Ensure that the ANSI sequence "\033[1034h" is not
# written into stdout when the readline module is imported and stdout
# is redirected to a pipe.
rc, stdout, stderr = assert_python_ok('-c', 'import readline',
TERM='xterm-256color')
self.assertEqual(stdout, b'')
auto_history_script = """\
import readline
readline.set_auto_history({})
input()
print("History length:", readline.get_current_history_length())
"""
def test_auto_history_enabled(self):
output = run_pty(self.auto_history_script.format(True))
# bpo-44949: Sometimes, the newline character is not written at the
# end, so don't expect it in the output.
self.assertIn(b"History length: 1", output)
def test_auto_history_disabled(self):
output = run_pty(self.auto_history_script.format(False))
# bpo-44949: Sometimes, the newline character is not written at the
# end, so don't expect it in the output.
self.assertIn(b"History length: 0", output)
@unittest.skipIf(not hasattr(readline,
'set_completion_display_matches_hook'),
"function not reimplemented in pypy")
def test_nonascii(self):
loc = locale.setlocale(locale.LC_CTYPE, None)
if loc in ('C', 'POSIX'):
# bpo-29240: On FreeBSD, if the LC_CTYPE locale is C or POSIX,
# writing and reading non-ASCII bytes into/from a TTY works, but
# readline or ncurses ignores non-ASCII bytes on read.
self.skipTest(f"the LC_CTYPE locale is {loc!r}")
try:
readline.add_history("\xEB\xEF")
except UnicodeEncodeError as err:
self.skipTest("Locale cannot encode test data: " + format(err))
script = r"""import readline
is_editline = readline.__doc__ and "libedit" in readline.__doc__
inserted = "[\xEFnserted]"
macro = "|t\xEB[after]"
set_pre_input_hook = getattr(readline, "set_pre_input_hook", None)
if is_editline or not set_pre_input_hook:
# The insert_text() call via pre_input_hook() does nothing with Editline,
# so include the extra text that would have been inserted here
macro = inserted + macro
if is_editline:
readline.parse_and_bind(r'bind ^B ed-prev-char')
readline.parse_and_bind(r'bind "\t" rl_complete')
readline.parse_and_bind(r'bind -s ^A "{}"'.format(macro))
else:
readline.parse_and_bind(r'Control-b: backward-char')
readline.parse_and_bind(r'"\t": complete')
readline.parse_and_bind(r'set disable-completion off')
readline.parse_and_bind(r'set show-all-if-ambiguous off')
readline.parse_and_bind(r'set show-all-if-unmodified off')
readline.parse_and_bind(r'Control-a: "{}"'.format(macro))
def pre_input_hook():
readline.insert_text(inserted)
readline.redisplay()
if set_pre_input_hook:
set_pre_input_hook(pre_input_hook)
def completer(text, state):
if text == "t\xEB":
if state == 0:
print("text", ascii(text))
print("line", ascii(readline.get_line_buffer()))
print("indexes", readline.get_begidx(), readline.get_endidx())
return "t\xEBnt"
if state == 1:
return "t\xEBxt"
if text == "t\xEBx" and state == 0:
return "t\xEBxt"
return None
readline.set_completer(completer)
def display(substitution, matches, longest_match_length):
print("substitution", ascii(substitution))
print("matches", ascii(matches))
readline.set_completion_display_matches_hook(display)
print("result", ascii(input()))
print("history", ascii(readline.get_history_item(1)))
"""
input = b"\x01" # Ctrl-A, expands to "|t\xEB[after]"
input += b"\x02" * len("[after]") # Move cursor back
input += b"\t\t" # Display possible completions
input += b"x\t" # Complete "t\xEBx" -> "t\xEBxt"
input += b"\r"
output = run_pty(script, input)
self.assertIn(b"text 't\\xeb'\r\n", output)
self.assertIn(b"line '[\\xefnserted]|t\\xeb[after]'\r\n", output)
self.assertIn(b"indexes 11 13\r\n", output)
if not is_editline and hasattr(readline, "set_pre_input_hook"):
self.assertIn(b"substitution 't\\xeb'\r\n", output)
self.assertIn(b"matches ['t\\xebnt', 't\\xebxt']\r\n", output)
expected = br"'[\xefnserted]|t\xebxt[after]'"
self.assertIn(b"result " + expected + b"\r\n", output)
# bpo-45195: Sometimes, the newline character is not written at the
# end, so don't expect it in the output.
self.assertIn(b"history " + expected, output)
# We have 2 reasons to skip this test:
# - readline: history size was added in 6.0
# See https://cnswww.cns.cwru.edu/php/chet/readline/CHANGES
# - editline: history size is broken on OS X 10.11.6.
# Newer versions were not tested yet.
@unittest.skipIf(getattr(readline, "_READLINE_VERSION", 0x601) < 0x600,
"this readline version does not support history-size")
@unittest.skipIf(is_editline,
"editline history size configuration is broken")
def test_history_size(self):
history_size = 10
with temp_dir() as test_dir:
inputrc = os.path.join(test_dir, "inputrc")
with open(inputrc, "wb") as f:
f.write(b"set history-size %d\n" % history_size)
history_file = os.path.join(test_dir, "history")
with open(history_file, "wb") as f:
# history_size * 2 items crashes readline
data = b"".join(b"item %d\n" % i
for i in range(history_size * 2))
f.write(data)
script = """
import os
import readline
history_file = os.environ["HISTORY_FILE"]
readline.read_history_file(history_file)
input()
readline.write_history_file(history_file)
"""
env = dict(os.environ)
env["INPUTRC"] = inputrc
env["HISTORY_FILE"] = history_file
run_pty(script, input=b"last input\r", env=env)
with open(history_file, "rb") as f:
lines = f.readlines()
self.assertEqual(len(lines), history_size)
self.assertEqual(lines[-1].strip(), b"last input")
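# run_pty() executes `script` in a child interpreter attached to a
# pseudo-terminal, writes `input` to the master side once it is writable,
# and returns all bytes the child emitted until the slave end is closed.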
def run_pty(script, input=b"dummy input\r", env=None):
pty = import_module('pty')
output = bytearray()
[master, slave] = pty.openpty()
args = (sys.executable, '-c', script)
proc = subprocess.Popen(args, stdin=slave, stdout=slave, stderr=slave, env=env)
os.close(slave)
with ExitStack() as cleanup:
cleanup.enter_context(proc)
def terminate(proc):
try:
proc.terminate()
except ProcessLookupError:
# Workaround for Open/Net BSD bug (Issue 16762)
pass
cleanup.callback(terminate, proc)
cleanup.callback(os.close, master)
# Avoid using DefaultSelector and PollSelector. Kqueue() does not
# work with pseudo-terminals on OS X < 10.9 (Issue 20365) and Open
# BSD (Issue 20667). Poll() does not work with OS X 10.6 or 10.4
# either (Issue 20472). Hopefully the file descriptor is low enough
# to use with select().
sel = cleanup.enter_context(selectors.SelectSelector())
sel.register(master, selectors.EVENT_READ | selectors.EVENT_WRITE)
os.set_blocking(master, False)
while True:
for [_, events] in sel.select():
if events & selectors.EVENT_READ:
try:
chunk = os.read(master, 0x10000)
except OSError as err:
# Linux raises EIO when slave is closed (Issue 5380)
if err.errno != EIO:
raise
chunk = b""
if not chunk:
return output
output.extend(chunk)
if events & selectors.EVENT_WRITE:
try:
input = input[os.write(master, input):]
except OSError as err:
# Apparently EIO means the slave was closed
if err.errno != EIO:
raise
input = b"" # Stop writing
if not input:
sel.modify(master, selectors.EVENT_READ)
if __name__ == "__main__":
unittest.main()
| []
| []
| [
"HISTORY_FILE"
]
| [] | ["HISTORY_FILE"] | python | 1 | 0 | |
src/com.mentor.nucleus.bp.core/src/com/mentor/nucleus/bp/core/ClassAsSimpleParticipant_c.java | package com.mentor.nucleus.bp.core;
//====================================================================
//
// File: com.mentor.nucleus.bp.core.ClassAsSimpleParticipant_c.java
//
// WARNING: Do not edit this generated file
// Generated by ../MC-Java/java.arc, $Revision: 1.111 $
//
// (c) Copyright 2005-2014 by Mentor Graphics Corp. All rights reserved.
//
//====================================================================
// No special imports
import java.util.*;
import java.lang.reflect.*;
import org.eclipse.core.resources.IFile;
import org.eclipse.core.resources.IResource;
import org.eclipse.core.runtime.IAdaptable;
import org.eclipse.core.runtime.IPath;
import org.eclipse.core.runtime.Path;
import com.mentor.nucleus.bp.core.util.PersistenceUtil;
import org.eclipse.core.runtime.NullProgressMonitor;
import com.mentor.nucleus.bp.core.ui.marker.UmlProblem;
import com.mentor.nucleus.bp.core.common.*;
abstract class EV_CLASS_AS_SIMPLE_PARTICIPANT extends genericEvent_c {
public abstract int getEvtcode();
}
public class ClassAsSimpleParticipant_c extends NonRootModelElement
implements
IAdaptable,
Cloneable {
// Public Constructors
public ClassAsSimpleParticipant_c(ModelRoot modelRoot,
java.util.UUID p_m_obj_id, java.util.UUID p_m_rel_id,
java.util.UUID p_m_oir_id, int p_m_mult, int p_m_cond,
String p_m_txt_phrs) {
super(modelRoot);
//pre-process the uuid so that we re-use null uuid instance rather than creating a new one.
m_oir_id = IdAssigner.preprocessUUID(p_m_oir_id);
m_mult = p_m_mult;
m_cond = p_m_cond;
m_txt_phrs = p_m_txt_phrs;
//pre-process the uuid so that we re-use null uuid instance rather than creating a new one.
m_obj_id = IdAssigner.preprocessUUID(p_m_obj_id);
//pre-process the uuid so that we re-use null uuid instance rather than creating a new one.
m_rel_id = IdAssigner.preprocessUUID(p_m_rel_id);
Object[] key = {m_oir_id, m_obj_id, m_rel_id};
addInstanceToMap(key);
}
static public ClassAsSimpleParticipant_c createProxy(ModelRoot modelRoot,
java.util.UUID p_m_obj_id, java.util.UUID p_m_rel_id,
java.util.UUID p_m_oir_id, int p_m_mult, int p_m_cond,
String p_m_txt_phrs, String p_contentPath, IPath p_localPath) {
ModelRoot resolvedModelRoot = ModelRoot.findModelRoot(modelRoot,
p_contentPath, p_localPath);
// if a model root was not resolved it is most likely
// due to a missing file of the proxy; default back to
// the original model root
if (resolvedModelRoot != null)
modelRoot = resolvedModelRoot;
InstanceList instances = modelRoot
.getInstanceList(ClassAsSimpleParticipant_c.class);
ClassAsSimpleParticipant_c new_inst = null;
synchronized (instances) {
Object[] key = {p_m_oir_id, p_m_obj_id, p_m_rel_id};
new_inst = (ClassAsSimpleParticipant_c) instances.get(key);
}
String contentPath = PersistenceUtil.resolveRelativePath(p_localPath,
new Path(p_contentPath));
if (modelRoot.isNewCompareRoot()) {
// for comparisons we do not want to change
// the content path
contentPath = p_contentPath;
}
if (new_inst != null && !modelRoot.isCompareRoot()) {
PersistableModelComponent pmc = new_inst.getPersistableComponent();
if (pmc == null) {
// dangling reference, redo this instance
new_inst.batchUnrelate();
//pre-process the uuid so that we re-use null uuid instance rather than creating a new one.
new_inst.m_oir_id = IdAssigner.preprocessUUID(p_m_oir_id);
new_inst.m_mult = p_m_mult;
new_inst.m_cond = p_m_cond;
new_inst.m_txt_phrs = p_m_txt_phrs;
//pre-process the uuid so that we re-use null uuid instance rather than creating a new one.
new_inst.m_obj_id = IdAssigner.preprocessUUID(p_m_obj_id);
//pre-process the uuid so that we re-use null uuid instance rather than creating a new one.
new_inst.m_rel_id = IdAssigner.preprocessUUID(p_m_rel_id);
}
}
if (new_inst == null) {
// there is no instance matching the id, create a proxy
// if the resource doesn't exist then this will be a dangling reference
new_inst = new ClassAsSimpleParticipant_c(modelRoot, p_m_obj_id,
p_m_rel_id, p_m_oir_id, p_m_mult, p_m_cond, p_m_txt_phrs);
new_inst.m_contentPath = contentPath;
}
return new_inst;
}
static public ClassAsSimpleParticipant_c resolveInstance(
ModelRoot modelRoot, java.util.UUID p_m_obj_id,
java.util.UUID p_m_rel_id, java.util.UUID p_m_oir_id, int p_m_mult,
int p_m_cond, String p_m_txt_phrs) {
InstanceList instances = modelRoot
.getInstanceList(ClassAsSimpleParticipant_c.class);
ClassAsSimpleParticipant_c source = null;
synchronized (instances) {
Object[] key = {p_m_oir_id, p_m_obj_id, p_m_rel_id};
source = (ClassAsSimpleParticipant_c) instances.get(key);
if (source != null && !modelRoot.isCompareRoot()) {
source.convertFromProxy();
source.batchUnrelate();
//pre-process the uuid so that we re-use null uuid instance rather than creating a new one.
source.m_oir_id = IdAssigner.preprocessUUID(p_m_oir_id);
source.m_mult = p_m_mult;
source.m_cond = p_m_cond;
source.m_txt_phrs = p_m_txt_phrs;
//pre-process the uuid so that we re-use null uuid instance rather than creating a new one.
source.m_obj_id = IdAssigner.preprocessUUID(p_m_obj_id);
//pre-process the uuid so that we re-use null uuid instance rather than creating a new one.
source.m_rel_id = IdAssigner.preprocessUUID(p_m_rel_id);
return source;
}
}
// there is no instance matching the id
ClassAsSimpleParticipant_c new_inst = new ClassAsSimpleParticipant_c(
modelRoot, p_m_obj_id, p_m_rel_id, p_m_oir_id, p_m_mult,
p_m_cond, p_m_txt_phrs);
return new_inst;
}
public ClassAsSimpleParticipant_c(ModelRoot modelRoot) {
super(modelRoot);
m_oir_id = IdAssigner.NULL_UUID;
m_txt_phrs = "";
m_obj_id = IdAssigner.NULL_UUID;
m_rel_id = IdAssigner.NULL_UUID;
Object[] key = {m_oir_id, m_obj_id, m_rel_id};
addInstanceToMap(key);
}
public Object getInstanceKey() {
Object[] key = {m_oir_id, m_obj_id, m_rel_id};
return key;
}
public boolean setInstanceKey(UUID p_newKey) {
boolean changed = false;
// round p1
// round p2
// round p3
// round p4
// round p5
if (m_oir_id != p_newKey) {
m_oir_id = p_newKey;
changed = true;
}
return changed;
}
public boolean equals(Object elem) {
if (!(elem instanceof ClassAsSimpleParticipant_c)) {
return false;
}
// check that the model-roots are the same
if (((NonRootModelElement) elem).getModelRoot() != getModelRoot()) {
return false;
}
return identityEquals(elem);
}
public boolean identityEquals(Object elem) {
if (!(elem instanceof ClassAsSimpleParticipant_c)) {
return false;
}
ClassAsSimpleParticipant_c me = (ClassAsSimpleParticipant_c) elem;
// don't allow an empty id-value to produce a false positive result;
// in this case, use whether the two instances are actually the same
// one in memory, instead
if ((IdAssigner.NULL_UUID.equals(getOir_id()) || IdAssigner.NULL_UUID
.equals(((ClassAsSimpleParticipant_c) elem).getOir_id()))
&& this != elem) {
return false;
}
if (!getOir_id()
.equals(((ClassAsSimpleParticipant_c) elem).getOir_id()))
return false;
// don't allow an empty id-value to produce a false positive result;
// in this case, use whether the two instances are actually the same
// one in memory, instead
if ((IdAssigner.NULL_UUID.equals(getObj_id()) || IdAssigner.NULL_UUID
.equals(((ClassAsSimpleParticipant_c) elem).getObj_id()))
&& this != elem) {
return false;
}
if (!getObj_id()
.equals(((ClassAsSimpleParticipant_c) elem).getObj_id()))
return false;
// don't allow an empty id-value to produce a false positive result;
// in this case, use whether the two instances are actually the same
// one in memory, instead
if ((IdAssigner.NULL_UUID.equals(getRel_id()) || IdAssigner.NULL_UUID
.equals(((ClassAsSimpleParticipant_c) elem).getRel_id()))
&& this != elem) {
return false;
}
if (!getRel_id()
.equals(((ClassAsSimpleParticipant_c) elem).getRel_id()))
return false;
return true;
}
public boolean cachedIdentityEquals(Object elem) {
if (!(elem instanceof ClassAsSimpleParticipant_c)) {
return false;
}
ClassAsSimpleParticipant_c me = (ClassAsSimpleParticipant_c) elem;
if (!getOir_idCachedValue().equals(
((ClassAsSimpleParticipant_c) elem).getOir_idCachedValue()))
return false;
if (!getObj_idCachedValue().equals(
((ClassAsSimpleParticipant_c) elem).getObj_idCachedValue()))
return false;
if (!getRel_idCachedValue().equals(
((ClassAsSimpleParticipant_c) elem).getRel_idCachedValue()))
return false;
return true;
}
// Attributes
private java.util.UUID m_oir_id;
private int m_mult;
private int m_cond;
private String m_txt_phrs;
private java.util.UUID m_obj_id;
private java.util.UUID m_rel_id;
// declare association references from this class
// referring navigation
SimpleAssociation_c IsRelatedToFormalizerViaSimpleAssociation;
public void relateAcrossR207To(SimpleAssociation_c target) {
relateAcrossR207To(target, true);
}
public void relateAcrossR207To(SimpleAssociation_c target,
boolean notifyChanges) {
if (target == null)
return;
if (target == IsRelatedToFormalizerViaSimpleAssociation)
return; // already related
if (IsRelatedToFormalizerViaSimpleAssociation != target) {
Object oldKey = getInstanceKey();
if (IsRelatedToFormalizerViaSimpleAssociation != null) {
IsRelatedToFormalizerViaSimpleAssociation
.clearBackPointerR207To(this);
if (Boolean.valueOf(System.getenv("PTC_MCC_ENABLED")) == true) { //$NON-NLS-1$
Ooaofooa.log
.println(
ILogger.CONSISTENCY,
"ClassAsSimpleParticipant_c.relateAcrossR207To(SimpleAssociation_c target)",
"Relate performed across R207 from Class As Simple Participant to Simple Association without unrelate of prior instance.");
}
}
IsRelatedToFormalizerViaSimpleAssociation = target;
if (IdAssigner.NULL_UUID.equals(target.getRel_id())) {
// do not update cached value
} else {
// update cached value
m_rel_id = target.getRel_idCachedValue();
}
updateInstanceKey(oldKey, getInstanceKey());
target.setBackPointerR207To(this);
target.addRef();
if (notifyChanges) {
RelationshipChangeModelDelta change = new RelationshipChangeModelDelta(
Modeleventnotification_c.DELTA_ELEMENT_RELATED, this,
target, "207", "");
Ooaofooa.getDefaultInstance().fireModelElementRelationChanged(
change);
}
}
}
public void unrelateAcrossR207From(SimpleAssociation_c target) {
unrelateAcrossR207From(target, true);
}
public void unrelateAcrossR207From(SimpleAssociation_c target,
boolean notifyChanges) {
if (target == null)
return;
if (IsRelatedToFormalizerViaSimpleAssociation == null)
return; // already unrelated
if (target != IsRelatedToFormalizerViaSimpleAssociation) {
Exception e = new Exception();
e.fillInStackTrace();
CorePlugin.logError(
"Tried to unrelate from non-related instance across R207",
e);
return;
}
if (target != null) {
target.clearBackPointerR207To(this);
}
if (IsRelatedToFormalizerViaSimpleAssociation != null) {
m_rel_id = IsRelatedToFormalizerViaSimpleAssociation.getRel_id();
if (IdAssigner.NULL_UUID.equals(m_rel_id)) {
m_rel_id = IsRelatedToFormalizerViaSimpleAssociation
.getRel_idCachedValue();
}
IsRelatedToFormalizerViaSimpleAssociation = null;
target.removeRef();
if (notifyChanges) {
RelationshipChangeModelDelta change = new RelationshipChangeModelDelta(
Modeleventnotification_c.DELTA_ELEMENT_UNRELATED, this,
target, "207", "");
Ooaofooa.getDefaultInstance().fireModelElementRelationChanged(
change);
}
}
}
public static ClassAsSimpleParticipant_c getOneR_PARTOnR207(
SimpleAssociation_c[] targets) {
return getOneR_PARTOnR207(targets, null);
}
public static ClassAsSimpleParticipant_c getOneR_PARTOnR207(
SimpleAssociation_c[] targets, ClassQueryInterface_c test) {
ClassAsSimpleParticipant_c ret_val = null;
if (targets != null) {
for (int i = 0; i < targets.length && ret_val == null; ++i) {
ret_val = getOneR_PARTOnR207(targets[i], test);
}
}
return ret_val;
}
public static ClassAsSimpleParticipant_c getOneR_PARTOnR207(
SimpleAssociation_c target) {
return getOneR_PARTOnR207(target, null);
}
public static ClassAsSimpleParticipant_c getOneR_PARTOnR207(
SimpleAssociation_c target, boolean loadComponent) {
return getOneR_PARTOnR207(target.getModelRoot(), target, null,
loadComponent);
}
public static ClassAsSimpleParticipant_c getOneR_PARTOnR207(
SimpleAssociation_c target, ClassQueryInterface_c test) {
if (target != null) {
return getOneR_PARTOnR207(target.getModelRoot(), target, test);
}
return null;
}
public static ClassAsSimpleParticipant_c getOneR_PARTOnR207(
ModelRoot modelRoot, SimpleAssociation_c target,
ClassQueryInterface_c test) {
return getOneR_PARTOnR207(modelRoot, target, test, true);
}
public static ClassAsSimpleParticipant_c getOneR_PARTOnR207(
ModelRoot modelRoot, SimpleAssociation_c target,
ClassQueryInterface_c test, boolean loadComponent) {
return find_getOneR_PARTOnR207(modelRoot, target, test);
}
private static ClassAsSimpleParticipant_c find_getOneR_PARTOnR207(
ModelRoot modelRoot, SimpleAssociation_c target,
ClassQueryInterface_c test) {
if (target != null) {
synchronized (target.backPointer_RelatesClassAsSimpleParticipantRelates_R207) {
for (int i = 0; i < target.backPointer_RelatesClassAsSimpleParticipantRelates_R207
.size(); ++i) {
ClassAsSimpleParticipant_c source = (ClassAsSimpleParticipant_c) target.backPointer_RelatesClassAsSimpleParticipantRelates_R207
.get(i);
if (source != null
&& (test == null || test.evaluate(source))) {
return source;
}
}
}
}
// not found
return null;
}
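// Illustrative usage (not generated code; `simpleAssoc` is a hypothetical
// instance): navigate association R207 from a Simple Association to its
// participant, optionally filtering with a ClassQueryInterface_c test:
//   ClassAsSimpleParticipant_c part =
//       ClassAsSimpleParticipant_c.getOneR_PARTOnR207(simpleAssoc);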
public static ClassAsSimpleParticipant_c[] getManyR_PARTsOnR207(
SimpleAssociation_c[] targets) {
return getManyR_PARTsOnR207(targets, null);
}
public static ClassAsSimpleParticipant_c[] getManyR_PARTsOnR207(
SimpleAssociation_c[] targets, boolean loadComponent) {
return getManyR_PARTsOnR207(targets, null, loadComponent);
}
public static ClassAsSimpleParticipant_c[] getManyR_PARTsOnR207(
SimpleAssociation_c[] targets, ClassQueryInterface_c test) {
return getManyR_PARTsOnR207(targets, test, true);
}
public static ClassAsSimpleParticipant_c[] getManyR_PARTsOnR207(
SimpleAssociation_c[] targets, ClassQueryInterface_c test,
boolean loadComponent) {
if (targets == null || targets.length == 0 || targets[0] == null)
return new ClassAsSimpleParticipant_c[0];
ModelRoot modelRoot = targets[0].getModelRoot();
InstanceList instances = modelRoot
.getInstanceList(ClassAsSimpleParticipant_c.class);
Vector matches = new Vector();
for (int i = 0; i < targets.length; i++) {
synchronized (targets[i].backPointer_RelatesClassAsSimpleParticipantRelates_R207) {
for (int j = 0; j < targets[i].backPointer_RelatesClassAsSimpleParticipantRelates_R207
.size(); ++j) {
ClassAsSimpleParticipant_c source = (ClassAsSimpleParticipant_c) targets[i].backPointer_RelatesClassAsSimpleParticipantRelates_R207
.get(j);
if (source != null
&& (test == null || test.evaluate(source))) {
matches.add(source);
}
}
}
}
if (matches.size() > 0) {
ClassAsSimpleParticipant_c[] ret_set = new ClassAsSimpleParticipant_c[matches
.size()];
matches.copyInto(ret_set);
return ret_set;
} else {
return new ClassAsSimpleParticipant_c[0];
}
}
public static ClassAsSimpleParticipant_c[] getManyR_PARTsOnR207(
SimpleAssociation_c target) {
return getManyR_PARTsOnR207(target, null);
}
public static ClassAsSimpleParticipant_c[] getManyR_PARTsOnR207(
SimpleAssociation_c target, boolean loadComponent) {
return getManyR_PARTsOnR207(target, null, loadComponent);
}
public static ClassAsSimpleParticipant_c[] getManyR_PARTsOnR207(
SimpleAssociation_c target, ClassQueryInterface_c test) {
return getManyR_PARTsOnR207(target, test, true);
}
public static ClassAsSimpleParticipant_c[] getManyR_PARTsOnR207(
SimpleAssociation_c target, ClassQueryInterface_c test,
boolean loadComponent) {
if (target == null)
return new ClassAsSimpleParticipant_c[0];
ModelRoot modelRoot = target.getModelRoot();
Vector matches = new Vector();
synchronized (target.backPointer_RelatesClassAsSimpleParticipantRelates_R207) {
for (int i = 0; i < target.backPointer_RelatesClassAsSimpleParticipantRelates_R207
.size(); ++i) {
ClassAsSimpleParticipant_c source = (ClassAsSimpleParticipant_c) target.backPointer_RelatesClassAsSimpleParticipantRelates_R207
.get(i);
if (source != null && (test == null || test.evaluate(source))) {
matches.add(source);
}
}
}
if (matches.size() > 0) {
ClassAsSimpleParticipant_c[] ret_set = new ClassAsSimpleParticipant_c[matches
.size()];
matches.copyInto(ret_set);
return ret_set;
} else {
return new ClassAsSimpleParticipant_c[0];
}
}
// referring navigation
ReferredToClassInAssoc_c IsSupertypeReferredToClassInAssoc;
public void relateAcrossR204To(ReferredToClassInAssoc_c target) {
relateAcrossR204To(target, true);
}
public void relateAcrossR204To(ReferredToClassInAssoc_c target,
boolean notifyChanges) {
if (target == null)
return;
if (target == IsSupertypeReferredToClassInAssoc)
return; // already related
if (IsSupertypeReferredToClassInAssoc != target) {
Object oldKey = getInstanceKey();
if (IsSupertypeReferredToClassInAssoc != null) {
IsSupertypeReferredToClassInAssoc.clearBackPointerR204To(this);
if (Boolean.valueOf(System.getenv("PTC_MCC_ENABLED")) == true) { //$NON-NLS-1$
Ooaofooa.log
.println(
ILogger.CONSISTENCY,
"ClassAsSimpleParticipant_c.relateAcrossR204To(ReferredToClassInAssoc_c target)",
"Relate performed across R204 from Class As Simple Participant to Referred To Class in Assoc without unrelate of prior instance.");
}
}
IsSupertypeReferredToClassInAssoc = target;
if (IdAssigner.NULL_UUID.equals(target.getOir_id())) {
// do not update cached value
} else {
// update cached value
m_oir_id = target.getOir_idCachedValue();
}
if (IdAssigner.NULL_UUID.equals(target.getObj_id())) {
// do not update cached value
} else {
// update cached value
m_obj_id = target.getObj_idCachedValue();
}
if (IdAssigner.NULL_UUID.equals(target.getRel_id())) {
// do not update cached value
} else {
// update cached value
m_rel_id = target.getRel_idCachedValue();
}
updateInstanceKey(oldKey, getInstanceKey());
target.setBackPointerR204To(this);
target.addRef();
if (notifyChanges) {
RelationshipChangeModelDelta change = new RelationshipChangeModelDelta(
Modeleventnotification_c.DELTA_ELEMENT_RELATED, this,
target, "204", "");
Ooaofooa.getDefaultInstance().fireModelElementRelationChanged(
change);
}
}
}
public void unrelateAcrossR204From(ReferredToClassInAssoc_c target) {
unrelateAcrossR204From(target, true);
}
public void unrelateAcrossR204From(ReferredToClassInAssoc_c target,
boolean notifyChanges) {
if (target == null)
return;
if (IsSupertypeReferredToClassInAssoc == null)
return; // already unrelated
if (target != IsSupertypeReferredToClassInAssoc) {
Exception e = new Exception();
e.fillInStackTrace();
CorePlugin.logError(
"Tried to unrelate from non-related instance across R204",
e);
return;
}
if (target != null) {
target.clearBackPointerR204To(this);
}
if (IsSupertypeReferredToClassInAssoc != null) {
m_oir_id = IsSupertypeReferredToClassInAssoc.getOir_id();
if (IdAssigner.NULL_UUID.equals(m_oir_id)) {
m_oir_id = IsSupertypeReferredToClassInAssoc
.getOir_idCachedValue();
}
m_obj_id = IsSupertypeReferredToClassInAssoc.getObj_id();
if (IdAssigner.NULL_UUID.equals(m_obj_id)) {
m_obj_id = IsSupertypeReferredToClassInAssoc
.getObj_idCachedValue();
}
m_rel_id = IsSupertypeReferredToClassInAssoc.getRel_id();
if (IdAssigner.NULL_UUID.equals(m_rel_id)) {
m_rel_id = IsSupertypeReferredToClassInAssoc
.getRel_idCachedValue();
}
IsSupertypeReferredToClassInAssoc = null;
target.removeRef();
if (notifyChanges) {
RelationshipChangeModelDelta change = new RelationshipChangeModelDelta(
Modeleventnotification_c.DELTA_ELEMENT_UNRELATED, this,
target, "204", "");
Ooaofooa.getDefaultInstance().fireModelElementRelationChanged(
change);
}
}
}
public static ClassAsSimpleParticipant_c getOneR_PARTOnR204(
ReferredToClassInAssoc_c[] targets) {
return getOneR_PARTOnR204(targets, null);
}
public static ClassAsSimpleParticipant_c getOneR_PARTOnR204(
ReferredToClassInAssoc_c[] targets, ClassQueryInterface_c test) {
ClassAsSimpleParticipant_c ret_val = null;
if (targets != null) {
for (int i = 0; i < targets.length && ret_val == null; ++i) {
ret_val = getOneR_PARTOnR204(targets[i], test);
}
}
return ret_val;
}
public static ClassAsSimpleParticipant_c getOneR_PARTOnR204(
ReferredToClassInAssoc_c target) {
return getOneR_PARTOnR204(target, null);
}
public static ClassAsSimpleParticipant_c getOneR_PARTOnR204(
ReferredToClassInAssoc_c target, boolean loadComponent) {
return getOneR_PARTOnR204(target.getModelRoot(), target, null,
loadComponent);
}
public static ClassAsSimpleParticipant_c getOneR_PARTOnR204(
ReferredToClassInAssoc_c target, ClassQueryInterface_c test) {
if (target != null) {
return getOneR_PARTOnR204(target.getModelRoot(), target, test);
}
return null;
}
public static ClassAsSimpleParticipant_c getOneR_PARTOnR204(
ModelRoot modelRoot, ReferredToClassInAssoc_c target,
ClassQueryInterface_c test) {
return getOneR_PARTOnR204(modelRoot, target, test, true);
}
public static ClassAsSimpleParticipant_c getOneR_PARTOnR204(
ModelRoot modelRoot, ReferredToClassInAssoc_c target,
ClassQueryInterface_c test, boolean loadComponent) {
return find_getOneR_PARTOnR204(modelRoot, target, test);
}
private static ClassAsSimpleParticipant_c find_getOneR_PARTOnR204(
ModelRoot modelRoot, ReferredToClassInAssoc_c target,
ClassQueryInterface_c test) {
if (target != null) {
ClassAsSimpleParticipant_c source = (ClassAsSimpleParticipant_c) target.backPointer_IsSubtypeClassAsSimpleParticipantIsSubtype_R204;
if (source != null && (test == null || test.evaluate(source))) {
return source;
}
}
// not found
return null;
}
public static ClassAsSimpleParticipant_c[] getManyR_PARTsOnR204(
ReferredToClassInAssoc_c[] targets) {
return getManyR_PARTsOnR204(targets, null);
}
public static ClassAsSimpleParticipant_c[] getManyR_PARTsOnR204(
ReferredToClassInAssoc_c[] targets, boolean loadComponent) {
return getManyR_PARTsOnR204(targets, null, loadComponent);
}
public static ClassAsSimpleParticipant_c[] getManyR_PARTsOnR204(
ReferredToClassInAssoc_c[] targets, ClassQueryInterface_c test) {
return getManyR_PARTsOnR204(targets, test, true);
}
public static ClassAsSimpleParticipant_c[] getManyR_PARTsOnR204(
ReferredToClassInAssoc_c[] targets, ClassQueryInterface_c test,
boolean loadComponent) {
if (targets == null || targets.length == 0 || targets[0] == null)
return new ClassAsSimpleParticipant_c[0];
ModelRoot modelRoot = targets[0].getModelRoot();
InstanceList instances = modelRoot
.getInstanceList(ClassAsSimpleParticipant_c.class);
Vector matches = new Vector();
for (int i = 0; i < targets.length; i++) {
ClassAsSimpleParticipant_c source = (ClassAsSimpleParticipant_c) targets[i].backPointer_IsSubtypeClassAsSimpleParticipantIsSubtype_R204;
if (source != null && (test == null || test.evaluate(source))) {
matches.add(source);
}
}
if (matches.size() > 0) {
ClassAsSimpleParticipant_c[] ret_set = new ClassAsSimpleParticipant_c[matches
.size()];
matches.copyInto(ret_set);
return ret_set;
} else {
return new ClassAsSimpleParticipant_c[0];
}
}
public static ClassAsSimpleParticipant_c[] getManyR_PARTsOnR204(
ReferredToClassInAssoc_c target) {
if (target != null) {
ReferredToClassInAssoc_c[] targetArray = new ReferredToClassInAssoc_c[1];
targetArray[0] = target;
return getManyR_PARTsOnR204(targetArray);
} else {
ClassAsSimpleParticipant_c[] result = new ClassAsSimpleParticipant_c[0];
return result;
}
}
public static ClassAsSimpleParticipant_c[] getManyR_PARTsOnR204(
ReferredToClassInAssoc_c target, boolean loadComponent) {
if (target != null) {
ReferredToClassInAssoc_c[] targetArray = new ReferredToClassInAssoc_c[1];
targetArray[0] = target;
return getManyR_PARTsOnR204(targetArray, loadComponent);
} else {
ClassAsSimpleParticipant_c[] result = new ClassAsSimpleParticipant_c[0];
return result;
}
}
public void batchRelate(ModelRoot modelRoot, boolean notifyChanges,
boolean searchAllRoots) {
batchRelate(modelRoot, false, notifyChanges, searchAllRoots);
}
public void batchRelate(ModelRoot modelRoot, boolean relateProxies,
boolean notifyChanges, boolean searchAllRoots) {
InstanceList instances = null;
ModelRoot baseRoot = modelRoot;
if (IsRelatedToFormalizerViaSimpleAssociation == null) {
// R207
SimpleAssociation_c relInst56762 = (SimpleAssociation_c) baseRoot
.getInstanceList(SimpleAssociation_c.class).get(
new Object[]{m_rel_id});
// if there was no local element, check for any global elements
// failing that proceed to check other model roots
if (relInst56762 == null) {
relInst56762 = (SimpleAssociation_c) Ooaofooa
.getDefaultInstance()
.getInstanceList(SimpleAssociation_c.class)
.get(new Object[]{m_rel_id});
}
if (relInst56762 == null && searchAllRoots
&& !baseRoot.isCompareRoot()) {
Ooaofooa[] roots = Ooaofooa.getInstances();
for (int i = 0; i < roots.length; i++) {
if (roots[i].isCompareRoot()) {
// never use elements from any compare root
continue;
}
relInst56762 = (SimpleAssociation_c) roots[i]
.getInstanceList(SimpleAssociation_c.class).get(
new Object[]{m_rel_id});
if (relInst56762 != null)
break;
}
}
//synchronized
if (relInst56762 != null) {
if (relateProxies || !isProxy()
|| (inSameComponent(this, relInst56762) && !isProxy())) {
relInst56762.relateAcrossR207To(this, notifyChanges);
}
}
}
// R204
ReferredToClassInAssoc_c relInst56763 = (ReferredToClassInAssoc_c) baseRoot
.getInstanceList(ReferredToClassInAssoc_c.class).get(
new Object[]{m_oir_id, m_obj_id, m_rel_id});
// if there was no local element, check for any global elements
// failing that proceed to check other model roots
if (relInst56763 == null) {
relInst56763 = (ReferredToClassInAssoc_c) Ooaofooa
.getDefaultInstance()
.getInstanceList(ReferredToClassInAssoc_c.class)
.get(new Object[]{m_oir_id, m_obj_id, m_rel_id});
}
if (relInst56763 == null && searchAllRoots && !baseRoot.isCompareRoot()) {
Ooaofooa[] roots = Ooaofooa.getInstances();
for (int i = 0; i < roots.length; i++) {
if (roots[i].isCompareRoot()) {
// never use elements from any compare root
continue;
}
relInst56763 = (ReferredToClassInAssoc_c) roots[i]
.getInstanceList(ReferredToClassInAssoc_c.class).get(
new Object[]{m_oir_id, m_obj_id, m_rel_id});
if (relInst56763 != null)
break;
}
}
//synchronized
if (relInst56763 != null) {
if (relateProxies || !isProxy()
|| (inSameComponent(this, relInst56763) && !isProxy())) {
relInst56763.relateAcrossR204To(this, notifyChanges);
}
}
}
public void batchUnrelate(boolean notifyChanges) {
NonRootModelElement inst = null;
// R207
// R_SIMP
inst = IsRelatedToFormalizerViaSimpleAssociation;
unrelateAcrossR207From(IsRelatedToFormalizerViaSimpleAssociation,
notifyChanges);
if (inst != null) {
inst.removeRef();
}
// R204
// R_RTO
inst = IsSupertypeReferredToClassInAssoc;
unrelateAcrossR204From(IsSupertypeReferredToClassInAssoc, notifyChanges);
if (inst != null) {
inst.removeRef();
}
}
public static void batchRelateAll(ModelRoot modelRoot,
boolean notifyChanges, boolean searchAllRoots) {
batchRelateAll(modelRoot, notifyChanges, searchAllRoots, false);
}
public static void batchRelateAll(ModelRoot modelRoot,
boolean notifyChanges, boolean searchAllRoots, boolean relateProxies) {
InstanceList instances = modelRoot
.getInstanceList(ClassAsSimpleParticipant_c.class);
synchronized (instances) {
Iterator<NonRootModelElement> cursor = instances.iterator();
while (cursor.hasNext()) {
final ClassAsSimpleParticipant_c inst = (ClassAsSimpleParticipant_c) cursor
.next();
inst.batchRelate(modelRoot, relateProxies, notifyChanges,
searchAllRoots);
}
}
}
public static void clearInstances(ModelRoot modelRoot) {
InstanceList instances = modelRoot
.getInstanceList(ClassAsSimpleParticipant_c.class);
synchronized (instances) {
for (int i = instances.size() - 1; i >= 0; i--) {
((NonRootModelElement) instances.get(i)).delete_unchecked();
}
}
}
public static ClassAsSimpleParticipant_c ClassAsSimpleParticipantInstance(
ModelRoot modelRoot, ClassQueryInterface_c test,
boolean loadComponent) {
ClassAsSimpleParticipant_c result = findClassAsSimpleParticipantInstance(
modelRoot, test, loadComponent);
if (result == null && loadComponent) {
List pmcs = PersistenceManager.findAllComponents(modelRoot,
ClassAsSimpleParticipant_c.class);
for (int i = 0; i < pmcs.size(); i++) {
PersistableModelComponent component = (PersistableModelComponent) pmcs
.get(i);
if (!component.isLoaded()) {
try {
component.load(new NullProgressMonitor());
result = findClassAsSimpleParticipantInstance(
modelRoot, test, loadComponent);
if (result != null)
return result;
} catch (Exception e) {
CorePlugin.logError("Error Loading component", e);
}
}
}
}
if (result != null && loadComponent) {
result.loadProxy();
}
return result;
}
private static ClassAsSimpleParticipant_c findClassAsSimpleParticipantInstance(
ModelRoot modelRoot, ClassQueryInterface_c test,
boolean loadComponent) {
InstanceList instances = modelRoot
.getInstanceList(ClassAsSimpleParticipant_c.class);
synchronized (instances) {
for (int i = 0; i < instances.size(); ++i) {
ClassAsSimpleParticipant_c x = (ClassAsSimpleParticipant_c) instances
.get(i);
if (test == null || test.evaluate(x)) {
if (x.ensureLoaded(loadComponent))
return x;
}
}
}
return null;
}
public static ClassAsSimpleParticipant_c ClassAsSimpleParticipantInstance(
ModelRoot modelRoot, ClassQueryInterface_c test) {
return ClassAsSimpleParticipantInstance(modelRoot, test, true);
}
public static ClassAsSimpleParticipant_c ClassAsSimpleParticipantInstance(
ModelRoot modelRoot) {
return ClassAsSimpleParticipantInstance(modelRoot, null, true);
}
public static ClassAsSimpleParticipant_c[] ClassAsSimpleParticipantInstances(
ModelRoot modelRoot, ClassQueryInterface_c test,
boolean loadComponent) {
if (loadComponent) {
PersistenceManager.ensureAllInstancesLoaded(modelRoot,
ClassAsSimpleParticipant_c.class);
}
InstanceList instances = modelRoot
.getInstanceList(ClassAsSimpleParticipant_c.class);
Vector matches = new Vector();
synchronized (instances) {
for (int i = 0; i < instances.size(); ++i) {
ClassAsSimpleParticipant_c x = (ClassAsSimpleParticipant_c) instances
.get(i);
if (test == null || test.evaluate(x)) {
if (x.ensureLoaded(loadComponent))
matches.add(x);
}
}
if (matches.size() > 0) {
ClassAsSimpleParticipant_c[] ret_set = new ClassAsSimpleParticipant_c[matches
.size()];
matches.copyInto(ret_set);
return ret_set;
} else {
return new ClassAsSimpleParticipant_c[0];
}
}
}
public static ClassAsSimpleParticipant_c[] ClassAsSimpleParticipantInstances(
ModelRoot modelRoot, ClassQueryInterface_c test) {
return ClassAsSimpleParticipantInstances(modelRoot, test, true);
}
public static ClassAsSimpleParticipant_c[] ClassAsSimpleParticipantInstances(
ModelRoot modelRoot) {
return ClassAsSimpleParticipantInstances(modelRoot, null, true);
}
public boolean delete() {
boolean result = super.delete();
boolean delete_error = false;
String errorMsg = "The following relationships were not torn down by the Class As Simple Participant.dispose call: ";
SimpleAssociation_c testR207Inst = SimpleAssociation_c
.getOneR_SIMPOnR207(this, false);
if (testR207Inst != null) {
delete_error = true;
errorMsg = errorMsg + "207 ";
}
ReferredToClassInAssoc_c testR204Inst2 = ReferredToClassInAssoc_c
.getOneR_RTOOnR204(this, false);
if (testR204Inst2 != null) {
delete_error = true;
errorMsg = errorMsg + "204 ";
}
if (delete_error == true) {
if (CorePlugin.getDefault().isDebugging()) {
Ooaofooa.log.println(ILogger.DELETE,
"Class As Simple Participant", errorMsg);
} else {
Exception e = new Exception();
e.fillInStackTrace();
CorePlugin.logError(errorMsg, e);
}
}
return result;
}
// end declare instance pool
// declare attribute accessors
public boolean isUUID(String attributeName) {
if (attributeName.equals("oir_id")) {
return true;
}
if (attributeName.equals("obj_id")) {
return true;
}
if (attributeName.equals("rel_id")) {
return true;
}
return false;
}
public String getCompUniqueID() {
UUID tempID = null;
long longID = 0L;
StringBuffer result = new StringBuffer();
tempID = getOir_id();
if (IdAssigner.NULL_UUID.equals(tempID))
tempID = getOir_idCachedValue();
result.append(Long.toHexString(tempID.getMostSignificantBits()));
result.append(Long.toHexString(tempID.getLeastSignificantBits()));
tempID = getObj_id();
if (IdAssigner.NULL_UUID.equals(tempID))
tempID = getObj_idCachedValue();
result.append(Long.toHexString(tempID.getMostSignificantBits()));
result.append(Long.toHexString(tempID.getLeastSignificantBits()));
tempID = getRel_id();
if (IdAssigner.NULL_UUID.equals(tempID))
tempID = getRel_idCachedValue();
result.append(Long.toHexString(tempID.getMostSignificantBits()));
result.append(Long.toHexString(tempID.getLeastSignificantBits()));
return result.toString();
}
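// Illustrative note (not generated code): the composite ID above is the hex
// concatenation of oir_id, obj_id and rel_id (most- then least-significant
// 64 bits of each), falling back to cached values when an ID is the null UUID.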
// declare attribute accessors
public long getOir_idLongBased() {
if (IsSupertypeReferredToClassInAssoc != null) {
return IsSupertypeReferredToClassInAssoc.getOir_idLongBased();
}
return 0;
}
public java.util.UUID getOir_id() {
if (IsSupertypeReferredToClassInAssoc != null) {
return IsSupertypeReferredToClassInAssoc.getOir_id();
}
return IdAssigner.NULL_UUID;
}
public boolean hasSuperType() {
return (IsSupertypeReferredToClassInAssoc != null);
}
public java.util.UUID getOir_idCachedValue() {
if (!IdAssigner.NULL_UUID.equals(m_oir_id))
return m_oir_id;
else
return getOir_id();
}
public void setOir_id(java.util.UUID newValue) {
if (newValue != null) {
if (newValue.equals(m_oir_id)) {
return;
}
} else if (m_oir_id != null) {
if (m_oir_id.equals(newValue)) {
return;
}
} else {
return;
}
AttributeChangeModelDelta change = new AttributeChangeModelDelta(
Modeleventnotification_c.DELTA_ATTRIBUTE_CHANGE, this,
"Oir_id", m_oir_id, newValue, true);
m_oir_id = IdAssigner.preprocessUUID(newValue);
Ooaofooa.getDefaultInstance().fireModelElementAttributeChanged(change);
}
public int getMult() {
return m_mult;
}
public void setMult(int newValue) {
if (m_mult == newValue) {
return;
}
AttributeChangeModelDelta change = new AttributeChangeModelDelta(
Modeleventnotification_c.DELTA_ATTRIBUTE_CHANGE, this, "Mult",
new Integer(m_mult), new Integer(newValue), true);
m_mult = newValue;
Ooaofooa.getDefaultInstance().fireModelElementAttributeChanged(change);
}
public int getCond() {
return m_cond;
}
public void setCond(int newValue) {
if (m_cond == newValue) {
return;
}
AttributeChangeModelDelta change = new AttributeChangeModelDelta(
Modeleventnotification_c.DELTA_ATTRIBUTE_CHANGE, this, "Cond",
new Integer(m_cond), new Integer(newValue), true);
m_cond = newValue;
Ooaofooa.getDefaultInstance().fireModelElementAttributeChanged(change);
}
public String getTxt_phrs() {
return m_txt_phrs;
}
public void setTxt_phrs(String newValue) {
if (newValue != null) {
if (newValue.equals(m_txt_phrs)) {
return;
}
} else if (m_txt_phrs != null) {
if (m_txt_phrs.equals(newValue)) {
return;
}
} else {
return;
}
AttributeChangeModelDelta change = new AttributeChangeModelDelta(
Modeleventnotification_c.DELTA_ATTRIBUTE_CHANGE, this,
"Txt_phrs", m_txt_phrs, newValue, true);
m_txt_phrs = newValue;
Ooaofooa.getDefaultInstance().fireModelElementAttributeChanged(change);
}
public long getObj_idLongBased() {
if (IsSupertypeReferredToClassInAssoc != null) {
return IsSupertypeReferredToClassInAssoc.getObj_idLongBased();
}
return 0;
}
public java.util.UUID getObj_id() {
if (IsSupertypeReferredToClassInAssoc != null) {
return IsSupertypeReferredToClassInAssoc.getObj_id();
}
return IdAssigner.NULL_UUID;
}
public java.util.UUID getObj_idCachedValue() {
if (!IdAssigner.NULL_UUID.equals(m_obj_id))
return m_obj_id;
else
return getObj_id();
}
public void setObj_id(java.util.UUID newValue) {
if (newValue != null) {
if (newValue.equals(m_obj_id)) {
return;
}
} else if (m_obj_id != null) {
if (m_obj_id.equals(newValue)) {
return;
}
} else {
return;
}
AttributeChangeModelDelta change = new AttributeChangeModelDelta(
Modeleventnotification_c.DELTA_ATTRIBUTE_CHANGE, this,
"Obj_id", m_obj_id, newValue, true);
m_obj_id = IdAssigner.preprocessUUID(newValue);
Ooaofooa.getDefaultInstance().fireModelElementAttributeChanged(change);
}
public long getRel_idLongBased() {
if (IsRelatedToFormalizerViaSimpleAssociation != null) {
return IsRelatedToFormalizerViaSimpleAssociation
.getRel_idLongBased();
}
if (IsSupertypeReferredToClassInAssoc != null) {
return IsSupertypeReferredToClassInAssoc.getRel_idLongBased();
}
return 0;
}
public java.util.UUID getRel_id() {
if (IsRelatedToFormalizerViaSimpleAssociation != null) {
return IsRelatedToFormalizerViaSimpleAssociation.getRel_id();
}
if (IsSupertypeReferredToClassInAssoc != null) {
return IsSupertypeReferredToClassInAssoc.getRel_id();
}
return IdAssigner.NULL_UUID;
}
public java.util.UUID getRel_idCachedValue() {
if (!IdAssigner.NULL_UUID.equals(m_rel_id))
return m_rel_id;
else
return getRel_id();
}
// end declare accessors
public static void checkClassConsistency(ModelRoot modelRoot) {
Ooaofooa.log
.println(ILogger.OPERATION, "Class As Simple Participant", //$NON-NLS-1$
" Operation entered: Class As Simple Participant::checkClassConsistency"); //$NON-NLS-1$
if (Boolean.valueOf(System.getenv("PTC_MCC_ENABLED")) == false) { //$NON-NLS-1$
return;
}
ClassAsSimpleParticipant_c[] objs = ClassAsSimpleParticipant_c
.ClassAsSimpleParticipantInstances(modelRoot, null, false);
for (int i = 0; i < objs.length; i++) {
objs[i].checkConsistency();
}
}
public boolean checkConsistency() {
Ooaofooa.log
.println(ILogger.OPERATION, "Class As Simple Participant", //$NON-NLS-1$
" Operation entered: Class As Simple Participant::checkConsistency"); //$NON-NLS-1$
if (Boolean.valueOf(System.getenv("PTC_MCC_ENABLED")) == false) { //$NON-NLS-1$
return true;
}
ModelRoot modelRoot = getModelRoot();
boolean retval = true;
class ClassAsSimpleParticipant_c_test56765_c
implements
ClassQueryInterface_c {
ClassAsSimpleParticipant_c_test56765_c(java.util.UUID p56766,
java.util.UUID p56767, java.util.UUID p56768) {
m_p56766 = p56766;
m_p56767 = p56767;
m_p56768 = p56768;
}
private java.util.UUID m_p56766;
private java.util.UUID m_p56767;
private java.util.UUID m_p56768;
public boolean evaluate(Object candidate) {
ClassAsSimpleParticipant_c selected = (ClassAsSimpleParticipant_c) candidate;
boolean retval = false;
retval = (selected.getOir_id().equals(m_p56766))
& (selected.getObj_id().equals(m_p56767))
& (selected.getRel_id().equals(m_p56768));
return retval;
}
}
ClassAsSimpleParticipant_c[] objs56764 = ClassAsSimpleParticipant_c
.ClassAsSimpleParticipantInstances(modelRoot,
new ClassAsSimpleParticipant_c_test56765_c(getOir_id(),
getObj_id(), getRel_id()));
if (((objs56764.length) == 0)) {
if (CorePlugin.getDefault().isDebugging()) {
Ooaofooa.log
.println(
ILogger.CONSISTENCY,
"Class As Simple Participant", //$NON-NLS-1$
"Consistency: Object: Class As Simple Participant: Cardinality of an identifier is zero. " //$NON-NLS-1$
+ "Actual Value: " + Integer.toString(objs56764.length)); //$NON-NLS-1$
} else {
Exception e = new Exception();
CorePlugin
.logError(
"Consistency: Object: Class As Simple Participant: Cardinality of an identifier is zero. " //$NON-NLS-1$
+ "Actual Value: " //$NON-NLS-1$
+ Integer.toString(objs56764.length), e);
}
retval = false;
}
if (((objs56764.length) > 1)) {
if (CorePlugin.getDefault().isDebugging()) {
Ooaofooa.log
.println(
ILogger.CONSISTENCY,
"Class As Simple Participant", //$NON-NLS-1$
"Consistency: Object: Class As Simple Participant: Cardinality of an identifier is greater than 1. " //$NON-NLS-1$
+ "Actual Value: " //$NON-NLS-1$
+ Integer.toString(objs56764.length)
+ " OIR_ID: " + "Not Printable" + " Obj_ID: " + "Not Printable" + " Rel_ID: " + "Not Printable"); //$NON-NLS-1$
} else {
Exception e = new Exception();
CorePlugin
.logError(
"Consistency: Object: Class As Simple Participant: Cardinality of an identifier is greater than 1. " //$NON-NLS-1$
+ "Actual Value: " //$NON-NLS-1$
+ Integer.toString(objs56764.length)
+ " OIR_ID: " + "Not Printable" + " Obj_ID: " + "Not Printable" + " Rel_ID: " + "Not Printable", e); //$NON-NLS-1$
}
retval = false;
}
// Class As Simple Participant is a subtype in association: rel.Numb = 204
// The supertype class is: Referred To Class in Assoc
class ReferredToClassInAssoc_c_test56772_c
implements
ClassQueryInterface_c {
ReferredToClassInAssoc_c_test56772_c(java.util.UUID p56773,
java.util.UUID p56774, java.util.UUID p56775) {
m_p56773 = p56773;
m_p56774 = p56774;
m_p56775 = p56775;
}
private java.util.UUID m_p56773;
private java.util.UUID m_p56774;
private java.util.UUID m_p56775;
public boolean evaluate(Object candidate) {
ReferredToClassInAssoc_c selected = (ReferredToClassInAssoc_c) candidate;
boolean retval = false;
retval = (selected.getOir_id().equals(m_p56773))
& (selected.getObj_id().equals(m_p56774))
& (selected.getRel_id().equals(m_p56775));
return retval;
}
}
ReferredToClassInAssoc_c[] objs56771 = ReferredToClassInAssoc_c
.ReferredToClassInAssocInstances(modelRoot,
new ReferredToClassInAssoc_c_test56772_c(getOir_id(),
getObj_id(), getRel_id()));
if (((objs56771.length) != 1)) {
if (CorePlugin.getDefault().isDebugging()) {
Ooaofooa.log
.println(
ILogger.CONSISTENCY,
"Class As Simple Participant", //$NON-NLS-1$
"Consistency: Object: Class As Simple Participant: Association: 204: Cardinality of a supertype is not equal to 1. " //$NON-NLS-1$
+ "Actual Value: " + Integer.toString(objs56771.length)); //$NON-NLS-1$
} else {
Exception e = new Exception();
CorePlugin
.logError(
"Consistency: Object: Class As Simple Participant: Association: 204: Cardinality of a supertype is not equal to 1. " //$NON-NLS-1$
+ "Actual Value: " //$NON-NLS-1$
+ Integer.toString(objs56771.length), e);
}
retval = false;
}
// Class As Simple Participant is a referring class in association: rel.Numb = 207
// The participating class is: Simple Association
class SimpleAssociation_c_test56777_c implements ClassQueryInterface_c {
SimpleAssociation_c_test56777_c(java.util.UUID p56778) {
m_p56778 = p56778;
}
private java.util.UUID m_p56778;
public boolean evaluate(Object candidate) {
SimpleAssociation_c selected = (SimpleAssociation_c) candidate;
boolean retval = false;
retval = (selected.getRel_id().equals(m_p56778));
return retval;
}
}
SimpleAssociation_c[] objs56776 = SimpleAssociation_c
.SimpleAssociationInstances(modelRoot,
new SimpleAssociation_c_test56777_c(getRel_id()));
// The participant is unconditional
// The multiplicity of the participant is one
if (((objs56776.length) != 1)) {
if (CorePlugin.getDefault().isDebugging()) {
Ooaofooa.log
.println(
ILogger.CONSISTENCY,
"Class As Simple Participant", //$NON-NLS-1$
"Consistency: Object: Class As Simple Participant: Association: 207: Cardinality of a participant is not equal to 1. " //$NON-NLS-1$
+ "Actual Value: " //$NON-NLS-1$
+ Integer.toString(objs56776.length)
+ " Rel_ID: " + "Not Printable"); //$NON-NLS-1$
} else {
Exception e = new Exception();
CorePlugin
.logError(
"Consistency: Object: Class As Simple Participant: Association: 207: Cardinality of a participant is not equal to 1. " //$NON-NLS-1$
+ "Actual Value: " //$NON-NLS-1$
+ Integer.toString(objs56776.length)
+ " Rel_ID: " + "Not Printable", e); //$NON-NLS-1$
}
retval = false;
}
return retval;
}
// declare transform functions
public String Get_connector_text(final int p_At) {
Ooaofooa.log
.println(ILogger.OPERATION, "Class As Simple Participant",
" Operation entered: ClassAsSimpleParticipant::Get_connector_text");
final ModelRoot modelRoot = getModelRoot();
String v_result = "";
if ((p_At == End_c.Start || p_At == End_c.End)) {
v_result = getTxt_phrs();
}
else if ((p_At == End_c.Start_Fixed || p_At == End_c.End_Fixed)) {
v_result = Association_c.Get_cardinality_text(modelRoot, getCond(),
getMult());
}
return v_result;
} // End get_connector_text
public void Dispose() {
Ooaofooa.log.println(ILogger.OPERATION, "Class As Simple Participant",
" Operation entered: ClassAsSimpleParticipant::Dispose");
final ModelRoot modelRoot = getModelRoot();
SimpleAssociation_c v_rsimp = SimpleAssociation_c
.getOneR_SIMPOnR207(this);
this.unrelateAcrossR207From(v_rsimp);
ReferredToClassInAssoc_c v_rto = ReferredToClassInAssoc_c
.getOneR_RTOOnR204(this);
this.unrelateAcrossR204From(v_rto);
if (delete()) {
Ooaofooa.getDefaultInstance().fireModelElementDeleted(
new BaseModelDelta(Modeleventnotification_c.DELTA_DELETE,
this));
}
} // End dispose
public String Get_name() {
Ooaofooa.log.println(ILogger.OPERATION, "Class As Simple Participant",
" Operation entered: ClassAsSimpleParticipant::Get_name");
final ModelRoot modelRoot = getModelRoot();
ModelClass_c v_obj = ModelClass_c
.getOneO_OBJOnR201(ClassInAssociation_c
.getOneR_OIROnR203(ReferredToClassInAssoc_c
.getOneR_RTOOnR204(this)));
return v_obj.getName();
} // End get_name
public void Migratetoformalizer() {
Ooaofooa.log
.println(ILogger.OPERATION, "Class As Simple Participant",
" Operation entered: ClassAsSimpleParticipant::Migratetoformalizer");
final ModelRoot modelRoot = getModelRoot();
ReferringClassInAssoc_c v_rgo = new ReferringClassInAssoc_c(modelRoot);
Ooaofooa.getDefaultInstance().fireModelElementCreated(
new BaseModelDelta(Modeleventnotification_c.DELTA_NEW, v_rgo));
ClassAsSimpleFormalizer_c v_form = new ClassAsSimpleFormalizer_c(
modelRoot);
Ooaofooa.getDefaultInstance().fireModelElementCreated(
new BaseModelDelta(Modeleventnotification_c.DELTA_NEW, v_form));
if (v_form != null) {
v_form.setMult(getMult());
} else {
Throwable t = new Throwable();
t.fillInStackTrace();
CorePlugin.logError("Attribute write attempted on null instance.",
t);
}
if (v_form != null) {
v_form.setCond(getCond());
} else {
Throwable t = new Throwable();
t.fillInStackTrace();
CorePlugin.logError("Attribute write attempted on null instance.",
t);
}
if (v_form != null) {
v_form.setTxt_phrs(getTxt_phrs());
} else {
Throwable t = new Throwable();
t.fillInStackTrace();
CorePlugin.logError("Attribute write attempted on null instance.",
t);
}
ReferredToClassInAssoc_c v_rto = ReferredToClassInAssoc_c
.getOneR_RTOOnR204(this);
ClassInAssociation_c v_oir = ClassInAssociation_c
.getOneR_OIROnR203(v_rto);
SimpleAssociation_c v_simp = SimpleAssociation_c
.getOneR_SIMPOnR207(this);
if (v_oir != null) {
v_oir.relateAcrossR203To(v_rgo);
} else {
Throwable t = new Throwable();
t.fillInStackTrace();
CorePlugin.logError("Relate attempted on null left hand instance.",
t);
}
if (v_rgo != null) {
v_rgo.relateAcrossR205To(v_form);
} else {
Throwable t = new Throwable();
t.fillInStackTrace();
CorePlugin.logError("Relate attempted on null left hand instance.",
t);
}
if (v_form != null) {
v_form.relateAcrossR208To(v_simp);
} else {
Throwable t = new Throwable();
t.fillInStackTrace();
CorePlugin.logError("Relate attempted on null left hand instance.",
t);
}
if (v_rto != null) {
v_rto.Dispose();
} else {
Throwable t = new Throwable();
t.fillInStackTrace();
CorePlugin.logError(
"Attempted to call an operation on a null instance.", t);
}
} // End migrateToFormalizer
public boolean Manymultallowed() {
Ooaofooa.log
.println(ILogger.OPERATION, "Class As Simple Participant",
" Operation entered: ClassAsSimpleParticipant::Manymultallowed");
final ModelRoot modelRoot = getModelRoot();
ClassAsSimpleFormalizer_c v_form = ClassAsSimpleFormalizer_c
.getOneR_FORMOnR208(SimpleAssociation_c
.getOneR_SIMPOnR207(this));
return (v_form == null);
} // End manyMultAllowed
public void Mergedispose() {
Ooaofooa.log.println(ILogger.OPERATION, "Class As Simple Participant",
" Operation entered: ClassAsSimpleParticipant::Mergedispose");
final ModelRoot modelRoot = getModelRoot();
SimpleAssociation_c v_simp = SimpleAssociation_c
.getOneR_SIMPOnR207(this);
if (((v_simp != null))) {
this.unrelateAcrossR207From(v_simp);
if (v_simp != null) {
v_simp.Dispose();
} else {
Throwable t = new Throwable();
t.fillInStackTrace();
CorePlugin
.logError(
"Attempted to call an operation on a null instance.",
t);
}
}
SimpleAssociation_c v_rsimp = SimpleAssociation_c
.getOneR_SIMPOnR207(this);
this.unrelateAcrossR207From(v_rsimp);
ReferredToClassInAssoc_c v_rto = ReferredToClassInAssoc_c
.getOneR_RTOOnR204(this);
ClassInAssociation_c v_cia = ClassInAssociation_c
.getOneR_OIROnR203(v_rto);
if (((v_rto != null))) {
this.unrelateAcrossR204From(v_rto);
if (((v_cia != null))) {
if (v_cia != null) {
v_cia.Dispose();
} else {
Throwable t = new Throwable();
t.fillInStackTrace();
CorePlugin
.logError(
"Attempted to call an operation on a null instance.",
t);
}
}
}
if (delete()) {
Ooaofooa.getDefaultInstance().fireModelElementDeleted(
new BaseModelDelta(Modeleventnotification_c.DELTA_DELETE,
this));
}
} // End mergeDispose
// end transform functions
public Object getAdapter(Class adapter) {
Object superAdapter = super.getAdapter(adapter);
if (superAdapter != null) {
return superAdapter;
}
return null;
}
} // end Class As Simple Participant
| [
"\"PTC_MCC_ENABLED\"",
"\"PTC_MCC_ENABLED\"",
"\"PTC_MCC_ENABLED\"",
"\"PTC_MCC_ENABLED\""
]
| []
| [
"PTC_MCC_ENABLED"
]
| [] | ["PTC_MCC_ENABLED"] | java | 1 | 0 | |
modules/openapi-generator/src/main/java/org/openapitools/codegen/languages/AbstractCSharpCodegen.java | /*
* Copyright 2018 OpenAPI-Generator Contributors (https://openapi-generator.tech)
* Copyright 2018 SmartBear Software
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openapitools.codegen.languages;
import com.google.common.collect.ImmutableMap.Builder;
import com.samskivert.mustache.Mustache.Lambda;
import io.swagger.v3.core.util.Json;
import io.swagger.v3.oas.models.media.ArraySchema;
import io.swagger.v3.oas.models.media.Schema;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.lang3.StringUtils;
import org.openapitools.codegen.*;
import org.openapitools.codegen.templating.mustache.*;
import org.openapitools.codegen.utils.ModelUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.util.*;
import static org.openapitools.codegen.utils.OnceLogger.once;
import static org.openapitools.codegen.utils.StringUtils.camelize;
public abstract class AbstractCSharpCodegen extends DefaultCodegen implements CodegenConfig {
protected boolean optionalAssemblyInfoFlag = true;
protected boolean optionalEmitDefaultValuesFlag = false;
protected boolean optionalProjectFileFlag = true;
protected boolean optionalMethodArgumentFlag = true;
protected boolean useDateTimeOffsetFlag = false;
protected boolean useCollection = false;
protected boolean returnICollection = false;
protected boolean netCoreProjectFileFlag = false;
protected String modelPropertyNaming = CodegenConstants.MODEL_PROPERTY_NAMING_TYPE.PascalCase.name();
protected String licenseUrl = "http://localhost";
protected String licenseName = "NoLicense";
protected String packageVersion = "1.0.0";
protected String packageName = "Org.OpenAPITools";
protected String packageTitle = "OpenAPI Library";
protected String packageProductName = "OpenAPILibrary";
protected String packageDescription = "A library generated from a OpenAPI doc";
protected String packageCompany = "OpenAPI";
protected String packageCopyright = "No Copyright";
protected String packageAuthors = "OpenAPI";
protected String interfacePrefix = "I";
protected String enumNameSuffix = "Enum";
protected String enumValueSuffix = "Enum";
protected String sourceFolder = "src";
// TODO: Add option for test folder output location. Nice to allow e.g. ./test instead of ./src.
// This would require updating relative paths (e.g. path to main project file in test project file)
protected String testFolder = sourceFolder;
protected Set<String> collectionTypes;
protected Set<String> mapTypes;
// true if support nullable type
protected boolean supportNullable = Boolean.FALSE;
// nullable type
protected Set<String> nullableType = new HashSet<String>();
protected Set<String> valueTypes = new HashSet<String>();
private static final Logger LOGGER = LoggerFactory.getLogger(AbstractCSharpCodegen.class);
public AbstractCSharpCodegen() {
super();
supportsInheritance = true;
// C# does not use import mapping
importMapping.clear();
outputFolder = "generated-code" + File.separator + this.getName();
embeddedTemplateDir = templateDir = this.getName();
collectionTypes = new HashSet<String>(
Arrays.asList(
"IList", "List",
"ICollection", "Collection",
"IEnumerable")
);
mapTypes = new HashSet<String>(
Arrays.asList("IDictionary")
);
// NOTE: C# uses camel cased reserved words, while models are title cased. We don't want lowercase comparisons.
reservedWords.addAll(
Arrays.asList(
// set "client" as a reserved word to avoid conflicts with Org.OpenAPITools.Client
// this is a workaround and can be removed if c# api client is updated to use
// fully qualified name
"Client", "client", "parameter",
// local variable names in API methods (endpoints)
"localVarPath", "localVarPathParams", "localVarQueryParams", "localVarHeaderParams",
"localVarFormParams", "localVarFileParams", "localVarStatusCode", "localVarResponse",
"localVarPostBody", "localVarHttpHeaderAccepts", "localVarHttpHeaderAccept",
"localVarHttpContentTypes", "localVarHttpContentType",
"localVarStatusCode",
// C# reserved words
"abstract", "as", "base", "bool", "break", "byte", "case", "catch", "char", "checked",
"class", "const", "continue", "decimal", "default", "delegate", "do", "double", "else",
"enum", "event", "explicit", "extern", "false", "finally", "fixed", "float", "for",
"foreach", "goto", "if", "implicit", "in", "int", "interface", "internal", "is", "lock",
"long", "namespace", "new", "null", "object", "operator", "out", "override", "params",
"private", "protected", "public", "readonly", "ref", "return", "sbyte", "sealed",
"short", "sizeof", "stackalloc", "static", "string", "struct", "switch", "this", "throw",
"true", "try", "typeof", "uint", "ulong", "unchecked", "unsafe", "ushort", "using",
"virtual", "void", "volatile", "while")
);
// TODO: Either include fully qualified names here or handle in DefaultCodegen via lastIndexOf(".") search
languageSpecificPrimitives = new HashSet<String>(
Arrays.asList(
"String",
"string",
"bool?",
"bool",
"double?",
"double",
"decimal?",
"decimal",
"int?",
"int",
"long?",
"long",
"float?",
"float",
"byte[]",
"ICollection",
"Collection",
"List",
"Dictionary",
"DateTime?",
"DateTime",
"DateTimeOffset?",
"DateTimeOffset",
"Boolean",
"Double",
"Int32",
"Int64",
"Float",
"Guid?",
"Guid",
"System.IO.Stream", // not really a primitive, we include it to avoid model import
"Object")
);
instantiationTypes.put("array", "List");
instantiationTypes.put("list", "List");
instantiationTypes.put("map", "Dictionary");
// Nullable types here assume C# 2 support is not part of base
typeMapping = new HashMap<String, String>();
typeMapping.put("string", "string");
typeMapping.put("binary", "byte[]");
typeMapping.put("ByteArray", "byte[]");
typeMapping.put("boolean", "bool?");
typeMapping.put("integer", "int?");
typeMapping.put("float", "float?");
typeMapping.put("long", "long?");
typeMapping.put("double", "double?");
typeMapping.put("number", "decimal?");
typeMapping.put("BigDecimal", "decimal?");
typeMapping.put("DateTime", "DateTime?");
typeMapping.put("date", "DateTime?");
typeMapping.put("file", "System.IO.Stream");
typeMapping.put("array", "List");
typeMapping.put("list", "List");
typeMapping.put("map", "Dictionary");
typeMapping.put("object", "Object");
typeMapping.put("UUID", "Guid?");
typeMapping.put("URI", "string");
// nullable type
nullableType = new HashSet<String>(
Arrays.asList("decimal", "bool", "int", "float", "long", "double", "DateTime", "DateTimeOffset", "Guid")
);
// value Types
valueTypes = new HashSet<String>(
Arrays.asList("decimal", "bool", "int", "float", "long", "double")
);
}
public void setReturnICollection(boolean returnICollection) {
this.returnICollection = returnICollection;
}
public void setUseCollection(boolean useCollection) {
this.useCollection = useCollection;
if (useCollection) {
typeMapping.put("array", "Collection");
typeMapping.put("list", "Collection");
instantiationTypes.put("array", "Collection");
instantiationTypes.put("list", "Collection");
}
}
public void setOptionalMethodArgumentFlag(boolean flag) {
this.optionalMethodArgumentFlag = flag;
}
public void setNetCoreProjectFileFlag(boolean flag) {
this.netCoreProjectFileFlag = flag;
}
public void useDateTimeOffset(boolean flag) {
this.useDateTimeOffsetFlag = flag;
if (flag) {
typeMapping.put("DateTime", "DateTimeOffset");
} else {
typeMapping.put("DateTime", "DateTime");
}
}
@Override
public void processOpts() {
super.processOpts();
if (StringUtils.isEmpty(System.getenv("CSHARP_POST_PROCESS_FILE"))) {
LOGGER.info("Environment variable CSHARP_POST_PROCESS_FILE not defined so the C# code may not be properly formatted by uncrustify (0.66 or later) or other code formatter. To define it, try `export CSHARP_POST_PROCESS_FILE=\"/usr/local/bin/uncrustify --no-backup\" && export UNCRUSTIFY_CONFIG=/path/to/uncrustify-rules.cfg` (Linux/Mac). Note: replace /path/to with the location of uncrustify-rules.cfg");
LOGGER.info("NOTE: To enable file post-processing, 'enablePostProcessFile' must be set to `true` (--enable-post-process-file for CLI).");
}
// License info
if (additionalProperties.containsKey(CodegenConstants.LICENSE_URL)) {
setLicenseUrl((String) additionalProperties.get(CodegenConstants.LICENSE_URL));
} else {
additionalProperties.put(CodegenConstants.LICENSE_URL, this.licenseUrl);
}
if (additionalProperties.containsKey(CodegenConstants.LICENSE_NAME)) {
setLicenseName((String) additionalProperties.get(CodegenConstants.LICENSE_NAME));
} else {
additionalProperties.put(CodegenConstants.LICENSE_NAME, this.licenseName);
}
// {{packageVersion}}
if (additionalProperties.containsKey(CodegenConstants.PACKAGE_VERSION)) {
setPackageVersion((String) additionalProperties.get(CodegenConstants.PACKAGE_VERSION));
} else {
additionalProperties.put(CodegenConstants.PACKAGE_VERSION, packageVersion);
}
// {{sourceFolder}}
if (additionalProperties.containsKey(CodegenConstants.SOURCE_FOLDER)) {
setSourceFolder((String) additionalProperties.get(CodegenConstants.SOURCE_FOLDER));
} else {
additionalProperties.put(CodegenConstants.SOURCE_FOLDER, this.sourceFolder);
}
// {{packageName}}
if (additionalProperties.containsKey(CodegenConstants.PACKAGE_NAME)) {
setPackageName((String) additionalProperties.get(CodegenConstants.PACKAGE_NAME));
} else {
additionalProperties.put(CodegenConstants.PACKAGE_NAME, packageName);
}
if (additionalProperties.containsKey(CodegenConstants.INVOKER_PACKAGE)) {
LOGGER.warn(String.format(Locale.ROOT, "%s is not used by C# generators. Please use %s",
CodegenConstants.INVOKER_PACKAGE, CodegenConstants.PACKAGE_NAME));
}
// {{packageTitle}}
if (additionalProperties.containsKey(CodegenConstants.PACKAGE_TITLE)) {
setPackageTitle((String) additionalProperties.get(CodegenConstants.PACKAGE_TITLE));
} else {
additionalProperties.put(CodegenConstants.PACKAGE_TITLE, packageTitle);
}
// {{packageProductName}}
if (additionalProperties.containsKey(CodegenConstants.PACKAGE_PRODUCTNAME)) {
setPackageProductName((String) additionalProperties.get(CodegenConstants.PACKAGE_PRODUCTNAME));
} else {
additionalProperties.put(CodegenConstants.PACKAGE_PRODUCTNAME, packageProductName);
}
// {{packageDescription}}
if (additionalProperties.containsKey(CodegenConstants.PACKAGE_DESCRIPTION)) {
setPackageDescription((String) additionalProperties.get(CodegenConstants.PACKAGE_DESCRIPTION));
} else {
additionalProperties.put(CodegenConstants.PACKAGE_DESCRIPTION, packageDescription);
}
// {{packageCompany}}
if (additionalProperties.containsKey(CodegenConstants.PACKAGE_COMPANY)) {
setPackageCompany((String) additionalProperties.get(CodegenConstants.PACKAGE_COMPANY));
} else {
additionalProperties.put(CodegenConstants.PACKAGE_COMPANY, packageCompany);
}
// {{packageCopyright}}
if (additionalProperties.containsKey(CodegenConstants.PACKAGE_COPYRIGHT)) {
setPackageCopyright((String) additionalProperties.get(CodegenConstants.PACKAGE_COPYRIGHT));
} else {
additionalProperties.put(CodegenConstants.PACKAGE_COPYRIGHT, packageCopyright);
}
// {{packageAuthors}}
if (additionalProperties.containsKey(CodegenConstants.PACKAGE_AUTHORS)) {
setPackageAuthors((String) additionalProperties.get(CodegenConstants.PACKAGE_AUTHORS));
} else {
additionalProperties.put(CodegenConstants.PACKAGE_AUTHORS, packageAuthors);
}
// {{useDateTimeOffset}}
if (additionalProperties.containsKey(CodegenConstants.USE_DATETIME_OFFSET)) {
useDateTimeOffset(convertPropertyToBooleanAndWriteBack(CodegenConstants.USE_DATETIME_OFFSET));
} else {
additionalProperties.put(CodegenConstants.USE_DATETIME_OFFSET, useDateTimeOffsetFlag);
}
if (additionalProperties.containsKey(CodegenConstants.USE_COLLECTION)) {
setUseCollection(convertPropertyToBooleanAndWriteBack(CodegenConstants.USE_COLLECTION));
} else {
additionalProperties.put(CodegenConstants.USE_COLLECTION, useCollection);
}
if (additionalProperties.containsKey(CodegenConstants.RETURN_ICOLLECTION)) {
setReturnICollection(convertPropertyToBooleanAndWriteBack(CodegenConstants.RETURN_ICOLLECTION));
} else {
additionalProperties.put(CodegenConstants.RETURN_ICOLLECTION, returnICollection);
}
if (additionalProperties.containsKey(CodegenConstants.NETCORE_PROJECT_FILE)) {
setNetCoreProjectFileFlag(convertPropertyToBooleanAndWriteBack(CodegenConstants.NETCORE_PROJECT_FILE));
} else {
additionalProperties.put(CodegenConstants.NETCORE_PROJECT_FILE, netCoreProjectFileFlag);
}
if (additionalProperties.containsKey(CodegenConstants.INTERFACE_PREFIX)) {
String useInterfacePrefix = additionalProperties.get(CodegenConstants.INTERFACE_PREFIX).toString();
if ("false".equals(useInterfacePrefix.toLowerCase(Locale.ROOT))) {
setInterfacePrefix("");
} else if (!"true".equals(useInterfacePrefix.toLowerCase(Locale.ROOT))) {
// NOTE: if user passes "true" explicitly, we use the default I- prefix. The other supported case here is a custom prefix.
setInterfacePrefix(sanitizeName(useInterfacePrefix));
}
}
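        // For illustration (hypothetical CLI invocation): passing
        // `--additional-properties interfacePrefix=false` clears the default "I"
        // prefix above, while any value other than "true"/"false" is sanitized
        // and used as a custom prefix.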
if (additionalProperties().containsKey(CodegenConstants.ENUM_NAME_SUFFIX)) {
setEnumNameSuffix(additionalProperties.get(CodegenConstants.ENUM_NAME_SUFFIX).toString());
}
if (additionalProperties().containsKey(CodegenConstants.ENUM_VALUE_SUFFIX)) {
setEnumValueSuffix(additionalProperties.get(CodegenConstants.ENUM_VALUE_SUFFIX).toString());
}
// This either updates additionalProperties with the above fixes, or sets the default if the option was not specified.
additionalProperties.put(CodegenConstants.INTERFACE_PREFIX, interfacePrefix);
}
@Override
protected Builder<String, Lambda> addMustacheLambdas() {
return super.addMustacheLambdas()
.put("camelcase_param", new CamelCaseLambda().generator(this).escapeAsParamName(true));
}
@Override
public void postProcessModelProperty(CodegenModel model, CodegenProperty property) {
super.postProcessModelProperty(model, property);
}
@Override
public Map<String, Object> postProcessModels(Map<String, Object> objs) {
List<Object> models = (List<Object>) objs.get("models");
for (Object _mo : models) {
Map<String, Object> mo = (Map<String, Object>) _mo;
CodegenModel cm = (CodegenModel) mo.get("model");
for (CodegenProperty var : cm.vars) {
// check to see if model name is same as the property name
// which will result in compilation error
// if found, prepend with _ to workaround the limitation
if (var.name.equalsIgnoreCase(cm.name)) {
var.name = "_" + var.name;
}
}
}
// process enum in models
return postProcessModelsEnum(objs);
}
/**
* Invoked by {@link DefaultGenerator} after all models have been post-processed, allowing for a last pass of codegen-specific model cleanup.
*
* @param objs Current state of codegen object model.
* @return An in-place modified state of the codegen object model.
*/
@Override
public Map<String, Object> postProcessAllModels(Map<String, Object> objs) {
final Map<String, Object> processed = super.postProcessAllModels(objs);
postProcessEnumRefs(processed);
updateValueTypeProperty(processed);
updateNullableTypeProperty(processed);
return processed;
}
/**
* C# differs from other languages in that Enums are not _true_ objects; enums are compiled to integral types.
     * So, in C#, an enum is considered more like a user-defined primitive.
* <p>
* When working with enums, we can't always assume a RefModel is a nullable type (where default(YourType) == null),
* so this post processing runs through all models to find RefModel'd enums. Then, it runs through all vars and modifies
* those vars referencing RefModel'd enums to work the same as inlined enums rather than as objects.
*
* @param models processed models to be further processed for enum references
*/
@SuppressWarnings({"unchecked"})
private void postProcessEnumRefs(final Map<String, Object> models) {
Map<String, CodegenModel> enumRefs = new HashMap<String, CodegenModel>();
for (Map.Entry<String, Object> entry : models.entrySet()) {
CodegenModel model = ModelUtils.getModelByName(entry.getKey(), models);
if (model.isEnum) {
enumRefs.put(entry.getKey(), model);
}
}
for (Map.Entry<String, Object> entry : models.entrySet()) {
String openAPIName = entry.getKey();
CodegenModel model = ModelUtils.getModelByName(openAPIName, models);
if (model != null) {
for (CodegenProperty var : model.allVars) {
if (enumRefs.containsKey(var.dataType)) {
// Handle any enum properties referred to by $ref.
// This is different in C# than most other generators, because enums in C# are compiled to integral types,
// while enums in many other languages are true objects.
CodegenModel refModel = enumRefs.get(var.dataType);
var.allowableValues = refModel.allowableValues;
var.isEnum = true;
// We do these after updateCodegenPropertyEnum to avoid generalities that don't mesh with C#.
var.isPrimitiveType = true;
}
}
for (CodegenProperty var : model.vars) {
if (enumRefs.containsKey(var.dataType)) {
// Handle any enum properties referred to by $ref.
// This is different in C# than most other generators, because enums in C# are compiled to integral types,
// while enums in many other languages are true objects.
CodegenModel refModel = enumRefs.get(var.dataType);
var.allowableValues = refModel.allowableValues;
var.isEnum = true;
// We do these after updateCodegenPropertyEnum to avoid generalities that don't mesh with C#.
var.isPrimitiveType = true;
}
}
for (CodegenProperty var : model.readWriteVars) {
if (enumRefs.containsKey(var.dataType)) {
// Handle any enum properties referred to by $ref.
// This is different in C# than most other generators, because enums in C# are compiled to integral types,
// while enums in many other languages are true objects.
CodegenModel refModel = enumRefs.get(var.dataType);
var.allowableValues = refModel.allowableValues;
var.isEnum = true;
// We do these after updateCodegenPropertyEnum to avoid generalities that don't mesh with C#.
var.isPrimitiveType = true;
}
}
for (CodegenProperty var : model.readOnlyVars) {
if (enumRefs.containsKey(var.dataType)) {
// Handle any enum properties referred to by $ref.
// This is different in C# than most other generators, because enums in C# are compiled to integral types,
// while enums in many other languages are true objects.
CodegenModel refModel = enumRefs.get(var.dataType);
var.allowableValues = refModel.allowableValues;
var.isEnum = true;
// We do these after updateCodegenPropertyEnum to avoid generalities that don't mesh with C#.
var.isPrimitiveType = true;
}
}
// We're looping all models here.
if (model.isEnum) {
// We now need to make allowableValues.enumVars look like the context of CodegenProperty
Boolean isString = false;
Boolean isInteger = false;
Boolean isLong = false;
Boolean isByte = false;
if (model.dataType.startsWith("byte")) {
                        // C# actually supports byte and short enums; the OpenAPI spec only supports byte.
isByte = true;
model.vendorExtensions.put("x-enum-byte", true);
} else if (model.dataType.startsWith("int32")) {
isInteger = true;
model.vendorExtensions.put("x-enum-integer", true);
} else if (model.dataType.startsWith("int64")) {
isLong = true;
model.vendorExtensions.put("x-enum-long", true);
} else {
// C# doesn't support non-integral enums, so we need to treat everything else as strings (e.g. to not lose precision or data integrity)
isString = true;
model.vendorExtensions.put("x-enum-string", true);
}
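                    // For illustration (hypothetical spec snippet): a string schema
                    // such as { type: string, enum: [placed, shipped] } has no
                    // integral format, lands in this branch, and is emitted as a
                    // string-backed C# enum via the x-enum-string extension.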
                    // Since we iterate enumVars for modelInnerEnum and enumClass templates, and CodegenModel is missing some of CodegenProperty's properties,
// we can take advantage of Mustache's contextual lookup to add the same "properties" to the model's enumVars scope rather than CodegenProperty's scope.
List<Map<String, String>> enumVars = (ArrayList<Map<String, String>>) model.allowableValues.get("enumVars");
List<Map<String, Object>> newEnumVars = new ArrayList<Map<String, Object>>();
for (Map<String, String> enumVar : enumVars) {
Map<String, Object> mixedVars = new HashMap<String, Object>();
mixedVars.putAll(enumVar);
mixedVars.put("isString", isString);
mixedVars.put("isLong", isLong);
mixedVars.put("isInteger", isInteger);
mixedVars.put("isByte", isByte);
newEnumVars.add(mixedVars);
}
if (!newEnumVars.isEmpty()) {
model.allowableValues.put("enumVars", newEnumVars);
}
}
} else {
LOGGER.warn("Expected to retrieve model %s by name, but no model was found. Check your -Dmodels inclusions.", openAPIName);
}
}
}
/**
* Update codegen property's enum by adding "enumVars" (with name and value)
*
* @param var list of CodegenProperty
*/
@Override
public void updateCodegenPropertyEnum(CodegenProperty var) {
if (var.vendorExtensions == null) {
var.vendorExtensions = new HashMap<>();
}
super.updateCodegenPropertyEnum(var);
// Because C# uses nullable primitives for datatype, and datatype is used in DefaultCodegen for determining enum-ness, guard against weirdness here.
if (var.isEnum) {
if ("byte".equals(var.dataFormat)) {// C# Actually supports byte and short enums.
var.vendorExtensions.put("x-enum-byte", true);
var.isString = false;
var.isLong = false;
var.isInteger = false;
} else if ("int32".equals(var.dataFormat)) {
var.isInteger = true;
var.isString = false;
var.isLong = false;
} else if ("int64".equals(var.dataFormat)) {
var.isLong = true;
var.isString = false;
var.isInteger = false;
} else {// C# doesn't support non-integral enums, so we need to treat everything else as strings (e.g. to not lose precision or data integrity)
var.isString = true;
var.isInteger = false;
var.isLong = false;
}
}
}
/**
* Update property if it is a C# value type
*
* @param models list of all models
*/
protected void updateValueTypeProperty(Map<String, Object> models) {
for (Map.Entry<String, Object> entry : models.entrySet()) {
String openAPIName = entry.getKey();
CodegenModel model = ModelUtils.getModelByName(openAPIName, models);
if (model != null) {
for (CodegenProperty var : model.vars) {
var.vendorExtensions.put("x-is-value-type", isValueType(var));
}
}
}
}
/**
* Update property if it is a C# nullable type
*
* @param models list of all models
*/
protected void updateNullableTypeProperty(Map<String, Object> models) {
for (Map.Entry<String, Object> entry : models.entrySet()) {
String openAPIName = entry.getKey();
CodegenModel model = ModelUtils.getModelByName(openAPIName, models);
if (model != null) {
for (CodegenProperty var : model.vars) {
if (!var.isContainer && (nullableType.contains(var.dataType) || var.isEnum)) {
var.vendorExtensions.put("x-csharp-value-type", true);
}
}
}
}
}
@Override
public Map<String, Object> postProcessOperationsWithModels(Map<String, Object> objs, List<Object> allModels) {
super.postProcessOperationsWithModels(objs, allModels);
if (objs != null) {
Map<String, Object> operations = (Map<String, Object>) objs.get("operations");
if (operations != null) {
List<CodegenOperation> ops = (List<CodegenOperation>) operations.get("operation");
for (CodegenOperation operation : ops) {
// Check return types for collection
if (operation.returnType != null) {
String typeMapping;
int namespaceEnd = operation.returnType.lastIndexOf(".");
if (namespaceEnd > 0) {
typeMapping = operation.returnType.substring(namespaceEnd);
} else {
typeMapping = operation.returnType;
}
if (this.collectionTypes.contains(typeMapping)) {
operation.isListContainer = true;
operation.returnContainer = operation.returnType;
if (this.returnICollection && (
typeMapping.startsWith("List") ||
typeMapping.startsWith("Collection"))) {
// NOTE: ICollection works for both List<T> and Collection<T>
int genericStart = typeMapping.indexOf("<");
if (genericStart > 0) {
operation.returnType = "ICollection" + typeMapping.substring(genericStart);
}
}
} else {
operation.returnContainer = operation.returnType;
operation.isMapContainer = this.mapTypes.contains(typeMapping);
}
}
if (operation.examples != null) {
for (Map<String, String> example : operation.examples) {
for (Map.Entry<String, String> entry : example.entrySet()) {
// Replace " with \", \r, \n with \\r, \\n
String val = entry.getValue().replace("\"", "\\\"")
.replace("\r", "\\r")
.replace("\n", "\\n");
entry.setValue(val);
}
}
}
if (!isSupportNullable()) {
for (CodegenParameter parameter : operation.allParams) {
CodegenModel model = null;
for (Object modelHashMap : allModels) {
CodegenModel codegenModel = ((HashMap<String, CodegenModel>) modelHashMap).get("model");
if (codegenModel.getClassname().equals(parameter.dataType)) {
model = codegenModel;
break;
}
}
if (model == null) {
// Primitive data types all come already marked
parameter.isNullable = true;
} else {
// Effectively mark enum models as enums and non-nullable
if (model.isEnum) {
parameter.isEnum = true;
parameter.allowableValues = model.allowableValues;
parameter.isPrimitiveType = true;
parameter.isNullable = false;
} else {
parameter.isNullable = true;
}
}
}
} else {
// Effectively mark enum models as enums
updateCodegenParametersEnum(operation.allParams, allModels);
}
processOperation(operation);
}
}
}
return objs;
}
protected void processOperation(CodegenOperation operation) {
// default noop
}
private void updateCodegenParametersEnum(List<CodegenParameter> parameters, List<Object> allModels) {
for (CodegenParameter parameter : parameters) {
CodegenModel model = null;
for (Object modelHashMap : allModels) {
CodegenModel codegenModel = ((HashMap<String, CodegenModel>) modelHashMap).get("model");
if (codegenModel.getClassname().equals(parameter.dataType)) {
model = codegenModel;
break;
}
}
if (model != null) {
// Effectively mark enum models as enums and non-nullable
if (model.isEnum) {
parameter.isEnum = true;
parameter.allowableValues = model.allowableValues;
parameter.isPrimitiveType = true;
parameter.vendorExtensions.put("x-csharp-value-type", true);
}
}
if (!parameter.isContainer && nullableType.contains(parameter.dataType)) {
parameter.vendorExtensions.put("x-csharp-value-type", true);
}
if (!parameter.required && parameter.vendorExtensions.get("x-csharp-value-type") != null) { //optional
parameter.dataType = parameter.dataType + "?";
}
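            // e.g. (illustrative): an optional parameter of value type "int", or an
            // optional enum-model parameter, is rewritten here to its nullable form
            // such as "int?".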
}
}
@Override
public String apiFileFolder() {
return outputFolder + File.separator + sourceFolder + File.separator + packageName + File.separator + apiPackage();
}
@Override
public String modelFileFolder() {
return outputFolder + File.separator + sourceFolder + File.separator + packageName + File.separator + modelPackage();
}
@Override
public String toModelFilename(String name) {
// should be the same as the model name
return toModelName(name);
}
@Override
public String toOperationId(String operationId) {
// throw exception if method name is empty (should not occur as an auto-generated method name will be used)
if (StringUtils.isEmpty(operationId)) {
throw new RuntimeException("Empty method name (operationId) not allowed");
}
// method name cannot use reserved keyword, e.g. return
if (isReservedWord(operationId)) {
LOGGER.warn(operationId + " (reserved word) cannot be used as method name. Renamed to " + camelize(sanitizeName("call_" + operationId)));
operationId = "call_" + operationId;
}
// operationId starts with a number
if (operationId.matches("^\\d.*")) {
LOGGER.warn(operationId + " (starting with a number) cannot be used as method name. Renamed to " + camelize(sanitizeName("call_" + operationId)));
operationId = "call_" + operationId;
}
return camelize(sanitizeName(operationId));
}
@Override
public String toVarName(String name) {
// sanitize name
name = sanitizeName(name);
        // if it's all upper case, do nothing
if (name.matches("^[A-Z_]*$")) {
return name;
}
// camelize the variable name
// pet_id => PetId
name = camelize(name);
// for reserved word or word starting with number, append _
if (isReservedWord(name) || name.matches("^\\d.*")) {
name = escapeReservedWord(name);
}
return name;
}
@Override
public String toParamName(String name) {
// sanitize name
name = sanitizeName(name);
// replace - with _ e.g. created-at => created_at
name = name.replaceAll("-", "_");
        // if it's all upper case, do nothing
if (name.matches("^[A-Z_]*$")) {
return name;
}
// camelize(lower) the variable name
// pet_id => petId
name = camelize(name, true);
// for reserved word or word starting with number, append _
if (isReservedWord(name) || name.matches("^\\d.*")) {
name = escapeReservedWord(name);
}
return name;
}
@Override
public String escapeReservedWord(String name) {
if (this.reservedWordsMappings().containsKey(name)) {
return this.reservedWordsMappings().get(name);
}
return "_" + name;
}
/**
* Return the example value of the property
*
* @param p OpenAPI property object
* @return string presentation of the example value of the property
*/
@Override
public String toExampleValue(Schema p) {
if (ModelUtils.isStringSchema(p)) {
if (p.getExample() != null) {
return "\"" + p.getExample().toString() + "\"";
}
} else if (ModelUtils.isBooleanSchema(p)) {
if (p.getExample() != null) {
return p.getExample().toString();
}
} else if (ModelUtils.isDateSchema(p)) {
// TODO
} else if (ModelUtils.isDateTimeSchema(p)) {
// TODO
} else if (ModelUtils.isNumberSchema(p)) {
if (p.getExample() != null) {
return p.getExample().toString();
}
} else if (ModelUtils.isIntegerSchema(p)) {
if (p.getExample() != null) {
return p.getExample().toString();
}
}
return null;
}
/**
* Return the default value of the property
* @param p OpenAPI property object
* @return string presentation of the default value of the property
*/
@Override
public String toDefaultValue(Schema p) {
if (ModelUtils.isBooleanSchema(p)) {
if (p.getDefault() != null) {
return p.getDefault().toString();
}
} else if (ModelUtils.isDateSchema(p)) {
if (p.getDefault() != null) {
return "\"" + p.getDefault().toString() + "\"";
}
} else if (ModelUtils.isDateTimeSchema(p)) {
if (p.getDefault() != null) {
return "\"" + p.getDefault().toString() + "\"";
}
} else if (ModelUtils.isNumberSchema(p)) {
if (p.getDefault() != null) {
if (ModelUtils.isFloatSchema(p)) { // float
return p.getDefault().toString() + "F";
} else if (ModelUtils.isDoubleSchema(p)) { // double
return p.getDefault().toString() + "D";
} else { // decimal
return p.getDefault().toString() + "M";
}
}
} else if (ModelUtils.isIntegerSchema(p)) {
if (p.getDefault() != null) {
return p.getDefault().toString();
}
} else if (ModelUtils.isStringSchema(p)) {
if (p.getDefault() != null) {
String _default = (String) p.getDefault();
if (p.getEnum() == null) {
return "\"" + _default + "\"";
} else {
// convert to enum var name later in postProcessModels
return _default;
}
}
}
return null;
}
@Override
protected boolean isReservedWord(String word) {
// NOTE: This differs from super's implementation in that C# does _not_ want case insensitive matching.
return reservedWords.contains(word);
}
public String getNullableType(Schema p, String type) {
if (languageSpecificPrimitives.contains(type)) {
return type;
} else {
return null;
}
}
@Override
public String getSchemaType(Schema p) {
String openAPIType = super.getSchemaType(p);
String type;
if (openAPIType == null) {
LOGGER.error("OpenAPI Type for {} is null. Default to UNKNOWN_OPENAPI_TYPE instead.", p.getName());
openAPIType = "UNKNOWN_OPENAPI_TYPE";
}
if (typeMapping.containsKey(openAPIType)) {
type = typeMapping.get(openAPIType);
String languageType = getNullableType(p, type);
if (languageType != null) {
return languageType;
}
} else {
type = openAPIType;
}
return toModelName(type);
}
/**
* Provides C# strongly typed declaration for simple arrays of some type and arrays of arrays of some type.
*
* @param arr The input array property
* @return The type declaration when the type is an array of arrays.
*/
private String getArrayTypeDeclaration(ArraySchema arr) {
// TODO: collection type here should be fully qualified namespace to avoid model conflicts
// This supports arrays of arrays.
String arrayType = typeMapping.get("array");
StringBuilder instantiationType = new StringBuilder(arrayType);
Schema items = arr.getItems();
String nestedType = getTypeDeclaration(items);
// TODO: We may want to differentiate here between generics and primitive arrays.
instantiationType.append("<").append(nestedType).append(">");
return instantiationType.toString();
}
@Override
public String toInstantiationType(Schema p) {
if (ModelUtils.isArraySchema(p)) {
return getArrayTypeDeclaration((ArraySchema) p);
}
return super.toInstantiationType(p);
}
@Override
public String getTypeDeclaration(Schema p) {
if (ModelUtils.isArraySchema(p)) {
return getArrayTypeDeclaration((ArraySchema) p);
} else if (ModelUtils.isMapSchema(p)) {
// Should we also support maps of maps?
Schema inner = getAdditionalProperties(p);
return getSchemaType(p) + "<string, " + getTypeDeclaration(inner) + ">";
}
return super.getTypeDeclaration(p);
}
@Override
public String toModelName(String name) {
// We need to check if import-mapping has a different model for this class, so we use it
// instead of the auto-generated one.
if (importMapping.containsKey(name)) {
return importMapping.get(name);
}
if (!StringUtils.isEmpty(modelNamePrefix)) {
name = modelNamePrefix + "_" + name;
}
if (!StringUtils.isEmpty(modelNameSuffix)) {
name = name + "_" + modelNameSuffix;
}
name = sanitizeName(name);
// model name cannot use reserved keyword, e.g. return
if (isReservedWord(name)) {
LOGGER.warn(name + " (reserved word) cannot be used as model name. Renamed to " + camelize("model_" + name));
name = "model_" + name; // e.g. return => ModelReturn (after camelize)
}
// model name starts with number
if (name.matches("^\\d.*")) {
LOGGER.warn(name + " (model name starts with number) cannot be used as model name. Renamed to " + camelize("model_" + name));
name = "model_" + name; // e.g. 200Response => Model200Response (after camelize)
}
// camelize the model name
// phone_number => PhoneNumber
return camelize(name);
}
@Override
public String apiTestFileFolder() {
return outputFolder + ".Test";
}
@Override
public String modelTestFileFolder() {
return outputFolder + ".Test";
}
@Override
public String toApiTestFilename(String name) {
return toApiName(name) + "Tests";
}
@Override
public String toModelTestFilename(String name) {
return toModelName(name) + "Tests";
}
public void setLicenseUrl(String licenseUrl) {this.licenseUrl = licenseUrl;}
public void setLicenseName(String licenseName) {this.licenseName = licenseName;}
public void setPackageName(String packageName) {
this.packageName = packageName;
}
public void setPackageVersion(String packageVersion) {
this.packageVersion = packageVersion;
}
public void setPackageTitle(String packageTitle) {
this.packageTitle = packageTitle;
}
public void setPackageProductName(String packageProductName) {
this.packageProductName = packageProductName;
}
public void setPackageDescription(String packageDescription) {
this.packageDescription = packageDescription;
}
public void setPackageCompany(String packageCompany) {
this.packageCompany = packageCompany;
}
public void setPackageCopyright(String packageCopyright) {
this.packageCopyright = packageCopyright;
}
public void setPackageAuthors(String packageAuthors) {
this.packageAuthors = packageAuthors;
}
public void setSourceFolder(String sourceFolder) {
this.sourceFolder = sourceFolder;
}
public String getInterfacePrefix() {
return interfacePrefix;
}
public void setInterfacePrefix(final String interfacePrefix) {
this.interfacePrefix = interfacePrefix;
}
public void setEnumNameSuffix(final String enumNameSuffix) {
this.enumNameSuffix = enumNameSuffix;
}
public void setEnumValueSuffix(final String enumValueSuffix) {
this.enumValueSuffix = enumValueSuffix;
}
public boolean isSupportNullable() {
return supportNullable;
}
public void setSupportNullable(final boolean supportNullable) {
this.supportNullable = supportNullable;
}
@Override
public String toEnumValue(String value, String datatype) {
// C# only supports enums as literals for int, int?, long, long?, byte, and byte?. All else must be treated as strings.
// Per: https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/keywords/enum
// The approved types for an enum are byte, sbyte, short, ushort, int, uint, long, or ulong.
// but we're not supporting unsigned integral types or shorts.
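        // e.g. (illustrative): toEnumValue("2", "int?") yields the bare literal
        // "2", while toEnumValue("available", "string") yields the escaped text.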
if (datatype.startsWith("int") || datatype.startsWith("long") || datatype.startsWith("byte")) {
return value;
}
return escapeText(value);
}
@Override
public String toEnumVarName(String name, String datatype) {
if (name.length() == 0) {
return "Empty";
}
// for symbol, e.g. $, #
if (getSymbolName(name) != null) {
return camelize(getSymbolName(name));
}
String enumName = sanitizeName(name);
enumName = enumName.replaceFirst("^_", "");
enumName = enumName.replaceFirst("_$", "");
enumName = camelize(enumName) + this.enumValueSuffix;
if (enumName.matches("\\d.*")) { // starts with number
return "_" + enumName;
} else {
return enumName;
}
}
@Override
public String toEnumName(CodegenProperty property) {
return sanitizeName(camelize(property.name)) + this.enumNameSuffix;
}
public String testPackageName() {
return this.packageName + ".Test";
}
@Override
public String escapeQuotationMark(String input) {
// remove " to avoid code injection
return input.replace("\"", "");
}
@Override
public String escapeUnsafeCharacters(String input) {
return input.replace("*/", "*_/").replace("/*", "/_*").replace("--", "- -");
}
@Override
public boolean isDataTypeString(String dataType) {
// also treat double/decimal/float as "string" in enum so that the values (e.g. 2.8) get double-quoted
return "String".equalsIgnoreCase(dataType) ||
"double?".equals(dataType) || "decimal?".equals(dataType) || "float?".equals(dataType) ||
"double".equals(dataType) || "decimal".equals(dataType) || "float".equals(dataType);
}
/**
* Return true if the property being passed is a C# value type
*
* @param var property
* @return true if property is a value type
*/
protected boolean isValueType(CodegenProperty var) {
return (valueTypes.contains(var.dataType) || var.isEnum ) ;
}
@Override
public void setParameterExampleValue(CodegenParameter codegenParameter) {
// set the example value
// if not specified in x-example, generate a default value
// TODO need to revise how to obtain the example value
if (codegenParameter.vendorExtensions != null && codegenParameter.vendorExtensions.containsKey("x-example")) {
codegenParameter.example = Json.pretty(codegenParameter.vendorExtensions.get("x-example"));
} else if (Boolean.TRUE.equals(codegenParameter.isBoolean)) {
codegenParameter.example = "true";
} else if (Boolean.TRUE.equals(codegenParameter.isLong)) {
codegenParameter.example = "789";
} else if (Boolean.TRUE.equals(codegenParameter.isInteger)) {
codegenParameter.example = "56";
} else if (Boolean.TRUE.equals(codegenParameter.isFloat)) {
codegenParameter.example = "3.4F";
} else if (Boolean.TRUE.equals(codegenParameter.isDouble)) {
codegenParameter.example = "1.2D";
} else if (Boolean.TRUE.equals(codegenParameter.isNumber)) {
codegenParameter.example = "8.14";
} else if (Boolean.TRUE.equals(codegenParameter.isBinary)) {
codegenParameter.example = "BINARY_DATA_HERE";
} else if (Boolean.TRUE.equals(codegenParameter.isByteArray)) {
codegenParameter.example = "BYTE_ARRAY_DATA_HERE";
} else if (Boolean.TRUE.equals(codegenParameter.isFile)) {
codegenParameter.example = "/path/to/file.txt";
} else if (Boolean.TRUE.equals(codegenParameter.isDate)) {
codegenParameter.example = "2013-10-20";
} else if (Boolean.TRUE.equals(codegenParameter.isDateTime)) {
codegenParameter.example = "2013-10-20T19:20:30+01:00";
} else if (Boolean.TRUE.equals(codegenParameter.isUuid)) {
codegenParameter.example = "38400000-8cf0-11bd-b23e-10b96e4ef00d";
} else if (Boolean.TRUE.equals(codegenParameter.isUri)) {
codegenParameter.example = "https://openapi-generator.tech";
} else if (Boolean.TRUE.equals(codegenParameter.isString)) {
codegenParameter.example = codegenParameter.paramName + "_example";
}
}
@Override
public void postProcessFile(File file, String fileType) {
if (file == null) {
return;
}
String csharpPostProcessFile = System.getenv("CSHARP_POST_PROCESS_FILE");
if (StringUtils.isEmpty(csharpPostProcessFile)) {
return; // skip if CSHARP_POST_PROCESS_FILE env variable is not defined
}
// only process files with .cs extension
if ("cs".equals(FilenameUtils.getExtension(file.toString()))) {
String command = csharpPostProcessFile + " " + file.toString();
try {
Process p = Runtime.getRuntime().exec(command);
int exitValue = p.waitFor();
if (exitValue != 0) {
LOGGER.error("Error running the command ({}). Exit code: {}", command, exitValue);
} else {
LOGGER.info("Successfully executed: " + command);
}
} catch (Exception e) {
LOGGER.error("Error running the command ({}). Exception: {}", command, e.getMessage());
}
}
}
}
| [
"\"CSHARP_POST_PROCESS_FILE\"",
"\"CSHARP_POST_PROCESS_FILE\""
]
| []
| [
"CSHARP_POST_PROCESS_FILE"
]
| [] | ["CSHARP_POST_PROCESS_FILE"] | java | 1 | 0 | |
python/tvm/rpc/server.py | """RPC server implementation.
Note
----
Server is TCP based with the following protocol:
- Initial handshake to the peer
- [RPC_MAGIC, keysize(int32), key-bytes]
- The key is in format
- {server|client}:device-type[:random-key] [-timeout=timeout]
"""
# pylint: disable=invalid-name
from __future__ import absolute_import
import os
import ctypes
import socket
import select
import struct
import logging
import multiprocessing
import subprocess
import time
import sys
import signal
from .._ffi.function import register_func
from .._ffi.base import py_str
from .._ffi.libinfo import find_lib_path
from ..module import load as _load_module
from ..contrib import util
from . import base
from . base import TrackerCode
logger = logging.getLogger('RPCServer')
def _server_env(load_library):
"""Server environment function return temp dir"""
temp = util.tempdir()
# pylint: disable=unused-variable
@register_func("tvm.rpc.server.workpath")
def get_workpath(path):
return temp.relpath(path)
@register_func("tvm.rpc.server.load_module", override=True)
def load_module(file_name):
"""Load module from remote side."""
path = temp.relpath(file_name)
m = _load_module(path)
logger.info("load_module %s", path)
return m
libs = []
load_library = load_library.split(":") if load_library else []
for file_name in load_library:
file_name = find_lib_path(file_name)[0]
libs.append(ctypes.CDLL(file_name, ctypes.RTLD_GLOBAL))
logger.info("Load additional library %s", file_name)
temp.libs = libs
return temp
def _serve_loop(sock, addr, load_library):
"""Server loop"""
sockfd = sock.fileno()
temp = _server_env(load_library)
base._ServerLoop(sockfd)
temp.remove()
logger.info("Finish serving %s", addr)
def _parse_server_opt(opts):
# parse client options
ret = {}
for kv in opts:
if kv.startswith("-timeout="):
ret["timeout"] = float(kv[9:])
return ret
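# e.g. (illustrative): _parse_server_opt(["-timeout=10"]) returns {"timeout": 10.0}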
def _listen_loop(sock, port, rpc_key, tracker_addr, load_library, custom_addr):
"""Listening loop of the server master."""
def _accept_conn(listen_sock, tracker_conn, ping_period=2):
"""Accept connection from the other places.
Parameters
----------
listen_sock: Socket
The socket used by listening process.
tracker_conn : connnection to tracker
Tracker connection
ping_period : float, optional
ping tracker every k seconds if no connection is accepted.
"""
old_keyset = set()
# Report resource to tracker
if tracker_conn:
matchkey = base.random_key(rpc_key + ":")
base.sendjson(tracker_conn,
[TrackerCode.PUT, rpc_key, (port, matchkey), custom_addr])
assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
else:
matchkey = rpc_key
unmatch_period_count = 0
unmatch_timeout = 4
# Wait until we get a valid connection
while True:
if tracker_conn:
trigger = select.select([listen_sock], [], [], ping_period)
                if listen_sock not in trigger[0]:
base.sendjson(tracker_conn, [TrackerCode.GET_PENDING_MATCHKEYS])
pending_keys = base.recvjson(tracker_conn)
old_keyset.add(matchkey)
# if match key not in pending key set
# it means the key is acquired by a client but not used.
if matchkey not in pending_keys:
unmatch_period_count += 1
else:
unmatch_period_count = 0
# regenerate match key if key is acquired but not used for a while
if unmatch_period_count * ping_period > unmatch_timeout + ping_period:
logger.info("no incoming connections, regenerate key ...")
matchkey = base.random_key(rpc_key + ":", old_keyset)
base.sendjson(tracker_conn,
[TrackerCode.PUT, rpc_key, (port, matchkey),
custom_addr])
assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
unmatch_period_count = 0
continue
conn, addr = listen_sock.accept()
magic = struct.unpack("<i", base.recvall(conn, 4))[0]
if magic != base.RPC_MAGIC:
conn.close()
continue
keylen = struct.unpack("<i", base.recvall(conn, 4))[0]
key = py_str(base.recvall(conn, keylen))
arr = key.split()
expect_header = "client:" + matchkey
server_key = "server:" + rpc_key
if arr[0] != expect_header:
conn.sendall(struct.pack("<i", base.RPC_CODE_MISMATCH))
conn.close()
logger.warning("mismatch key from %s", addr)
continue
else:
conn.sendall(struct.pack("<i", base.RPC_CODE_SUCCESS))
conn.sendall(struct.pack("<i", len(server_key)))
conn.sendall(server_key.encode("utf-8"))
return conn, addr, _parse_server_opt(arr[1:])
# Server logic
tracker_conn = None
while True:
try:
# step 1: setup tracker and report to tracker
if tracker_addr and tracker_conn is None:
tracker_conn = base.connect_with_retry(tracker_addr)
tracker_conn.sendall(struct.pack("<i", base.RPC_TRACKER_MAGIC))
magic = struct.unpack("<i", base.recvall(tracker_conn, 4))[0]
if magic != base.RPC_TRACKER_MAGIC:
raise RuntimeError("%s is not RPC Tracker" % str(tracker_addr))
# report status of current queue
cinfo = {"key" : "server:" + rpc_key}
base.sendjson(tracker_conn,
[TrackerCode.UPDATE_INFO, cinfo])
assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
# step 2: wait for in-coming connections
conn, addr, opts = _accept_conn(sock, tracker_conn)
except (socket.error, IOError):
# retry when tracker is dropped
if tracker_conn:
tracker_conn.close()
tracker_conn = None
continue
except RuntimeError as exc:
raise exc
# step 3: serving
logger.info("connection from %s", addr)
server_proc = multiprocessing.Process(target=_serve_loop,
args=(conn, addr, load_library))
        server_proc.daemon = True
server_proc.start()
# close from our side.
conn.close()
# wait until server process finish or timeout
server_proc.join(opts.get("timeout", None))
if server_proc.is_alive():
logger.info("Timeout in RPC session, kill..")
server_proc.terminate()
def _connect_proxy_loop(addr, key, load_library):
key = "server:" + key
retry_count = 0
max_retry = 5
retry_period = 5
while True:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(addr)
sock.sendall(struct.pack("<i", base.RPC_MAGIC))
sock.sendall(struct.pack("<i", len(key)))
sock.sendall(key.encode("utf-8"))
magic = struct.unpack("<i", base.recvall(sock, 4))[0]
if magic == base.RPC_CODE_DUPLICATE:
raise RuntimeError("key: %s has already been used in proxy" % key)
elif magic == base.RPC_CODE_MISMATCH:
logger.warning("RPCProxy do not have matching client key %s", key)
elif magic != base.RPC_CODE_SUCCESS:
raise RuntimeError("%s is not RPC Proxy" % str(addr))
keylen = struct.unpack("<i", base.recvall(sock, 4))[0]
remote_key = py_str(base.recvall(sock, keylen))
opts = _parse_server_opt(remote_key.split()[1:])
logger.info("connected to %s", str(addr))
process = multiprocessing.Process(
target=_serve_loop, args=(sock, addr, load_library))
            process.daemon = True
process.start()
sock.close()
process.join(opts.get("timeout", None))
if process.is_alive():
logger.info("Timeout in RPC session, kill..")
process.terminate()
retry_count = 0
except (socket.error, IOError) as err:
retry_count += 1
logger.warning("Error encountered %s, retry in %g sec", str(err), retry_period)
if retry_count > max_retry:
raise RuntimeError("Maximum retry error: last error: %s" % str(err))
time.sleep(retry_period)
def _popen(cmd):
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=os.environ)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = "Server invoke error:\n"
msg += out
raise RuntimeError(msg)
class Server(object):
"""Start RPC server on a separate process.
    This is a simple Python implementation based on multiprocessing.
    It is also possible to implement a similar C-based server with the
    TVM runtime which does not depend on Python.
Parameters
----------
host : str
The host url of the server.
port : int
        The port to bind to
port_end : int, optional
The end port to search
is_proxy : bool, optional
Whether the address specified is a proxy.
        If this is true, the host and port actually correspond to the
address of the proxy server.
use_popen : bool, optional
Whether to use Popen to start a fresh new process instead of fork.
        Switching this on is recommended for local RPC demonstrations on GPU
        devices, to avoid fork-safety issues.
    tracker_addr: Tuple (str, int), optional
        The address of RPC Tracker in tuple(host, port) format.
        If not None, the server will register itself to the tracker.
key : str, optional
The key used to identify the device type in tracker.
load_library : str, optional
List of additional libraries to be loaded during execution.
custom_addr: str, optional
Custom IP Address to Report to RPC Tracker
silent: bool, optional
Whether run this server in silent mode.
"""
def __init__(self,
host,
port=9091,
port_end=9199,
is_proxy=False,
use_popen=False,
tracker_addr=None,
key="",
load_library=None,
custom_addr=None,
silent=False):
try:
if base._ServerLoop is None:
raise RuntimeError("Please compile with USE_RPC=1")
except NameError:
raise RuntimeError("Please compile with USE_RPC=1")
self.host = host
self.port = port
self.libs = []
self.custom_addr = custom_addr
self.use_popen = use_popen
if silent:
logger.setLevel(logging.WARN)
if use_popen:
cmd = [sys.executable,
"-m", "tvm.exec.rpc_server",
"--host=%s" % host,
"--port=%s" % port]
if tracker_addr:
assert key
cmd += ["--tracker=%s:%d" % tracker_addr,
"--key=%s" % key]
if load_library:
cmd += ["--load-library", load_library]
if custom_addr:
cmd += ["--custom-addr", custom_addr]
if silent:
cmd += ["--silent"]
self.proc = subprocess.Popen(cmd, preexec_fn=os.setsid)
time.sleep(0.5)
elif not is_proxy:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = None
for my_port in range(port, port_end):
try:
sock.bind((host, my_port))
self.port = my_port
break
except socket.error as sock_err:
                    if sock_err.errno in [98, 48]:  # EADDRINUSE on Linux (98) / macOS (48)
continue
else:
raise sock_err
if not self.port:
raise ValueError("cannot bind to any port in [%d, %d)" % (port, port_end))
logger.info("bind to %s:%d", host, self.port)
sock.listen(1)
self.sock = sock
self.proc = multiprocessing.Process(
target=_listen_loop, args=(
self.sock, self.port, key, tracker_addr, load_library,
self.custom_addr))
            self.proc.daemon = True
self.proc.start()
else:
self.proc = multiprocessing.Process(
target=_connect_proxy_loop, args=((host, port), key, load_library))
            self.proc.daemon = True
self.proc.start()
def terminate(self):
"""Terminate the server process"""
if self.use_popen:
if self.proc:
os.killpg(self.proc.pid, signal.SIGTERM)
self.proc = None
else:
if self.proc:
self.proc.terminate()
self.proc = None
def __del__(self):
self.terminate()
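# A minimal usage sketch (illustrative host/port/key; requires a TVM build
# with USE_RPC=1):
#
#   server = Server("0.0.0.0", port=9091, key="cpu", silent=True)
#   ...  # run workloads against the server, then shut it down
#   server.terminate()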
| []
| []
| []
| [] | [] | python | 0 | 0 | |
server/main.go | package main
import (
"encoding/base64"
"encoding/json"
"fmt"
"github.com/SebastianRoll/go-socks5"
"io"
"log"
"net"
"net/http"
"os"
"strings"
)
type handler func(w http.ResponseWriter, r *http.Request)
func basicAuth(pass handler) handler {
return func(w http.ResponseWriter, r *http.Request) {
auth := strings.SplitN(r.Header.Get("Authorization"), " ", 2)
if len(auth) != 2 || auth[0] != "Basic" {
http.Error(w, "authorization failed", http.StatusUnauthorized)
return
}
payload, _ := base64.StdEncoding.DecodeString(auth[1])
pair := strings.SplitN(string(payload), ":", 2)
if len(pair) != 2 || !validate(pair[0], pair[1]) {
http.Error(w, "authorization failed", http.StatusUnauthorized)
return
}
pass(w, r)
}
}
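// A sketch of wiring the middleware (mirrors the commented-out line in main),
// plus a matching client call, assuming SOCKS5_USER and SOCKS5_PASSWORD are set:
//
//	http.HandleFunc("/interfaces", basicAuth(interfaces))
//	curl -u "$SOCKS5_USER:$SOCKS5_PASSWORD" http://localhost:8998/interfaces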
func validate(username, password string) bool {
	// Debug output only; avoid logging credentials in production.
	fmt.Println(username)
	fmt.Println(os.Getenv("SOCKS5_USER"))
	return username == os.Getenv("SOCKS5_USER") && password == os.Getenv("SOCKS5_PASSWORD")
}
type InterfaceResponse struct {
Name string
Flags []string
Addrs []string
}
func interfaces(w http.ResponseWriter, req *http.Request) {
fmt.Println("In /interfaces")
w.Header().Set("Content-Type", "application/json")
ints, err := net.Interfaces()
response := []InterfaceResponse{}
if err != nil {
panic(err)
}
for _, s := range ints {
intresp := InterfaceResponse{}
intresp.Name = s.Name
if s.Flags&net.FlagUp != 0 {
intresp.Flags = append(intresp.Flags, "FlagUp")
}
if s.Flags&net.FlagBroadcast != 0 {
intresp.Flags = append(intresp.Flags, "FlagBroadcast")
}
if s.Flags&net.FlagLoopback != 0 {
intresp.Flags = append(intresp.Flags, "FlagLoopback")
}
if s.Flags&net.FlagPointToPoint != 0 {
intresp.Flags = append(intresp.Flags, "FlagPointToPoint")
}
if s.Flags&net.FlagMulticast != 0 {
intresp.Flags = append(intresp.Flags, "FlagMulticast")
}
addrs, err := s.Addrs()
if err != nil {
panic(err)
}
adds := []string{}
for _, a := range addrs {
adds = append(adds, a.String())
}
intresp.Addrs = adds
response = append(response, intresp)
}
js, err := json.Marshal(response)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Write(js)
}
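// A response from /interfaces looks roughly like (illustrative values):
//
//	[{"Name":"lo","Flags":["FlagUp","FlagLoopback"],"Addrs":["127.0.0.1/8"]}]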
func main() {
// Create a local listener
l, err := net.Listen("tcp", ":9998")
if err != nil {
panic(err)
}
fmt.Println("PONG server ja")
go func() {
for {
conn, err := l.Accept()
if err != nil {
panic(err)
}
fmt.Println("in PONG server")
buf := make([]byte, 5)
if _, err := io.ReadAtLeast(conn, buf, 4); err != nil {
panic(err)
}
			fmt.Print(string(buf)) // use Print: buf is data, not a format string
//if !bytes.Equal(buf, []byte("ping")) {
// t.Fatalf("bad: %v", buf)
//}
conn.Write([]byte("pong"))
conn.Close()
}
}()
// Create a socks server
creds := socks5.StaticCredentials{
os.Getenv("SOCKS5_USER"): os.Getenv("SOCKS5_PASSWORD"),
}
cator := socks5.FromIPUserPassAuthenticator{Credentials: creds}
conf := &socks5.Config{
AuthMethods: []socks5.Authenticator{cator},
Logger: log.New(os.Stdout, "", log.LstdFlags),
Dial: socks5.DialFromIP,
}
serv, err := socks5.New(conf)
if err != nil {
panic(err)
}
// Start listening
go func() {
if err := serv.ListenAndServe("tcp", ":8989"); err != nil {
panic(err)
}
}()
//http.HandleFunc("/interfaces", basicAuth(interfaces))
http.HandleFunc("/interfaces", interfaces)
http.ListenAndServe(":8998", nil)
}
| [
"\"SOCKS5_USER\"",
"\"SOCKS5_USER\"",
"\"SOCKS5_PASSWORD\"",
"\"SOCKS5_USER\"",
"\"SOCKS5_PASSWORD\""
]
| []
| [
"SOCKS5_USER",
"SOCKS5_PASSWORD"
]
| [] | ["SOCKS5_USER", "SOCKS5_PASSWORD"] | go | 2 | 0 | |
functional/util/util.go | package util
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"strings"
"time"
)
var fleetctlBinPath string
func init() {
fleetctlBinPath = os.Getenv("FLEETCTL_BIN")
if fleetctlBinPath == "" {
fmt.Println("FLEETCTL_BIN environment variable must be set")
os.Exit(1)
} else if _, err := os.Stat(fleetctlBinPath); err != nil {
fmt.Printf("%v\n", err)
os.Exit(1)
}
if os.Getenv("SSH_AUTH_SOCK") == "" {
fmt.Println("SSH_AUTH_SOCK environment variable must be set")
os.Exit(1)
}
}
type fleetfunc func(args ...string) (string, string, error)
func RunFleetctl(args ...string) (string, string, error) {
log.Printf("%s %s", fleetctlBinPath, strings.Join(args, " "))
var stdoutBytes, stderrBytes bytes.Buffer
cmd := exec.Command(fleetctlBinPath, args...)
cmd.Stdout = &stdoutBytes
cmd.Stderr = &stderrBytes
err := cmd.Run()
return stdoutBytes.String(), stderrBytes.String(), err
}
func RunFleetctlWithInput(input string, args ...string) (string, string, error) {
log.Printf("%s %s", fleetctlBinPath, strings.Join(args, " "))
var stdoutBytes, stderrBytes bytes.Buffer
cmd := exec.Command(fleetctlBinPath, args...)
cmd.Stdout = &stdoutBytes
cmd.Stderr = &stderrBytes
stdin, err := cmd.StdinPipe()
if err != nil {
return "", "", err
}
if err = cmd.Start(); err != nil {
return "", "", err
}
stdin.Write([]byte(input))
stdin.Close()
err = cmd.Wait()
return stdoutBytes.String(), stderrBytes.String(), err
}
// WaitForNMachines polls fleet for up to 10s, retrying periodically, and
// returns once the specified number of machines is found.
func WaitForNMachines(fleetctl fleetfunc, count int) ([]string, error) {
var machines []string
timeout := 10 * time.Second
alarm := time.After(timeout)
ticker := time.Tick(250 * time.Millisecond)
loop:
for {
select {
case <-alarm:
return machines, fmt.Errorf("failed to find %d machines within %v", count, timeout)
case <-ticker:
stdout, _, err := fleetctl("list-machines", "--no-legend", "--full", "--fields", "machine")
if err != nil {
continue
}
stdout = strings.TrimSpace(stdout)
found := 0
if stdout != "" {
machines = strings.Split(stdout, "\n")
found = len(machines)
}
if found != count {
continue
}
break loop
}
}
return machines, nil
}
// WaitForNActiveUnits polls fleet for up to 10s, exiting when N units are
// found to be in an active state. It returns a map of active units to
// their target machines.
func WaitForNActiveUnits(fleetctl fleetfunc, count int) (map[string]UnitState, error) {
var nactive int
states := make(map[string]UnitState)
timeout := 10 * time.Second
alarm := time.After(timeout)
ticker := time.Tick(250 * time.Millisecond)
loop:
for {
select {
case <-alarm:
return nil, fmt.Errorf("failed to find %d active units within %v (last found: %d)", count, timeout, nactive)
case <-ticker:
stdout, _, err := fleetctl("list-units", "--no-legend", "--full")
stdout = strings.TrimSpace(stdout)
if stdout == "" || err != nil {
continue
}
lines := strings.Split(stdout, "\n")
allStates := parseUnitStates(lines)
active := filterActiveUnits(allStates)
nactive = len(active)
if nactive != count {
continue
}
for _, state := range active {
states[state.Name] = state
}
break loop
}
}
return states, nil
}
type UnitState struct {
Name string
JobState string
ActiveState string
Machine string
}
func parseUnitStates(units []string) map[string]UnitState {
states := make(map[string]UnitState)
for _, unit := range units {
cols := strings.SplitN(unit, "\t", 7)
if len(cols) == 7 {
machine := strings.SplitN(cols[6], "/", 2)[0]
states[cols[0]] = UnitState{cols[0], cols[2], cols[3], machine}
}
}
return states
}
func filterActiveUnits(states map[string]UnitState) map[string]UnitState {
filtered := make(map[string]UnitState)
for unit, state := range states {
if state.ActiveState == "active" {
filtered[unit] = state
}
}
return filtered
}
// TempUnit creates a local unit file with the given contents, returning
// the name of the file.
func TempUnit(contents string) (string, error) {
tmp, err := ioutil.TempFile(os.TempDir(), "fleet-test-unit-")
if err != nil {
return "", err
}
tmp.Write([]byte(contents))
tmp.Close()
svc := fmt.Sprintf("%s.service", tmp.Name())
err = os.Rename(tmp.Name(), svc)
if err != nil {
os.Remove(tmp.Name())
return "", err
}
return svc, nil
}
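// exampleStartUnit is an illustrative sketch (not used by the tests) of how
// these helpers combine; the unit contents and machine count are assumptions.
func exampleStartUnit() {
	unit, err := TempUnit("[Service]\nExecStart=/usr/bin/sleep 3000\n")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(unit)
	if _, stderr, err := RunFleetctl("start", unit); err != nil {
		log.Fatalf("fleetctl start failed: %v (stderr: %s)", err, stderr)
	}
	if _, err := WaitForNMachines(RunFleetctl, 3); err != nil {
		log.Fatal(err)
	}
}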
| [
"\"FLEETCTL_BIN\"",
"\"SSH_AUTH_SOCK\""
]
| []
| [
"SSH_AUTH_SOCK",
"FLEETCTL_BIN"
]
| [] | ["SSH_AUTH_SOCK", "FLEETCTL_BIN"] | go | 2 | 0 | |
pkg/ghclient/localwrapper_test.go | package ghclient
import (
"context"
"os"
"path/filepath"
"testing"
"time"
"gopkg.in/src-d/go-billy.v4/osfs"
git "gopkg.in/src-d/go-git.v4"
"github.com/dollarshaveclub/acyl/pkg/memfs"
billy "gopkg.in/src-d/go-billy.v4"
gitplumb "gopkg.in/src-d/go-git.v4/plumbing"
gitcache "gopkg.in/src-d/go-git.v4/plumbing/cache"
gitobj "gopkg.in/src-d/go-git.v4/plumbing/object"
gitfs "gopkg.in/src-d/go-git.v4/storage/filesystem"
)
// localTestRepo creates a new repo in memory with the provided branches and returns testing commit hashes
func localTestRepo(t *testing.T, branches []string) (billy.Filesystem, []string) {
fs := memfs.New()
if err := fs.MkdirAll("repo", os.ModeDir|os.ModePerm); err != nil {
t.Fatalf("error in mkdir repo: %v", err)
}
fs2, err := fs.Chroot("repo")
if err != nil {
t.Fatalf("error in chroot: %v", err)
}
if err := fs2.MkdirAll(".git", os.ModeDir|os.ModePerm); err != nil {
t.Fatalf("error in mkdir .git: %v", err)
}
dot, _ := fs2.Chroot(".git")
repo, err := git.Init(gitfs.NewStorage(dot, gitcache.NewObjectLRUDefault()), fs2)
if err != nil {
t.Fatalf("error initializing repo: %v", err)
}
wt, err := repo.Worktree()
if err != nil {
t.Fatalf("error getting working tree: %v", err)
}
fs2.MkdirAll("something", os.ModeDir|os.ModePerm)
f, err := fs2.Create("something/foo.txt")
if err != nil {
t.Fatalf("error creating file 1: %v", err)
}
f.Write([]byte(`omg12345`))
f.Close()
if _, err := wt.Add("something/foo.txt"); err != nil {
t.Fatalf("error adding changed file: %v", err)
}
h1, err := wt.Commit("first commit", &git.CommitOptions{Author: &gitobj.Signature{Name: "someguy", Email: "[email protected]", When: time.Now().UTC()}})
if err != nil {
t.Fatalf("error commiting 1: %v", err)
}
out := []string{h1.String()}
for i, b := range branches {
co := &git.CheckoutOptions{Branch: gitplumb.NewBranchReferenceName(b), Create: true}
if err := wt.Checkout(co); err != nil {
t.Fatalf("error checking out branch: %v: %v", b, err)
}
if i == 0 {
fs2.MkdirAll("somethingelse", os.ModeDir|os.ModePerm)
f, err := fs2.Create("somethingelse/bar.txt")
if err != nil {
t.Fatalf("error creating file 2: %v", err)
}
f.Write([]byte(`qwerty9999`))
f.Close()
f, err = fs2.Create("somethingelse/asdf.txt")
if err != nil {
t.Fatalf("error creating file 3: %v", err)
}
f.Write([]byte(`00000000`))
f.Close()
if _, err := wt.Add("somethingelse/"); err != nil {
t.Fatalf("error adding changed files 2: %v", err)
}
h2, err := wt.Commit("another commit", &git.CommitOptions{Author: &gitobj.Signature{Name: "someguy", Email: "[email protected]", When: time.Now().UTC()}})
if err != nil {
t.Fatalf("error commiting 2: %v", err)
}
out = append(out, h2.String())
}
}
// add a file but don't commit it
f, err = fs2.Create("something/bar.txt")
if err != nil {
t.Fatalf("error creating extra file: %v", err)
}
f.Write([]byte(`asdf`))
f.Close()
return fs, out
}
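// Resulting layout, for orientation: the first commit adds something/foo.txt
// ("omg12345"); the first branch gets a second commit adding
// somethingelse/bar.txt ("qwerty9999") and somethingelse/asdf.txt ("00000000");
// something/bar.txt ("asdf") is left uncommitted in the working tree.
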
func TestLocalWrapperGetBranches(t *testing.T) {
fs, _ := localTestRepo(t, []string{"foo", "bar"})
var backendExecuted bool
lw := &LocalWrapper{
FSFunc: func(path string) billy.Filesystem {
rfs, _ := fs.Chroot(path)
return rfs
},
RepoPathMap: map[string]string{"some/repo": "repo"},
Backend: &FakeRepoClient{
GetBranchesFunc: func(context.Context, string) ([]BranchInfo, error) {
backendExecuted = true
return []BranchInfo{}, nil
},
},
}
bl, err := lw.GetBranches(context.Background(), "some/repo")
if err != nil {
t.Fatalf("should have succeeded: %v", err)
}
if len(bl) != 3 {
t.Fatalf("bad count: %v", len(bl))
}
_, err = lw.GetBranches(context.Background(), "some/other-repo")
if err != nil {
t.Fatalf("should have succeeded: %v", err)
}
if !backendExecuted {
t.Fatalf("backend should have been executed")
}
}
func TestLocalWrapperGetBranch(t *testing.T) {
fs, _ := localTestRepo(t, []string{"foo", "bar"})
var backendExecuted bool
lw := &LocalWrapper{
FSFunc: func(path string) billy.Filesystem {
rfs, _ := fs.Chroot(path)
return rfs
},
RepoPathMap: map[string]string{"some/repo": "repo"},
Backend: &FakeRepoClient{
GetBranchFunc: func(context.Context, string, string) (BranchInfo, error) {
backendExecuted = true
return BranchInfo{}, nil
},
},
}
bi, err := lw.GetBranch(context.Background(), "some/repo", "foo")
if err != nil {
t.Fatalf("should have succeeded: %v", err)
}
if bi.Name != "foo" {
t.Errorf("bad branch name: %v", bi.Name)
}
_, err = lw.GetBranch(context.Background(), "some/other-repo", "foo")
if err != nil {
t.Fatalf("should have succeeded: %v", err)
}
if !backendExecuted {
t.Fatalf("backend should have been executed")
}
}
func TestLocalWrapperGetCommitMessage(t *testing.T) {
fs, commits := localTestRepo(t, []string{"foo", "bar"})
var backendExecuted bool
lw := &LocalWrapper{
FSFunc: func(path string) billy.Filesystem {
rfs, _ := fs.Chroot(path)
return rfs
},
RepoPathMap: map[string]string{"some/repo": "repo"},
Backend: &FakeRepoClient{
GetCommitMessageFunc: func(context.Context, string, string) (string, error) {
backendExecuted = true
return "", nil
},
},
}
msg, err := lw.GetCommitMessage(context.Background(), "some/repo", commits[0])
if err != nil {
t.Fatalf("should have succeeded: %v", err)
}
if msg != "first commit" {
t.Errorf("bad commit msg: %v", msg)
}
_, err = lw.GetCommitMessage(context.Background(), "some/other-repo", "asdf")
if err != nil {
t.Fatalf("should have succeeded: %v", err)
}
if !backendExecuted {
t.Fatalf("backend should have been executed")
}
}
func TestLocalWrapperGetFileContents(t *testing.T) {
fs, commits := localTestRepo(t, []string{"foo", "bar"})
var backendExecuted bool
lw := &LocalWrapper{
FSFunc: func(path string) billy.Filesystem {
rfs, _ := fs.Chroot(path)
return rfs
},
RepoPathMap: map[string]string{"some/repo": "repo"},
Backend: &FakeRepoClient{
GetFileContentsFunc: func(context.Context, string, string, string) ([]byte, error) {
backendExecuted = true
return nil, nil
},
},
}
contents, err := lw.GetFileContents(context.Background(), "some/repo", "something/foo.txt", commits[0])
if err != nil {
t.Fatalf("should have succeeded: %v", err)
}
if string(contents) != "omg12345" {
t.Errorf("bad contents: %v", string(contents))
}
_, err = lw.GetFileContents(context.Background(), "some/other-repo", "something/foo.txt", commits[0])
if err != nil {
t.Fatalf("should have succeeded: %v", err)
}
if !backendExecuted {
t.Fatalf("backend should have been executed")
}
}
func TestLocalWrapperGetFileContentsTriggeringRepo(t *testing.T) {
fs, commits := localTestRepo(t, []string{"foo", "bar"})
var backendExecuted bool
lw := &LocalWrapper{
WorkingTreeRepos: []string{"some/repo"},
FSFunc: func(path string) billy.Filesystem {
rfs, _ := fs.Chroot(path)
return rfs
},
RepoPathMap: map[string]string{"some/repo": "repo"},
Backend: &FakeRepoClient{
GetFileContentsFunc: func(context.Context, string, string, string) ([]byte, error) {
backendExecuted = true
return nil, nil
},
},
}
// we should be able to read the uncommitted file
contents, err := lw.GetFileContents(context.Background(), "some/repo", "something/bar.txt", commits[0])
if err != nil {
t.Fatalf("should have succeeded: %v", err)
}
if string(contents) != "asdf" {
t.Errorf("bad contents: %v", string(contents))
}
if backendExecuted {
t.Fatalf("backend should not have been executed")
}
}
func TestLocalWrapperGetDirectoryContents(t *testing.T) {
fs, _ := localTestRepo(t, []string{"foo", "bar"})
var backendExecuted bool
lw := &LocalWrapper{
FSFunc: func(path string) billy.Filesystem {
rfs, _ := fs.Chroot(path)
return rfs
},
RepoPathMap: map[string]string{"some/repo": "repo"},
Backend: &FakeRepoClient{
GetDirectoryContentsFunc: func(context.Context, string, string, string) (map[string]FileContents, error) {
backendExecuted = true
return nil, nil
},
},
}
dc, err := lw.GetDirectoryContents(context.Background(), "some/repo", "somethingelse", "foo")
if err != nil {
t.Fatalf("should have succeeded: %v", err)
}
if len(dc) != 2 {
t.Fatalf("bad length: %v", len(dc))
}
bar, ok := dc["bar.txt"]
if !ok {
t.Fatalf("bar.txt not found")
}
if string(bar.Contents) != "qwerty9999" {
t.Fatalf("bad contents for bar: %v", string(bar.Contents))
}
asdf, ok := dc["asdf.txt"]
if !ok {
t.Fatalf("asdf.txt not found")
}
if string(asdf.Contents) != "00000000" {
t.Fatalf("bad contents for asdf: %v", string(asdf.Contents))
}
_, err = lw.GetDirectoryContents(context.Background(), "some/other-repo", "somethingelse", "foo")
if err != nil {
t.Fatalf("should have succeeded: %v", err)
}
if !backendExecuted {
t.Fatalf("backend should have been executed")
}
}
func TestLocalWrapperGetDirectoryContentsTriggeringRepo(t *testing.T) {
fs, commits := localTestRepo(t, []string{"foo", "bar"})
var backendExecuted bool
lw := &LocalWrapper{
WorkingTreeRepos: []string{"some/repo"},
FSFunc: func(path string) billy.Filesystem {
rfs, _ := fs.Chroot(path)
return rfs
},
RepoPathMap: map[string]string{"some/repo": "repo"},
Backend: &FakeRepoClient{
GetFileContentsFunc: func(context.Context, string, string, string) ([]byte, error) {
backendExecuted = true
return nil, nil
},
},
}
// we should be able to read the uncommitted files
dc, err := lw.GetDirectoryContents(context.Background(), "some/repo", "something", commits[0])
if err != nil {
t.Fatalf("should have succeeded: %v", err)
}
if len(dc) != 2 {
t.Fatalf("bad length: %v", len(dc))
}
foo, ok := dc["foo.txt"]
if !ok {
t.Fatalf("foo.txt not found")
}
if string(foo.Contents) != "omg12345" {
t.Fatalf("bad contents for foo: %v", string(foo.Contents))
}
bar, ok := dc["bar.txt"]
if !ok {
t.Fatalf("bar.txt not found")
}
if string(bar.Contents) != "asdf" {
t.Fatalf("bad contents for bar: %v", string(bar.Contents))
}
if backendExecuted {
t.Fatalf("backend should not have been executed")
}
}
func TestLocalWrapperThisAcylRepo(t *testing.T) {
if os.Getenv("TEST_ACYL_REPO") == "" {
t.SkipNow()
}
p, err := filepath.Abs("../..")
if err != nil {
t.Fatalf("error making path absolute: %v", err)
}
lw := &LocalWrapper{
FSFunc: func(path string) billy.Filesystem { return osfs.New(path) },
RepoPathMap: map[string]string{"dollarshaveclub/acyl": p},
}
bl, err := lw.GetBranches(context.Background(), "dollarshaveclub/acyl")
if err != nil {
t.Fatalf("branches should have succeeded: %v", err)
}
t.Logf("# branches: %v\n", len(bl))
// arbitrary commit SHA
msg, err := lw.GetCommitMessage(context.Background(), "dollarshaveclub/acyl", "516d472b0ae6292fcd6b07350734ca0268747659")
if err != nil {
t.Fatalf("commit msg should have succeeded: %v", err)
}
if msg != "only load db secrets, fix error msg\n" {
t.Fatalf("bad commit msg: %v", msg)
}
lw.WorkingTreeRepos = []string{"dollarshaveclub/acyl"}
contents, err := lw.GetDirectoryContents(context.Background(), "dollarshaveclub/acyl", "", "516d472b0ae6292fcd6b07350734ca0268747659")
if err != nil {
t.Fatalf("get dir contents should have succeeded: %v", err)
}
t.Logf("contents file count: %v", len(contents))
}
| [
"\"TEST_ACYL_REPO\""
]
| []
| [
"TEST_ACYL_REPO"
]
| [] | ["TEST_ACYL_REPO"] | go | 1 | 0 | |
web/webgen/gen-chi/webruntime/runtime.go | package webruntime
import (
"fmt"
"io"
"net/http"
"os"
"reflect"
"strconv"
"strings"
"sync"
"github.com/go-chi/chi/v5"
"github.com/go-chi/render"
"github.com/gorilla/schema"
"github.com/morikuni/failure"
"github.com/go-playground/locales/en"
ut "github.com/go-playground/universal-translator"
"github.com/go-playground/validator/v10"
)
var (
// for parameters binding
mu sync.Mutex
decoder = schema.NewDecoder()
// for validation
uni *ut.UniversalTranslator
translator ut.Translator
validate *validator.Validate
)
// TODO: performance: the package-level decoder is shared and SetAliasTag
// mutates it, so every Bind* call serializes on mu; per-tag decoders would
// avoid the contention.
func BindPathParams(dst interface{}, req *http.Request, keys ...string) error {
params := make(map[string][]string, len(keys))
rctx := chi.RouteContext(req.Context())
if rctx == nil {
return nil
}
for _, k := range keys {
params[k] = []string{rctx.URLParam(k)}
}
mu.Lock()
defer mu.Unlock()
decoder.SetAliasTag("path")
return decoder.Decode(dst, params)
}
func BindQuery(dst interface{}, req *http.Request) error {
mu.Lock()
defer mu.Unlock()
decoder.SetAliasTag("query")
return decoder.Decode(dst, req.URL.Query())
}
func BindHeader(dst interface{}, req *http.Request) error {
mu.Lock()
defer mu.Unlock()
decoder.SetAliasTag("header")
return decoder.Decode(dst, req.Header)
}
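// exampleBindInput and exampleBind sketch how the binders combine on one
// request; the fields, tags, and the "userId" route parameter are assumptions.
type exampleBindInput struct {
	UserID int    `path:"userId"`
	Limit  int    `query:"limit"`
	Trace  string `header:"X-Trace-Id"`
}

func exampleBind(req *http.Request) (exampleBindInput, error) {
	var in exampleBindInput
	if err := BindPathParams(&in, req, "userId"); err != nil {
		return in, err
	}
	if err := BindQuery(&in, req); err != nil {
		return in, err
	}
	err := BindHeader(&in, req)
	return in, err
}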
func BindBody(dst interface{}, r io.ReadCloser) error {
if err := render.DecodeJSON(r, dst); err != nil {
return err
}
return nil
}
func ValidateStruct(ob interface{}) error {
// TODO: wrap
if err := validate.Struct(ob); err != nil {
return err
}
if v, ok := ob.(interface{ Validate() error }); ok {
return v.Validate() // TODO: 422
}
return nil
}
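// exampleValidate sketches ValidateStruct rejecting a value that fails its
// `validate` tag; the struct is an assumption for illustration.
func exampleValidate() error {
	type createUser struct {
		Email string `json:"email" validate:"required,email"`
	}
	return ValidateStruct(&createUser{Email: "not-an-email"}) // non-nil error
}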
// error
type errorRender struct {
HTTPStatusCode int `json:"-"`
Error []fieldError `json:"error"`
DebugContext string `json:"debug-context,omitempty"`
}
func (e *errorRender) Render(w http.ResponseWriter, r *http.Request) error {
render.Status(r, e.HTTPStatusCode)
return nil
}
type fieldError struct {
Field string `json:"field"`
Path string `json:"path"`
Message string `json:"message"`
}
func messageOf(err error) string {
msg, ok := failure.MessageOf(err)
if ok {
return msg
}
return "error"
}
func debugContextOf(err error) string {
if DEBUG {
return fmt.Sprintf("%+v", err)
}
return ""
}
var DEBUG = false
func init() {
if v, err := strconv.ParseBool(os.Getenv("DEBUG")); err == nil {
DEBUG = v
}
// todo: fix
en := en.New()
uni := ut.New(en, en)
	// this is usually known or extracted from the HTTP 'Accept-Language' header
// also see uni.FindTranslator(...)
var found bool
translator, found = uni.GetTranslator("en")
if !found {
panic("translator is not found")
}
validate = validator.New()
validate.RegisterTagNameFunc(func(fld reflect.StructField) string {
name := strings.SplitN(fld.Tag.Get("json"), ",", 2)[0]
if name == "-" {
return ""
}
return name
})
}
| [
"\"DEBUG\""
]
| []
| [
"DEBUG"
]
| [] | ["DEBUG"] | go | 1 | 0 | |
pubsub/rabbitpubsub/rabbit.go | // Copyright 2018 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rabbitpubsub
import (
"context"
"errors"
"fmt"
"net/url"
"os"
"path"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/streadway/amqp"
"github.com/hy9be/gocloud/gcerrors"
"github.com/hy9be/gocloud/pubsub"
"github.com/hy9be/gocloud/pubsub/driver"
)
func init() {
o := new(defaultDialer)
pubsub.DefaultURLMux().RegisterTopic(Scheme, o)
pubsub.DefaultURLMux().RegisterSubscription(Scheme, o)
}
// defaultDialer dials a default Rabbit server based on the environment
// variable "RABBIT_SERVER_URL".
type defaultDialer struct {
mu sync.Mutex
conn *amqp.Connection
opener *URLOpener
}
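// A minimal usage sketch of the default dialer via the pubsub URL muxer
// (broker URL and exchange name are assumptions for illustration):
//
//	os.Setenv("RABBIT_SERVER_URL", "amqp://guest:guest@localhost:5672/")
//	topic, err := pubsub.OpenTopic(ctx, "rabbit://myexchange")
//	if err != nil { ... }
//	defer topic.Shutdown(ctx)
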
func (o *defaultDialer) defaultConn(ctx context.Context) (*URLOpener, error) {
o.mu.Lock()
defer o.mu.Unlock()
// Re-use the connection if possible.
if o.opener != nil && o.conn != nil && !o.conn.IsClosed() {
return o.opener, nil
}
// First time through, or last time resulted in an error, or connection
// was closed. Initialize the connection.
serverURL := os.Getenv("RABBIT_SERVER_URL")
if serverURL == "" {
return nil, errors.New("RABBIT_SERVER_URL environment variable not set")
}
conn, err := amqp.Dial(serverURL)
if err != nil {
return nil, fmt.Errorf("failed to dial RABBIT_SERVER_URL %q: %v", serverURL, err)
}
o.conn = conn
o.opener = &URLOpener{Connection: conn}
return o.opener, nil
}
func (o *defaultDialer) OpenTopicURL(ctx context.Context, u *url.URL) (*pubsub.Topic, error) {
opener, err := o.defaultConn(ctx)
if err != nil {
return nil, fmt.Errorf("open topic %v: failed to open default connection: %v", u, err)
}
return opener.OpenTopicURL(ctx, u)
}
func (o *defaultDialer) OpenSubscriptionURL(ctx context.Context, u *url.URL) (*pubsub.Subscription, error) {
opener, err := o.defaultConn(ctx)
if err != nil {
return nil, fmt.Errorf("open subscription %v: failed to open default connection: %v", u, err)
}
return opener.OpenSubscriptionURL(ctx, u)
}
// Scheme is the URL scheme rabbitpubsub registers its URLOpeners under on pubsub.DefaultMux.
const Scheme = "rabbit"
// URLOpener opens RabbitMQ URLs like "rabbit://myexchange" for
// topics or "rabbit://myqueue" for subscriptions.
//
// For topics, the URL's host+path is used as the exchange name.
//
// For subscriptions, the URL's host+path is used as the queue name.
//
// No query parameters are supported.
type URLOpener struct {
// Connection to use for communication with the server.
Connection *amqp.Connection
// TopicOptions specifies the options to pass to OpenTopic.
TopicOptions TopicOptions
// SubscriptionOptions specifies the options to pass to OpenSubscription.
SubscriptionOptions SubscriptionOptions
}
// OpenTopicURL opens a pubsub.Topic based on u.
func (o *URLOpener) OpenTopicURL(ctx context.Context, u *url.URL) (*pubsub.Topic, error) {
for param := range u.Query() {
return nil, fmt.Errorf("open topic %v: invalid query parameter %q", u, param)
}
exchangeName := path.Join(u.Host, u.Path)
return OpenTopic(o.Connection, exchangeName, &o.TopicOptions), nil
}
// OpenSubscriptionURL opens a pubsub.Subscription based on u.
func (o *URLOpener) OpenSubscriptionURL(ctx context.Context, u *url.URL) (*pubsub.Subscription, error) {
for param := range u.Query() {
return nil, fmt.Errorf("open subscription %v: invalid query parameter %q", u, param)
}
queueName := path.Join(u.Host, u.Path)
return OpenSubscription(o.Connection, queueName, &o.SubscriptionOptions), nil
}
type topic struct {
exchange string // the AMQP exchange
conn amqpConnection
mu sync.Mutex
ch amqpChannel // AMQP channel used for all communication.
pubc <-chan amqp.Confirmation // Go channel for server acks of publishes
retc <-chan amqp.Return // Go channel for "returned" undeliverable messages
closec <-chan *amqp.Error // Go channel for AMQP channel close notifications
}
// TopicOptions sets options for constructing a *pubsub.Topic backed by
// RabbitMQ.
type TopicOptions struct{}
// SubscriptionOptions sets options for constructing a *pubsub.Subscription
// backed by RabbitMQ.
type SubscriptionOptions struct{}
// OpenTopic returns a *pubsub.Topic corresponding to the named exchange.
// See the package documentation for an example.
//
// The exchange should already exist (for instance, by using
// amqp.Channel.ExchangeDeclare), although this won't be checked until the first call
// to SendBatch. For the Go CDK Pub/Sub model to make sense, the exchange should
// be a fanout exchange, although nothing in this package enforces that.
//
// OpenTopic uses the supplied amqp.Connection for all communication. It is the
// caller's responsibility to establish this connection before calling OpenTopic, and
// to close it when Close has been called on all Topics opened with it.
//
// The documentation of the amqp package recommends using separate connections for
// publishing and subscribing.
func OpenTopic(conn *amqp.Connection, name string, opts *TopicOptions) *pubsub.Topic {
return pubsub.NewTopic(newTopic(&connection{conn}, name), nil)
}
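// An illustrative pairing with exchange setup (names are assumptions):
//
//	ch, _ := conn.Channel()
//	_ = ch.ExchangeDeclare("myexchange", "fanout", false, false, false, false, nil)
//	topic := OpenTopic(conn, "myexchange", nil)
//	defer topic.Shutdown(ctx)
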
func newTopic(conn amqpConnection, name string) *topic {
return &topic{
conn: conn,
exchange: name,
}
}
// establishChannel creates an AMQP channel if necessary. According to the amqp
// package docs, once an error is returned from the channel, it must be discarded and
// a new one created.
//
// Must be called with t.mu held.
func (t *topic) establishChannel(ctx context.Context) error {
if t.ch != nil { // We already have a channel.
select {
// If it was closed, open a new one.
// (Ignore the error, if any.)
case <-t.closec:
// If it isn't closed, nothing to do.
default:
return nil
}
}
var ch amqpChannel
err := runWithContext(ctx, func() error {
// Create a new channel in confirm mode.
var err error
ch, err = t.conn.Channel()
return err
})
if err != nil {
return err
}
t.ch = ch
// Get Go channels which will hold acks and returns from the server. The server
// will send an ack for each published message to confirm that it was received.
// It will return undeliverable messages.
// All the Notify methods return their arg.
t.pubc = ch.NotifyPublish(make(chan amqp.Confirmation))
t.retc = ch.NotifyReturn(make(chan amqp.Return))
t.closec = ch.NotifyClose(make(chan *amqp.Error, 1)) // closec will get at most one element
return nil
}
// Run f while checking to see if ctx is done.
// Return the error from f if it completes, or ctx.Err() if ctx is done.
func runWithContext(ctx context.Context, f func() error) error {
c := make(chan error, 1) // buffer so the goroutine can finish even if ctx is done
go func() { c <- f() }()
select {
case <-ctx.Done():
return ctx.Err()
case err := <-c:
return err
}
}
// SendBatch implements driver.SendBatch.
func (t *topic) SendBatch(ctx context.Context, ms []*driver.Message) error {
// It is simplest to allow only one SendBatch at a time. Allowing concurrent
// calls to SendBatch would complicate the logic of receiving publish
// confirmations and returns. We can go that route if performance warrants it.
t.mu.Lock()
defer t.mu.Unlock()
if err := t.establishChannel(ctx); err != nil {
return err
}
// Receive from Go channels concurrently or we will deadlock with the Publish
// RPC. (The amqp package docs recommend setting the capacity of the Go channel
// to the number of messages to be published, but we can't do that because we
// want to reuse the channel for all calls to SendBatch--it takes two RPCs to set
// up.)
errc := make(chan error, 1)
cctx, cancel := context.WithCancel(ctx)
defer cancel()
ch := t.ch // Avoid touching t.ch while goroutine is running.
go func() {
// This goroutine runs with t.mu held because its lifetime is within the
// lifetime of the t.mu.Lock call at the start of SendBatch.
errc <- t.receiveFromPublishChannels(cctx, len(ms))
}()
var perr error
for _, m := range ms {
pub := toPublishing(m)
if m.BeforeSend != nil {
asFunc := func(i interface{}) bool {
if p, ok := i.(**amqp.Publishing); ok {
*p = &pub
return true
}
return false
}
if err := m.BeforeSend(asFunc); err != nil {
return err
}
}
if perr = ch.Publish(t.exchange, pub); perr != nil {
cancel()
break
}
if m.AfterSend != nil {
asFunc := func(i interface{}) bool { return false }
if err := m.AfterSend(asFunc); err != nil {
return err
}
}
}
// Wait for the goroutine to finish.
err := <-errc
// If we got an error from Publish, prefer that.
if perr != nil {
// Set t.ch to nil because an AMQP channel is broken after error.
// Do this here, after the goroutine has finished, rather than in the Publish loop
// above, to avoid a race condition.
t.ch = nil
err = perr
}
// If there is only one error, return it rather than a MultiError. That
// will work better with ErrorCode and ErrorAs.
if merr, ok := err.(MultiError); ok && len(merr) == 1 {
return merr[0]
}
return err
}
// Read from the channels established with NotifyPublish and NotifyReturn.
// Must be called with t.mu held.
func (t *topic) receiveFromPublishChannels(ctx context.Context, nMessages int) error {
// Consume all the acknowledgments for the messages we are publishing, and also
// get returned messages. The server will send exactly one ack for each published
// message (successful or not), and one return for each undeliverable message.
// Since SendBatch (the only caller of this method) holds the lock, we expect
// exactly as many acks as messages.
var merr MultiError
nAcks := 0
for nAcks < nMessages {
select {
case <-ctx.Done():
if t.ch != nil {
// Channel will be in a weird state (not all publish acks consumed, perhaps)
// so re-create it next time.
t.ch.Close()
t.ch = nil
}
return ctx.Err()
case ret, ok := <-t.retc:
if !ok {
// Channel closed. Handled in the pubc case below. But set
// the channel to nil to prevent it from being selected again.
t.retc = nil
} else {
// The message was returned from the server because it is unroutable.
// Record the error and continue so we drain all
// items from pubc. We don't need to re-establish the channel on this
// error.
merr = append(merr, fmt.Errorf("rabbitpubsub: message returned from %s: %s (code %d)",
ret.Exchange, ret.ReplyText, ret.ReplyCode))
}
case conf, ok := <-t.pubc:
if !ok {
// t.pubc was closed unexpectedly.
t.ch = nil // re-create the channel on next use
if merr != nil {
return merr
}
// t.closec must be closed too. See if it has an error.
if err := closeErr(t.closec); err != nil {
merr = append(merr, err)
return merr
}
// We shouldn't be here, but if we are, we still want to return an
// error.
merr = append(merr, errors.New("rabbitpubsub: publish listener closed unexpectedly"))
return merr
}
nAcks++
if !conf.Ack {
merr = append(merr, errors.New("rabbitpubsub: ack failed on publish"))
}
}
}
if merr != nil {
return merr
}
// Returning a nil merr would mean the returned error interface value is non-nil, so return nil explicitly.
return nil
}
// A MultiError is an error that contains multiple errors.
type MultiError []error
func (m MultiError) Error() string {
var s []string
for _, e := range m {
s = append(s, e.Error())
}
return strings.Join(s, "; ")
}
// Return the error from a Go channel monitoring the closing of an AMQP channel.
// closec must have been registered via Channel.NotifyClose.
// When closeErr is called, we expect closec to be closed. If it isn't, we also
// consider that an error.
func closeErr(closec <-chan *amqp.Error) error {
select {
case aerr := <-closec:
// This nil check is necessary. aerr is of type *amqp.Error. If we
// returned it directly (effectively assigning it to a variable of
// type error), then the return value would not be a nil interface
// value even if aerr was a nil pointer, and that would break tests
// like "if err == nil ...".
if aerr == nil {
return nil
}
return aerr
default:
return errors.New("rabbitpubsub: NotifyClose Go channel is unexpectedly open")
}
}
// toPublishing converts a driver.Message to an amqp.Publishing.
func toPublishing(m *driver.Message) amqp.Publishing {
h := amqp.Table{}
for k, v := range m.Metadata {
h[k] = v
}
return amqp.Publishing{
Headers: h,
Body: m.Body,
}
}
// IsRetryable implements driver.Topic.IsRetryable.
func (*topic) IsRetryable(err error) bool {
return isRetryable(err)
}
func (*topic) ErrorCode(err error) gcerrors.ErrorCode {
return errorCode(err)
}
var errorCodes = map[int]gcerrors.ErrorCode{
amqp.NotFound: gcerrors.NotFound,
amqp.PreconditionFailed: gcerrors.FailedPrecondition,
// These next indicate a bug in our driver, not the user's code.
amqp.SyntaxError: gcerrors.Internal,
amqp.CommandInvalid: gcerrors.Internal,
amqp.InternalError: gcerrors.Internal,
amqp.NotImplemented: gcerrors.Unimplemented,
amqp.ChannelError: gcerrors.FailedPrecondition, // typically channel closed
}
func errorCode(err error) gcerrors.ErrorCode {
aerr, ok := err.(*amqp.Error)
if !ok {
return gcerrors.Unknown
}
if ec, ok := errorCodes[aerr.Code]; ok {
return ec
}
return gcerrors.Unknown
}
func isRetryable(err error) bool {
aerr, ok := err.(*amqp.Error)
if !ok {
return false
}
// amqp.Error has a Recover field which sounds like it should mean "retryable".
// But it actually means "can be recovered by retrying later or with different
// parameters," which is not what we want. The error codes for which Recover is
// true, defined in the isSoftExceptionCode function of
// github.com/streadway/amqp/spec091.go, include things like NotFound and
// AccessRefused, which require outside action.
//
// The following are the codes which might be resolved by retry without external
// action, according to the AMQP 0.91 spec
// (https://www.rabbitmq.com/amqp-0-9-1-reference.html#constants). The quotations
// are from that page.
switch aerr.Code {
case amqp.ContentTooLarge:
// "The client attempted to transfer content larger than the server could
// accept at the present time. The client may retry at a later time."
return true
case amqp.ConnectionForced:
// "An operator intervened to close the connection for some reason. The
// client may retry at some later date."
return true
default:
return false
}
}
// As implements driver.Topic.As.
func (t *topic) As(i interface{}) bool {
c, ok := i.(**amqp.Connection)
if !ok {
return false
}
conn, ok := t.conn.(*connection)
if !ok { // running against the fake
return false
}
*c = conn.conn
return true
}
// ErrorAs implements driver.Topic.ErrorAs
func (*topic) ErrorAs(err error, i interface{}) bool {
return errorAs(err, i)
}
func errorAs(err error, i interface{}) bool {
switch e := err.(type) {
case *amqp.Error:
if p, ok := i.(**amqp.Error); ok {
*p = e
return true
}
case MultiError:
if p, ok := i.(*MultiError); ok {
*p = e
return true
}
}
return false
}
// Close implements driver.Topic.Close.
func (*topic) Close() error { return nil }
// OpenSubscription returns a *pubsub.Subscription corresponding to the named queue.
// See the package documentation for an example.
//
// The queue must have been previously created (for instance, by using
// amqp.Channel.QueueDeclare) and bound to an exchange.
//
// OpenSubscription uses the supplied amqp.Connection for all communication. It is
// the caller's responsibility to establish this connection before calling
// OpenSubscription and to close it when Close has been called on all Subscriptions
// opened with it.
//
// The documentation of the amqp package recommends using separate connections for
// publishing and subscribing.
func OpenSubscription(conn *amqp.Connection, name string, opts *SubscriptionOptions) *pubsub.Subscription {
return pubsub.NewSubscription(newSubscription(&connection{conn}, name), nil, nil)
}
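// An illustrative pairing with queue setup and binding (names are assumptions):
//
//	ch, _ := conn.Channel()
//	q, _ := ch.QueueDeclare("myqueue", false, false, false, false, nil)
//	_ = ch.QueueBind(q.Name, "", "myexchange", false, nil)
//	sub := OpenSubscription(conn, "myqueue", nil)
//	defer sub.Shutdown(ctx)
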
type subscription struct {
conn amqpConnection
queue string // the AMQP queue name
consumer string // the client-generated name for this particular subscriber
mu sync.Mutex
ch amqpChannel // AMQP channel used for all communication.
delc <-chan amqp.Delivery
closec <-chan *amqp.Error
receiveBatchHook func() // for testing
}
var nextConsumer int64 // atomic
func newSubscription(conn amqpConnection, name string) *subscription {
return &subscription{
conn: conn,
queue: name,
consumer: fmt.Sprintf("c%d", atomic.AddInt64(&nextConsumer, 1)),
receiveBatchHook: func() {},
}
}
// Must be called with s.mu held.
func (s *subscription) establishChannel(ctx context.Context) error {
if s.ch != nil { // We already have a channel.
select {
// If it was closed, open a new one.
// (Ignore the error, if any.)
case <-s.closec:
// If it isn't closed, nothing to do.
default:
return nil
}
}
var ch amqpChannel
err := runWithContext(ctx, func() error {
// Create a new channel.
var err error
ch, err = s.conn.Channel()
if err != nil {
return err
}
// Subscribe to messages from the queue.
s.delc, err = ch.Consume(s.queue, s.consumer)
return err
})
if err != nil {
return err
}
s.ch = ch
s.closec = ch.NotifyClose(make(chan *amqp.Error, 1)) // closec will get at most one element
return nil
}
// ReceiveBatch implements driver.Subscription.ReceiveBatch.
func (s *subscription) ReceiveBatch(ctx context.Context, maxMessages int) ([]*driver.Message, error) {
s.mu.Lock()
defer s.mu.Unlock()
if err := s.establishChannel(ctx); err != nil {
return nil, err
}
s.receiveBatchHook()
// Get up to maxMessages waiting messages, but don't take too long.
var ms []*driver.Message
maxTime := time.NewTimer(50 * time.Millisecond)
for {
select {
case <-ctx.Done():
// Cancel the Consume.
_ = s.ch.Cancel(s.consumer) // ignore the error
s.ch = nil
return nil, ctx.Err()
case d, ok := <-s.delc:
if !ok { // channel closed
s.ch = nil // re-establish the channel next time
if len(ms) > 0 {
return ms, nil
}
// s.closec must be closed too. See if it has an error.
if err := closeErr(s.closec); err != nil {
// PreconditionFailed can happen if we send an Ack or Nack for a
// message that has already been acked/nacked. Ignore those errors.
if aerr, ok := err.(*amqp.Error); ok && aerr.Code == amqp.PreconditionFailed {
return nil, nil
}
return nil, err
}
// We shouldn't be here, but if we are, we still want to return an
// error.
return nil, errors.New("rabbitpubsub: delivery channel closed unexpectedly")
}
ms = append(ms, toMessage(d))
if len(ms) >= maxMessages {
return ms, nil
}
case <-maxTime.C:
// Timed out. Return whatever we have. If we have nothing, we'll get
// called again soon, but returning allows us to give up the lock in
// case there are acks/nacks to be sent.
return ms, nil
}
}
}
// toMessage converts an amqp.Delivery (a received message) to a driver.Message.
func toMessage(d amqp.Delivery) *driver.Message {
// Delivery.Headers is a map[string]interface{}, so we have to
// convert each value to a string.
md := map[string]string{}
for k, v := range d.Headers {
md[k] = fmt.Sprint(v)
}
loggableID := d.MessageId
if loggableID == "" {
loggableID = d.CorrelationId
}
if loggableID == "" {
loggableID = fmt.Sprintf("DeliveryTag %d", d.DeliveryTag)
}
return &driver.Message{
LoggableID: loggableID,
Body: d.Body,
AckID: d.DeliveryTag,
Metadata: md,
AsFunc: func(i interface{}) bool {
p, ok := i.(*amqp.Delivery)
if !ok {
return false
}
*p = d
return true
},
}
}
// SendAcks implements driver.Subscription.SendAcks.
func (s *subscription) SendAcks(ctx context.Context, ackIDs []driver.AckID) error {
return s.sendAcksOrNacks(ctx, ackIDs, true)
}
// CanNack implements driver.CanNack.
func (s *subscription) CanNack() bool { return true }
// SendNacks implements driver.Subscription.SendNacks.
func (s *subscription) SendNacks(ctx context.Context, ackIDs []driver.AckID) error {
return s.sendAcksOrNacks(ctx, ackIDs, false)
}
func (s *subscription) sendAcksOrNacks(ctx context.Context, ackIDs []driver.AckID, ack bool) error {
s.mu.Lock()
defer s.mu.Unlock()
if err := s.establishChannel(ctx); err != nil {
return err
}
// Ack/Nack calls don't wait for a response, so this loop should execute relatively
// quickly.
// It wouldn't help to make it concurrent, because Channel.Ack/Nack grabs a
// channel-wide mutex. (We could consider using multiple channels if performance
// becomes an issue.)
for _, id := range ackIDs {
if ctx.Err() != nil {
return ctx.Err()
}
var err error
if ack {
err = s.ch.Ack(id.(uint64))
} else {
err = s.ch.Nack(id.(uint64))
}
if err != nil {
s.ch = nil // re-establish channel after an error
return err
}
}
return nil
}
// IsRetryable implements driver.Subscription.IsRetryable.
func (*subscription) IsRetryable(err error) bool {
return isRetryable(err)
}
func (*subscription) ErrorCode(err error) gcerrors.ErrorCode {
return errorCode(err)
}
// As implements driver.Subscription.As.
func (s *subscription) As(i interface{}) bool {
c, ok := i.(**amqp.Connection)
if !ok {
return false
}
conn, ok := s.conn.(*connection)
if !ok { // running against the fake
return false
}
*c = conn.conn
return true
}
// ErrorAs implements driver.Subscription.ErrorAs
func (*subscription) ErrorAs(err error, i interface{}) bool {
return errorAs(err, i)
}
// Close implements driver.Subscription.Close.
func (*subscription) Close() error { return nil }
| [
"\"RABBIT_SERVER_URL\""
]
| []
| [
"RABBIT_SERVER_URL"
]
| [] | ["RABBIT_SERVER_URL"] | go | 1 | 0 | |
project/__init__.py | import os
from flask import Flask, Blueprint
from project.api.endpoints.restplus import api
def create_app(script_info=None):
app = Flask(__name__)
# app_settings = os.getenv("APP_SETTINGS")
# app.config.from_object(app_settings)
from project.api.endpoints.sentiment import ns as sentiment_blueprint
api_blueprint = Blueprint("api", __name__, url_prefix="/api/v1")
api.init_app(api_blueprint)
app.register_blueprint(api_blueprint)
api.add_namespace(sentiment_blueprint)
return app
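# Illustrative usage sketch (entry point is an assumption):
#   from project import create_app
#   app = create_app()
#   app.run()  # serves the /api/v1/... endpoints registered above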
| []
| []
| [
"APP_SETTINGS"
]
| [] | ["APP_SETTINGS"] | python | 1 | 0 | |
src/src/jdk/nashorn/tools/jjs/Console.java | /*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package jdk.nashorn.tools.jjs;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintStream;
import java.io.Writer;
import java.nio.file.Files;
import java.util.function.Function;
import java.util.stream.Collectors;
import jdk.internal.jline.NoInterruptUnixTerminal;
import jdk.internal.jline.Terminal;
import jdk.internal.jline.TerminalFactory;
import jdk.internal.jline.TerminalFactory.Flavor;
import jdk.internal.jline.WindowsTerminal;
import jdk.internal.jline.console.ConsoleReader;
import jdk.internal.jline.console.KeyMap;
import jdk.internal.jline.console.completer.CandidateListCompletionHandler;
import jdk.internal.jline.extra.EditingHistory;
import jdk.internal.misc.Signal;
import jdk.internal.misc.Signal.Handler;
class Console implements AutoCloseable {
private static final String DOCUMENTATION_SHORTCUT = "\033\133\132"; //Shift-TAB
private final ConsoleReader in;
private final File historyFile;
Console(final InputStream cmdin, final PrintStream cmdout, final File historyFile,
final NashornCompleter completer, final Function<String, String> docHelper) throws IOException {
this.historyFile = historyFile;
TerminalFactory.registerFlavor(Flavor.WINDOWS, ttyDevice -> isCygwin() ? new JJSUnixTerminal() : new JJSWindowsTerminal());
TerminalFactory.registerFlavor(Flavor.UNIX, ttyDevice -> new JJSUnixTerminal());
in = new ConsoleReader(cmdin, cmdout);
in.setExpandEvents(false);
in.setHandleUserInterrupt(true);
in.setBellEnabled(true);
in.setCopyPasteDetection(true);
((CandidateListCompletionHandler) in.getCompletionHandler()).setPrintSpaceAfterFullCompletion(false);
final Iterable<String> existingHistory = historyFile.exists() ? Files.readAllLines(historyFile.toPath()) : null;
in.setHistory(new EditingHistory(in, existingHistory) {
@Override protected boolean isComplete(CharSequence input) {
return completer.isComplete(input.toString());
}
});
in.addCompleter(completer);
Runtime.getRuntime().addShutdownHook(new Thread((Runnable)this::saveHistory));
bind(DOCUMENTATION_SHORTCUT, (Runnable) ()->showDocumentation(docHelper));
try {
Signal.handle(new Signal("CONT"), new Handler() {
@Override public void handle(Signal sig) {
try {
in.getTerminal().reset();
in.redrawLine();
in.flush();
} catch (Exception ex) {
ex.printStackTrace();
}
}
});
} catch (IllegalArgumentException ignored) {
//the CONT signal does not exist on this platform
}
}
String readLine(final String prompt) throws IOException {
return in.readLine(prompt);
}
@Override
public void close() {
saveHistory();
}
private void saveHistory() {
try (Writer out = Files.newBufferedWriter(historyFile.toPath())) {
String lineSeparator = System.getProperty("line.separator");
out.write(getHistory().save()
.stream()
.collect(Collectors.joining(lineSeparator)));
} catch (final IOException exp) {}
}
EditingHistory getHistory() {
return (EditingHistory) in.getHistory();
}
boolean terminalEditorRunning() {
Terminal terminal = in.getTerminal();
if (terminal instanceof JJSUnixTerminal) {
return ((JJSUnixTerminal) terminal).isRaw();
}
return false;
}
void suspend() {
try {
in.getTerminal().restore();
} catch (Exception ex) {
throw new IllegalStateException(ex);
}
}
void resume() {
try {
in.getTerminal().init();
} catch (Exception ex) {
throw new IllegalStateException(ex);
}
}
static final class JJSUnixTerminal extends NoInterruptUnixTerminal {
JJSUnixTerminal() throws Exception {
}
boolean isRaw() {
try {
return getSettings().get("-a").contains("-icanon");
} catch (IOException | InterruptedException ex) {
return false;
}
}
@Override
public void disableInterruptCharacter() {
}
@Override
public void enableInterruptCharacter() {
}
}
static final class JJSWindowsTerminal extends WindowsTerminal {
public JJSWindowsTerminal() throws Exception {
}
@Override
public void init() throws Exception {
super.init();
setAnsiSupported(false);
}
}
private static boolean isCygwin() {
return System.getenv("SHELL") != null;
}
private void bind(String shortcut, Object action) {
KeyMap km = in.getKeys();
for (int i = 0; i < shortcut.length(); i++) {
final Object value = km.getBound(Character.toString(shortcut.charAt(i)));
if (value instanceof KeyMap) {
km = (KeyMap) value;
} else {
km.bind(shortcut.substring(i), action);
}
}
}
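    // Illustrative walk: bind("\033\133\132", action) descends ESC -> '['
    // through nested KeyMaps and binds the final "Z", so pressing Shift-TAB
    // triggers the action.
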
private void showDocumentation(final Function<String, String> docHelper) {
final String buffer = in.getCursorBuffer().buffer.toString();
final int cursor = in.getCursorBuffer().cursor;
final String doc = docHelper.apply(buffer.substring(0, cursor));
try {
if (doc != null) {
in.println();
in.println(doc);
in.redrawLine();
in.flush();
} else {
in.beep();
}
} catch (IOException ex) {
throw new IllegalStateException(ex);
}
}
}
| [
"\"SHELL\""
]
| []
| [
"SHELL"
]
| [] | ["SHELL"] | java | 1 | 0 | |
myconnectome/timeseries/mk_phenomewide_graph.py | """
mk graph showing relations between different variables
"""
#import pydot
import networkx as nx
import re
import os,glob
import numpy
basedir=os.environ['MYCONNECTOME_DIR']
filter_negatives=False
exclude_metab=False
exclude_metab_metab=True
exclude_gene_gene=True
filter_gene_modules=False # only include first cluster in each module
thresh=0.05 #0.1000000001
degree_thresh=1
use_infomap=True
exclude_unenriched=False
exclude_classes=['fd','psoriasis','pindex','fullmetab','bwcorr']
exclude_pairs=['metab_metab','wgcna_wgcna']
def load_dataframe(filename,thresh=0.1):
# return p value, t stat, and correlation
f=open(filename)
header=f.readline()
lines=f.readlines()
f.close()
data={}
for l in lines:
l_fixed=[]
in_quotes=False
for i in range(len(l)):
if l[i]=='"':
if in_quotes:
in_quotes=False
else:
in_quotes=True
if l[i]==' ' and not in_quotes:
l_fixed.append('\t')
else:
l_fixed.append(l[i])
l_fixed=''.join(l_fixed).replace('"','')
l_s=[i.replace('"','') for i in l_fixed.strip().split('\t')]
try:
if float(l_s[-1])<thresh:
#print l_s
data[(l_s[1],l_s[2])]=[float(l_s[-1]),float(l_s[4]),float(l_s[3])]
except:
pass
return data
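# Illustrative input line (whitespace-separated; quoted fields may contain
# spaces); columns are index, var1, var2, r, t, p:
#   1 "gene module 3" "metab C2" 0.42 3.10 0.003
# -> data[("gene module 3", "metab C2")] = [0.003, 3.10, 0.42]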
cluster_names=['ME%d'%int(i.strip().split()[0]) for i in open(os.path.join(basedir,'rna-seq/WGCNA/module_descriptions.txt')).readlines()]
cluster_terms=[' '.join(i.strip().split()[1:]) for i in open(os.path.join(basedir,'rna-seq/WGCNA/module_descriptions.txt')).readlines()]
cluster_dict={}
for i in range(len(cluster_names)):
cluster_dict[cluster_names[i]]=cluster_terms[i]
metab_names=['C%d'%i for i in range(1,16)]
metab_terms=[i.strip() for i in open(os.path.join(basedir,'metabolomics/apclust_descriptions.txt')).readlines()]
metab_dict={}
for i in range(len(metab_names)):
metab_dict[metab_names[i]]=metab_terms[i]
files_to_load=list(set(glob.glob(os.path.join(basedir,'timeseries/out*.txt'))))
power_network_names={-1:'none',0:'none',1:'DefaultMode',2:'Visual-II',3:'Fronto-parietal',4.5:'Visual-I',
5:'DorsalAttn-I',7:'VentralAttn',8:'Salience',9:'Cingulo-opercular',
10:'Somatomotor',11.5:'FPOther',15:'MedialParietal',16:'Parieto-occipital'}
node_shapes={'metab':'box','wgcna':'ellipse','food':'triangle','wincorr':'diamond','behav':'hexagon','falff':'invtriangle',
'netdat':'trapezium'}
node_classes={'metab':1,'wgcna':2,'food':3,'wincorr':4,'behav':5,'falff':6,'netdat':7,'bwcorr':8,'immport':9,'fullmetab':10,
'fd':11}
behav_terms={'panas.positive':'Positive mood','panas.negative':'Negative mood','panas.fatigue':'Fatigue','afterscan.Anxietyduringscan':'Anxiety during scan',
'afterscan.diastolic':'Diastolic BP after scan','afterscan.pulse':'Pulse after scan','afterscan.systolic':'Systolic BP after scan',
'morning.Sleepquality':'Self-rated sleep quality','morning.Soreness':'Soreness','prevevening.Alcohol':'Alcohol intake (previous evening)',
'prevevening.Guthealth':'Gut health (previous day)','prevevening.Psoriasisseverity':'Psoriasis severity (previous day)',
'prevevening.Stress':'Stress (previous day)', 'prevevening.Timespentoutdoors':'Time spent outdoors (previous day)',
'TuesThurs':'Thursday vs. Tuesday', 'temp.mean':'Mean daily temp',"email.LIWCcdi":'Email content-dynamic index',
"email.LIWCnegemo":'Email negative emotion',"email.LIWCposemo":'Email positive emotion','zeo.zq':'ZEO zq'}
data={}
graph = nx.Graph()
shell=[]
for i in range(1,8):
shell.append([])
for filename in files_to_load:
f=os.path.basename(filename).replace('out.dat.','').replace('.txt','')
data[f]=load_dataframe(filename,thresh)
if f in exclude_pairs:
print 'excluding',f
continue
datatypes=f.split('_')
if len(data[f])<1:
print 'no significant results for',f
continue
if 'netdat' in datatypes:
continue
for k in data[f].keys():
print k
if data[f][k][1]<0 and filter_negatives:
continue
ktuple=k
if datatypes[0] in exclude_classes or datatypes[1] in exclude_classes:
print 'excluding',f
continue
if filter_gene_modules and 'wgcna' in datatypes:
dt=[False,False]
if datatypes[0]=='wgcna':
dt[0]=True
if datatypes[1]=='wgcna':
dt[1]=True
k=[i.replace(',','').replace('"','') for i in list(k)]
print k
nodenames=[datatypes[0]+'-'+k[0],datatypes[1]+'-'+k[1]]
exclude=False
for x in range(2):
name=u'%s'%re.sub(r'[^\x00-\x7F]+',' ', nodenames[x]).replace('"','').replace('&','')
print name
if name.find('no enrichment')>-1 and exclude_unenriched:
exclude=True
nodelabel=''.join(name.split('-')[1:]).replace('"','').replace('_NIST','').split(':')[0]
print name,nodelabel
if datatypes[x]=='wincorr':
nodelabel=power_network_names[float(nodelabel)]
print 'wincorr:',nodelabel
if datatypes[x]=='wgcna':
nodelabel=cluster_dict[nodelabel]+' ('+nodelabel.replace('mod','').replace('clust','')+')'
if datatypes[x]=='metab':
nodelabel=metab_dict[nodelabel]+' ('+nodelabel+')'
if datatypes[x]=='behav':
nodelabel=behav_terms[nodelabel].replace(' (previous evening)','').replace(' (previous day)','')
if not graph.has_node(name):
graph.add_node(name)
graph.node[name]['label']=nodelabel
graph.node[name]['nodeclass']=node_classes[datatypes[x]]
print name,graph.node[name]
#shell[node_classes[datatypes[x]]-1].append(name)
nodenames[x]=name
if not exclude:
graph.add_edge(nodenames[0],nodenames[1],attr_dict={'pval':data[f][ktuple][0],'tval':data[f][ktuple][1],'rval':data[f][ktuple][2]})
print 'edge:',nodenames[0],nodenames[1]
degree=graph.degree()
for i in degree.iterkeys():
if degree[i]<1:
graph.remove_node(i)
#h=nx.hits(graph)[0]
#for k in h.iterkeys():
# graph.node[k]['hub']=h[k]
#for k in graph.obj_dict['nodes'].iterkeys():
# print graph.obj_dict['nodes'][k]
print 'writing graph...'
#graph.write_pdf('graph.pdf')
if filter_negatives:
filt='_posonly'
else:
filt=''
if exclude_metab:
filt=filt+'_nometab'
if degree_thresh>1:
filt=filt+'degree%d'%degree_thresh
for n in graph.nodes():
if graph.degree(n)<degree_thresh:
graph.remove_node(n)
cc=nx.connected_components(graph)
nodes_to_remove=[]
for component in cc:
if len(component)<3:
for node in component:
nodes_to_remove.append(node)
for node in nodes_to_remove:
graph.remove_node(node)
nx.write_graphml(graph,'/tmp/tmp.graphml')
import igraph
G=igraph.read('/tmp/tmp.graphml')
if use_infomap:
c=G.community_infomap()
infomap_ext='_infomap'
else:
c=G.community_multilevel()
infomap_ext='multilevel'
labels=c.membership
print 'modularity:',c.modularity
for i in range(len(G.vs)):
graph.node[G.vs[i]['id']]['module']=labels[i]
nx.write_gexf(graph,os.path.join(basedir,'timeseries/graph_thresh%.02f%s%s.gexf'%(thresh,filt,infomap_ext)))
nx.write_gml(graph,os.path.join(basedir,'timeseries/graph_thresh%.02f%s%s.gml'%(thresh,filt,infomap_ext)))
nx.write_graphml(graph,os.path.join(basedir,'timeseries/graph_thresh%.02f%s%s.graphml'%(thresh,filt,infomap_ext)))
for i in numpy.unique(labels):
print ''
print 'module',i
for n in graph.nodes():
if graph.node[n]['module']==i:
print n
| []
| []
| [
"MYCONNECTOME_DIR"
]
| [] | ["MYCONNECTOME_DIR"] | python | 1 | 0 | |
lib/controller/federation_test.go | // Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0
package controller
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
"net/url"
"os"
"strings"
"time"
"git.arvados.org/arvados.git/sdk/go/arvados"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
"git.arvados.org/arvados.git/sdk/go/ctxlog"
"git.arvados.org/arvados.git/sdk/go/httpserver"
"git.arvados.org/arvados.git/sdk/go/keepclient"
"github.com/sirupsen/logrus"
check "gopkg.in/check.v1"
)
// Gocheck boilerplate
var _ = check.Suite(&FederationSuite{})
type FederationSuite struct {
log logrus.FieldLogger
// testServer and testHandler are the controller being tested,
// "zhome".
testServer *httpserver.Server
testHandler *Handler
// remoteServer ("zzzzz") forwards requests to the Rails API
// provided by the integration test environment.
remoteServer *httpserver.Server
// remoteMock ("zmock") appends each incoming request to
// remoteMockRequests, and returns 200 with an empty JSON
// object.
remoteMock *httpserver.Server
remoteMockRequests []http.Request
}
func (s *FederationSuite) SetUpTest(c *check.C) {
s.log = ctxlog.TestLogger(c)
s.remoteServer = newServerFromIntegrationTestEnv(c)
c.Assert(s.remoteServer.Start(), check.IsNil)
s.remoteMock = newServerFromIntegrationTestEnv(c)
s.remoteMock.Server.Handler = http.HandlerFunc(s.remoteMockHandler)
c.Assert(s.remoteMock.Start(), check.IsNil)
cluster := &arvados.Cluster{
ClusterID: "zhome",
PostgreSQL: integrationTestCluster().PostgreSQL,
}
cluster.TLS.Insecure = true
cluster.API.MaxItemsPerResponse = 1000
cluster.API.MaxRequestAmplification = 4
cluster.API.RequestTimeout = arvados.Duration(5 * time.Minute)
cluster.Collections.BlobSigning = true
cluster.Collections.BlobSigningKey = arvadostest.BlobSigningKey
cluster.Collections.BlobSigningTTL = arvados.Duration(time.Hour * 24 * 14)
arvadostest.SetServiceURL(&cluster.Services.RailsAPI, "http://localhost:1/")
arvadostest.SetServiceURL(&cluster.Services.Controller, "http://localhost:/")
s.testHandler = &Handler{Cluster: cluster}
s.testServer = newServerFromIntegrationTestEnv(c)
s.testServer.Server.BaseContext = func(net.Listener) context.Context {
return ctxlog.Context(context.Background(), s.log)
}
s.testServer.Server.Handler = httpserver.AddRequestIDs(httpserver.LogRequests(s.testHandler))
cluster.RemoteClusters = map[string]arvados.RemoteCluster{
"zzzzz": {
Host: s.remoteServer.Addr,
Proxy: true,
Scheme: "http",
},
"zmock": {
Host: s.remoteMock.Addr,
Proxy: true,
Scheme: "http",
},
"*": {
Scheme: "https",
},
}
c.Assert(s.testServer.Start(), check.IsNil)
s.remoteMockRequests = nil
}
func (s *FederationSuite) remoteMockHandler(w http.ResponseWriter, req *http.Request) {
b := &bytes.Buffer{}
io.Copy(b, req.Body)
req.Body.Close()
req.Body = ioutil.NopCloser(b)
s.remoteMockRequests = append(s.remoteMockRequests, *req)
	// Respond 200 with a valid JSON object.
fmt.Fprint(w, "{}")
}
func (s *FederationSuite) TearDownTest(c *check.C) {
if s.remoteServer != nil {
s.remoteServer.Close()
}
if s.testServer != nil {
s.testServer.Close()
}
}
func (s *FederationSuite) testRequest(req *http.Request) *httptest.ResponseRecorder {
resp := httptest.NewRecorder()
s.testServer.Server.Handler.ServeHTTP(resp, req)
return resp
}
func (s *FederationSuite) TestLocalRequest(c *check.C) {
req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, "zzzzz-", "zhome-", 1), nil)
resp := s.testRequest(req).Result()
s.checkHandledLocally(c, resp)
}
func (s *FederationSuite) checkHandledLocally(c *check.C, resp *http.Response) {
// Our "home" controller can't handle local requests because
// it doesn't have its own stub/test Rails API, so we rely on
// "connection refused" to indicate the controller tried to
// proxy the request to its local Rails API.
c.Check(resp.StatusCode, check.Equals, http.StatusBadGateway)
s.checkJSONErrorMatches(c, resp, `.*connection refused`)
}
func (s *FederationSuite) TestNoAuth(c *check.C) {
req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusUnauthorized)
s.checkJSONErrorMatches(c, resp, `Not logged in.*`)
}
func (s *FederationSuite) TestBadAuth(c *check.C) {
req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)
req.Header.Set("Authorization", "Bearer aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusUnauthorized)
s.checkJSONErrorMatches(c, resp, `Not logged in.*`)
}
func (s *FederationSuite) TestNoAccess(c *check.C) {
req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.SpectatorToken)
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
s.checkJSONErrorMatches(c, resp, `.*not found.*`)
}
func (s *FederationSuite) TestGetUnknownRemote(c *check.C) {
req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, "zzzzz-", "zz404-", 1), nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
s.checkJSONErrorMatches(c, resp, `.*no proxy available for cluster zz404`)
}
func (s *FederationSuite) TestRemoteError(c *check.C) {
rc := s.testHandler.Cluster.RemoteClusters["zzzzz"]
rc.Scheme = "https"
s.testHandler.Cluster.RemoteClusters["zzzzz"] = rc
req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusBadGateway)
s.checkJSONErrorMatches(c, resp, `.*HTTP response to HTTPS client`)
}
func (s *FederationSuite) TestGetRemoteWorkflow(c *check.C) {
req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
var wf arvados.Workflow
c.Check(json.NewDecoder(resp.Body).Decode(&wf), check.IsNil)
c.Check(wf.UUID, check.Equals, arvadostest.WorkflowWithDefinitionYAMLUUID)
c.Check(wf.OwnerUUID, check.Equals, arvadostest.ActiveUserUUID)
}
func (s *FederationSuite) TestOptionsMethod(c *check.C) {
req := httptest.NewRequest("OPTIONS", "/arvados/v1/workflows/"+arvadostest.WorkflowWithDefinitionYAMLUUID, nil)
req.Header.Set("Origin", "https://example.com")
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
body, err := ioutil.ReadAll(resp.Body)
c.Check(err, check.IsNil)
c.Check(string(body), check.Equals, "")
c.Check(resp.Header.Get("Access-Control-Allow-Origin"), check.Equals, "*")
for _, hdr := range []string{"Authorization", "Content-Type"} {
c.Check(resp.Header.Get("Access-Control-Allow-Headers"), check.Matches, ".*"+hdr+".*")
}
for _, method := range []string{"GET", "HEAD", "PUT", "POST", "DELETE"} {
c.Check(resp.Header.Get("Access-Control-Allow-Methods"), check.Matches, ".*"+method+".*")
}
}
func (s *FederationSuite) TestRemoteWithTokenInQuery(c *check.C) {
req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, "zzzzz-", "zmock-", 1)+"?api_token="+arvadostest.ActiveToken, nil)
s.testRequest(req).Result()
c.Assert(s.remoteMockRequests, check.HasLen, 1)
pr := s.remoteMockRequests[0]
// Token is salted and moved from query to Authorization header.
c.Check(pr.URL.String(), check.Not(check.Matches), `.*api_token=.*`)
c.Check(pr.Header.Get("Authorization"), check.Equals, "Bearer v2/zzzzz-gj3su-077z32aux8dg2s1/7fd31b61f39c0e82a4155592163218272cedacdc")
}
func (s *FederationSuite) TestLocalTokenSalted(c *check.C) {
defer s.localServiceReturns404(c).Close()
for _, path := range []string{
// During the transition to the strongly typed
// controller implementation (#14287), workflows and
// collections test different code paths.
"/arvados/v1/workflows/" + strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, "zzzzz-", "zmock-", 1),
"/arvados/v1/collections/" + strings.Replace(arvadostest.UserAgreementCollection, "zzzzz-", "zmock-", 1),
} {
c.Log("testing path ", path)
s.remoteMockRequests = nil
req := httptest.NewRequest("GET", path, nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
s.testRequest(req).Result()
c.Assert(s.remoteMockRequests, check.HasLen, 1)
pr := s.remoteMockRequests[0]
// The salted token here has a "zzzzz-" UUID instead of a
// "ztest-" UUID because ztest's local database has the
// "zzzzz-" test fixtures. The "secret" part is HMAC(sha1,
// arvadostest.ActiveToken, "zmock") = "7fd3...".
c.Check(pr.Header.Get("Authorization"), check.Equals, "Bearer v2/zzzzz-gj3su-077z32aux8dg2s1/7fd31b61f39c0e82a4155592163218272cedacdc")
}
}
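// Example (hedged): a minimal sketch of the salted-token computation the
// comment above describes. It assumes the salted secret is HMAC-SHA1 keyed
// by the original token, with the remote cluster ID as the message; that
// ordering is inferred from the comment, not confirmed against Arvados'
// salting code. exampleSaltedSecret is a hypothetical helper and would need
// "crypto/hmac", "crypto/sha1" and "fmt" imported.
func exampleSaltedSecret(token, remoteClusterID string) string {
	mac := hmac.New(sha1.New, []byte(token))
	mac.Write([]byte(remoteClusterID))
	// Under the assumed ordering, exampleSaltedSecret(arvadostest.ActiveToken,
	// "zmock") would yield "7fd31b61f39c0e82a4155592163218272cedacdc".
	return fmt.Sprintf("%x", mac.Sum(nil))
}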
func (s *FederationSuite) TestRemoteTokenNotSalted(c *check.C) {
defer s.localServiceReturns404(c).Close()
// remoteToken can be any v1 token that doesn't appear in
// ztest's local db.
remoteToken := "abcdef00000000000000000000000000000000000000000000"
for _, path := range []string{
// During the transition to the strongly typed
// controller implementation (#14287), workflows and
// collections test different code paths.
"/arvados/v1/workflows/" + strings.Replace(arvadostest.WorkflowWithDefinitionYAMLUUID, "zzzzz-", "zmock-", 1),
"/arvados/v1/collections/" + strings.Replace(arvadostest.UserAgreementCollection, "zzzzz-", "zmock-", 1),
} {
c.Log("testing path ", path)
s.remoteMockRequests = nil
req := httptest.NewRequest("GET", path, nil)
req.Header.Set("Authorization", "Bearer "+remoteToken)
s.testRequest(req).Result()
c.Assert(s.remoteMockRequests, check.HasLen, 1)
pr := s.remoteMockRequests[0]
c.Check(pr.Header.Get("Authorization"), check.Equals, "Bearer "+remoteToken)
}
}
func (s *FederationSuite) TestWorkflowCRUD(c *check.C) {
var wf arvados.Workflow
{
req := httptest.NewRequest("POST", "/arvados/v1/workflows", strings.NewReader(url.Values{
"workflow": {`{"description": "TestCRUD"}`},
}.Encode()))
req.Header.Set("Content-type", "application/x-www-form-urlencoded")
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
rec := httptest.NewRecorder()
s.remoteServer.Server.Handler.ServeHTTP(rec, req) // sent directly to remote -- a create request can't be proxied because it has no UUID yet
resp := rec.Result()
s.checkResponseOK(c, resp)
json.NewDecoder(resp.Body).Decode(&wf)
defer func() {
req := httptest.NewRequest("DELETE", "/arvados/v1/workflows/"+wf.UUID, nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
s.remoteServer.Server.Handler.ServeHTTP(httptest.NewRecorder(), req)
}()
c.Check(wf.UUID, check.Not(check.Equals), "")
c.Assert(wf.ModifiedAt, check.NotNil)
c.Logf("wf.ModifiedAt: %v", wf.ModifiedAt)
c.Check(time.Since(*wf.ModifiedAt) < time.Minute, check.Equals, true)
}
for _, method := range []string{"PATCH", "PUT", "POST"} {
form := url.Values{
"workflow": {`{"description": "Updated with ` + method + `"}`},
}
if method == "POST" {
form["_method"] = []string{"PATCH"}
}
req := httptest.NewRequest(method, "/arvados/v1/workflows/"+wf.UUID, strings.NewReader(form.Encode()))
req.Header.Set("Content-type", "application/x-www-form-urlencoded")
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req).Result()
s.checkResponseOK(c, resp)
err := json.NewDecoder(resp.Body).Decode(&wf)
c.Check(err, check.IsNil)
c.Check(wf.Description, check.Equals, "Updated with "+method)
}
{
req := httptest.NewRequest("DELETE", "/arvados/v1/workflows/"+wf.UUID, nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req).Result()
s.checkResponseOK(c, resp)
err := json.NewDecoder(resp.Body).Decode(&wf)
c.Check(err, check.IsNil)
}
{
req := httptest.NewRequest("GET", "/arvados/v1/workflows/"+wf.UUID, nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
}
}
func (s *FederationSuite) checkResponseOK(c *check.C, resp *http.Response) {
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
if resp.StatusCode != http.StatusOK {
body, err := ioutil.ReadAll(resp.Body)
c.Logf("... response body = %q, %v\n", body, err)
}
}
func (s *FederationSuite) checkJSONErrorMatches(c *check.C, resp *http.Response, re string) {
var jresp httpserver.ErrorResponse
err := json.NewDecoder(resp.Body).Decode(&jresp)
c.Check(err, check.IsNil)
c.Assert(jresp.Errors, check.HasLen, 1)
c.Check(jresp.Errors[0], check.Matches, re)
}
func (s *FederationSuite) localServiceHandler(c *check.C, h http.Handler) *httpserver.Server {
srv := &httpserver.Server{
Server: http.Server{
Handler: h,
},
}
c.Assert(srv.Start(), check.IsNil)
arvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, "http://"+srv.Addr)
return srv
}
func (s *FederationSuite) localServiceReturns404(c *check.C) *httpserver.Server {
return s.localServiceHandler(c, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
if req.URL.Path == "/arvados/v1/api_client_authorizations/current" {
if req.Header.Get("Authorization") == "Bearer "+arvadostest.ActiveToken {
json.NewEncoder(w).Encode(arvados.APIClientAuthorization{UUID: arvadostest.ActiveTokenUUID, APIToken: arvadostest.ActiveToken, Scopes: []string{"all"}})
} else {
w.WriteHeader(http.StatusUnauthorized)
}
} else if req.URL.Path == "/arvados/v1/users/current" {
if req.Header.Get("Authorization") == "Bearer "+arvadostest.ActiveToken {
json.NewEncoder(w).Encode(arvados.User{UUID: arvadostest.ActiveUserUUID})
} else {
w.WriteHeader(http.StatusUnauthorized)
}
} else {
w.WriteHeader(404)
}
}))
}
func (s *FederationSuite) TestGetLocalCollection(c *check.C) {
s.testHandler.Cluster.ClusterID = "zzzzz"
arvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
// HTTP GET
req := httptest.NewRequest("GET", "/arvados/v1/collections/"+arvadostest.UserAgreementCollection, nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
var col arvados.Collection
c.Check(json.NewDecoder(resp.Body).Decode(&col), check.IsNil)
c.Check(col.UUID, check.Equals, arvadostest.UserAgreementCollection)
c.Check(col.ManifestText, check.Matches,
`\. 6a4ff0499484c6c79c95cd8c566bd25f\+249025\+A[0-9a-f]{40}@[0-9a-f]{8} 0:249025:GNU_General_Public_License,_version_3.pdf
`)
// HTTP POST with _method=GET as a form parameter
req = httptest.NewRequest("POST", "/arvados/v1/collections/"+arvadostest.UserAgreementCollection, bytes.NewBufferString((url.Values{
"_method": {"GET"},
}).Encode()))
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=UTF-8")
resp = s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
col = arvados.Collection{}
c.Check(json.NewDecoder(resp.Body).Decode(&col), check.IsNil)
c.Check(col.UUID, check.Equals, arvadostest.UserAgreementCollection)
c.Check(col.ManifestText, check.Matches,
`\. 6a4ff0499484c6c79c95cd8c566bd25f\+249025\+A[0-9a-f]{40}@[0-9a-f]{8} 0:249025:GNU_General_Public_License,_version_3.pdf
`)
}
func (s *FederationSuite) TestGetRemoteCollection(c *check.C) {
defer s.localServiceReturns404(c).Close()
req := httptest.NewRequest("GET", "/arvados/v1/collections/"+arvadostest.UserAgreementCollection, nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
var col arvados.Collection
c.Check(json.NewDecoder(resp.Body).Decode(&col), check.IsNil)
c.Check(col.UUID, check.Equals, arvadostest.UserAgreementCollection)
c.Check(col.ManifestText, check.Matches,
`\. 6a4ff0499484c6c79c95cd8c566bd25f\+249025\+Rzzzzz-[0-9a-f]{40}@[0-9a-f]{8} 0:249025:GNU_General_Public_License,_version_3.pdf
`)
}
func (s *FederationSuite) TestGetRemoteCollectionError(c *check.C) {
defer s.localServiceReturns404(c).Close()
req := httptest.NewRequest("GET", "/arvados/v1/collections/zzzzz-4zz18-fakefakefakefak", nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
}
func (s *FederationSuite) TestSignedLocatorPattern(c *check.C) {
// Confirm the regular expression identifies the various groups of hints correctly
c.Check(keepclient.SignedLocatorRe.FindStringSubmatch(`6a4ff0499484c6c79c95cd8c566bd25f+249025+B1+C2+A05227438989d04712ea9ca1c91b556cef01d5cc7@5ba5405b+D3+E4`),
check.DeepEquals,
[]string{"6a4ff0499484c6c79c95cd8c566bd25f+249025+B1+C2+A05227438989d04712ea9ca1c91b556cef01d5cc7@5ba5405b+D3+E4",
"6a4ff0499484c6c79c95cd8c566bd25f",
"+249025",
"+B1+C2", "+C2",
"+A05227438989d04712ea9ca1c91b556cef01d5cc7@5ba5405b",
"05227438989d04712ea9ca1c91b556cef01d5cc7", "5ba5405b",
"+D3+E4", "+E4"})
}
func (s *FederationSuite) TestGetLocalCollectionByPDH(c *check.C) {
arvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
req := httptest.NewRequest("GET", "/arvados/v1/collections/"+arvadostest.UserAgreementPDH, nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
var col arvados.Collection
c.Check(json.NewDecoder(resp.Body).Decode(&col), check.IsNil)
c.Check(col.PortableDataHash, check.Equals, arvadostest.UserAgreementPDH)
c.Check(col.ManifestText, check.Matches,
`\. 6a4ff0499484c6c79c95cd8c566bd25f\+249025\+A[0-9a-f]{40}@[0-9a-f]{8} 0:249025:GNU_General_Public_License,_version_3.pdf
`)
}
func (s *FederationSuite) TestGetRemoteCollectionByPDH(c *check.C) {
defer s.localServiceReturns404(c).Close()
req := httptest.NewRequest("GET", "/arvados/v1/collections/"+arvadostest.UserAgreementPDH, nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
var col arvados.Collection
c.Check(json.NewDecoder(resp.Body).Decode(&col), check.IsNil)
c.Check(col.PortableDataHash, check.Equals, arvadostest.UserAgreementPDH)
c.Check(col.ManifestText, check.Matches,
`\. 6a4ff0499484c6c79c95cd8c566bd25f\+249025\+Rzzzzz-[0-9a-f]{40}@[0-9a-f]{8} 0:249025:GNU_General_Public_License,_version_3.pdf
`)
}
func (s *FederationSuite) TestGetCollectionByPDHError(c *check.C) {
defer s.localServiceReturns404(c).Close()
// zmock's normal response (200 with an empty JSON object) would
// change the outcome from 404 to 502
delete(s.testHandler.Cluster.RemoteClusters, "zmock")
req := httptest.NewRequest("GET", "/arvados/v1/collections/99999999999999999999999999999999+99", nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req).Result()
defer resp.Body.Close()
c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
}
func (s *FederationSuite) TestGetCollectionByPDHErrorBadHash(c *check.C) {
defer s.localServiceReturns404(c).Close()
// zmock's normal response (200 with an empty JSON object) would
// change the outcome
delete(s.testHandler.Cluster.RemoteClusters, "zmock")
srv2 := &httpserver.Server{
Server: http.Server{
Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
w.WriteHeader(200)
// Return a collection where the hash
// of the manifest text doesn't match
// the PDH that was requested.
var col arvados.Collection
col.PortableDataHash = "99999999999999999999999999999999+99"
col.ManifestText = `. 6a4ff0499484c6c79c95cd8c566bd25f\+249025 0:249025:GNU_General_Public_License,_version_3.pdf
`
enc := json.NewEncoder(w)
enc.Encode(col)
}),
},
}
c.Assert(srv2.Start(), check.IsNil)
defer srv2.Close()
// Direct zzzzz to a service that returns a 200 result with a bogus manifest_text
s.testHandler.Cluster.RemoteClusters["zzzzz"] = arvados.RemoteCluster{
Host: srv2.Addr,
Proxy: true,
Scheme: "http",
}
req := httptest.NewRequest("GET", "/arvados/v1/collections/99999999999999999999999999999999+99", nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req).Result()
defer resp.Body.Close()
c.Check(resp.StatusCode, check.Equals, http.StatusBadGateway)
}
func (s *FederationSuite) TestSaltedTokenGetCollectionByPDH(c *check.C) {
arvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
req := httptest.NewRequest("GET", "/arvados/v1/collections/"+arvadostest.UserAgreementPDH, nil)
req.Header.Set("Authorization", "Bearer v2/zzzzz-gj3su-077z32aux8dg2s1/282d7d172b6cfdce364c5ed12ddf7417b2d00065")
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
var col arvados.Collection
c.Check(json.NewDecoder(resp.Body).Decode(&col), check.IsNil)
c.Check(col.PortableDataHash, check.Equals, arvadostest.UserAgreementPDH)
c.Check(col.ManifestText, check.Matches,
`\. 6a4ff0499484c6c79c95cd8c566bd25f\+249025\+A[0-9a-f]{40}@[0-9a-f]{8} 0:249025:GNU_General_Public_License,_version_3.pdf
`)
}
func (s *FederationSuite) TestSaltedTokenGetCollectionByPDHError(c *check.C) {
arvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
// zmock's normal response (200 with an empty JSON object) would
// change the outcome
delete(s.testHandler.Cluster.RemoteClusters, "zmock")
req := httptest.NewRequest("GET", "/arvados/v1/collections/99999999999999999999999999999999+99", nil)
req.Header.Set("Authorization", "Bearer v2/zzzzz-gj3su-077z32aux8dg2s1/282d7d172b6cfdce364c5ed12ddf7417b2d00065")
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
}
func (s *FederationSuite) TestGetRemoteContainerRequest(c *check.C) {
defer s.localServiceReturns404(c).Close()
req := httptest.NewRequest("GET", "/arvados/v1/container_requests/"+arvadostest.QueuedContainerRequestUUID, nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
var cr arvados.ContainerRequest
c.Check(json.NewDecoder(resp.Body).Decode(&cr), check.IsNil)
c.Check(cr.UUID, check.Equals, arvadostest.QueuedContainerRequestUUID)
c.Check(cr.Priority, check.Equals, 1)
}
func (s *FederationSuite) TestUpdateRemoteContainerRequest(c *check.C) {
defer s.localServiceReturns404(c).Close()
setPri := func(pri int) {
req := httptest.NewRequest("PATCH", "/arvados/v1/container_requests/"+arvadostest.QueuedContainerRequestUUID,
strings.NewReader(fmt.Sprintf(`{"container_request": {"priority": %d}}`, pri)))
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
req.Header.Set("Content-type", "application/json")
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
var cr arvados.ContainerRequest
c.Check(json.NewDecoder(resp.Body).Decode(&cr), check.IsNil)
c.Check(cr.UUID, check.Equals, arvadostest.QueuedContainerRequestUUID)
c.Check(cr.Priority, check.Equals, pri)
}
setPri(696)
setPri(1) // Reset fixture so side effect doesn't break other tests.
}
func (s *FederationSuite) TestCreateContainerRequestBadToken(c *check.C) {
defer s.localServiceReturns404(c).Close()
// pass cluster_id via query parameter; this allows arvados-controller
// to avoid parsing the body
req := httptest.NewRequest("POST", "/arvados/v1/container_requests?cluster_id=zzzzz",
strings.NewReader(`{"container_request":{}}`))
req.Header.Set("Authorization", "Bearer abcdefg")
req.Header.Set("Content-type", "application/json")
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusForbidden)
var e map[string][]string
c.Check(json.NewDecoder(resp.Body).Decode(&e), check.IsNil)
c.Check(e["errors"], check.DeepEquals, []string{"invalid API token"})
}
func (s *FederationSuite) TestCreateRemoteContainerRequest(c *check.C) {
defer s.localServiceReturns404(c).Close()
// pass cluster_id via query parameter; this allows arvados-controller
// to avoid parsing the body
req := httptest.NewRequest("POST", "/arvados/v1/container_requests?cluster_id=zzzzz",
strings.NewReader(`{
"container_request": {
"name": "hello world",
"state": "Uncommitted",
"output_path": "/",
"container_image": "123",
"command": ["abc"]
}
}
`))
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
req.Header.Set("Content-type", "application/json")
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
var cr arvados.ContainerRequest
c.Check(json.NewDecoder(resp.Body).Decode(&cr), check.IsNil)
c.Check(cr.Name, check.Equals, "hello world")
c.Check(strings.HasPrefix(cr.UUID, "zzzzz-"), check.Equals, true)
}
// getCRfromMockRequest returns a ContainerRequest with the content of the
// request sent to the remote mock. This function takes into account the
// Content-Type and acts accordingly.
func (s *FederationSuite) getCRfromMockRequest(c *check.C) arvados.ContainerRequest {
// The body can be JSON-formatted or form-encoded, e.g.:
// cluster_id=zmock&container_request=%7B%22command%22%3A%5B%22abc%22%5D%2C%22container_image%22%3A%22123%22%2C%22...7D
// or:
// "{\"container_request\":{\"command\":[\"abc\"],\"container_image\":\"12...Uncommitted\"}}"
var cr arvados.ContainerRequest
data, err := ioutil.ReadAll(s.remoteMockRequests[0].Body)
c.Check(err, check.IsNil)
if s.remoteMockRequests[0].Header.Get("Content-Type") == "application/json" {
// legacy code path sends a JSON request body
var answerCR struct {
ContainerRequest arvados.ContainerRequest `json:"container_request"`
}
c.Check(json.Unmarshal(data, &answerCR), check.IsNil)
cr = answerCR.ContainerRequest
} else if s.remoteMockRequests[0].Header.Get("Content-Type") == "application/x-www-form-urlencoded" {
// new code path sends a form-encoded request body with a JSON-encoded parameter value
decodedValue, err := url.ParseQuery(string(data))
c.Check(err, check.IsNil)
decodedValueCR := decodedValue.Get("container_request")
c.Check(json.Unmarshal([]byte(decodedValueCR), &cr), check.IsNil)
} else {
// The mock must send a Content-Type that we can parse.
c.Fail()
}
return cr
}
func (s *FederationSuite) TestCreateRemoteContainerRequestCheckRuntimeToken(c *check.C) {
// Send request to zmock and check that outgoing request has
// runtime_token set with a new random v2 token.
defer s.localServiceReturns404(c).Close()
req := httptest.NewRequest("POST", "/arvados/v1/container_requests?cluster_id=zmock",
strings.NewReader(`{
"container_request": {
"name": "hello world",
"state": "Uncommitted",
"output_path": "/",
"container_image": "123",
"command": ["abc"]
}
}
`))
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveTokenV2)
req.Header.Set("Content-type", "application/json")
// We replace the zhome values with zzzzz values (RailsAPI, ClusterID, SystemRootToken).
// SystemRootToken is needed because we query
// https://[RailsAPI]/arvados/v1/api_client_authorizations/current,
// https://[RailsAPI]/arvados/v1/users/current, and
// https://[RailsAPI]/auth/controller/callback
arvadostest.SetServiceURL(&s.testHandler.Cluster.Services.RailsAPI, "https://"+os.Getenv("ARVADOS_TEST_API_HOST"))
s.testHandler.Cluster.ClusterID = "zzzzz"
s.testHandler.Cluster.SystemRootToken = arvadostest.SystemRootToken
s.testHandler.Cluster.API.MaxTokenLifetime = arvados.Duration(time.Hour)
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
cr := s.getCRfromMockRequest(c)
// Runtime token must match zzzzz cluster
c.Check(cr.RuntimeToken, check.Matches, "v2/zzzzz-gj3su-.*")
// RuntimeToken must differ from the token we originally made the request with.
c.Check(cr.RuntimeToken, check.Not(check.Equals), arvadostest.ActiveTokenV2)
// Runtime token should not have an expiration based on API.MaxTokenLifetime
req2 := httptest.NewRequest("GET", "/arvados/v1/api_client_authorizations/current", nil)
req2.Header.Set("Authorization", "Bearer "+cr.RuntimeToken)
req2.Header.Set("Content-type", "application/json")
resp = s.testRequest(req2).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
var aca arvados.APIClientAuthorization
c.Check(json.NewDecoder(resp.Body).Decode(&aca), check.IsNil)
c.Check(aca.ExpiresAt, check.NotNil) // Time.Now()+BlobSigningTTL
t, _ := time.Parse(time.RFC3339Nano, aca.ExpiresAt)
c.Check(t.After(time.Now().Add(s.testHandler.Cluster.API.MaxTokenLifetime.Duration())), check.Equals, true)
c.Check(t.Before(time.Now().Add(s.testHandler.Cluster.Collections.BlobSigningTTL.Duration())), check.Equals, true)
}
func (s *FederationSuite) TestCreateRemoteContainerRequestCheckSetRuntimeToken(c *check.C) {
// Send request to zmock and check that outgoing request has
// runtime_token set with the explicitly provided token.
defer s.localServiceReturns404(c).Close()
// pass cluster_id via query parameter; this allows arvados-controller
// to avoid parsing the body
req := httptest.NewRequest("POST", "/arvados/v1/container_requests?cluster_id=zmock",
strings.NewReader(`{
"container_request": {
"name": "hello world",
"state": "Uncommitted",
"output_path": "/",
"container_image": "123",
"command": ["abc"],
"runtime_token": "xyz"
}
}
`))
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
req.Header.Set("Content-type", "application/json")
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
cr := s.getCRfromMockRequest(c)
// Make sure the runtime_token we sent is passed through unchanged.
c.Check(cr.RuntimeToken, check.Equals, "xyz")
}
func (s *FederationSuite) TestCreateRemoteContainerRequestError(c *check.C) {
defer s.localServiceReturns404(c).Close()
// pass cluster_id via query parameter; this allows arvados-controller
// to avoid parsing the body
req := httptest.NewRequest("POST", "/arvados/v1/container_requests?cluster_id=zz404",
strings.NewReader(`{
"container_request": {
"name": "hello world",
"state": "Uncommitted",
"output_path": "/",
"container_image": "123",
"command": ["abc"]
}
}
`))
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
req.Header.Set("Content-type", "application/json")
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusNotFound)
}
func (s *FederationSuite) TestGetRemoteContainer(c *check.C) {
defer s.localServiceReturns404(c).Close()
req := httptest.NewRequest("GET", "/arvados/v1/containers/"+arvadostest.QueuedContainerUUID, nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req)
c.Check(resp.Code, check.Equals, http.StatusOK)
var cn arvados.Container
c.Check(json.NewDecoder(resp.Body).Decode(&cn), check.IsNil)
c.Check(cn.UUID, check.Equals, arvadostest.QueuedContainerUUID)
}
func (s *FederationSuite) TestListRemoteContainer(c *check.C) {
defer s.localServiceReturns404(c).Close()
req := httptest.NewRequest("GET", "/arvados/v1/containers?count=none&filters="+
url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v"]]]`, arvadostest.QueuedContainerUUID)), nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
var cn arvados.ContainerList
c.Check(json.NewDecoder(resp.Body).Decode(&cn), check.IsNil)
c.Assert(cn.Items, check.HasLen, 1)
c.Check(cn.Items[0].UUID, check.Equals, arvadostest.QueuedContainerUUID)
}
func (s *FederationSuite) TestListMultiRemoteContainers(c *check.C) {
defer s.localServiceHandler(c, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
bd, _ := ioutil.ReadAll(req.Body)
c.Check(string(bd), check.Equals, `_method=GET&count=none&filters=%5B%5B%22uuid%22%2C+%22in%22%2C+%5B%22zhome-xvhdp-cr5queuedcontnr%22%5D%5D%5D&select=%5B%22uuid%22%2C+%22command%22%5D`)
w.WriteHeader(200)
w.Write([]byte(`{"kind": "arvados#containerList", "items": [{"uuid": "zhome-xvhdp-cr5queuedcontnr", "command": ["abc"]}]}`))
})).Close()
req := httptest.NewRequest("GET", fmt.Sprintf("/arvados/v1/containers?count=none&filters=%s&select=%s",
url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v", "zhome-xvhdp-cr5queuedcontnr"]]]`,
arvadostest.QueuedContainerUUID)),
url.QueryEscape(`["uuid", "command"]`)),
nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
var cn arvados.ContainerList
c.Check(json.NewDecoder(resp.Body).Decode(&cn), check.IsNil)
c.Check(cn.Items, check.HasLen, 2)
mp := make(map[string]arvados.Container)
for _, cr := range cn.Items {
mp[cr.UUID] = cr
}
c.Check(mp[arvadostest.QueuedContainerUUID].Command, check.DeepEquals, []string{"echo", "hello"})
c.Check(mp[arvadostest.QueuedContainerUUID].ContainerImage, check.Equals, "")
c.Check(mp["zhome-xvhdp-cr5queuedcontnr"].Command, check.DeepEquals, []string{"abc"})
c.Check(mp["zhome-xvhdp-cr5queuedcontnr"].ContainerImage, check.Equals, "")
}
func (s *FederationSuite) TestListMultiRemoteContainerError(c *check.C) {
defer s.localServiceReturns404(c).Close()
req := httptest.NewRequest("GET", fmt.Sprintf("/arvados/v1/containers?count=none&filters=%s&select=%s",
url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v", "zhome-xvhdp-cr5queuedcontnr"]]]`,
arvadostest.QueuedContainerUUID)),
url.QueryEscape(`["uuid", "command"]`)),
nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusBadGateway)
s.checkJSONErrorMatches(c, resp, `error fetching from zhome \(404 Not Found\): EOF`)
}
func (s *FederationSuite) TestListMultiRemoteContainersPaged(c *check.C) {
callCount := 0
defer s.localServiceHandler(c, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
bd, _ := ioutil.ReadAll(req.Body)
if callCount == 0 {
c.Check(string(bd), check.Equals, `_method=GET&count=none&filters=%5B%5B%22uuid%22%2C+%22in%22%2C+%5B%22zhome-xvhdp-cr5queuedcontnr%22%2C%22zhome-xvhdp-cr6queuedcontnr%22%5D%5D%5D`)
w.WriteHeader(200)
w.Write([]byte(`{"kind": "arvados#containerList", "items": [{"uuid": "zhome-xvhdp-cr5queuedcontnr", "command": ["abc"]}]}`))
} else if callCount == 1 {
c.Check(string(bd), check.Equals, `_method=GET&count=none&filters=%5B%5B%22uuid%22%2C+%22in%22%2C+%5B%22zhome-xvhdp-cr6queuedcontnr%22%5D%5D%5D`)
w.WriteHeader(200)
w.Write([]byte(`{"kind": "arvados#containerList", "items": [{"uuid": "zhome-xvhdp-cr6queuedcontnr", "command": ["efg"]}]}`))
}
callCount++
})).Close()
req := httptest.NewRequest("GET", fmt.Sprintf("/arvados/v1/containers?count=none&filters=%s",
url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v", "zhome-xvhdp-cr5queuedcontnr", "zhome-xvhdp-cr6queuedcontnr"]]]`,
arvadostest.QueuedContainerUUID))),
nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
c.Check(callCount, check.Equals, 2)
var cn arvados.ContainerList
c.Check(json.NewDecoder(resp.Body).Decode(&cn), check.IsNil)
c.Check(cn.Items, check.HasLen, 3)
mp := make(map[string]arvados.Container)
for _, cr := range cn.Items {
mp[cr.UUID] = cr
}
c.Check(mp[arvadostest.QueuedContainerUUID].Command, check.DeepEquals, []string{"echo", "hello"})
c.Check(mp["zhome-xvhdp-cr5queuedcontnr"].Command, check.DeepEquals, []string{"abc"})
c.Check(mp["zhome-xvhdp-cr6queuedcontnr"].Command, check.DeepEquals, []string{"efg"})
}
func (s *FederationSuite) TestListMultiRemoteContainersMissing(c *check.C) {
callCount := 0
defer s.localServiceHandler(c, http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
bd, _ := ioutil.ReadAll(req.Body)
if callCount == 0 {
c.Check(string(bd), check.Equals, `_method=GET&count=none&filters=%5B%5B%22uuid%22%2C+%22in%22%2C+%5B%22zhome-xvhdp-cr5queuedcontnr%22%2C%22zhome-xvhdp-cr6queuedcontnr%22%5D%5D%5D`)
w.WriteHeader(200)
w.Write([]byte(`{"kind": "arvados#containerList", "items": [{"uuid": "zhome-xvhdp-cr6queuedcontnr", "command": ["efg"]}]}`))
} else if callCount == 1 {
c.Check(string(bd), check.Equals, `_method=GET&count=none&filters=%5B%5B%22uuid%22%2C+%22in%22%2C+%5B%22zhome-xvhdp-cr5queuedcontnr%22%5D%5D%5D`)
w.WriteHeader(200)
w.Write([]byte(`{"kind": "arvados#containerList", "items": []}`))
}
callCount++
})).Close()
req := httptest.NewRequest("GET", fmt.Sprintf("/arvados/v1/containers?count=none&filters=%s",
url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v", "zhome-xvhdp-cr5queuedcontnr", "zhome-xvhdp-cr6queuedcontnr"]]]`,
arvadostest.QueuedContainerUUID))),
nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusOK)
c.Check(callCount, check.Equals, 2)
var cn arvados.ContainerList
c.Check(json.NewDecoder(resp.Body).Decode(&cn), check.IsNil)
c.Check(cn.Items, check.HasLen, 2)
mp := make(map[string]arvados.Container)
for _, cr := range cn.Items {
mp[cr.UUID] = cr
}
c.Check(mp[arvadostest.QueuedContainerUUID].Command, check.DeepEquals, []string{"echo", "hello"})
c.Check(mp["zhome-xvhdp-cr6queuedcontnr"].Command, check.DeepEquals, []string{"efg"})
}
func (s *FederationSuite) TestListMultiRemoteContainerPageSizeError(c *check.C) {
s.testHandler.Cluster.API.MaxItemsPerResponse = 1
req := httptest.NewRequest("GET", fmt.Sprintf("/arvados/v1/containers?count=none&filters=%s",
url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v", "zhome-xvhdp-cr5queuedcontnr"]]]`,
arvadostest.QueuedContainerUUID))),
nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusBadRequest)
s.checkJSONErrorMatches(c, resp, `Federated multi-object request for 2 objects which is more than max page size 1.`)
}
func (s *FederationSuite) TestListMultiRemoteContainerLimitError(c *check.C) {
req := httptest.NewRequest("GET", fmt.Sprintf("/arvados/v1/containers?count=none&filters=%s&limit=1",
url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v", "zhome-xvhdp-cr5queuedcontnr"]]]`,
arvadostest.QueuedContainerUUID))),
nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusBadRequest)
s.checkJSONErrorMatches(c, resp, `Federated multi-object may not provide 'limit', 'offset' or 'order'.`)
}
func (s *FederationSuite) TestListMultiRemoteContainerOffsetError(c *check.C) {
req := httptest.NewRequest("GET", fmt.Sprintf("/arvados/v1/containers?count=none&filters=%s&offset=1",
url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v", "zhome-xvhdp-cr5queuedcontnr"]]]`,
arvadostest.QueuedContainerUUID))),
nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusBadRequest)
s.checkJSONErrorMatches(c, resp, `Federated multi-object may not provide 'limit', 'offset' or 'order'.`)
}
func (s *FederationSuite) TestListMultiRemoteContainerOrderError(c *check.C) {
req := httptest.NewRequest("GET", fmt.Sprintf("/arvados/v1/containers?count=none&filters=%s&order=uuid",
url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v", "zhome-xvhdp-cr5queuedcontnr"]]]`,
arvadostest.QueuedContainerUUID))),
nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusBadRequest)
s.checkJSONErrorMatches(c, resp, `Federated multi-object may not provide 'limit', 'offset' or 'order'.`)
}
func (s *FederationSuite) TestListMultiRemoteContainerSelectError(c *check.C) {
req := httptest.NewRequest("GET", fmt.Sprintf("/arvados/v1/containers?count=none&filters=%s&select=%s",
url.QueryEscape(fmt.Sprintf(`[["uuid", "in", ["%v", "zhome-xvhdp-cr5queuedcontnr"]]]`,
arvadostest.QueuedContainerUUID)),
url.QueryEscape(`["command"]`)),
nil)
req.Header.Set("Authorization", "Bearer "+arvadostest.ActiveToken)
resp := s.testRequest(req).Result()
c.Check(resp.StatusCode, check.Equals, http.StatusBadRequest)
s.checkJSONErrorMatches(c, resp, `Federated multi-object request must include 'uuid' in 'select'`)
}
| [
"\"ARVADOS_TEST_API_HOST\"",
"\"ARVADOS_TEST_API_HOST\"",
"\"ARVADOS_TEST_API_HOST\"",
"\"ARVADOS_TEST_API_HOST\"",
"\"ARVADOS_TEST_API_HOST\""
]
| []
| [
"ARVADOS_TEST_API_HOST"
]
| [] | ["ARVADOS_TEST_API_HOST"] | go | 1 | 0 | |
app.py | from flask import Flask, render_template, request, url_for
from flask_mail import Mail, Message
from itsdangerous import URLSafeSerializer
from threading import Thread
from flask_sqlalchemy import SQLAlchemy
import random
import os
app = Flask(__name__)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
app.config.update(
DEBUG=True,
# EMAIL SETTINGS
MAIL_SERVER='smtp.gmail.com',
MAIL_PORT=465,
MAIL_USE_SSL=True,
MAIL_USERNAME='[email protected]',
MAIL_PASSWORD=os.environ['EMAIL_PASSWORD'],
SECRET_KEY=os.environ['SPECIAL_KEY'],
MAIL_MAX_EMAILS=1000
)
mail = Mail(app) # Declares a Mail instance
s = URLSafeSerializer(app.config['SECRET_KEY']) # Safe Serializer instance used for unique link gen, obfuscates email
def generate_token(email): # Function to generate token for unique URL
token = s.dumps(email, salt='email-confirm')
return token
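# Example (hedged): the matching verification step for generate_token, as a
# minimal sketch. URLSafeSerializer.loads() with the same salt recovers the
# email and raises BadSignature for tampered tokens; verify_token is a
# hypothetical helper, not used elsewhere in this app.
def verify_token(token):
    from itsdangerous import BadSignature
    try:
        return s.loads(token, salt='email-confirm')
    except BadSignature:
        return None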
def send_thread_email(msg): # Sends the email on a background thread so web requests don't block on SMTP
with app.app_context():
mail.send(msg)
ENV = 'prod'
if ENV == 'dev':
app.debug = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:g4qtyx7v@localhost/test_db'
else:
app.debug = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgres://zyyzysejezblhz:17a351947912f2433f7d4ca45121650d224b002543e633d521e57d4c4bb6d874@ec2-174-129-253-63.compute-1.amazonaws.com:5432/ddui50dco58tad'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class SecretSanta(db.Model):
"""An object representing a person in a Secret Santa group.
Attributes:
member: The person's name (gift giver)
email: Their email
wishlist: The gift recipient's wishlist
partner: The gift recipient
"""
__tablename__ = 'secretsanta'
id = db.Column(db.Integer, primary_key=True)
member = db.Column(db.String(200))
email = db.Column(db.String(200), unique=True)
wishlist = db.Column(db.Text())
partner = db.Column(db.String(200))
def __init__(self, member, email, partner):
self.member = member
self.email = email
self.partner = partner
# Takes a list of unique emails and returns a list where pairs[i] is the gift recipient assigned to emails[i]
def generate_pairings(emails):
f = {} # dict containing name:group
for i, line in enumerate(emails):
group = line.strip().split(" ")
f.update({p: i for p in group})
names = list(f.keys())
while True:
# Shuffle the list until valid
random.shuffle(names)
assignments = {a: b for a, b in zip(names, names[1:] + [names[0]])}
# List is valid
if all([f[a] != f[b] for a, b in assignments.items()]):
break
pairs = [None]*len(names)
for a, b in assignments.items():
pairs[f[a]] = b
return pairs
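# Example (hedged): illustrative usage of generate_pairings with made-up
# addresses. Each entry is one participant (a line could also hold several
# space-separated names forming a group whose members are never paired with
# each other); the result is aligned with the input, so pairs[i] is the
# recipient assigned to emails[i]. One possible (randomized) outcome:
#
#   >>> generate_pairings(["[email protected]", "[email protected]", "[email protected]"])
#   ['[email protected]', '[email protected]', '[email protected]']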
@app.route('/')
def index():
return render_template('index.html')
@app.route('/submit', methods=['POST']) # run when user submits their info
def submit():
if request.method == 'POST':
member = request.form.getlist('member')
email = request.form.getlist('email')
# if len(email) > len(set(email)):
# return render_template('index.html', message="Please don't enter duplicate emails")
pair = generate_pairings(email)
print(member, email, pair)
for ii in range(len(member)):
if member[ii] == '' or email[ii] == '':
return render_template('index.html', message='Please ensure all fields are entered')
else:
if db.session.query(SecretSanta).filter(SecretSanta.email == email[ii]).count() == 0:
data = SecretSanta(
member=member[ii], email=email[ii], partner=pair[ii])
db.session.add(data)
token = generate_token(email[ii]) # Token for unique URL
link = url_for('wishlist', token=token, _external=True) # Unique URL that routes user to wishlist
msg = Message('Hello from Optimal Secret Santa!',
sender='[email protected]',
recipients=[email[ii]])
msg.body = F"Hi {member[ii]},\n\nGreetings from the North Pole!\n\nYou have been added to a Secret Santa group created on optimal-secret-santa.herokuapp.com.\n\nPlease use the below link to fill out the wishlist/message you would like to send your Secret Santa.\n\nLink:{link}\n\nHappy Holidays!\n\nSincerely,\nOptimalSecretSanta"
thr = Thread(target=send_thread_email, args=[msg]) # Create a thread for asynchronous emailing; this prevents web hangs
thr.start()
else:
return render_template('index.html', message='A user with this email is already a part of Secret Santa')
db.session.commit()
return render_template('success.html')
@app.route('/wishlist/')
@app.route('/wishlist/<userid>')
def wishlist(userid=None):
return render_template('wishlist.html', userid=userid)
@app.route('/return', methods=['POST'])
def wish_submit():
if request.method == 'POST':
partner = str(request.form.get('partner'))
wlist = str(request.form.get('wishlist'))
result = SecretSanta.query.filter_by(partner=partner).first()
partner_email = SecretSanta.query.filter_by(email=partner).first()
partner_name = str(partner_email.member)
msg = Message('Your Secret Santa Assignment is in!', # Build the assignment email for the gift giver
sender='[email protected]',
recipients=[result.email])
msg.body = F"Hi {result.member},\n\nYou have been assigned as the Secret Santa for {partner_name}. Their wishlist is included below: \n\n{wlist}\n\nHappy Holidays!\n\nSincerely,\nOptimalSecretSanta"
thr = Thread(target=send_thread_email, args=[msg])
thr.start() # Send email asynchronously in background
return render_template('success_wishlist.html')
if __name__ == '__main__':
app.run(debug=True)
| []
| []
| [
"SPECIAL_KEY",
"EMAIL_PASSWORD"
]
| [] | ["SPECIAL_KEY", "EMAIL_PASSWORD"] | python | 2 | 0 | |
daemon/daemon.go | // Package daemon exposes the functions that occur on the host server
// that the Docker daemon is running.
//
// In implementing the various functions of the daemon, there is often
// a method-specific struct for configuring the runtime behavior.
package daemon
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
"github.com/Sirupsen/logrus"
containerd "github.com/docker/containerd/api/grpc/types"
"github.com/docker/docker/api"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/events"
"github.com/docker/docker/daemon/exec"
"github.com/docker/docker/daemon/initlayer"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/plugin"
"github.com/docker/libnetwork/cluster"
// register graph drivers
_ "github.com/docker/docker/daemon/graphdriver/register"
dmetadata "github.com/docker/docker/distribution/metadata"
"github.com/docker/docker/distribution/xfer"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/libcontainerd"
"github.com/docker/docker/migrate/v1"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/pkg/registrar"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/truncindex"
"github.com/docker/docker/reference"
"github.com/docker/docker/registry"
"github.com/docker/docker/runconfig"
volumedrivers "github.com/docker/docker/volume/drivers"
"github.com/docker/docker/volume/local"
"github.com/docker/docker/volume/store"
"github.com/docker/libnetwork"
nwconfig "github.com/docker/libnetwork/config"
"github.com/docker/libtrust"
"github.com/pkg/errors"
)
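// Example (hedged): a minimal sketch of the "method-specific struct" pattern
// the package comment refers to, using types.ContainerRmConfig exactly as it
// appears in restore() below. exampleRemoveForcefully is a hypothetical
// helper for illustration, not part of the daemon API.
func exampleRemoveForcefully(d *Daemon, containerID string) error {
	// ForceRemove removes the container even if it is running; RemoveVolume
	// also deletes its anonymous volumes.
	return d.ContainerRm(containerID, &types.ContainerRmConfig{
		ForceRemove:  true,
		RemoveVolume: true,
	})
}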
var (
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "docker-runc"
// DefaultInitBinary is the name of the default init binary
DefaultInitBinary = "docker-init"
errSystemNotSupported = fmt.Errorf("The Docker daemon is not supported on this platform.")
)
// Daemon holds information about the Docker daemon.
type Daemon struct {
ID string
repository string
containers container.Store
execCommands *exec.Store
referenceStore reference.Store
downloadManager *xfer.LayerDownloadManager
uploadManager *xfer.LayerUploadManager
distributionMetadataStore dmetadata.Store
trustKey libtrust.PrivateKey
idIndex *truncindex.TruncIndex
configStore *Config
statsCollector *statsCollector
defaultLogConfig containertypes.LogConfig
RegistryService registry.Service
EventsService *events.Events
netController libnetwork.NetworkController
volumes *store.VolumeStore
discoveryWatcher discoveryReloader
root string
seccompEnabled bool
shutdown bool
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
layerStore layer.Store
imageStore image.Store
PluginStore *plugin.Store // todo: remove
pluginManager *plugin.Manager
nameIndex *registrar.Registrar
linkIndex *linkIndex
containerd libcontainerd.Client
containerdRemote libcontainerd.Remote
defaultIsolation containertypes.Isolation // Default isolation mode on Windows
clusterProvider cluster.Provider
cluster Cluster
seccompProfile []byte
seccompProfilePath string
}
// HasExperimental returns whether the experimental features of the daemon are enabled or not
func (daemon *Daemon) HasExperimental() bool {
if daemon.configStore != nil && daemon.configStore.Experimental {
return true
}
return false
}
func (daemon *Daemon) restore() error {
var (
currentDriver = daemon.GraphDriverName()
containers = make(map[string]*container.Container)
)
logrus.Info("Loading containers: start.")
dir, err := ioutil.ReadDir(daemon.repository)
if err != nil {
return err
}
for _, v := range dir {
id := v.Name()
container, err := daemon.load(id)
if err != nil {
logrus.Errorf("Failed to load container %v: %v", id, err)
continue
}
// Ignore the container if it does not support the current driver being used by the graph
if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
rwlayer, err := daemon.layerStore.GetRWLayer(container.ID)
if err != nil {
logrus.Errorf("Failed to load container mount %v: %v", id, err)
continue
}
container.RWLayer = rwlayer
logrus.Debugf("Loaded container %v", container.ID)
containers[container.ID] = container
} else {
logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
}
}
removeContainers := make(map[string]*container.Container)
restartContainers := make(map[*container.Container]chan struct{})
activeSandboxes := make(map[string]interface{})
for id, c := range containers {
if err := daemon.registerName(c); err != nil {
logrus.Errorf("Failed to register container %s: %s", c.ID, err)
delete(containers, id)
continue
}
if err := daemon.Register(c); err != nil {
logrus.Errorf("Failed to register container %s: %s", c.ID, err)
delete(containers, id)
continue
}
// verify that all volumes are valid and have been migrated from the pre-1.7 layout
if err := daemon.verifyVolumesInfo(c); err != nil {
// don't skip the container due to error
logrus.Errorf("Failed to verify volumes for container '%s': %v", c.ID, err)
}
// The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver.
// We should rewrite it to use the daemon defaults.
// Fixes https://github.com/docker/docker/issues/22536
if c.HostConfig.LogConfig.Type == "" {
if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil {
logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err)
continue
}
}
}
var migrateLegacyLinks bool // Not relevant on Windows
var wg sync.WaitGroup
var mapLock sync.Mutex
for _, c := range containers {
wg.Add(1)
go func(c *container.Container) {
defer wg.Done()
if err := backportMountSpec(c); err != nil {
logrus.Error("Failed to migrate old mounts to use new spec format")
}
if c.IsRunning() || c.IsPaused() {
c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking
if err := daemon.containerd.Restore(c.ID, c.InitializeStdio); err != nil {
logrus.Errorf("Failed to restore %s with containerd: %s", c.ID, err)
return
}
// we call Mount and then Unmount to get BaseFs of the container
if err := daemon.Mount(c); err != nil {
// The mount is unlikely to fail. However, in case the mount fails,
// the container should still be allowed to restore here. Some functionality
// (like docker exec -u user) might be missing, but the container can still be
// stopped/restarted/removed.
// See #29365 for related information.
// The error is only logged here.
logrus.Warnf("Failed to mount container on getting BaseFs path %v: %v", c.ID, err)
} else {
// if the mount succeeded, unmount it again
if err := daemon.Unmount(c); err != nil {
logrus.Warnf("Failed to umount container on getting BaseFs path %v: %v", c.ID, err)
}
}
c.ResetRestartManager(false)
if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
options, err := daemon.buildSandboxOptions(c)
if err != nil {
logrus.Warnf("Failed build sandbox option to restore container %s: %v", c.ID, err)
}
mapLock.Lock()
activeSandboxes[c.NetworkSettings.SandboxID] = options
mapLock.Unlock()
}
}
// fixme: only if not running
// get list of containers we need to restart
if !c.IsRunning() && !c.IsPaused() {
// Do not autostart containers that
// have endpoints in a swarm-scope
// network yet, since the cluster is
// not initialized yet. We will start
// them after the cluster is
// initialized.
if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint {
mapLock.Lock()
restartContainers[c] = make(chan struct{})
mapLock.Unlock()
} else if c.HostConfig != nil && c.HostConfig.AutoRemove {
mapLock.Lock()
removeContainers[c.ID] = c
mapLock.Unlock()
}
}
if c.RemovalInProgress {
// We probably crashed in the middle of a removal, reset
// the flag.
//
// We DO NOT remove the container here as we do not
// know if the user had requested for either the
// associated volumes, network links or both to also
// be removed. So we put the container in the "dead"
// state and leave further processing up to them.
logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
c.ResetRemovalInProgress()
c.SetDead()
c.ToDisk()
}
// if c.HostConfig.Links is nil (not just empty), then it is using the old sqlite links and needs to be migrated
if c.HostConfig != nil && c.HostConfig.Links == nil {
migrateLegacyLinks = true
}
}(c)
}
wg.Wait()
daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes)
if err != nil {
return fmt.Errorf("Error initializing network controller: %v", err)
}
// Perform migration of legacy sqlite links (no-op on Windows)
if migrateLegacyLinks {
if err := daemon.sqliteMigration(containers); err != nil {
return err
}
}
// Now that all the containers are registered, register the links
for _, c := range containers {
if err := daemon.registerLinks(c, c.HostConfig); err != nil {
logrus.Errorf("failed to register link for container %s: %v", c.ID, err)
}
}
group := sync.WaitGroup{}
for c, notifier := range restartContainers {
group.Add(1)
go func(c *container.Container, chNotify chan struct{}) {
defer group.Done()
logrus.Debugf("Starting container %s", c.ID)
// ignore errors here as this is a best effort to wait for children to be
// running before we try to start the container
children := daemon.children(c)
timeout := time.After(5 * time.Second)
for _, child := range children {
if notifier, exists := restartContainers[child]; exists {
select {
case <-notifier:
case <-timeout:
}
}
}
// Make sure networks are available before starting
daemon.waitForNetworks(c)
if err := daemon.containerStart(c, "", "", true); err != nil {
logrus.Errorf("Failed to start container %s: %s", c.ID, err)
}
close(chNotify)
}(c, notifier)
}
group.Wait()
removeGroup := sync.WaitGroup{}
for id := range removeContainers {
removeGroup.Add(1)
go func(cid string) {
if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
logrus.Errorf("Failed to remove container %s: %s", cid, err)
}
removeGroup.Done()
}(id)
}
removeGroup.Wait()
// Any containers that were started above have already had this done;
// however, we still need to prepare the mountpoints for the rest of the containers.
// Running this on containers that already had it done causes no issues.
// This must run after any containers with a restart policy, so that containerized plugins
// have a chance to be running before we try to initialize them.
for _, c := range containers {
// If the container has a restart policy, do not
// prepare the mountpoints, since that has already been done on restart.
// This speeds up daemon start when a restarting container
// has a volume and the volume driver is not available.
if _, ok := restartContainers[c]; ok {
continue
} else if _, ok := removeContainers[c.ID]; ok {
// container is automatically removed, skip it.
continue
}
group.Add(1)
go func(c *container.Container) {
defer group.Done()
if err := daemon.prepareMountPoints(c); err != nil {
logrus.Error(err)
}
}(c)
}
group.Wait()
logrus.Info("Loading containers: done.")
return nil
}
// RestartSwarmContainers restarts any autostart container which has a
// swarm endpoint.
func (daemon *Daemon) RestartSwarmContainers() {
group := sync.WaitGroup{}
for _, c := range daemon.List() {
if !c.IsRunning() && !c.IsPaused() {
// Autostart all the containers that have a
// swarm endpoint, now that the cluster is
// initialized.
if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint {
group.Add(1)
go func(c *container.Container) {
defer group.Done()
if err := daemon.containerStart(c, "", "", true); err != nil {
logrus.Error(err)
}
}(c)
}
}
}
group.Wait()
}
// waitForNetworks is used during daemon initialization when starting up containers.
// It ensures that all of a container's networks are available before the daemon tries to start the container.
// In practice it just makes sure the discovery service is available for containers which use a network that require discovery.
func (daemon *Daemon) waitForNetworks(c *container.Container) {
if daemon.discoveryWatcher == nil {
return
}
// If the container has a network that requires discovery, make sure the discovery service is available before starting
for netName := range c.NetworkSettings.Networks {
// If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready
// Most likely this is because the K/V store used for discovery is in a container and needs to be started
if _, err := daemon.netController.NetworkByName(netName); err != nil {
if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
continue
}
// use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host
// FIXME: why is this slow???
logrus.Debugf("Container %s waiting for network to be ready", c.Name)
select {
case <-daemon.discoveryWatcher.ReadyCh():
case <-time.After(60 * time.Second):
}
return
}
}
}
func (daemon *Daemon) children(c *container.Container) map[string]*container.Container {
return daemon.linkIndex.children(c)
}
// parents returns the names of the parent containers of the container
// with the given name.
func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container {
return daemon.linkIndex.parents(c)
}
func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error {
fullName := path.Join(parent.Name, alias)
if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil {
if err == registrar.ErrNameReserved {
logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err)
return nil
}
return err
}
daemon.linkIndex.link(parent, child, fullName)
return nil
}
// SetClusterProvider sets a component for querying the current cluster state.
func (daemon *Daemon) SetClusterProvider(clusterProvider cluster.Provider) {
daemon.clusterProvider = clusterProvider
daemon.netController.SetClusterProvider(clusterProvider)
}
// IsSwarmCompatible verifies if the current daemon
// configuration is compatible with the swarm mode
func (daemon *Daemon) IsSwarmCompatible() error {
if daemon.configStore == nil {
return nil
}
return daemon.configStore.isSwarmCompatible()
}
// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
func NewDaemon(config *Config, registryService registry.Service, containerdRemote libcontainerd.Remote) (daemon *Daemon, err error) {
setDefaultMtu(config)
// Ensure that we have a correct root key limit for launching containers.
if err := ModifyRootKeyLimit(); err != nil {
logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err)
}
// Ensure we have compatible and valid configuration options
if err := verifyDaemonSettings(config); err != nil {
return nil, err
}
// Do we have a disabled network?
config.DisableBridge = isBridgeNetworkDisabled(config)
// Verify the platform is supported as a daemon
if !platformSupported {
return nil, errSystemNotSupported
}
// Validate platform-specific requirements
if err := checkSystem(); err != nil {
return nil, err
}
uidMaps, gidMaps, err := setupRemappedRoot(config)
if err != nil {
return nil, err
}
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return nil, err
}
if err := setupDaemonProcess(config); err != nil {
return nil, err
}
// set up the tmpDir to use a canonical path
tmp, err := tempDir(config.Root, rootUID, rootGID)
if err != nil {
return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
}
realTmp, err := fileutils.ReadSymlinkedDirectory(tmp)
if err != nil {
return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
}
os.Setenv("TMPDIR", realTmp)
d := &Daemon{configStore: config}
// Ensure the daemon is properly shut down if there is a failure during
// initialization.
defer func() {
if err != nil {
if err := d.Shutdown(); err != nil {
logrus.Error(err)
}
}
}()
if err := d.setupSeccompProfile(); err != nil {
return nil, err
}
// Set the default isolation mode (only applicable on Windows)
if err := d.setDefaultIsolation(); err != nil {
return nil, fmt.Errorf("error setting default isolation mode: %v", err)
}
logrus.Debugf("Using default logging driver %s", config.LogConfig.Type)
if err := configureMaxThreads(config); err != nil {
logrus.Warnf("Failed to configure golang's threads limit: %v", err)
}
if err := ensureDefaultAppArmorProfile(); err != nil {
logrus.Errorf(err.Error())
}
daemonRepo := filepath.Join(config.Root, "containers")
if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
return nil, err
}
if runtime.GOOS == "windows" {
if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0); err != nil && !os.IsExist(err) {
return nil, err
}
}
driverName := os.Getenv("DOCKER_DRIVER")
if driverName == "" {
driverName = config.GraphDriver
}
d.RegistryService = registryService
d.PluginStore = plugin.NewStore(config.Root) // todo: remove
// Plugin system initialization should happen before restore. Do not change order.
d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{
Root: filepath.Join(config.Root, "plugins"),
ExecRoot: "/run/docker/plugins", // possibly needs fixing
Store: d.PluginStore,
Executor: containerdRemote,
RegistryService: registryService,
LiveRestoreEnabled: config.LiveRestoreEnabled,
LogPluginEvent: d.LogPluginEvent, // todo: make private
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create plugin manager")
}
d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{
StorePath: config.Root,
MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
GraphDriver: driverName,
GraphDriverOptions: config.GraphOptions,
UIDMaps: uidMaps,
GIDMaps: gidMaps,
PluginGetter: d.PluginStore,
ExperimentalEnabled: config.Experimental,
})
if err != nil {
return nil, err
}
graphDriver := d.layerStore.DriverName()
imageRoot := filepath.Join(config.Root, "image", graphDriver)
// Configure and validate the kernel's security support
if err := configureKernelSecuritySupport(config, graphDriver); err != nil {
return nil, err
}
logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads)
d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, *config.MaxConcurrentDownloads)
logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads)
d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads)
ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
if err != nil {
return nil, err
}
d.imageStore, err = image.NewImageStore(ifs, d.layerStore)
if err != nil {
return nil, err
}
// Configure the volumes driver
volStore, err := d.configureVolumes(rootUID, rootGID)
if err != nil {
return nil, err
}
trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
if err != nil {
return nil, err
}
trustDir := filepath.Join(config.Root, "trust")
if err := system.MkdirAll(trustDir, 0700); err != nil {
return nil, err
}
distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution"))
if err != nil {
return nil, err
}
eventsService := events.New()
referenceStore, err := reference.NewReferenceStore(filepath.Join(imageRoot, "repositories.json"))
if err != nil {
return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err)
}
migrationStart := time.Now()
if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil {
logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
}
logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())
// Discovery is only enabled when the daemon is launched with an address to advertise. When
// initialized, the daemon is registered and we can store the discovery backend as it is read-only.
if err := d.initDiscovery(config); err != nil {
return nil, err
}
sysInfo := sysinfo.New(false)
// Check if the devices cgroup is mounted; it is a hard requirement for container security
// on Linux.
if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled {
return nil, fmt.Errorf("Devices cgroup isn't mounted")
}
d.ID = trustKey.PublicKey().KeyID()
d.repository = daemonRepo
d.containers = container.NewMemoryStore()
d.execCommands = exec.NewStore()
d.referenceStore = referenceStore
d.distributionMetadataStore = distributionMetadataStore
d.trustKey = trustKey
d.idIndex = truncindex.NewTruncIndex([]string{})
d.statsCollector = d.newStatsCollector(1 * time.Second)
d.defaultLogConfig = containertypes.LogConfig{
Type: config.LogConfig.Type,
Config: config.LogConfig.Config,
}
d.EventsService = eventsService
d.volumes = volStore
d.root = config.Root
d.uidMaps = uidMaps
d.gidMaps = gidMaps
d.seccompEnabled = sysInfo.Seccomp
d.nameIndex = registrar.NewRegistrar()
d.linkIndex = newLinkIndex()
d.containerdRemote = containerdRemote
go d.execCommandGC()
d.containerd, err = containerdRemote.Client(d)
if err != nil {
return nil, err
}
if err := d.restore(); err != nil {
return nil, err
}
// FIXME: this method never returns an error
info, _ := d.SystemInfo()
engineVersion.WithValues(
dockerversion.Version,
dockerversion.GitCommit,
info.Architecture,
info.Driver,
info.KernelVersion,
info.OperatingSystem,
).Set(1)
engineCpus.Set(float64(info.NCPU))
engineMemory.Set(float64(info.MemTotal))
// set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
// on Windows to dump Go routine stacks
stackDumpDir := config.Root
if execRoot := config.GetExecRoot(); execRoot != "" {
stackDumpDir = execRoot
}
d.setupDumpStackTrap(stackDumpDir)
return d, nil
}
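// shutdownContainer stops a single container during daemon shutdown. A paused
// container is sent SIGTERM and unpaused first; a container that does not exit
// within its stop timeout is killed with SIGKILL.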
func (daemon *Daemon) shutdownContainer(c *container.Container) error {
stopTimeout := c.StopTimeout()
// TODO(windows): Handle docker restart with paused containers
if c.IsPaused() {
// To terminate a process in the freezer cgroup, we should send
// SIGTERM to the process and then unfreeze it; the process will
// then be forced to terminate immediately.
logrus.Debugf("Found container %s is paused, sending SIGTERM before unpausing it", c.ID)
sig, ok := signal.SignalMap["TERM"]
if !ok {
return fmt.Errorf("System does not support SIGTERM")
}
if err := daemon.kill(c, int(sig)); err != nil {
return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err)
}
if err := daemon.containerUnpause(c); err != nil {
return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err)
}
if _, err := c.WaitStop(time.Duration(stopTimeout) * time.Second); err != nil {
logrus.Debugf("container %s failed to exit in %d second of SIGTERM, sending SIGKILL to force", c.ID, stopTimeout)
sig, ok := signal.SignalMap["KILL"]
if !ok {
return fmt.Errorf("System does not support SIGKILL")
}
if err := daemon.kill(c, int(sig)); err != nil {
logrus.Errorf("Failed to SIGKILL container %s", c.ID)
}
c.WaitStop(-1 * time.Second)
return err
}
}
// If the container failed to exit within stopTimeout seconds of SIGTERM, use the force
if err := daemon.containerStop(c, stopTimeout); err != nil {
return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err)
}
c.WaitStop(-1 * time.Second)
return nil
}
// ShutdownTimeout returns the shutdown timeout to use when stopping all containers: the daemon's
// configured ShutdownTimeout, raised to the largest container stop timeout plus a 5 second grace period.
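// For example, with a configured ShutdownTimeout of 15 and containers whose stop
// timeouts are 10 and 30, the result is 35 (30 plus the 5 second grace period).
// A container stop timeout of -1 makes the result -1 (wait indefinitely).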
func (daemon *Daemon) ShutdownTimeout() int {
// By default we use daemon's ShutdownTimeout.
shutdownTimeout := daemon.configStore.ShutdownTimeout
graceTimeout := 5
if daemon.containers != nil {
for _, c := range daemon.containers.List() {
if shutdownTimeout >= 0 {
stopTimeout := c.StopTimeout()
if stopTimeout < 0 {
shutdownTimeout = -1
} else {
if stopTimeout+graceTimeout > shutdownTimeout {
shutdownTimeout = stopTimeout + graceTimeout
}
}
}
}
}
return shutdownTimeout
}
// Shutdown stops the daemon.
func (daemon *Daemon) Shutdown() error {
daemon.shutdown = true
// Keep mounts and networking running on daemon shutdown if
// we are to keep containers running and restore them.
if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil {
// check if there are any running containers; if none, we should do some cleanup
if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil {
return nil
}
}
if daemon.containers != nil {
logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.configStore.ShutdownTimeout)
daemon.containers.ApplyAll(func(c *container.Container) {
if !c.IsRunning() {
return
}
logrus.Debugf("stopping %s", c.ID)
if err := daemon.shutdownContainer(c); err != nil {
logrus.Errorf("Stop container error: %v", err)
return
}
if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil {
daemon.cleanupMountsByID(mountid)
}
logrus.Debugf("container stopped %s", c.ID)
})
}
if daemon.volumes != nil {
if err := daemon.volumes.Shutdown(); err != nil {
logrus.Errorf("Error shutting down volume store: %v", err)
}
}
if daemon.layerStore != nil {
if err := daemon.layerStore.Cleanup(); err != nil {
logrus.Errorf("Error during layer Store.Cleanup(): %v", err)
}
}
// Shutdown plugins after containers and layerstore. Don't change the order.
daemon.pluginShutdown()
// trigger libnetwork Stop only if it's initialized
if daemon.netController != nil {
daemon.netController.Stop()
}
if err := daemon.cleanupMounts(); err != nil {
return err
}
return nil
}
// Mount sets container.BaseFS
// (is it not set coming in? why is it unset?)
func (daemon *Daemon) Mount(container *container.Container) error {
dir, err := container.RWLayer.Mount(container.GetMountLabel())
if err != nil {
return err
}
logrus.Debugf("container mounted via layerStore: %v", dir)
if container.BaseFS != dir {
// The mount path reported by the graph driver should always be trusted on Windows, since the
// volume path for a given mounted layer may change over time. This should only be an error
// on non-Windows operating systems.
if container.BaseFS != "" && runtime.GOOS != "windows" {
daemon.Unmount(container)
return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
daemon.GraphDriverName(), container.ID, container.BaseFS, dir)
}
}
container.BaseFS = dir // TODO: combine these fields
return nil
}
// Unmount unsets the container base filesystem
func (daemon *Daemon) Unmount(container *container.Container) error {
logrus.Debugf("[Unmount] Begin - container:%v", container.ID)
// [Important] UnmountDevice, deactivateDevice, removeDevice, deactivateDevice
if err := container.RWLayer.Unmount(); err != nil {
logrus.Errorf("Error unmounting container %s: %s", container.ID, err)
return err
}
logrus.Debugf("[Unmount] End - container:%v", container.ID)
return nil
}
// V4Subnets returns the IPv4 subnets of networks that are managed by Docker.
func (daemon *Daemon) V4Subnets() []net.IPNet {
var subnets []net.IPNet
managedNetworks := daemon.netController.Networks()
for _, managedNetwork := range managedNetworks {
v4Infos, _ := managedNetwork.Info().IpamInfo()
for _, v4Info := range v4Infos {
if v4Info.IPAMData.Pool != nil {
subnets = append(subnets, *v4Info.IPAMData.Pool)
}
}
}
return subnets
}
// V6Subnets returns the IPv6 subnets of networks that are managed by Docker.
func (daemon *Daemon) V6Subnets() []net.IPNet {
var subnets []net.IPNet
managedNetworks := daemon.netController.Networks()
for _, managedNetwork := range managedNetworks {
_, v6Infos := managedNetwork.Info().IpamInfo()
for _, v6Info := range v6Infos {
if v6Info.IPAMData.Pool != nil {
subnets = append(subnets, *v6Info.IPAMData.Pool)
}
}
}
return subnets
}
// GraphDriverName returns the name of the graph driver used by the layer.Store
func (daemon *Daemon) GraphDriverName() string {
return daemon.layerStore.DriverName()
}
// GetUIDGIDMaps returns the current daemon's user namespace settings
// for the full uid and gid maps which will be applied to containers
// started in this instance.
func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) {
return daemon.uidMaps, daemon.gidMaps
}
// GetRemappedUIDGID returns the current daemon's uid and gid values
// if user namespaces are in use for this daemon instance. If not
// this function will return "real" root values of 0, 0.
func (daemon *Daemon) GetRemappedUIDGID() (int, int) {
uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps)
return uid, gid
}
// tempDir returns the default directory to use for temporary files.
func tempDir(rootDir string, rootUID, rootGID int) (string, error) {
var tmpDir string
if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
tmpDir = filepath.Join(rootDir, "tmp")
}
return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID)
}
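// setupInitLayer populates initPath with the base directories and files every
// container expects, owned by the daemon's remapped root UID/GID.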
func (daemon *Daemon) setupInitLayer(initPath string) error {
rootUID, rootGID := daemon.GetRemappedUIDGID()
return initlayer.Setup(initPath, rootUID, rootGID)
}
func setDefaultMtu(config *Config) {
// Do nothing if an MTU was explicitly configured (i.e. the value is not the default 0).
if config.Mtu != 0 {
return
}
config.Mtu = defaultNetworkMtu
}
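// configureVolumes registers the built-in local volume driver and returns a
// volume store rooted at the daemon's root directory.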
func (daemon *Daemon) configureVolumes(rootUID, rootGID int) (*store.VolumeStore, error) {
volumesDriver, err := local.New(daemon.configStore.Root, rootUID, rootGID)
if err != nil {
return nil, err
}
volumedrivers.RegisterPluginGetter(daemon.PluginStore)
if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) {
return nil, fmt.Errorf("local volume driver could not be registered")
}
return store.New(daemon.configStore.Root)
}
// IsShuttingDown tells whether the daemon is shutting down or not
func (daemon *Daemon) IsShuttingDown() bool {
return daemon.shutdown
}
// initDiscovery initializes the discovery watcher for this daemon.
func (daemon *Daemon) initDiscovery(config *Config) error {
advertise, err := parseClusterAdvertiseSettings(config.ClusterStore, config.ClusterAdvertise)
if err != nil {
if err == errDiscoveryDisabled {
return nil
}
return err
}
config.ClusterAdvertise = advertise
discoveryWatcher, err := initDiscovery(config.ClusterStore, config.ClusterAdvertise, config.ClusterOpts)
if err != nil {
return fmt.Errorf("discovery initialization failed (%v)", err)
}
daemon.discoveryWatcher = discoveryWatcher
return nil
}
// Reload reads configuration changes and modifies the
// daemon according to those changes.
// These are the settings that Reload changes:
// - Daemon labels.
// - Daemon debug log level.
// - Daemon insecure registries.
// - Daemon max concurrent downloads.
// - Daemon max concurrent uploads.
// - Cluster discovery (reconfigure and restart).
// - Daemon live restore.
// - Daemon shutdown timeout (in seconds).
func (daemon *Daemon) Reload(config *Config) (err error) {
daemon.configStore.reloadLock.Lock()
attributes := daemon.platformReload(config)
defer func() {
// we're unlocking here, because
// LogDaemonEventWithAttributes() -> SystemInfo() -> GetAllRuntimes()
// holds that lock too.
daemon.configStore.reloadLock.Unlock()
if err == nil {
daemon.LogDaemonEventWithAttributes("reload", attributes)
}
}()
if err := daemon.reloadClusterDiscovery(config); err != nil {
return err
}
if config.IsValueSet("labels") {
daemon.configStore.Labels = config.Labels
}
if config.IsValueSet("debug") {
daemon.configStore.Debug = config.Debug
}
if config.IsValueSet("insecure-registries") {
daemon.configStore.InsecureRegistries = config.InsecureRegistries
if err := daemon.RegistryService.LoadInsecureRegistries(config.InsecureRegistries); err != nil {
return err
}
}
if config.IsValueSet("live-restore") {
daemon.configStore.LiveRestoreEnabled = config.LiveRestoreEnabled
if err := daemon.containerdRemote.UpdateOptions(libcontainerd.WithLiveRestore(config.LiveRestoreEnabled)); err != nil {
return err
}
}
// If no value is set for max-concurrent-downloads we assume it is the default value
// We always "reset" as the cost is lightweight and easy to maintain.
if config.IsValueSet("max-concurrent-downloads") && config.MaxConcurrentDownloads != nil {
*daemon.configStore.MaxConcurrentDownloads = *config.MaxConcurrentDownloads
} else {
maxConcurrentDownloads := defaultMaxConcurrentDownloads
daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads
}
logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads)
if daemon.downloadManager != nil {
daemon.downloadManager.SetConcurrency(*daemon.configStore.MaxConcurrentDownloads)
}
// If no value is set for max-concurrent-uploads we assume it is the default value
// We always "reset" as the cost is lightweight and easy to maintain.
if config.IsValueSet("max-concurrent-uploads") && config.MaxConcurrentUploads != nil {
*daemon.configStore.MaxConcurrentUploads = *config.MaxConcurrentUploads
} else {
maxConcurrentUploads := defaultMaxConcurrentUploads
daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads
}
logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads)
if daemon.uploadManager != nil {
daemon.uploadManager.SetConcurrency(*daemon.configStore.MaxConcurrentUploads)
}
if config.IsValueSet("shutdown-timeout") {
daemon.configStore.ShutdownTimeout = config.ShutdownTimeout
logrus.Debugf("Reset Shutdown Timeout: %d", daemon.configStore.ShutdownTimeout)
}
// We emit the daemon reload event here with the updatable configuration attributes
attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug)
attributes["live-restore"] = fmt.Sprintf("%t", daemon.configStore.LiveRestoreEnabled)
if daemon.configStore.InsecureRegistries != nil {
insecureRegistries, err := json.Marshal(daemon.configStore.InsecureRegistries)
if err != nil {
return err
}
attributes["insecure-registries"] = string(insecureRegistries)
} else {
attributes["insecure-registries"] = "[]"
}
attributes["cluster-store"] = daemon.configStore.ClusterStore
if daemon.configStore.ClusterOpts != nil {
opts, err := json.Marshal(daemon.configStore.ClusterOpts)
if err != nil {
return err
}
attributes["cluster-store-opts"] = string(opts)
} else {
attributes["cluster-store-opts"] = "{}"
}
attributes["cluster-advertise"] = daemon.configStore.ClusterAdvertise
if daemon.configStore.Labels != nil {
labels, err := json.Marshal(daemon.configStore.Labels)
if err != nil {
return err
}
attributes["labels"] = string(labels)
} else {
attributes["labels"] = "[]"
}
attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads)
attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads)
attributes["shutdown-timeout"] = fmt.Sprintf("%d", daemon.configStore.ShutdownTimeout)
return nil
}
func (daemon *Daemon) reloadClusterDiscovery(config *Config) error {
var err error
newAdvertise := daemon.configStore.ClusterAdvertise
newClusterStore := daemon.configStore.ClusterStore
if config.IsValueSet("cluster-advertise") {
if config.IsValueSet("cluster-store") {
newClusterStore = config.ClusterStore
}
newAdvertise, err = parseClusterAdvertiseSettings(newClusterStore, config.ClusterAdvertise)
if err != nil && err != errDiscoveryDisabled {
return err
}
}
if daemon.clusterProvider != nil {
if err := config.isSwarmCompatible(); err != nil {
return err
}
}
// check discovery modifications
if !modifiedDiscoverySettings(daemon.configStore, newAdvertise, newClusterStore, config.ClusterOpts) {
return nil
}
// enable discovery for the first time if it was not previously enabled
if daemon.discoveryWatcher == nil {
discoveryWatcher, err := initDiscovery(newClusterStore, newAdvertise, config.ClusterOpts)
if err != nil {
return fmt.Errorf("discovery initialization failed (%v)", err)
}
daemon.discoveryWatcher = discoveryWatcher
} else {
if err == errDiscoveryDisabled {
// disable discovery if it was previously enabled and it's disabled now
daemon.discoveryWatcher.Stop()
} else {
// reload discovery
if err = daemon.discoveryWatcher.Reload(config.ClusterStore, newAdvertise, config.ClusterOpts); err != nil {
return err
}
}
}
daemon.configStore.ClusterStore = newClusterStore
daemon.configStore.ClusterOpts = config.ClusterOpts
daemon.configStore.ClusterAdvertise = newAdvertise
if daemon.netController == nil {
return nil
}
netOptions, err := daemon.networkOptions(daemon.configStore, daemon.PluginStore, nil)
if err != nil {
logrus.WithError(err).Warnf("failed to get options with network controller")
return nil
}
err = daemon.netController.ReloadConfiguration(netOptions...)
if err != nil {
logrus.Warnf("Failed to reload configuration with network controller: %v", err)
}
return nil
}
func isBridgeNetworkDisabled(config *Config) bool {
return config.bridgeConfig.Iface == disableNetworkBridge
}
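// networkOptions translates the daemon configuration into libnetwork controller
// options (data dir, default driver, cluster store, discovery, labels, active
// sandboxes and plugin getter).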
func (daemon *Daemon) networkOptions(dconfig *Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
options := []nwconfig.Option{}
if dconfig == nil {
return options, nil
}
options = append(options, nwconfig.OptionExperimental(dconfig.Experimental))
options = append(options, nwconfig.OptionDataDir(dconfig.Root))
options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot()))
dd := runconfig.DefaultDaemonNetworkMode()
dn := runconfig.DefaultDaemonNetworkMode().NetworkName()
options = append(options, nwconfig.OptionDefaultDriver(string(dd)))
options = append(options, nwconfig.OptionDefaultNetwork(dn))
if strings.TrimSpace(dconfig.ClusterStore) != "" {
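// The cluster store is given as KV-PROVIDER://KV-URL; for example
// "consul://localhost:8500" splits into provider "consul" and URL "localhost:8500".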
kv := strings.Split(dconfig.ClusterStore, "://")
if len(kv) != 2 {
return nil, fmt.Errorf("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
}
options = append(options, nwconfig.OptionKVProvider(kv[0]))
options = append(options, nwconfig.OptionKVProviderURL(kv[1]))
}
if len(dconfig.ClusterOpts) > 0 {
options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts))
}
if daemon.discoveryWatcher != nil {
options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher))
}
if dconfig.ClusterAdvertise != "" {
options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise))
}
options = append(options, nwconfig.OptionLabels(dconfig.Labels))
options = append(options, driverOptions(dconfig)...)
if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 {
options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes))
}
if pg != nil {
options = append(options, nwconfig.OptionPluginGetter(pg))
}
return options, nil
}
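// copyBlkioEntry converts containerd blkio stat entries into the engine API's
// BlkioStatEntry representation.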
func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry {
out := make([]types.BlkioStatEntry, len(entries))
for i, re := range entries {
out[i] = types.BlkioStatEntry{
Major: re.Major,
Minor: re.Minor,
Op: re.Op,
Value: re.Value,
}
}
return out
}
// GetCluster returns the cluster
func (daemon *Daemon) GetCluster() Cluster {
return daemon.cluster
}
// SetCluster sets the cluster
func (daemon *Daemon) SetCluster(cluster Cluster) {
daemon.cluster = cluster
}
func (daemon *Daemon) pluginShutdown() {
manager := daemon.pluginManager
// Check for a valid manager object. In error conditions, daemon init can fail
// and Shutdown can be called before the plugin manager is initialized.
if manager != nil {
manager.Shutdown()
}
}
// PluginManager returns current pluginManager associated with the daemon
func (daemon *Daemon) PluginManager() *plugin.Manager { // set up before daemon to avoid this method
return daemon.pluginManager
}
// CreateDaemonRoot creates the root for the daemon
func CreateDaemonRoot(config *Config) error {
// get the canonical path to the Docker root directory
var realRoot string
if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
realRoot = config.Root
} else {
realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root)
if err != nil {
return fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
}
}
uidMaps, gidMaps, err := setupRemappedRoot(config)
if err != nil {
return err
}
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return err
}
if err := setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil {
return err
}
return nil
}
| ["\"DOCKER_DRIVER\"", "\"DOCKER_TMPDIR\""] | [] | ["DOCKER_DRIVER", "DOCKER_TMPDIR"] | [] | ["DOCKER_DRIVER", "DOCKER_TMPDIR"] | go | 2 | 0 |
script/test.py | #!/usr/bin/env python
import argparse
import atexit
import os
import shutil
import subprocess
import sys
from lib.config import enable_verbose_mode
from lib.util import get_electron_branding, execute_stdout, rm_rf
import lib.dbus_mock
if sys.platform.startswith('linux'):
# On Linux we use python-dbusmock to create a fake system bus and test
# powerMonitor interaction with org.freedesktop.login1 service. The
# dbus_mock module takes care of setting up the fake server with mock,
# while also setting DBUS_SYSTEM_BUS_ADDRESS environment variable, which
# will be picked up by electron.
try:
lib.dbus_mock.start()
atexit.register(lib.dbus_mock.stop)
except ImportError:
# If not available, the powerMonitor tests will be skipped since
# DBUS_SYSTEM_BUS_ADDRESS will not be set
pass
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
PROJECT_NAME = get_electron_branding()['project_name']
PRODUCT_NAME = get_electron_branding()['product_name']
def main():
os.chdir(SOURCE_ROOT)
args = parse_args()
config = args.configuration
if args.verbose:
enable_verbose_mode()
os.environ['ELECTRON_ENABLE_LOGGING'] = '1'
spec_modules = os.path.join(SOURCE_ROOT, 'spec', 'node_modules')
if args.rebuild_native_modules or not os.path.isdir(spec_modules):
rebuild_native_modules(args.verbose, config)
if sys.platform == 'darwin':
electron = os.path.join(SOURCE_ROOT, 'out', config,
'{0}.app'.format(PRODUCT_NAME), 'Contents',
'MacOS', PRODUCT_NAME)
resources_path = os.path.join(SOURCE_ROOT, 'out', config,
'{0}.app'.format(PRODUCT_NAME), 'Contents',
'Resources')
elif sys.platform == 'win32':
electron = os.path.join(SOURCE_ROOT, 'out', config,
'{0}.exe'.format(PROJECT_NAME))
resources_path = os.path.join(SOURCE_ROOT, 'out', config)
if config != 'R':
os.environ['ELECTRON_SKIP_NATIVE_MODULE_TESTS'] = '1'
else:
electron = os.path.join(SOURCE_ROOT, 'out', config, PROJECT_NAME)
resources_path = os.path.join(SOURCE_ROOT, 'out', config)
returncode = 0
try:
if args.use_instrumented_asar:
install_instrumented_asar_file(resources_path)
os.environ["ELECTRON_DISABLE_SECURITY_WARNINGS"] = "1"
subprocess.check_call([electron, 'spec'] + sys.argv[1:])
except subprocess.CalledProcessError as e:
returncode = e.returncode
except KeyboardInterrupt:
returncode = 0
if args.use_instrumented_asar:
restore_uninstrumented_asar_file(resources_path)
if 'OUTPUT_TO_FILE' in os.environ:
output_to_file = os.environ['OUTPUT_TO_FILE']
with open(output_to_file, 'r') as f:
print(f.read())
rm_rf(output_to_file)
return returncode
def parse_args():
parser = argparse.ArgumentParser(description='Run Electron tests')
parser.add_argument('--use_instrumented_asar',
help='Run tests with coverage-instrumented asar file',
action='store_true',
required=False)
parser.add_argument('--rebuild_native_modules',
help='Rebuild native modules used by specs',
action='store_true',
required=False)
parser.add_argument('--ci',
help='Run tests in CI mode',
action='store_true',
required=False)
parser.add_argument('-g', '--grep',
help='Only run tests matching <pattern>',
metavar='pattern',
required=False)
parser.add_argument('-i', '--invert',
help='Inverts --grep matches',
action='store_true',
required=False)
parser.add_argument('-v', '--verbose',
action='store_true',
help='Prints the output of the subprocesses')
parser.add_argument('-c', '--configuration',
help='Build configuration to run tests against',
default='D',
required=False)
return parser.parse_args()
def install_instrumented_asar_file(resources_path):
asar_path = os.path.join(resources_path, '{0}.asar'.format(PROJECT_NAME))
uninstrumented_path = os.path.join(resources_path,
'{0}-original.asar'.format(PROJECT_NAME))
instrumented_path = os.path.join(SOURCE_ROOT, 'out', 'coverage',
'{0}.asar'.format(PROJECT_NAME))
shutil.move(asar_path, uninstrumented_path)
shutil.move(instrumented_path, asar_path)
def restore_uninstrumented_asar_file(resources_path):
asar_path = os.path.join(resources_path, '{0}.asar'.format(PROJECT_NAME))
uninstrumented_path = os.path.join(resources_path,
'{0}-original.asar'.format(PROJECT_NAME))
os.remove(asar_path)
shutil.move(uninstrumented_path, asar_path)
def rebuild_native_modules(verbose, configuration):
script_path = os.path.join(SOURCE_ROOT, 'script', 'rebuild-test-modules.py')
args = ['--configuration', configuration]
if verbose:
args += ['--verbose']
execute_stdout([sys.executable, script_path] + args)
if __name__ == '__main__':
sys.exit(main())
| [] | [] | ["ELECTRON_DISABLE_SECURITY_WARNINGS", "ELECTRON_ENABLE_LOGGING", "OUTPUT_TO_FILE", "ELECTRON_SKIP_NATIVE_MODULE_TESTS"] | [] | ["ELECTRON_DISABLE_SECURITY_WARNINGS", "ELECTRON_ENABLE_LOGGING", "OUTPUT_TO_FILE", "ELECTRON_SKIP_NATIVE_MODULE_TESTS"] | python | 4 | 0 |
sellhere/sellhere/wsgi.py | """
WSGI config for sellhere project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sellhere.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 |
examples/src/main/java/org/apache/spark/examples/sql/JavaSparkSQL.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.sql;
import java.io.Serializable;
import java.util.List;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.sql.api.java.JavaSQLContext;
import org.apache.spark.sql.api.java.JavaSchemaRDD;
import org.apache.spark.sql.api.java.Row;
public class JavaSparkSQL {
public static class Person implements Serializable {
private String name;
private int age;
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public int getAge() {
return age;
}
public void setAge(int age) {
this.age = age;
}
}
public static void main(String[] args) throws Exception {
JavaSparkContext ctx = new JavaSparkContext("local", "JavaSparkSQL",
System.getenv("SPARK_HOME"), JavaSparkContext.jarOfClass(JavaSparkSQL.class));
JavaSQLContext sqlCtx = new JavaSQLContext(ctx);
// Load a text file and convert each line to a Java Bean.
JavaRDD<Person> people = ctx.textFile("examples/src/main/resources/people.txt").map(
new Function<String, Person>() {
public Person call(String line) throws Exception {
String[] parts = line.split(",");
Person person = new Person();
person.setName(parts[0]);
person.setAge(Integer.parseInt(parts[1].trim()));
return person;
}
});
// Apply a schema to an RDD of Java Beans and register it as a table.
JavaSchemaRDD schemaPeople = sqlCtx.applySchema(people, Person.class);
schemaPeople.registerAsTable("people");
// SQL can be run over RDDs that have been registered as tables.
JavaSchemaRDD teenagers = sqlCtx.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19");
// The results of SQL queries are SchemaRDDs and support all the normal RDD operations.
// The columns of a row in the result can be accessed by ordinal.
List<String> teenagerNames = teenagers.map(new Function<Row, String>() {
public String call(Row row) {
return "Name: " + row.getString(0);
}
}).collect();
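// Print the collected names so the query result is visible.
for (String name : teenagerNames) {
System.out.println(name);
}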
// JavaSchemaRDDs can be saved as parquet files, maintaining the schema information.
schemaPeople.saveAsParquetFile("people.parquet");
// Read in the parquet file created above. Parquet files are self-describing so the schema is preserved.
// The result of loading a parquet file is also a JavaSchemaRDD.
JavaSchemaRDD parquetFile = sqlCtx.parquetFile("people.parquet");
// Parquet files can also be registered as tables and then used in SQL statements.
parquetFile.registerAsTable("parquetFile");
JavaSchemaRDD teenagers2 = sqlCtx.sql("SELECT name FROM parquetFile WHERE age >= 13 AND age <= 19");
}
}
| ["\"SPARK_HOME\""] | [] | ["SPARK_HOME"] | [] | ["SPARK_HOME"] | java | 1 | 0 |
src/java/br/com/beibe/service/ConnectionFactory.java | package br.com.beibe.service;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
public abstract class ConnectionFactory {
private static final String PROPS_FILE;
private static final Properties PROPS = new Properties();
static {
if (System.getenv("HEROKU") == null)
PROPS_FILE = "/../db.properties";
else
PROPS_FILE = "/../db-heroku.properties";
try (InputStream is = ConnectionFactory.class.getResourceAsStream(PROPS_FILE)) {
PROPS.load(is);
} catch (IOException ex) {
throw new RuntimeException(ex);
}
}
public static Connection getConnection() {
return getConnection(true);
}
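/**
* Opens a new connection using the configured JDBC driver and credentials.
*
* @param autoCommit whether the returned connection commits automatically
*/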
public static Connection getConnection(boolean autoCommit) {
try {
Class.forName(PROPS.getProperty("DB_DRIVER"));
String dbUrl = PROPS.getProperty("DB_URL")
+ PROPS.getProperty("DB_HOST") + ":"
+ PROPS.getProperty("DB_PORT") + "/"
+ PROPS.getProperty("DB_SCHEMA");
String dbUser = PROPS.getProperty("DB_USER");
String dbPassword = PROPS.getProperty("DB_PASSWORD");
Connection conn = DriverManager.getConnection(dbUrl, dbUser, dbPassword);
conn.setAutoCommit(autoCommit);
return conn;
} catch (ClassNotFoundException | SQLException ex) {
throw new RuntimeException(ex);
}
}
}
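// Minimal usage sketch (assumes db.properties provides valid connection settings):
//
// try (Connection conn = ConnectionFactory.getConnection(false)) {
// // ... execute statements ...
// conn.commit();
// }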
| ["\"HEROKU\""] | [] | ["HEROKU"] | [] | ["HEROKU"] | java | 1 | 0 |
flash/core/data/data_module.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import platform
from typing import Any, Callable, Collection, Dict, Iterable, List, Optional, Sequence, Tuple, TYPE_CHECKING, Union
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.trainer.states import RunningStage
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.dataset import IterableDataset, Subset
from torch.utils.data.sampler import Sampler
import flash
from flash.core.data.auto_dataset import BaseAutoDataset, IterableAutoDataset
from flash.core.data.base_viz import BaseVisualization
from flash.core.data.callback import BaseDataFetcher
from flash.core.data.data_pipeline import DataPipeline, DefaultPreprocess, Postprocess, Preprocess
from flash.core.data.data_source import DataSource, DefaultDataSources
from flash.core.data.splits import SplitDataset
from flash.core.data.utils import _STAGES_PREFIX
from flash.core.utilities.imports import _FIFTYONE_AVAILABLE, requires
if _FIFTYONE_AVAILABLE and TYPE_CHECKING:
from fiftyone.core.collections import SampleCollection
else:
SampleCollection = None
class DataModule(pl.LightningDataModule):
"""A basic DataModule class for all Flash tasks. This class includes references to a
:class:`~flash.core.data.data_source.DataSource`, :class:`~flash.core.data.process.Preprocess`,
:class:`~flash.core.data.process.Postprocess`, and a :class:`~flash.core.data.callback.BaseDataFetcher`.
Args:
train_dataset: Dataset for training. Defaults to None.
val_dataset: Dataset for validating model performance during training. Defaults to None.
test_dataset: Dataset to test model performance. Defaults to None.
predict_dataset: Dataset for predicting. Defaults to None.
data_source: The :class:`~flash.core.data.data_source.DataSource` that was used to create the datasets.
preprocess: The :class:`~flash.core.data.process.Preprocess` to use when constructing the
:class:`~flash.core.data.data_pipeline.DataPipeline`. If ``None``, a
:class:`~flash.core.data.process.DefaultPreprocess` will be used.
postprocess: The :class:`~flash.core.data.process.Postprocess` to use when constructing the
:class:`~flash.core.data.data_pipeline.DataPipeline`. If ``None``, a plain
:class:`~flash.core.data.process.Postprocess` will be used.
data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to attach to the
:class:`~flash.core.data.process.Preprocess`. If ``None``, the output from
:meth:`~flash.core.data.data_module.DataModule.configure_data_fetcher` will be used.
val_split: An optional float which gives the relative amount of the training dataset to use for the validation
dataset.
batch_size: The batch size to be used by the DataLoader. Defaults to 4.
num_workers: The number of workers to use for parallelized loading.
Defaults to None which equals the number of available CPU threads,
or 0 for Windows or Darwin platform.
sampler: A sampler following the :class:`~torch.utils.data.sampler.Sampler` type.
Will be passed to the DataLoader for the training dataset. Defaults to None.
"""
preprocess_cls = DefaultPreprocess
postprocess_cls = Postprocess
def __init__(
self,
train_dataset: Optional[Dataset] = None,
val_dataset: Optional[Dataset] = None,
test_dataset: Optional[Dataset] = None,
predict_dataset: Optional[Dataset] = None,
data_source: Optional[DataSource] = None,
preprocess: Optional[Preprocess] = None,
postprocess: Optional[Postprocess] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
sampler: Optional[Sampler] = None,
) -> None:
super().__init__()
if flash._IS_TESTING and torch.cuda.is_available():
batch_size = 16
self._data_source: DataSource = data_source
self._preprocess: Optional[Preprocess] = preprocess
self._postprocess: Optional[Postprocess] = postprocess
self._viz: Optional[BaseVisualization] = None
self._data_fetcher: Optional[BaseDataFetcher] = data_fetcher or self.configure_data_fetcher()
# TODO: Preprocess can change
self.data_fetcher.attach_to_preprocess(self.preprocess)
self._train_ds = train_dataset
self._val_ds = val_dataset
self._test_ds = test_dataset
self._predict_ds = predict_dataset
if self._train_ds is not None and (val_split is not None and self._val_ds is None):
self._train_ds, self._val_ds = self._split_train_val(self._train_ds, val_split)
if self._train_ds:
self.train_dataloader = self._train_dataloader
if self._val_ds:
self.val_dataloader = self._val_dataloader
if self._test_ds:
self.test_dataloader = self._test_dataloader
if self._predict_ds:
self.predict_dataloader = self._predict_dataloader
self.batch_size = batch_size
# TODO: figure out best solution for setting num_workers
if num_workers is None:
if platform.system() in ("Darwin", "Windows"):
num_workers = 0
else:
num_workers = os.cpu_count()
self.num_workers = num_workers
self.sampler = sampler
self.set_running_stages()
@property
def train_dataset(self) -> Optional[Dataset]:
"""This property returns the train dataset."""
return self._train_ds
@property
def val_dataset(self) -> Optional[Dataset]:
"""This property returns the validation dataset."""
return self._val_ds
@property
def test_dataset(self) -> Optional[Dataset]:
"""This property returns the test dataset."""
return self._test_ds
@property
def predict_dataset(self) -> Optional[Dataset]:
"""This property returns the predict dataset."""
return self._predict_ds
@property
def viz(self) -> BaseVisualization:
return self._viz or DataModule.configure_data_fetcher()
@viz.setter
def viz(self, viz: BaseVisualization) -> None:
self._viz = viz
@staticmethod
def configure_data_fetcher(*args, **kwargs) -> BaseDataFetcher:
"""This function is used to configure a :class:`~flash.core.data.callback.BaseDataFetcher`.
Override with your custom one.
"""
return BaseDataFetcher()
@property
def data_fetcher(self) -> BaseDataFetcher:
return self._data_fetcher or DataModule.configure_data_fetcher()
@data_fetcher.setter
def data_fetcher(self, data_fetcher: BaseDataFetcher) -> None:
self._data_fetcher = data_fetcher
def _reset_iterator(self, stage: str) -> Iterable[Any]:
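"""Build and cache a fresh dataloader iterator for ``stage``, temporarily forcing ``num_workers=0``."""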
iter_name = f"_{stage}_iter"
# num_workers has to be set to 0 to work properly
num_workers = self.num_workers
self.num_workers = 0
dataloader_fn = getattr(self, f"{stage}_dataloader")
iterator = iter(dataloader_fn())
self.num_workers = num_workers
setattr(self, iter_name, iterator)
return iterator
def _show_batch(self, stage: str, func_names: Union[str, List[str]], reset: bool = True) -> None:
"""This function is used to handle transforms profiling for batch visualization."""
# don't show in CI
if os.getenv("FLASH_TESTING", "0") == "1":
return None
iter_name = f"_{stage}_iter"
if not hasattr(self, iter_name):
self._reset_iterator(stage)
# list of functions to visualise
if isinstance(func_names, str):
func_names = [func_names]
iter_dataloader = getattr(self, iter_name)
with self.data_fetcher.enable():
if reset:
self.data_fetcher.batches[stage] = {}
try:
_ = next(iter_dataloader)
except StopIteration:
iter_dataloader = self._reset_iterator(stage)
_ = next(iter_dataloader)
data_fetcher: BaseVisualization = self.data_fetcher
data_fetcher._show(stage, func_names)
if reset:
self.data_fetcher.batches[stage] = {}
def show_train_batch(self, hooks_names: Union[str, List[str]] = "load_sample", reset: bool = True) -> None:
"""This function is used to visualize a batch from the train dataloader."""
stage_name: str = _STAGES_PREFIX[RunningStage.TRAINING]
self._show_batch(stage_name, hooks_names, reset=reset)
def show_val_batch(self, hooks_names: Union[str, List[str]] = "load_sample", reset: bool = True) -> None:
"""This function is used to visualize a batch from the validation dataloader."""
stage_name: str = _STAGES_PREFIX[RunningStage.VALIDATING]
self._show_batch(stage_name, hooks_names, reset=reset)
def show_test_batch(self, hooks_names: Union[str, List[str]] = "load_sample", reset: bool = True) -> None:
"""This function is used to visualize a batch from the test dataloader."""
stage_name: str = _STAGES_PREFIX[RunningStage.TESTING]
self._show_batch(stage_name, hooks_names, reset=reset)
def show_predict_batch(self, hooks_names: Union[str, List[str]] = "load_sample", reset: bool = True) -> None:
"""This function is used to visualize a batch from the predict dataloader."""
stage_name: str = _STAGES_PREFIX[RunningStage.PREDICTING]
self._show_batch(stage_name, hooks_names, reset=reset)
@staticmethod
def get_dataset_attribute(dataset: torch.utils.data.Dataset, attr_name: str, default: Optional[Any] = None) -> Any:
if isinstance(dataset, Subset):
return getattr(dataset.dataset, attr_name, default)
return getattr(dataset, attr_name, default)
@staticmethod
def set_dataset_attribute(dataset: torch.utils.data.Dataset, attr_name: str, value: Any) -> None:
if isinstance(dataset, Subset):
dataset = dataset.dataset
if isinstance(dataset, (Dataset, IterableDataset)):
setattr(dataset, attr_name, value)
def set_running_stages(self):
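"""Attach the matching ``RunningStage`` to each dataset that was provided."""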
if self._train_ds:
self.set_dataset_attribute(self._train_ds, "running_stage", RunningStage.TRAINING)
if self._val_ds:
self.set_dataset_attribute(self._val_ds, "running_stage", RunningStage.VALIDATING)
if self._test_ds:
self.set_dataset_attribute(self._test_ds, "running_stage", RunningStage.TESTING)
if self._predict_ds:
self.set_dataset_attribute(self._predict_ds, "running_stage", RunningStage.PREDICTING)
def _resolve_collate_fn(self, dataset: Dataset, running_stage: RunningStage) -> Optional[Callable]:
if isinstance(dataset, (BaseAutoDataset, SplitDataset)):
return self.data_pipeline.worker_preprocessor(running_stage)
def _train_dataloader(self) -> DataLoader:
train_ds: Dataset = self._train_ds() if isinstance(self._train_ds, Callable) else self._train_ds
shuffle: bool = False
collate_fn = self._resolve_collate_fn(train_ds, RunningStage.TRAINING)
if isinstance(train_ds, IterableAutoDataset):
drop_last = False
else:
drop_last = len(train_ds) > self.batch_size
pin_memory = True
if self.sampler is None:
shuffle = not isinstance(train_ds, (IterableDataset, IterableAutoDataset))
if isinstance(getattr(self, "trainer", None), pl.Trainer):
return self.trainer.lightning_module.process_train_dataset(
train_ds,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=pin_memory,
shuffle=shuffle,
drop_last=drop_last,
collate_fn=collate_fn,
sampler=self.sampler,
)
return DataLoader(
train_ds,
batch_size=self.batch_size,
shuffle=shuffle,
sampler=self.sampler,
num_workers=self.num_workers,
pin_memory=pin_memory,
drop_last=drop_last,
collate_fn=collate_fn,
)
def _val_dataloader(self) -> DataLoader:
val_ds: Dataset = self._val_ds() if isinstance(self._val_ds, Callable) else self._val_ds
collate_fn = self._resolve_collate_fn(val_ds, RunningStage.VALIDATING)
pin_memory = True
if isinstance(getattr(self, "trainer", None), pl.Trainer):
return self.trainer.lightning_module.process_val_dataset(
val_ds,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=pin_memory,
collate_fn=collate_fn,
)
return DataLoader(
val_ds,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=pin_memory,
collate_fn=collate_fn,
)
def _test_dataloader(self) -> DataLoader:
test_ds: Dataset = self._test_ds() if isinstance(self._test_ds, Callable) else self._test_ds
collate_fn = self._resolve_collate_fn(test_ds, RunningStage.TESTING)
pin_memory = True
if isinstance(getattr(self, "trainer", None), pl.Trainer):
return self.trainer.lightning_module.process_test_dataset(
test_ds,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=pin_memory,
collate_fn=collate_fn,
)
return DataLoader(
test_ds,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=pin_memory,
collate_fn=collate_fn,
)
def _predict_dataloader(self) -> DataLoader:
predict_ds: Dataset = self._predict_ds() if isinstance(self._predict_ds, Callable) else self._predict_ds
if isinstance(predict_ds, IterableAutoDataset):
batch_size = self.batch_size
else:
batch_size = min(self.batch_size, len(predict_ds) if len(predict_ds) > 0 else 1)
collate_fn = self._resolve_collate_fn(predict_ds, RunningStage.PREDICTING)
pin_memory = True
if isinstance(getattr(self, "trainer", None), pl.Trainer):
return self.trainer.lightning_module.process_predict_dataset(
predict_ds,
batch_size=batch_size,
num_workers=self.num_workers,
pin_memory=pin_memory,
collate_fn=collate_fn,
)
return DataLoader(
predict_ds, batch_size=batch_size, num_workers=self.num_workers, pin_memory=True, collate_fn=collate_fn
)
@property
def num_classes(self) -> Optional[int]:
n_cls_train = getattr(self.train_dataset, "num_classes", None)
n_cls_val = getattr(self.val_dataset, "num_classes", None)
n_cls_test = getattr(self.test_dataset, "num_classes", None)
return n_cls_train or n_cls_val or n_cls_test
@property
def multi_label(self) -> Optional[bool]:
multi_label_train = getattr(self.train_dataset, "multi_label", None)
multi_label_val = getattr(self.val_dataset, "multi_label", None)
multi_label_test = getattr(self.test_dataset, "multi_label", None)
return multi_label_train or multi_label_val or multi_label_test
@property
def data_source(self) -> Optional[DataSource]:
return self._data_source
@property
def preprocess(self) -> Preprocess:
return self._preprocess or self.preprocess_cls()
@property
def postprocess(self) -> Postprocess:
return self._postprocess or self.postprocess_cls()
@property
def data_pipeline(self) -> DataPipeline:
return DataPipeline(self.data_source, self.preprocess, self.postprocess)
def available_data_sources(self) -> Sequence[str]:
"""Get the list of available data source names for use with this
:class:`~flash.core.data.data_module.DataModule`.
Returns:
The list of data source names.
"""
return self.preprocess.available_data_sources()
@staticmethod
def _split_train_val(
train_dataset: Dataset,
val_split: float,
) -> Tuple[Any, Any]:
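"""Randomly split ``train_dataset``, using ``val_split`` as the validation fraction, and return the train and validation ``SplitDataset`` objects."""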
if not isinstance(val_split, float) or val_split < 0 or val_split > 1:
raise MisconfigurationException(f"`val_split` should be a float between 0 and 1. Found {val_split}.")
if isinstance(train_dataset, IterableAutoDataset):
raise MisconfigurationException(
"`val_split` should be `None` when the dataset is built with an IterableDataset."
)
val_num_samples = int(len(train_dataset) * val_split)
indices = list(range(len(train_dataset)))
np.random.shuffle(indices)
val_indices = indices[:val_num_samples]
train_indices = indices[val_num_samples:]
return (
SplitDataset(train_dataset, train_indices, use_duplicated_indices=True),
SplitDataset(train_dataset, val_indices, use_duplicated_indices=True),
)
@classmethod
def from_data_source(
cls,
data_source: str,
train_data: Any = None,
val_data: Any = None,
test_data: Any = None,
predict_data: Any = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
preprocess: Optional[Preprocess] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
sampler: Optional[Sampler] = None,
**preprocess_kwargs: Any,
) -> "DataModule":
"""Creates a :class:`~flash.core.data.data_module.DataModule` object from the given inputs to
:meth:`~flash.core.data.data_source.DataSource.load_data` (``train_data``, ``val_data``, ``test_data``,
``predict_data``). The data source will be resolved from the instantiated
:class:`~flash.core.data.process.Preprocess`
using :meth:`~flash.core.data.process.Preprocess.data_source_of_name`.
Args:
data_source: The name of the data source to use for the
:meth:`~flash.core.data.data_source.DataSource.load_data`.
train_data: The input to :meth:`~flash.core.data.data_source.DataSource.load_data` to use when creating
the train dataset.
val_data: The input to :meth:`~flash.core.data.data_source.DataSource.load_data` to use when creating
the validation dataset.
test_data: The input to :meth:`~flash.core.data.data_source.DataSource.load_data` to use when creating
the test dataset.
predict_data: The input to :meth:`~flash.core.data.data_source.DataSource.load_data` to use when creating
the predict dataset.
train_transform: The dictionary of transforms to use during training which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
val_transform: The dictionary of transforms to use during validation which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
test_transform: The dictionary of transforms to use during testing which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
predict_transform: The dictionary of transforms to use during predicting which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the
:class:`~flash.core.data.data_module.DataModule`.
preprocess: The :class:`~flash.core.data.data.Preprocess` to pass to the
:class:`~flash.core.data.data_module.DataModule`. If ``None``, ``cls.preprocess_cls`` will be
constructed and used.
val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
sampler: The ``sampler`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. Will only be used
if ``preprocess = None``.
Returns:
The constructed data module.
Examples::
data_module = DataModule.from_data_source(
DefaultDataSources.FOLDERS,
train_data="train_folder",
train_transform={
"to_tensor_transform": torch.as_tensor,
},
)
"""
preprocess = preprocess or cls.preprocess_cls(
train_transform,
val_transform,
test_transform,
predict_transform,
**preprocess_kwargs,
)
data_source = preprocess.data_source_of_name(data_source)
train_dataset, val_dataset, test_dataset, predict_dataset = data_source.to_datasets(
train_data,
val_data,
test_data,
predict_data,
)
return cls(
train_dataset,
val_dataset,
test_dataset,
predict_dataset,
data_source=data_source,
preprocess=preprocess,
data_fetcher=data_fetcher,
val_split=val_split,
batch_size=batch_size,
num_workers=num_workers,
sampler=sampler,
)
@classmethod
def from_folders(
cls,
train_folder: Optional[str] = None,
val_folder: Optional[str] = None,
test_folder: Optional[str] = None,
predict_folder: Optional[str] = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
preprocess: Optional[Preprocess] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
sampler: Optional[Sampler] = None,
**preprocess_kwargs: Any,
) -> "DataModule":
"""Creates a :class:`~flash.core.data.data_module.DataModule` object from the given folders using the
:class:`~flash.core.data.data_source.DataSource` of name
:attr:`~flash.core.data.data_source.DefaultDataSources.FOLDERS`
from the passed or constructed :class:`~flash.core.data.process.Preprocess`.
Args:
train_folder: The folder containing the train data.
val_folder: The folder containing the validation data.
test_folder: The folder containing the test data.
predict_folder: The folder containing the predict data.
train_transform: The dictionary of transforms to use during training which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
val_transform: The dictionary of transforms to use during validation which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
test_transform: The dictionary of transforms to use during testing which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
predict_transform: The dictionary of transforms to use during predicting which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the
:class:`~flash.core.data.data_module.DataModule`.
preprocess: The :class:`~flash.core.data.data.Preprocess` to pass to the
:class:`~flash.core.data.data_module.DataModule`. If ``None``, ``cls.preprocess_cls``
will be constructed and used.
val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
sampler: The ``sampler`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. Will only be used
if ``preprocess = None``.
Returns:
The constructed data module.
Examples::
data_module = DataModule.from_folders(
train_folder="train_folder",
train_transform={
"to_tensor_transform": torch.as_tensor,
},
)
"""
return cls.from_data_source(
DefaultDataSources.FOLDERS,
train_folder,
val_folder,
test_folder,
predict_folder,
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
predict_transform=predict_transform,
data_fetcher=data_fetcher,
preprocess=preprocess,
val_split=val_split,
batch_size=batch_size,
num_workers=num_workers,
sampler=sampler,
**preprocess_kwargs,
)
@classmethod
def from_files(
cls,
train_files: Optional[Sequence[str]] = None,
train_targets: Optional[Sequence[Any]] = None,
val_files: Optional[Sequence[str]] = None,
val_targets: Optional[Sequence[Any]] = None,
test_files: Optional[Sequence[str]] = None,
test_targets: Optional[Sequence[Any]] = None,
predict_files: Optional[Sequence[str]] = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
preprocess: Optional[Preprocess] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
sampler: Optional[Sampler] = None,
**preprocess_kwargs: Any,
) -> "DataModule":
"""Creates a :class:`~flash.core.data.data_module.DataModule` object from the given sequences of files
using the :class:`~flash.core.data.data_source.DataSource` of name
:attr:`~flash.core.data.data_source.DefaultDataSources.FILES` from the passed or constructed
:class:`~flash.core.data.process.Preprocess`.
Args:
train_files: A sequence of files to use as the train inputs.
train_targets: A sequence of targets (one per train file) to use as the train targets.
val_files: A sequence of files to use as the validation inputs.
val_targets: A sequence of targets (one per validation file) to use as the validation targets.
test_files: A sequence of files to use as the test inputs.
test_targets: A sequence of targets (one per test file) to use as the test targets.
predict_files: A sequence of files to use when predicting.
train_transform: The dictionary of transforms to use during training which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
val_transform: The dictionary of transforms to use during validation which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
test_transform: The dictionary of transforms to use during testing which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
predict_transform: The dictionary of transforms to use during predicting which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the
:class:`~flash.core.data.data_module.DataModule`.
preprocess: The :class:`~flash.core.data.process.Preprocess` to pass to the
:class:`~flash.core.data.data_module.DataModule`. If ``None``, ``cls.preprocess_cls``
will be constructed and used.
val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
sampler: The ``sampler`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. Will only be used
if ``preprocess = None``.
Returns:
The constructed data module.
Examples::
data_module = DataModule.from_files(
train_files=["image_1.png", "image_2.png", "image_3.png"],
train_targets=[1, 0, 1],
train_transform={
"to_tensor_transform": torch.as_tensor,
},
)
"""
return cls.from_data_source(
DefaultDataSources.FILES,
(train_files, train_targets),
(val_files, val_targets),
(test_files, test_targets),
predict_files,
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
predict_transform=predict_transform,
data_fetcher=data_fetcher,
preprocess=preprocess,
val_split=val_split,
batch_size=batch_size,
num_workers=num_workers,
sampler=sampler,
**preprocess_kwargs,
)
@classmethod
def from_tensors(
cls,
train_data: Optional[Collection[torch.Tensor]] = None,
train_targets: Optional[Collection[Any]] = None,
val_data: Optional[Collection[torch.Tensor]] = None,
val_targets: Optional[Sequence[Any]] = None,
test_data: Optional[Collection[torch.Tensor]] = None,
test_targets: Optional[Sequence[Any]] = None,
predict_data: Optional[Collection[torch.Tensor]] = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
preprocess: Optional[Preprocess] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
sampler: Optional[Sampler] = None,
**preprocess_kwargs: Any,
) -> "DataModule":
"""Creates a :class:`~flash.core.data.data_module.DataModule` object from the given tensors using the
:class:`~flash.core.data.data_source.DataSource`
of name :attr:`~flash.core.data.data_source.DefaultDataSources.TENSORS`
from the passed or constructed :class:`~flash.core.data.process.Preprocess`.
Args:
train_data: A tensor or collection of tensors to use as the train inputs.
train_targets: A sequence of targets (one per train input) to use as the train targets.
val_data: A tensor or collection of tensors to use as the validation inputs.
val_targets: A sequence of targets (one per validation input) to use as the validation targets.
test_data: A tensor or collection of tensors to use as the test inputs.
test_targets: A sequence of targets (one per test input) to use as the test targets.
predict_data: A tensor or collection of tensors to use when predicting.
train_transform: The dictionary of transforms to use during training which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
val_transform: The dictionary of transforms to use during validation which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
test_transform: The dictionary of transforms to use during testing which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
predict_transform: The dictionary of transforms to use during predicting which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the
:class:`~flash.core.data.data_module.DataModule`.
preprocess: The :class:`~flash.core.data.process.Preprocess` to pass to the
:class:`~flash.core.data.data_module.DataModule`. If ``None``, ``cls.preprocess_cls``
will be constructed and used.
val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
sampler: The ``sampler`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. Will only be used
if ``preprocess = None``.
Returns:
The constructed data module.
Examples::
data_module = DataModule.from_tensors(
train_data=torch.rand(3, 128),
train_targets=[1, 0, 1],
train_transform={
"to_tensor_transform": torch.as_tensor,
},
)
"""
return cls.from_data_source(
DefaultDataSources.TENSORS,
(train_data, train_targets),
(val_data, val_targets),
(test_data, test_targets),
predict_data,
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
predict_transform=predict_transform,
data_fetcher=data_fetcher,
preprocess=preprocess,
val_split=val_split,
batch_size=batch_size,
num_workers=num_workers,
sampler=sampler,
**preprocess_kwargs,
)
@classmethod
def from_numpy(
cls,
train_data: Optional[Collection[np.ndarray]] = None,
train_targets: Optional[Collection[Any]] = None,
val_data: Optional[Collection[np.ndarray]] = None,
val_targets: Optional[Sequence[Any]] = None,
test_data: Optional[Collection[np.ndarray]] = None,
test_targets: Optional[Sequence[Any]] = None,
predict_data: Optional[Collection[np.ndarray]] = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
preprocess: Optional[Preprocess] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
sampler: Optional[Sampler] = None,
**preprocess_kwargs: Any,
) -> "DataModule":
"""Creates a :class:`~flash.core.data.data_module.DataModule` object from the given numpy array using the
:class:`~flash.core.data.data_source.DataSource`
of name :attr:`~flash.core.data.data_source.DefaultDataSources.NUMPY`
from the passed or constructed :class:`~flash.core.data.process.Preprocess`.
Args:
train_data: A numpy array to use as the train inputs.
train_targets: A sequence of targets (one per train input) to use as the train targets.
val_data: A numpy array to use as the validation inputs.
val_targets: A sequence of targets (one per validation input) to use as the validation targets.
test_data: A numpy array to use as the test inputs.
test_targets: A sequence of targets (one per test input) to use as the test targets.
predict_data: A numpy array to use when predicting.
train_transform: The dictionary of transforms to use during training which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
val_transform: The dictionary of transforms to use during validation which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
test_transform: The dictionary of transforms to use during testing which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
predict_transform: The dictionary of transforms to use during predicting which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the
:class:`~flash.core.data.data_module.DataModule`.
preprocess: The :class:`~flash.core.data.process.Preprocess` to pass to the
:class:`~flash.core.data.data_module.DataModule`. If ``None``, ``cls.preprocess_cls``
will be constructed and used.
val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
sampler: The ``sampler`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. Will only be used
if ``preprocess = None``.
Returns:
The constructed data module.
Examples::
data_module = DataModule.from_numpy(
train_data=np.random.rand(3, 128),
train_targets=[1, 0, 1],
train_transform={
"to_tensor_transform": torch.as_tensor,
},
)
"""
return cls.from_data_source(
DefaultDataSources.NUMPY,
(train_data, train_targets),
(val_data, val_targets),
(test_data, test_targets),
predict_data,
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
predict_transform=predict_transform,
data_fetcher=data_fetcher,
preprocess=preprocess,
val_split=val_split,
batch_size=batch_size,
num_workers=num_workers,
sampler=sampler,
**preprocess_kwargs,
)
@classmethod
def from_json(
cls,
input_fields: Union[str, Sequence[str]],
target_fields: Optional[Union[str, Sequence[str]]] = None,
train_file: Optional[str] = None,
val_file: Optional[str] = None,
test_file: Optional[str] = None,
predict_file: Optional[str] = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
preprocess: Optional[Preprocess] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
sampler: Optional[Sampler] = None,
field: Optional[str] = None,
**preprocess_kwargs: Any,
) -> "DataModule":
"""Creates a :class:`~flash.core.data.data_module.DataModule` object from the given JSON files using the
:class:`~flash.core.data.data_source.DataSource`
of name :attr:`~flash.core.data.data_source.DefaultDataSources.JSON`
from the passed or constructed :class:`~flash.core.data.process.Preprocess`.
Args:
input_fields: The field or fields in the JSON objects to use for the input.
target_fields: The field or fields in the JSON objects to use for the target.
train_file: The JSON file containing the training data.
val_file: The JSON file containing the validation data.
test_file: The JSON file containing the testing data.
predict_file: The JSON file containing the data to use when predicting.
train_transform: The dictionary of transforms to use during training which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
val_transform: The dictionary of transforms to use during validation which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
test_transform: The dictionary of transforms to use during testing which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
predict_transform: The dictionary of transforms to use during predicting which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the
:class:`~flash.core.data.data_module.DataModule`.
preprocess: The :class:`~flash.core.data.process.Preprocess` to pass to the
:class:`~flash.core.data.data_module.DataModule`. If ``None``, ``cls.preprocess_cls``
will be constructed and used.
val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
sampler: The ``sampler`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
field: The field that holds the data in the JSON file.
preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. Will only be used
if ``preprocess = None``.
Returns:
The constructed data module.
Examples::
data_module = DataModule.from_json(
"input",
"target",
train_file="train_data.json",
train_transform={
"to_tensor_transform": torch.as_tensor,
},
)
# In the case where the data is of the form:
# {
# "version": 0.0.x,
# "data": [
# {
# "input_field" : "input_data",
# "target_field" : "target_output"
# },
# ...
# ]
# }
data_module = DataModule.from_json(
"input",
"target",
train_file="train_data.json",
train_transform={
"to_tensor_transform": torch.as_tensor,
},
field="data"
)
"""
return cls.from_data_source(
DefaultDataSources.JSON,
(train_file, input_fields, target_fields, field),
(val_file, input_fields, target_fields, field),
(test_file, input_fields, target_fields, field),
(predict_file, input_fields, target_fields, field),
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
predict_transform=predict_transform,
data_fetcher=data_fetcher,
preprocess=preprocess,
val_split=val_split,
batch_size=batch_size,
num_workers=num_workers,
sampler=sampler,
**preprocess_kwargs,
)
@classmethod
def from_csv(
cls,
input_fields: Union[str, Sequence[str]],
target_fields: Optional[Union[str, Sequence[str]]] = None,
train_file: Optional[str] = None,
val_file: Optional[str] = None,
test_file: Optional[str] = None,
predict_file: Optional[str] = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
preprocess: Optional[Preprocess] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
sampler: Optional[Sampler] = None,
**preprocess_kwargs: Any,
) -> "DataModule":
"""Creates a :class:`~flash.core.data.data_module.DataModule` object from the given CSV files using the
:class:`~flash.core.data.data_source.DataSource`
of name :attr:`~flash.core.data.data_source.DefaultDataSources.CSV`
from the passed or constructed :class:`~flash.core.data.process.Preprocess`.
Args:
input_fields: The field or fields (columns) in the CSV file to use for the input.
target_fields: The field or fields (columns) in the CSV file to use for the target.
train_file: The CSV file containing the training data.
val_file: The CSV file containing the validation data.
test_file: The CSV file containing the testing data.
predict_file: The CSV file containing the data to use when predicting.
train_transform: The dictionary of transforms to use during training which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
val_transform: The dictionary of transforms to use during validation which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
test_transform: The dictionary of transforms to use during testing which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
predict_transform: The dictionary of transforms to use during predicting which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the
:class:`~flash.core.data.data_module.DataModule`.
preprocess: The :class:`~flash.core.data.process.Preprocess` to pass to the
:class:`~flash.core.data.data_module.DataModule`. If ``None``, ``cls.preprocess_cls``
will be constructed and used.
val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
sampler: The ``sampler`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. Will only be used
if ``preprocess = None``.
Returns:
The constructed data module.
Examples::
data_module = DataModule.from_csv(
"input",
"target",
train_file="train_data.csv",
train_transform={
"to_tensor_transform": torch.as_tensor,
},
)
"""
return cls.from_data_source(
DefaultDataSources.CSV,
(train_file, input_fields, target_fields),
(val_file, input_fields, target_fields),
(test_file, input_fields, target_fields),
(predict_file, input_fields, target_fields),
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
predict_transform=predict_transform,
data_fetcher=data_fetcher,
preprocess=preprocess,
val_split=val_split,
batch_size=batch_size,
num_workers=num_workers,
sampler=sampler,
**preprocess_kwargs,
)
@classmethod
def from_datasets(
cls,
train_dataset: Optional[Dataset] = None,
val_dataset: Optional[Dataset] = None,
test_dataset: Optional[Dataset] = None,
predict_dataset: Optional[Dataset] = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
preprocess: Optional[Preprocess] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
sampler: Optional[Sampler] = None,
**preprocess_kwargs: Any,
) -> "DataModule":
"""Creates a :class:`~flash.core.data.data_module.DataModule` object from the given datasets using the
:class:`~flash.core.data.data_source.DataSource`
of name :attr:`~flash.core.data.data_source.DefaultDataSources.DATASETS`
from the passed or constructed :class:`~flash.core.data.process.Preprocess`.
Args:
train_dataset: Dataset used during training.
val_dataset: Dataset used during validating.
test_dataset: Dataset used during testing.
predict_dataset: Dataset used during predicting.
train_transform: The dictionary of transforms to use during training which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
val_transform: The dictionary of transforms to use during validation which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
test_transform: The dictionary of transforms to use during testing which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
predict_transform: The dictionary of transforms to use during predicting which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the
:class:`~flash.core.data.data_module.DataModule`.
preprocess: The :class:`~flash.core.data.process.Preprocess` to pass to the
:class:`~flash.core.data.data_module.DataModule`. If ``None``, ``cls.preprocess_cls``
will be constructed and used.
val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
sampler: The ``sampler`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. Will only be used
if ``preprocess = None``.
Returns:
The constructed data module.
Examples::
data_module = DataModule.from_datasets(
train_dataset=train_dataset,
train_transform={
"to_tensor_transform": torch.as_tensor,
},
)
"""
return cls.from_data_source(
DefaultDataSources.DATASETS,
train_dataset,
val_dataset,
test_dataset,
predict_dataset,
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
predict_transform=predict_transform,
data_fetcher=data_fetcher,
preprocess=preprocess,
val_split=val_split,
batch_size=batch_size,
num_workers=num_workers,
sampler=sampler,
**preprocess_kwargs,
)
@classmethod
@requires("fiftyone")
def from_fiftyone(
cls,
train_dataset: Optional[SampleCollection] = None,
val_dataset: Optional[SampleCollection] = None,
test_dataset: Optional[SampleCollection] = None,
predict_dataset: Optional[SampleCollection] = None,
train_transform: Optional[Dict[str, Callable]] = None,
val_transform: Optional[Dict[str, Callable]] = None,
test_transform: Optional[Dict[str, Callable]] = None,
predict_transform: Optional[Dict[str, Callable]] = None,
data_fetcher: Optional[BaseDataFetcher] = None,
preprocess: Optional[Preprocess] = None,
val_split: Optional[float] = None,
batch_size: int = 4,
num_workers: Optional[int] = None,
**preprocess_kwargs: Any,
) -> "DataModule":
"""Creates a :class:`~flash.core.data.data_module.DataModule` object
from the given FiftyOne Datasets using the
:class:`~flash.core.data.data_source.DataSource` of name
:attr:`~flash.core.data.data_source.DefaultDataSources.FIFTYONE`
from the passed or constructed :class:`~flash.core.data.process.Preprocess`.
Args:
train_dataset: The ``fiftyone.core.collections.SampleCollection`` containing the train data.
val_dataset: The ``fiftyone.core.collections.SampleCollection`` containing the validation data.
test_dataset: The ``fiftyone.core.collections.SampleCollection`` containing the test data.
predict_dataset: The ``fiftyone.core.collections.SampleCollection`` containing the predict data.
train_transform: The dictionary of transforms to use during training which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
val_transform: The dictionary of transforms to use during validation which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
test_transform: The dictionary of transforms to use during testing which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
predict_transform: The dictionary of transforms to use during predicting which maps
:class:`~flash.core.data.process.Preprocess` hook names to callable transforms.
data_fetcher: The :class:`~flash.core.data.callback.BaseDataFetcher` to pass to the
:class:`~flash.core.data.data_module.DataModule`.
preprocess: The :class:`~flash.core.data.process.Preprocess` to pass to the
:class:`~flash.core.data.data_module.DataModule`. If ``None``, ``cls.preprocess_cls``
will be constructed and used.
val_split: The ``val_split`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
batch_size: The ``batch_size`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
num_workers: The ``num_workers`` argument to pass to the :class:`~flash.core.data.data_module.DataModule`.
preprocess_kwargs: Additional keyword arguments to use when constructing the preprocess. Will only be used
if ``preprocess = None``.
Returns:
The constructed data module.
Examples::
train_dataset = fo.Dataset.from_dir(
"/path/to/dataset",
dataset_type=fo.types.ImageClassificationDirectoryTree,
)
data_module = DataModule.from_fiftyone(
train_dataset=train_dataset,
train_transform={
"to_tensor_transform": torch.as_tensor,
},
)
"""
return cls.from_data_source(
DefaultDataSources.FIFTYONE,
train_dataset,
val_dataset,
test_dataset,
predict_dataset,
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
predict_transform=predict_transform,
data_fetcher=data_fetcher,
preprocess=preprocess,
val_split=val_split,
batch_size=batch_size,
num_workers=num_workers,
**preprocess_kwargs,
)
| [] | [] | ["FLASH_TESTING"] | [] | ["FLASH_TESTING"] | python | 1 | 0 |
middleware/logger.go | package middleware
import (
"bytes"
"fmt"
"io"
"os"
"strconv"
"strings"
"sync/atomic"
"time"
fiber "github.com/gofiber/fiber"
utils "github.com/gofiber/utils"
colorable "github.com/mattn/go-colorable"
isatty "github.com/mattn/go-isatty"
bytebufferpool "github.com/valyala/bytebufferpool"
)
// Middleware types
type (
// LoggerConfig defines the config for Logger middleware.
LoggerConfig struct {
// Next defines a function to skip this middleware if returned true.
Next func(*fiber.Ctx) bool
// Format defines the logging tags
//
// - pid
// - time
// - ip
// - ips
// - url
// - host
// - method
// - methodColored
// - path
// - protocol
// - route
// - referer
// - ua
// - latency
// - status
// - statusColored
// - body
// - error
// - bytesSent
// - bytesReceived
// - header:<key>
// - query:<key>
// - form:<key>
// - cookie:<key>
//
// Optional. Default: ${time} - ${ip} - ${status} - ${latency} - ${method} ${path}\n
Format string
// TimeFormat https://programming.guide/go/format-parse-string-time-date-example.html
//
// Optional. Default: 15:04:05
TimeFormat string
// TimeZone can be specified, such as "UTC", "America/New_York", or "Asia/Chongqing"
//
// Optional. Default: Local
TimeZone string
// Output is a writer where logs are written
//
// Default: os.Stderr
Output io.Writer
// Colors are only supported if no custom Output is given
enableColors bool
// timeZoneLocation holds the compiled timezone
timeZoneLocation *time.Location
}
)
// Logger variables
const (
LoggerTagPid = "pid"
LoggerTagTime = "time"
LoggerTagReferer = "referer"
LoggerTagProtocol = "protocol"
LoggerTagIP = "ip"
LoggerTagIPs = "ips"
LoggerTagHost = "host"
LoggerTagMethod = "method"
LoggerTagPath = "path"
LoggerTagURL = "url"
LoggerTagUA = "ua"
LoggerTagLatency = "latency"
LoggerTagStatus = "status"
LoggerTagBody = "body"
LoggerTagBytesSent = "bytesSent"
LoggerTagBytesReceived = "bytesReceived"
LoggerTagRoute = "route"
LoggerTagError = "error"
LoggerTagHeader = "header:"
LoggerTagQuery = "query:"
LoggerTagForm = "form:"
LoggerTagCookie = "cookie:"
LoggerTagColorBlack = "black"
LoggerTagColorRed = "red"
LoggerTagColorGreen = "green"
LoggerTagColorYellow = "yellow"
LoggerTagColorBlue = "blue"
LoggerTagColorMagenta = "magenta"
LoggerTagColorCyan = "cyan"
LoggerTagColorWhite = "white"
LoggerTagColorReset = "resetColor"
// LoggerTagStatusColor = "statusColor"
// LoggerTagMethodColor = "methodColor"
)
// ANSI escape sequences for bright foreground colors (codes 90-97) and reset
const (
cBlack = "\u001b[90m"
cRed = "\u001b[91m"
cGreen = "\u001b[92m"
cYellow = "\u001b[93m"
cBlue = "\u001b[94m"
cMagenta = "\u001b[95m"
cCyan = "\u001b[96m"
cWhite = "\u001b[97m"
cReset = "\u001b[0m"
)
// LoggerConfigDefault is the default config
var LoggerConfigDefault = LoggerConfig{
Next: nil,
Format: "#${pid} - ${time} ${status} - ${latency} ${method} ${path}\n",
TimeFormat: "2006/01/02 15:04:05",
TimeZone: "Local",
Output: os.Stderr,
}
/*
Logger allows the following config arguments in any order:
- Logger()
- Logger(next func(*fiber.Ctx) bool)
- Logger(output io.Writer)
- Logger(format string)
- Logger(timeZone string)
- Logger(timeFormat string)
- Logger(config LoggerConfig)
*/
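// Example (a minimal sketch; "app" stands for an existing *fiber.App and the
// format string is illustrative):
//
//   app.Use(Logger("${pid} ${time} ${status} - ${latency} ${method} ${path}\n"))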
func Logger(options ...interface{}) fiber.Handler {
// Create default config
var config = LoggerConfig{}
// Assert options if provided to adjust the config
if len(options) > 0 {
for i := range options {
switch opt := options[i].(type) {
case func(*fiber.Ctx) bool:
config.Next = opt
case string:
if strings.Contains(opt, "${") {
config.Format = opt
} else if tzl := getTimeZoneLocation(opt); tzl != nil {
config.TimeZone = opt
config.timeZoneLocation = tzl
} else {
config.TimeFormat = opt
}
case io.Writer:
config.Output = opt
case LoggerConfig:
config = opt
default:
panic("Logger: the following option types are allowed: string, io.Writer, LoggerConfig")
}
}
}
// Return logger
return logger(config)
}
func logger(config LoggerConfig) fiber.Handler {
// Set config default values
if config.Format == "" {
config.Format = LoggerConfigDefault.Format
}
if config.TimeZone == "" {
config.TimeZone = LoggerConfigDefault.TimeZone
}
if config.TimeFormat == "" {
config.TimeFormat = LoggerConfigDefault.TimeFormat
}
if config.Output == nil {
// Check if colors should be disabled
if os.Getenv("TERM") == "dumb" ||
(!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) {
config.Output = LoggerConfigDefault.Output
} else {
config.enableColors = true
config.Output = colorable.NewColorableStderr()
}
}
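// Parse the format string once up front so each request only walks the
// pre-split text/tag segments instead of re-scanning the template.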
var tmpl loggerTemplate
tmpl.new(config.Format, "${", "}")
var timestamp atomic.Value
timestamp.Store(nowTimeString(config.timeZoneLocation, config.TimeFormat))
// Update the cached date/time every millisecond in a separate goroutine
if strings.Contains(config.Format, "${time}") {
go func() {
for {
time.Sleep(time.Millisecond)
timestamp.Store(nowTimeString(config.timeZoneLocation, config.TimeFormat))
}
}()
}
pid := fmt.Sprintf("%-5s", strconv.Itoa(os.Getpid()))
// Return handler
return func(c *fiber.Ctx) {
// Don't execute the middleware if Next returns true
if config.Next != nil && config.Next(c) {
c.Next()
return
}
// Middleware logic...
start := time.Now()
// handle request
c.Next()
// build log
stop := time.Now()
// Get new buffer
buf := bytebufferpool.Get()
_, err := tmpl.executeFunc(buf, func(w io.Writer, tag string) (int, error) {
switch tag {
case LoggerTagTime:
return buf.WriteString(timestamp.Load().(string))
case LoggerTagReferer:
return buf.WriteString(c.Get(fiber.HeaderReferer))
case LoggerTagProtocol:
return buf.WriteString(c.Protocol())
case LoggerTagPid:
return buf.WriteString(pid)
case LoggerTagIP:
return buf.WriteString(c.IP())
case LoggerTagIPs:
return buf.WriteString(c.Get(fiber.HeaderXForwardedFor))
case LoggerTagHost:
return buf.WriteString(c.Hostname())
case LoggerTagPath:
return buf.WriteString(c.Path())
case LoggerTagURL:
return buf.WriteString(c.OriginalURL())
case LoggerTagUA:
return buf.WriteString(c.Get(fiber.HeaderUserAgent))
case LoggerTagLatency:
return buf.WriteString(fmt.Sprintf("%-6s", stop.Sub(start).Round(1*time.Millisecond)))
// return buf.WriteString(stop.Sub(start).String())
case LoggerTagBody:
return buf.WriteString(c.Body())
case LoggerTagBytesReceived:
return buf.WriteString(strconv.Itoa(len(c.Fasthttp.Request.Body())))
case LoggerTagBytesSent:
return buf.WriteString(strconv.Itoa(len(c.Fasthttp.Response.Body())))
case LoggerTagRoute:
return buf.WriteString(c.Route().Path)
case LoggerTagError:
if c.Error() != nil {
return buf.WriteString(c.Error().Error())
}
case LoggerTagColorBlack:
return buf.WriteString(cBlack)
case LoggerTagColorRed:
return buf.WriteString(cRed)
case LoggerTagColorGreen:
return buf.WriteString(cGreen)
case LoggerTagColorYellow:
return buf.WriteString(cYellow)
case LoggerTagColorBlue:
return buf.WriteString(cBlue)
case LoggerTagColorMagenta:
return buf.WriteString(cMagenta)
case LoggerTagColorCyan:
return buf.WriteString(cCyan)
case LoggerTagColorWhite:
return buf.WriteString(cWhite)
case LoggerTagColorReset:
return buf.WriteString(cReset)
case LoggerTagStatus:
responseStatus := c.Fasthttp.Response.StatusCode()
if !config.enableColors {
return buf.WriteString(strconv.Itoa(responseStatus))
}
// Pick the color per status class; locals keep concurrent requests
// from racing on shared state.
var statusColor string
switch {
case responseStatus >= 200 && responseStatus < 300:
statusColor = cGreen
case responseStatus >= 300 && responseStatus < 400:
statusColor = cBlue
case responseStatus >= 400 && responseStatus < 500:
statusColor = cYellow
default:
statusColor = cRed
}
return buf.WriteString(statusColor + strconv.Itoa(responseStatus) + cReset)
case LoggerTagMethod:
requestMethod := c.Method()
if !config.enableColors {
return buf.WriteString(requestMethod)
}
var methodColor string
switch requestMethod {
case fiber.MethodGet:
methodColor = cGreen
case fiber.MethodPost:
methodColor = cCyan
case fiber.MethodPut:
methodColor = cYellow
case fiber.MethodDelete:
methodColor = cRed
case fiber.MethodPatch:
methodColor = cBlue
case fiber.MethodHead:
methodColor = cMagenta
case fiber.MethodOptions:
methodColor = cBlack
default:
methodColor = cReset
}
return buf.WriteString(fmt.Sprintf("%s%7s%s", methodColor, requestMethod, cReset))
//return buf.WriteString(methodColor + requestMethod + cReset)
default:
switch {
case strings.HasPrefix(tag, LoggerTagHeader):
return buf.WriteString(c.Get(tag[7:]))
case strings.HasPrefix(tag, LoggerTagQuery):
return buf.WriteString(c.Query(tag[6:]))
case strings.HasPrefix(tag, LoggerTagForm):
return buf.WriteString(c.FormValue(tag[5:]))
case strings.HasPrefix(tag, LoggerTagCookie):
return buf.WriteString(c.Cookies(tag[7:]))
}
}
return 0, nil
})
if err != nil {
_, _ = buf.WriteString(err.Error())
}
if _, err := config.Output.Write(buf.Bytes()); err != nil {
fmt.Println(err)
}
bytebufferpool.Put(buf)
}
}
func nowTimeString(tzl *time.Location, layout string) string {
// Fall back to the local time zone when no location was configured
if tzl == nil {
return time.Now().Format(layout)
}
return time.Now().In(tzl).Format(layout)
}
// Use Golang's time package to determine whether the TimeZone is available
func getTimeZoneLocation(name string) *time.Location {
tz, _ := time.LoadLocation(name)
return tz
}
// MIT License fasttemplate
// Copyright (c) 2015 Aliaksandr Valialkin
// https://github.com/valyala/fasttemplate/blob/master/LICENSE
type (
loggerTemplate struct {
template string
startTag string
endTag string
texts [][]byte
tags []string
}
loggerTagFunc func(w io.Writer, tag string) (int, error)
)
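// As a worked example (assuming startTag "${" and endTag "}"): parsing the
// template "${time} ${path}" yields texts ["", " ", ""] and tags
// ["time", "path"], which executeFunc interleaves as literal text and tag output.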
func (t *loggerTemplate) new(template, startTag, endTag string) {
t.template = template
t.startTag = startTag
t.endTag = endTag
t.texts = t.texts[:0]
t.tags = t.tags[:0]
if len(startTag) == 0 {
panic("startTag cannot be empty")
}
if len(endTag) == 0 {
panic("endTag cannot be empty")
}
s := utils.GetBytes(template)
a := utils.GetBytes(startTag)
b := utils.GetBytes(endTag)
tagsCount := bytes.Count(s, a)
if tagsCount == 0 {
return
}
if tagsCount+1 > cap(t.texts) {
t.texts = make([][]byte, 0, tagsCount+1)
}
if tagsCount > cap(t.tags) {
t.tags = make([]string, 0, tagsCount)
}
for {
n := bytes.Index(s, a)
if n < 0 {
t.texts = append(t.texts, s)
break
}
t.texts = append(t.texts, s[:n])
s = s[n+len(a):]
n = bytes.Index(s, b)
if n < 0 {
panic(fmt.Errorf("cannot find end tag=%q in the template=%q starting from %q", endTag, template, s))
}
t.tags = append(t.tags, utils.GetString(s[:n]))
s = s[n+len(b):]
}
}
func (t *loggerTemplate) executeFunc(w io.Writer, f loggerTagFunc) (int64, error) {
var nn int64
n := len(t.texts) - 1
if n == -1 {
ni, err := w.Write(utils.GetBytes(t.template))
return int64(ni), err
}
for i := 0; i < n; i++ {
ni, err := w.Write(t.texts[i])
nn += int64(ni)
if err != nil {
return nn, err
}
ni, err = f(w, t.tags[i])
nn += int64(ni)
if err != nil {
return nn, err
}
}
ni, err := w.Write(t.texts[n])
nn += int64(ni)
return nn, err
}
| ["\"TERM\""] | [] | ["TERM"] | [] | ["TERM"] | go | 1 | 0 |
pilot/pkg/bootstrap/server.go | // Copyright 2017 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bootstrap
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"sync"
"time"
"code.cloudfoundry.org/copilot"
"github.com/davecgh/go-spew/spew"
"github.com/gogo/protobuf/types"
middleware "github.com/grpc-ecosystem/go-grpc-middleware"
prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
multierror "github.com/hashicorp/go-multierror"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
mcpapi "istio.io/api/mcp/v1alpha1"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pilot/cmd"
configaggregate "istio.io/istio/pilot/pkg/config/aggregate"
cf "istio.io/istio/pilot/pkg/config/cloudfoundry"
"istio.io/istio/pilot/pkg/config/clusterregistry"
"istio.io/istio/pilot/pkg/config/coredatamodel"
"istio.io/istio/pilot/pkg/config/kube/crd"
"istio.io/istio/pilot/pkg/config/kube/ingress"
"istio.io/istio/pilot/pkg/config/memory"
configmonitor "istio.io/istio/pilot/pkg/config/monitor"
"istio.io/istio/pilot/pkg/model"
istio_networking "istio.io/istio/pilot/pkg/networking/core"
"istio.io/istio/pilot/pkg/networking/plugin"
"istio.io/istio/pilot/pkg/proxy/envoy"
envoyv2 "istio.io/istio/pilot/pkg/proxy/envoy/v2"
"istio.io/istio/pilot/pkg/serviceregistry"
"istio.io/istio/pilot/pkg/serviceregistry/aggregate"
"istio.io/istio/pilot/pkg/serviceregistry/cloudfoundry"
"istio.io/istio/pilot/pkg/serviceregistry/consul"
"istio.io/istio/pilot/pkg/serviceregistry/external"
"istio.io/istio/pilot/pkg/serviceregistry/kube"
srmemory "istio.io/istio/pilot/pkg/serviceregistry/memory"
"istio.io/istio/pkg/ctrlz"
kubelib "istio.io/istio/pkg/kube"
"istio.io/istio/pkg/log"
mcpclient "istio.io/istio/pkg/mcp/client"
"istio.io/istio/pkg/mcp/configz"
"istio.io/istio/pkg/mcp/creds"
"istio.io/istio/pkg/version"
// Import the resource package to pull in all proto types.
_ "istio.io/istio/galley/pkg/metadata"
)
const (
// ConfigMapKey should match the expected MeshConfig file name
ConfigMapKey = "mesh"
requiredMCPCertCheckFreq = 500 * time.Millisecond
)
var (
// FilepathWalkInterval dictates how often the file system is walked for config
FilepathWalkInterval = 100 * time.Millisecond
// PilotCertDir is the default location for mTLS certificates used by pilot
// Visible for tests - at runtime it can be set via the PILOT_CERT_DIR environment variable.
PilotCertDir = "/etc/certs/"
// DefaultPlugins is the default list of plugins to enable when no plugins
// are specified through the command line
DefaultPlugins = []string{
plugin.Authn,
plugin.Authz,
plugin.Health,
plugin.Mixer,
plugin.Envoyfilter,
}
)
func init() {
// get the grpc server wired up
// This should only be set before any RPCs are sent or received by this program.
grpc.EnableTracing = true
}
// MeshArgs provides configuration options for the mesh. If ConfigFile is provided, an attempt will be made to
// load the mesh from the file. Otherwise, a default mesh will be used with optional overrides.
type MeshArgs struct {
ConfigFile string
MixerAddress string
RdsRefreshDelay *types.Duration
}
// ConfigArgs provides configuration options for the configuration controller. If FileDir is set, that directory will
// be monitored for CRD yaml files and will update the controller as those files change (This is used for testing
// purposes). Otherwise, a CRD client is created based on the configuration.
type ConfigArgs struct {
ClusterRegistriesConfigmap string
ClusterRegistriesNamespace string
KubeConfig string
CFConfig string
ControllerOptions kube.ControllerOptions
FileDir string
DisableInstallCRDs bool
// Controller if specified, this controller overrides the other config settings.
Controller model.ConfigStoreCache
}
// ConsulArgs provides configuration for the Consul service registry.
type ConsulArgs struct {
Config string
ServerURL string
Interval time.Duration
}
// ServiceArgs provides the composite configuration for all service registries in the system.
type ServiceArgs struct {
Registries []string
Consul ConsulArgs
}
// PilotArgs provides all of the configuration parameters for the Pilot discovery service.
type PilotArgs struct {
DiscoveryOptions envoy.DiscoveryServiceOptions
Namespace string
Mesh MeshArgs
Config ConfigArgs
Service ServiceArgs
MeshConfig *meshconfig.MeshConfig
CtrlZOptions *ctrlz.Options
Plugins []string
MCPServerAddrs []string
MCPCredentialOptions *creds.Options
}
// Server contains the runtime configuration for the Pilot discovery service.
type Server struct {
HTTPListeningAddr net.Addr
GRPCListeningAddr net.Addr
SecureGRPCListeningAddr net.Addr
MonitorListeningAddr net.Addr
// TODO(nmittler): Consider alternatives to exposing these directly
EnvoyXdsServer *envoyv2.DiscoveryServer
ServiceController *aggregate.Controller
mesh *meshconfig.MeshConfig
configController model.ConfigStoreCache
mixerSAN []string
kubeClient kubernetes.Interface
startFuncs []startFunc
clusterStore *clusterregistry.ClusterStore
httpServer *http.Server
grpcServer *grpc.Server
secureGRPCServer *grpc.Server
discoveryService *envoy.DiscoveryService
istioConfigStore model.IstioConfigStore
mux *http.ServeMux
kubeRegistry *kube.Controller
}
// NewServer creates a new Server instance based on the provided arguments.
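// Example (a minimal sketch; the field values are illustrative only):
//
//   s, err := NewServer(PilotArgs{
//       Namespace: "istio-system",
//       Service:   ServiceArgs{Registries: []string{string(serviceregistry.KubernetesRegistry)}},
//   })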
func NewServer(args PilotArgs) (*Server, error) {
// If the namespace isn't set, try looking it up from the environment.
if args.Namespace == "" {
args.Namespace = os.Getenv("POD_NAMESPACE")
}
if args.Config.ClusterRegistriesNamespace == "" {
if args.Namespace != "" {
args.Config.ClusterRegistriesNamespace = args.Namespace
} else {
args.Config.ClusterRegistriesNamespace = model.IstioSystemNamespace
}
}
s := &Server{}
// Apply the arguments to the configuration.
if err := s.initKubeClient(&args); err != nil {
return nil, err
}
if err := s.initClusterRegistries(&args); err != nil {
return nil, err
}
if err := s.initMesh(&args); err != nil {
return nil, err
}
if err := s.initMixerSan(&args); err != nil {
return nil, err
}
if err := s.initConfigController(&args); err != nil {
return nil, err
}
if err := s.initServiceControllers(&args); err != nil {
return nil, err
}
if err := s.initDiscoveryService(&args); err != nil {
return nil, err
}
if err := s.initMonitor(&args); err != nil {
return nil, err
}
if err := s.initMultiClusterController(&args); err != nil {
return nil, err
}
if args.CtrlZOptions != nil {
_, _ = ctrlz.Run(args.CtrlZOptions, nil)
}
return s, nil
}
// Start starts all components of the Pilot discovery service on the port specified in DiscoveryServiceOptions.
// If Port == 0, a port number is automatically chosen. Content serving is started by this method,
// but is executed asynchronously. Serving can be cancelled at any time by closing the provided stop channel.
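// Example (a minimal sketch; the stop channel is owned by the caller):
//
//   stop := make(chan struct{})
//   if err := s.Start(stop); err != nil {
//       // handle startup error
//   }
//   // close(stop) later to shut the served components down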
func (s *Server) Start(stop <-chan struct{}) error {
// Now start all of the components.
for _, fn := range s.startFuncs {
if err := fn(stop); err != nil {
return err
}
}
return nil
}
// startFunc defines a function that will be used to start one or more components of the Pilot discovery service.
type startFunc func(stop <-chan struct{}) error
// initMonitor initializes the configuration for the pilot monitoring server.
func (s *Server) initMonitor(args *PilotArgs) error {
s.addStartFunc(func(stop <-chan struct{}) error {
monitor, addr, err := startMonitor(args.DiscoveryOptions.MonitoringAddr, s.mux)
if err != nil {
return err
}
s.MonitorListeningAddr = addr
go func() {
<-stop
err := monitor.Close()
log.Debugf("Monitoring server terminated: %v", err)
}()
return nil
})
return nil
}
func (s *Server) initClusterRegistries(args *PilotArgs) (err error) {
s.clusterStore = clusterregistry.NewClustersStore()
if s.kubeClient == nil {
log.Infof("skipping cluster registries, no kube-client created")
return nil
}
// Skip cluster registry setup in multicluster test cases that use the mock registry
if checkForMock(args.Service.Registries) {
return nil
}
if args.Config.ClusterRegistriesConfigmap != "" {
if err = clusterregistry.ReadClusters(s.kubeClient,
args.Config.ClusterRegistriesConfigmap,
args.Config.ClusterRegistriesNamespace,
s.clusterStore); err != nil {
return err
}
}
log.Infof("clusters configuration %s", spew.Sdump(s.clusterStore))
return err
}
// checkForMock reports whether the mock registry appears in the given list of registries.
func checkForMock(registries []string) bool {
for _, r := range registries {
if strings.ToLower(r) == "mock" {
return true
}
}
return false
}
// GetMeshConfig fetches the mesh configuration from a Kubernetes ConfigMap.
func GetMeshConfig(kube kubernetes.Interface, namespace, name string) (*v1.ConfigMap, *meshconfig.MeshConfig, error) {
if kube == nil {
defaultMesh := model.DefaultMeshConfig()
return nil, &defaultMesh, nil
}
config, err := kube.CoreV1().ConfigMaps(namespace).Get(name, meta_v1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
defaultMesh := model.DefaultMeshConfig()
return nil, &defaultMesh, nil
}
return nil, nil, err
}
// Values in the ConfigMap data are strings, while the proto may use other
// types, so we look the value up by key and parse it explicitly.
cfgYaml, exists := config.Data[ConfigMapKey]
if !exists {
return nil, nil, fmt.Errorf("missing configuration map key %q", ConfigMapKey)
}
mesh, err := model.ApplyMeshConfigDefaults(cfgYaml)
if err != nil {
return nil, nil, err
}
return config, mesh, nil
}
// initMesh creates the mesh in the pilotConfig from the input arguments.
func (s *Server) initMesh(args *PilotArgs) error {
// If a config file was specified, use it.
if args.MeshConfig != nil {
s.mesh = args.MeshConfig
return nil
}
var mesh *meshconfig.MeshConfig
var err error
if args.Mesh.ConfigFile != "" {
mesh, err = cmd.ReadMeshConfig(args.Mesh.ConfigFile)
if err != nil {
log.Warnf("failed to read mesh configuration, using default: %v", err)
}
}
if mesh == nil {
// Config file either wasn't specified or failed to load - use a default mesh.
if _, mesh, err = GetMeshConfig(s.kubeClient, kube.IstioNamespace, kube.IstioConfigMap); err != nil {
log.Warnf("failed to read mesh configuration: %v", err)
return err
}
// Allow some overrides for testing purposes.
if args.Mesh.MixerAddress != "" {
mesh.MixerCheckServer = args.Mesh.MixerAddress
mesh.MixerReportServer = args.Mesh.MixerAddress
}
}
log.Infof("mesh configuration %s", spew.Sdump(mesh))
log.Infof("version %s", version.Info.String())
log.Infof("flags %s", spew.Sdump(args))
s.mesh = mesh
return nil
}
// initMixerSan configures the mixerSAN configuration item. The mesh must already have been configured.
func (s *Server) initMixerSan(args *PilotArgs) error {
if s.mesh == nil {
return fmt.Errorf("the mesh has not been configured before configuring mixer san")
}
if s.mesh.DefaultConfig.ControlPlaneAuthPolicy == meshconfig.AuthenticationPolicy_MUTUAL_TLS {
s.mixerSAN = envoy.GetMixerSAN(args.Config.ControllerOptions.DomainSuffix, args.Namespace)
}
return nil
}
func (s *Server) getKubeCfgFile(args *PilotArgs) string {
return args.Config.KubeConfig
}
// initKubeClient creates the k8s client if running in a k8s environment.
func (s *Server) initKubeClient(args *PilotArgs) error {
if hasKubeRegistry(args) && args.Config.FileDir == "" {
client, kuberr := kubelib.CreateClientset(s.getKubeCfgFile(args), "")
if kuberr != nil {
return multierror.Prefix(kuberr, "failed to connect to Kubernetes API.")
}
s.kubeClient = client
}
return nil
}
type mockController struct{}
func (c *mockController) AppendServiceHandler(f func(*model.Service, model.Event)) error {
return nil
}
func (c *mockController) AppendInstanceHandler(f func(*model.ServiceInstance, model.Event)) error {
return nil
}
func (c *mockController) Run(<-chan struct{}) {}
func (s *Server) initMCPConfigController(args *PilotArgs) error {
clientNodeID := ""
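// Build the MCP type URLs Pilot can consume, one per registered Istio config
// type (e.g. "type.googleapis.com/istio.networking.v1alpha3.VirtualService").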
supportedTypes := make([]string, len(model.IstioConfigTypes))
for i, model := range model.IstioConfigTypes {
supportedTypes[i] = fmt.Sprintf("type.googleapis.com/%s", model.MessageName)
}
options := coredatamodel.Options{
DomainSuffix: args.Config.ControllerOptions.DomainSuffix,
}
mcpController := coredatamodel.NewController(options)
ctx, cancel := context.WithCancel(context.Background())
var clients []*mcpclient.Client
var conns []*grpc.ClientConn
for _, addr := range args.MCPServerAddrs {
u, err := url.Parse(addr)
if err != nil {
return err
}
securityOption := grpc.WithInsecure()
if u.Scheme == "mcps" {
requiredFiles := []string{
args.MCPCredentialOptions.CertificateFile,
args.MCPCredentialOptions.KeyFile,
args.MCPCredentialOptions.CACertificateFile,
}
log.Infof("Secure MCP configured. Waiting for required certificate files to become available: %v",
requiredFiles)
for len(requiredFiles) > 0 {
if _, err := os.Stat(requiredFiles[0]); os.IsNotExist(err) {
log.Infof("%v not found. Checking again in %v", requiredFiles[0], requiredMCPCertCheckFreq)
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(requiredMCPCertCheckFreq):
// retry
}
continue
}
log.Infof("%v found", requiredFiles[0])
requiredFiles = requiredFiles[1:]
}
watcher, err := creds.WatchFiles(ctx.Done(), args.MCPCredentialOptions)
if err != nil {
return err
}
credentials := creds.CreateForClient(u.Hostname(), watcher)
securityOption = grpc.WithTransportCredentials(credentials)
}
conn, err := grpc.DialContext(ctx, u.Host, securityOption)
if err != nil {
log.Errorf("Unable to dial MCP Server %q: %v", u.Host, err)
return err
}
cl := mcpapi.NewAggregatedMeshConfigServiceClient(conn)
mcpClient := mcpclient.New(cl, supportedTypes, mcpController, clientNodeID, map[string]string{})
configz.Register(mcpClient)
clients = append(clients, mcpClient)
conns = append(conns, conn)
}
s.addStartFunc(func(stop <-chan struct{}) error {
var wg sync.WaitGroup
for i := range clients {
client := clients[i]
wg.Add(1)
go func() {
client.Run(ctx)
wg.Done()
}()
}
go func() {
<-stop
// Stop the MCP clients and any pending connection.
cancel()
// Close all of the open grpc connections once the mcp
// client(s) have fully stopped.
wg.Wait()
for _, conn := range conns {
_ = conn.Close() // nolint: errcheck
}
}()
return nil
})
s.configController = mcpController
return nil
}
// initConfigController creates the config controller in the pilotConfig.
func (s *Server) initConfigController(args *PilotArgs) error {
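// Configuration can come from, in priority order: MCP servers, an explicitly
// supplied controller, a monitored file directory, or Kubernetes CRDs.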
if len(args.MCPServerAddrs) > 0 {
if err := s.initMCPConfigController(args); err != nil {
return err
}
} else if args.Config.Controller != nil {
s.configController = args.Config.Controller
} else if args.Config.FileDir != "" {
store := memory.Make(model.IstioConfigTypes)
configController := memory.NewController(store)
err := s.makeFileMonitor(args, configController)
if err != nil {
return err
}
s.configController = configController
if args.Config.CFConfig != "" {
cfConfig, err := cloudfoundry.LoadConfig(args.Config.CFConfig)
if err != nil {
return multierror.Prefix(err, "loading cloud foundry config")
}
tlsConfig, err := cfConfig.ClientTLSConfig()
if err != nil {
return multierror.Prefix(err, "creating cloud foundry client tls config")
}
client, err := copilot.NewIstioClient(cfConfig.Copilot.Address, tlsConfig)
if err != nil {
return multierror.Prefix(err, "creating cloud foundry client")
}
confController, err := configaggregate.MakeCache([]model.ConfigStoreCache{
s.configController,
cf.NewController(client, configController, log.RegisterScope("cloudfoundry", "cloudfoundry debugging", 0), 30*time.Second, 10*time.Second),
})
if err != nil {
return err
}
s.configController = confController
}
} else {
controller, err := s.makeKubeConfigController(args)
if err != nil {
return err
}
s.configController = controller
}
// Defer starting the controller until after the service is created.
s.addStartFunc(func(stop <-chan struct{}) error {
go s.configController.Run(stop)
return nil
})
// If running in ingress mode (requires k8s), wrap the config controller.
if hasKubeRegistry(args) && s.mesh.IngressControllerMode != meshconfig.MeshConfig_OFF {
// Wrap the config controller with a cache.
configController, err := configaggregate.MakeCache([]model.ConfigStoreCache{
s.configController,
ingress.NewController(s.kubeClient, s.mesh, args.Config.ControllerOptions),
})
if err != nil {
return err
}
// Update the config controller
s.configController = configController
if ingressSyncer, errSyncer := ingress.NewStatusSyncer(s.mesh, s.kubeClient,
args.Namespace, args.Config.ControllerOptions); errSyncer != nil {
log.Warnf("Disabled ingress status syncer due to %v", errSyncer)
} else {
s.addStartFunc(func(stop <-chan struct{}) error {
go ingressSyncer.Run(stop)
return nil
})
}
}
// Create the config store.
s.istioConfigStore = model.MakeIstioStore(s.configController)
return nil
}
func (s *Server) makeKubeConfigController(args *PilotArgs) (model.ConfigStoreCache, error) {
kubeCfgFile := s.getKubeCfgFile(args)
configClient, err := crd.NewClient(kubeCfgFile, "", model.IstioConfigTypes, args.Config.ControllerOptions.DomainSuffix)
if err != nil {
return nil, multierror.Prefix(err, "failed to open a config client.")
}
if !args.Config.DisableInstallCRDs {
if err = configClient.RegisterResources(); err != nil {
return nil, multierror.Prefix(err, "failed to register custom resources.")
}
}
return crd.NewController(configClient, args.Config.ControllerOptions), nil
}
func (s *Server) makeFileMonitor(args *PilotArgs, configController model.ConfigStore) error {
fileSnapshot := configmonitor.NewFileSnapshot(args.Config.FileDir, model.IstioConfigTypes)
fileMonitor := configmonitor.NewMonitor("file-monitor", configController, FilepathWalkInterval, fileSnapshot.ReadConfigFiles)
// Defer starting the file monitor until after the service is created.
s.addStartFunc(func(stop <-chan struct{}) error {
fileMonitor.Start(stop)
return nil
})
return nil
}
// createK8sServiceControllers creates all the k8s service controllers under this pilot
func (s *Server) createK8sServiceControllers(serviceControllers *aggregate.Controller, args *PilotArgs) (err error) {
clusterID := string(serviceregistry.KubernetesRegistry)
log.Infof("Primary Cluster name: %s", clusterID)
kubectl := kube.NewController(s.kubeClient, args.Config.ControllerOptions)
s.kubeRegistry = kubectl
serviceControllers.AddRegistry(
aggregate.Registry{
Name: serviceregistry.KubernetesRegistry,
ClusterID: clusterID,
ServiceDiscovery: kubectl,
ServiceAccounts: kubectl,
Controller: kubectl,
})
return
}
// initMultiClusterController initializes the multicluster controller.
// It is currently implemented only for Kubernetes registries.
func (s *Server) initMultiClusterController(args *PilotArgs) error {
if hasKubeRegistry(args) {
s.addStartFunc(func(stop <-chan struct{}) error {
secretController := clusterregistry.NewController(s.kubeClient,
args.Config.ClusterRegistriesNamespace,
s.clusterStore,
s.ServiceController,
s.EnvoyXdsServer,
args.Config.ControllerOptions.ResyncPeriod,
args.Config.ControllerOptions.WatchedNamespace,
args.Config.ControllerOptions.DomainSuffix)
// Start secret controller which watches for runtime secret Object changes and adds secrets dynamically
go secretController.Run(stop)
return nil
})
}
return nil
}
func hasKubeRegistry(args *PilotArgs) bool {
for _, r := range args.Service.Registries {
if serviceregistry.ServiceRegistry(r) == serviceregistry.KubernetesRegistry {
return true
}
}
return false
}
// initServiceControllers creates and initializes the service controllers
func (s *Server) initServiceControllers(args *PilotArgs) error {
serviceControllers := aggregate.NewController()
registered := make(map[serviceregistry.ServiceRegistry]bool)
for _, r := range args.Service.Registries {
serviceRegistry := serviceregistry.ServiceRegistry(r)
if _, exists := registered[serviceRegistry]; exists {
log.Warnf("%s registry specified multiple times.", r)
continue
}
registered[serviceRegistry] = true
log.Infof("Adding %s registry adapter", serviceRegistry)
switch serviceRegistry {
case serviceregistry.ConfigRegistry:
s.initConfigRegistry(serviceControllers)
case serviceregistry.MockRegistry:
s.initMemoryRegistry(serviceControllers)
case serviceregistry.KubernetesRegistry:
if err := s.createK8sServiceControllers(serviceControllers, args); err != nil {
return err
}
case serviceregistry.ConsulRegistry:
if err := s.initConsulRegistry(serviceControllers, args); err != nil {
return err
}
case serviceregistry.CloudFoundryRegistry:
if err := s.initCloudFoundryRegistry(serviceControllers, args); err != nil {
return err
}
default:
return fmt.Errorf("service registry %s is not supported", r)
}
}
serviceEntryStore := external.NewServiceDiscovery(s.configController, s.istioConfigStore)
// add service entry registry to aggregator by default
serviceEntryRegistry := aggregate.Registry{
Name: "ServiceEntries",
Controller: serviceEntryStore,
ServiceDiscovery: serviceEntryStore,
ServiceAccounts: serviceEntryStore,
}
serviceControllers.AddRegistry(serviceEntryRegistry)
s.ServiceController = serviceControllers
// Defer running of the service controllers.
s.addStartFunc(func(stop <-chan struct{}) error {
go s.ServiceController.Run(stop)
return nil
})
return nil
}
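// For illustration (assumed invocation, not verified from this source): the
// registry list iterated above typically comes from a comma-separated CLI flag,
// e.g.
//
//	pilot-discovery discovery --registries Kubernetes,Consul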
func (s *Server) initMemoryRegistry(serviceControllers *aggregate.Controller) {
// MemServiceDiscovery implementation
discovery1 := srmemory.NewDiscovery(
map[model.Hostname]*model.Service{ // srmemory.HelloService.Hostname: srmemory.HelloService,
}, 2)
discovery2 := srmemory.NewDiscovery(
map[model.Hostname]*model.Service{ // srmemory.WorldService.Hostname: srmemory.WorldService,
}, 2)
registry1 := aggregate.Registry{
Name: serviceregistry.ServiceRegistry("mockAdapter1"),
ClusterID: "mockAdapter1",
ServiceDiscovery: discovery1,
ServiceAccounts: discovery1,
Controller: &mockController{},
}
registry2 := aggregate.Registry{
Name: serviceregistry.ServiceRegistry("mockAdapter2"),
ClusterID: "mockAdapter2",
ServiceDiscovery: discovery2,
ServiceAccounts: discovery2,
Controller: &mockController{},
}
serviceControllers.AddRegistry(registry1)
serviceControllers.AddRegistry(registry2)
}
func (s *Server) initConfigRegistry(serviceControllers *aggregate.Controller) {
serviceEntryStore := external.NewServiceDiscovery(s.configController, s.istioConfigStore)
serviceControllers.AddRegistry(aggregate.Registry{
Name: serviceregistry.ConfigRegistry,
ServiceDiscovery: serviceEntryStore,
ServiceAccounts: serviceEntryStore,
Controller: serviceEntryStore,
})
}
func (s *Server) initDiscoveryService(args *PilotArgs) error {
environment := &model.Environment{
Mesh: s.mesh,
IstioConfigStore: s.istioConfigStore,
ServiceDiscovery: s.ServiceController,
ServiceAccounts: s.ServiceController,
MixerSAN: s.mixerSAN,
}
// Set up discovery service
discovery, err := envoy.NewDiscoveryService(
s.ServiceController,
s.configController,
environment,
args.DiscoveryOptions,
)
if err != nil {
return fmt.Errorf("failed to create discovery service: %v", err)
}
s.discoveryService = discovery
s.mux = s.discoveryService.RestContainer.ServeMux
// For now we create the gRPC server sourcing data from Pilot's older data model.
s.initGrpcServer()
s.EnvoyXdsServer = envoyv2.NewDiscoveryServer(environment, istio_networking.NewConfigGenerator(args.Plugins))
// TODO: decouple v2 from the cache invalidation, use direct listeners.
envoy.V2ClearCache = s.EnvoyXdsServer.ClearCacheFunc()
s.EnvoyXdsServer.Register(s.grpcServer)
if s.kubeRegistry != nil {
// kubeRegistry may use the environment for push status reporting.
// TODO: maybe all registries should have this as an optional field?
s.kubeRegistry.Env = environment
}
s.EnvoyXdsServer.InitDebug(s.mux, s.ServiceController)
s.EnvoyXdsServer.ConfigController = s.configController
s.httpServer = &http.Server{
Addr: args.DiscoveryOptions.HTTPAddr,
Handler: discovery.RestContainer}
listener, err := net.Listen("tcp", args.DiscoveryOptions.HTTPAddr)
if err != nil {
return err
}
s.HTTPListeningAddr = listener.Addr()
grpcListener, err := net.Listen("tcp", args.DiscoveryOptions.GrpcAddr)
if err != nil {
return err
}
s.GRPCListeningAddr = grpcListener.Addr()
// TODO: only if TLS certs, go routine to check for late certs
secureGrpcListener, err := net.Listen("tcp", args.DiscoveryOptions.SecureGrpcAddr)
if err != nil {
return err
}
s.SecureGRPCListeningAddr = secureGrpcListener.Addr()
s.addStartFunc(func(stop <-chan struct{}) error {
log.Infof("Discovery service started at http=%s grpc=%s", listener.Addr().String(), grpcListener.Addr().String())
go func() {
if err = s.httpServer.Serve(listener); err != nil {
log.Warna(err)
}
}()
go func() {
if err = s.grpcServer.Serve(grpcListener); err != nil {
log.Warna(err)
}
}()
if len(args.DiscoveryOptions.SecureGrpcAddr) > 0 {
go s.secureGrpcStart(secureGrpcListener)
}
go func() {
<-stop
model.JwtKeyResolver.Close()
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
err = s.httpServer.Shutdown(ctx)
if err != nil {
log.Warna(err)
}
s.grpcServer.GracefulStop()
if s.secureGRPCServer != nil {
s.secureGRPCServer.GracefulStop()
}
}()
return err
})
return nil
}
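// Note (illustrative): the three listeners above are pilot's HTTP debug/legacy
// discovery endpoint, the plaintext gRPC XDS endpoint, and the optional secure
// gRPC endpoint; a default install commonly binds :8080, :15010 and :15011
// respectively (assumed values, all configurable via DiscoveryOptions).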
func (s *Server) initConsulRegistry(serviceControllers *aggregate.Controller, args *PilotArgs) error {
log.Infof("Consul url: %v", args.Service.Consul.ServerURL)
conctl, conerr := consul.NewController(
args.Service.Consul.ServerURL, args.Service.Consul.Interval)
if conerr != nil {
return fmt.Errorf("failed to create Consul controller: %v", conerr)
}
serviceControllers.AddRegistry(
aggregate.Registry{
Name: serviceregistry.ConsulRegistry,
ServiceDiscovery: conctl,
ServiceAccounts: conctl,
Controller: conctl,
})
return nil
}
func (s *Server) initCloudFoundryRegistry(serviceControllers *aggregate.Controller, args *PilotArgs) error {
cfConfig, err := cloudfoundry.LoadConfig(args.Config.CFConfig)
if err != nil {
return multierror.Prefix(err, "loading cloud foundry config")
}
tlsConfig, err := cfConfig.ClientTLSConfig()
if err != nil {
return multierror.Prefix(err, "creating cloud foundry client tls config")
}
client, err := copilot.NewIstioClient(cfConfig.Copilot.Address, tlsConfig)
if err != nil {
return multierror.Prefix(err, "creating cloud foundry client")
}
serviceControllers.AddRegistry(aggregate.Registry{
Name: serviceregistry.CloudFoundryRegistry,
Controller: &cloudfoundry.Controller{
Ticker: cloudfoundry.NewTicker(cfConfig.Copilot.PollInterval),
},
ServiceDiscovery: &cloudfoundry.ServiceDiscovery{
RoutesRepo: cloudfoundry.NewCachedRoutes(client, log.RegisterScope("cfcacher", "cf cacher debugging", 0), "30s"),
ServicePort: cfConfig.ServicePort,
},
ServiceAccounts: cloudfoundry.NewServiceAccounts(),
})
return nil
}
func (s *Server) initGrpcServer() {
grpcOptions := s.grpcServerOptions()
s.grpcServer = grpc.NewServer(grpcOptions...)
}
// The secure grpc will start when the credentials are found.
func (s *Server) secureGrpcStart(listener net.Listener) {
certDir := os.Getenv("PILOT_CERT_DIR")
if certDir == "" {
certDir = PilotCertDir // /etc/certs
}
if !strings.HasSuffix(certDir, "/") {
certDir = certDir + "/"
}
for i := 0; i < 30; i++ {
opts := s.grpcServerOptions()
// This is used for the grpc h2 implementation. It doesn't appear to be needed in
// the case of the golang h2 stack.
creds, err := credentials.NewServerTLSFromFile(certDir+model.CertChainFilename,
certDir+model.KeyFilename)
// certs not ready yet.
if err != nil {
time.Sleep(5 * time.Second)
continue
}
// TODO: parse the file to determine expiration date. Restart listener before expiration
cert, err := tls.LoadX509KeyPair(certDir+model.CertChainFilename,
certDir+model.KeyFilename)
if err != nil {
time.Sleep(5 * time.Second)
continue
}
caCertFile := certDir + model.RootCertFilename
caCert, err := ioutil.ReadFile(caCertFile)
if err != nil {
time.Sleep(5 * time.Second)
continue
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
opts = append(opts, grpc.Creds(creds))
s.secureGRPCServer = grpc.NewServer(opts...)
s.EnvoyXdsServer.Register(s.secureGRPCServer)
log.Infof("Starting GRPC secure on %v with certs in %s", listener.Addr(), certDir)
s := &http.Server{
TLSConfig: &tls.Config{
Certificates: []tls.Certificate{cert},
VerifyPeerCertificate: func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
// For now accept any certs - pilot is not authenticating the caller, TLS used for
// privacy
return nil
},
NextProtos: []string{"h2", "http/1.1"},
//ClientAuth: tls.NoClientCert,
//ClientAuth: tls.RequestClientCert,
ClientAuth: tls.RequireAndVerifyClientCert,
ClientCAs: caCertPool,
},
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.ProtoMajor == 2 && strings.HasPrefix(
r.Header.Get("Content-Type"), "application/grpc") {
s.secureGRPCServer.ServeHTTP(w, r)
} else {
s.mux.ServeHTTP(w, r)
}
}),
}
// This seems the only way to call setupHTTP2 - it may also be possible to set NextProto
// on a listener
_ = httpsServer.ServeTLS(listener, certDir+model.CertChainFilename, certDir+model.KeyFilename)
// The other way to set TLS - but you can't add http handlers, and the h2 stack is
// different.
//if err := s.secureGRPCServer.Serve(listener); err != nil {
// log.Warna(err)
//}
}
log.Errorf("Failed to find certificates for GRPC secure in %s", certDir)
// Exit - mesh is in MTLS mode, but certificates are missing or bad.
// k8s may allocate to a different machine.
if s.mesh.DefaultConfig.ControlPlaneAuthPolicy == meshconfig.AuthenticationPolicy_MUTUAL_TLS {
os.Exit(403)
}
}
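// Illustrative note: the retry loop above expects the usual Istio certificate
// layout under PILOT_CERT_DIR (default /etc/certs): the files named by
// model.CertChainFilename, model.KeyFilename and model.RootCertFilename,
// conventionally cert-chain.pem, key.pem and root-cert.pem.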
func (s *Server) grpcServerOptions() []grpc.ServerOption {
interceptors := []grpc.UnaryServerInterceptor{
// setup server prometheus monitoring (as final interceptor in chain)
prometheus.UnaryServerInterceptor,
}
prometheus.EnableHandlingTimeHistogram()
// Temp setting, default should be enough for most supported environments. Can be used for testing
// envoy with lower values.
var maxStreams int
maxStreamsEnv := os.Getenv("ISTIO_GPRC_MAXSTREAMS")
if len(maxStreamsEnv) > 0 {
maxStreams, _ = strconv.Atoi(maxStreamsEnv)
}
if maxStreams == 0 {
maxStreams = 100000
}
grpcOptions := []grpc.ServerOption{
grpc.UnaryInterceptor(middleware.ChainUnaryServer(interceptors...)),
grpc.MaxConcurrentStreams(uint32(maxStreams)),
}
return grpcOptions
}
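// Example (sketch): the stream cap above can be raised for load testing via the
// environment, e.g.
//
//	ISTIO_GPRC_MAXSTREAMS=200000 pilot-discovery discovery
//
// Note the variable keeps its long-standing "GPRC" spelling.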
func (s *Server) addStartFunc(fn startFunc) {
s.startFuncs = append(s.startFuncs, fn)
}
| [
"\"POD_NAMESPACE\"",
"\"PILOT_CERT_DIR\"",
"\"ISTIO_GPRC_MAXSTREAMS\""
]
| []
| [
"POD_NAMESPACE",
"ISTIO_GPRC_MAXSTREAMS",
"PILOT_CERT_DIR"
]
| [] | ["POD_NAMESPACE", "ISTIO_GPRC_MAXSTREAMS", "PILOT_CERT_DIR"] | go | 3 | 0 | |
main.go | package main
import (
// Import the generated protobuf code
"errors"
"fmt"
"github.com/micro/go-micro"
"github.com/micro/go-micro/metadata"
"github.com/micro/go-micro/server"
k8s "github.com/micro/kubernetes/go/micro"
pb "github.com/wizofgoz/shippy-consignment-service/proto/consignment"
userService "github.com/wizofgoz/shippy-user-service/proto/user"
vesselProto "github.com/wizofgoz/shippy-vessel-service/proto/vessel"
"golang.org/x/net/context"
"log"
"os"
)
const (
defaultHost = "localhost:27017"
)
var (
srv micro.Service
)
func main() {
// Database host from the environment variables
host := os.Getenv("DB_HOST")
if host == "" {
host = defaultHost
}
session, err := CreateSession(host)
if err != nil {
// We're wrapping the error returned from our CreateSession
// here to add some context to the error.
log.Panicf("Could not connect to datastore with host %s - %v", host, err)
}
// Mgo creates a 'master' session; we need to end that session
// before the main function closes. Deferring the Close only after the
// error check avoids closing a nil session when CreateSession fails.
defer session.Close()
// Create a new service. Optionally include some options here.
srv = k8s.NewService(
// This name must match the package name given in your protobuf definition
micro.Name("shippy.consignment"),
micro.Version("latest"),
micro.WrapHandler(AuthWrapper),
)
vesselClient := vesselProto.NewVesselServiceClient("shippy.vessel", srv.Client())
// Init will parse the command line flags.
srv.Init()
// Register handler
pb.RegisterConsignmentServiceHandler(srv.Server(), &service{session, vesselClient})
// Run the server
if err := srv.Run(); err != nil {
fmt.Println(err)
}
}
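// Illustrative sketch (not from the original source) of how a caller such as the
// consignment CLI attaches the token that AuthWrapper below reads from the
// request metadata:
//
//	ctx := metadata.NewContext(context.Background(), map[string]string{
//		"token": tokenString, // read back server-side as meta["Token"]
//	})
//	_, err := client.CreateConsignment(ctx, consignment)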
// AuthWrapper is a higher-order function which takes a HandlerFunc
// and returns a function that takes a context, request, and response interface.
// The token is extracted from the context set in our consignment-cli, that
// token is then sent over to the user service to be validated.
// If valid, the call is passed along to the handler. If not,
// an error is returned.
func AuthWrapper(fn server.HandlerFunc) server.HandlerFunc {
return func(ctx context.Context, req server.Request, resp interface{}) error {
if os.Getenv("DISABLE_AUTH") == "true" {
return fn(ctx, req, resp)
}
meta, ok := metadata.FromContext(ctx)
if !ok {
return errors.New("no auth meta-data found in request")
}
// Note the key is uppercase: metadata keys are canonicalized like
// HTTP headers in transit, so "token" arrives as "Token".
token := meta["Token"]
log.Println("Authenticating with token: ", token)
// Auth here
// Really shouldn't be using a global here, find a better way
// of doing this, since you can't pass it into a wrapper.
authClient := userService.NewUserServiceClient("shippy.user", srv.Client())
_, err := authClient.ValidateToken(ctx, &userService.Token{
Token: token,
})
if err != nil {
return err
}
err = fn(ctx, req, resp)
return err
}
} | [
"\"DB_HOST\"",
"\"DISABLE_AUTH\""
]
| []
| [
"DISABLE_AUTH",
"DB_HOST"
]
| [] | ["DISABLE_AUTH", "DB_HOST"] | go | 2 | 0 | |
simple-app/src/app.go | package main
import (
"flag"
"fmt"
"log"
"net/http"
"os"
)
func handler(w http.ResponseWriter, r *http.Request) {
log.Print("Simple app running...")
msg := os.Getenv("SIMPLE_MSG")
if msg == "" {
msg = ":( SIMPLE_MSG variable not found"
}
fmt.Fprintf(w, "<h1>%s</h1>", msg)
}
func main() {
port := os.Getenv("PORT")
if len(port) == 0 {
port = "8080"
}
flag.Parse()
log.Print("Simple app server started...")
http.HandleFunc("/", handler)
log.Fatal(http.ListenAndServe(":"+port, nil))
}
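// Example usage (sketch): SIMPLE_MSG="Hello from the cluster" PORT=9090 go run app.go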
| [
"\"SIMPLE_MSG\"",
"\"PORT\""
]
| []
| [
"PORT",
"SIMPLE_MSG"
]
| [] | ["PORT", "SIMPLE_MSG"] | go | 2 | 0 | |
commands.go | package docker
import (
"archive/tar"
"bufio"
"bytes"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/auth"
"github.com/dotcloud/docker/engine"
flag "github.com/dotcloud/docker/pkg/mflag"
"github.com/dotcloud/docker/pkg/sysinfo"
"github.com/dotcloud/docker/pkg/term"
"github.com/dotcloud/docker/registry"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"os/signal"
"path"
"reflect"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"syscall"
"text/tabwriter"
"text/template"
"time"
)
var (
GITCOMMIT string
VERSION string
)
var (
ErrConnectionRefused = errors.New("Can't connect to docker daemon. Is 'docker -d' running on this host?")
)
func (cli *DockerCli) getMethod(name string) (func(...string) error, bool) {
methodName := "Cmd" + strings.ToUpper(name[:1]) + strings.ToLower(name[1:])
method := reflect.ValueOf(cli).MethodByName(methodName)
if !method.IsValid() {
return nil, false
}
return method.Interface().(func(...string) error), true
}
func ParseCommands(proto, addr string, args ...string) error {
cli := NewDockerCli(os.Stdin, os.Stdout, os.Stderr, proto, addr)
if len(args) > 0 {
method, exists := cli.getMethod(args[0])
if !exists {
fmt.Println("Error: Command not found:", args[0])
return cli.CmdHelp(args[1:]...)
}
return method(args[1:]...)
}
return cli.CmdHelp(args...)
}
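// Example (sketch): the reflection-based dispatch above turns `docker ps -a`
// into a call to DockerCli.CmdPs, roughly equivalent to:
//
//	err := ParseCommands("unix", "/var/run/docker.sock", "ps", "-a")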
func (cli *DockerCli) CmdHelp(args ...string) error {
if len(args) > 0 {
method, exists := cli.getMethod(args[0])
if !exists {
fmt.Fprintf(cli.err, "Error: Command not found: %s\n", args[0])
} else {
method("--help")
return nil
}
}
help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[unix://%s]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", DEFAULTUNIXSOCKET)
for _, command := range [][]string{
{"attach", "Attach to a running container"},
{"build", "Build a container from a Dockerfile"},
{"commit", "Create a new image from a container's changes"},
{"cp", "Copy files/folders from the containers filesystem to the host path"},
{"diff", "Inspect changes on a container's filesystem"},
{"events", "Get real time events from the server"},
{"export", "Stream the contents of a container as a tar archive"},
{"history", "Show the history of an image"},
{"images", "List images"},
{"import", "Create a new filesystem image from the contents of a tarball"},
{"info", "Display system-wide information"},
{"insert", "Insert a file in an image"},
{"inspect", "Return low-level information on a container"},
{"kill", "Kill a running container"},
{"load", "Load an image from a tar archive"},
{"login", "Register or Login to the docker registry server"},
{"logs", "Fetch the logs of a container"},
{"port", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT"},
{"ps", "List containers"},
{"pull", "Pull an image or a repository from the docker registry server"},
{"push", "Push an image or a repository to the docker registry server"},
{"restart", "Restart a running container"},
{"rm", "Remove one or more containers"},
{"rmi", "Remove one or more images"},
{"run", "Run a command in a new container"},
{"save", "Save an image to a tar archive"},
{"search", "Search for an image in the docker index"},
{"start", "Start a stopped container"},
{"stop", "Stop a running container"},
{"tag", "Tag an image into a repository"},
{"top", "Lookup the running processes of a container"},
{"version", "Show the docker version information"},
{"wait", "Block until a container stops, then print its exit code"},
} {
help += fmt.Sprintf(" %-10.10s%s\n", command[0], command[1])
}
fmt.Fprintf(cli.err, "%s\n", help)
return nil
}
func (cli *DockerCli) CmdInsert(args ...string) error {
cmd := cli.Subcmd("insert", "IMAGE URL PATH", "Insert a file from URL in the IMAGE at PATH")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 3 {
cmd.Usage()
return nil
}
v := url.Values{}
v.Set("url", cmd.Arg(1))
v.Set("path", cmd.Arg(2))
return cli.stream("POST", "/images/"+cmd.Arg(0)+"/insert?"+v.Encode(), nil, cli.out, nil)
}
// MkBuildContext returns an archive of an empty context with the contents
// of `dockerfile` at the path ./Dockerfile
func MkBuildContext(dockerfile string, files [][2]string) (archive.Archive, error) {
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
files = append(files, [2]string{"Dockerfile", dockerfile})
for _, file := range files {
name, content := file[0], file[1]
hdr := &tar.Header{
Name: name,
Size: int64(len(content)),
}
if err := tw.WriteHeader(hdr); err != nil {
return nil, err
}
if _, err := tw.Write([]byte(content)); err != nil {
return nil, err
}
}
if err := tw.Close(); err != nil {
return nil, err
}
return buf, nil
}
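// Illustrative use (sketch): CmdBuild relies on this helper for `docker build -`,
// packing a Dockerfile read from stdin into an in-memory tar context, e.g.
//
//	context, err := MkBuildContext("FROM busybox\nCMD [\"echo\", \"hi\"]\n", nil)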
func (cli *DockerCli) CmdBuild(args ...string) error {
cmd := cli.Subcmd("build", "[OPTIONS] PATH | URL | -", "Build a new container image from the source code at PATH")
tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) to be applied to the resulting image in case of success")
suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress verbose build output")
noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image")
rm := cmd.Bool([]string{"#rm", "-rm"}, false, "Remove intermediate containers after a successful build")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
var (
context archive.Archive
isRemote bool
err error
)
if cmd.Arg(0) == "-" {
// As a special case, 'docker build -' will build from an empty context with the
// contents of stdin as a Dockerfile
dockerfile, err := ioutil.ReadAll(cli.in)
if err != nil {
return err
}
context, err = MkBuildContext(string(dockerfile), nil)
} else if utils.IsURL(cmd.Arg(0)) || utils.IsGIT(cmd.Arg(0)) {
isRemote = true
} else {
if _, err := os.Stat(cmd.Arg(0)); err != nil {
return err
}
filename := path.Join(cmd.Arg(0), "Dockerfile")
if _, err = os.Stat(filename); os.IsNotExist(err) {
return fmt.Errorf("no Dockerfile found in %s", cmd.Arg(0))
}
context, err = archive.Tar(cmd.Arg(0), archive.Uncompressed)
}
var body io.Reader
// Setup an upload progress bar
// FIXME: ProgressReader shouldn't be this annoying to use
if context != nil {
sf := utils.NewStreamFormatter(false)
body = utils.ProgressReader(ioutil.NopCloser(context), 0, cli.err, sf, true, "", "Uploading context")
}
// Upload the build context
v := &url.Values{}
v.Set("t", *tag)
if *suppressOutput {
v.Set("q", "1")
}
if isRemote {
v.Set("remote", cmd.Arg(0))
}
if *noCache {
v.Set("nocache", "1")
}
if *rm {
v.Set("rm", "1")
}
cli.LoadConfigFile()
headers := http.Header(make(map[string][]string))
buf, err := json.Marshal(cli.configFile)
if err != nil {
return err
}
headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
if context != nil {
headers.Set("Content-Type", "application/tar")
}
err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), body, cli.out, headers)
if jerr, ok := err.(*utils.JSONError); ok {
// If no error code is set, default to 1
if jerr.Code == 0 {
jerr.Code = 1
}
return &utils.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
}
return err
}
// 'docker login': login / register a user to registry service.
func (cli *DockerCli) CmdLogin(args ...string) error {
cmd := cli.Subcmd("login", "[OPTIONS] [SERVER]", "Register or Login to a docker registry server, if no server is specified \""+auth.IndexServerAddress()+"\" is the default.")
var username, password, email string
cmd.StringVar(&username, []string{"u", "-username"}, "", "username")
cmd.StringVar(&password, []string{"p", "-password"}, "", "password")
cmd.StringVar(&email, []string{"e", "-email"}, "", "email")
err := cmd.Parse(args)
if err != nil {
return nil
}
serverAddress := auth.IndexServerAddress()
if len(cmd.Args()) > 0 {
serverAddress, err = registry.ExpandAndVerifyRegistryUrl(cmd.Arg(0))
if err != nil {
return err
}
fmt.Fprintf(cli.out, "Login against server at %s\n", serverAddress)
}
promptDefault := func(prompt string, configDefault string) {
if configDefault == "" {
fmt.Fprintf(cli.out, "%s: ", prompt)
} else {
fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault)
}
}
readInput := func(in io.Reader, out io.Writer) string {
reader := bufio.NewReader(in)
line, _, err := reader.ReadLine()
if err != nil {
fmt.Fprintln(out, err.Error())
os.Exit(1)
}
return string(line)
}
cli.LoadConfigFile()
authconfig, ok := cli.configFile.Configs[serverAddress]
if !ok {
authconfig = auth.AuthConfig{}
}
if username == "" {
promptDefault("Username", authconfig.Username)
username = readInput(cli.in, cli.out)
if username == "" {
username = authconfig.Username
}
}
if username != authconfig.Username {
if password == "" {
oldState, _ := term.SaveState(cli.terminalFd)
fmt.Fprintf(cli.out, "Password: ")
term.DisableEcho(cli.terminalFd, oldState)
password = readInput(cli.in, cli.out)
fmt.Fprint(cli.out, "\n")
term.RestoreTerminal(cli.terminalFd, oldState)
if password == "" {
return fmt.Errorf("Error : Password Required")
}
}
if email == "" {
promptDefault("Email", authconfig.Email)
email = readInput(cli.in, cli.out)
if email == "" {
email = authconfig.Email
}
}
} else {
password = authconfig.Password
email = authconfig.Email
}
authconfig.Username = username
authconfig.Password = password
authconfig.Email = email
authconfig.ServerAddress = serverAddress
cli.configFile.Configs[serverAddress] = authconfig
body, statusCode, err := readBody(cli.call("POST", "/auth", cli.configFile.Configs[serverAddress], false))
if statusCode == 401 {
delete(cli.configFile.Configs, serverAddress)
auth.SaveConfig(cli.configFile)
return err
}
if err != nil {
return err
}
var out2 engine.Env
err = json.Unmarshal(body, &out2)
if err != nil {
cli.configFile, _ = auth.LoadConfig(os.Getenv("HOME"))
return err
}
auth.SaveConfig(cli.configFile)
if out2.Get("Status") != "" {
fmt.Fprintf(cli.out, "%s\n", out2.Get("Status"))
}
return nil
}
// 'docker wait': block until a container stops
func (cli *DockerCli) CmdWait(args ...string) error {
cmd := cli.Subcmd("wait", "CONTAINER [CONTAINER...]", "Block until a container stops, then print its exit code.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var encounteredError error
for _, name := range cmd.Args() {
status, err := waitForExit(cli, name)
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to wait one or more containers")
} else {
fmt.Fprintf(cli.out, "%d\n", status)
}
}
return encounteredError
}
// 'docker version': show version information
func (cli *DockerCli) CmdVersion(args ...string) error {
cmd := cli.Subcmd("version", "", "Show the docker version information.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() > 0 {
cmd.Usage()
return nil
}
if VERSION != "" {
fmt.Fprintf(cli.out, "Client version: %s\n", VERSION)
}
fmt.Fprintf(cli.out, "Go version (client): %s\n", runtime.Version())
if GITCOMMIT != "" {
fmt.Fprintf(cli.out, "Git commit (client): %s\n", GITCOMMIT)
}
body, _, err := readBody(cli.call("GET", "/version", nil, false))
if err != nil {
return err
}
out := engine.NewOutput()
remoteVersion, err := out.AddEnv()
if err != nil {
utils.Errorf("Error reading remote version: %s\n", err)
return err
}
if _, err := out.Write(body); err != nil {
utils.Errorf("Error reading remote version: %s\n", err)
return err
}
out.Close()
fmt.Fprintf(cli.out, "Server version: %s\n", remoteVersion.Get("Version"))
fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit"))
fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion"))
release := utils.GetReleaseVersion()
if release != "" {
fmt.Fprintf(cli.out, "Last stable version: %s", release)
if (VERSION != "" || remoteVersion.Exists("Version")) && (strings.Trim(VERSION, "-dev") != release || strings.Trim(remoteVersion.Get("Version"), "-dev") != release) {
fmt.Fprintf(cli.out, ", please update docker")
}
fmt.Fprintf(cli.out, "\n")
}
return nil
}
// 'docker info': display system-wide information.
func (cli *DockerCli) CmdInfo(args ...string) error {
cmd := cli.Subcmd("info", "", "Display system-wide information")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() > 0 {
cmd.Usage()
return nil
}
body, _, err := readBody(cli.call("GET", "/info", nil, false))
if err != nil {
return err
}
out := engine.NewOutput()
remoteInfo, err := out.AddEnv()
if err != nil {
return err
}
if _, err := out.Write(body); err != nil {
utils.Errorf("Error reading remote info: %s\n", err)
return err
}
out.Close()
fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers"))
fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images"))
fmt.Fprintf(cli.out, "Driver: %s\n", remoteInfo.Get("Driver"))
var driverStatus [][2]string
if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil {
return err
}
for _, pair := range driverStatus {
fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1])
}
if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" {
fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug"))
fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "")
fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd"))
fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines"))
fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver"))
fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener"))
fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion"))
if initSha1 := remoteInfo.Get("InitSha1"); initSha1 != "" {
fmt.Fprintf(cli.out, "Init SHA1: %s\n", initSha1)
}
if initPath := remoteInfo.Get("InitPath"); initPath != "" {
fmt.Fprintf(cli.out, "Init Path: %s\n", initPath)
}
}
if len(remoteInfo.GetList("IndexServerAddress")) != 0 {
cli.LoadConfigFile()
u := cli.configFile.Configs[remoteInfo.Get("IndexServerAddress")].Username
if len(u) > 0 {
fmt.Fprintf(cli.out, "Username: %v\n", u)
fmt.Fprintf(cli.out, "Registry: %v\n", remoteInfo.GetList("IndexServerAddress"))
}
}
if !remoteInfo.GetBool("MemoryLimit") {
fmt.Fprintf(cli.err, "WARNING: No memory limit support\n")
}
if !remoteInfo.GetBool("SwapLimit") {
fmt.Fprintf(cli.err, "WARNING: No swap limit support\n")
}
if !remoteInfo.GetBool("IPv4Forwarding") {
fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n")
}
return nil
}
func (cli *DockerCli) CmdStop(args ...string) error {
cmd := cli.Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container (Send SIGTERM, and then SIGKILL after grace period)")
nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop before killing it.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
v := url.Values{}
v.Set("t", strconv.Itoa(*nSeconds))
var encounteredError error
for _, name := range cmd.Args() {
_, _, err := readBody(cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil, false))
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to stop one or more containers")
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
return encounteredError
}
func (cli *DockerCli) CmdRestart(args ...string) error {
cmd := cli.Subcmd("restart", "[OPTIONS] CONTAINER [CONTAINER...]", "Restart a running container")
nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
v := url.Values{}
v.Set("t", strconv.Itoa(*nSeconds))
var encounteredError error
for _, name := range cmd.Args() {
_, _, err := readBody(cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil, false))
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to restart one or more containers")
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
return encounteredError
}
func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
sigc := make(chan os.Signal, 1)
utils.CatchAll(sigc)
go func() {
for s := range sigc {
if s == syscall.SIGCHLD {
continue
}
if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%d", cid, s), nil, false)); err != nil {
utils.Debugf("Error sending signal: %s", err)
}
}
}()
return sigc
}
func (cli *DockerCli) CmdStart(args ...string) error {
cmd := cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container")
attach := cmd.Bool([]string{"a", "-attach"}, false, "Attach container's stdout/stderr and forward all signals to the process")
openStdin := cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's stdin")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var cErr chan error
var tty bool
if *attach || *openStdin {
if cmd.NArg() > 1 {
return fmt.Errorf("Impossible to start and attach multiple containers at once.")
}
body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false))
if err != nil {
return err
}
container := &Container{}
err = json.Unmarshal(body, container)
if err != nil {
return err
}
tty = container.Config.Tty
if !container.Config.Tty {
sigc := cli.forwardAllSignals(cmd.Arg(0))
defer utils.StopCatch(sigc)
}
var in io.ReadCloser
v := url.Values{}
v.Set("stream", "1")
if *openStdin && container.Config.OpenStdin {
v.Set("stdin", "1")
in = cli.in
}
v.Set("stdout", "1")
v.Set("stderr", "1")
cErr = utils.Go(func() error {
return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil)
})
}
var encounteredError error
for _, name := range cmd.Args() {
_, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, false))
if err != nil {
if !*attach || !*openStdin {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to start one or more containers")
}
} else {
if !*attach || !*openStdin {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
}
if encounteredError != nil {
if *openStdin || *attach {
cli.in.Close()
<-cErr
}
return encounteredError
}
if *openStdin || *attach {
if tty && cli.isTerminal {
if err := cli.monitorTtySize(cmd.Arg(0)); err != nil {
utils.Errorf("Error monitoring TTY size: %s\n", err)
}
}
return <-cErr
}
return nil
}
func (cli *DockerCli) CmdInspect(args ...string) error {
cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container/image")
tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var tmpl *template.Template
if *tmplStr != "" {
var err error
if tmpl, err = template.New("").Parse(*tmplStr); err != nil {
fmt.Fprintf(cli.err, "Template parsing error: %v\n", err)
return &utils.StatusError{StatusCode: 64,
Status: "Template parsing error: " + err.Error()}
}
}
indented := new(bytes.Buffer)
indented.WriteByte('[')
status := 0
for _, name := range cmd.Args() {
obj, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false))
if err != nil {
obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, false))
if err != nil {
if strings.Contains(err.Error(), "No such") {
fmt.Fprintf(cli.err, "Error: No such image or container: %s\n", name)
} else {
fmt.Fprintf(cli.err, "%s", err)
}
status = 1
continue
}
}
if tmpl == nil {
if err = json.Indent(indented, obj, "", " "); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
status = 1
continue
}
} else {
// Has template, will render
var value interface{}
if err := json.Unmarshal(obj, &value); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
status = 1
continue
}
if err := tmpl.Execute(cli.out, value); err != nil {
return err
}
cli.out.Write([]byte{'\n'})
}
indented.WriteString(",")
}
if indented.Len() > 1 {
// Remove trailing ','
indented.Truncate(indented.Len() - 1)
}
indented.WriteByte(']')
if tmpl == nil {
if _, err := io.Copy(cli.out, indented); err != nil {
return err
}
}
if status != 0 {
return &utils.StatusError{StatusCode: status}
}
return nil
}
func (cli *DockerCli) CmdTop(args ...string) error {
cmd := cli.Subcmd("top", "CONTAINER [ps OPTIONS]", "Lookup the running processes of a container")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() == 0 {
cmd.Usage()
return nil
}
val := url.Values{}
if cmd.NArg() > 1 {
val.Set("ps_args", strings.Join(cmd.Args()[1:], " "))
}
body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/top?"+val.Encode(), nil, false))
if err != nil {
return err
}
procs := APITop{}
err = json.Unmarshal(body, &procs)
if err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
fmt.Fprintln(w, strings.Join(procs.Titles, "\t"))
for _, proc := range procs.Processes {
fmt.Fprintln(w, strings.Join(proc, "\t"))
}
w.Flush()
return nil
}
func (cli *DockerCli) CmdPort(args ...string) error {
cmd := cli.Subcmd("port", "CONTAINER PRIVATE_PORT", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 2 {
cmd.Usage()
return nil
}
port := cmd.Arg(1)
proto := "tcp"
parts := strings.SplitN(port, "/", 2)
if len(parts) == 2 && len(parts[1]) != 0 {
port = parts[0]
proto = parts[1]
}
body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false))
if err != nil {
return err
}
var out Container
err = json.Unmarshal(body, &out)
if err != nil {
return err
}
if frontends, exists := out.NetworkSettings.Ports[Port(port+"/"+proto)]; exists && frontends != nil {
for _, frontend := range frontends {
fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort)
}
} else {
return fmt.Errorf("Error: No public port '%s' published for %s", cmd.Arg(1), cmd.Arg(0))
}
return nil
}
// 'docker rmi IMAGE' removes all images with the name IMAGE
func (cli *DockerCli) CmdRmi(args ...string) error {
cmd := cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var encounteredError error
for _, name := range cmd.Args() {
body, _, err := readBody(cli.call("DELETE", "/images/"+name, nil, false))
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to remove one or more images")
} else {
outs := engine.NewTable("Created", 0)
if _, err := outs.ReadListFrom(body); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to remove one or more images")
continue
}
for _, out := range outs.Data {
if out.Get("Deleted") != "" {
fmt.Fprintf(cli.out, "Deleted: %s\n", out.Get("Deleted"))
} else {
fmt.Fprintf(cli.out, "Untagged: %s\n", out.Get("Untagged"))
}
}
}
}
return encounteredError
}
func (cli *DockerCli) CmdHistory(args ...string) error {
cmd := cli.Subcmd("history", "[OPTIONS] IMAGE", "Show the history of an image")
quiet := cmd.Bool([]string{"q", "-quiet"}, false, "only show numeric IDs")
noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
body, _, err := readBody(cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil, false))
if err != nil {
return err
}
outs := engine.NewTable("Created", 0)
if _, err := outs.ReadListFrom(body); err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
if !*quiet {
fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE")
}
for _, out := range outs.Data {
outID := out.Get("ID")
if !*quiet {
if *noTrunc {
fmt.Fprintf(w, "%s\t", outID)
} else {
fmt.Fprintf(w, "%s\t", utils.TruncateID(outID))
}
fmt.Fprintf(w, "%s ago\t", utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))))
if *noTrunc {
fmt.Fprintf(w, "%s\t", out.Get("CreatedBy"))
} else {
fmt.Fprintf(w, "%s\t", utils.Trunc(out.Get("CreatedBy"), 45))
}
fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("Size")))
} else {
if *noTrunc {
fmt.Fprintln(w, outID)
} else {
fmt.Fprintln(w, utils.TruncateID(outID))
}
}
}
w.Flush()
return nil
}
func (cli *DockerCli) CmdRm(args ...string) error {
cmd := cli.Subcmd("rm", "[OPTIONS] CONTAINER [CONTAINER...]", "Remove one or more containers")
v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated to the container")
link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link and not the underlying container")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
val := url.Values{}
if *v {
val.Set("v", "1")
}
if *link {
val.Set("link", "1")
}
var encounteredError error
for _, name := range cmd.Args() {
_, _, err := readBody(cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil, false))
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to remove one or more containers")
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
return encounteredError
}
// 'docker kill NAME' kills a running container
func (cli *DockerCli) CmdKill(args ...string) error {
cmd := cli.Subcmd("kill", "[OPTIONS] CONTAINER [CONTAINER...]", "Kill a running container (send SIGKILL, or specified signal)")
signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var encounteredError error
for _, name := range cmd.Args() {
if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", name, *signal), nil, false)); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to kill one or more containers")
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
return encounteredError
}
func (cli *DockerCli) CmdImport(args ...string) error {
cmd := cli.Subcmd("import", "URL|- [REPOSITORY[:TAG]]", "Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var src, repository, tag string
if cmd.NArg() == 3 {
fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' as been deprecated. Please use URL|- [REPOSITORY[:TAG]]\n")
src, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2)
} else {
src = cmd.Arg(0)
repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
}
v := url.Values{}
v.Set("repo", repository)
v.Set("tag", tag)
v.Set("fromSrc", src)
var in io.Reader
if src == "-" {
in = cli.in
}
return cli.stream("POST", "/images/create?"+v.Encode(), in, cli.out, nil)
}
func (cli *DockerCli) CmdPush(args ...string) error {
cmd := cli.Subcmd("push", "NAME", "Push an image or a repository to the registry")
if err := cmd.Parse(args); err != nil {
return nil
}
name := cmd.Arg(0)
if name == "" {
cmd.Usage()
return nil
}
cli.LoadConfigFile()
// Resolve the Repository name from fqn to endpoint + name
endpoint, _, err := registry.ResolveRepositoryName(name)
if err != nil {
return err
}
// Resolve the Auth config relevant for this server
authConfig := cli.configFile.ResolveAuthConfig(endpoint)
// If we're not using a custom registry, we know the restrictions
// applied to repository names and can warn the user in advance.
// Custom repositories can have different rules, and we must also
// allow pushing by image ID.
if len(strings.SplitN(name, "/", 2)) == 1 {
username := cli.configFile.Configs[auth.IndexServerAddress()].Username
if username == "" {
username = "<user>"
}
return fmt.Errorf("Impossible to push a \"root\" repository. Please rename your repository in <user>/<repo> (ex: %s/%s)", username, name)
}
v := url.Values{}
push := func(authConfig auth.AuthConfig) error {
buf, err := json.Marshal(authConfig)
if err != nil {
return err
}
registryAuthHeader := []string{
base64.URLEncoding.EncodeToString(buf),
}
return cli.stream("POST", "/images/"+name+"/push?"+v.Encode(), nil, cli.out, map[string][]string{
"X-Registry-Auth": registryAuthHeader,
})
}
if err := push(authConfig); err != nil {
if err.Error() == registry.ErrLoginRequired.Error() {
fmt.Fprintln(cli.out, "\nPlease login prior to push:")
if err := cli.CmdLogin(endpoint); err != nil {
return err
}
authConfig := cli.configFile.ResolveAuthConfig(endpoint)
return push(authConfig)
}
return err
}
return nil
}
func (cli *DockerCli) CmdPull(args ...string) error {
cmd := cli.Subcmd("pull", "NAME", "Pull an image or a repository from the registry")
tag := cmd.String([]string{"t", "-tag"}, "", "Download tagged image in repository")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
remote, parsedTag := utils.ParseRepositoryTag(cmd.Arg(0))
if *tag == "" {
*tag = parsedTag
}
// Resolve the Repository name from fqn to endpoint + name
endpoint, _, err := registry.ResolveRepositoryName(remote)
if err != nil {
return err
}
cli.LoadConfigFile()
// Resolve the Auth config relevant for this server
authConfig := cli.configFile.ResolveAuthConfig(endpoint)
v := url.Values{}
v.Set("fromImage", remote)
v.Set("tag", *tag)
pull := func(authConfig auth.AuthConfig) error {
buf, err := json.Marshal(authConfig)
if err != nil {
return err
}
registryAuthHeader := []string{
base64.URLEncoding.EncodeToString(buf),
}
return cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.out, map[string][]string{
"X-Registry-Auth": registryAuthHeader,
})
}
if err := pull(authConfig); err != nil {
if err.Error() == registry.ErrLoginRequired.Error() {
fmt.Fprintln(cli.out, "\nPlease login prior to pull:")
if err := cli.CmdLogin(endpoint); err != nil {
return err
}
authConfig := cli.configFile.ResolveAuthConfig(endpoint)
return pull(authConfig)
}
return err
}
return nil
}
func (cli *DockerCli) CmdImages(args ...string) error {
cmd := cli.Subcmd("images", "[OPTIONS] [NAME]", "List images")
quiet := cmd.Bool([]string{"q", "-quiet"}, false, "only show numeric IDs")
all := cmd.Bool([]string{"a", "-all"}, false, "show all images (by default filter out the intermediate images used to build)")
noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
flViz := cmd.Bool([]string{"v", "#viz", "-viz"}, false, "output graph in graphviz format")
flTree := cmd.Bool([]string{"t", "#tree", "-tree"}, false, "output graph in tree format")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() > 1 {
cmd.Usage()
return nil
}
filter := cmd.Arg(0)
if *flViz || *flTree {
body, _, err := readBody(cli.call("GET", "/images/json?all=1", nil, false))
if err != nil {
return err
}
outs := engine.NewTable("Created", 0)
if _, err := outs.ReadListFrom(body); err != nil {
return err
}
var (
printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string)
startImage *engine.Env
roots = engine.NewTable("Created", outs.Len())
byParent = make(map[string]*engine.Table)
)
for _, image := range outs.Data {
if image.Get("ParentId") == "" {
roots.Add(image)
} else {
if children, exists := byParent[image.Get("ParentId")]; exists {
children.Add(image)
} else {
byParent[image.Get("ParentId")] = engine.NewTable("Created", 1)
byParent[image.Get("ParentId")].Add(image)
}
}
if filter != "" {
if filter == image.Get("ID") || filter == utils.TruncateID(image.Get("ID")) {
startImage = image
}
for _, repotag := range image.GetList("RepoTags") {
if repotag == filter {
startImage = image
}
}
}
}
if *flViz {
fmt.Fprintf(cli.out, "digraph docker {\n")
printNode = (*DockerCli).printVizNode
} else {
printNode = (*DockerCli).printTreeNode
}
if startImage != nil {
root := engine.NewTable("Created", 1)
root.Add(startImage)
cli.WalkTree(*noTrunc, root, byParent, "", printNode)
} else if filter == "" {
cli.WalkTree(*noTrunc, roots, byParent, "", printNode)
}
if *flViz {
fmt.Fprintf(cli.out, " base [style=invisible]\n}\n")
}
} else {
v := url.Values{}
if cmd.NArg() == 1 {
v.Set("filter", filter)
}
if *all {
v.Set("all", "1")
}
body, _, err := readBody(cli.call("GET", "/images/json?"+v.Encode(), nil, false))
if err != nil {
return err
}
outs := engine.NewTable("Created", 0)
if _, err := outs.ReadListFrom(body); err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
if !*quiet {
fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE")
}
for _, out := range outs.Data {
for _, repotag := range out.GetList("RepoTags") {
repo, tag := utils.ParseRepositoryTag(repotag)
outID := out.Get("ID")
if !*noTrunc {
outID = utils.TruncateID(outID)
}
if !*quiet {
fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, outID, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), utils.HumanSize(out.GetInt64("VirtualSize")))
} else {
fmt.Fprintln(w, outID)
}
}
}
if !*quiet {
w.Flush()
}
}
return nil
}
func (cli *DockerCli) WalkTree(noTrunc bool, images *engine.Table, byParent map[string]*engine.Table, prefix string, printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string)) {
length := images.Len()
if length > 1 {
for index, image := range images.Data {
if index+1 == length {
printNode(cli, noTrunc, image, prefix+"└─")
if subimages, exists := byParent[image.Get("ID")]; exists {
cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode)
}
} else {
printNode(cli, noTrunc, image, prefix+"\u251C─")
if subimages, exists := byParent[image.Get("ID")]; exists {
cli.WalkTree(noTrunc, subimages, byParent, prefix+"\u2502 ", printNode)
}
}
}
} else {
for _, image := range images.Data {
printNode(cli, noTrunc, image, prefix+"└─")
if subimages, exists := byParent[image.Get("ID")]; exists {
cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode)
}
}
}
}
func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix string) {
var (
imageID string
parentID string
)
if noTrunc {
imageID = image.Get("ID")
parentID = image.Get("ParentId")
} else {
imageID = utils.TruncateID(image.Get("ID"))
parentID = utils.TruncateID(image.Get("ParentId"))
}
if parentID == "" {
fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID)
} else {
fmt.Fprintf(cli.out, " \"%s\" -> \"%s\"\n", parentID, imageID)
}
if image.GetList("RepoTags")[0] != "<none>:<none>" {
fmt.Fprintf(cli.out, " \"%s\" [label=\"%s\\n%s\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n",
imageID, imageID, strings.Join(image.GetList("RepoTags"), "\\n"))
}
}
func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix string) {
var imageID string
if noTrunc {
imageID = image.Get("ID")
} else {
imageID = utils.TruncateID(image.Get("ID"))
}
fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, utils.HumanSize(image.GetInt64("VirtualSize")))
if image.GetList("RepoTags")[0] != "<none>:<none>" {
fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.GetList("RepoTags"), ", "))
} else {
fmt.Fprint(cli.out, "\n")
}
}
func displayablePorts(ports *engine.Table) string {
result := []string{}
for _, port := range ports.Data {
if port.Get("IP") == "" {
result = append(result, fmt.Sprintf("%d/%s", port.GetInt("PublicPort"), port.Get("Type")))
} else {
result = append(result, fmt.Sprintf("%s:%d->%d/%s", port.Get("IP"), port.GetInt("PublicPort"), port.GetInt("PrivatePort"), port.Get("Type")))
}
}
sort.Strings(result)
return strings.Join(result, ", ")
}
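// For illustration: the formatting above produces the PORTS column of
// `docker ps`, e.g. "80/tcp" for an exposed-but-unpublished port and
// "0.0.0.0:49153->80/tcp" for a published one, joined with ", ".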
func (cli *DockerCli) CmdPs(args ...string) error {
cmd := cli.Subcmd("ps", "[OPTIONS]", "List containers")
quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs")
size := cmd.Bool([]string{"s", "-size"}, false, "Display sizes")
all := cmd.Bool([]string{"a", "-all"}, false, "Show all containers. Only running containers are shown by default.")
noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
nLatest := cmd.Bool([]string{"l", "-latest"}, false, "Show only the latest created container, include non-running ones.")
since := cmd.String([]string{"#sinceId", "-since-id"}, "", "Show only containers created since Id, include non-running ones.")
before := cmd.String([]string{"#beforeId", "-before-id"}, "", "Show only containers created before Id, include non-running ones.")
last := cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running ones.")
if err := cmd.Parse(args); err != nil {
return nil
}
v := url.Values{}
if *last == -1 && *nLatest {
*last = 1
}
if *all {
v.Set("all", "1")
}
if *last != -1 {
v.Set("limit", strconv.Itoa(*last))
}
if *since != "" {
v.Set("since", *since)
}
if *before != "" {
v.Set("before", *before)
}
if *size {
v.Set("size", "1")
}
body, _, err := readBody(cli.call("GET", "/containers/json?"+v.Encode(), nil, false))
if err != nil {
return err
}
outs := engine.NewTable("Created", 0)
if _, err := outs.ReadListFrom(body); err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
if !*quiet {
fmt.Fprint(w, "CONTAINER ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tPORTS\tNAMES")
if *size {
fmt.Fprintln(w, "\tSIZE")
} else {
fmt.Fprint(w, "\n")
}
}
for _, out := range outs.Data {
var (
outID = out.Get("ID")
outNames = out.GetList("Names")
)
if !*noTrunc {
outID = utils.TruncateID(outID)
}
// Remove the leading / from the names
for i := 0; i < len(outNames); i++ {
outNames[i] = outNames[i][1:]
}
if !*quiet {
var (
outCommand = out.Get("Command")
ports = engine.NewTable("", 0)
)
if !*noTrunc {
outCommand = utils.Trunc(outCommand, 20)
}
ports.ReadListFrom([]byte(out.Get("Ports")))
fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", outID, out.Get("Image"), outCommand, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), out.Get("Status"), displayablePorts(ports), strings.Join(outNames, ","))
if *size {
if out.GetInt("SizeRootFs") > 0 {
fmt.Fprintf(w, "%s (virtual %s)\n", utils.HumanSize(out.GetInt64("SizeRw")), utils.HumanSize(out.GetInt64("SizeRootFs")))
} else {
fmt.Fprintf(w, "%s\n", utils.HumanSize(out.GetInt64("SizeRw")))
}
} else {
fmt.Fprint(w, "\n")
}
} else {
fmt.Fprintln(w, outID)
}
}
if !*quiet {
w.Flush()
}
return nil
}
func (cli *DockerCli) CmdCommit(args ...string) error {
cmd := cli.Subcmd("commit", "[OPTIONS] CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes")
flComment := cmd.String([]string{"m", "-message"}, "", "Commit message")
flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (eg. \"John Hannibal Smith <[email protected]>\"")
flConfig := cmd.String([]string{"#run", "-run"}, "", "Config automatically applied when the image is run. "+`(ex: -run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')`)
if err := cmd.Parse(args); err != nil {
return nil
}
var name, repository, tag string
if cmd.NArg() == 3 {
fmt.Fprintf(cli.err, "[DEPRECATED] The format 'CONTAINER [REPOSITORY [TAG]]' as been deprecated. Please use CONTAINER [REPOSITORY[:TAG]]\n")
name, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2)
} else {
name = cmd.Arg(0)
repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
}
if name == "" {
cmd.Usage()
return nil
}
v := url.Values{}
v.Set("container", name)
v.Set("repo", repository)
v.Set("tag", tag)
v.Set("comment", *flComment)
v.Set("author", *flAuthor)
var config *Config
if *flConfig != "" {
config = &Config{}
if err := json.Unmarshal([]byte(*flConfig), config); err != nil {
return err
}
}
body, _, err := readBody(cli.call("POST", "/commit?"+v.Encode(), config, false))
if err != nil {
return err
}
apiID := &APIID{}
err = json.Unmarshal(body, apiID)
if err != nil {
return err
}
fmt.Fprintf(cli.out, "%s\n", apiID.ID)
return nil
}
func (cli *DockerCli) CmdEvents(args ...string) error {
cmd := cli.Subcmd("events", "[OPTIONS]", "Get real time events from the server")
since := cmd.String([]string{"#since", "-since"}, "", "Show previously created events and then stream.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 0 {
cmd.Usage()
return nil
}
v := url.Values{}
if *since != "" {
loc := time.FixedZone(time.Now().Zone())
format := "2006-01-02 15:04:05 -0700 MST"
if len(*since) < len(format) {
format = format[:len(*since)]
}
if t, err := time.ParseInLocation(format, *since, loc); err == nil {
v.Set("since", strconv.FormatInt(t.Unix(), 10))
} else {
v.Set("since", *since)
}
}
if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil {
return err
}
return nil
}
func (cli *DockerCli) CmdExport(args ...string) error {
cmd := cli.Subcmd("export", "CONTAINER", "Export the contents of a filesystem as a tar archive to STDOUT")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
if err := cli.stream("GET", "/containers/"+cmd.Arg(0)+"/export", nil, cli.out, nil); err != nil {
return err
}
return nil
}
func (cli *DockerCli) CmdDiff(args ...string) error {
cmd := cli.Subcmd("diff", "CONTAINER", "Inspect changes on a container's filesystem")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/changes", nil, false))
if err != nil {
return err
}
outs := engine.NewTable("", 0)
if _, err := outs.ReadListFrom(body); err != nil {
return err
}
for _, change := range outs.Data {
var kind string
switch change.GetInt("Kind") {
case archive.ChangeModify:
kind = "C"
case archive.ChangeAdd:
kind = "A"
case archive.ChangeDelete:
kind = "D"
}
fmt.Fprintf(cli.out, "%s %s\n", kind, change.Get("Path"))
}
return nil
}
func (cli *DockerCli) CmdLogs(args ...string) error {
cmd := cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container")
follow := cmd.Bool([]string{"f", "-follow"}, false, "Follow log output")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
name := cmd.Arg(0)
body, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false))
if err != nil {
return err
}
container := &Container{}
err = json.Unmarshal(body, container)
if err != nil {
return err
}
v := url.Values{}
v.Set("logs", "1")
v.Set("stdout", "1")
v.Set("stderr", "1")
if *follow && container.State.Running {
v.Set("stream", "1")
}
if err := cli.hijack("POST", "/containers/"+name+"/attach?"+v.Encode(), container.Config.Tty, nil, cli.out, cli.err, nil); err != nil {
return err
}
return nil
}
func (cli *DockerCli) CmdAttach(args ...string) error {
cmd := cli.Subcmd("attach", "[OPTIONS] CONTAINER", "Attach to a running container")
noStdin := cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach stdin")
	proxy := cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process (even in non-TTY mode)")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
name := cmd.Arg(0)
body, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false))
if err != nil {
return err
}
container := &Container{}
err = json.Unmarshal(body, container)
if err != nil {
return err
}
if !container.State.IsRunning() {
return fmt.Errorf("Impossible to attach to a stopped container, start it first")
}
if container.Config.Tty && cli.isTerminal {
if err := cli.monitorTtySize(cmd.Arg(0)); err != nil {
utils.Debugf("Error monitoring TTY size: %s", err)
}
}
var in io.ReadCloser
v := url.Values{}
v.Set("stream", "1")
if !*noStdin && container.Config.OpenStdin {
v.Set("stdin", "1")
in = cli.in
}
v.Set("stdout", "1")
v.Set("stderr", "1")
if *proxy && !container.Config.Tty {
sigc := cli.forwardAllSignals(cmd.Arg(0))
defer utils.StopCatch(sigc)
}
if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil); err != nil {
return err
}
_, status, err := getExitCode(cli, cmd.Arg(0))
if err != nil {
return err
}
if status != 0 {
return &utils.StatusError{StatusCode: status}
}
return nil
}
func (cli *DockerCli) CmdSearch(args ...string) error {
cmd := cli.Subcmd("search", "TERM", "Search the docker index for images")
noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
trusted := cmd.Bool([]string{"t", "#trusted", "-trusted"}, false, "Only show trusted builds")
stars := cmd.Int([]string{"s", "#stars", "-stars"}, 0, "Only displays with at least xxx stars")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
v := url.Values{}
v.Set("term", cmd.Arg(0))
body, _, err := readBody(cli.call("GET", "/images/search?"+v.Encode(), nil, true))
if err != nil {
return err
}
outs := engine.NewTable("star_count", 0)
if _, err := outs.ReadListFrom(body); err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0)
fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tTRUSTED\n")
for _, out := range outs.Data {
if (*trusted && !out.GetBool("is_trusted")) || (*stars > out.GetInt("star_count")) {
continue
}
desc := strings.Replace(out.Get("description"), "\n", " ", -1)
desc = strings.Replace(desc, "\r", " ", -1)
if !*noTrunc && len(desc) > 45 {
desc = utils.Trunc(desc, 42) + "..."
}
fmt.Fprintf(w, "%s\t%s\t%d\t", out.Get("name"), desc, out.GetInt("star_count"))
if out.GetBool("is_official") {
fmt.Fprint(w, "[OK]")
}
fmt.Fprint(w, "\t")
if out.GetBool("is_trusted") {
fmt.Fprint(w, "[OK]")
}
fmt.Fprint(w, "\n")
}
w.Flush()
return nil
}
// Ports type - Used to parse multiple -p flags
type ports []int
func (cli *DockerCli) CmdTag(args ...string) error {
cmd := cli.Subcmd("tag", "[OPTIONS] IMAGE REPOSITORY[:TAG]", "Tag an image into a repository")
force := cmd.Bool([]string{"f", "#force", "-force"}, false, "Force")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 2 && cmd.NArg() != 3 {
cmd.Usage()
return nil
}
var repository, tag string
if cmd.NArg() == 3 {
fmt.Fprintf(cli.err, "[DEPRECATED] The format 'IMAGE [REPOSITORY [TAG]]' as been deprecated. Please use IMAGE [REPOSITORY[:TAG]]\n")
repository, tag = cmd.Arg(1), cmd.Arg(2)
} else {
repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
}
v := url.Values{}
v.Set("repo", repository)
v.Set("tag", tag)
if *force {
v.Set("force", "1")
}
if _, _, err := readBody(cli.call("POST", "/images/"+cmd.Arg(0)+"/tag?"+v.Encode(), nil, false)); err != nil {
return err
}
return nil
}
//FIXME Only used in tests
func ParseRun(args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) {
cmd := flag.NewFlagSet("run", flag.ContinueOnError)
cmd.SetOutput(ioutil.Discard)
cmd.Usage = nil
return parseRun(cmd, args, sysInfo)
}
func parseRun(cmd *flag.FlagSet, args []string, sysInfo *sysinfo.SysInfo) (*Config, *HostConfig, *flag.FlagSet, error) {
var (
// FIXME: use utils.ListOpts for attach and volumes?
flAttach = NewListOpts(ValidateAttach)
flVolumes = NewListOpts(ValidatePath)
flLinks = NewListOpts(ValidateLink)
flEnv = NewListOpts(ValidateEnv)
flPublish ListOpts
flExpose ListOpts
flDns ListOpts
flVolumesFrom ListOpts
flLxcOpts ListOpts
flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)")
flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: Run container in the background, print new container id")
flNetwork = cmd.Bool([]string{"n", "-networking"}, true, "Enable networking for this container")
flPrivileged = cmd.Bool([]string{"#privileged", "-privileged"}, false, "Give extended privileges to this container")
flPublishAll = cmd.Bool([]string{"P", "-publish-all"}, false, "Publish all exposed ports to the host interfaces")
flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep stdin open even if not attached")
flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-tty")
flContainerIDFile = cmd.String([]string{"#cidfile", "-cidfile"}, "", "Write the container ID to the file")
flEntrypoint = cmd.String([]string{"#entrypoint", "-entrypoint"}, "", "Overwrite the default entrypoint of the image")
flHostname = cmd.String([]string{"h", "-hostname"}, "", "Container host name")
flMemoryString = cmd.String([]string{"m", "-memory"}, "", "Memory limit (format: <number><optional unit>, where unit = b, k, m or g)")
flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID")
flWorkingDir = cmd.String([]string{"w", "-workdir"}, "", "Working directory inside the container")
flCpuShares = cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)")
		// For documentation purposes
		_ = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process (even in non-TTY mode)")
_ = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container")
)
cmd.Var(&flAttach, []string{"a", "-attach"}, "Attach to stdin, stdout or stderr.")
cmd.Var(&flVolumes, []string{"v", "-volume"}, "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)")
cmd.Var(&flLinks, []string{"#link", "-link"}, "Add link to another container (name:alias)")
cmd.Var(&flEnv, []string{"e", "-env"}, "Set environment variables")
cmd.Var(&flPublish, []string{"p", "-publish"}, fmt.Sprintf("Publish a container's port to the host (format: %s) (use 'docker port' to see the actual mapping)", PortSpecTemplateFormat))
cmd.Var(&flExpose, []string{"#expose", "-expose"}, "Expose a port from the container without publishing it to your host")
cmd.Var(&flDns, []string{"#dns", "-dns"}, "Set custom dns servers")
cmd.Var(&flVolumesFrom, []string{"#volumes-from", "-volumes-from"}, "Mount volumes from the specified container(s)")
cmd.Var(&flLxcOpts, []string{"#lxc-conf", "-lxc-conf"}, "Add custom lxc options -lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"")
if err := cmd.Parse(args); err != nil {
return nil, nil, cmd, err
}
// Check if the kernel supports memory limit cgroup.
if sysInfo != nil && *flMemoryString != "" && !sysInfo.MemoryLimit {
*flMemoryString = ""
}
// Validate input params
if *flDetach && flAttach.Len() > 0 {
return nil, nil, cmd, ErrConflictAttachDetach
}
if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) {
return nil, nil, cmd, ErrInvalidWorikingDirectory
}
if *flDetach && *flAutoRemove {
return nil, nil, cmd, ErrConflictDetachAutoRemove
}
	// If neither -d nor -a is set, attach to everything by default
	if flAttach.Len() == 0 && !*flDetach {
		flAttach.Set("stdout")
		flAttach.Set("stderr")
		if *flStdin {
			flAttach.Set("stdin")
		}
	}
var flMemory int64
if *flMemoryString != "" {
parsedMemory, err := utils.RAMInBytes(*flMemoryString)
if err != nil {
return nil, nil, cmd, err
}
flMemory = parsedMemory
}
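	// Illustrative example, not from the original source: utils.RAMInBytes
	// accepts the b/k/m/g suffixes named in the -m flag help above, e.g.
	// "512m" would parse to 536870912 bytes.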
var binds []string
// add any bind targets to the list of container volumes
for bind := range flVolumes.GetMap() {
if arr := strings.Split(bind, ":"); len(arr) > 1 {
if arr[0] == "/" {
return nil, nil, cmd, fmt.Errorf("Invalid bind mount: source can't be '/'")
}
dstDir := arr[1]
flVolumes.Set(dstDir)
binds = append(binds, bind)
flVolumes.Delete(bind)
} else if bind == "/" {
return nil, nil, cmd, fmt.Errorf("Invalid volume: path can't be '/'")
}
}
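	// Illustrative example, not from the original source: "-v /tmp/data:/data"
	// registers "/data" as a container volume and keeps "/tmp/data:/data" in
	// binds, while a bare "-v /data" only registers an anonymous volume.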
var (
parsedArgs = cmd.Args()
runCmd []string
entrypoint []string
image string
)
if len(parsedArgs) >= 1 {
image = cmd.Arg(0)
}
if len(parsedArgs) > 1 {
runCmd = parsedArgs[1:]
}
if *flEntrypoint != "" {
entrypoint = []string{*flEntrypoint}
}
lxcConf, err := parseLxcConfOpts(flLxcOpts)
if err != nil {
return nil, nil, cmd, err
}
var (
domainname string
hostname = *flHostname
parts = strings.SplitN(hostname, ".", 2)
)
if len(parts) > 1 {
hostname = parts[0]
domainname = parts[1]
}
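	// Illustrative example, not from the original source: -h "web.example.com"
	// yields hostname "web" and domainname "example.com".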
ports, portBindings, err := parsePortSpecs(flPublish.GetAll())
if err != nil {
return nil, nil, cmd, err
}
// Merge in exposed ports to the map of published ports
for _, e := range flExpose.GetAll() {
if strings.Contains(e, ":") {
return nil, nil, cmd, fmt.Errorf("Invalid port format for --expose: %s", e)
}
p := NewPort(splitProtoPort(e))
if _, exists := ports[p]; !exists {
ports[p] = struct{}{}
}
}
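	// Illustrative example, not from the original source: "--expose 8080"
	// adds the port to the exposed set with no host binding, while a value
	// such as "8080:80" is rejected above because --expose takes no host part.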
config := &Config{
Hostname: hostname,
Domainname: domainname,
PortSpecs: nil, // Deprecated
ExposedPorts: ports,
User: *flUser,
Tty: *flTty,
NetworkDisabled: !*flNetwork,
OpenStdin: *flStdin,
Memory: flMemory,
CpuShares: *flCpuShares,
AttachStdin: flAttach.Get("stdin"),
AttachStdout: flAttach.Get("stdout"),
AttachStderr: flAttach.Get("stderr"),
Env: flEnv.GetAll(),
Cmd: runCmd,
Dns: flDns.GetAll(),
Image: image,
Volumes: flVolumes.GetMap(),
VolumesFrom: strings.Join(flVolumesFrom.GetAll(), ","),
Entrypoint: entrypoint,
WorkingDir: *flWorkingDir,
}
hostConfig := &HostConfig{
Binds: binds,
ContainerIDFile: *flContainerIDFile,
LxcConf: lxcConf,
Privileged: *flPrivileged,
PortBindings: portBindings,
Links: flLinks.GetAll(),
PublishAllPorts: *flPublishAll,
}
if sysInfo != nil && flMemory > 0 && !sysInfo.SwapLimit {
//fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
config.MemorySwap = -1
}
// When allocating stdin in attached mode, close stdin at client disconnect
if config.OpenStdin && config.AttachStdin {
config.StdinOnce = true
}
return config, hostConfig, cmd, nil
}
func (cli *DockerCli) CmdRun(args ...string) error {
config, hostConfig, cmd, err := parseRun(cli.Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container"), args, nil)
if err != nil {
return err
}
if config.Image == "" {
cmd.Usage()
return nil
}
// Retrieve relevant client-side config
var (
flName = cmd.Lookup("name")
flRm = cmd.Lookup("rm")
flSigProxy = cmd.Lookup("sig-proxy")
autoRemove, _ = strconv.ParseBool(flRm.Value.String())
sigProxy, _ = strconv.ParseBool(flSigProxy.Value.String())
)
	// Disable sigProxy when a TTY is allocated
if config.Tty {
sigProxy = false
}
var containerIDFile io.WriteCloser
if len(hostConfig.ContainerIDFile) > 0 {
if _, err := os.Stat(hostConfig.ContainerIDFile); err == nil {
return fmt.Errorf("cid file found, make sure the other container isn't running or delete %s", hostConfig.ContainerIDFile)
}
if containerIDFile, err = os.Create(hostConfig.ContainerIDFile); err != nil {
return fmt.Errorf("failed to create the container ID file: %s", err)
}
defer containerIDFile.Close()
}
containerValues := url.Values{}
if name := flName.Value.String(); name != "" {
containerValues.Set("name", name)
}
	// Create the container
	body, statusCode, err := readBody(cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false))
	// If the image is not found, try to pull it
if statusCode == 404 {
_, tag := utils.ParseRepositoryTag(config.Image)
if tag == "" {
tag = DEFAULTTAG
}
fmt.Fprintf(cli.err, "Unable to find image '%s' (tag: %s) locally\n", config.Image, tag)
v := url.Values{}
repos, tag := utils.ParseRepositoryTag(config.Image)
v.Set("fromImage", repos)
v.Set("tag", tag)
// Resolve the Repository name from fqn to endpoint + name
endpoint, _, err := registry.ResolveRepositoryName(repos)
if err != nil {
return err
}
// Load the auth config file, to be able to pull the image
cli.LoadConfigFile()
// Resolve the Auth config relevant for this server
authConfig := cli.configFile.ResolveAuthConfig(endpoint)
buf, err := json.Marshal(authConfig)
if err != nil {
return err
}
registryAuthHeader := []string{
base64.URLEncoding.EncodeToString(buf),
}
if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.err, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil {
return err
}
if body, _, err = readBody(cli.call("POST", "/containers/create?"+containerValues.Encode(), config, false)); err != nil {
return err
}
} else if err != nil {
return err
}
var runResult APIRun
if err := json.Unmarshal(body, &runResult); err != nil {
return err
}
for _, warning := range runResult.Warnings {
fmt.Fprintf(cli.err, "WARNING: %s\n", warning)
}
if len(hostConfig.ContainerIDFile) > 0 {
if _, err = containerIDFile.Write([]byte(runResult.ID)); err != nil {
return fmt.Errorf("failed to write the container ID to the file: %s", err)
}
}
if sigProxy {
sigc := cli.forwardAllSignals(runResult.ID)
defer utils.StopCatch(sigc)
}
var (
waitDisplayId chan struct{}
errCh chan error
)
if !config.AttachStdout && !config.AttachStderr {
		// Make this asynchronous in order to let the client write to stdin before having to read the ID
waitDisplayId = make(chan struct{})
go func() {
defer close(waitDisplayId)
fmt.Fprintf(cli.out, "%s\n", runResult.ID)
}()
}
	// We need to instantiate the chan because the select needs it. It can
	// be closed but can't be uninitialized.
hijacked := make(chan io.Closer)
// Block the return until the chan gets closed
defer func() {
utils.Debugf("End of CmdRun(), Waiting for hijack to finish.")
if _, ok := <-hijacked; ok {
utils.Errorf("Hijack did not finish (chan still open)")
}
}()
if config.AttachStdin || config.AttachStdout || config.AttachStderr {
var (
out, stderr io.Writer
in io.ReadCloser
v = url.Values{}
)
v.Set("stream", "1")
if config.AttachStdin {
v.Set("stdin", "1")
in = cli.in
}
if config.AttachStdout {
v.Set("stdout", "1")
out = cli.out
}
if config.AttachStderr {
v.Set("stderr", "1")
if config.Tty {
stderr = cli.out
} else {
stderr = cli.err
}
}
errCh = utils.Go(func() error {
return cli.hijack("POST", "/containers/"+runResult.ID+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked)
})
} else {
close(hijacked)
}
// Acknowledge the hijack before starting
select {
case closer := <-hijacked:
		// Make sure that the hijack gets closed when returning (results
		// in closing the hijack chan and freeing the server's goroutines).
if closer != nil {
defer closer.Close()
}
case err := <-errCh:
if err != nil {
utils.Debugf("Error hijack: %s", err)
return err
}
}
	// Start the container
if _, _, err = readBody(cli.call("POST", "/containers/"+runResult.ID+"/start", hostConfig, false)); err != nil {
return err
}
if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminal {
if err := cli.monitorTtySize(runResult.ID); err != nil {
utils.Errorf("Error monitoring TTY size: %s\n", err)
}
}
if errCh != nil {
if err := <-errCh; err != nil {
utils.Debugf("Error hijack: %s", err)
return err
}
}
// Detached mode: wait for the id to be displayed and return.
if !config.AttachStdout && !config.AttachStderr {
// Detached mode
<-waitDisplayId
return nil
}
var status int
// Attached mode
if autoRemove {
// Autoremove: wait for the container to finish, retrieve
// the exit code and remove the container
if _, _, err := readBody(cli.call("POST", "/containers/"+runResult.ID+"/wait", nil, false)); err != nil {
return err
}
if _, status, err = getExitCode(cli, runResult.ID); err != nil {
return err
}
if _, _, err := readBody(cli.call("DELETE", "/containers/"+runResult.ID+"?v=1", nil, false)); err != nil {
return err
}
} else {
if !config.Tty {
			// In non-tty mode, we can't detach, so we know we need to wait.
if status, err = waitForExit(cli, runResult.ID); err != nil {
return err
}
} else {
			// In TTY mode, there is a race: if the process dies too slowly, the state can be updated after the getExitCode call
			// and result in a wrong exit code.
// No Autoremove: Simply retrieve the exit code
if _, status, err = getExitCode(cli, runResult.ID); err != nil {
return err
}
}
}
if status != 0 {
return &utils.StatusError{StatusCode: status}
}
return nil
}
func (cli *DockerCli) CmdCp(args ...string) error {
cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTPATH", "Copy files/folders from the PATH to the HOSTPATH")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 2 {
cmd.Usage()
return nil
}
var copyData APICopy
info := strings.Split(cmd.Arg(0), ":")
if len(info) != 2 {
return fmt.Errorf("Error: Path not specified")
}
copyData.Resource = info[1]
copyData.HostPath = cmd.Arg(1)
stream, statusCode, err := cli.call("POST", "/containers/"+info[0]+"/copy", copyData, false)
if stream != nil {
defer stream.Close()
}
if err != nil {
return err
}
if statusCode == 200 {
if err := archive.Untar(stream, copyData.HostPath, nil); err != nil {
return err
}
}
return nil
}
func (cli *DockerCli) CmdSave(args ...string) error {
cmd := cli.Subcmd("save", "IMAGE", "Save an image to a tar archive (streamed to stdout)")
if err := cmd.Parse(args); err != nil {
return err
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
image := cmd.Arg(0)
if err := cli.stream("GET", "/images/"+image+"/get", nil, cli.out, nil); err != nil {
return err
}
return nil
}
func (cli *DockerCli) CmdLoad(args ...string) error {
cmd := cli.Subcmd("load", "", "Load an image from a tar archive on STDIN")
if err := cmd.Parse(args); err != nil {
return err
}
if cmd.NArg() != 0 {
cmd.Usage()
return nil
}
if err := cli.stream("POST", "/images/load", cli.in, cli.out, nil); err != nil {
return err
}
return nil
}
func (cli *DockerCli) call(method, path string, data interface{}, passAuthInfo bool) (io.ReadCloser, int, error) {
var params io.Reader
if data != nil {
buf, err := json.Marshal(data)
if err != nil {
return nil, -1, err
}
params = bytes.NewBuffer(buf)
}
// fixme: refactor client to support redirect
re := regexp.MustCompile("/+")
path = re.ReplaceAllString(path, "/")
req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", APIVERSION, path), params)
if err != nil {
return nil, -1, err
}
if passAuthInfo {
cli.LoadConfigFile()
// Resolve the Auth config relevant for this server
authConfig := cli.configFile.ResolveAuthConfig(auth.IndexServerAddress())
getHeaders := func(authConfig auth.AuthConfig) (map[string][]string, error) {
buf, err := json.Marshal(authConfig)
if err != nil {
return nil, err
}
registryAuthHeader := []string{
base64.URLEncoding.EncodeToString(buf),
}
return map[string][]string{"X-Registry-Auth": registryAuthHeader}, nil
}
if headers, err := getHeaders(authConfig); err == nil && headers != nil {
for k, v := range headers {
req.Header[k] = v
}
}
}
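	// Illustrative note, not from the original source: the header built above
	// is "X-Registry-Auth: <base64url-encoded JSON of authConfig>".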
req.Header.Set("User-Agent", "Docker-Client/"+VERSION)
req.Host = cli.addr
if data != nil {
req.Header.Set("Content-Type", "application/json")
} else if method == "POST" {
req.Header.Set("Content-Type", "plain/text")
}
dial, err := net.Dial(cli.proto, cli.addr)
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return nil, -1, ErrConnectionRefused
}
return nil, -1, err
}
clientconn := httputil.NewClientConn(dial, nil)
resp, err := clientconn.Do(req)
if err != nil {
clientconn.Close()
if strings.Contains(err.Error(), "connection refused") {
return nil, -1, ErrConnectionRefused
}
return nil, -1, err
}
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, -1, err
}
if len(body) == 0 {
return nil, resp.StatusCode, fmt.Errorf("Error :%s", http.StatusText(resp.StatusCode))
}
return nil, resp.StatusCode, fmt.Errorf("Error: %s", bytes.TrimSpace(body))
}
wrapper := utils.NewReadCloserWrapper(resp.Body, func() error {
if resp != nil && resp.Body != nil {
resp.Body.Close()
}
return clientconn.Close()
})
return wrapper, resp.StatusCode, nil
}
func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error {
if (method == "POST" || method == "PUT") && in == nil {
in = bytes.NewReader([]byte{})
}
// fixme: refactor client to support redirect
re := regexp.MustCompile("/+")
path = re.ReplaceAllString(path, "/")
req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", APIVERSION, path), in)
if err != nil {
return err
}
req.Header.Set("User-Agent", "Docker-Client/"+VERSION)
req.Host = cli.addr
if method == "POST" {
req.Header.Set("Content-Type", "plain/text")
}
if headers != nil {
for k, v := range headers {
req.Header[k] = v
}
}
dial, err := net.Dial(cli.proto, cli.addr)
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return fmt.Errorf("Can't connect to docker daemon. Is 'docker -d' running on this host?")
}
return err
}
clientconn := httputil.NewClientConn(dial, nil)
resp, err := clientconn.Do(req)
defer clientconn.Close()
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return fmt.Errorf("Can't connect to docker daemon. Is 'docker -d' running on this host?")
}
return err
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
if len(body) == 0 {
return fmt.Errorf("Error :%s", http.StatusText(resp.StatusCode))
}
return fmt.Errorf("Error: %s", bytes.TrimSpace(body))
}
if matchesContentType(resp.Header.Get("Content-Type"), "application/json") {
return utils.DisplayJSONMessagesStream(resp.Body, out, cli.terminalFd, cli.isTerminal)
}
if _, err := io.Copy(out, resp.Body); err != nil {
return err
}
return nil
}
func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer) error {
defer func() {
if started != nil {
close(started)
}
}()
// fixme: refactor client to support redirect
re := regexp.MustCompile("/+")
path = re.ReplaceAllString(path, "/")
req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", APIVERSION, path), nil)
if err != nil {
return err
}
req.Header.Set("User-Agent", "Docker-Client/"+VERSION)
req.Header.Set("Content-Type", "plain/text")
req.Host = cli.addr
dial, err := net.Dial(cli.proto, cli.addr)
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return fmt.Errorf("Can't connect to docker daemon. Is 'docker -d' running on this host?")
}
return err
}
clientconn := httputil.NewClientConn(dial, nil)
defer clientconn.Close()
// Server hijacks the connection, error 'connection closed' expected
clientconn.Do(req)
rwc, br := clientconn.Hijack()
defer rwc.Close()
if started != nil {
started <- rwc
}
var receiveStdout chan error
var oldState *term.State
if in != nil && setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" {
oldState, err = term.SetRawTerminal(cli.terminalFd)
if err != nil {
return err
}
defer term.RestoreTerminal(cli.terminalFd, oldState)
}
if stdout != nil || stderr != nil {
receiveStdout = utils.Go(func() (err error) {
defer func() {
if in != nil {
if setRawTerminal && cli.isTerminal {
term.RestoreTerminal(cli.terminalFd, oldState)
}
in.Close()
}
}()
// When TTY is ON, use regular copy
if setRawTerminal {
_, err = io.Copy(stdout, br)
} else {
_, err = utils.StdCopy(stdout, stderr, br)
}
utils.Debugf("[hijack] End of stdout")
return err
})
}
sendStdin := utils.Go(func() error {
if in != nil {
io.Copy(rwc, in)
utils.Debugf("[hijack] End of stdin")
}
if tcpc, ok := rwc.(*net.TCPConn); ok {
if err := tcpc.CloseWrite(); err != nil {
utils.Errorf("Couldn't send EOF: %s\n", err)
}
} else if unixc, ok := rwc.(*net.UnixConn); ok {
if err := unixc.CloseWrite(); err != nil {
utils.Errorf("Couldn't send EOF: %s\n", err)
}
}
// Discard errors due to pipe interruption
return nil
})
if stdout != nil || stderr != nil {
if err := <-receiveStdout; err != nil {
utils.Errorf("Error receiveStdout: %s", err)
return err
}
}
if !cli.isTerminal {
if err := <-sendStdin; err != nil {
utils.Errorf("Error sendStdin: %s", err)
return err
}
}
return nil
}
func (cli *DockerCli) getTtySize() (int, int) {
if !cli.isTerminal {
return 0, 0
}
ws, err := term.GetWinsize(cli.terminalFd)
if err != nil {
utils.Errorf("Error getting size: %s", err)
if ws == nil {
return 0, 0
}
}
return int(ws.Height), int(ws.Width)
}
func (cli *DockerCli) resizeTty(id string) {
height, width := cli.getTtySize()
if height == 0 && width == 0 {
return
}
v := url.Values{}
v.Set("h", strconv.Itoa(height))
v.Set("w", strconv.Itoa(width))
if _, _, err := readBody(cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil, false)); err != nil {
utils.Errorf("Error resize: %s", err)
}
}
func (cli *DockerCli) monitorTtySize(id string) error {
cli.resizeTty(id)
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGWINCH)
go func() {
		for range sigchan {
cli.resizeTty(id)
}
}()
return nil
}
func (cli *DockerCli) Subcmd(name, signature, description string) *flag.FlagSet {
flags := flag.NewFlagSet(name, flag.ContinueOnError)
flags.Usage = func() {
fmt.Fprintf(cli.err, "\nUsage: docker %s %s\n\n%s\n\n", name, signature, description)
flags.PrintDefaults()
os.Exit(2)
}
return flags
}
func (cli *DockerCli) LoadConfigFile() (err error) {
cli.configFile, err = auth.LoadConfig(os.Getenv("HOME"))
if err != nil {
fmt.Fprintf(cli.err, "WARNING: %s\n", err)
}
return err
}
func waitForExit(cli *DockerCli, containerId string) (int, error) {
body, _, err := readBody(cli.call("POST", "/containers/"+containerId+"/wait", nil, false))
if err != nil {
return -1, err
}
var out APIWait
if err := json.Unmarshal(body, &out); err != nil {
return -1, err
}
return out.StatusCode, nil
}
// getExitCode performs an inspect on the container. It returns
// the running state and the exit code.
func getExitCode(cli *DockerCli, containerId string) (bool, int, error) {
body, _, err := readBody(cli.call("GET", "/containers/"+containerId+"/json", nil, false))
if err != nil {
// If we can't connect, then the daemon probably died.
if err != ErrConnectionRefused {
return false, -1, err
}
return false, -1, nil
}
c := &Container{}
if err := json.Unmarshal(body, c); err != nil {
return false, -1, err
}
return c.State.IsRunning(), c.State.GetExitCode(), nil
}
func readBody(stream io.ReadCloser, statusCode int, err error) ([]byte, int, error) {
if stream != nil {
defer stream.Close()
}
if err != nil {
return nil, statusCode, err
}
body, err := ioutil.ReadAll(stream)
if err != nil {
return nil, -1, err
}
return body, statusCode, nil
}
func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *DockerCli {
var (
isTerminal = false
terminalFd uintptr
)
if in != nil {
if file, ok := in.(*os.File); ok {
terminalFd = file.Fd()
isTerminal = term.IsTerminal(terminalFd)
}
}
if err == nil {
err = out
}
return &DockerCli{
proto: proto,
addr: addr,
in: in,
out: out,
err: err,
isTerminal: isTerminal,
terminalFd: terminalFd,
}
}
type DockerCli struct {
proto string
addr string
configFile *auth.ConfigFile
in io.ReadCloser
out io.Writer
err io.Writer
isTerminal bool
terminalFd uintptr
}
| [
"\"HOME\"",
"\"DEBUG\"",
"\"DEBUG\"",
"\"NORAW\"",
"\"HOME\""
]
| []
| [
"HOME",
"NORAW",
"DEBUG"
]
| [] | ["HOME", "NORAW", "DEBUG"] | go | 3 | 0 | |
discordbot.py | import discord
from discord.ext import commands
import os
import traceback
import random
import re  # regular expressions
import math  # used by Zeller's congruence
description = '''An example bot to showcase the discord.ext.commands extension module.
There are a number of utility commands being showcased here.'''
bot = commands.Bot(command_prefix='?', description=description)
#bot = commands.Bot(command_prefix='/')
token = os.environ['DISCORD_BOT_TOKEN']
# Create the client object needed for the connection
client = discord.Client()
"""Bot起動時に実行されるイベントハンドラ"""
@client.event # イベントを受信するための構文(デコレータ)
async def on_ready():
    # Change the bot's status
activity = discord.Activity(name='Netflix', type=discord.ActivityType.watching)
await client.change_presence(activity=activity)
print('------Logged in as------')
print(client.user.name)
print(client.user.id)
print('------------------------')
# Define a function that returns the list data corresponding to a command
def get_data(message):
command = message.content
data_table = {
        '/members': message.guild.members, # list of members
        '/roles': message.guild.roles, # list of roles
        '/text_channels': message.guild.text_channels, # list of text channels
        '/voice_channels': message.guild.voice_channels, # list of voice channels
        '/category_channels': message.guild.categories, # list of category channels
    }
    return data_table.get(command, 'Invalid command for retrieving the corresponding list data')
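# Illustrative usage, not part of the original bot: for a message whose
# content is '/roles', get_data(message) returns message.guild.roles.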
# Zeller's congruence
def zeller(year, month, day):
if month == 1 or month == 2:
year -= 1
        month += 12
# Math.floor( year + Math.floor(year/4) - Math.floor(year/100) + Math.floor(year/400) + Math.floor((13 * month + 8)/5) + date ) % 7
h = math.floor( year + math.floor(year/4) - math.floor(year/100) + math.floor(year/400) + math.floor((13 * month + 8)/5) + day ) % 7
return h
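# Illustrative sanity checks, hand-computed and not part of the original bot;
# h indexes the weekday from Sunday (0) through Saturday (6):
#   zeller(2020, 5, 17) == 0  -> Sunday
#   zeller(2000, 1, 1)  == 6  -> Saturday (Jan/Feb roll back to months 13/14 of 1999)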
# Handler that runs when a message is received
@client.event
async def on_message(message):
    # Ignore messages sent by bots
if message.author.bot:
#if message.author == client.user:
return
    # Reply "にゃーん" (meow) when someone says "/neko"
    # if message.content == '/neko':
    # if client.user in message.mentions: # check whether the bot was mentioned
    if 'おは' in message.content: # 'おは' is the Japanese "good morning" prefix
        text = message.author.mention+'ちゃん、おはゆ!:hatching_chick:' # .mention pings the user; .name would give just the name
await message.channel.send(text)
    # Zeller's congruence
    # (year + year/4 - year/100 + year/400 + (13*month+8)/5 + day) mod 7 = [0-6]
    # January and February are computed as months 13 and 14 of the previous year.
    # Supports 1582/10/15 (Friday) onward; leap years are handled.
    if re.search(r'^[0-9]{4}/[0-9]{2}/[0-9]{2}$', message.content): # matches [yyyy/mm/dd]
ztext = message.content
l = ztext.split('/')
z_year = int(l[0])
z_month = int(l[1])
z_date = int(l[2])
ws = ["日", "月", "火", "水", "木", "金", "土"]
x = zeller(z_year, z_month, z_date)
await message.channel.send(message.content + " は " + ws[x] + "曜日:turtle:")
    # Get the user's status (error: message.author.activities)
# memstatus = message.author.activities
# print(memstatus)
    # Fetch and print the data corresponding to the command
# print(get_data(message))
# Start the bot and connect to the Discord server
client.run(token)
| []
| []
| [
"DISCORD_BOT_TOKEN"
]
| [] | ["DISCORD_BOT_TOKEN"] | python | 1 | 0 | |
greaterwms/wsgi.py | """
WSGI config for django_wms project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'greaterwms.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
tests/support/case.py | # -*- coding: utf-8 -*-
"""
:codeauthor: Pedro Algarvio ([email protected])
====================================
Custom Salt TestCase Implementations
====================================
Custom reusable :class:`TestCase<python2:unittest.TestCase>`
implementations.
"""
from __future__ import absolute_import, unicode_literals
import errno
import logging
import os
import re
import subprocess
import sys
import tempfile
import textwrap
import time
from datetime import datetime, timedelta
import salt.utils.files
from salt.ext import six
from salt.ext.six.moves import cStringIO
from tests.support.cli_scripts import ScriptPathMixin
from tests.support.helpers import RedirectStdStreams, requires_sshd_server
from tests.support.mixins import (
AdaptedConfigurationTestCaseMixin,
SaltClientTestCaseMixin,
SaltMultimasterClientTestCaseMixin,
)
from tests.support.paths import CODE_DIR, INTEGRATION_TEST_DIR, PYEXEC, SCRIPT_DIR
from tests.support.processes import terminate_process
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase
STATE_FUNCTION_RUNNING_RE = re.compile(
r"""The function (?:"|')(?P<state_func>.*)(?:"|') is running as PID """
r"(?P<pid>[\d]+) and was started at (?P<date>.*) with jid (?P<jid>[\d]+)"
)
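# Illustrative example of a minion message this pattern matches (the PID,
# date, and jid values here are made up):
#   The function "state.highstate" is running as PID 1234 and was started at
#   2020, Mar 04 19:28:46.939416 with jid 20200304192846939416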
log = logging.getLogger(__name__)
class ShellTestCase(TestCase, AdaptedConfigurationTestCaseMixin, ScriptPathMixin):
"""
Execute a test for a shell command
"""
def run_salt(self, arg_str, with_retcode=False, catch_stderr=False, timeout=15):
r'''
Run the ``salt`` CLI tool with the provided arguments
.. code-block:: python
class MatchTest(ShellTestCase):
def test_list(self):
"""
test salt -L matcher
"""
data = self.run_salt('-L minion test.ping')
data = '\n'.join(data)
self.assertIn('minion', data)
'''
arg_str = "-c {0} -t {1} {2}".format(
RUNTIME_VARS.TMP_CONF_DIR, timeout, arg_str
)
return self.run_script(
"salt",
arg_str,
with_retcode=with_retcode,
catch_stderr=catch_stderr,
timeout=timeout,
)
def run_ssh(
self,
arg_str,
with_retcode=False,
timeout=25,
catch_stderr=False,
wipe=False,
raw=False,
roster_file=None,
ssh_opts="",
**kwargs
):
"""
Execute salt-ssh
"""
if not roster_file:
roster_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "roster")
arg_str = "{0} {1} -c {2} -i --priv {3} --roster-file {4} {5} localhost {6} --out=json".format(
" -W" if wipe else "",
" -r" if raw else "",
RUNTIME_VARS.TMP_CONF_DIR,
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "key_test"),
roster_file,
ssh_opts,
arg_str,
)
return self.run_script(
"salt-ssh",
arg_str,
with_retcode=with_retcode,
catch_stderr=catch_stderr,
raw=True,
timeout=timeout,
**kwargs
)
def run_run(
self,
arg_str,
with_retcode=False,
catch_stderr=False,
asynchronous=False,
timeout=60,
config_dir=None,
**kwargs
):
"""
Execute salt-run
"""
asynchronous = kwargs.get("async", asynchronous)
arg_str = "-c {0}{async_flag} -t {timeout} {1}".format(
config_dir or RUNTIME_VARS.TMP_CONF_DIR,
arg_str,
timeout=timeout,
async_flag=" --async" if asynchronous else "",
)
return self.run_script(
"salt-run",
arg_str,
with_retcode=with_retcode,
catch_stderr=catch_stderr,
timeout=timeout,
)
def run_run_plus(self, fun, *arg, **kwargs):
"""
Execute the runner function and return the return data and output in a dict
"""
ret = {"fun": fun}
# Late import
import salt.config
import salt.output
import salt.runner
from salt.ext.six.moves import cStringIO
opts = salt.config.master_config(self.get_config_file_path("master"))
opts_arg = list(arg)
if kwargs:
opts_arg.append({"__kwarg__": True})
opts_arg[-1].update(kwargs)
opts.update({"doc": False, "fun": fun, "arg": opts_arg})
with RedirectStdStreams():
runner = salt.runner.Runner(opts)
ret["return"] = runner.run()
try:
ret["jid"] = runner.jid
except AttributeError:
ret["jid"] = None
# Compile output
# TODO: Support outputters other than nested
opts["color"] = False
opts["output_file"] = cStringIO()
try:
salt.output.display_output(ret["return"], opts=opts)
ret["out"] = opts["output_file"].getvalue()
finally:
opts["output_file"].close()
return ret
def run_key(self, arg_str, catch_stderr=False, with_retcode=False):
"""
Execute salt-key
"""
arg_str = "-c {0} {1}".format(RUNTIME_VARS.TMP_CONF_DIR, arg_str)
return self.run_script(
"salt-key", arg_str, catch_stderr=catch_stderr, with_retcode=with_retcode
)
def run_cp(self, arg_str, with_retcode=False, catch_stderr=False):
"""
Execute salt-cp
"""
arg_str = "--config-dir {0} {1}".format(RUNTIME_VARS.TMP_CONF_DIR, arg_str)
return self.run_script(
"salt-cp", arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr
)
def run_call(
self,
arg_str,
with_retcode=False,
catch_stderr=False,
local=False,
timeout=15,
config_dir=None,
):
if not config_dir:
config_dir = RUNTIME_VARS.TMP_MINION_CONF_DIR
arg_str = "{0} --config-dir {1} {2}".format(
"--local" if local else "", config_dir, arg_str
)
return self.run_script(
"salt-call",
arg_str,
with_retcode=with_retcode,
catch_stderr=catch_stderr,
timeout=timeout,
)
def assertRunCall(self, arg_str, **kwargs):
"""
Assert no error code was returned with run_call, give stderr as error message
"""
stdout, stderr, retcode = self.run_call(
arg_str, catch_stderr=True, with_retcode=True, **kwargs
)
if stderr:
log.warning(stderr)
self.assertFalse(retcode, stderr)
return stdout
def run_cloud(self, arg_str, catch_stderr=False, timeout=None):
"""
Execute salt-cloud
"""
arg_str = "-c {0} {1}".format(RUNTIME_VARS.TMP_CONF_DIR, arg_str)
return self.run_script("salt-cloud", arg_str, catch_stderr, timeout)
def run_script(
self,
script,
arg_str,
catch_stderr=False,
with_retcode=False,
catch_timeout=False,
# FIXME A timeout of zero or disabling timeouts may not return results!
timeout=15,
raw=False,
popen_kwargs=None,
log_output=None,
**kwargs
):
"""
Execute a script with the given argument string
The ``log_output`` argument is ternary, it can be True, False, or None.
If the value is boolean, then it forces the results to either be logged
or not logged. If it is None, then the return code of the subprocess
determines whether or not to log results.
"""
import salt.utils.platform
script_path = self.get_script_path(script)
if not os.path.isfile(script_path):
return False
popen_kwargs = popen_kwargs or {}
if salt.utils.platform.is_windows():
cmd = "python "
if "cwd" not in popen_kwargs:
popen_kwargs["cwd"] = os.getcwd()
if "env" not in popen_kwargs:
popen_kwargs["env"] = os.environ.copy()
if sys.version_info[0] < 3:
popen_kwargs["env"][b"PYTHONPATH"] = CODE_DIR.encode()
else:
popen_kwargs["env"]["PYTHONPATH"] = CODE_DIR
else:
cmd = "PYTHONPATH="
python_path = os.environ.get("PYTHONPATH", None)
if python_path is not None:
cmd += "{0}:".format(python_path)
if sys.version_info[0] < 3:
cmd += "{0} ".format(":".join(sys.path[1:]))
else:
cmd += "{0} ".format(":".join(sys.path[0:]))
cmd += "python{0}.{1} ".format(*sys.version_info)
cmd += "{0} ".format(script_path)
cmd += "{0} ".format(arg_str)
if kwargs:
# late import
import salt.utils.json
for key, value in kwargs.items():
cmd += "'{0}={1} '".format(key, salt.utils.json.dumps(value))
tmp_file = tempfile.SpooledTemporaryFile()
popen_kwargs = dict(
{"shell": True, "stdout": tmp_file, "universal_newlines": True},
**popen_kwargs
)
if catch_stderr is True:
popen_kwargs["stderr"] = subprocess.PIPE
if not sys.platform.lower().startswith("win"):
popen_kwargs["close_fds"] = True
def detach_from_parent_group():
# detach from parent group (no more inherited signals!)
os.setpgrp()
popen_kwargs["preexec_fn"] = detach_from_parent_group
def format_return(retcode, stdout, stderr=None, timed_out=False):
"""
DRY helper to log script result if it failed, and then return the
desired output based on whether or not stderr was desired, and
            whether or not a retcode was desired.
"""
log_func = log.debug
if timed_out:
log.error(
"run_script timed out after %d seconds (process killed)", timeout
)
log_func = log.error
if log_output is True or timed_out or (log_output is None and retcode != 0):
log_func(
"run_script results for: %s %s\n"
"return code: %s\n"
"stdout:\n"
"%s\n\n"
"stderr:\n"
"%s",
script,
arg_str,
retcode,
stdout,
stderr,
)
stdout = stdout or ""
stderr = stderr or ""
if not raw:
stdout = stdout.splitlines()
stderr = stderr.splitlines()
ret = [stdout]
if catch_stderr:
ret.append(stderr)
if with_retcode:
ret.append(retcode)
if catch_timeout:
ret.append(timed_out)
return ret[0] if len(ret) == 1 else tuple(ret)
process = subprocess.Popen(cmd, **popen_kwargs)
if timeout is not None:
stop_at = datetime.now() + timedelta(seconds=timeout)
term_sent = False
while True:
process.poll()
time.sleep(0.1)
if datetime.now() <= stop_at:
# We haven't reached the timeout yet
if process.returncode is not None:
break
else:
terminate_process(process.pid, kill_children=True)
return format_return(
process.returncode, *process.communicate(), timed_out=True
)
tmp_file.seek(0)
if sys.version_info >= (3,):
try:
out = tmp_file.read().decode(__salt_system_encoding__)
except (NameError, UnicodeDecodeError):
# Let's cross our fingers and hope for the best
out = tmp_file.read().decode("utf-8")
else:
out = tmp_file.read()
if catch_stderr:
if sys.version_info < (2, 7):
# On python 2.6, the subprocess'es communicate() method uses
# select which, is limited by the OS to 1024 file descriptors
# We need more available descriptors to run the tests which
# need the stderr output.
# So instead of .communicate() we wait for the process to
# finish, but, as the python docs state "This will deadlock
# when using stdout=PIPE and/or stderr=PIPE and the child
# process generates enough output to a pipe such that it
# blocks waiting for the OS pipe buffer to accept more data.
# Use communicate() to avoid that." <- a catch, catch situation
#
                # Use this workaround only where it's needed: Python 2.6
process.wait()
err = process.stderr.read()
else:
_, err = process.communicate()
# Force closing stderr/stdout to release file descriptors
if process.stdout is not None:
process.stdout.close()
if process.stderr is not None:
process.stderr.close()
# pylint: disable=maybe-no-member
try:
return format_return(process.returncode, out, err or "")
finally:
try:
if os.path.exists(tmp_file.name):
if isinstance(tmp_file.name, six.string_types):
# tmp_file.name is an int when using SpooledTemporaryFiles
# int types cannot be used with os.remove() in Python 3
os.remove(tmp_file.name)
else:
# Clean up file handles
tmp_file.close()
process.terminate()
except OSError as err:
# process already terminated
pass
# pylint: enable=maybe-no-member
# TODO Remove this?
process.communicate()
if process.stdout is not None:
process.stdout.close()
try:
return format_return(process.returncode, out)
finally:
try:
if os.path.exists(tmp_file.name):
if isinstance(tmp_file.name, six.string_types):
# tmp_file.name is an int when using SpooledTemporaryFiles
# int types cannot be used with os.remove() in Python 3
os.remove(tmp_file.name)
else:
# Clean up file handles
tmp_file.close()
process.terminate()
except OSError as err:
# process already terminated
pass
class MultiMasterTestShellCase(ShellTestCase):
"""
Execute a test for a shell command when running multi-master tests
"""
@property
def config_dir(self):
return RUNTIME_VARS.TMP_MM_CONF_DIR
class ShellCase(ShellTestCase, AdaptedConfigurationTestCaseMixin, ScriptPathMixin):
"""
Execute a test for a shell command
"""
_code_dir_ = CODE_DIR
_script_dir_ = SCRIPT_DIR
_python_executable_ = PYEXEC
RUN_TIMEOUT = 500
def chdir(self, dirname):
try:
os.chdir(dirname)
except OSError:
os.chdir(INTEGRATION_TEST_DIR)
def run_salt( # pylint: disable=arguments-differ
self,
arg_str,
with_retcode=False,
catch_stderr=False,
timeout=RUN_TIMEOUT,
popen_kwargs=None,
):
"""
Execute salt
"""
arg_str = "-c {0} -t {1} {2}".format(
RUNTIME_VARS.TMP_CONF_DIR, timeout, arg_str
)
ret = self.run_script(
"salt",
arg_str,
with_retcode=with_retcode,
catch_stderr=catch_stderr,
timeout=timeout,
popen_kwargs=popen_kwargs,
)
log.debug("Result of run_salt for command '%s': %s", arg_str, ret)
return ret
def run_spm(
self, arg_str, with_retcode=False, catch_stderr=False, timeout=RUN_TIMEOUT
): # pylint: disable=arguments-differ
"""
Execute spm
"""
ret = self.run_script(
"spm",
arg_str,
with_retcode=with_retcode,
catch_stderr=catch_stderr,
timeout=timeout,
)
log.debug("Result of run_spm for command '%s': %s", arg_str, ret)
return ret
def run_ssh( # pylint: disable=arguments-differ
self,
arg_str,
with_retcode=False,
catch_stderr=False, # pylint: disable=W0221
timeout=RUN_TIMEOUT,
wipe=True,
raw=False,
roster_file=None,
ssh_opts="",
**kwargs
):
"""
Execute salt-ssh
"""
if not roster_file:
roster_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "roster")
arg_str = "{0} -ldebug{1} -c {2} -i --priv {3} --roster-file {4} {5} --out=json localhost {6}".format(
" -W" if wipe else "",
" -r" if raw else "",
RUNTIME_VARS.TMP_CONF_DIR,
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "key_test"),
roster_file,
ssh_opts,
arg_str,
)
ret = self.run_script(
"salt-ssh",
arg_str,
with_retcode=with_retcode,
catch_stderr=catch_stderr,
timeout=timeout,
raw=True,
**kwargs
)
log.debug("Result of run_ssh for command '%s %s': %s", arg_str, kwargs, ret)
return ret
def run_run(
self,
arg_str,
with_retcode=False,
catch_stderr=False,
asynchronous=False,
timeout=RUN_TIMEOUT,
config_dir=None,
**kwargs
):
"""
Execute salt-run
"""
asynchronous = kwargs.get("async", asynchronous)
arg_str = "-c {0}{async_flag} -t {timeout} {1}".format(
config_dir or RUNTIME_VARS.TMP_CONF_DIR,
arg_str,
timeout=timeout,
async_flag=" --async" if asynchronous else "",
)
ret = self.run_script(
"salt-run",
arg_str,
with_retcode=with_retcode,
catch_stderr=catch_stderr,
timeout=timeout + 10,
)
log.debug("Result of run_run for command '%s': %s", arg_str, ret)
return ret
def run_run_plus(self, fun, *arg, **kwargs):
"""
Execute the runner function and return the return data and output in a dict
"""
# Late import
import salt.runner
import salt.output
ret = {"fun": fun}
from_scratch = bool(kwargs.pop("__reload_config", False))
# Have to create an empty dict and then update it, as the result from
# self.get_config() is an ImmutableDict which cannot be updated.
opts = {}
opts.update(self.get_config("client_config", from_scratch=from_scratch))
opts_arg = list(arg)
if kwargs:
opts_arg.append({"__kwarg__": True})
opts_arg[-1].update(kwargs)
opts.update({"doc": False, "fun": fun, "arg": opts_arg})
with RedirectStdStreams():
runner = salt.runner.Runner(opts)
ret["return"] = runner.run()
try:
ret["jid"] = runner.jid
except AttributeError:
ret["jid"] = None
# Compile output
# TODO: Support outputters other than nested
opts["color"] = False
opts["output_file"] = cStringIO()
try:
salt.output.display_output(ret["return"], opts=opts)
ret["out"] = opts["output_file"].getvalue().splitlines()
finally:
opts["output_file"].close()
log.debug(
"Result of run_run_plus for fun '%s' with arg '%s': %s", fun, opts_arg, ret
)
return ret
def run_key( # pylint: disable=arguments-differ
self, arg_str, catch_stderr=False, with_retcode=False, timeout=RUN_TIMEOUT,
):
"""
Execute salt-key
"""
arg_str = "-c {0} {1}".format(RUNTIME_VARS.TMP_CONF_DIR, arg_str)
ret = self.run_script(
"salt-key",
arg_str,
catch_stderr=catch_stderr,
with_retcode=with_retcode,
timeout=timeout,
)
log.debug("Result of run_key for command '%s': %s", arg_str, ret)
return ret
def run_cp( # pylint: disable=arguments-differ
self, arg_str, with_retcode=False, catch_stderr=False, timeout=RUN_TIMEOUT,
):
"""
Execute salt-cp
"""
# Note: not logging result of run_cp because it will log a bunch of
# bytes which will not be very helpful.
arg_str = "--config-dir {0} {1}".format(RUNTIME_VARS.TMP_CONF_DIR, arg_str)
return self.run_script(
"salt-cp",
arg_str,
with_retcode=with_retcode,
catch_stderr=catch_stderr,
timeout=timeout,
)
def run_call(
self,
arg_str,
with_retcode=False,
catch_stderr=False,
local=False,
timeout=RUN_TIMEOUT,
config_dir=None,
):
"""
Execute salt-call.
"""
if not config_dir:
config_dir = RUNTIME_VARS.TMP_MINION_CONF_DIR
arg_str = "{0} --config-dir {1} {2}".format(
"--local" if local else "", config_dir, arg_str
)
ret = self.run_script(
"salt-call",
arg_str,
with_retcode=with_retcode,
catch_stderr=catch_stderr,
timeout=timeout,
)
log.debug("Result of run_call for command '%s': %s", arg_str, ret)
return ret
# pylint: enable=arguments-differ
def run_cloud(self, arg_str, catch_stderr=False, timeout=RUN_TIMEOUT):
"""
Execute salt-cloud
"""
arg_str = "-c {0} {1}".format(RUNTIME_VARS.TMP_CONF_DIR, arg_str)
ret = self.run_script("salt-cloud", arg_str, catch_stderr, timeout=timeout)
log.debug("Result of run_cloud for command '%s': %s", arg_str, ret)
return ret
class SPMTestUserInterface(object):
"""
Test user interface to SPMClient
"""
def __init__(self):
self._status = []
self._confirm = []
self._error = []
def status(self, msg):
self._status.append(msg)
def confirm(self, action):
self._confirm.append(action)
def error(self, msg):
self._error.append(msg)
class SPMCase(TestCase, AdaptedConfigurationTestCaseMixin):
"""
Class for handling spm commands
"""
def _spm_build_files(self, config):
self.formula_dir = os.path.join(
" ".join(config["file_roots"]["base"]), "formulas"
)
self.formula_sls_dir = os.path.join(self.formula_dir, "apache")
self.formula_sls = os.path.join(self.formula_sls_dir, "apache.sls")
self.formula_file = os.path.join(self.formula_dir, "FORMULA")
dirs = [self.formula_dir, self.formula_sls_dir]
for f_dir in dirs:
os.makedirs(f_dir)
with salt.utils.files.fopen(self.formula_sls, "w") as fp:
fp.write(
textwrap.dedent(
"""\
install-apache:
pkg.installed:
- name: apache2
"""
)
)
with salt.utils.files.fopen(self.formula_file, "w") as fp:
fp.write(
textwrap.dedent(
"""\
name: apache
os: RedHat, Debian, Ubuntu, Suse, FreeBSD
os_family: RedHat, Debian, Suse, FreeBSD
version: 201506
release: 2
summary: Formula for installing Apache
description: Formula for installing Apache
"""
)
)
def _spm_config(self, assume_yes=True):
self._tmp_spm = tempfile.mkdtemp()
config = self.get_temp_config(
"minion",
**{
"spm_logfile": os.path.join(self._tmp_spm, "log"),
"spm_repos_config": os.path.join(self._tmp_spm, "etc", "spm.repos"),
"spm_cache_dir": os.path.join(self._tmp_spm, "cache"),
"spm_build_dir": os.path.join(self._tmp_spm, "build"),
"spm_build_exclude": ["apache/.git"],
"spm_db_provider": "sqlite3",
"spm_files_provider": "local",
"spm_db": os.path.join(self._tmp_spm, "packages.db"),
"extension_modules": os.path.join(self._tmp_spm, "modules"),
"file_roots": {"base": [self._tmp_spm]},
"formula_path": os.path.join(self._tmp_spm, "salt"),
"pillar_path": os.path.join(self._tmp_spm, "pillar"),
"reactor_path": os.path.join(self._tmp_spm, "reactor"),
"assume_yes": True if assume_yes else False,
"force": False,
"verbose": False,
"cache": "localfs",
"cachedir": os.path.join(self._tmp_spm, "cache"),
"spm_repo_dups": "ignore",
"spm_share_dir": os.path.join(self._tmp_spm, "share"),
}
)
import salt.utils.yaml
if not os.path.isdir(config["formula_path"]):
os.makedirs(config["formula_path"])
with salt.utils.files.fopen(os.path.join(self._tmp_spm, "spm"), "w") as fp:
salt.utils.yaml.safe_dump(config, fp)
return config
def _spm_create_update_repo(self, config):
build_spm = self.run_spm("build", self.config, self.formula_dir)
c_repo = self.run_spm("create_repo", self.config, self.config["spm_build_dir"])
repo_conf_dir = self.config["spm_repos_config"] + ".d"
os.makedirs(repo_conf_dir)
with salt.utils.files.fopen(os.path.join(repo_conf_dir, "spm.repo"), "w") as fp:
fp.write(
textwrap.dedent(
"""\
local_repo:
url: file://{0}
""".format(
self.config["spm_build_dir"]
)
)
)
u_repo = self.run_spm("update_repo", self.config)
def _spm_client(self, config):
import salt.spm
self.ui = SPMTestUserInterface()
client = salt.spm.SPMClient(self.ui, config)
return client
def run_spm(self, cmd, config, arg=None):
client = self._spm_client(config)
client.run([cmd, arg])
client._close()
return self.ui._status
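    # Illustrative end-to-end flow tying the helpers above together (a sketch
    # mirroring _spm_create_update_repo; real tests add their own assertions):
    #   config = self._spm_config()
    #   self._spm_build_files(config)
    #   self.run_spm("build", config, self.formula_dir)
    #   self.run_spm("update_repo", config)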
class ModuleCase(TestCase, SaltClientTestCaseMixin):
"""
Execute a module function
"""
def wait_for_all_jobs(self, minions=("minion", "sub_minion",), sleep=0.3):
"""
Wait for all jobs currently running on the list of minions to finish
"""
for minion in minions:
while True:
ret = self.run_function(
"saltutil.running", minion_tgt=minion, timeout=300
)
if ret:
log.debug("Waiting for minion's jobs: %s", minion)
time.sleep(sleep)
else:
break
def minion_run(self, _function, *args, **kw):
"""
Run a single salt function on the 'minion' target and condition
the return down to match the behavior of the raw function call
"""
return self.run_function(_function, args, **kw)
def run_function(
self,
function,
arg=(),
minion_tgt="minion",
timeout=300,
master_tgt=None,
**kwargs
):
"""
Run a single salt function and condition the return down to match the
behavior of the raw function call
"""
known_to_return_none = (
"data.get",
"file.chown",
"file.chgrp",
"pkg.refresh_db",
"ssh.recv_known_host_entries",
"time.sleep",
)
if "f_arg" in kwargs:
kwargs["arg"] = kwargs.pop("f_arg")
if "f_timeout" in kwargs:
kwargs["timeout"] = kwargs.pop("f_timeout")
client = self.client if master_tgt is None else self.clients[master_tgt]
log.debug(
"Running client.cmd(minion_tgt=%r, function=%r, arg=%r, timeout=%r, kwarg=%r)",
minion_tgt,
function,
arg,
timeout,
kwargs,
)
orig = client.cmd(minion_tgt, function, arg, timeout=timeout, kwarg=kwargs)
if RUNTIME_VARS.PYTEST_SESSION:
fail_or_skip_func = self.fail
else:
fail_or_skip_func = self.skipTest
if minion_tgt not in orig:
fail_or_skip_func(
"WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply "
"from the minion '{0}'. Command output: {1}".format(minion_tgt, orig)
)
elif orig[minion_tgt] is None and function not in known_to_return_none:
fail_or_skip_func(
"WARNING(SHOULD NOT HAPPEN #1935): Failed to get '{0}' from "
"the minion '{1}'. Command output: {2}".format(
function, minion_tgt, orig
)
)
# Try to match stalled state functions
orig[minion_tgt] = self._check_state_return(orig[minion_tgt])
return orig[minion_tgt]
def run_state(self, function, **kwargs):
"""
Run the state.single command and return the state return structure
"""
ret = self.run_function("state.single", [function], **kwargs)
return self._check_state_return(ret)
def _check_state_return(self, ret):
if isinstance(ret, dict):
# This is the supposed return format for state calls
return ret
if isinstance(ret, list):
jids = []
# These are usually errors
for item in ret[:]:
if not isinstance(item, six.string_types):
# We don't know how to handle this
continue
match = STATE_FUNCTION_RUNNING_RE.match(item)
if not match:
# We don't know how to handle this
continue
jid = match.group("jid")
if jid in jids:
continue
jids.append(jid)
job_data = self.run_function("saltutil.find_job", [jid])
job_kill = self.run_function("saltutil.kill_job", [jid])
msg = (
"A running state.single was found causing a state lock. "
"Job details: '{0}' Killing Job Returned: '{1}'".format(
job_data, job_kill
)
)
ret.append(
"[TEST SUITE ENFORCED]{0}" "[/TEST SUITE ENFORCED]".format(msg)
)
return ret
class MultimasterModuleCase(ModuleCase, SaltMultimasterClientTestCaseMixin):
"""
Execute a module function
"""
def run_function(
self,
function,
arg=(),
minion_tgt="mm-minion",
timeout=300,
master_tgt="mm-master",
**kwargs
):
"""
Run a single salt function and condition the return down to match the
behavior of the raw function call
"""
known_to_return_none = (
"data.get",
"file.chown",
"file.chgrp",
"pkg.refresh_db",
"ssh.recv_known_host_entries",
"time.sleep",
)
if minion_tgt == "mm-sub-minion":
known_to_return_none += ("mine.update",)
if "f_arg" in kwargs:
kwargs["arg"] = kwargs.pop("f_arg")
if "f_timeout" in kwargs:
kwargs["timeout"] = kwargs.pop("f_timeout")
if master_tgt is None:
client = self.clients["mm-master"]
elif isinstance(master_tgt, int):
client = self.clients[list(self.clients)[master_tgt]]
else:
client = self.clients[master_tgt]
orig = client.cmd(minion_tgt, function, arg, timeout=timeout, kwarg=kwargs)
if RUNTIME_VARS.PYTEST_SESSION:
fail_or_skip_func = self.fail
else:
fail_or_skip_func = self.skipTest
if minion_tgt not in orig:
fail_or_skip_func(
"WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply "
"from the minion '{0}'. Command output: {1}".format(minion_tgt, orig)
)
elif orig[minion_tgt] is None and function not in known_to_return_none:
fail_or_skip_func(
"WARNING(SHOULD NOT HAPPEN #1935): Failed to get '{0}' from "
"the minion '{1}'. Command output: {2}".format(
function, minion_tgt, orig
)
)
# Try to match stalled state functions
orig[minion_tgt] = self._check_state_return(orig[minion_tgt])
return orig[minion_tgt]
def run_function_all_masters(
self, function, arg=(), minion_tgt="mm-minion", timeout=300, **kwargs
):
"""
        Run a single salt function from all the masters in a multimaster environment
and condition the return down to match the behavior of the raw function call
"""
ret = []
for master_id in self.clients:
ret.append(
self.run_function(
function,
arg=arg,
minion_tgt=minion_tgt,
timeout=timeout,
master_tgt=master_id,
**kwargs
)
)
return ret
class SyndicCase(TestCase, SaltClientTestCaseMixin):
"""
Execute a syndic based execution test
"""
_salt_client_config_file_name_ = "syndic_master"
def run_function(self, function, arg=(), timeout=90):
"""
Run a single salt function and condition the return down to match the
behavior of the raw function call
"""
orig = self.client.cmd("minion", function, arg, timeout=timeout)
if RUNTIME_VARS.PYTEST_SESSION:
fail_or_skip_func = self.fail
else:
fail_or_skip_func = self.skipTest
if "minion" not in orig:
fail_or_skip_func(
"WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply "
"from the minion. Command output: {0}".format(orig)
)
return orig["minion"]
@requires_sshd_server
class SSHCase(ShellCase):
"""
Execute a command via salt-ssh
"""
def _arg_str(self, function, arg):
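        # e.g. _arg_str("test.arg", ["a", "b"]) -> "test.arg a b", the
        # space-separated form expected on the salt-ssh command line.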
return "{0} {1}".format(function, " ".join(arg))
def run_function(
self, function, arg=(), timeout=180, wipe=True, raw=False, **kwargs
):
"""
We use a 180s timeout here, which some slower systems do end up needing
"""
ret = self.run_ssh(
self._arg_str(function, arg), timeout=timeout, wipe=wipe, raw=raw, **kwargs
)
log.debug(
"SSHCase run_function executed %s with arg %s and kwargs %s",
function,
arg,
kwargs,
)
log.debug("SSHCase JSON return: %s", ret)
# Late import
import salt.utils.json
try:
return salt.utils.json.loads(ret)["localhost"]
except Exception: # pylint: disable=broad-except
return ret
def custom_roster(self, new_roster, data):
"""
        Helper method to create a custom roster to use for a salt-ssh test.
"""
roster = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "roster")
with salt.utils.files.fopen(roster, "r") as fp_:
conf = salt.utils.yaml.safe_load(fp_)
conf["localhost"].update(data)
with salt.utils.files.fopen(new_roster, "w") as fp_:
salt.utils.yaml.safe_dump(conf, fp_)
class ClientCase(AdaptedConfigurationTestCaseMixin, TestCase):
"""
A base class containing relevant options for starting the various Salt
Python API entrypoints
"""
def get_opts(self):
# Late import
import salt.config
return salt.config.client_config(self.get_config_file_path("master"))
def mkdir_p(self, path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
| []
| []
| [
"PYTHONPATH"
]
| [] | ["PYTHONPATH"] | python | 1 | 0 | |
tests/system_tests_sasl_plain.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from time import sleep
import os, json
from subprocess import PIPE, STDOUT, Popen
from system_test import TestCase, Qdrouterd, main_module, DIR, TIMEOUT, SkipIfNeeded, Process
from system_test import unittest, QdManager
from qpid_dispatch.management.client import Node
from proton import SASL
class RouterTestPlainSaslCommon(TestCase):
@classmethod
def router(cls, name, connection):
config = Qdrouterd.Config(connection)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=False))
@classmethod
def createSaslFiles(cls):
# Create a sasl database.
p = Popen(['saslpasswd2', '-c', '-p', '-f', 'qdrouterd.sasldb', '-u', 'domain.com', 'test'],
stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True)
result = p.communicate('password')
assert p.returncode == 0, \
"saslpasswd2 exit status %s, output:\n%s" % (p.returncode, result)
# Create a SASL configuration file.
with open('tests-mech-PLAIN.conf', 'w') as sasl_conf:
sasl_conf.write("""
pwcheck_method: auxprop
auxprop_plugin: sasldb
sasldb_path: qdrouterd.sasldb
mech_list: ANONYMOUS DIGEST-MD5 EXTERNAL PLAIN
# The following line stops spurious 'sql_select option missing' errors when cyrus-sql-sasl plugin is installed
sql_select: dummy select
""")
class RouterTestPlainSaslFailure(RouterTestPlainSaslCommon):
@staticmethod
def sasl_file(name):
return os.path.join(DIR, 'sasl_files', name)
@classmethod
def setUpClass(cls):
"""
        Tests the sasl_username and sasl_password properties of the dispatch router.
Creates two routers (QDR.X and QDR.Y) and sets up PLAIN authentication on QDR.X.
QDR.Y connects to QDR.X by providing a sasl_username and a bad sasl_password
as a non-existent file.
"""
super(RouterTestPlainSaslFailure, cls).setUpClass()
if not SASL.extended():
return
super(RouterTestPlainSaslFailure, cls).createSaslFiles()
cls.routers = []
x_listener_port = cls.tester.get_port()
y_listener_port = cls.tester.get_port()
super(RouterTestPlainSaslFailure, cls).router('X', [
('listener', {'host': '0.0.0.0', 'role': 'inter-router', 'port': x_listener_port,
'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}),
            # This unauthenticated listener is for qdstat to connect to.
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(),
'authenticatePeer': 'no'}),
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(),
'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}),
('router', {'workerThreads': 1,
'id': 'QDR.X',
'mode': 'interior',
'saslConfigName': 'tests-mech-PLAIN',
# Leave as saslConfigPath for testing backward compatibility
'saslConfigPath': os.getcwd()}),
])
super(RouterTestPlainSaslFailure, cls).router('Y', [
('connector', {'host': '0.0.0.0', 'role': 'inter-router', 'port': x_listener_port,
# Provide a sasl user name and password to connect to QDR.X
'saslMechanisms': 'PLAIN',
'saslUsername': '[email protected]',
                           # Provide a non-existent file.
'saslPassword': 'file:' + cls.sasl_file('non-existent-password-file.txt')}),
('router', {'workerThreads': 1,
'mode': 'interior',
'id': 'QDR.Y'}),
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': y_listener_port}),
])
cls.routers[0].wait_ports()
cls.routers[1].wait_ports()
try:
# This will time out in 5 seconds because there is no inter-router connection
cls.routers[1].wait_connectors(timeout=5)
except:
pass
# Give some time for connector failures to be written to the log.
sleep(3)
@SkipIfNeeded(not SASL.extended(), "Cyrus library not available. skipping test")
def test_inter_router_sasl_fail(self):
passed = False
long_type = 'org.apache.qpid.dispatch.connection'
qd_manager = QdManager(self, address=self.routers[1].addresses[0])
connections = qd_manager.query(long_type)
for connection in connections:
if connection['role'] == 'inter-router':
passed = True
break
# There was no inter-router connection established.
self.assertFalse(passed)
qd_manager = QdManager(self, address=self.routers[1].addresses[0])
logs = qd_manager.get_log()
sasl_failed = False
file_open_failed = False
for log in logs:
if log[0] == 'SERVER' and log[1] == "info" and "amqp:unauthorized-access Authentication failed [mech=PLAIN]" in log[2]:
sasl_failed = True
if log[0] == "CONN_MGR" and log[1] == "error" and "Unable to open password file" in log[2] and "error: No such file or directory" in log[2]:
file_open_failed = True
self.assertTrue(sasl_failed)
self.assertTrue(file_open_failed)
class RouterTestPlainSaslFailureUsingLiteral(RouterTestPlainSaslCommon):
@staticmethod
def sasl_file(name):
return os.path.join(DIR, 'sasl_files', name)
@classmethod
def setUpClass(cls):
"""
        Tests the sasl_username and sasl_password properties of the dispatch router.
Creates two routers (QDR.X and QDR.Y) and sets up PLAIN authentication on QDR.X.
QDR.Y connects to QDR.X by providing a sasl_username and a bad sasl_password
using the literal: prefix.
"""
super(RouterTestPlainSaslFailureUsingLiteral, cls).setUpClass()
if not SASL.extended():
return
super(RouterTestPlainSaslFailureUsingLiteral, cls).createSaslFiles()
cls.routers = []
x_listener_port = cls.tester.get_port()
y_listener_port = cls.tester.get_port()
super(RouterTestPlainSaslFailureUsingLiteral, cls).router('X', [
('listener', {'host': '0.0.0.0', 'role': 'inter-router', 'port': x_listener_port,
'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}),
            # This unauthenticated listener is for qdstat to connect to.
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(),
'authenticatePeer': 'no'}),
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(),
'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}),
('router', {'workerThreads': 1,
'id': 'QDR.X',
'mode': 'interior',
'saslConfigName': 'tests-mech-PLAIN',
# Leave as saslConfigPath for testing backward compatibility
'saslConfigPath': os.getcwd()}),
])
super(RouterTestPlainSaslFailureUsingLiteral, cls).router('Y', [
('connector', {'host': '0.0.0.0', 'role': 'inter-router', 'port': x_listener_port,
# Provide a sasl user name and password to connect to QDR.X
'saslMechanisms': 'PLAIN',
'saslUsername': '[email protected]',
                           # Provide the password with a "literal:" prefix. This should fail.
'saslPassword': 'literal:password'}),
('router', {'workerThreads': 1,
'mode': 'interior',
'id': 'QDR.Y'}),
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': y_listener_port}),
])
cls.routers[0].wait_ports()
cls.routers[1].wait_ports()
try:
# This will time out in 5 seconds because there is no inter-router connection
cls.routers[1].wait_connectors(timeout=5)
except:
pass
# Give some time for connector failures to be written to the log.
sleep(3)
@SkipIfNeeded(not SASL.extended(), "Cyrus library not available. skipping test")
def test_inter_router_sasl_fail(self):
passed = False
long_type = 'org.apache.qpid.dispatch.connection'
qd_manager = QdManager(self, address=self.routers[1].addresses[0])
connections = qd_manager.query(long_type)
for connection in connections:
if connection['role'] == 'inter-router':
passed = True
break
# There was no inter-router connection established.
self.assertFalse(passed)
logs = qd_manager.get_log()
sasl_failed = False
for log in logs:
if log[0] == 'SERVER' and log[1] == "info" and "amqp:unauthorized-access Authentication failed [mech=PLAIN]" in log[2]:
sasl_failed = True
self.assertTrue(sasl_failed)
class RouterTestPlainSasl(RouterTestPlainSaslCommon):
@classmethod
def setUpClass(cls):
"""
        Tests the sasl_username and sasl_password properties of the dispatch router.
Creates two routers (QDR.X and QDR.Y) and sets up PLAIN authentication on QDR.X.
QDR.Y connects to QDR.X by providing a sasl_username and a sasl_password.
"""
super(RouterTestPlainSasl, cls).setUpClass()
if not SASL.extended():
return
os.environ["ENV_SASL_PASSWORD"] = "password"
super(RouterTestPlainSasl, cls).createSaslFiles()
cls.routers = []
x_listener_port = cls.tester.get_port()
y_listener_port = cls.tester.get_port()
super(RouterTestPlainSasl, cls).router('X', [
('listener', {'host': '0.0.0.0', 'role': 'inter-router', 'port': x_listener_port,
'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}),
            # This unauthenticated listener is for qdstat to connect to.
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(),
'authenticatePeer': 'no'}),
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(),
'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}),
('router', {'workerThreads': 1,
'id': 'QDR.X',
'mode': 'interior',
'saslConfigName': 'tests-mech-PLAIN',
# Leave as saslConfigPath for testing backward compatibility
'saslConfigPath': os.getcwd()}),
])
super(RouterTestPlainSasl, cls).router('Y', [
('connector', {'host': '0.0.0.0', 'role': 'inter-router', 'port': x_listener_port,
# Provide a sasl user name and password to connect to QDR.X
'saslMechanisms': 'PLAIN',
'saslUsername': '[email protected]',
'saslPassword': 'env:ENV_SASL_PASSWORD'}),
('router', {'workerThreads': 1,
'mode': 'interior',
'id': 'QDR.Y'}),
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': y_listener_port}),
])
cls.routers[1].wait_router_connected('QDR.X')
@SkipIfNeeded(not SASL.extended(), "Cyrus library not available. skipping test")
def test_inter_router_plain_exists(self):
"""
        Check that the authentication of the inter-router link is PLAIN.
        This test executes qdstat -c via an unauthenticated listener to
        QDR.X and makes sure that the output has an "inter-router" connection to
        QDR.Y whose authentication is PLAIN. This ensures that QDR.Y did not
        somehow use SASL ANONYMOUS to connect to QDR.X.
"""
p = self.popen(
['qdstat', '-b', str(self.routers[0].addresses[1]), '-c'],
name='qdstat-'+self.id(), stdout=PIPE, expect=None,
universal_newlines=True)
out = p.communicate()[0]
assert p.returncode == 0, \
"qdstat exit status %s, output:\n%s" % (p.returncode, out)
self.assertIn("inter-router", out)
self.assertIn("[email protected](PLAIN)", out)
@SkipIfNeeded(not SASL.extended(), "Cyrus library not available. skipping test")
def test_qdstat_connect_sasl(self):
"""
Make qdstat use sasl plain authentication.
"""
p = self.popen(
['qdstat', '-b', str(self.routers[0].addresses[2]), '-c', '--sasl-mechanisms=PLAIN',
'[email protected]', '--sasl-password=password'],
name='qdstat-'+self.id(), stdout=PIPE, expect=None,
universal_newlines=True)
out = p.communicate()[0]
assert p.returncode == 0, \
"qdstat exit status %s, output:\n%s" % (p.returncode, out)
split_list = out.split()
# There will be 2 connections that have authenticated using SASL PLAIN. One inter-router connection
# and the other connection that this qdstat client is making
self.assertEqual(2, split_list.count("[email protected](PLAIN)"))
self.assertEqual(1, split_list.count("inter-router"))
self.assertEqual(1, split_list.count("normal"))
@SkipIfNeeded(not SASL.extended(), "Cyrus library not available. skipping test")
def test_qdstat_connect_sasl_password_file(self):
"""
Make qdstat use sasl plain authentication with client password specified in a file.
"""
password_file = os.getcwd() + '/sasl-client-password-file.txt'
        # Create a file containing the client password.
        with open(password_file, 'w') as sasl_client_password_file:
            sasl_client_password_file.write("password")
p = self.popen(
['qdstat', '-b', str(self.routers[0].addresses[2]), '-c', '--sasl-mechanisms=PLAIN',
'[email protected]', '--sasl-password-file=' + password_file],
name='qdstat-'+self.id(), stdout=PIPE, expect=None,
universal_newlines=True)
out = p.communicate()[0]
assert p.returncode == 0, \
"qdstat exit status %s, output:\n%s" % (p.returncode, out)
split_list = out.split()
# There will be 2 connections that have authenticated using SASL PLAIN. One inter-router connection
# and the other connection that this qdstat client is making
self.assertEqual(2, split_list.count("[email protected](PLAIN)"))
self.assertEqual(1, split_list.count("inter-router"))
self.assertEqual(1, split_list.count("normal"))
class RouterTestPlainSaslOverSsl(RouterTestPlainSaslCommon):
@staticmethod
def ssl_file(name):
return os.path.join(DIR, 'ssl_certs', name)
@staticmethod
def sasl_file(name):
return os.path.join(DIR, 'sasl_files', name)
@classmethod
def setUpClass(cls):
"""
        Tests the sasl_username and sasl_password properties of the dispatch router.
Creates two routers (QDR.X and QDR.Y) and sets up PLAIN authentication on QDR.X.
QDR.Y connects to QDR.X by providing a sasl_username and a sasl_password.
This PLAIN authentication is done over a TLS connection.
"""
super(RouterTestPlainSaslOverSsl, cls).setUpClass()
if not SASL.extended():
return
super(RouterTestPlainSaslOverSsl, cls).createSaslFiles()
cls.routers = []
x_listener_port = cls.tester.get_port()
y_listener_port = cls.tester.get_port()
super(RouterTestPlainSaslOverSsl, cls).router('X', [
('listener', {'host': '0.0.0.0', 'role': 'inter-router', 'port': x_listener_port,
'sslProfile':'server-ssl-profile',
'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}),
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(),
'authenticatePeer': 'no'}),
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(),
'sslProfile':'server-ssl-profile',
'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}),
('sslProfile', {'name': 'server-ssl-profile',
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'ciphers': 'ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS',
'protocols': 'TLSv1.1 TLSv1.2',
'password': 'server-password'}),
('router', {'workerThreads': 1,
'id': 'QDR.X',
'mode': 'interior',
'saslConfigName': 'tests-mech-PLAIN',
'saslConfigDir': os.getcwd()}),
])
super(RouterTestPlainSaslOverSsl, cls).router('Y', [
# This router will act like a client. First an SSL connection will be established and then
# we will have SASL plain authentication over SSL.
('connector', {'host': 'localhost', 'role': 'inter-router', 'port': x_listener_port,
'sslProfile': 'client-ssl-profile',
# Provide a sasl user name and password to connect to QDR.X
'saslMechanisms': 'PLAIN',
'saslUsername': '[email protected]',
'saslPassword': 'file:' + cls.sasl_file('password.txt')}),
('router', {'workerThreads': 1,
'mode': 'interior',
'id': 'QDR.Y'}),
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': y_listener_port}),
('sslProfile', {'name': 'client-ssl-profile',
'caCertFile': cls.ssl_file('ca-certificate.pem')}),
])
cls.routers[1].wait_router_connected('QDR.X')
@SkipIfNeeded(not SASL.extended(), "Cyrus library not available. skipping test")
def test_aaa_qdstat_connect_sasl_over_ssl(self):
"""
Make qdstat use sasl plain authentication over ssl.
"""
p = self.popen(
['qdstat', '-b', str(self.routers[0].addresses[2]), '-c',
# The following are SASL args
'--sasl-mechanisms=PLAIN',
'[email protected]',
'--sasl-password=password',
# The following are SSL args
'--ssl-disable-peer-name-verify',
'--ssl-trustfile=' + self.ssl_file('ca-certificate.pem'),
'--ssl-certificate=' + self.ssl_file('client-certificate.pem'),
'--ssl-key=' + self.ssl_file('client-private-key.pem'),
'--ssl-password=client-password'],
name='qdstat-'+self.id(), stdout=PIPE, expect=None,
universal_newlines=True)
out = p.communicate()[0]
assert p.returncode == 0, \
"qdstat exit status %s, output:\n%s" % (p.returncode, out)
split_list = out.split()
# There will be 2 connections that have authenticated using SASL PLAIN. One inter-router connection
# and the other connection that this qdstat client is making
self.assertEqual(2, split_list.count("[email protected](PLAIN)"))
self.assertEqual(1, split_list.count("inter-router"))
self.assertEqual(1, split_list.count("normal"))
@SkipIfNeeded(not SASL.extended(), "Cyrus library not available. skipping test")
def test_inter_router_plain_over_ssl_exists(self):
"""The setUpClass sets up two routers with SASL PLAIN enabled over TLS.
        This test executes a query for type='org.apache.qpid.dispatch.connection'
        over an unauthenticated listener to QDR.X and makes sure that the output
        has an "inter-router" connection to QDR.Y whose authentication is PLAIN.
        This ensures that QDR.Y did not somehow use SASL ANONYMOUS to connect to
        QDR.X.
Also makes sure that TLSv1.x was used as sslProto
"""
local_node = Node.connect(self.routers[0].addresses[1], timeout=TIMEOUT)
results = local_node.query(type='org.apache.qpid.dispatch.connection').results
# sslProto should be TLSv1.x
self.assertTrue(u'TLSv1' in results[0][10])
# role should be inter-router
self.assertEqual(u'inter-router', results[0][3])
# sasl must be plain
self.assertEqual(u'PLAIN', results[0][6])
# user must be [email protected]
self.assertEqual(u'[email protected]', results[0][8])
class RouterTestVerifyHostNameYes(RouterTestPlainSaslCommon):
@staticmethod
def ssl_file(name):
return os.path.join(DIR, 'ssl_certs', name)
@staticmethod
def sasl_file(name):
return os.path.join(DIR, 'sasl_files', name)
@classmethod
def setUpClass(cls):
"""
Tests the verifyHostname property of the connector. The hostname on the server certificate we use is
localhost and the host is 127.0.0.1 on the client router initiating the SSL connection.
Since the host names do not match and the verifyHostname is set to true, the client router
        will NOT be able to make a successful SSL connection to the server router.
"""
super(RouterTestVerifyHostNameYes, cls).setUpClass()
if not SASL.extended():
return
super(RouterTestVerifyHostNameYes, cls).createSaslFiles()
cls.routers = []
x_listener_port = cls.tester.get_port()
y_listener_port = cls.tester.get_port()
super(RouterTestVerifyHostNameYes, cls).router('X', [
('listener', {'host': '0.0.0.0', 'role': 'inter-router', 'port': x_listener_port,
'sslProfile':'server-ssl-profile',
'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}),
            # This unauthenticated listener is for qdstat to connect to.
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(),
'authenticatePeer': 'no'}),
('sslProfile', {'name': 'server-ssl-profile',
'certFile': cls.ssl_file('server-certificate.pem'),
'privateKeyFile': cls.ssl_file('server-private-key.pem'),
'password': 'server-password'}),
('router', {'workerThreads': 1,
'id': 'QDR.X',
'mode': 'interior',
'saslConfigName': 'tests-mech-PLAIN',
'saslConfigDir': os.getcwd()}),
])
super(RouterTestVerifyHostNameYes, cls).router('Y', [
('connector', {'host': '127.0.0.1', 'role': 'inter-router', 'port': x_listener_port,
'sslProfile': 'client-ssl-profile',
# verifyHostName has been deprecated. We are using it here to test
# backward compatibility. TODO: should add a specific test.
'verifyHostName': 'yes',
'saslMechanisms': 'PLAIN',
'saslUsername': '[email protected]',
'saslPassword': 'file:' + cls.sasl_file('password.txt')}),
('router', {'workerThreads': 1,
'mode': 'interior',
'id': 'QDR.Y'}),
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': y_listener_port}),
('sslProfile', {'name': 'client-ssl-profile',
'caCertFile': cls.ssl_file('ca-certificate.pem')}),
])
cls.routers[0].wait_ports()
cls.routers[1].wait_ports()
try:
# This will time out because there is no inter-router connection
cls.routers[1].wait_connectors(timeout=3)
except:
pass
@SkipIfNeeded(not SASL.extended(), "Cyrus library not available. skipping test")
def test_no_inter_router_connection(self):
"""
Tests to make sure that there are no 'inter-router' connections.
        The connection to the other router will not be established because the
        TLS hostname verification fails due to setting 'verifyHostname': 'yes'.
"""
local_node = Node.connect(self.routers[1].addresses[0], timeout=TIMEOUT)
results = local_node.query(type='org.apache.qpid.dispatch.connection').results
# There should be only two connections.
# There will be no inter-router connection
self.assertEqual(2, len(results))
self.assertEqual('in', results[0][4])
self.assertEqual('normal', results[0][3])
self.assertEqual('anonymous', results[0][8])
self.assertEqual('normal', results[1][3])
self.assertEqual('anonymous', results[1][8])
class RouterTestVerifyHostNameNo(RouterTestPlainSaslCommon):
@staticmethod
def ssl_file(name):
return os.path.join(DIR, 'ssl_certs', name)
x_listener_port = None
@classmethod
def setUpClass(cls):
"""
Tests the verifyHostname property of the connector. The hostname on the server certificate we use is
localhost and the host is 127.0.0.1 on the client router initiating the SSL connection.
Since the host names do not match but verifyHostname is set to false, the client router
        will be able to successfully make an SSL connection to the server router.
"""
super(RouterTestVerifyHostNameNo, cls).setUpClass()
if not SASL.extended():
return
super(RouterTestVerifyHostNameNo, cls).createSaslFiles()
cls.routers = []
x_listener_port = cls.tester.get_port()
RouterTestVerifyHostNameNo.x_listener_port = x_listener_port
y_listener_port = cls.tester.get_port()
super(RouterTestVerifyHostNameNo, cls).router('X', [
('listener', {'host': '0.0.0.0', 'role': 'inter-router', 'port': x_listener_port,
'sslProfile':'server-ssl-profile',
'saslMechanisms':'PLAIN', 'authenticatePeer': 'yes'}),
            # This unauthenticated listener is for qdstat to connect to.
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': cls.tester.get_port(),
'authenticatePeer': 'no'}),
('sslProfile', {'name': 'server-ssl-profile',
# certDb has been deprecated. We are using it here to test backward compatibility.
                            # TODO: should add a specific test; this one presumably doesn't even use it, since it does not do client-certificate authentication
'certDb': cls.ssl_file('ca-certificate.pem'),
'certFile': cls.ssl_file('server-certificate.pem'),
# keyFile has been deprecated. We are using it here to test backward compatibility.
'keyFile': cls.ssl_file('server-private-key.pem'),
'password': 'server-password'}),
('router', {'workerThreads': 1,
'id': 'QDR.X',
'mode': 'interior',
'saslConfigName': 'tests-mech-PLAIN',
'saslConfigDir': os.getcwd()}),
])
super(RouterTestVerifyHostNameNo, cls).router('Y', [
# This router will act like a client. First an SSL connection will be established and then
# we will have SASL plain authentication over SSL.
('connector', {'name': 'connectorToX',
'host': '127.0.0.1', 'role': 'inter-router',
'port': x_listener_port,
'sslProfile': 'client-ssl-profile',
# Provide a sasl user name and password to connect to QDR.X
'saslMechanisms': 'PLAIN',
'verifyHostname': 'no',
'saslUsername': '[email protected]', 'saslPassword': 'pass:password'}),
('router', {'workerThreads': 1,
'mode': 'interior',
'id': 'QDR.Y'}),
('listener', {'host': '0.0.0.0', 'role': 'normal', 'port': y_listener_port}),
('sslProfile', {'name': 'client-ssl-profile',
'caCertFile': cls.ssl_file('ca-certificate.pem')}),
])
cls.routers[0].wait_ports()
cls.routers[1].wait_ports()
cls.routers[1].wait_router_connected('QDR.X')
def common_asserts(self, results):
search = "QDR.X"
found = False
for N in range(0, len(results)):
if results[N][5] == search:
found = True
break
self.assertTrue(found, "Connection to %s not found" % search)
# sslProto should be TLSv1.x
self.assertTrue(u'TLSv1' in results[N][10])
# role should be inter-router
self.assertEqual(u'inter-router', results[N][3])
# sasl must be plain
self.assertEqual(u'PLAIN', results[N][6])
# user must be [email protected]
self.assertEqual(u'[email protected]', results[N][8])
@SkipIfNeeded(not SASL.extended(), "Cyrus library not available. skipping test")
def test_inter_router_plain_over_ssl_exists(self):
"""
Tests to make sure that an inter-router connection exists between the routers since verifyHostname is 'no'.
"""
local_node = Node.connect(self.routers[1].addresses[0], timeout=TIMEOUT)
results = local_node.query(type='org.apache.qpid.dispatch.connection').results
self.common_asserts(results)
@SkipIfNeeded(not SASL.extended(), "Cyrus library not available. skipping test")
def test_zzz_delete_create_ssl_profile(self):
"""
Deletes a connector and its corresponding ssl profile and recreates both
"""
local_node = self.routers[1].management
connections = local_node.query(type='org.apache.qpid.dispatch.connection').get_entities()
self.assertIn("QDR.X", [c.container for c in connections]) # We can find the connection before
local_node.delete(type='connector', name='connectorToX')
local_node.delete(type='sslProfile', name='client-ssl-profile')
connections = local_node.query(type='org.apache.qpid.dispatch.connection').get_entities()
is_qdr_x = "QDR.X" in [c.container for c in connections]
self.assertFalse(is_qdr_x) # Should not be present now
# re-create the ssl profile
local_node.create({'type': 'sslProfile',
'name': 'client-ssl-profile',
'certFile': self.ssl_file('client-certificate.pem'),
'privateKeyFile': self.ssl_file('client-private-key.pem'),
'password': 'client-password',
'caCertFile': self.ssl_file('ca-certificate.pem')})
# re-create connector
local_node.create({'type': 'connector',
'name': 'connectorToX',
'host': '127.0.0.1',
'port': self.x_listener_port,
'saslMechanisms': 'PLAIN',
'sslProfile': 'client-ssl-profile',
'role': 'inter-router',
'verifyHostname': False,
'saslUsername': '[email protected]',
'saslPassword': 'password'})
self.routers[1].wait_connectors()
results = local_node.query(type='org.apache.qpid.dispatch.connection').results
self.common_asserts(results)
if __name__ == '__main__':
unittest.main(main_module())
| []
| []
| [
"ENV_SASL_PASSWORD"
]
| [] | ["ENV_SASL_PASSWORD"] | python | 1 | 0 | |
eggs/pylint-1.4.4-py2.7.egg/pylint/test/unittest_reporting.py | # Copyright (c) 2003-2014 LOGILAB S.A. (Paris, FRANCE).
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
from os.path import join, dirname, abspath
import unittest
import warnings
import six
from logilab.common.ureports import Section
from pylint.lint import PyLinter
from pylint import checkers
from pylint.reporters.text import TextReporter, ParseableTextReporter
from pylint.reporters.html import HTMLReporter
HERE = abspath(dirname(__file__))
INPUTDIR = join(HERE, 'input')
class PyLinterTC(unittest.TestCase):
def setUp(self):
self.linter = PyLinter(reporter=TextReporter())
self.linter.disable('I')
self.linter.config.persistent = 0
# register checkers
checkers.initialize(self.linter)
os.environ.pop('PYLINTRC', None)
def test_template_option(self):
output = six.StringIO()
self.linter.reporter.set_output(output)
self.linter.set_option('msg-template', '{msg_id}:{line:03d}')
self.linter.open()
self.linter.set_current_module('0123')
self.linter.add_message('C0301', line=1, args=(1, 2))
self.linter.add_message('line-too-long', line=2, args=(3, 4))
self.assertMultiLineEqual(output.getvalue(),
'************* Module 0123\n'
'C0301:001\n'
'C0301:002\n')
def test_parseable_output_deprecated(self):
with warnings.catch_warnings(record=True) as cm:
warnings.simplefilter("always")
ParseableTextReporter()
self.assertEqual(len(cm), 1)
self.assertIsInstance(cm[0].message, DeprecationWarning)
def test_parseable_output_regression(self):
output = six.StringIO()
with warnings.catch_warnings(record=True):
linter = PyLinter(reporter=ParseableTextReporter())
checkers.initialize(linter)
linter.config.persistent = 0
linter.reporter.set_output(output)
linter.set_option('output-format', 'parseable')
linter.open()
linter.set_current_module('0123')
linter.add_message('line-too-long', line=1, args=(1, 2))
self.assertMultiLineEqual(output.getvalue(),
'************* Module 0123\n'
'0123:1: [C0301(line-too-long), ] '
'Line too long (1/2)\n')
def test_html_reporter_msg_template(self):
expected = '''
<html>
<body>
<div>
<div>
<h2>Messages</h2>
<table>
<tr class="header">
<th>category</th>
<th>msg_id</th>
</tr>
<tr class="even">
<td>warning</td>
<td>W0332</td>
</tr>
</table>
</div>
</div>
</body>
</html>'''.strip().splitlines()
output = six.StringIO()
linter = PyLinter(reporter=HTMLReporter())
checkers.initialize(linter)
linter.config.persistent = 0
linter.reporter.set_output(output)
linter.set_option('msg-template', '{category}{msg_id}')
linter.open()
linter.set_current_module('0123')
linter.add_message('lowercase-l-suffix', line=1)
linter.reporter.display_results(Section())
self.assertEqual(output.getvalue().splitlines(), expected)
@unittest.expectedFailure
def test_html_reporter_type(self):
# Integration test for issue #263
# https://bitbucket.org/logilab/pylint/issue/263/html-report-type-problems
expected = '''<html>
<body>
<div>
<div>
<h2>Messages</h2>
<table>
<tr class="header">
<th>type</th>
<th>module</th>
<th>object</th>
<th>line</th>
<th>col_offset</th>
<th>message</th>
</tr>
<tr class="even">
<td>convention</td>
<td>0123</td>
<td> </td>
<td>1</td>
<td>0</td>
<td>Exactly one space required before comparison
a< 5: print "zero"</td>
</tr>
</table>
</div>
</div>
</body>
</html>
'''
output = six.StringIO()
linter = PyLinter(reporter=HTMLReporter())
checkers.initialize(linter)
linter.config.persistent = 0
linter.reporter.set_output(output)
linter.open()
linter.set_current_module('0123')
linter.add_message('bad-whitespace', line=1,
args=('Exactly one', 'required', 'before',
'comparison', 'a< 5: print "zero"'))
linter.reporter.display_results(Section())
self.assertMultiLineEqual(output.getvalue(), expected)
if __name__ == '__main__':
unittest.main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
projectkasir/wsgi.py | """
WSGI config for projectkasir project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "projectkasir.settings")
application = get_wsgi_application()
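# A typical way to serve this application in production (illustrative only;
# it assumes gunicorn is installed, which is not a dependency of this module):
#
#   gunicorn projectkasir.wsgi:application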
| []
| []
| []
| [] | [] | python | 0 | 0 | |
providers/dns/bluecat/bluecat_test.go | package bluecat
import (
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
var (
bluecatLiveTest bool
bluecatServer string
bluecatUserName string
bluecatPassword string
bluecatConfigName string
bluecatDNSView string
bluecatDomain string
)
func init() {
bluecatServer = os.Getenv("BLUECAT_SERVER_URL")
bluecatUserName = os.Getenv("BLUECAT_USER_NAME")
bluecatPassword = os.Getenv("BLUECAT_PASSWORD")
bluecatDomain = os.Getenv("BLUECAT_DOMAIN")
bluecatConfigName = os.Getenv("BLUECAT_CONFIG_NAME")
bluecatDNSView = os.Getenv("BLUECAT_DNS_VIEW")
if len(bluecatServer) > 0 &&
len(bluecatDomain) > 0 &&
len(bluecatUserName) > 0 &&
len(bluecatPassword) > 0 &&
len(bluecatConfigName) > 0 &&
len(bluecatDNSView) > 0 {
bluecatLiveTest = true
}
}
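// A minimal sketch of the environment needed to enable the live tests below
// (all values are placeholders, not real endpoints or credentials):
//
//	export BLUECAT_SERVER_URL=https://bluecat.example.com
//	export BLUECAT_USER_NAME=apiuser
//	export BLUECAT_PASSWORD=secret
//	export BLUECAT_DOMAIN=example.com
//	export BLUECAT_CONFIG_NAME=defaultConfig
//	export BLUECAT_DNS_VIEW=internal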
func TestLiveBluecatPresent(t *testing.T) {
if !bluecatLiveTest {
t.Skip("skipping live test")
}
provider, err := NewDNSProvider()
assert.NoError(t, err)
err = provider.Present(bluecatDomain, "", "123d==")
assert.NoError(t, err)
}
func TestLiveBluecatCleanUp(t *testing.T) {
if !bluecatLiveTest {
t.Skip("skipping live test")
}
time.Sleep(time.Second * 1)
provider, err := NewDNSProvider()
assert.NoError(t, err)
err = provider.CleanUp(bluecatDomain, "", "123d==")
assert.NoError(t, err)
}
| [
"\"BLUECAT_SERVER_URL\"",
"\"BLUECAT_USER_NAME\"",
"\"BLUECAT_PASSWORD\"",
"\"BLUECAT_DOMAIN\"",
"\"BLUECAT_CONFIG_NAME\"",
"\"BLUECAT_DNS_VIEW\""
]
| []
| [
"BLUECAT_CONFIG_NAME",
"BLUECAT_USER_NAME",
"BLUECAT_DNS_VIEW",
"BLUECAT_PASSWORD",
"BLUECAT_SERVER_URL",
"BLUECAT_DOMAIN"
]
| [] | ["BLUECAT_CONFIG_NAME", "BLUECAT_USER_NAME", "BLUECAT_DNS_VIEW", "BLUECAT_PASSWORD", "BLUECAT_SERVER_URL", "BLUECAT_DOMAIN"] | go | 6 | 0 | |
MiSiCNet_SimGit.py | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 5 19:13:50 2021
@author: behnood
"""
#from __future__ import print_function
import matplotlib.pyplot as plt
#%matplotlib inline
# from numpy import linalg as LA
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '3'
import numpy as np
#from models import *
#import math
import torch
import torch.optim
import torch.nn as nn
# from skimage.measure import compare_psnr
# from skimage.measure import compare_mse
#from utils.denoising_utils import *
# from skimage._shared import *
# from skimage.util import *
# from skimage.metrics.simple_metrics import _as_floats
# from skimage.metrics.simple_metrics import mean_squared_error
#from UtilityMine import add_noise
# from UtilityMine import find_endmember
# from UtilityMine import add_noise
from UtilityMine import *
# from VCA import *
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
dtype = torch.cuda.FloatTensor
PLOT = True
#%% Load image
import scipy.io
import scipy.linalg
#%%
fname2 = "HS Data/Sim1/Y_clean.mat"
mat2 = scipy.io.loadmat(fname2)
img_np_gt = mat2["Y_clean"]
img_np_gt = img_np_gt.transpose(2,0,1)
[p1, nr1, nc1] = img_np_gt.shape
#%%
# fname3 = "C:/Users/behnood/Desktop/VMCNN/Easy/A_true.mat"
# mat3 = scipy.io.loadmat(fname3)
# A_true_np = mat3["A_true"]
# A_true_np = A_true_np.transpose(2,0,1)
#%%
# fname4 = "C:/Users/behnood/Desktop/VMCNN/Easy/E.mat"
# mat4 = scipy.io.loadmat(fname4)
# E_np = mat4["E"]
rmax = 6  # E_np.shape[1]
#%%
npar=np.zeros((1,4))
npar[0,0]=17.5
npar[0,1]=55.5
npar[0,2]=175
npar[0,3]=555
tol1=npar.shape[1]
tol2=1
save_result=False
from tqdm import tqdm
for fi in tqdm(range(tol1)):
for fj in tqdm(range(tol2)):
#%%
#img_noisy_np = get_noisy_image(img_np_gt, 1/10)
img_noisy_np = add_noise(img_np_gt, 1/npar[0,fi])#11.55 20 dB, 36.7 30 dB, 116.5 40 dB
#print(compare_snr(img_np_gt, img_noisy_np))
img_resh=np.reshape(img_noisy_np,(p1,nr1*nc1))
V, SS, U = scipy.linalg.svd(img_resh, full_matrices=False)
PC=np.diag(SS)@U
# img_resh_DN=V[:,:rmax]@PC[:rmax,:]
img_resh_DN=V[:,:rmax]@V[:,:rmax].transpose(1,0)@img_resh
img_resh_np_clip=np.clip(img_resh_DN, 0, 1)
II,III = Endmember_extract(img_resh_np_clip,rmax)
E_np1=img_resh_np_clip[:,II]
#%% Set up Simulated
INPUT = 'noise' # 'meshgrid'
pad = 'reflection'
need_bias=True
OPT_OVER = 'net'
#
LR1 = 0.001
show_every = 100
exp_weight=0.99
num_iter1 = 8000
input_depth = img_noisy_np.shape[0]
class CAE_EndEst(nn.Module):
def __init__(self):
super(CAE_EndEst, self).__init__()
self.conv1 = nn.Sequential(
conv(input_depth, 256,3,1,bias=need_bias, pad=pad),
nn.BatchNorm2d(256,eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.LeakyReLU(0.1, inplace=True),
)
self.conv2 = nn.Sequential(
conv(256, 256,3,1,bias=need_bias, pad=pad),
nn.BatchNorm2d(256,eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.LeakyReLU(0.1, inplace=True),
)
self.conv3 = nn.Sequential(
conv(input_depth, 4, 1,1,bias=need_bias, pad=pad),
nn.BatchNorm2d(4,eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.LeakyReLU(0.1, inplace=True),
)
self.dconv2 = nn.Sequential(
nn.Upsample(scale_factor=1),
conv(260, 256, 3,1,bias=need_bias, pad=pad),
nn.BatchNorm2d(256,eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.LeakyReLU(0.1, inplace=True),
)
self.dconv3 = nn.Sequential(
nn.Upsample(scale_factor=1),
conv(256, rmax, 3,1,bias=need_bias, pad=pad),
nn.BatchNorm2d(rmax,eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
                    nn.Softmax(dim=1),  # softmax across the rmax abundance channels
)
self.dconv4 = nn.Sequential(
nn.Linear(rmax, p1,bias=False),
)
def forward(self, x):
x1 = self.conv3(x)
x = self.conv1(x)
x = torch.cat([x,x1], 1)
x = self.dconv2(x)
x2 = self.dconv3(x)
x3 = torch.transpose(x2.view((rmax,nr1*nc1)),0,1)
x3 = self.dconv4(x3)
return x2,x3
net1 = CAE_EndEst()
net1.cuda()
# Loss
def my_loss(target, End2, lamb, out_):
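            # A sketch of what this computes, in matrix terms:
            #   L = 0.5 * ||Y - E A||_F^2 + lamb * ||E B - mean(Y)||_F^2
            # with Y the (denoised) input cube, A the abundances from the net,
            # E the endmember matrix held in dconv4's weight, and B = I here.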
loss1 = 0.5*torch.norm((out_.transpose(1,0).view(1,p1,nr1,nc1) - target), 'fro')**2
O = torch.mean(target.view(p1,nr1*nc1),1).type(dtype).view(p1,1)
B = torch.from_numpy(np.identity(rmax)).type(dtype)
loss2 = torch.norm(torch.mm(End2,B.view((rmax,rmax)))-O, 'fro')**2
return loss1+lamb*loss2
img_noisy_torch = torch.from_numpy(img_resh_DN).view(1,p1,nr1,nc1).type(dtype)
net_input1 = get_noise(input_depth, INPUT,
(img_noisy_np.shape[1], img_noisy_np.shape[2])).type(dtype).detach()
E_torch = torch.from_numpy(E_np1).type(dtype)
#%%
# net_input_saved = net_input1.detach().clone()
# noise = net_input1.detach().clone()
        out_avg = None  # running (smoothed) output; initialized on the first iteration
i = 0
def closure1():
global i, out_LR_np, out_avg, out_avg_np, Eest
out_LR,out_spec = net1(net_input1)
# out_HR=torch.mm(E_torch.view(p1,rmax),out_LR.view(rmax,nr1*nc1))
# Smoothing
if out_avg is None:
out_avg = out_LR.detach()
# out_HR_avg = out_HR.detach()
else:
out_avg = out_avg * exp_weight + out_LR.detach() * (1 - exp_weight)
# out_HR_avg = out_HR_avg * exp_weight + out_HR.detach() * (1 - exp_weight)
#%%
total_loss = my_loss(img_noisy_torch, net1.dconv4[0].weight,.1,out_spec)
total_loss.backward()
# print ('Iteration %05d Loss %f RMSE_LR: %f RMSE_LR_avg: %f SRE: %f SRE_avg: %f' % (i, total_loss.item(), RMSE_LR, RMSE_LR_avg, SRE, SRE_avg), '\r', end='')
if PLOT and i % show_every == 0:
out_LR_np = out_LR.detach().cpu().squeeze().numpy()
out_avg_np = out_avg.detach().cpu().squeeze().numpy()
out_LR_np = np.clip(out_LR_np, 0, 1)
out_avg_np = np.clip(out_avg_np, 0, 1)
f, ((ax1, ax2),(ax3, ax4)) = plt.subplots(2, 2, sharey=True, figsize=(10,10))
ax1.imshow(np.stack((out_LR_np[2,:,:],out_LR_np[1,:,:],out_LR_np[0,:,:]),2))
ax2.imshow(np.stack((out_LR_np[5,:,:],out_LR_np[4,:,:],out_LR_np[3,:,:]),2))
ax3.imshow(np.stack((out_avg_np[2,:,:],out_avg_np[1,:,:],out_avg_np[0,:,:]),2))
ax4.imshow(np.stack((out_avg_np[5,:,:],out_avg_np[4,:,:],out_avg_np[3,:,:]),2))
plt.show()
i += 1
return total_loss
net1.dconv4[0].weight=torch.nn.Parameter(E_torch.view(p1,rmax))
p11 = get_params(OPT_OVER, net1, net_input1)
optimizer = torch.optim.Adam(p11, lr=LR1, betas=(0.9, 0.999), eps=1e-8,
weight_decay= 0, amsgrad=False)
for j in range(num_iter1):
optimizer.zero_grad()
closure1()
optimizer.step()
net1.dconv4[0].weight.data[net1.dconv4[0].weight <= 0] = 0
net1.dconv4[0].weight.data[net1.dconv4[0].weight >= 1] = 1
            if j > 0:
Eest=net1.dconv4[0].weight.detach().cpu().squeeze().numpy()
if PLOT and j % show_every== 0:
plt.plot(Eest)
plt.show()
out_avg_np = out_avg.detach().cpu().squeeze().numpy()
#%%
if save_result is True:
scipy.io.savemat("Result/EestdB%01d%01d.mat" % (fi+2, fj+1),
{'Eest%01d%01d' % (fi+2, fj+1):Eest})
scipy.io.savemat("Result/out_avg_npdB%01d%01d.mat" % (fi+2, fj+1),
{'out_avg_np%01d%01d' % (fi+2, fj+1):out_avg_np.transpose(1,2,0)})
#
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
vendor/k8s.io/client-go/util/jsonpath/parser.go | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package jsonpath
import (
"errors"
"fmt"
"regexp"
"strconv"
"strings"
"unicode"
"unicode/utf8"
)
const eof = -1
const (
leftDelim = "{"
rightDelim = "}"
)
type Parser struct {
Name string
Root *ListNode
input string
cur *ListNode
pos int
start int
width int
}
var (
ErrSyntax = errors.New("invalid syntax")
dictKeyRex = regexp.MustCompile(`^'([^']*)'$`)
sliceOperatorRex = regexp.MustCompile(`^(-?[\d]*)(:-?[\d]*)?(:[\d]*)?$`)
)
// Parse parses the given text and returns a node Parser.
// If an error is encountered, parsing stops and a nil
// Parser is returned with the error.
func Parse(name, text string) (*Parser, error) {
p := NewParser(name)
err := p.Parse(text)
if err != nil {
p = nil
}
return p, err
}
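// For illustration (the syntax as used e.g. by kubectl's jsonpath output;
// these templates are examples, not an exhaustive grammar):
//
//	"{.items[0].metadata.name}"                 selects a single field
//	"{range .items[*]}{.metadata.name} {end}"   iterates over a list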
func NewParser(name string) *Parser {
return &Parser{
Name: name,
}
}
// parseAction parses the expression inside the delimiters
func parseAction(name, text string) (*Parser, error) {
p, err := Parse(name, fmt.Sprintf("%s%s%s", leftDelim, text, rightDelim))
// when error happens, p will be nil, so we need to return here
if err != nil {
return p, err
}
p.Root = p.Root.Nodes[0].(*ListNode)
return p, nil
}
func (p *Parser) Parse(text string) error {
p.input = text
p.Root = newList()
p.pos = 0
return p.parseText(p.Root)
}
// consumeText returns the text parsed since the last consumeText call
func (p *Parser) consumeText() string {
value := p.input[p.start:p.pos]
p.start = p.pos
return value
}
// next returns the next rune in the input.
func (p *Parser) next() rune {
if int(p.pos) >= len(p.input) {
p.width = 0
return eof
}
r, w := utf8.DecodeRuneInString(p.input[p.pos:])
p.width = w
p.pos += p.width
return r
}
// peek returns but does not consume the next rune in the input.
func (p *Parser) peek() rune {
r := p.next()
p.backup()
return r
}
// backup steps back one rune. Can only be called once per call of next.
func (p *Parser) backup() {
p.pos -= p.width
}
func (p *Parser) parseText(cur *ListNode) error {
for {
if strings.HasPrefix(p.input[p.pos:], leftDelim) {
if p.pos > p.start {
cur.append(newText(p.consumeText()))
}
return p.parseLeftDelim(cur)
}
if p.next() == eof {
break
}
}
// Correctly reached EOF.
if p.pos > p.start {
cur.append(newText(p.consumeText()))
}
return nil
}
// parseLeftDelim scans the left delimiter, which is known to be present.
func (p *Parser) parseLeftDelim(cur *ListNode) error {
p.pos += len(leftDelim)
p.consumeText()
newNode := newList()
cur.append(newNode)
cur = newNode
return p.parseInsideAction(cur)
}
func (p *Parser) parseInsideAction(cur *ListNode) error {
prefixMap := map[string]func(*ListNode) error{
rightDelim: p.parseRightDelim,
"[?(": p.parseFilter,
"..": p.parseRecursive,
}
for prefix, parseFunc := range prefixMap {
if strings.HasPrefix(p.input[p.pos:], prefix) {
return parseFunc(cur)
}
}
switch r := p.next(); {
case r == eof || isEndOfLine(r):
return fmt.Errorf("unclosed action")
case r == ' ':
p.consumeText()
case r == '@' || r == '$': //the current object, just pass it
p.consumeText()
case r == '[':
return p.parseArray(cur)
case r == '"' || r == '\'':
return p.parseQuote(cur, r)
case r == '.':
return p.parseField(cur)
case r == '+' || r == '-' || unicode.IsDigit(r):
p.backup()
return p.parseNumber(cur)
case isAlphaNumeric(r):
p.backup()
return p.parseIdentifier(cur)
default:
return fmt.Errorf("unrecognized character in action: %#U", r)
}
return p.parseInsideAction(cur)
}
// parseRightDelim scans the right delimiter, which is known to be present.
func (p *Parser) parseRightDelim(cur *ListNode) error {
p.pos += len(rightDelim)
p.consumeText()
cur = p.Root
return p.parseText(cur)
}
// parseIdentifier scans built-in keywords, like "range" and "end"
func (p *Parser) parseIdentifier(cur *ListNode) error {
var r rune
for {
r = p.next()
if isTerminator(r) {
p.backup()
break
}
}
value := p.consumeText()
if isBool(value) {
v, err := strconv.ParseBool(value)
if err != nil {
return fmt.Errorf("can not parse bool '%s': %s", value, err.Error())
}
cur.append(newBool(v))
} else {
cur.append(newIdentifier(value))
}
return p.parseInsideAction(cur)
}
// parseRecursive scans the recursive descent operator ".."
func (p *Parser) parseRecursive(cur *ListNode) error {
p.pos += len("..")
p.consumeText()
cur.append(newRecursive())
if r := p.peek(); isAlphaNumeric(r) {
return p.parseField(cur)
}
return p.parseInsideAction(cur)
}
// parseNumber scans number
func (p *Parser) parseNumber(cur *ListNode) error {
r := p.peek()
if r == '+' || r == '-' {
r = p.next()
}
for {
r = p.next()
if r != '.' && !unicode.IsDigit(r) {
p.backup()
break
}
}
value := p.consumeText()
i, err := strconv.Atoi(value)
if err == nil {
cur.append(newInt(i))
return p.parseInsideAction(cur)
}
d, err := strconv.ParseFloat(value, 64)
if err == nil {
cur.append(newFloat(d))
return p.parseInsideAction(cur)
}
return fmt.Errorf("cannot parse number %s", value)
}
// parseArray scans array index selection
func (p *Parser) parseArray(cur *ListNode) error {
Loop:
for {
switch p.next() {
case eof, '\n':
return fmt.Errorf("unterminated array")
case ']':
break Loop
}
}
text := p.consumeText()
text = string(text[1 : len(text)-1])
if text == "*" {
text = ":"
}
//union operator
strs := strings.Split(text, ",")
if len(strs) > 1 {
union := []*ListNode{}
for _, str := range strs {
parser, err := parseAction("union", fmt.Sprintf("[%s]", strings.Trim(str, " ")))
if err != nil {
return err
}
union = append(union, parser.Root)
}
cur.append(newUnion(union))
return p.parseInsideAction(cur)
}
// dict key
value := dictKeyRex.FindStringSubmatch(text)
if value != nil {
parser, err := parseAction("arraydict", fmt.Sprintf(".%s", value[1]))
if err != nil {
return err
}
for _, node := range parser.Root.Nodes {
cur.append(node)
}
return p.parseInsideAction(cur)
}
//slice operator
value = sliceOperatorRex.FindStringSubmatch(text)
if value == nil {
return fmt.Errorf("invalid array index %s", text)
}
value = value[1:]
params := [3]ParamsEntry{}
for i := 0; i < 3; i++ {
if value[i] != "" {
if i > 0 {
value[i] = value[i][1:]
}
if i > 0 && value[i] == "" {
params[i].Known = false
} else {
var err error
params[i].Known = true
params[i].Value, err = strconv.Atoi(value[i])
if err != nil {
return fmt.Errorf("array index %s is not a number", value[i])
}
}
} else {
if i == 1 {
params[i].Known = true
params[i].Value = params[0].Value + 1
} else {
params[i].Known = false
params[i].Value = 0
}
}
}
cur.append(newArray(params))
return p.parseInsideAction(cur)
}
// parseFilter scans filter inside array selection
func (p *Parser) parseFilter(cur *ListNode) error {
p.pos += len("[?(")
p.consumeText()
begin := false
end := false
var pair rune
Loop:
for {
r := p.next()
switch r {
case eof, '\n':
return fmt.Errorf("unterminated filter")
case '"', '\'':
if begin == false {
//save the paired rune
begin = true
pair = r
continue
}
//only add when met paired rune
if p.input[p.pos-2] != '\\' && r == pair {
end = true
}
case ')':
			//in the right-hand expression parsed below, quotes appear either zero times or once
			//and must be paired at the beginning and end
if begin == end {
break Loop
}
}
}
if p.next() != ']' {
return fmt.Errorf("unclosed array expect ]")
}
reg := regexp.MustCompile(`^([^!<>=]+)([!<>=]+)(.+?)$`)
text := p.consumeText()
text = string(text[:len(text)-2])
value := reg.FindStringSubmatch(text)
if value == nil {
parser, err := parseAction("text", text)
if err != nil {
return err
}
cur.append(newFilter(parser.Root, newList(), "exists"))
} else {
leftParser, err := parseAction("left", value[1])
if err != nil {
return err
}
rightParser, err := parseAction("right", value[3])
if err != nil {
return err
}
cur.append(newFilter(leftParser.Root, rightParser.Root, value[2]))
}
return p.parseInsideAction(cur)
}
// parseQuote unquotes a string inside double or single quotes
func (p *Parser) parseQuote(cur *ListNode, end rune) error {
Loop:
for {
switch p.next() {
case eof, '\n':
return fmt.Errorf("unterminated quoted string")
case end:
//if it's not escape break the Loop
if p.input[p.pos-2] != '\\' {
break Loop
}
}
}
value := p.consumeText()
s, err := UnquoteExtend(value)
if err != nil {
return fmt.Errorf("unquote string %s error %v", value, err)
}
cur.append(newText(s))
return p.parseInsideAction(cur)
}
// parseField scans a field until a terminator
func (p *Parser) parseField(cur *ListNode) error {
p.consumeText()
for p.advance() {
}
value := p.consumeText()
if value == "*" {
cur.append(newWildcard())
} else {
cur.append(newField(strings.Replace(value, "\\", "", -1)))
}
return p.parseInsideAction(cur)
}
// advance scans until next non-escaped terminator
func (p *Parser) advance() bool {
r := p.next()
if r == '\\' {
p.next()
} else if isTerminator(r) {
p.backup()
return false
}
return true
}
// isTerminator reports whether the input is at a valid termination character to appear after an identifier.
func isTerminator(r rune) bool {
if isSpace(r) || isEndOfLine(r) {
return true
}
switch r {
case eof, '.', ',', '[', ']', '$', '@', '{', '}':
return true
}
return false
}
// isSpace reports whether r is a space character.
func isSpace(r rune) bool {
return r == ' ' || r == '\t'
}
// isEndOfLine reports whether r is an end-of-line character.
func isEndOfLine(r rune) bool {
return r == '\r' || r == '\n'
}
// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
func isAlphaNumeric(r rune) bool {
return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
}
// isBool reports whether s is a boolean value.
func isBool(s string) bool {
return s == "true" || s == "false"
}
// UnquoteExtend is almost the same as strconv.Unquote(), but it also supports parsing a single-quoted string
func UnquoteExtend(s string) (string, error) {
n := len(s)
if n < 2 {
return "", ErrSyntax
}
quote := s[0]
if quote != s[n-1] {
return "", ErrSyntax
}
s = s[1 : n-1]
if quote != '"' && quote != '\'' {
return "", ErrSyntax
}
// Is it trivial? Avoid allocation.
if !contains(s, '\\') && !contains(s, quote) {
return s, nil
}
var runeTmp [utf8.UTFMax]byte
buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
for len(s) > 0 {
c, multibyte, ss, err := strconv.UnquoteChar(s, quote)
if err != nil {
return "", err
}
s = ss
if c < utf8.RuneSelf || !multibyte {
buf = append(buf, byte(c))
} else {
n := utf8.EncodeRune(runeTmp[:], c)
buf = append(buf, runeTmp[:n]...)
}
}
return string(buf), nil
}
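// For example (a sketch): UnquoteExtend(`'hi'`) returns "hi", whereas
// strconv.Unquote rejects it, since a single-quoted Go literal may only
// contain one character.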
func contains(s string, c byte) bool {
for i := 0; i < len(s); i++ {
if s[i] == c {
return true
}
}
return false
}
| []
| []
| []
| [] | [] | go | null | null | null |
pgo-backrest/pgo-backrest.go | package main
/*
Copyright 2019 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import (
"flag"
"os"
"strconv"
"strings"
"time"
crv1 "github.com/crunchydata/postgres-operator/apis/cr/v1"
"github.com/crunchydata/postgres-operator/kubeapi"
log "github.com/sirupsen/logrus"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
var Clientset *kubernetes.Clientset
const backrestCommand = "pgbackrest"
const backrestBackupCommand = `backup`
const backrestInfoCommand = `info`
const backrestStanzaCreateCommand = `stanza-create`
const containername = "database"
const repoTypeFlagS3 = "--repo-type=s3"
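// The container is driven entirely by environment variables; a minimal sketch
// (values are illustrative, not defaults) of what main() below expects:
//
//	NAMESPACE=pgo
//	COMMAND=<one of the crv1.PgtaskBackrest* task names handled in the switch>
//	COMMAND_OPTS=--stanza=db
//	PODNAME=mycluster-backrest-shared-repo-<hash>
//	PGBACKREST_REPO_TYPE=posix     # or s3
//	BACKREST_LOCAL_AND_S3_STORAGE=false
//	CRUNCHY_DEBUG=false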
func main() {
log.Info("pgo-backrest starts")
kubeconfig := flag.String("kubeconfig", "", "Path to a kube config. Only required if out-of-cluster.")
flag.Parse()
debugFlag := os.Getenv("CRUNCHY_DEBUG")
if debugFlag == "true" {
log.SetLevel(log.DebugLevel)
log.Debug("debug flag set to true")
} else {
log.Info("debug flag set to false")
}
Namespace := os.Getenv("NAMESPACE")
log.Debugf("setting NAMESPACE to %s", Namespace)
if Namespace == "" {
log.Error("NAMESPACE env var not set")
os.Exit(2)
}
COMMAND := os.Getenv("COMMAND")
log.Debugf("setting COMMAND to %s", COMMAND)
if COMMAND == "" {
log.Error("COMMAND env var not set")
os.Exit(2)
}
COMMAND_OPTS := os.Getenv("COMMAND_OPTS")
log.Debugf("setting COMMAND_OPTS to %s", COMMAND_OPTS)
PODNAME := os.Getenv("PODNAME")
log.Debugf("setting PODNAME to %s", PODNAME)
if PODNAME == "" {
log.Error("PODNAME env var not set")
os.Exit(2)
}
REPO_TYPE := os.Getenv("PGBACKREST_REPO_TYPE")
log.Debugf("setting REPO_TYPE to %s", REPO_TYPE)
BACKREST_LOCAL_AND_S3_STORAGE, err := strconv.ParseBool(os.Getenv("BACKREST_LOCAL_AND_S3_STORAGE"))
if err != nil {
panic(err)
}
log.Debugf("setting BACKREST_LOCAL_AND_S3_STORAGE to %s", BACKREST_LOCAL_AND_S3_STORAGE)
config, err := buildConfig(*kubeconfig)
if err != nil {
panic(err)
}
Clientset, err = kubernetes.NewForConfig(config)
if err != nil {
log.Info("error creating Clientset")
panic(err.Error())
}
bashcmd := make([]string, 1)
bashcmd[0] = "bash"
cmdStrs := make([]string, 0)
switch COMMAND {
case crv1.PgtaskBackrestStanzaCreate:
log.Info("backrest stanza-create command requested")
		log.Info("sleeping 30 seconds to avoid race with PG during startup")
		time.Sleep(time.Second * time.Duration(30))
cmdStrs = append(cmdStrs, backrestCommand)
cmdStrs = append(cmdStrs, backrestStanzaCreateCommand)
cmdStrs = append(cmdStrs, COMMAND_OPTS)
case crv1.PgtaskBackrestInfo:
log.Info("backrest info command requested")
cmdStrs = append(cmdStrs, backrestCommand)
cmdStrs = append(cmdStrs, backrestInfoCommand)
cmdStrs = append(cmdStrs, COMMAND_OPTS)
case crv1.PgtaskBackrestBackup:
log.Info("backrest backup command requested")
cmdStrs = append(cmdStrs, backrestCommand)
cmdStrs = append(cmdStrs, backrestBackupCommand)
cmdStrs = append(cmdStrs, COMMAND_OPTS)
default:
log.Error("unsupported backup command specified " + COMMAND)
os.Exit(2)
}
if BACKREST_LOCAL_AND_S3_STORAGE {
firstCmd := cmdStrs
cmdStrs = append(cmdStrs, "&&")
cmdStrs = append(cmdStrs, strings.Join(firstCmd, " "))
cmdStrs = append(cmdStrs, repoTypeFlagS3)
log.Info("backrest command will be executed for both local and s3 storage")
} else if REPO_TYPE == "s3" {
cmdStrs = append(cmdStrs, repoTypeFlagS3)
log.Info("s3 flag enabled for backrest command")
}
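// Illustrative example (values assumed): with BACKREST_LOCAL_AND_S3_STORAGE=true the
// joined command becomes "pgbackrest <action> <opts> && pgbackrest <action> <opts> --repo-type=s3",
// so the same backrest action runs once against the local repo and once against s3.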
log.Infof("command to execute is [%s]", strings.Join(cmdStrs, " "))
log.Infof("command is %s ", strings.Join(cmdStrs, " "))
reader := strings.NewReader(strings.Join(cmdStrs, " "))
output, stderr, err := kubeapi.ExecToPodThroughAPI(config, Clientset, bashcmd, containername, PODNAME, Namespace, reader)
if err != nil {
log.Info("output=[" + output + "]")
log.Info("stderr=[" + stderr + "]")
log.Error(err)
os.Exit(2)
}
log.Info("output=[" + output + "]")
log.Info("stderr=[" + stderr + "]")
log.Info("pgo-backrest ends")
}
func buildConfig(kubeconfig string) (*rest.Config, error) {
if kubeconfig != "" {
return clientcmd.BuildConfigFromFlags("", kubeconfig)
}
return rest.InClusterConfig()
}
| [
"\"CRUNCHY_DEBUG\"",
"\"NAMESPACE\"",
"\"COMMAND\"",
"\"COMMAND_OPTS\"",
"\"PODNAME\"",
"\"PGBACKREST_REPO_TYPE\"",
"\"BACKREST_LOCAL_AND_S3_STORAGE\""
]
| []
| [
"PODNAME",
"CRUNCHY_DEBUG",
"PGBACKREST_REPO_TYPE",
"NAMESPACE",
"BACKREST_LOCAL_AND_S3_STORAGE",
"COMMAND",
"COMMAND_OPTS"
]
| [] | ["PODNAME", "CRUNCHY_DEBUG", "PGBACKREST_REPO_TYPE", "NAMESPACE", "BACKREST_LOCAL_AND_S3_STORAGE", "COMMAND", "COMMAND_OPTS"] | go | 7 | 0 | |
pkg/helm/helm_template.go | package helm
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"k8s.io/helm/pkg/chartutil"
"k8s.io/helm/pkg/proto/hapi/chart"
"github.com/jenkins-x/jx/v2/pkg/kube"
"github.com/jenkins-x/jx/v2/pkg/log"
"github.com/jenkins-x/jx/v2/pkg/util"
"github.com/pkg/errors"
yaml "gopkg.in/yaml.v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
const (
// AnnotationChartName stores the chart name
AnnotationChartName = "jenkins.io/chart"
// AnnotationAppVersion stores the chart's app version
AnnotationAppVersion = "jenkins.io/chart-app-version"
// AnnotationAppDescription stores the chart's description
AnnotationAppDescription = "jenkins.io/chart-description"
// AnnotationAppRepository stores the chart's app repository
AnnotationAppRepository = "jenkins.io/chart-repository"
// LabelReleaseName stores the chart release name
LabelReleaseName = "jenkins.io/chart-release"
// LabelNamespace stores the chart namespace for cluster wide resources
LabelNamespace = "jenkins.io/namespace"
// LabelReleaseChartVersion stores the version of a chart installation in a label
LabelReleaseChartVersion = "jenkins.io/version"
// LabelAppName stores the chart's app name
LabelAppName = "jenkins.io/app-name"
// LabelAppVersion stores the chart's app version
LabelAppVersion = "jenkins.io/app-version"
hookFailed = "hook-failed"
hookSucceeded = "hook-succeeded"
// resourcesSeparator is used to separate multiple objects stored in the same YAML file
resourcesSeparator = "---"
)
// HelmTemplate implements common helm actions but purely as client side operations
// delegating a separate Helmer such as HelmCLI for the client side operations
type HelmTemplate struct {
Client *HelmCLI
WorkDir string
CWD string
Binary string
Runner util.Commander
KubectlValidate bool
KubeClient kubernetes.Interface
Namespace string
}
// NewHelmTemplate creates a new HelmTemplate instance configured to the given client side Helmer
func NewHelmTemplate(client *HelmCLI, workDir string, kubeClient kubernetes.Interface, ns string) *HelmTemplate {
cli := &HelmTemplate{
Client: client,
WorkDir: workDir,
Runner: client.Runner,
Binary: "kubectl",
CWD: client.CWD,
KubectlValidate: false,
KubeClient: kubeClient,
Namespace: ns,
}
return cli
}
type HelmHook struct {
Kind string
Name string
File string
Hooks []string
HookDeletePolicies []string
}
// SetHost is used to point at a locally running tiller
func (h *HelmTemplate) SetHost(tillerAddress string) {
// NOOP
}
// SetCWD configures the common working directory of helm CLI
func (h *HelmTemplate) SetCWD(dir string) {
h.Client.SetCWD(dir)
h.CWD = dir
}
// HelmBinary return the configured helm CLI
func (h *HelmTemplate) HelmBinary() string {
return h.Client.HelmBinary()
}
// SetHelmBinary configure a new helm CLI
func (h *HelmTemplate) SetHelmBinary(binary string) {
h.Client.SetHelmBinary(binary)
}
// Init executes the helm init command according with the given flags
func (h *HelmTemplate) Init(clientOnly bool, serviceAccount string, tillerNamespace string, upgrade bool) error {
return h.Client.Init(true, serviceAccount, tillerNamespace, upgrade)
}
// AddRepo adds a new helm repo with the given name and URL
func (h *HelmTemplate) AddRepo(repo, URL, username, password string) error {
return h.Client.AddRepo(repo, URL, username, password)
}
// RemoveRepo removes the given repo from helm
func (h *HelmTemplate) RemoveRepo(repo string) error {
return h.Client.RemoveRepo(repo)
}
// ListRepos list the installed helm repos together with their URL
func (h *HelmTemplate) ListRepos() (map[string]string, error) {
return h.Client.ListRepos()
}
// SearchCharts searches for all the charts matching the given filter
func (h *HelmTemplate) SearchCharts(filter string, allVersions bool) ([]ChartSummary, error) {
return h.Client.SearchCharts(filter, allVersions)
}
// IsRepoMissing checks if the repository with the given URL is missing from helm
func (h *HelmTemplate) IsRepoMissing(URL string) (bool, string, error) {
return h.Client.IsRepoMissing(URL)
}
// UpdateRepo updates the helm repositories
func (h *HelmTemplate) UpdateRepo() error {
return h.Client.UpdateRepo()
}
// RemoveRequirementsLock removes the requirements.lock file from the current working directory
func (h *HelmTemplate) RemoveRequirementsLock() error {
return h.Client.RemoveRequirementsLock()
}
// BuildDependency builds the helm dependencies of the helm chart from the current working directory
func (h *HelmTemplate) BuildDependency() error {
return h.Client.BuildDependency()
}
// ListReleases lists the releases in ns
func (h *HelmTemplate) ListReleases(ns string) (map[string]ReleaseSummary, []string, error) {
list, err := h.KubeClient.AppsV1().Deployments(ns).List(metav1.ListOptions{})
if err != nil {
return nil, nil, errors.WithStack(err)
}
charts := make(map[string]ReleaseSummary)
keys := make([]string, 0)
if list != nil {
for _, deploy := range list.Items {
labels := deploy.Labels
ann := deploy.Annotations
if labels != nil && ann != nil {
status := "ERROR"
if deploy.Status.Replicas > 0 {
if deploy.Status.UnavailableReplicas > 0 {
status = "PENDING"
} else {
status = "DEPLOYED"
}
}
updated := deploy.CreationTimestamp.Format("Mon Jan 2 15:04:05 2006")
chartName := ann[AnnotationChartName]
chartVersion := labels[LabelReleaseChartVersion]
releaseName := labels[LabelReleaseName]
keys = append(keys, releaseName)
charts[releaseName] = ReleaseSummary{
Chart: chartName,
ChartFullName: chartName + "-" + chartVersion,
Revision: strconv.FormatInt(deploy.Generation, 10),
Updated: updated,
Status: status,
ChartVersion: chartVersion,
ReleaseName: releaseName,
AppVersion: ann[AnnotationAppVersion],
Namespace: ns,
}
}
}
}
return charts, keys, nil
}
// FindChart find a chart in the current working directory, if no chart file is found an error is returned
func (h *HelmTemplate) FindChart() (string, error) {
return h.Client.FindChart()
}
// Lint lints the helm chart from the current working directory and returns the warnings in the output
func (h *HelmTemplate) Lint(valuesFiles []string) (string, error) {
return h.Client.Lint(valuesFiles)
}
// Env returns the environment variables for the helmer
func (h *HelmTemplate) Env() map[string]string {
return h.Client.Env()
}
// PackageChart packages the chart from the current working directory
func (h *HelmTemplate) PackageChart() error {
return h.Client.PackageChart()
}
// Version executes the helm version command and returns its output
func (h *HelmTemplate) Version(tls bool) (string, error) {
return h.Client.Version(tls)
}
// Template generates the YAML from the chart template to the given directory
func (h *HelmTemplate) Template(chart string, releaseName string, ns string, outDir string, upgrade bool, values []string, valueStrings []string,
valueFiles []string) error {
return h.Client.Template(chart, releaseName, ns, outDir, upgrade, values, valueStrings, valueFiles)
}
// Mutation API
// InstallChart installs a helm chart according with the given flags
func (h *HelmTemplate) InstallChart(chart string, releaseName string, ns string, version string, timeout int,
values []string, valueStrings []string, valueFiles []string, repo string, username string, password string) error {
err := h.clearOutputDir(releaseName)
if err != nil {
return err
}
outputDir, _, chartsDir, err := h.getDirectories(releaseName)
if err != nil {
return err
}
chartDir, err := h.fetchChart(chart, version, chartsDir, repo, username, password)
if err != nil {
return err
}
err = h.Client.Template(chartDir, releaseName, ns, outputDir, false, values, valueStrings, valueFiles)
if err != nil {
return err
}
// Skip the chart when no resources are generated by the template
if empty, err := util.IsEmpty(outputDir); empty || err != nil {
return err
}
metadata, versionText, err := h.getChart(chartDir, version)
if err != nil {
return err
}
helmHooks, err := h.addLabelsToFiles(chart, releaseName, versionText, metadata, ns)
if err != nil {
return err
}
helmCrdPhase := "crd-install"
helmPrePhase := "pre-install"
helmPostPhase := "post-install"
wait := true
create := true
force := true
err = h.runHooks(helmHooks, helmCrdPhase, ns, chart, releaseName, wait, create, force)
if err != nil {
return err
}
err = h.runHooks(helmHooks, helmPrePhase, ns, chart, releaseName, wait, create, force)
if err != nil {
return err
}
err = h.kubectlApply(ns, releaseName, wait, create, force, outputDir)
if err != nil {
err2 := h.deleteHooks(helmHooks, helmPrePhase, hookFailed, ns)
return util.CombineErrors(err, err2)
}
err = h.deleteHooks(helmHooks, helmPrePhase, hookSucceeded, ns)
if err != nil {
log.Logger().Warnf("Failed to delete the %s hook, due to: %s", helmPrePhase, err)
}
err = h.runHooks(helmHooks, helmPostPhase, ns, chart, releaseName, wait, create, force)
if err != nil {
err2 := h.deleteHooks(helmHooks, helmPostPhase, hookFailed, ns)
return util.CombineErrors(err, err2)
}
err = h.deleteHooks(helmHooks, helmPostPhase, hookSucceeded, ns)
err2 := h.deleteOldResources(ns, releaseName, versionText, wait)
return util.CombineErrors(err, err2)
}
// FetchChart fetches a Helm Chart
func (h *HelmTemplate) FetchChart(chart string, version string, untar bool, untardir string, repo string,
username string, password string) error {
_, err := h.fetchChart(chart, version, untardir, repo, username, password)
return err
}
// UpgradeChart upgrades a helm chart according with given helm flags
func (h *HelmTemplate) UpgradeChart(chart string, releaseName string, ns string, version string, install bool, timeout int, force bool, wait bool, values []string, valueStrings []string, valueFiles []string, repo string, username string, password string) error {
err := h.clearOutputDir(releaseName)
if err != nil {
return err
}
outputDir, _, chartsDir, err := h.getDirectories(releaseName)
if err != nil {
return err
}
// check if we are installing a chart from the filesystem
chartDir := filepath.Join(h.CWD, chart)
exists, err := util.DirExists(chartDir)
if err != nil {
return err
}
if !exists {
log.Logger().Debugf("Fetching chart: %s", chart)
chartDir, err = h.fetchChart(chart, version, chartsDir, repo, username, password)
if err != nil {
return err
}
}
err = h.Client.Template(chartDir, releaseName, ns, outputDir, false, values, valueStrings, valueFiles)
if err != nil {
return err
}
// Skip the chart when no resources are generated by the template
if empty, err := util.IsEmpty(outputDir); empty || err != nil {
return err
}
metadata, versionText, err := h.getChart(chartDir, version)
if err != nil {
return err
}
helmHooks, err := h.addLabelsToFiles(chart, releaseName, versionText, metadata, ns)
if err != nil {
return err
}
helmCrdPhase := "crd-install"
helmPrePhase := "pre-upgrade"
helmPostPhase := "post-upgrade"
create := false
err = h.runHooks(helmHooks, helmCrdPhase, ns, chart, releaseName, wait, create, force)
if err != nil {
return err
}
err = h.runHooks(helmHooks, helmPrePhase, ns, chart, releaseName, wait, create, force)
if err != nil {
return err
}
err = h.kubectlApply(ns, releaseName, wait, create, force, outputDir)
if err != nil {
err2 := h.deleteHooks(helmHooks, helmPrePhase, hookFailed, ns)
return util.CombineErrors(err, err2)
}
err = h.deleteHooks(helmHooks, helmPrePhase, hookSucceeded, ns)
if err != nil {
log.Logger().Warnf("Failed to delete the %s hook, due to: %s", helmPrePhase, err)
}
err = h.runHooks(helmHooks, helmPostPhase, ns, chart, releaseName, wait, create, force)
if err != nil {
err2 := h.deleteHooks(helmHooks, helmPostPhase, hookFailed, ns)
return util.CombineErrors(err, err2)
}
err = h.deleteHooks(helmHooks, helmPostPhase, hookSucceeded, ns)
err2 := h.deleteOldResources(ns, releaseName, versionText, wait)
return util.CombineErrors(err, err2)
}
func (h *HelmTemplate) DecryptSecrets(location string) error {
return h.Client.DecryptSecrets(location)
}
func (h *HelmTemplate) kubectlApply(ns string, releaseName string, wait bool, create bool, force bool, dir string) error {
namespacesDir := filepath.Join(dir, "namespaces")
if _, err := os.Stat(namespacesDir); !os.IsNotExist(err) {
fileInfo, err := ioutil.ReadDir(namespacesDir)
if err != nil {
return errors.Wrapf(err, "unable to locate subdirs in %s", namespacesDir)
}
for _, path := range fileInfo {
namespace := filepath.Base(path.Name())
fullPath := filepath.Join(namespacesDir, path.Name())
log.Logger().Debugf("Applying generated chart %q YAML via kubectl in dir: %s to namespace %s", releaseName, fullPath, namespace)
command := "apply"
if create {
command = "create"
}
args := []string{command, "--recursive", "-f", fullPath, "-l", LabelReleaseName + "=" + releaseName}
applyNs := namespace
if applyNs == "" {
applyNs = ns
}
if applyNs != "" {
args = append(args, "--namespace", applyNs)
}
if wait && !create {
args = append(args, "--wait")
}
if !h.KubectlValidate {
args = append(args, "--validate=false")
}
err = h.runKubectl(args...)
if err != nil {
return err
}
log.Logger().Info("")
}
return nil
}
log.Logger().Debugf("Applying generated chart %q YAML via kubectl in dir: %s to namespace %s", releaseName, dir, ns)
command := "apply"
if create {
command = "create"
}
args := []string{command, "--recursive", "-f", dir, "-l", LabelReleaseName + "=" + releaseName}
if ns != "" {
args = append(args, "--namespace", ns)
}
if wait && !create {
args = append(args, "--wait")
}
if force {
args = append(args, "--force")
}
if !h.KubectlValidate {
args = append(args, "--validate=false")
}
err := h.runKubectl(args...)
if err != nil {
return err
}
return nil
}
func (h *HelmTemplate) kubectlApplyFile(ns string, helmHook string, wait bool, create bool, force bool, file string) error {
log.Logger().Debugf("Applying Helm hook %s YAML via kubectl in file: %s", helmHook, file)
command := "apply"
if create {
command = "create"
}
args := []string{command, "-f", file}
if ns != "" {
args = append(args, "--namespace", ns)
}
if wait && !create {
args = append(args, "--wait")
}
if force {
args = append(args, "--force")
}
if !h.KubectlValidate {
args = append(args, "--validate=false")
}
err := h.runKubectl(args...)
return err
}
func (h *HelmTemplate) kubectlDeleteFile(ns string, file string) error {
log.Logger().Debugf("Deleting helm hook sources from file: %s", file)
return h.runKubectl("delete", "-f", file, "--namespace", ns, "--wait")
}
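// deleteOldResources removes resources labelled with this release name but a chart
// version other than versionText, i.e. leftovers from previous releases.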
func (h *HelmTemplate) deleteOldResources(ns string, releaseName string, versionText string, wait bool) error {
selector := LabelReleaseName + "=" + releaseName + "," + LabelReleaseChartVersion + "!=" + versionText
err := h.deleteNamespacedResourcesBySelector(ns, selector, wait, "older releases")
if err != nil {
return err
}
return h.deleteClusterResourcesBySelector(ns, selector, wait, "older releases")
}
func (h *HelmTemplate) deleteNamespacedResourcesBySelector(ns string, selector string, wait bool, message string) error {
kinds := []string{"pvc", "configmap", "release", "sa", "role", "rolebinding", "secret"}
errList := []error{}
log.Logger().Debugf("Removing Kubernetes resources from %s using selector: %s from %s", message, util.ColorInfo(selector), strings.Join(kinds, " "))
errs := h.deleteResourcesBySelector(ns, kinds, selector, wait)
errList = append(errList, errs...)
return util.CombineErrors(errList...)
}
func (h *HelmTemplate) deleteClusterResourcesBySelector(ns string, selector string, wait bool, message string) error {
clusterKinds := []string{"all", "clusterrole", "clusterrolebinding"}
errList := []error{}
hasPermissions, errs := kube.CanI(h.KubeClient, kube.Delete, kube.All)
errList = append(errList, errs...)
if hasPermissions {
selector += "," + LabelNamespace + "=" + ns
log.Logger().Debugf("Removing Kubernetes resources from %s using selector: %s from %s", message, util.ColorInfo(selector), strings.Join(clusterKinds, " "))
errs = h.deleteResourcesBySelector("", clusterKinds, selector, wait)
errList = append(errList, errs...)
}
return util.CombineErrors(errList...)
}
func (h *HelmTemplate) deleteResourcesBySelector(ns string, kinds []string, selector string, wait bool) []error {
errList := []error{}
for _, kind := range kinds {
args := []string{"delete", kind, "--ignore-not-found", "-l", selector}
if ns != "" {
args = append(args, "--namespace", ns)
}
if wait {
args = append(args, "--wait")
}
output, err := h.runKubectlWithOutput(args...)
if err != nil {
errList = append(errList, err)
} else {
output = strings.TrimSpace(output)
if output != "No resources found" {
log.Logger().Info(output)
}
}
}
return errList
}
// isClusterKind returns true if the kind or resource name is a cluster wide resource
func isClusterKind(kind string) bool {
lower := strings.ToLower(kind)
return strings.HasPrefix(lower, "cluster") || strings.HasPrefix(lower, "namespace")
}
// DeleteRelease removes the given release
func (h *HelmTemplate) DeleteRelease(ns string, releaseName string, purge bool) error {
if ns == "" {
ns = h.Namespace
}
selector := LabelReleaseName + "=" + releaseName
err := h.deleteNamespacedResourcesBySelector(ns, selector, true, fmt.Sprintf("release %s", releaseName))
if err != nil {
return err
}
return h.deleteClusterResourcesBySelector(ns, selector, true, fmt.Sprintf("release %s", releaseName))
}
// StatusRelease returns the output of the helm status command for a given release
func (h *HelmTemplate) StatusRelease(ns string, releaseName string) error {
releases, _, err := h.ListReleases(ns)
if err != nil {
return errors.Wrap(err, "listing current chart releases")
}
if _, ok := releases[releaseName]; ok {
return nil
}
return fmt.Errorf("chart release %q not found", releaseName)
}
// StatusReleaseWithOutput returns the output of the helm status command for a given release
func (h *HelmTemplate) StatusReleaseWithOutput(ns string, releaseName string, outputFormat string) (string, error) {
return h.Client.StatusReleaseWithOutput(ns, releaseName, outputFormat)
}
func (h *HelmTemplate) getDirectories(releaseName string) (string, string, string, error) {
if releaseName == "" {
return "", "", "", fmt.Errorf("No release name specified!")
}
if h.WorkDir == "" {
var err error
h.WorkDir, err = ioutil.TempDir("", "helm-template-workdir-")
if err != nil {
return "", "", "", errors.Wrap(err, "Failed to create temporary directory for helm template workdir")
}
}
workDir := h.WorkDir
outDir := filepath.Join(workDir, releaseName, "output")
helmHookDir := filepath.Join(workDir, releaseName, "helmHooks")
chartsDir := filepath.Join(workDir, releaseName, "chartFiles")
dirs := []string{outDir, helmHookDir, chartsDir}
for _, d := range dirs {
err := os.MkdirAll(d, util.DefaultWritePermissions)
if err != nil {
return "", "", "", err
}
}
return outDir, helmHookDir, chartsDir, nil
}
// clearOutputDir removes all files in the helm output dir
func (h *HelmTemplate) clearOutputDir(releaseName string) error {
dir, helmDir, chartsDir, err := h.getDirectories(releaseName)
if err != nil {
return err
}
return util.RecreateDirs(dir, helmDir, chartsDir)
}
func (h *HelmTemplate) fetchChart(chart string, version string, dir string, repo string, username string,
password string) (string, error) {
exists, err := util.FileExists(chart)
if err != nil {
return "", err
}
if exists {
log.Logger().Infof("Chart dir already exists: %s", dir)
return chart, nil
}
if dir == "" {
return "", fmt.Errorf("must specify dir for chart %s", chart)
}
args := []string{
"fetch", "-d", dir, "--untar", chart,
}
if repo != "" {
args = append(args, "--repo", repo)
}
if version != "" {
args = append(args, "--version", version)
}
if username != "" {
args = append(args, "--username", username)
}
if password != "" {
args = append(args, "--password", password)
}
err = h.Client.runHelm(args...)
if err != nil {
return "", err
}
answer := dir
files, err := ioutil.ReadDir(dir)
if err != nil {
return "", err
}
for _, f := range files {
if f.IsDir() {
answer = filepath.Join(dir, f.Name())
break
}
}
log.Logger().Debugf("Fetched chart %s to dir %s", chart, answer)
return answer, nil
}
func (h *HelmTemplate) addLabelsToFiles(chart string, releaseName string, version string, metadata *chart.Metadata, ns string) ([]*HelmHook, error) {
dir, helmHookDir, _, err := h.getDirectories(releaseName)
if err != nil {
return nil, err
}
return addLabelsToChartYaml(dir, helmHookDir, chart, releaseName, version, metadata, ns)
}
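// splitObjectsInFiles splits a multi-document YAML file (documents separated by "---")
// into one file per object under baseDir/namespaces/<namespace>/..., defaulting the
// namespace when an object does not declare one, and returns the part files written.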
func splitObjectsInFiles(inputFile string, baseDir string, relativePath, defaultNamespace string) ([]string, error) {
result := make([]string, 0)
f, err := os.Open(inputFile)
if err != nil {
return result, errors.Wrapf(err, "opening inputFile %q", inputFile)
}
defer f.Close()
scanner := bufio.NewScanner(f)
var buf bytes.Buffer
fileName := filepath.Base(inputFile)
count := 0
for scanner.Scan() {
line := scanner.Text()
if line == resourcesSeparator {
// ensure that we actually have YAML in the buffer
data := buf.Bytes()
if isWhitespaceOrComments(data) {
buf.Reset()
continue
}
m := yaml.MapSlice{}
err = yaml.Unmarshal(data, &m)
namespace := getYamlValueString(&m, "metadata", "namespace")
if namespace == "" {
namespace = defaultNamespace
}
if err != nil {
return make([]string, 0), errors.Wrapf(err, "Failed to parse the following YAML from inputFile '%s':\n%s", inputFile, buf.String())
}
if len(m) == 0 {
buf.Reset()
continue
}
partFile, err := writeObjectInFile(&buf, baseDir, relativePath, namespace, fileName, count)
if err != nil {
return result, errors.Wrapf(err, "saving object")
}
result = append(result, partFile)
buf.Reset()
count++
} else {
_, err := buf.WriteString(line)
if err != nil {
return result, errors.Wrapf(err, "writing line from inputFile %q into a buffer", inputFile)
}
_, err = buf.WriteString("\n")
if err != nil {
return result, errors.Wrapf(err, "writing a new line in the buffer")
}
}
}
if buf.Len() > 0 && !isWhitespaceOrComments(buf.Bytes()) {
data := buf.Bytes()
m := yaml.MapSlice{}
err = yaml.Unmarshal(data, &m)
namespace := getYamlValueString(&m, "metadata", "namespace")
if namespace == "" {
namespace = defaultNamespace
}
partFile, err := writeObjectInFile(&buf, baseDir, relativePath, namespace, fileName, count)
if err != nil {
return result, errors.Wrapf(err, "saving object")
}
result = append(result, partFile)
}
return result, nil
}
// isWhitespaceOrComments returns true if the data is empty, whitespace or comments only
func isWhitespaceOrComments(data []byte) bool {
if len(data) == 0 {
return true
}
lines := strings.Split(string(data), "\n")
for _, line := range lines {
t := strings.TrimSpace(line)
if t != "" && !strings.HasPrefix(t, "#") {
return false
}
}
return true
}
func writeObjectInFile(buf io.WriterTo, baseDir string, relativePath, namespace string, fileName string, count int) (string, error) {
relativeDir := filepath.Dir(relativePath)
const filePrefix = "part"
partFile := fmt.Sprintf("%s%d-%s", filePrefix, count, fileName)
absFile := filepath.Join(baseDir, "namespaces", namespace, relativeDir, partFile)
absFileDir := filepath.Dir(absFile)
log.Logger().Debugf("creating file: %s", absFile)
err := os.MkdirAll(absFileDir, os.ModePerm)
if err != nil {
return "", errors.Wrapf(err, "creating directory %q", absFileDir)
}
file, err := os.Create(absFile)
if err != nil {
return "", errors.Wrapf(err, "creating file %q", absFile)
}
log.Logger().Debugf("writing data to %s", absFile)
defer file.Close()
_, err = buf.WriteTo(file)
if err != nil {
return "", errors.Wrapf(err, "writing object to file %q", absFile)
}
return absFile, nil
}
func addLabelsToChartYaml(basedir string, hooksDir string, chart string, releaseName string, version string, metadata *chart.Metadata, ns string) ([]*HelmHook, error) {
helmHooks := []*HelmHook{}
log.Logger().Debugf("Searching for yaml files from basedir %s", basedir)
err := filepath.Walk(basedir, func(path string, f os.FileInfo, err error) error {
ext := filepath.Ext(path)
if ext == ".yaml" {
file := path
relativePath, err := filepath.Rel(basedir, file)
if err != nil {
return errors.Wrapf(err, "unable to determine relative path %q", file)
}
partFiles, err := splitObjectsInFiles(file, basedir, relativePath, ns)
if err != nil {
return errors.Wrapf(err, "splitting objects from file %q", file)
}
log.Logger().Debugf("part files list: %v", partFiles)
for _, partFile := range partFiles {
log.Logger().Debugf("processing part file: %s", partFile)
data, err := ioutil.ReadFile(partFile)
if err != nil {
return errors.Wrapf(err, "Failed to load partFile %s", partFile)
}
m := yaml.MapSlice{}
err = yaml.Unmarshal(data, &m)
if err != nil {
return errors.Wrapf(err, "Failed to parse YAML of partFile %s", partFile)
}
kind := getYamlValueString(&m, "kind")
helmHookType := getYamlValueString(&m, "metadata", "annotations", "helm.sh/hook")
if helmHookType != "" {
helmHook, err := getHelmHookFromFile(basedir, path, hooksDir, helmHookType, kind, &m, partFile)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("when getting helm hook from part file '%s'", partFile))
}
helmHooks = append(helmHooks, helmHook)
} else {
err := processChartResource(partFile, data, kind, ns, releaseName, &m, metadata, version, chart)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("when processing chart resource '%s'", partFile))
}
}
}
}
return nil
})
return helmHooks, err
}
func getHelmHookFromFile(basedir string, path string, hooksDir string, helmHook string, kind string, m *yaml.MapSlice, partFile string) (*HelmHook, error) {
// lets move any helm hooks to the new partFile
relPath, err := filepath.Rel(basedir, path)
if err != nil {
return &HelmHook{}, err
}
if relPath == "" {
return &HelmHook{}, fmt.Errorf("Failed to find relative path of basedir %s and path %s", basedir, partFile)
}
// add the hook type into the directory structure
newPath := filepath.Join(hooksDir, relPath)
newDir, _ := filepath.Split(newPath)
err = os.MkdirAll(newDir, util.DefaultWritePermissions)
if err != nil {
return &HelmHook{}, errors.Wrap(err, fmt.Sprintf("when creating '%s'", newDir))
}
// copy the hook part file to the hooks path
_, hookFileName := filepath.Split(partFile)
hookFile := filepath.Join(newDir, hookFileName)
err = os.Rename(partFile, hookFile)
if err != nil {
return &HelmHook{}, errors.Wrap(err, fmt.Sprintf("when copying from '%s' to '%s'", partFile, hookFile))
}
name := getYamlValueString(m, "metadata", "name")
helmDeletePolicy := getYamlValueString(m, "metadata", "annotations", "helm.sh/hook-delete-policy")
return NewHelmHook(kind, name, hookFile, helmHook, helmDeletePolicy), nil
}
func processChartResource(partFile string, data []byte, kind string, ns string, releaseName string, m *yaml.MapSlice, metadata *chart.Metadata, version string, chart string) error {
err := setYamlValue(m, releaseName, "metadata", "labels", LabelReleaseName)
if err != nil {
return errors.Wrapf(err, "Failed to modify YAML of partFile %s", partFile)
}
if !isClusterKind(kind) {
err = setYamlValue(m, ns, "metadata", "labels", LabelNamespace)
if err != nil {
return errors.Wrapf(err, "Failed to modify YAML of partFile %s", partFile)
}
}
err = setYamlValue(m, version, "metadata", "labels", LabelReleaseChartVersion)
if err != nil {
return errors.Wrapf(err, "Failed to modify YAML of partFile %s", partFile)
}
chartName := ""
if metadata != nil {
chartName = metadata.GetName()
appVersion := metadata.GetAppVersion()
if appVersion != "" {
err = setYamlValue(m, appVersion, "metadata", "annotations", AnnotationAppVersion)
if err != nil {
return errors.Wrapf(err, "Failed to modify YAML of partFile %s", partFile)
}
}
}
if chartName == "" {
chartName = chart
}
err = setYamlValue(m, chartName, "metadata", "annotations", AnnotationChartName)
if err != nil {
return errors.Wrapf(err, "Failed to modify YAML of partFile %s", partFile)
}
data, err = yaml.Marshal(m)
if err != nil {
return errors.Wrapf(err, "Failed to marshal YAML of partFile %s", partFile)
}
err = ioutil.WriteFile(partFile, data, util.DefaultWritePermissions)
if err != nil {
return errors.Wrapf(err, "Failed to write YAML partFile %s", partFile)
}
return nil
}
func getYamlValueString(mapSlice *yaml.MapSlice, keys ...string) string {
value := getYamlValue(mapSlice, keys...)
answer, ok := value.(string)
if ok {
return answer
}
return ""
}
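// getYamlValue walks the nested MapSlice along the given keys and returns the value
// found, or nil when any key along the path is missing.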
func getYamlValue(mapSlice *yaml.MapSlice, keys ...string) interface{} {
if mapSlice == nil {
return nil
}
m := mapSlice
lastIdx := len(keys) - 1
for idx, k := range keys {
last := idx >= lastIdx
found := false
for _, mi := range *m {
if mi.Key == k {
found = true
if last {
return mi.Value
} else {
value := mi.Value
if value == nil {
return nil
} else {
v, ok := value.(yaml.MapSlice)
if ok {
m = &v
} else {
v2, ok := value.(*yaml.MapSlice)
if ok {
m = v2
} else {
return nil
}
}
}
}
}
}
if !found {
return nil
}
}
return nil
}
// setYamlValue navigates through the YAML object structure lazily creating or inserting new values
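//
// For example (illustrative values): setYamlValue(&m, "1.2.3", "metadata", "labels", LabelReleaseChartVersion)
// ensures m contains metadata.labels["jenkins.io/version"] = "1.2.3", creating the
// intermediate maps if they do not exist yet.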
func setYamlValue(mapSlice *yaml.MapSlice, value string, keys ...string) error {
if mapSlice == nil {
return fmt.Errorf("No map input!")
}
m := mapSlice
lastIdx := len(keys) - 1
for idx, k := range keys {
last := idx >= lastIdx
found := false
for i, mi := range *m {
if mi.Key == k {
found = true
if last {
(*m)[i].Value = value
} else if i < len(*m) {
value := (*m)[i].Value
if value == nil {
v := &yaml.MapSlice{}
(*m)[i].Value = v
m = v
} else {
v, ok := value.(yaml.MapSlice)
if ok {
m2 := &yaml.MapSlice{}
*m2 = append(*m2, v...)
(*m)[i].Value = m2
m = m2
} else {
v2, ok := value.(*yaml.MapSlice)
if ok {
m2 := &yaml.MapSlice{}
*m2 = append(*m2, *v2...)
(*m)[i].Value = m2
m = m2
} else {
return fmt.Errorf("Could not convert key %s value %#v to a yaml.MapSlice", k, value)
}
}
}
}
}
}
if !found {
if last {
*m = append(*m, yaml.MapItem{
Key: k,
Value: value,
})
} else {
m2 := &yaml.MapSlice{}
*m = append(*m, yaml.MapItem{
Key: k,
Value: m2,
})
m = m2
}
}
}
return nil
}
func (h *HelmTemplate) runKubectl(args ...string) error {
h.Runner.SetDir(h.CWD)
h.Runner.SetName(h.Binary)
h.Runner.SetArgs(args)
output, err := h.Runner.RunWithoutRetry()
log.Logger().Debugf(output)
return err
}
func (h *HelmTemplate) runKubectlWithOutput(args ...string) (string, error) {
h.Runner.SetDir(h.CWD)
h.Runner.SetName(h.Binary)
h.Runner.SetArgs(args)
return h.Runner.RunWithoutRetry()
}
// getChart returns the chart metadata for the given dir
func (h *HelmTemplate) getChart(chartDir string, version string) (*chart.Metadata, string, error) {
file := filepath.Join(chartDir, ChartFileName)
if !filepath.IsAbs(chartDir) {
file = filepath.Join(h.Runner.CurrentDir(), file)
}
exists, err := util.FileExists(file)
if err != nil {
return nil, version, err
}
if !exists {
return nil, version, fmt.Errorf("no file %s found!", file)
}
metadata, err := chartutil.LoadChartfile(file)
if version == "" && metadata != nil {
version = metadata.GetVersion()
}
return metadata, version, err
}
func (h *HelmTemplate) runHooks(hooks []*HelmHook, hookPhase string, ns string, chart string, releaseName string, wait bool, create bool, force bool) error {
matchingHooks := MatchingHooks(hooks, hookPhase, "")
for _, hook := range matchingHooks {
err := h.kubectlApplyFile(ns, hookPhase, wait, create, force, hook.File)
if err != nil {
return err
}
}
return nil
}
func (h *HelmTemplate) deleteHooks(hooks []*HelmHook, hookPhase string, hookDeletePolicy string, ns string) error {
flag := os.Getenv("JX_DISABLE_DELETE_HELM_HOOKS")
matchingHooks := MatchingHooks(hooks, hookPhase, hookDeletePolicy)
for _, hook := range matchingHooks {
kind := hook.Kind
name := hook.Name
if kind == "Job" && name != "" {
log.Logger().Debugf("Waiting for helm %s hook Job %s to complete before removing it", hookPhase, name)
err := kube.WaitForJobToComplete(h.KubeClient, ns, name, time.Minute*30, false)
if err != nil {
log.Logger().Warnf("Job %s has not yet terminated for helm hook phase %s due to: %s so removing it anyway", name, hookPhase, err)
}
} else {
log.Logger().Warnf("Could not wait for hook resource to complete as it is kind %s and name %s for phase %s", kind, name, hookPhase)
}
if flag == "true" {
log.Logger().Infof("Not deleting the Job %s as we have the $JX_DISABLE_DELETE_HELM_HOOKS enabled", name)
continue
}
err := h.kubectlDeleteFile(ns, hook.File)
if err != nil {
return err
}
}
return nil
}
// NewHelmHook returns a newly created HelmHook
func NewHelmHook(kind string, name string, file string, hook string, hookDeletePolicy string) *HelmHook {
return &HelmHook{
Kind: kind,
Name: name,
File: file,
Hooks: strings.Split(hook, ","),
HookDeletePolicies: strings.Split(hookDeletePolicy, ","),
}
}
// MatchingHooks returns the matching files which have the given hook name and if hookPolicy is not blank the hook policy too
func MatchingHooks(hooks []*HelmHook, hook string, hookDeletePolicy string) []*HelmHook {
answer := []*HelmHook{}
for _, h := range hooks {
if util.StringArrayIndex(h.Hooks, hook) >= 0 &&
(hookDeletePolicy == "" || util.StringArrayIndex(h.HookDeletePolicies, hookDeletePolicy) >= 0) {
answer = append(answer, h)
}
}
return answer
}
| [
"\"JX_DISABLE_DELETE_HELM_HOOKS\""
]
| []
| [
"JX_DISABLE_DELETE_HELM_HOOKS"
]
| [] | ["JX_DISABLE_DELETE_HELM_HOOKS"] | go | 1 | 0 | |
cmd/helm/load_plugins.go | /*
Copyright The Helm Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"syscall"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"sigs.k8s.io/yaml"
"helm.sh/helm/v3/pkg/plugin"
)
const (
pluginStaticCompletionFile = "completion.yaml"
pluginDynamicCompletionExecutable = "plugin.complete"
)
type pluginError struct {
error
code int
}
// loadPlugins loads plugins into the command list.
//
// This follows a different pattern than the other commands because it has
// to inspect its environment and then add commands to the base command
// as it finds them.
func loadPlugins(baseCmd *cobra.Command, out io.Writer) {
// If HELM_NO_PLUGINS is set to 1, do not load plugins.
if os.Getenv("HELM_NO_PLUGINS") == "1" {
return
}
found, err := plugin.FindPlugins(settings.PluginsDirectory)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to load plugins: %s\n", err)
return
}
// Now we create commands for all of these.
for _, plug := range found {
plug := plug
md := plug.Metadata
if md.Usage == "" {
md.Usage = fmt.Sprintf("the %q plugin", md.Name)
}
c := &cobra.Command{
Use: md.Name,
Short: md.Usage,
Long: md.Description,
RunE: func(cmd *cobra.Command, args []string) error {
u, err := processParent(cmd, args)
if err != nil {
return err
}
// Call setupEnv before PrepareCommand because
// PrepareCommand uses os.ExpandEnv and expects the
// setupEnv vars.
plugin.SetupPluginEnv(settings, md.Name, plug.Dir)
main, argv, prepCmdErr := plug.PrepareCommand(u)
if prepCmdErr != nil {
os.Stderr.WriteString(prepCmdErr.Error())
return errors.Errorf("plugin %q exited with error", md.Name)
}
return callPluginExecutable(md.Name, main, argv, out)
},
// This passes all the flags to the subcommand.
DisableFlagParsing: true,
}
// TODO: Make sure a command with this name does not already exist.
baseCmd.AddCommand(c)
// For completion, we try to load more details about the plugins so as to allow for command and
// flag completion of the plugin itself.
// We only do this when necessary (for the "completion" and "__complete" commands) to avoid the
// risk of a rogue plugin affecting Helm's normal behavior.
subCmd, _, err := baseCmd.Find(os.Args[1:])
if (err == nil &&
((subCmd.HasParent() && subCmd.Parent().Name() == "completion") || subCmd.Name() == cobra.ShellCompRequestCmd)) ||
/* for the tests */ subCmd == baseCmd.Root() {
loadCompletionForPlugin(c, plug)
}
}
}
func processParent(cmd *cobra.Command, args []string) ([]string, error) {
k, u := manuallyProcessArgs(args)
if err := cmd.Parent().ParseFlags(k); err != nil {
return nil, err
}
return u, nil
}
// This function is used to setup the environment for the plugin and then
// call the executable specified by the parameter 'main'
func callPluginExecutable(pluginName string, main string, argv []string, out io.Writer) error {
env := os.Environ()
for k, v := range settings.EnvVars() {
env = append(env, fmt.Sprintf("%s=%s", k, v))
}
prog := exec.Command(main, argv...)
prog.Env = env
prog.Stdin = os.Stdin
prog.Stdout = out
prog.Stderr = os.Stderr
if err := prog.Run(); err != nil {
if eerr, ok := err.(*exec.ExitError); ok {
os.Stderr.Write(eerr.Stderr)
status := eerr.Sys().(syscall.WaitStatus)
return pluginError{
error: errors.Errorf("plugin %q exited with error", pluginName),
code: status.ExitStatus(),
}
}
return err
}
return nil
}
// manuallyProcessArgs processes an arg array, removing special args.
//
// Returns two sets of args: known and unknown (in that order)
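//
// For example (illustrative): manuallyProcessArgs([]string{"--debug", "-n", "prod", "install", "--set", "a=b"})
// returns known=["--debug", "-n", "prod"] and unknown=["install", "--set", "a=b"].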
func manuallyProcessArgs(args []string) ([]string, []string) {
known := []string{}
unknown := []string{}
kvargs := []string{"--kube-context", "--namespace", "-n", "--kubeconfig", "--kube-apiserver", "--kube-token", "--registry-config", "--repository-cache", "--repository-config"}
knownArg := func(a string) bool {
for _, pre := range kvargs {
if strings.HasPrefix(a, pre+"=") {
return true
}
}
return false
}
isKnown := func(v string) string {
for _, i := range kvargs {
if i == v {
return v
}
}
return ""
}
for i := 0; i < len(args); i++ {
switch a := args[i]; a {
case "--debug":
known = append(known, a)
case isKnown(a):
known = append(known, a)
i++
if i < len(args) {
known = append(known, args[i])
}
default:
if knownArg(a) {
known = append(known, a)
continue
}
unknown = append(unknown, a)
}
}
return known, unknown
}
// pluginCommand represents the optional completion.yaml file of a plugin
type pluginCommand struct {
Name string `json:"name"`
ValidArgs []string `json:"validArgs"`
Flags []string `json:"flags"`
Commands []pluginCommand `json:"commands"`
}
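// An illustrative completion.yaml for a hypothetical plugin (all names invented):
//
//	name: myplugin
//	flags:
//	- v
//	- all
//	commands:
//	- name: list
//	  validArgs:
//	  - releases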
// loadCompletionForPlugin will load and parse any completion.yaml provided by the plugin
// and add the dynamic completion hook to call the optional plugin.complete
func loadCompletionForPlugin(pluginCmd *cobra.Command, plugin *plugin.Plugin) {
// Parse the yaml file providing the plugin's sub-commands and flags
cmds, err := loadFile(strings.Join(
[]string{plugin.Dir, pluginStaticCompletionFile}, string(filepath.Separator)))
if err != nil {
// The file could be missing or invalid. No static completion for this plugin.
if settings.Debug {
log.Output(2, fmt.Sprintf("[info] %s\n", err.Error()))
}
// Continue to setup dynamic completion.
cmds = &pluginCommand{}
}
// Preserve the Usage string specified for the plugin
cmds.Name = pluginCmd.Use
addPluginCommands(plugin, pluginCmd, cmds)
}
// addPluginCommands is a recursive method that adds each different level
// of sub-commands and flags for the plugins that have provided such information
func addPluginCommands(plugin *plugin.Plugin, baseCmd *cobra.Command, cmds *pluginCommand) {
if cmds == nil {
return
}
if len(cmds.Name) == 0 {
// Missing name for a command
if settings.Debug {
log.Output(2, fmt.Sprintf("[info] sub-command name field missing for %s", baseCmd.CommandPath()))
}
return
}
baseCmd.Use = cmds.Name
baseCmd.ValidArgs = cmds.ValidArgs
// Setup the same dynamic completion for each plugin sub-command.
// This is because if dynamic completion is triggered, there is a single executable
// to call (plugin.complete), so every sub-commands calls it in the same fashion.
if cmds.Commands == nil {
// Only setup dynamic completion if there are no sub-commands. This avoids
// calling plugin.complete at every completion, which greatly simplifies
// development of plugin.complete for plugin developers.
baseCmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return pluginDynamicComp(plugin, cmd, args, toComplete)
}
}
// Create fake flags.
if len(cmds.Flags) > 0 {
// The flags can be created with any type, since we only need them for completion.
// pflag does not allow to create short flags without a corresponding long form
// so we look for all short flags and match them to any long flag. This will allow
// plugins to provide short flags without a long form.
// If there are more short-flags than long ones, we'll create an extra long flag with
// the same single letter as the short form.
shorts := []string{}
longs := []string{}
for _, flag := range cmds.Flags {
if len(flag) == 1 {
shorts = append(shorts, flag)
} else {
longs = append(longs, flag)
}
}
f := baseCmd.Flags()
if len(longs) >= len(shorts) {
for i := range longs {
if i < len(shorts) {
f.BoolP(longs[i], shorts[i], false, "")
} else {
f.Bool(longs[i], false, "")
}
}
} else {
for i := range shorts {
if i < len(longs) {
f.BoolP(longs[i], shorts[i], false, "")
} else {
// Create a long flag with the same name as the short flag.
// Not a perfect solution, but it's better than ignoring the extra short flags.
f.BoolP(shorts[i], shorts[i], false, "")
}
}
}
}
// Recursively add any sub-commands
for _, cmd := range cmds.Commands {
// Create a fake command so that completion can be done for the sub-commands of the plugin
subCmd := &cobra.Command{
// This prevents Cobra from removing the flags. We want to keep the flags to pass them
// to the dynamic completion script of the plugin.
DisableFlagParsing: true,
// A Run is required for it to be a valid command without subcommands
Run: func(cmd *cobra.Command, args []string) {},
}
baseCmd.AddCommand(subCmd)
addPluginCommands(plugin, subCmd, &cmd)
}
}
// loadFile takes a yaml file at the given path, parses it and returns a pluginCommand object
func loadFile(path string) (*pluginCommand, error) {
cmds := new(pluginCommand)
b, err := ioutil.ReadFile(path)
if err != nil {
return cmds, errors.Errorf("File (%s) not provided by plugin. No plugin auto-completion possible.", path)
}
err = yaml.Unmarshal(b, cmds)
return cmds, err
}
// pluginDynamicComp call the plugin.complete script of the plugin (if available)
// to obtain the dynamic completion choices. It must pass all the flags and sub-commands
// specified in the command-line to the plugin.complete executable (except helm's global flags)
func pluginDynamicComp(plug *plugin.Plugin, cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
md := plug.Metadata
u, err := processParent(cmd, args)
if err != nil {
return nil, cobra.ShellCompDirectiveError
}
// We will call the dynamic completion script of the plugin
main := strings.Join([]string{plug.Dir, pluginDynamicCompletionExecutable}, string(filepath.Separator))
// We must include all sub-commands passed on the command-line.
// To do that, we pass-in the entire CommandPath, except the first two elements
// which are 'helm' and 'pluginName'.
argv := strings.Split(cmd.CommandPath(), " ")[2:]
if !md.IgnoreFlags {
argv = append(argv, u...)
argv = append(argv, toComplete)
}
plugin.SetupPluginEnv(settings, md.Name, plug.Dir)
cobra.CompDebugln(fmt.Sprintf("calling %s with args %v", main, argv), settings.Debug)
buf := new(bytes.Buffer)
if err := callPluginExecutable(md.Name, main, argv, buf); err != nil {
// The dynamic completion file is optional for a plugin, so this error is ok.
cobra.CompDebugln(fmt.Sprintf("Unable to call %s: %v", main, err.Error()), settings.Debug)
return nil, cobra.ShellCompDirectiveDefault
}
var completions []string
for _, comp := range strings.Split(buf.String(), "\n") {
// Remove any empty lines
if len(comp) > 0 {
completions = append(completions, comp)
}
}
// Check if the last line of output is of the form :<integer>, which
// indicates the BashCompletionDirective.
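// For example (illustrative): if the plugin prints "mychoice1\nmychoice2\n:4", the
// completions are [mychoice1 mychoice2] and, in current cobra versions, :4 maps to
// cobra.ShellCompDirectiveNoFileComp.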
directive := cobra.ShellCompDirectiveDefault
if len(completions) > 0 {
lastLine := completions[len(completions)-1]
if len(lastLine) > 1 && lastLine[0] == ':' {
if strInt, err := strconv.Atoi(lastLine[1:]); err == nil {
directive = cobra.ShellCompDirective(strInt)
completions = completions[:len(completions)-1]
}
}
}
return completions, directive
}
| [
"\"HELM_NO_PLUGINS\""
]
| []
| [
"HELM_NO_PLUGINS"
]
| [] | ["HELM_NO_PLUGINS"] | go | 1 | 0 | |
config/asgi.py | """
ASGI config for foo project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
app/data.py | """
BloomTech Labs DS Data Engineer Role
- Database Interface
- Visualization Interface
"""
import os
import re
import string
from random import randint
from typing import Iterable, Dict, List
import pandas as pd
import psycopg2
import plotly.express as px
import plotly.graph_objects as go
from plotly.graph_objs import Figure
from psycopg2 import sql
from dotenv import load_dotenv
class Data:
load_dotenv()
db_url = os.getenv("DB_URL")
def _setup(self, table_name: str, columns: Iterable[str]):
self._action(f"""CREATE TABLE IF NOT EXISTS {table_name}
({', '.join(columns)});""")
def _action(self, sql_action):
conn = psycopg2.connect(self.db_url)
curs = conn.cursor()
curs.execute(sql_action)
conn.commit()
curs.close()
conn.close()
def _query(self, sql_query) -> list:
conn = psycopg2.connect(self.db_url)
curs = conn.cursor()
curs.execute(sql_query)
results = curs.fetchall()
curs.close()
conn.close()
return results
def count(self) -> int:
return self._query("SELECT COUNT(*) FROM features")[0][0]
def columns(self) -> List[str]:
return [col[3] for col in self._query(
"""SELECT * FROM information_schema.columns
WHERE table_name = 'features';"""
)]
def rows(self) -> List[List]:
return self._query("SELECT * FROM features;")
def df(self):
return pd.DataFrame(data=self.rows(), columns=self.columns())
def row(self, idx: int) -> Dict:
df = self.df()
return df[df["idx"] == idx].to_dict(orient="records")[0]
def format_target(self, target):
return f"Class {str(target).rjust(2, '0')}"
def random_row(self, n_features=3):
features = tuple(randint(1, 6) for _ in range(n_features))
return *features, self.format_target(sum(features))
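# Illustrative output (random): random_row() might return (3, 5, 1, "Class 09"),
# i.e. three die rolls plus the formatted sum as the target label.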
def joined_rows(self, n_rows):
return ",".join(str(self.random_row()) for _ in range(n_rows))
def seed(self, n_rows: int):
self._action(f"""INSERT INTO
features (feature_1, feature_2, feature_3, target)
VALUES {self.joined_rows(n_rows)};""")
@staticmethod
def cleaner(text: str) -> str:
return re.sub(r"\s+", " ", text.translate(
str.maketrans("", "", string.punctuation)
).strip())
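# Illustrative behaviour (assumed input): cleaner("  Class: 07!! ") -> "Class 07".
# Punctuation is stripped first, then whitespace runs collapse to single spaces.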
def insert(self, feature_1, feature_2, feature_3, target):
self._action(sql.SQL("""INSERT INTO features
(feature_1, feature_2, feature_3, target)
VALUES ({},{},{},{});""").format(
sql.Literal(feature_1),
sql.Literal(feature_2),
sql.Literal(feature_3),
sql.Literal(self.format_target(self.cleaner(target))),
))
return int(self._query(sql.SQL("""SELECT idx FROM features
ORDER BY idx DESC LIMIT 1;"""))[0][0])
def reset(self):
self._action("TRUNCATE TABLE features RESTART IDENTITY;")
def crosstab_vis(self, feature_id) -> Figure:
if feature_id not in range(1, 4):
return Figure()
feature_name = f"feature_{feature_id}"
feature_title = feature_name.replace('_', ' ').title()
df = self.df()
cross_tab = pd.crosstab(
df["target"],
df[feature_name],
)
data = [
go.Bar(name=col, x=cross_tab.index, y=cross_tab[col])
for col in cross_tab.columns
]
title = f"Target by {feature_title} Crosstab"
layout = go.Layout(
title=title,
barmode="stack",
colorway=px.colors.qualitative.Antique,
)
return go.Figure(data=data, layout=layout)
def target_percent_vis(self):
df = self.df()["target"].value_counts().to_frame()
data = go.Pie(
labels=df.index.values,
values=df["target"],
textinfo='label+percent',
showlegend=False,
hole=0.5,
)
layout = go.Layout(
title="Target Percentage",
colorway=px.colors.qualitative.Antique,
)
return go.Figure(data=data, layout=layout)
if __name__ == '__main__':
db = Data()
# db._action("DROP TABLE features")
# db._setup("features", [
# "idx SERIAL PRIMARY KEY NOT NULL",
# "feature_1 INT8 NOT NULL",
# "feature_2 INT8 NOT NULL",
# "feature_3 INT8 NOT NULL",
# "target varchar(10) NOT NULL"
# ])
# db.reset()
# db.seed(1024)
db.crosstab_vis(1).show()
# db.target_percent_vis().show()
| []
| []
| [
"DB_URL"
]
| [] | ["DB_URL"] | python | 1 | 0 | |
driver.go | package vertigo
// Copyright (c) 2019-2021 Micro Focus or one of its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
import (
"database/sql"
"database/sql/driver"
"os"
"strconv"
"github.com/vertica/vertica-sql-go/logger"
)
// Driver as defined by the Go language Driver interface
type Driver struct{}
const (
driverName string = "vertica-sql-go"
driverVersion string = "1.1.1"
protocolVersion uint32 = 0x00030008
)
var driverLogger = logger.New("driver")
// Open takes a connection string in this format:
// user:pass@host:port/database
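//
// For example (illustrative credentials; 5433 is assumed as the usual Vertica port):
//
//	db, err := sql.Open("vertica", "dbadmin:secret@localhost:5433/mydb")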
func (d *Driver) Open(connString string) (driver.Conn, error) {
conn, err := newConnection(connString)
if err != nil {
driverLogger.Error(err.Error())
}
return conn, err
}
// Register ourselves with the sql package.
func init() {
logger.SetLogLevel(logger.WARN)
if logLevel := os.Getenv("VERTICA_SQL_GO_LOG_LEVEL"); logLevel != "" {
logVal, err := strconv.ParseUint(logLevel, 10, 32)
if err != nil {
driverLogger.Error(err.Error())
} else {
logFlag := logger.WARN
switch logVal {
case 0:
logFlag = logger.TRACE
case 1:
logFlag = logger.DEBUG
case 2:
logFlag = logger.INFO
case 3:
logFlag = logger.WARN
case 4:
logFlag = logger.ERROR
case 5:
logFlag = logger.FATAL
case 6:
logFlag = logger.NONE
default:
driverLogger.Error("invalid VERTICA_SQL_GO_LOG_LEVEL value; should be 0-6")
}
logger.SetLogLevel(logFlag)
}
}
if logFile := os.Getenv("VERTICA_SQL_GO_LOG_FILE"); logFile != "" {
if loggerBackend, err := logger.NewFileLogger(logFile); err == nil {
logger.SetLogger(loggerBackend)
} else {
driverLogger.Error("unable to create file logger: %v", err)
}
}
sql.Register("vertica", &Driver{})
}
| [
"\"VERTICA_SQL_GO_LOG_LEVEL\"",
"\"VERTICA_SQL_GO_LOG_FILE\""
]
| []
| [
"VERTICA_SQL_GO_LOG_FILE",
"VERTICA_SQL_GO_LOG_LEVEL"
]
| [] | ["VERTICA_SQL_GO_LOG_FILE", "VERTICA_SQL_GO_LOG_LEVEL"] | go | 2 | 0 | |
pyon/net/transport.py | #!/usr/bin/env python
"""
Transport layer abstractions
TODOS:
- split listen() into two subcalls (for StreamSubscriber)
"""
__author__ = 'Dave Foster <[email protected]>'
from pyon.util.log import log
from pyon.util.containers import DotDict
from gevent.event import AsyncResult, Event
from gevent.queue import Queue
from gevent import coros, sleep
from gevent.timeout import Timeout
from gevent.pool import Pool
from contextlib import contextmanager
import os
from pika import BasicProperties
from pyon.util.async import spawn
from pyon.util.pool import IDPool
from uuid import uuid4
from collections import defaultdict
class TransportError(StandardError):
pass
class BaseTransport(object):
def declare_exchange_impl(self, exchange, **kwargs):
raise NotImplementedError()
def delete_exchange_impl(self, exchange, **kwargs):
raise NotImplementedError()
def declare_queue_impl(self, queue, **kwargs):
raise NotImplementedError()
def delete_queue_impl(self, queue, **kwargs):
raise NotImplementedError()
def bind_impl(self, exchange, queue, binding):
raise NotImplementedError()
def unbind_impl(self, exchange, queue, binding):
raise NotImplementedError()
def ack_impl(self, delivery_tag):
raise NotImplementedError()
def reject_impl(self, delivery_tag, requeue=False):
raise NotImplementedError()
def start_consume_impl(self, callback, queue, no_ack=False, exclusive=False):
raise NotImplementedError()
def stop_consume_impl(self, consumer_tag):
raise NotImplementedError()
def setup_listener(self, binding, default_cb):
raise NotImplementedError()
def get_stats_impl(self, queue):
raise NotImplementedError()
def purge_impl(self, queue):
raise NotImplementedError()
def qos_impl(self, prefetch_size=0, prefetch_count=0, global_=False):
raise NotImplementedError()
def publish_impl(self, exchange, routing_key, body, properties, immediate=False, mandatory=False, durable_msg=False):
raise NotImplementedError()
def close(self):
raise NotImplementedError()
@property
def channel_number(self):
raise NotImplementedError()
@property
def active(self):
raise NotImplementedError()
def add_on_close_callback(self, cb):
raise NotImplementedError()
class ComposableTransport(BaseTransport):
"""
A Transport that has its methods composed of two or more transports.
This is used for ExchangeObjects, where we want to compose the container's ex_manager authoritative
transport with a self transport unique to the XO, needed for the following methods:
- ack_impl
- reject_impl
- start_consume_impl
- stop_consume_impl
- qos_impl
- get_stats_impl
- publish_impl (solely for publish rates, not needed for identity in protocol)
"""
common_methods = ['ack_impl',
'reject_impl',
'start_consume_impl',
'stop_consume_impl',
'qos_impl',
'get_stats_impl',
'publish_impl']
def __init__(self, left, right, *methods):
self._transports = [left]
log.debug("ComposableTransport.__init__(%s) %s %s", self.channel_number, type(left), left)
self._methods = { 'declare_exchange_impl': left.declare_exchange_impl,
'delete_exchange_impl' : left.delete_exchange_impl,
'declare_queue_impl' : left.declare_queue_impl,
'delete_queue_impl' : left.delete_queue_impl,
'bind_impl' : left.bind_impl,
'unbind_impl' : left.unbind_impl,
'ack_impl' : left.ack_impl,
'reject_impl' : left.reject_impl,
'start_consume_impl' : left.start_consume_impl,
'stop_consume_impl' : left.stop_consume_impl,
'setup_listener' : left.setup_listener,
'get_stats_impl' : left.get_stats_impl,
'purge_impl' : left.purge_impl,
'qos_impl' : left.qos_impl,
'publish_impl' : left.publish_impl, }
if right is not None:
self.overlay(right, *methods)
self._close_callbacks = []
def overlay(self, transport, *methods):
for m in methods:
self._methods[m] = getattr(transport, m)
log.debug("ComposableTransport.overlay(%s) %s %s (%s)", self.channel_number, type(transport), transport, transport.channel_number)
self._transports.append(transport)
def declare_exchange_impl(self, exchange, **kwargs):
m = self._methods['declare_exchange_impl']
return m(exchange, **kwargs)
def delete_exchange_impl(self, exchange, **kwargs):
m = self._methods['delete_exchange_impl']
return m(exchange, **kwargs)
def declare_queue_impl(self, queue, **kwargs):
m = self._methods['declare_queue_impl']
return m(queue, **kwargs)
def delete_queue_impl(self, queue, **kwargs):
m = self._methods['delete_queue_impl']
return m(queue, **kwargs)
def bind_impl(self, exchange, queue, binding):
m = self._methods['bind_impl']
return m(exchange, queue, binding)
def unbind_impl(self, exchange, queue, binding):
m = self._methods['unbind_impl']
return m(exchange, queue, binding)
def ack_impl(self, delivery_tag):
m = self._methods['ack_impl']
return m(delivery_tag)
def reject_impl(self, delivery_tag, requeue=False):
m = self._methods['reject_impl']
return m(delivery_tag, requeue=requeue)
def start_consume_impl(self, callback, queue, no_ack=False, exclusive=False):
m = self._methods['start_consume_impl']
return m(callback, queue, no_ack=no_ack, exclusive=exclusive)
def stop_consume_impl(self, consumer_tag):
m = self._methods['stop_consume_impl']
return m(consumer_tag)
def setup_listener(self, binding, default_cb):
m = self._methods['setup_listener']
return m(binding, default_cb)
def get_stats_impl(self, queue):
m = self._methods['get_stats_impl']
return m(queue)
def purge_impl(self, queue):
m = self._methods['purge_impl']
return m(queue)
def qos_impl(self, prefetch_size=0, prefetch_count=0, global_=False):
m = self._methods['qos_impl']
return m(prefetch_size=prefetch_size, prefetch_count=prefetch_count, global_=global_)
def publish_impl(self, exchange, routing_key, body, properties, immediate=False, mandatory=False, durable_msg=False):
m = self._methods['publish_impl']
return m(exchange, routing_key, body, properties, immediate=immediate, mandatory=mandatory, durable_msg=durable_msg)
def close(self):
for t in self._transports:
t.close()
for cb in self._close_callbacks:
cb(self, 200, "Closed OK") # @TODO where to get real value
@property
def channel_number(self):
return self._transports[-1].channel_number
def add_on_close_callback(self, cb):
self._close_callbacks.append(cb)
@property
def active(self):
return all(map(lambda x: x.active, self._transports))
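# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of the composition the class docstring describes: a shared
# "main" transport supplies the declare/bind-style methods, while a per-object
# transport overrides the consume/ack-style methods listed in common_methods.
# Both arguments are assumed to implement BaseTransport.
def _example_compose(main_transport, xo_transport):
    return ComposableTransport(main_transport, xo_transport,
                               *ComposableTransport.common_methods)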
class AMQPTransport(BaseTransport):
"""
A transport adapter around a Pika channel.
"""
def __init__(self, amq_chan):
"""
Creates an AMQPTransport, bound to an underlying Pika channel.
"""
#log.info("AMQPTransport(%d)", amq_chan.channel_number)
self._client = amq_chan
self._client.add_on_close_callback(self._on_underlying_close)
self._close_callbacks = []
self.lock = False
def _on_underlying_close(self, code, text):
if not (code == 0 or code == 200):
log.error("AMQPTransport.underlying closed:\n\tchannel number: %s\n\tcode: %d\n\ttext: %s", self.channel_number, code, text)
# PIKA BUG: in v0.9.5, this amq_chan instance will be left around in the callbacks
# manager, and trips a bug in the handler for on_basic_deliver. We attempt to clean
# up for Pika here so we don't goof up when reusing a channel number.
# this appears to be fixed in 3050d116899aced2392def2e3e66ca30c93334ac
# https://github.com/pika/pika/commit/e93c7ebae2c57b798977ba2992602310deb4758b
self._client.callbacks.remove(self._client.channel_number, 'Basic.GetEmpty')
self._client.callbacks.remove(self._client.channel_number, 'Channel.Close')
self._client.callbacks.remove(self._client.channel_number, '_on_basic_deliver')
self._client.callbacks.remove(self._client.channel_number, '_on_basic_get')
# uncomment these lines to see the full callback list that Pika maintains
#stro = pprint.pformat(callbacks._callbacks)
#log.error(str(stro))
for cb in self._close_callbacks:
cb(self, code, text)
@property
def active(self):
if self._client is not None:
if self._client.closing is None:
return True
return False
def close(self):
if self.lock:
return
self._client.close()
@property
def channel_number(self):
return self._client.channel_number
def add_on_close_callback(self, cb):
self._close_callbacks.append(cb)
@contextmanager
def _push_close_cb(self, callback):
self._client.add_on_close_callback(callback)
try:
yield callback
finally:
# PIKA BUG: v0.9.5, we need to specify the callback as a dict - this is fixed in git HEAD (13 Feb 2012)
de = {'handle': callback, 'one_shot': True}
self._client.callbacks.remove(self._client.channel_number, '_on_channel_close', de)
def _sync_call(self, func, cb_arg, *args, **kwargs):
"""
Functionally similar to the generic blocking_cb but with error support that's Channel specific.
"""
ar = AsyncResult()
def cb(*args, **kwargs):
ret = list(args)
if len(kwargs): ret.append(kwargs)
ar.set(ret)
eb = lambda ch, *args: ar.set(TransportError("_sync_call could not complete due to an error (%s)" % args))
kwargs[cb_arg] = cb
with self._push_close_cb(eb):
func(*args, **kwargs)
# Note: MM (2014-04-03): It seems that gevent block or something else can lead to this timeout
# hitting. Increased from 10 to 20
ret_vals = ar.get(timeout=20)
if isinstance(ret_vals, TransportError):
# mark this channel as poison, do not use again!
# don't test for type here, we don't want to have to import PyonSelectConnection
if hasattr(self._client.transport, 'connection') and hasattr(self._client.transport.connection, 'mark_bad_channel'):
self._client.transport.connection.mark_bad_channel(self._client.channel_number)
else:
log.warn("Could not mark channel # (%s) as bad, Pika could be corrupt", self._client.channel_number)
raise ret_vals
if len(ret_vals) == 0:
return None
elif len(ret_vals) == 1:
return ret_vals[0]
return tuple(ret_vals)
def declare_exchange_impl(self, exchange, exchange_type='topic', durable=False, auto_delete=True):
#log.debug("AMQPTransport.declare_exchange_impl(%s): %s, T %s, D %s, AD %s", self._client.channel_number, exchange, exchange_type, durable, auto_delete)
arguments = {}
if os.environ.get('QUEUE_BLAME', None) is not None:
testid = os.environ['QUEUE_BLAME']
arguments.update({'created-by': testid})
self._sync_call(self._client.exchange_declare, 'callback',
exchange=exchange,
type=exchange_type,
durable=durable,
auto_delete=auto_delete,
arguments=arguments)
def delete_exchange_impl(self, exchange, **kwargs):
log.debug("AMQPTransport.delete_exchange_impl(%s): %s", self._client.channel_number, exchange)
self._sync_call(self._client.exchange_delete, 'callback', exchange=exchange)
def declare_queue_impl(self, queue, durable=False, auto_delete=True):
#log.debug("AMQPTransport.declare_queue_impl(%s): %s, D %s, AD %s", self._client.channel_number, queue, durable, auto_delete)
arguments = {}
if os.environ.get('QUEUE_BLAME', None) is not None:
testid = os.environ['QUEUE_BLAME']
arguments.update({'created-by': testid})
frame = self._sync_call(self._client.queue_declare, 'callback',
queue=queue or '',
auto_delete=auto_delete,
durable=durable,
arguments=arguments)
return frame.method.queue
def delete_queue_impl(self, queue, **kwargs):
log.debug("AMQPTransport.delete_queue_impl(%s): %s", self._client.channel_number, queue)
self._sync_call(self._client.queue_delete, 'callback', queue=queue)
def bind_impl(self, exchange, queue, binding):
#log.debug("AMQPTransport.bind_impl(%s): EX %s, Q %s, B %s", self._client.channel_number, exchange, queue, binding)
self._sync_call(self._client.queue_bind, 'callback',
queue=queue,
exchange=exchange,
routing_key=binding)
def unbind_impl(self, exchange, queue, binding):
#log.debug("AMQPTransport.unbind_impl(%s): EX %s, Q %s, B %s", self._client.channel_number, exchange, queue, binding)
self._sync_call(self._client.queue_unbind, 'callback', queue=queue,
exchange=exchange,
routing_key=binding)
def ack_impl(self, delivery_tag):
"""
Acks a message.
"""
#log.debug("AMQPTransport.ack(%s): %s", self._client.channel_number, delivery_tag)
self._client.basic_ack(delivery_tag)
def reject_impl(self, delivery_tag, requeue=False):
"""
Rejects a message.
"""
self._client.basic_reject(delivery_tag, requeue=requeue)
def start_consume_impl(self, callback, queue, no_ack=False, exclusive=False):
"""
Starts consuming on a queue.
Will asynchronously deliver messages to the callback method supplied.
@return A consumer tag to be used when stop_consume_impl is called.
"""
#log.debug("AMQPTransport.start_consume_impl(%s): %s", self._client.channel_number, queue)
consumer_tag = self._client.basic_consume(callback,
queue=queue,
no_ack=no_ack,
exclusive=exclusive)
return consumer_tag
def stop_consume_impl(self, consumer_tag):
"""
Stops consuming by consumer tag.
"""
#log.debug("AMQPTransport.stop_consume_impl(%s): %s", self._client.channel_number, consumer_tag)
self._sync_call(self._client.basic_cancel, 'callback', consumer_tag)
# PIKA 0.9.5 / GEVENT interaction problem here
# we get called back too early, the basic_cancel hasn't really finished processing yet. we need
# to wait until our consumer tag is removed from the pika channel's consumers dict.
# See: https://gist.github.com/3751870
attempts = 5
while attempts > 0:
if consumer_tag not in self._client._consumers:
break
else:
log.debug("stop_consume_impl waiting for ctag to be removed from consumers, attempts rem: %s", attempts)
attempts -= 1
sleep(1)
if consumer_tag in self._client._consumers:
raise TransportError("stop_consume_impl did not complete in the expected amount of time, transport may be compromised")
def setup_listener(self, binding, default_cb):
"""
Calls setup listener via the default callback passed in.
"""
return default_cb(self, binding)
def get_stats_impl(self, queue):
"""
Gets a tuple of number of messages, number of consumers on a queue.
"""
log.debug("AMQPTransport.get_stats_impl(%s): Q %s", self._client.channel_number, queue)
frame = self._sync_call(self._client.queue_declare, 'callback',
queue=queue or '',
passive=True)
return frame.method.message_count, frame.method.consumer_count
def purge_impl(self, queue):
"""
Purges a queue.
"""
log.debug("AMQPTransport.purge_impl(%s): Q %s", self._client.channel_number, queue)
self._sync_call(self._client.queue_purge, 'callback', queue=queue)
def qos_impl(self, prefetch_size=0, prefetch_count=0, global_=False):
"""
Adjusts quality of service for a channel.
"""
#log.debug("AMQPTransport.qos_impl(%s): pf_size %s, pf_count %s, global_ %s", self._client.channel_number, prefetch_size, prefetch_count, global_)
self._sync_call(self._client.basic_qos, 'callback', prefetch_size=prefetch_size, prefetch_count=prefetch_count, global_=global_)
def publish_impl(self, exchange, routing_key, body, properties, immediate=False, mandatory=False, durable_msg=False):
"""
Publishes a message on an exchange.
"""
#log.debug("AMQPTransport.publish(%s): ex %s key %s", self._client.channel_number, exchange, routing_key)
#log.debug("AMQPTransport.publish(%s): ex %s key %s, size %d", self._client.channel_number, exchange, routing_key, len(repr(body))+len(repr(properties)))
if durable_msg:
delivery_mode = 2
else:
delivery_mode = None
props = BasicProperties(headers=properties,
delivery_mode=delivery_mode)
self._client.basic_publish(exchange=exchange, # todo
routing_key=routing_key, # todo
body=body,
properties=props,
immediate=immediate, # todo
mandatory=mandatory) # todo
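# --- Illustrative usage sketch (not part of the original module) ---
# Hedged example of driving AMQPTransport; obtaining an opened pika channel
# (`amq_chan`) is environment-specific and deliberately elided, and the
# exchange/binding names below are invented for illustration.
def _example_amqp_transport(amq_chan):
    transport = AMQPTransport(amq_chan)
    transport.declare_exchange_impl("events", exchange_type="topic")
    qname = transport.declare_queue_impl("", auto_delete=True)  # server-named queue
    transport.bind_impl("events", qname, "events.#")
    return transport, qname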
class NameTrio(object):
"""
Internal representation of a name/queue/binding (optional).
Created and used at the Endpoint layer and sometimes Channel layer.
"""
def __init__(self, exchange=None, queue=None, binding=None):
"""
Creates a NameTrio.
If either exchange or queue is a tuple, it will use that as a (exchange, queue, binding (optional)) triple.
@param exchange An exchange name. You would typically use the sysname for that.
@param queue Queue name.
@param binding A binding/routing key (used for both recv and send sides). Optional,
and if not specified, defaults to the *internal* queue name.
"""
if isinstance(exchange, tuple):
self._exchange, self._queue, self._binding = list(exchange) + ([None] * (3 - len(exchange)))
elif isinstance(queue, tuple):
self._exchange, self._queue, self._binding = list(queue) + ([None] * (3 - len(queue)))
else:
self._exchange = exchange
self._queue = queue
self._binding = binding
@property
def exchange(self):
return self._exchange
@property
def queue(self):
return self._queue
@property
def binding(self):
return self._binding or self._queue
def __str__(self):
return "NP (%s,%s,B: %s)" % (self.exchange, self.queue, self.binding)
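# --- Illustrative usage sketch (not part of the original module) ---
# Hedged examples of NameTrio semantics: a tuple populates the fields in
# order, and the binding falls back to the queue name when not given.
def _example_name_trio():
    nt = NameTrio(("sysname", "some-queue"))
    assert nt.binding == "some-queue"  # defaults to the queue name
    nt2 = NameTrio(exchange="sysname", queue="q", binding="rpc.q")
    assert nt2.binding == "rpc.q"
    return nt, nt2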
class TopicTrie(object):
"""
Support class for building a routing device to do amqp-like pattern matching.
Used for events/pubsub in our system with the local transport. Efficiently stores all registered
subscription topic trees in a trie structure, handling wildcards * and #.
See:
http://www.zeromq.org/whitepapers:message-matching (doesn't handle # so scrapped)
http://www.rabbitmq.com/blog/2010/09/14/very-fast-and-scalable-topic-routing-part-1/
http://www.rabbitmq.com/blog/2011/03/28/very-fast-and-scalable-topic-routing-part-2/
"""
class Node(object):
"""
Internal node of a trie.
Stores two data points: a token (literal string, '*', or '#', or None if used as root element),
and a set of "patterns" aka a ref to an object representing a bind.
"""
def __init__(self, token, patterns=None):
self.token = token
self.patterns = patterns or []
self.children = {}
def get_or_create_child(self, token):
"""
Returns a child node with the given token.
If it doesn't already exist, it is created, otherwise the existing one is returned.
"""
if token in self.children:
return self.children[token]
new_node = TopicTrie.Node(token)
self.children[token] = new_node
return new_node
def get_all_matches(self, topics):
"""
Given a list of topic tokens, returns all patterns stored in child nodes/self that match the topic tokens.
This is a depth-first search pruned by token, with special handling for both wildcard types.
"""
results = []
if len(topics) == 0:
# terminal point, return any pattern we have here
return self.patterns
cur_token = topics[0]
rem_tokens = topics[1:] # will always be a list, even if empty or 1-len
#log.debug('get_all_matches(%s): cur_token %s, rem_tokens %s', self.token, cur_token, rem_tokens)
# child node direct matching
if cur_token in self.children:
results.extend(self.children[cur_token].get_all_matches(rem_tokens))
# now '*' wildcard
if '*' in self.children:
results.extend(self.children['*'].get_all_matches(rem_tokens))
# '#' means any number of tokens - naive method of descent, we'll feed it nothing to start. Then chop the full
# topics all the way down, put the results in a set to remove duplicates, and also any patterns on self.
if '#' in self.children:
# keep popping off and descend, make a set out of results
all_wild_childs = set()
for i in xrange(len(topics)):
res = self.children['#'].get_all_matches(topics[i:])
map(all_wild_childs.add, res)
results.extend(all_wild_childs)
results.extend(self.children['#'].patterns) # any patterns defined in # are legal too
return results
def __init__(self):
"""
Creates a dummy root node that all topic trees hang off of.
"""
self.root = self.Node(None)
def add_topic_tree(self, topic_tree, pattern):
"""
Splits a string topic_tree into tokens (by .) and recursively adds them to the trie.
Adds the pattern at the terminal node for later retrieval.
"""
topics = topic_tree.split(".")
curnode = self.root
for topic in topics:
curnode = curnode.get_or_create_child(topic)
if pattern not in curnode.patterns:
curnode.patterns.append(pattern)
def remove_topic_tree(self, topic_tree, pattern):
"""
Splits a string topic_tree into tokens (by .) and removes the pattern from the terminal node.
@TODO should remove empty nodes
"""
topics = topic_tree.split(".")
curnode = self.root
for topic in topics:
curnode = curnode.get_or_create_child(topic)
if pattern in curnode.patterns:
curnode.patterns.remove(pattern)
def get_all_matches(self, topic_tree):
"""
Returns a list of all matches for a given topic tree string.
Creates a set out of the matching patterns, so multiple binds matching on the same pattern only
return once.
"""
topics = topic_tree.split(".")
return set(self.root.get_all_matches(topics))
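# --- Illustrative usage sketch (not part of the original module) ---
# Hedged example of the wildcard semantics described above: '*' matches
# exactly one token, '#' matches zero or more tokens.
def _example_topic_trie():
    tt = TopicTrie()
    tt.add_topic_tree("events.*.started", "q-one-token")
    tt.add_topic_tree("events.#", "q-any-depth")
    assert tt.get_all_matches("events.proc.started") == set(["q-one-token", "q-any-depth"])
    assert tt.get_all_matches("events.a.b.c") == set(["q-any-depth"])  # '*' needs exactly one token
    return tt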
class LocalRouter(object):
"""
A RabbitMQ-like routing device implemented with gevent mechanisms for an in-memory broker.
Using LocalTransport, can handle topic-exchange-like communication in ION within the context
of a single container.
"""
class ConsumerClosedMessage(object):
"""
Dummy object used to exit queue get looping greenlets.
"""
pass
def __init__(self, sysname):
self._sysname = sysname
self.ready = Event()
# exchange/queues/bindings
self._exchanges = {} # names -> { subscriber, topictrie(queue name) }
self._queues = {} # names -> gevent queue
self._bindings_by_queue = defaultdict(list) # queue name -> [(ex, binding)]
self._lock_declarables = coros.RLock() # exchanges, queues, bindings, routing method
# consumers
self._consumers = defaultdict(list) # queue name -> [ctag, channel._on_deliver]
self._consumers_by_ctag = {} # ctag -> queue_name ??
self._ctag_pool = IDPool() # pool of consumer tags
self._lock_consumers = coros.RLock() # lock for interacting with any consumer related attrs
# deliveries
self._unacked = {} # dtag -> (ctag, msg)
self._lock_unacked = coros.RLock() # lock for interacting with unacked field
self._gl_msgs = None
self._gl_pool = Pool()
self.gl_ioloop = None
self.errors = []
@property
def _connect_addr(self):
return "inproc://%s" % self._sysname
def start(self):
"""
Starts all internal greenlets of this router device.
"""
self._queue_incoming = Queue()
self._gl_msgs = self._gl_pool.spawn(self._run_gl_msgs)
self._gl_msgs._glname = "pyon.net AMQP msgs"
self._gl_msgs.link_exception(self._child_failed)
self.gl_ioloop = spawn(self._run_ioloop)
self.gl_ioloop._glname = "pyon.net AMQP ioloop"
def stop(self):
self._gl_msgs.kill() # @TODO: better
self._gl_pool.join(timeout=5, raise_error=True)
def _run_gl_msgs(self):
self.ready.set()
while True:
ex, rkey, body, props = self._queue_incoming.get()
try:
with self._lock_declarables:
self._route(ex, rkey, body, props)
except Exception as e:
self.errors.append(e)
log.exception("Routing message")
def _route(self, exchange, routing_key, body, props):
"""
Delivers incoming messages into queues based on known routes.
This entire method runs in a lock (likely pretty slow).
"""
assert exchange in self._exchanges, "Unknown exchange %s" % exchange
queues = self._exchanges[exchange].get_all_matches(routing_key)
log.debug("route: ex %s, rkey %s, matched %s routes", exchange, routing_key, len(queues))
# deliver to each queue
for q in queues:
assert q in self._queues
log.debug("deliver -> %s", q)
self._queues[q].put((exchange, routing_key, body, props))
def _child_failed(self, gproc):
"""
Handler method for when any child worker thread dies with error.
Aborts the "ioloop" greenlet.
"""
log.error("Child (%s) failed with an exception: %s", gproc, gproc.exception)
if self.gl_ioloop:
self.gl_ioloop.kill(exception=gproc.exception, block=False)
def _run_ioloop(self):
"""
An "IOLoop"-like greenlet - sits and waits until the pool is finished.
Fits with the AMQP node.
"""
self._gl_pool.join()
def publish(self, exchange, routing_key, body, properties, immediate=False, mandatory=False):
self._queue_incoming.put((exchange, routing_key, body, properties))
sleep(0.0001) # really wish switch would work instead of a sleep, seems wrong
def declare_exchange(self, exchange, **kwargs):
with self._lock_declarables:
if exchange not in self._exchanges:
self._exchanges[exchange] = TopicTrie()
def delete_exchange(self, exchange, **kwargs):
with self._lock_declarables:
if exchange in self._exchanges:
del self._exchanges[exchange]
def declare_queue(self, queue, **kwargs):
with self._lock_declarables:
# come up with new queue name if none specified
if queue is None or queue == '':
while True:
proposed = "q-%s" % str(uuid4())[0:10]
if proposed not in self._queues:
queue = proposed
break
if queue not in self._queues:
self._queues[queue] = Queue()
return queue
def delete_queue(self, queue, **kwargs):
with self._lock_declarables:
if queue in self._queues:
del self._queues[queue]
# kill bindings
for ex, binding in self._bindings_by_queue[queue]:
if ex in self._exchanges:
self._exchanges[ex].remove_topic_tree(binding, queue)
self._bindings_by_queue.pop(queue)
def bind(self, exchange, queue, binding):
log.info("Bind: ex %s, q %s, b %s", exchange, queue, binding)
with self._lock_declarables:
assert exchange in self._exchanges, "Missing exchange %s in list of exchanges" % str(exchange)
assert queue in self._queues
tt = self._exchanges[exchange]
tt.add_topic_tree(binding, queue)
self._bindings_by_queue[queue].append((exchange, binding))
def unbind(self, exchange, queue, binding):
with self._lock_declarables:
assert exchange in self._exchanges
assert queue in self._queues
self._exchanges[exchange].remove_topic_tree(binding, queue)
for i, val in enumerate(self._bindings_by_queue[queue]):
ex, b = val
if ex == exchange and b == binding:
self._bindings_by_queue[queue].pop(i)
break
def start_consume(self, callback, queue, no_ack=False, exclusive=False):
assert queue in self._queues
with self._lock_consumers:
new_ctag = self._generate_ctag()
assert new_ctag not in self._consumers_by_ctag
with self._lock_declarables:
gl = self._gl_pool.spawn(self._run_consumer, new_ctag, queue, self._queues[queue], callback)
gl.link_exception(self._child_failed)
self._consumers[queue].append((new_ctag, callback, no_ack, exclusive, gl))
self._consumers_by_ctag[new_ctag] = queue
return new_ctag
def stop_consume(self, consumer_tag):
assert consumer_tag in self._consumers_by_ctag
with self._lock_consumers:
queue = self._consumers_by_ctag[consumer_tag]
self._consumers_by_ctag.pop(consumer_tag)
for i, consumer in enumerate(self._consumers[queue]):
if consumer[0] == consumer_tag:
# notify consumer greenlet that we want to stop
if queue in self._queues:
self._queues[queue].put(self.ConsumerClosedMessage())
consumer[4].join(timeout=5)
consumer[4].kill()
# @TODO reject any unacked messages
self._consumers[queue].pop(i)
break
self._return_ctag(consumer_tag)
def _run_consumer(self, ctag, queue_name, gqueue, callback):
cnt = 0
while True:
m = gqueue.get()
if isinstance(m, self.ConsumerClosedMessage):
break
exchange, routing_key, body, props = m
# create method frame
method_frame = DotDict()
method_frame['consumer_tag'] = ctag
method_frame['redelivered'] = False # @TODO
method_frame['exchange'] = exchange
method_frame['routing_key'] = routing_key
# create header frame
header_frame = DotDict()
header_frame['headers'] = props.copy()
# make delivery tag for ack/reject later
dtag = self._generate_dtag(ctag, cnt)
cnt += 1
with self._lock_unacked:
self._unacked[dtag] = (ctag, queue_name, m)
method_frame['delivery_tag'] = dtag
# deliver to callback
try:
callback(self, method_frame, header_frame, body)
except Exception:
log.exception("delivering to consumer, ignore!")
def _generate_ctag(self):
return "zctag-%s" % self._ctag_pool.get_id()
def _return_ctag(self, ctag):
self._ctag_pool.release_id(int(ctag.split("-")[-1]))
def _generate_dtag(self, ctag, cnt):
"""
Generates a unique delivery tag for each consumer.
Greenlet-safe, no need to lock.
"""
return "%s-%s" % (ctag, cnt)
def ack(self, delivery_tag):
assert delivery_tag in self._unacked
with self._lock_unacked:
del self._unacked[delivery_tag]
def reject(self, delivery_tag, requeue=False):
assert delivery_tag in self._unacked
with self._lock_unacked:
_, queue, m = self._unacked.pop(delivery_tag)
if requeue:
log.warn("REQUEUE: EXPERIMENTAL %s", delivery_tag)
self._queues[queue].put(m)
def transport_close(self, transport):
log.warn("LocalRouter.transport_close: %s TODO", transport)
# @TODO reject all messages in unacked spot
# turn off any consumers from this transport
def get_stats(self, queue):
"""
Returns a 2-tuple of (# msgs, # consumers) on a given queue.
"""
assert queue in self._queues
consumers = 0
if queue in self._consumers:
consumers = len(self._consumers[queue])
# the queue qsize gives you number of undelivered messages, which i think is what AMQP does too
return (self._queues[queue].qsize(), consumers)
def purge(self, queue):
"""
Deletes all contents of a queue.
@TODO could end up in a race with an infinite producer
"""
assert queue in self._queues
with Timeout(5):
while not self._queues[queue].empty():
self._queues[queue].get_nowait()
class LocalTransport(BaseTransport):
def __init__(self, broker, ch_number):
self._broker = broker
self._ch_number = ch_number
self._active = True
self._close_callbacks = []
def declare_exchange_impl(self, exchange, **kwargs):
self._broker.declare_exchange(exchange, **kwargs)
def delete_exchange_impl(self, exchange, **kwargs):
self._broker.delete_exchange(exchange, **kwargs)
def declare_queue_impl(self, queue, **kwargs):
return self._broker.declare_queue(queue, **kwargs)
def delete_queue_impl(self, queue, **kwargs):
self._broker.delete_queue(queue, **kwargs)
def bind_impl(self, exchange, queue, binding):
self._broker.bind(exchange, queue, binding)
def unbind_impl(self, exchange, queue, binding):
self._broker.unbind(exchange, queue, binding)
def publish_impl(self, exchange, routing_key, body, properties, immediate=False, mandatory=False, durable_msg=False):
self._broker.publish(exchange, routing_key, body, properties, immediate=immediate, mandatory=mandatory)
def start_consume_impl(self, callback, queue, no_ack=False, exclusive=False):
return self._broker.start_consume(callback, queue, no_ack=no_ack, exclusive=exclusive)
def stop_consume_impl(self, consumer_tag):
self._broker.stop_consume(consumer_tag)
def ack_impl(self, delivery_tag):
self._broker.ack(delivery_tag)
def reject_impl(self, delivery_tag, requeue=False):
self._broker.reject(delivery_tag, requeue=requeue)
def close(self):
self._broker.transport_close(self)
self._active = False
for cb in self._close_callbacks:
cb(self, 200, "Closed ok") # @TODO should come elsewhere
def add_on_close_callback(self, cb):
self._close_callbacks.append(cb)
@property
def active(self):
return self._active
@property
def channel_number(self):
return self._ch_number
def qos_impl(self, prefetch_size=0, prefetch_count=0, global_=False):
log.info("TODO: QOS")
def get_stats_impl(self, queue):
return self._broker.get_stats(queue)
def purge_impl(self, queue):
return self._broker.purge(queue)
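# --- Illustrative usage sketch (not part of the original module) ---
# Hedged end-to-end example wiring LocalRouter with a LocalTransport for
# in-process routing; assumes a working gevent environment, and all names
# below ("demo-sys", "demo-ex", ...) are invented for illustration.
def _example_local_router():
    router = LocalRouter("demo-sys")
    router.start()
    router.ready.wait()
    transport = LocalTransport(router, 1)
    transport.declare_exchange_impl("demo-ex")
    queue = transport.declare_queue_impl("demo-queue")
    transport.bind_impl("demo-ex", queue, "demo.*")
    transport.publish_impl("demo-ex", "demo.key", "body", {})
    sleep(0.01)  # yield so the router greenlet can deliver
    msgs, consumers = transport.get_stats_impl(queue)
    assert (msgs, consumers) == (1, 0)
    transport.close()
    router.stop()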
| []
| []
| [
"QUEUE_BLAME"
]
| [] | ["QUEUE_BLAME"] | python | 1 | 0 | |
integration/e2e_test.go | // +build !mainnet
package integration
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/url"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/cosmos/cosmos-sdk/server"
bankcli "github.com/cosmos/cosmos-sdk/x/bank/client/testutil"
"github.com/stretchr/testify/assert"
ctypes "github.com/ovrclk/akash/provider/cluster/types"
providerCmd "github.com/ovrclk/akash/provider/cmd"
"github.com/ovrclk/akash/sdl"
ccli "github.com/ovrclk/akash/x/cert/client/cli"
mcli "github.com/ovrclk/akash/x/market/client/cli"
"github.com/ovrclk/akash/x/provider/client/cli"
"github.com/ovrclk/akash/x/provider/types"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/crypto/hd"
"github.com/cosmos/cosmos-sdk/crypto/keyring"
"github.com/cosmos/cosmos-sdk/testutil/network"
sdk "github.com/cosmos/cosmos-sdk/types"
ptestutil "github.com/ovrclk/akash/provider/testutil"
"github.com/ovrclk/akash/testutil"
deploycli "github.com/ovrclk/akash/x/deployment/client/cli"
dtypes "github.com/ovrclk/akash/x/deployment/types"
mtypes "github.com/ovrclk/akash/x/market/types"
)
// IntegrationTestSuite wraps testing components
type IntegrationTestSuite struct {
suite.Suite
cfg network.Config
network *network.Network
validator *network.Validator
keyProvider keyring.Info
keyTenant keyring.Info
appHost string
appPort string
}
type E2EContainerToContainer struct {
IntegrationTestSuite
}
type E2EAppNodePort struct {
IntegrationTestSuite
}
type E2EDeploymentUpdate struct {
IntegrationTestSuite
}
type E2EApp struct {
IntegrationTestSuite
}
func (s *IntegrationTestSuite) SetupSuite() {
s.appHost, s.appPort = appEnv(s.T())
// Create a network for test
cfg := testutil.DefaultConfig()
cfg.NumValidators = 1
cfg.MinGasPrices = ""
s.cfg = cfg
s.network = network.New(s.T(), cfg)
kb := s.network.Validators[0].ClientCtx.Keyring
_, _, err := kb.NewMnemonic("keyBar", keyring.English, sdk.FullFundraiserPath, hd.Secp256k1)
s.Require().NoError(err)
_, _, err = kb.NewMnemonic("keyFoo", keyring.English, sdk.FullFundraiserPath, hd.Secp256k1)
s.Require().NoError(err)
// Wait for the network to start
_, err = s.network.WaitForHeight(1)
s.Require().NoError(err)
//
s.validator = s.network.Validators[0]
// Send coins value
sendTokens := sdk.NewCoin(s.cfg.BondDenom, mtypes.DefaultBidMinDeposit.Amount.MulRaw(4))
// Setup a Provider key
s.keyProvider, err = s.validator.ClientCtx.Keyring.Key("keyFoo")
s.Require().NoError(err)
// give provider some coins
res, err := bankcli.MsgSendExec(
s.validator.ClientCtx,
s.validator.Address,
s.keyProvider.GetAddress(),
sdk.NewCoins(sendTokens),
fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(10))).String()),
fmt.Sprintf("--gas=%d", flags.DefaultGasLimit),
)
s.Require().NoError(err)
s.Require().NoError(s.network.WaitForNextBlock())
validateTxSuccessful(s.T(), s.validator.ClientCtx, res.Bytes())
// Set up second tenant key
s.keyTenant, err = s.validator.ClientCtx.Keyring.Key("keyBar")
s.Require().NoError(err)
// give tenant some coins too
res, err = bankcli.MsgSendExec(
s.validator.ClientCtx,
s.validator.Address,
s.keyTenant.GetAddress(),
sdk.NewCoins(sendTokens),
fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(10))).String()),
fmt.Sprintf("--gas=%d", flags.DefaultGasLimit),
)
s.Require().NoError(err)
s.Require().NoError(s.network.WaitForNextBlock())
validateTxSuccessful(s.T(), s.validator.ClientCtx, res.Bytes())
// address for provider to listen on
_, port, err := server.FreeTCPAddr()
require.NoError(s.T(), err)
provHost := fmt.Sprintf("localhost:%s", port)
provURL := url.URL{
Host: provHost,
Scheme: "https",
}
provFileStr := fmt.Sprintf(providerTemplate, provURL.String())
tmpFile, err := ioutil.TempFile(s.network.BaseDir, "provider.yaml")
require.NoError(s.T(), err)
_, err = tmpFile.WriteString(provFileStr)
require.NoError(s.T(), err)
defer func() {
err := tmpFile.Close()
require.NoError(s.T(), err)
}()
fstat, err := tmpFile.Stat()
require.NoError(s.T(), err)
// create Provider blockchain declaration
res, err = cli.TxCreateProviderExec(
s.validator.ClientCtx,
s.keyProvider.GetAddress(),
fmt.Sprintf("%s/%s", s.network.BaseDir, fstat.Name()),
fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(10))).String()),
fmt.Sprintf("--gas=%d", flags.DefaultGasLimit),
)
s.Require().NoError(err)
s.Require().NoError(s.network.WaitForNextBlock())
validateTxSuccessful(s.T(), s.validator.ClientCtx, res.Bytes())
// Create tenant's certificate
res, err = ccli.TxCreateServerExec(
s.validator.ClientCtx,
s.keyTenant.GetAddress(),
"localhost",
fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(10))).String()),
fmt.Sprintf("--gas=%d", flags.DefaultGasLimit),
)
s.Require().NoError(err)
s.Require().NoError(s.network.WaitForNextBlock())
validateTxSuccessful(s.T(), s.validator.ClientCtx, res.Bytes())
// Create provider's certificate
res, err = ccli.TxCreateServerExec(
s.validator.ClientCtx,
s.keyProvider.GetAddress(),
"localhost",
fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(10))).String()),
fmt.Sprintf("--gas=%d", flags.DefaultGasLimit),
)
s.Require().NoError(err)
s.Require().NoError(s.network.WaitForNextBlock())
validateTxSuccessful(s.T(), s.validator.ClientCtx, res.Bytes())
pemSrc := fmt.Sprintf("%s/%s.pem", s.validator.ClientCtx.HomeDir, s.keyProvider.GetAddress().String())
pemDst := fmt.Sprintf("%s/%s.pem", strings.Replace(s.validator.ClientCtx.HomeDir, "simd", "simcli", 1), s.keyProvider.GetAddress().String())
input, err := ioutil.ReadFile(pemSrc)
s.Require().NoError(err)
err = ioutil.WriteFile(pemDst, input, 0400)
s.Require().NoError(err)
pemSrc = fmt.Sprintf("%s/%s.pem", s.validator.ClientCtx.HomeDir, s.keyTenant.GetAddress().String())
pemDst = fmt.Sprintf("%s/%s.pem", strings.Replace(s.validator.ClientCtx.HomeDir, "simd", "simcli", 1), s.keyTenant.GetAddress().String())
input, err = ioutil.ReadFile(pemSrc)
s.Require().NoError(err)
err = ioutil.WriteFile(pemDst, input, 0400)
s.Require().NoError(err)
localCtx := s.validator.ClientCtx.WithOutputFormat("json")
// test query providers
resp, err := cli.QueryProvidersExec(localCtx)
s.Require().NoError(err)
out := &types.QueryProvidersResponse{}
err = s.validator.ClientCtx.JSONMarshaler.UnmarshalJSON(resp.Bytes(), out)
s.Require().NoError(err)
s.Require().Len(out.Providers, 1, "Provider Creation Failed")
providers := out.Providers
s.Require().Equal(s.keyProvider.GetAddress().String(), providers[0].Owner)
// test query provider
createdProvider := providers[0]
resp, err = cli.QueryProviderExec(localCtx, createdProvider.Owner)
s.Require().NoError(err)
var provider types.Provider
err = s.validator.ClientCtx.JSONMarshaler.UnmarshalJSON(resp.Bytes(), &provider)
s.Require().NoError(err)
s.Require().Equal(createdProvider, provider)
// Run Provider service
keyName := s.keyProvider.GetName()
// Change the akash home directory for CLI to access the test keyring
cliHome := strings.Replace(s.validator.ClientCtx.HomeDir, "simd", "simcli", 1)
cctx := s.validator.ClientCtx
go func() {
_, err := ptestutil.RunLocalProvider(cctx,
cctx.ChainID,
s.validator.RPCAddress,
cliHome,
keyName,
provURL.Host,
fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(20))).String()),
"--deployment-runtime-class=none",
)
s.Require().NoError(err)
}()
s.Require().NoError(s.network.WaitForNextBlock())
}
func (s *IntegrationTestSuite) TearDownSuite() {
s.T().Log("Cleaning up after E2E tests")
keyTenant, err := s.validator.ClientCtx.Keyring.Key("keyBar")
s.Require().NoError(err)
resp, err := deploycli.QueryDeploymentsExec(s.validator.ClientCtx.WithOutputFormat("json"))
s.Require().NoError(err)
deployResp := &dtypes.QueryDeploymentsResponse{}
err = s.validator.ClientCtx.JSONMarshaler.UnmarshalJSON(resp.Bytes(), deployResp)
s.Require().NoError(err)
s.Require().NotEqual(0, len(deployResp.Deployments), "no deployments created")
deployments := deployResp.Deployments
s.T().Logf("Cleaning up %d deployments", len(deployments))
for _, createdDep := range deployments {
// teardown lease
res, err := deploycli.TxCloseDeploymentExec(
s.validator.ClientCtx,
keyTenant.GetAddress(),
fmt.Sprintf("--owner=%s", createdDep.Groups[0].GroupID.Owner),
fmt.Sprintf("--dseq=%v", createdDep.Deployment.DeploymentID.DSeq),
fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(10))).String()),
fmt.Sprintf("--gas=%d", flags.DefaultGasLimit),
)
s.Require().NoError(err)
s.Require().NoError(s.waitForBlocksCommitted(1))
validateTxSuccessful(s.T(), s.validator.ClientCtx, res.Bytes())
}
// test query deployments with state filter closed
resp, err = deploycli.QueryDeploymentsExec(
s.validator.ClientCtx.WithOutputFormat("json"),
"--state=closed",
)
s.Require().NoError(err)
qResp := &dtypes.QueryDeploymentsResponse{}
err = s.validator.ClientCtx.JSONMarshaler.UnmarshalJSON(resp.Bytes(), qResp)
s.Require().NoError(err)
s.Require().Equal(len(deployResp.Deployments), len(qResp.Deployments), "Deployment Close Failed")
s.network.Cleanup()
}
func newestLease(leases []mtypes.QueryLeaseResponse) mtypes.Lease {
result := mtypes.Lease{}
assigned := false
for _, lease := range leases {
if !assigned {
result = lease.Lease
assigned = true
} else if result.GetLeaseID().DSeq < lease.Lease.GetLeaseID().DSeq {
result = lease.Lease
}
}
return result
}
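// Illustrative sketch (not part of the original suite): newestLease keeps the
// lease whose ID carries the highest deployment sequence (DSeq). The composite
// literals below are trimmed to the fields the comparison relies on; the field
// names are assumed from their use elsewhere in this file.
func exampleNewestLease() mtypes.Lease {
	leases := []mtypes.QueryLeaseResponse{
		{Lease: mtypes.Lease{LeaseID: mtypes.LeaseID{DSeq: 100}}},
		{Lease: mtypes.Lease{LeaseID: mtypes.LeaseID{DSeq: 103}}},
	}
	return newestLease(leases) // returns the lease with DSeq 103
}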
func getKubernetesIP() string {
return os.Getenv("KUBE_NODE_IP")
}
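// Hedged note: KUBE_NODE_IP is expected to be exported by the test harness so
// the NodePort connectivity check below can dial the cluster node directly,
// e.g. (invented invocation): KUBE_NODE_IP=$(minikube ip) go test ./integration/...
// An empty value simply skips that check.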
func (s *E2EContainerToContainer) TestE2EContainerToContainer() {
// create a deployment
deploymentPath, err := filepath.Abs("../x/deployment/testdata/deployment-v2-c2c.yaml")
s.Require().NoError(err)
deploymentID := dtypes.DeploymentID{
Owner: s.keyTenant.GetAddress().String(),
DSeq: uint64(100),
}
// Create Deployments
res, err := deploycli.TxCreateDeploymentExec(
s.validator.ClientCtx,
s.keyTenant.GetAddress(),
deploymentPath,
fmt.Sprintf("--%s", flags.FlagSkipConfirmation),
fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(20))).String()),
fmt.Sprintf("--gas=%d", flags.DefaultGasLimit),
fmt.Sprintf("--deposit=%s", dtypes.DefaultDeploymentMinDeposit),
fmt.Sprintf("--dseq=%v", deploymentID.DSeq),
)
s.Require().NoError(err)
s.Require().NoError(s.waitForBlocksCommitted(7))
validateTxSuccessful(s.T(), s.validator.ClientCtx, res.Bytes())
bidID := mtypes.MakeBidID(
mtypes.MakeOrderID(dtypes.MakeGroupID(deploymentID, 1), 1),
s.keyProvider.GetAddress(),
)
// check bid
_, err = mcli.QueryBidExec(s.validator.ClientCtx, bidID)
s.Require().NoError(err)
// create lease
res, err = mcli.TxCreateLeaseExec(
s.validator.ClientCtx,
bidID,
s.keyTenant.GetAddress(),
fmt.Sprintf("--%s", flags.FlagSkipConfirmation),
fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(20))).String()),
fmt.Sprintf("--gas=%d", flags.DefaultGasLimit),
)
s.Require().NoError(err)
s.Require().NoError(s.waitForBlocksCommitted(2))
validateTxSuccessful(s.T(), s.validator.ClientCtx, res.Bytes())
lid := bidID.LeaseID()
// Send Manifest to Provider ----------------------------------------------
_, err = ptestutil.TestSendManifest(
s.validator.ClientCtx.WithOutputFormat("json"),
lid.BidID(),
deploymentPath,
fmt.Sprintf("--%s=%s", flags.FlagFrom, s.keyTenant.GetAddress().String()),
fmt.Sprintf("--%s=%s", flags.FlagHome, s.validator.ClientCtx.HomeDir),
)
s.Require().NoError(err)
s.Require().NoError(s.waitForBlocksCommitted(2))
// Hit the endpoint to set a key in redis, foo = bar
appURL := fmt.Sprintf("http://%s:%s/SET/foo/bar", s.appHost, s.appPort)
const testHost = "webdistest.localhost"
const attempts = 120
httpResp := queryAppWithRetries(s.T(), appURL, testHost, attempts)
bodyData, err := ioutil.ReadAll(httpResp.Body)
s.Require().NoError(err)
s.Require().Equal(`{"SET":[true,"OK"]}`, string(bodyData))
// Hit the endpoint to read a key in redis, foo
appURL = fmt.Sprintf("http://%s:%s/GET/foo", s.appHost, s.appPort)
httpResp = queryAppWithRetries(s.T(), appURL, testHost, attempts)
bodyData, err = ioutil.ReadAll(httpResp.Body)
s.Require().NoError(err)
s.Require().Equal(`{"GET":"bar"}`, string(bodyData)) // Check that the value is bar
}
func (s *E2EAppNodePort) TestE2EAppNodePort() {
// create a deployment
deploymentPath, err := filepath.Abs("../x/deployment/testdata/deployment-v2-nodeport.yaml")
s.Require().NoError(err)
deploymentID := dtypes.DeploymentID{
Owner: s.keyTenant.GetAddress().String(),
DSeq: uint64(101),
}
// Create Deployments
res, err := deploycli.TxCreateDeploymentExec(
s.validator.ClientCtx,
s.keyTenant.GetAddress(),
deploymentPath,
fmt.Sprintf("--%s", flags.FlagSkipConfirmation),
fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(20))).String()),
fmt.Sprintf("--gas=%d", flags.DefaultGasLimit),
fmt.Sprintf("--dseq=%v", deploymentID.DSeq),
)
s.Require().NoError(err)
s.Require().NoError(s.waitForBlocksCommitted(3))
validateTxSuccessful(s.T(), s.validator.ClientCtx, res.Bytes())
bidID := mtypes.MakeBidID(
mtypes.MakeOrderID(dtypes.MakeGroupID(deploymentID, 1), 1),
s.keyProvider.GetAddress(),
)
// check bid
_, err = mcli.QueryBidExec(s.validator.ClientCtx, bidID)
s.Require().NoError(err)
// create lease
res, err = mcli.TxCreateLeaseExec(
s.validator.ClientCtx,
bidID,
s.keyTenant.GetAddress(),
fmt.Sprintf("--%s", flags.FlagSkipConfirmation),
fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(20))).String()),
fmt.Sprintf("--gas=%d", flags.DefaultGasLimit),
)
s.Require().NoError(err)
s.Require().NoError(s.waitForBlocksCommitted(2))
validateTxSuccessful(s.T(), s.validator.ClientCtx, res.Bytes())
// Assert provider made bid and created lease; test query leases ---------
resp, err := mcli.QueryLeasesExec(s.validator.ClientCtx.WithOutputFormat("json"))
s.Require().NoError(err)
leaseRes := &mtypes.QueryLeasesResponse{}
err = s.validator.ClientCtx.JSONMarshaler.UnmarshalJSON(resp.Bytes(), leaseRes)
s.Require().NoError(err)
s.Require().Len(leaseRes.Leases, 1)
lease := newestLease(leaseRes.Leases)
lid := lease.LeaseID
s.Require().Equal(s.keyProvider.GetAddress().String(), lid.Provider)
// Send Manifest to Provider ----------------------------------------------
_, err = ptestutil.TestSendManifest(
s.validator.ClientCtx.WithOutputFormat("json"),
lid.BidID(),
deploymentPath,
fmt.Sprintf("--%s=%s", flags.FlagFrom, s.keyTenant.GetAddress().String()),
fmt.Sprintf("--%s=%s", flags.FlagHome, s.validator.ClientCtx.HomeDir),
)
s.Require().NoError(err)
s.Require().NoError(s.waitForBlocksCommitted(2))
// Get the lease status
cmdResult, err := providerCmd.ProviderLeaseStatusExec(
s.validator.ClientCtx,
fmt.Sprintf("--%s=%v", "dseq", lid.DSeq),
fmt.Sprintf("--%s=%v", "gseq", lid.GSeq),
fmt.Sprintf("--%s=%v", "oseq", lid.OSeq),
fmt.Sprintf("--%s=%v", "provider", lid.Provider),
fmt.Sprintf("--%s=%s", flags.FlagFrom, s.keyTenant.GetAddress().String()),
fmt.Sprintf("--%s=%s", flags.FlagHome, s.validator.ClientCtx.HomeDir),
)
assert.NoError(s.T(), err)
data := ctypes.LeaseStatus{}
err = json.Unmarshal(cmdResult.Bytes(), &data)
assert.NoError(s.T(), err)
forwardedPort := uint16(0)
portLoop:
for _, entry := range data.ForwardedPorts {
for _, port := range entry {
forwardedPort = port.ExternalPort
break portLoop
}
}
s.Require().NotEqual(uint16(0), forwardedPort)
const maxAttempts = 60
var recvData []byte
var connErr error
var conn net.Conn
kubernetesIP := getKubernetesIP()
if len(kubernetesIP) != 0 {
for attempts := 0; attempts != maxAttempts; attempts++ {
// Connect with a timeout so the test doesn't get stuck here
conn, connErr = net.DialTimeout("tcp", fmt.Sprintf("%s:%d", kubernetesIP, forwardedPort), 2*time.Second)
// If an error, just wait and try again
if connErr != nil {
time.Sleep(time.Duration(500) * time.Millisecond)
continue
}
break
}
// check that a connection was created without any error
s.Require().NoError(connErr)
// Read everything with a timeout
err = conn.SetReadDeadline(time.Now().Add(time.Duration(10) * time.Second))
s.Require().NoError(err)
recvData, err = ioutil.ReadAll(conn)
s.Require().NoError(err)
s.Require().NoError(conn.Close())
s.Require().Regexp("^.*hello world(?s:.)*$", string(recvData))
}
}
func (s *E2EDeploymentUpdate) TestE2EDeploymentUpdate() {
// create a deployment
deploymentPath, err := filepath.Abs("../x/deployment/testdata/deployment-v2-updateA.yaml")
s.Require().NoError(err)
deploymentID := dtypes.DeploymentID{
Owner: s.keyTenant.GetAddress().String(),
DSeq: uint64(102),
}
// Create Deployments
res, err := deploycli.TxCreateDeploymentExec(
s.validator.ClientCtx,
s.keyTenant.GetAddress(),
deploymentPath,
fmt.Sprintf("--%s", flags.FlagSkipConfirmation),
fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(20))).String()),
fmt.Sprintf("--gas=%d", flags.DefaultGasLimit),
fmt.Sprintf("--dseq=%v", deploymentID.DSeq),
)
s.Require().NoError(err)
s.Require().NoError(s.waitForBlocksCommitted(3))
validateTxSuccessful(s.T(), s.validator.ClientCtx, res.Bytes())
bidID := mtypes.MakeBidID(
mtypes.MakeOrderID(dtypes.MakeGroupID(deploymentID, 1), 1),
s.keyProvider.GetAddress(),
)
// check bid
_, err = mcli.QueryBidExec(s.validator.ClientCtx, bidID)
s.Require().NoError(err)
// create lease
res, err = mcli.TxCreateLeaseExec(
s.validator.ClientCtx,
bidID,
s.keyTenant.GetAddress(),
fmt.Sprintf("--%s", flags.FlagSkipConfirmation),
fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(20))).String()),
fmt.Sprintf("--gas=%d", flags.DefaultGasLimit),
)
s.Require().NoError(err)
s.Require().NoError(s.waitForBlocksCommitted(2))
validateTxSuccessful(s.T(), s.validator.ClientCtx, res.Bytes())
// Assert provider made bid and created lease; test query leases ---------
resp, err := mcli.QueryLeasesExec(s.validator.ClientCtx.WithOutputFormat("json"))
s.Require().NoError(err)
leaseRes := &mtypes.QueryLeasesResponse{}
err = s.validator.ClientCtx.JSONMarshaler.UnmarshalJSON(resp.Bytes(), leaseRes)
s.Require().NoError(err)
s.Require().Len(leaseRes.Leases, 1)
lease := newestLease(leaseRes.Leases)
lid := lease.LeaseID
did := lease.GetLeaseID().DeploymentID()
s.Require().Equal(s.keyProvider.GetAddress().String(), lid.Provider)
// Send Manifest to Provider
_, err = ptestutil.TestSendManifest(
s.validator.ClientCtx.WithOutputFormat("json"),
lid.BidID(),
deploymentPath,
fmt.Sprintf("--%s=%s", flags.FlagFrom, s.keyTenant.GetAddress().String()),
fmt.Sprintf("--%s=%s", flags.FlagHome, s.validator.ClientCtx.HomeDir),
)
s.Require().NoError(err)
s.Require().NoError(s.waitForBlocksCommitted(2))
appURL := fmt.Sprintf("http://%s:%s/", s.appHost, s.appPort)
queryAppWithHostname(s.T(), appURL, 50, "testupdatea.localhost")
deploymentPath, err = filepath.Abs("../x/deployment/testdata/deployment-v2-updateB.yaml")
s.Require().NoError(err)
res, err = deploycli.TxUpdateDeploymentExec(s.validator.ClientCtx,
s.keyTenant.GetAddress(),
deploymentPath,
fmt.Sprintf("--owner=%s", lease.GetLeaseID().Owner),
fmt.Sprintf("--dseq=%v", did.GetDSeq()),
fmt.Sprintf("--%s", flags.FlagSkipConfirmation),
fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(20))).String()),
fmt.Sprintf("--gas=%d", flags.DefaultGasLimit),
)
s.Require().NoError(err)
s.Require().NoError(s.waitForBlocksCommitted(2))
validateTxSuccessful(s.T(), s.validator.ClientCtx, res.Bytes())
// Send Updated Manifest to Provider
_, err = ptestutil.TestSendManifest(
s.validator.ClientCtx.WithOutputFormat("json"),
lid.BidID(),
deploymentPath,
fmt.Sprintf("--%s=%s", flags.FlagFrom, s.keyTenant.GetAddress().String()),
fmt.Sprintf("--%s=%s", flags.FlagHome, s.validator.ClientCtx.HomeDir),
)
s.Require().NoError(err)
s.Require().NoError(s.waitForBlocksCommitted(2))
queryAppWithHostname(s.T(), appURL, 50, "testupdateb.localhost")
}
func (s *E2EApp) TestE2EApp() {
// create a deployment
deploymentPath, err := filepath.Abs("../x/deployment/testdata/deployment-v2.yaml")
s.Require().NoError(err)
cctxJSON := s.validator.ClientCtx.WithOutputFormat("json")
deploymentID := dtypes.DeploymentID{
Owner: s.keyTenant.GetAddress().String(),
DSeq: uint64(103),
}
// Create Deployments and assert query to assert
tenantAddr := s.keyTenant.GetAddress().String()
res, err := deploycli.TxCreateDeploymentExec(
s.validator.ClientCtx,
s.keyTenant.GetAddress(),
deploymentPath,
fmt.Sprintf("--%s", flags.FlagSkipConfirmation),
fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(20))).String()),
fmt.Sprintf("--gas=%d", flags.DefaultGasLimit),
fmt.Sprintf("--dseq=%v", deploymentID.DSeq),
)
s.Require().NoError(err)
s.Require().NoError(s.network.WaitForNextBlock())
validateTxSuccessful(s.T(), s.validator.ClientCtx, res.Bytes())
// Test query deployments ---------------------------------------------
res, err = deploycli.QueryDeploymentsExec(cctxJSON)
s.Require().NoError(err)
deployResp := &dtypes.QueryDeploymentsResponse{}
err = s.validator.ClientCtx.JSONMarshaler.UnmarshalJSON(res.Bytes(), deployResp)
s.Require().NoError(err)
s.Require().Len(deployResp.Deployments, 1, "Deployment Create Failed")
deployments := deployResp.Deployments
s.Require().Equal(tenantAddr, deployments[0].Deployment.DeploymentID.Owner)
// test query deployment
createdDep := deployments[0]
res, err = deploycli.QueryDeploymentExec(cctxJSON, createdDep.Deployment.DeploymentID)
s.Require().NoError(err)
deploymentResp := dtypes.QueryDeploymentResponse{}
err = s.validator.ClientCtx.JSONMarshaler.UnmarshalJSON(res.Bytes(), &deploymentResp)
s.Require().NoError(err)
s.Require().Equal(createdDep, deploymentResp)
s.Require().NotEmpty(deploymentResp.Deployment.Version)
// test query deployments with filters -----------------------------------
res, err = deploycli.QueryDeploymentsExec(
s.validator.ClientCtx.WithOutputFormat("json"),
fmt.Sprintf("--owner=%s", tenantAddr),
fmt.Sprintf("--dseq=%v", createdDep.Deployment.DeploymentID.DSeq),
)
s.Require().NoError(err, "Error when fetching deployments with owner filter")
deployResp = &dtypes.QueryDeploymentsResponse{}
err = s.validator.ClientCtx.JSONMarshaler.UnmarshalJSON(res.Bytes(), deployResp)
s.Require().NoError(err)
s.Require().Len(deployResp.Deployments, 1)
// Assert orders created by provider
// test query orders
res, err = mcli.QueryOrdersExec(cctxJSON)
s.Require().NoError(err)
result := &mtypes.QueryOrdersResponse{}
err = s.validator.ClientCtx.JSONMarshaler.UnmarshalJSON(res.Bytes(), result)
s.Require().NoError(err)
s.Require().Len(result.Orders, 1)
orders := result.Orders
s.Require().Equal(tenantAddr, orders[0].OrderID.Owner)
// Wait for the EndBlock to handle bidding and creating lease
s.Require().NoError(s.waitForBlocksCommitted(15))
// Assert provider made bid and created lease; test query leases
res, err = mcli.QueryBidsExec(cctxJSON)
s.Require().NoError(err)
bidsRes := &mtypes.QueryBidsResponse{}
err = s.validator.ClientCtx.JSONMarshaler.UnmarshalJSON(res.Bytes(), bidsRes)
s.Require().NoError(err)
s.Require().Len(bidsRes.Bids, 1)
res, err = mcli.TxCreateLeaseExec(
cctxJSON,
bidsRes.Bids[0].Bid.BidID,
s.keyTenant.GetAddress(),
fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(10))).String()),
fmt.Sprintf("--gas=%d", flags.DefaultGasLimit),
)
s.Require().NoError(err)
s.Require().NoError(s.waitForBlocksCommitted(6))
validateTxSuccessful(s.T(), s.validator.ClientCtx, res.Bytes())
res, err = mcli.QueryLeasesExec(cctxJSON)
s.Require().NoError(err)
leaseRes := &mtypes.QueryLeasesResponse{}
err = s.validator.ClientCtx.JSONMarshaler.UnmarshalJSON(res.Bytes(), leaseRes)
s.Require().NoError(err)
s.Require().Len(leaseRes.Leases, 1)
lease := newestLease(leaseRes.Leases)
lid := lease.LeaseID
s.Require().Equal(s.keyProvider.GetAddress().String(), lid.Provider)
// Send Manifest to Provider ----------------------------------------------
_, err = ptestutil.TestSendManifest(
cctxJSON,
lid.BidID(),
deploymentPath,
fmt.Sprintf("--%s=%s", flags.FlagFrom, s.keyTenant.GetAddress().String()),
fmt.Sprintf("--%s=%s", flags.FlagHome, s.validator.ClientCtx.HomeDir),
)
s.Require().NoError(err)
s.Require().NoError(s.waitForBlocksCommitted(20))
appURL := fmt.Sprintf("http://%s:%s/", s.appHost, s.appPort)
queryApp(s.T(), appURL, 50)
cmdResult, err := providerCmd.ProviderStatusExec(s.validator.ClientCtx, lid.Provider)
assert.NoError(s.T(), err)
data := make(map[string]interface{})
err = json.Unmarshal(cmdResult.Bytes(), &data)
assert.NoError(s.T(), err)
leaseCount, ok := data["cluster"].(map[string]interface{})["leases"]
assert.True(s.T(), ok)
assert.Equal(s.T(), float64(1), leaseCount)
// Read SDL into memory so each service can be checked
deploymentSdl, err := sdl.ReadFile(deploymentPath)
require.NoError(s.T(), err)
mani, err := deploymentSdl.Manifest()
require.NoError(s.T(), err)
cmdResult, err = providerCmd.ProviderLeaseStatusExec(
s.validator.ClientCtx,
fmt.Sprintf("--%s=%v", "dseq", lid.DSeq),
fmt.Sprintf("--%s=%v", "gseq", lid.GSeq),
fmt.Sprintf("--%s=%v", "oseq", lid.OSeq),
fmt.Sprintf("--%s=%v", "provider", lid.Provider),
fmt.Sprintf("--%s=%s", flags.FlagFrom, s.keyTenant.GetAddress().String()),
fmt.Sprintf("--%s=%s", flags.FlagHome, s.validator.ClientCtx.HomeDir),
)
assert.NoError(s.T(), err)
err = json.Unmarshal(cmdResult.Bytes(), &data)
assert.NoError(s.T(), err)
for _, group := range mani.GetGroups() {
for _, service := range group.Services {
serviceTotalCount, ok := data["services"].(map[string]interface{})[service.Name].(map[string]interface{})["total"]
assert.True(s.T(), ok)
assert.Greater(s.T(), serviceTotalCount, float64(0))
}
}
for _, group := range mani.GetGroups() {
for _, service := range group.Services {
cmdResult, err = providerCmd.ProviderServiceStatusExec(
s.validator.ClientCtx,
fmt.Sprintf("--%s=%v", "dseq", lid.DSeq),
fmt.Sprintf("--%s=%v", "gseq", lid.GSeq),
fmt.Sprintf("--%s=%v", "oseq", lid.OSeq),
fmt.Sprintf("--%s=%v", "provider", lid.Provider),
fmt.Sprintf("--%s=%v", "service", service.Name),
fmt.Sprintf("--%s=%s", flags.FlagFrom, s.keyTenant.GetAddress().String()),
fmt.Sprintf("--%s=%s", flags.FlagHome, s.validator.ClientCtx.HomeDir),
)
assert.NoError(s.T(), err)
err = json.Unmarshal(cmdResult.Bytes(), &data)
assert.NoError(s.T(), err)
serviceTotalCount, ok := data["services"].(map[string]interface{})[service.Name].(map[string]interface{})["total"]
assert.True(s.T(), ok)
assert.Greater(s.T(), serviceTotalCount, float64(0))
}
}
}
func TestIntegrationTestSuite(t *testing.T) {
integrationTestOnly(t)
suite.Run(t, new(E2EContainerToContainer))
suite.Run(t, new(E2EAppNodePort))
suite.Run(t, new(E2EDeploymentUpdate))
suite.Run(t, new(E2EApp))
}
func (s *IntegrationTestSuite) waitForBlocksCommitted(height int) error {
h, err := s.network.LatestHeight()
if err != nil {
return err
}
targetHeight := h + int64(height)
// Allow roughly five seconds per block up to the target height.
_, err = s.network.WaitForHeightWithTimeout(targetHeight, time.Duration(targetHeight+1)*5*time.Second)
if err != nil {
return err
}
return nil
}
// TestQueryApp enables rapid testing of the querying functionality locally
// Not for CI tests.
func TestQueryApp(t *testing.T) {
integrationTestOnly(t)
host, appPort := appEnv(t)
appURL := fmt.Sprintf("https://%s:%s/", host, appPort)
queryApp(t, appURL, 1)
}
| [
"\"KUBE_NODE_IP\""
]
| []
| [
"KUBE_NODE_IP"
]
| [] | ["KUBE_NODE_IP"] | go | 1 | 0 | |
tests/functional/test_buckets.py | import csv
import os
import tempfile
import unittest
import warnings
from requests import exceptions
from kbcstorage.buckets import Buckets
from kbcstorage.tables import Tables
class TestBuckets(unittest.TestCase):
def setUp(self):
self.buckets = Buckets(os.getenv('KBC_TEST_API_URL'),
os.getenv('KBC_TEST_TOKEN'))
try:
self.buckets.delete('in.c-py-test-buckets', force=True)
except exceptions.HTTPError as e:
if e.response.status_code != 404:
raise
# https://github.com/boto/boto3/issues/454
warnings.simplefilter("ignore", ResourceWarning)
def tearDown(self):
try:
self.buckets.delete('in.c-py-test-buckets', force=True)
except exceptions.HTTPError as e:
if e.response.status_code != 404:
raise
def test_create_bucket(self):
bucket_id = self.buckets.create(name='py-test-buckets',
stage='in',
description='Test bucket')['id']
self.assertEqual(bucket_id, self.buckets.detail(bucket_id)['id'])
def test_list_tables(self):
bucket_id = self.buckets.create(name='py-test-buckets',
stage='in',
description='Test bucket')['id']
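        # Build a throwaway two-column CSV on disk to load into the new bucket.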
file, path = tempfile.mkstemp(prefix='sapi-test')
with open(path, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=['col1', 'col2'],
lineterminator='\n', delimiter=',',
quotechar='"')
writer.writeheader()
writer.writerow({'col1': 'ping', 'col2': 'pong'})
os.close(file)
tables = Tables(os.getenv('KBC_TEST_API_URL'),
os.getenv('KBC_TEST_TOKEN'))
tables.create(name='some-table', file_path=path,
bucket_id='in.c-py-test-buckets')
tables = self.buckets.list_tables(bucket_id)
with self.subTest():
self.assertEqual(1, len(tables))
with self.subTest():
self.assertEqual('in.c-py-test-buckets.some-table',
tables[0]['id'])
def test_bucket_detail(self):
bucket_id = self.buckets.create(name='py-test-buckets',
stage='in',
description='Test bucket')['id']
detail = self.buckets.detail(bucket_id)
with self.subTest():
self.assertEqual(bucket_id, detail['id'])
with self.subTest():
self.assertEqual('c-py-test-buckets', detail['name'])
with self.subTest():
self.assertIsNotNone(detail['uri'])
with self.subTest():
self.assertIsNotNone(detail['created'])
with self.subTest():
self.assertEqual('Test bucket', detail['description'])
with self.subTest():
self.assertEqual([], detail['tables'])
with self.subTest():
self.assertEqual([], detail['attributes'])
def test_invalid_bucket(self):
try:
self.buckets.detail('some-totally-non-existent-bucket')
except exceptions.HTTPError as e:
if e.response.status_code != 404:
raise
| []
| []
| [
"KBC_TEST_TOKEN",
"KBC_TEST_API_URL"
]
| [] | ["KBC_TEST_TOKEN", "KBC_TEST_API_URL"] | python | 2 | 0 | |
tests/tasks/api/refresh_token.py | #!/usr/bin/env python
# usage ./refresh_token.py {auth-server client secret} {current keycloak grant}
import sys
import base64
import json
import jwt
import time
import requests
import os
auth_server_client_secret = sys.argv[1]
grant_raw = base64.standard_b64decode(sys.argv[2])
grant = json.loads(grant_raw)
access_token = jwt.decode(grant['access_token'], verify=False)
current_time = int(time.time())
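# Reuse the existing grant while the access token is still valid;
# otherwise exchange the refresh token for a fresh grant from Keycloak.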
if current_time < access_token['exp']:
print grant_raw
else:
data = {'client_id': 'auth-server',
'client_secret': auth_server_client_secret,
'grant_type': 'refresh_token',
'refresh_token': grant['refresh_token']}
r = requests.post(url = os.environ['KEYCLOAK_URL'] + "/auth/realms/lagoon/protocol/openid-connect/token", data = data)
print r.text
| []
| []
| [
"KEYCLOAK_URL"
]
| [] | ["KEYCLOAK_URL"] | python | 1 | 0 | |
upup/pkg/fi/cloudup/template_functions.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/******************************************************************************
Template Functions are what map functions in the models, to internal logic in
kops. This is the point where we connect static YAML configuration to dynamic
runtime values in memory.
When defining a new function:
- Build the new function here
- Define the new function in AddTo()
dest["MyNewFunction"] = MyNewFunction // <-- Function Pointer
******************************************************************************/
package cloudup
import (
"encoding/base64"
"encoding/json"
"fmt"
"os"
"path"
"strconv"
"strings"
"text/template"
"github.com/Masterminds/sprig/v3"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
kopscontrollerconfig "k8s.io/kops/cmd/kops-controller/pkg/config"
"k8s.io/kops/pkg/apis/kops"
apiModel "k8s.io/kops/pkg/apis/kops/model"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/dns"
"k8s.io/kops/pkg/featureflag"
"k8s.io/kops/pkg/model"
"k8s.io/kops/pkg/resources/spotinst"
"k8s.io/kops/pkg/wellknownports"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
"k8s.io/kops/upup/pkg/fi/cloudup/gce"
"k8s.io/kops/util/pkg/env"
)
// TemplateFunctions provides a collection of methods used throughout the templates
type TemplateFunctions struct {
model.KopsModelContext
cloud fi.Cloud
}
// AddTo defines the available functions we can use in our YAML models.
// If we are trying to get a new function implemented it MUST
// be defined here.
func (tf *TemplateFunctions) AddTo(dest template.FuncMap, secretStore fi.SecretStore) (err error) {
cluster := tf.Cluster
dest["EtcdScheme"] = tf.EtcdScheme
dest["SharedVPC"] = tf.SharedVPC
dest["ToJSON"] = tf.ToJSON
dest["UseBootstrapTokens"] = tf.UseBootstrapTokens
dest["UseEtcdTLS"] = tf.UseEtcdTLS
// Remember that we may be on a different arch from the target. Hard-code for now.
dest["replace"] = func(s, find, replace string) string {
return strings.Replace(s, find, replace, -1)
}
dest["join"] = func(a []string, sep string) string {
return strings.Join(a, sep)
}
sprigTxtFuncMap := sprig.TxtFuncMap()
dest["indent"] = sprigTxtFuncMap["indent"]
dest["contains"] = sprigTxtFuncMap["contains"]
dest["ClusterName"] = tf.ClusterName
dest["WithDefaultBool"] = func(v *bool, defaultValue bool) bool {
if v != nil {
return *v
}
return defaultValue
}
dest["GetInstanceGroup"] = tf.GetInstanceGroup
dest["GetNodeInstanceGroups"] = tf.GetNodeInstanceGroups
dest["CloudTags"] = tf.CloudTagsForInstanceGroup
dest["KubeDNS"] = func() *kops.KubeDNSConfig {
return cluster.Spec.KubeDNS
}
dest["NodeLocalDNSClusterIP"] = func() string {
if cluster.Spec.KubeProxy.ProxyMode == "ipvs" {
return cluster.Spec.KubeDNS.ServerIP
}
return "__PILLAR__CLUSTER__DNS__"
}
dest["NodeLocalDNSHealthCheck"] = func() string {
return fmt.Sprintf("%d", wellknownports.NodeLocalDNSHealthCheck)
}
dest["KopsControllerArgv"] = tf.KopsControllerArgv
dest["KopsControllerConfig"] = tf.KopsControllerConfig
dest["DnsControllerArgv"] = tf.DNSControllerArgv
dest["ExternalDnsArgv"] = tf.ExternalDNSArgv
dest["CloudControllerConfigArgv"] = tf.CloudControllerConfigArgv
// TODO: Only for GCE?
dest["EncodeGCELabel"] = gce.EncodeGCELabel
dest["Region"] = func() string {
return tf.Region
}
	// returns the OpenStack external CCM image tag for the current Kubernetes version
dest["OpenStackCCMTag"] = tf.OpenStackCCMTag
dest["AWSCCMTag"] = tf.AWSCCMTag
dest["ProxyEnv"] = tf.ProxyEnv
dest["KopsSystemEnv"] = tf.KopsSystemEnv
dest["UseKopsControllerForNodeBootstrap"] = func() bool {
return tf.UseKopsControllerForNodeBootstrap()
}
dest["DO_TOKEN"] = func() string {
return os.Getenv("DIGITALOCEAN_ACCESS_TOKEN")
}
if featureflag.Spotinst.Enabled() {
if creds, err := spotinst.LoadCredentials(); err == nil {
dest["SpotinstToken"] = func() string { return creds.Token }
dest["SpotinstAccount"] = func() string { return creds.Account }
dest["SpotinstTokenBase64"] = func() string { return base64.StdEncoding.EncodeToString([]byte(creds.Token)) }
dest["SpotinstAccountBase64"] = func() string { return base64.StdEncoding.EncodeToString([]byte(creds.Account)) }
}
}
if cluster.Spec.Networking != nil && cluster.Spec.Networking.Calico != nil {
c := cluster.Spec.Networking.Calico
dest["CalicoIPv4PoolIPIPMode"] = func() string {
if c.EncapsulationMode != "ipip" {
return "Never"
}
if c.IPIPMode != "" {
return c.IPIPMode
}
if c.CrossSubnet {
return "CrossSubnet"
}
return "Always"
}
dest["CalicoIPv4PoolVXLANMode"] = func() string {
if c.EncapsulationMode != "vxlan" {
return "Never"
}
if c.CrossSubnet {
return "CrossSubnet"
}
return "Always"
}
}
if cluster.Spec.Networking != nil && cluster.Spec.Networking.Cilium != nil {
ciliumsecretString := ""
ciliumsecret, _ := secretStore.Secret("ciliumpassword")
if ciliumsecret != nil {
ciliumsecretString, err = ciliumsecret.AsString()
if err != nil {
return err
}
klog.V(4).Info("Cilium secret function successfully registered")
}
dest["CiliumSecret"] = func() string { return ciliumsecretString }
}
if cluster.Spec.Networking != nil && cluster.Spec.Networking.Flannel != nil {
flannelBackendType := cluster.Spec.Networking.Flannel.Backend
if flannelBackendType == "" {
klog.Warningf("Defaulting flannel backend to udp (not a recommended configuration)")
flannelBackendType = "udp"
}
dest["FlannelBackendType"] = func() string { return flannelBackendType }
}
if cluster.Spec.Networking != nil && cluster.Spec.Networking.Weave != nil {
weavesecretString := ""
weavesecret, _ := secretStore.Secret("weavepassword")
if weavesecret != nil {
weavesecretString, err = weavesecret.AsString()
if err != nil {
return err
}
klog.V(4).Info("Weave secret function successfully registered")
}
dest["WeaveSecret"] = func() string { return weavesecretString }
}
dest["CsiExtraTags"] = func() string {
s := fmt.Sprintf("KubernetesCluster=%s", cluster.ObjectMeta.Name)
for n, v := range cluster.Spec.CloudLabels {
s += fmt.Sprintf(",%s=%s", n, v)
}
return s
}
dest["UseServiceAccountIAM"] = tf.UseServiceAccountIAM
if cluster.Spec.NodeTerminationHandler != nil {
dest["DefaultQueueName"] = func() string {
s := strings.Replace(tf.ClusterName(), ".", "-", -1)
domain := ".amazonaws.com/"
if strings.Contains(tf.Region, "cn-") {
domain = ".amazonaws.com.cn/"
}
url := "https://sqs." + tf.Region + domain + tf.AWSAccountID + "/" + s + "-nth"
return url
}
dest["EnableSQSTerminationDraining"] = func() bool { return *cluster.Spec.NodeTerminationHandler.EnableSQSTerminationDraining }
}
return nil
}
// ToJSON returns a json representation of the struct or on error an empty string
func (tf *TemplateFunctions) ToJSON(data interface{}) string {
encoded, err := json.Marshal(data)
if err != nil {
return ""
}
return string(encoded)
}
// EtcdScheme parses and grabs the protocol to the etcd cluster
func (tf *TemplateFunctions) EtcdScheme() string {
if tf.UseEtcdTLS() {
return "https"
}
return "http"
}
// SharedVPC is a simple helper function which makes the templates for a shared VPC clearer
func (tf *TemplateFunctions) SharedVPC() bool {
return tf.Cluster.SharedVPC()
}
// GetInstanceGroup returns the instance group with the specified name
func (tf *TemplateFunctions) GetInstanceGroup(name string) (*kops.InstanceGroup, error) {
ig := tf.KopsModelContext.FindInstanceGroup(name)
if ig == nil {
return nil, fmt.Errorf("InstanceGroup %q not found", name)
}
return ig, nil
}
// CloudControllerConfigArgv returns the args to external cloud controller
func (tf *TemplateFunctions) CloudControllerConfigArgv() ([]string, error) {
cluster := tf.Cluster
if cluster.Spec.ExternalCloudControllerManager == nil {
return nil, fmt.Errorf("ExternalCloudControllerManager is nil")
}
var argv []string
if cluster.Spec.ExternalCloudControllerManager.Master != "" {
argv = append(argv, fmt.Sprintf("--master=%s", cluster.Spec.ExternalCloudControllerManager.Master))
}
if cluster.Spec.ExternalCloudControllerManager.LogLevel != 0 {
argv = append(argv, fmt.Sprintf("--v=%d", cluster.Spec.ExternalCloudControllerManager.LogLevel))
} else {
argv = append(argv, "--v=2")
}
if cluster.Spec.ExternalCloudControllerManager.CloudProvider != "" {
argv = append(argv, fmt.Sprintf("--cloud-provider=%s", cluster.Spec.ExternalCloudControllerManager.CloudProvider))
} else if cluster.Spec.CloudProvider != "" {
argv = append(argv, fmt.Sprintf("--cloud-provider=%s", cluster.Spec.CloudProvider))
} else {
return nil, fmt.Errorf("Cloud Provider is not set")
}
if cluster.Spec.ExternalCloudControllerManager.ClusterName != "" {
argv = append(argv, fmt.Sprintf("--cluster-name=%s", cluster.Spec.ExternalCloudControllerManager.ClusterName))
}
if cluster.Spec.ExternalCloudControllerManager.ClusterCIDR != "" {
argv = append(argv, fmt.Sprintf("--cluster-cidr=%s", cluster.Spec.ExternalCloudControllerManager.ClusterCIDR))
}
if cluster.Spec.ExternalCloudControllerManager.AllocateNodeCIDRs != nil {
argv = append(argv, fmt.Sprintf("--allocate-node-cidrs=%t", *cluster.Spec.ExternalCloudControllerManager.AllocateNodeCIDRs))
}
if cluster.Spec.ExternalCloudControllerManager.ConfigureCloudRoutes != nil {
argv = append(argv, fmt.Sprintf("--configure-cloud-routes=%t", *cluster.Spec.ExternalCloudControllerManager.ConfigureCloudRoutes))
}
if cluster.Spec.ExternalCloudControllerManager.CIDRAllocatorType != nil && *cluster.Spec.ExternalCloudControllerManager.CIDRAllocatorType != "" {
argv = append(argv, fmt.Sprintf("--cidr-allocator-type=%s", *cluster.Spec.ExternalCloudControllerManager.CIDRAllocatorType))
}
if cluster.Spec.ExternalCloudControllerManager.UseServiceAccountCredentials != nil {
argv = append(argv, fmt.Sprintf("--use-service-account-credentials=%t", *cluster.Spec.ExternalCloudControllerManager.UseServiceAccountCredentials))
} else {
argv = append(argv, fmt.Sprintf("--use-service-account-credentials=%t", true))
}
return argv, nil
}
// DNSControllerArgv returns the args to the DNS controller
func (tf *TemplateFunctions) DNSControllerArgv() ([]string, error) {
cluster := tf.Cluster
var argv []string
argv = append(argv, "/dns-controller")
// @check if the dns controller has custom configuration
if cluster.Spec.ExternalDNS == nil {
argv = append(argv, []string{"--watch-ingress=false"}...)
klog.V(4).Infof("watch-ingress=false set on dns-controller")
} else {
// @check if the watch ingress is set
var watchIngress bool
if cluster.Spec.ExternalDNS.WatchIngress != nil {
watchIngress = fi.BoolValue(cluster.Spec.ExternalDNS.WatchIngress)
}
if watchIngress {
klog.Warningln("--watch-ingress=true set on dns-controller")
klog.Warningln("this may cause problems with previously defined services: https://github.com/kubernetes/kops/issues/2496")
}
argv = append(argv, fmt.Sprintf("--watch-ingress=%t", watchIngress))
if cluster.Spec.ExternalDNS.WatchNamespace != "" {
argv = append(argv, fmt.Sprintf("--watch-namespace=%s", cluster.Spec.ExternalDNS.WatchNamespace))
}
}
if dns.IsGossipHostname(cluster.Spec.MasterInternalName) {
argv = append(argv, "--dns=gossip")
// Configuration specifically for the DNS controller gossip
if cluster.Spec.DNSControllerGossipConfig != nil {
if cluster.Spec.DNSControllerGossipConfig.Protocol != nil {
argv = append(argv, "--gossip-protocol="+*cluster.Spec.DNSControllerGossipConfig.Protocol)
}
if cluster.Spec.DNSControllerGossipConfig.Listen != nil {
argv = append(argv, "--gossip-listen="+*cluster.Spec.DNSControllerGossipConfig.Listen)
}
if cluster.Spec.DNSControllerGossipConfig.Secret != nil {
argv = append(argv, "--gossip-secret="+*cluster.Spec.DNSControllerGossipConfig.Secret)
}
if cluster.Spec.DNSControllerGossipConfig.Seed != nil {
argv = append(argv, "--gossip-seed="+*cluster.Spec.DNSControllerGossipConfig.Seed)
} else {
argv = append(argv, fmt.Sprintf("--gossip-seed=127.0.0.1:%d", wellknownports.ProtokubeGossipWeaveMesh))
}
if cluster.Spec.DNSControllerGossipConfig.Secondary != nil {
if cluster.Spec.DNSControllerGossipConfig.Secondary.Protocol != nil {
argv = append(argv, "--gossip-protocol-secondary="+*cluster.Spec.DNSControllerGossipConfig.Secondary.Protocol)
}
if cluster.Spec.DNSControllerGossipConfig.Secondary.Listen != nil {
argv = append(argv, "--gossip-listen-secondary="+*cluster.Spec.DNSControllerGossipConfig.Secondary.Listen)
}
if cluster.Spec.DNSControllerGossipConfig.Secondary.Secret != nil {
argv = append(argv, "--gossip-secret-secondary="+*cluster.Spec.DNSControllerGossipConfig.Secondary.Secret)
}
if cluster.Spec.DNSControllerGossipConfig.Secondary.Seed != nil {
argv = append(argv, "--gossip-seed-secondary="+*cluster.Spec.DNSControllerGossipConfig.Secondary.Seed)
} else {
argv = append(argv, fmt.Sprintf("--gossip-seed-secondary=127.0.0.1:%d", wellknownports.ProtokubeGossipMemberlist))
}
}
} else {
// Default to primary mesh and secondary memberlist
argv = append(argv, fmt.Sprintf("--gossip-seed=127.0.0.1:%d", wellknownports.ProtokubeGossipWeaveMesh))
argv = append(argv, "--gossip-protocol-secondary=memberlist")
argv = append(argv, fmt.Sprintf("--gossip-listen-secondary=0.0.0.0:%d", wellknownports.DNSControllerGossipMemberlist))
argv = append(argv, fmt.Sprintf("--gossip-seed-secondary=127.0.0.1:%d", wellknownports.ProtokubeGossipMemberlist))
}
} else {
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderAWS:
if strings.HasPrefix(os.Getenv("AWS_REGION"), "cn-") {
argv = append(argv, "--dns=gossip")
} else {
argv = append(argv, "--dns=aws-route53")
}
case kops.CloudProviderGCE:
argv = append(argv, "--dns=google-clouddns")
case kops.CloudProviderDO:
argv = append(argv, "--dns=digitalocean")
default:
return nil, fmt.Errorf("unhandled cloudprovider %q", cluster.Spec.CloudProvider)
}
}
zone := cluster.Spec.DNSZone
if zone != "" {
if strings.Contains(zone, ".") {
// match by name
argv = append(argv, "--zone="+zone)
} else {
// match by id
argv = append(argv, "--zone=*/"+zone)
}
}
// permit wildcard updates
argv = append(argv, "--zone=*/*")
// Verbose, but not crazy logging
argv = append(argv, "-v=2")
return argv, nil
}
// KopsControllerConfig returns the yaml configuration for kops-controller
func (tf *TemplateFunctions) KopsControllerConfig() (string, error) {
cluster := tf.Cluster
config := &kopscontrollerconfig.Options{
Cloud: cluster.Spec.CloudProvider,
ConfigBase: cluster.Spec.ConfigBase,
}
if featureflag.CacheNodeidentityInfo.Enabled() {
config.CacheNodeidentityInfo = true
}
if tf.UseKopsControllerForNodeBootstrap() {
certNames := []string{"kubelet", "kubelet-server"}
signingCAs := []string{fi.CertificateIDCA}
if apiModel.UseCiliumEtcd(cluster) {
certNames = append(certNames, "etcd-client-cilium")
signingCAs = append(signingCAs, "etcd-clients-ca-cilium")
}
if cluster.Spec.KubeProxy.Enabled == nil || *cluster.Spec.KubeProxy.Enabled {
certNames = append(certNames, "kube-proxy")
}
if cluster.Spec.Networking.Kuberouter != nil {
certNames = append(certNames, "kube-router")
}
pkiDir := "/etc/kubernetes/kops-controller/pki"
config.Server = &kopscontrollerconfig.ServerOptions{
Listen: fmt.Sprintf(":%d", wellknownports.KopsControllerPort),
ServerCertificatePath: path.Join(pkiDir, "kops-controller.crt"),
ServerKeyPath: path.Join(pkiDir, "kops-controller.key"),
CABasePath: pkiDir,
SigningCAs: signingCAs,
CertNames: certNames,
}
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
case kops.CloudProviderAWS:
nodesRoles := sets.String{}
for _, ig := range tf.InstanceGroups {
if ig.Spec.Role == kops.InstanceGroupRoleNode || ig.Spec.Role == kops.InstanceGroupRoleAPIServer {
profile, err := tf.LinkToIAMInstanceProfile(ig)
if err != nil {
return "", fmt.Errorf("getting profile for ig %s: %v", ig.Name, err)
}
// The IAM Instance Profile has not been created at this point if it is not specified.
// Because the IAM Instance Profile and the IAM Role are created in IAMModelBuilder tasks.
// Therefore, the IAM Role associated with IAM Instance Profile is acquired only when it is not specified.
if ig.Spec.IAM != nil && ig.Spec.IAM.Profile != nil {
c := tf.cloud.(awsup.AWSCloud)
roles, err := awsup.GetRolesInInstanceProfile(c, *profile.Name)
if err != nil {
return "", fmt.Errorf("getting role from profile %s: %v", *profile.Name, err)
}
nodesRoles.Insert(roles...)
} else {
// When the IAM Instance Profile is not specified, IAM Instance Profile is created by kOps.
// In this case, the IAM Instance Profile name and IAM Role name are same.
// So there is no problem even if IAM Instance Profile name is inserted as role name in nodesRoles.
nodesRoles.Insert(*profile.Name)
}
}
}
config.Server.Provider.AWS = &awsup.AWSVerifierOptions{
NodesRoles: nodesRoles.List(),
Region: tf.Region,
}
default:
return "", fmt.Errorf("unsupported cloud provider %s", cluster.Spec.CloudProvider)
}
}
// To avoid indentation problems, we marshal as json. json is a subset of yaml
b, err := json.Marshal(config)
if err != nil {
return "", fmt.Errorf("failed to serialize kops-controller config: %v", err)
}
return string(b), nil
}
// KopsControllerArgv returns the args to kops-controller
func (tf *TemplateFunctions) KopsControllerArgv() ([]string, error) {
var argv []string
argv = append(argv, "/kops-controller")
// Verbose, but not excessive logging
argv = append(argv, "--v=2")
argv = append(argv, "--conf=/etc/kubernetes/kops-controller/config/config.yaml")
return argv, nil
}
func (tf *TemplateFunctions) ExternalDNSArgv() ([]string, error) {
cluster := tf.Cluster
var argv []string
cloudProvider := cluster.Spec.CloudProvider
switch kops.CloudProviderID(cloudProvider) {
case kops.CloudProviderAWS:
argv = append(argv, "--provider=aws")
case kops.CloudProviderGCE:
project := cluster.Spec.Project
argv = append(argv, "--provider=google")
argv = append(argv, "--google-project="+project)
default:
return nil, fmt.Errorf("unhandled cloudprovider %q", cluster.Spec.CloudProvider)
}
argv = append(argv, "--source=ingress")
return argv, nil
}
func (tf *TemplateFunctions) ProxyEnv() map[string]string {
cluster := tf.Cluster
envs := map[string]string{}
proxies := cluster.Spec.EgressProxy
if proxies == nil {
return envs
}
httpProxy := proxies.HTTPProxy
if httpProxy.Host != "" {
var portSuffix string
if httpProxy.Port != 0 {
portSuffix = ":" + strconv.Itoa(httpProxy.Port)
} else {
portSuffix = ""
}
url := "http://" + httpProxy.Host + portSuffix
envs["http_proxy"] = url
envs["https_proxy"] = url
}
if proxies.ProxyExcludes != "" {
envs["no_proxy"] = proxies.ProxyExcludes
envs["NO_PROXY"] = proxies.ProxyExcludes
}
return envs
}
// KopsSystemEnv builds the env vars for a system component
func (tf *TemplateFunctions) KopsSystemEnv() []corev1.EnvVar {
envMap := env.BuildSystemComponentEnvVars(&tf.Cluster.Spec)
return envMap.ToEnvVars()
}
// OpenStackCCMTag returns the OpenStack external cloud controller manager image tag
// corresponding to the cluster's Kubernetes version
func (tf *TemplateFunctions) OpenStackCCMTag() string {
var tag string
parsed, err := util.ParseKubernetesVersion(tf.Cluster.Spec.KubernetesVersion)
if err != nil {
tag = "latest"
} else {
if parsed.Minor == 13 {
// The bugfix release
tag = "1.13.1"
} else {
			// otherwise we always use the .0 CCM image; if needed, this can be overridden via the cluster spec
tag = fmt.Sprintf("v%d.%d.0", parsed.Major, parsed.Minor)
}
}
return tag
}
// AWSCCMTag returns the correct tag for the cloud controller manager based on
// the Kubernetes Version
func (tf *TemplateFunctions) AWSCCMTag() (string, error) {
var tag string
parsed, err := util.ParseKubernetesVersion(tf.Cluster.Spec.KubernetesVersion)
if err != nil {
return "", fmt.Errorf("failed to parse Kubernetes version from cluster spec: %q", err)
}
// Update when we have stable releases
switch parsed.Minor {
case 18:
tag = "v1.18.0-alpha.1"
case 19:
tag = "v1.19.0-alpha.1"
default:
tag = "latest"
}
return tag, nil
}
// GetNodeInstanceGroups returns a map containing the defined instance groups of role "Node".
func (tf *TemplateFunctions) GetNodeInstanceGroups() map[string]kops.InstanceGroupSpec {
nodegroups := make(map[string]kops.InstanceGroupSpec)
for _, ig := range tf.KopsModelContext.InstanceGroups {
if ig.Spec.Role == kops.InstanceGroupRoleNode {
nodegroups[ig.ObjectMeta.Name] = ig.Spec
}
}
return nodegroups
}
| [
"\"DIGITALOCEAN_ACCESS_TOKEN\"",
"\"AWS_REGION\""
]
| []
| [
"DIGITALOCEAN_ACCESS_TOKEN",
"AWS_REGION"
]
| [] | ["DIGITALOCEAN_ACCESS_TOKEN", "AWS_REGION"] | go | 2 | 0 | |
weeat/wsgi.py | """
WSGI config for weeat project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "weeat.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
bobsled/tests/test_env_config.py | import os
import pytest
from .. import storages
from bobsled.utils import get_env_config
def test_get_env_config_basic():
os.environ["FAKE_TEST_KEY"] = "DatabaseStorage"
os.environ["BOBSLED_DATABASE_URI"] = "test://"
Cls, args = get_env_config("FAKE_TEST_KEY", None, storages)
assert Cls is storages.DatabaseStorage
assert args == {"BOBSLED_DATABASE_URI": "test://"}
def test_get_env_config_missing():
os.environ["FAKE_TEST_KEY"] = "DatabaseStorage"
os.environ.pop("BOBSLED_DATABASE_URI", None)
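    # With the required BOBSLED_DATABASE_URI unset, get_env_config should raise.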
with pytest.raises(EnvironmentError):
Cls, args = get_env_config("FAKE_TEST_KEY", None, storages)
| []
| []
| [
"BOBSLED_DATABASE_URI",
"FAKE_TEST_KEY"
]
| [] | ["BOBSLED_DATABASE_URI", "FAKE_TEST_KEY"] | python | 2 | 0 | |
chekw.py | #!/usr/bin/env python
# coding=utf-8
# chrome 60 + chromedriver 2.31
import os
import random
import sys
import time
import commands
import argparse
import requests
import mysql.connector
from requests.adapters import HTTPAdapter
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.proxy import Proxy, ProxyType
from fake_useragent import UserAgent
from pprint import pprint
mydb = mysql.connector.connect(
host="localhost",
user="ekw",
passwd="rPH5p873rDRgawJx",
database="geckoerp"
)
mycursor = mydb.cursor()
def exception(b):
b.close()
b.quit()
def search(b):
time.sleep(random.random())
wyszukaj = b.find_element_by_id("wyszukaj")
ActionChains(b).move_to_element_with_offset(wyszukaj, random.randint(1, 5), random.randint(1, 5)).perform()
time.sleep(random.random() + 2)
ActionChains(b).click(wyszukaj).perform()
    try:
        WebDriverWait(b, random.randint(7, 12)).until(
            EC.presence_of_element_located((By.ID, "powrotDoKryterii"))
        )
    except:
        # Only retry when the results page failed to load (timeout/CAPTCHA).
        print "Error: failed to load the page/CAPTCHA(?)"
        print "Retrying the search..."
        search(b)
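# Tor SOCKS proxy address (defined but never wired up; Chrome is launched with --proxy-auto-detect below).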
myProxy = "127.0.0.1:9050"
parser = argparse.ArgumentParser()
parser.add_argument("kodWydzialu", help="kod wydziału")
parser.add_argument("numerKW", help="numer KW")
parser.add_argument("cKontrolna", help="cyfra kontrolna")
args = parser.parse_args()
# ua = UserAgent()
# userAgent = ua.random
# print(userAgent)
# os.environ['SOCKS_SERVER'] = "127.0.0.1:" + str(socks_port)
# os.environ['SOCKS_VERSION'] = "5"
path = "storage/app/ekw/"
kodWydzialu = args.kodWydzialu
numerKWx = args.numerKW
cyfraKontrolnaX = args.cKontrolna
print >> sys.stdout, kodWydzialu
print >> sys.stdout, numerKWx
print >> sys.stdout, cyfraKontrolnaX
sql = "SELECT count(1) FROM `ekw` WHERE `sad` = %s AND `numer` = %s"
mycursor.execute(sql, (kodWydzialu, numerKWx))
if mycursor.fetchone()[0]:
print "Księga już istnieje w bazie danych lub zostala dodana jako nieistniejaca"
exit()
opt = Options()
opt.add_argument("--incognito")
opt.add_argument("--headless")
opt.add_argument("--no-first-run")
opt.add_argument("--no-session-id")
opt.add_argument("--disable-background-networking")
opt.add_argument("--disable-client-side-phishing-detection")
opt.add_argument("--disable-cloud-import")
opt.add_argument("--disable-sync")
opt.add_argument("--proxy-auto-detect")
driver = os.path.join("/usr/lib/chromium-browser/", "chromedriver")
try:
browser = webdriver.Chrome(executable_path=driver,chrome_options=opt,service_args=["--verbose", "--log-path=/home/r/chromedriver.log"])
browser.maximize_window()
browser.delete_all_cookies()
browser.get("https://przegladarka-ekw.ms.gov.pl/eukw_prz/KsiegiWieczyste/wyszukiwanieKW?komunikaty=true&kontakt=true&okienkoSerwisowe=false")
try:
WebDriverWait(browser, 15).until(
EC.presence_of_element_located((By.ID, "kodWydzialu"))
)
print "Zaladowano strone EKW"
except:
exception(browser)
kodWydzialuInput = browser.find_element_by_id("kodWydzialuInput")
kodWydzialuInput.click()
time.sleep(random.random() + random.random())
kodWydzialuInput.send_keys(kodWydzialu)
numerKsiegiWieczystej = browser.find_element_by_id("numerKsiegiWieczystej")
numerKsiegiWieczystej.click()
time.sleep(random.random() + random.random())
browser.execute_script("$(arguments[0]).val('" + numerKWx + "');", numerKsiegiWieczystej)
# numerKsiegiWieczystej.send_keys(numerKWx)
cyfraKontrolna = browser.find_element_by_id("cyfraKontrolna")
cyfraKontrolna.click()
time.sleep(random.random() + random.random())
browser.execute_script("$(arguments[0]).val('" + cyfraKontrolnaX + "');", cyfraKontrolna)
# cyfraKontrolna.send_keys(cyfraKontrolnaX)
time.sleep(random.random() + random.random() + random.randint(1,2))
wyszukaj = browser.find_element_by_id("wyszukaj")
# browser.execute_script("$(arguments[0]).trigger('click');", wyszukaj)
wyszukaj.click()
print "Wyszukiwanie..."
try:
WebDriverWait(browser, 60).until(
EC.presence_of_element_located((By.ID, "powrotDoKryterii"))
)
try:
WebDriverWait(browser, 1).until(
EC.presence_of_element_located((By.ID, "przyciskWydrukZupelny"))
)
print "Znaleziono KW!"
except:
print >> sys.stdout, "Nie znaleziono KW!"
exist = 0
sql = "INSERT INTO `ekw` (`sad`, `numer`, `ck`, `exists`) VALUES(%s, %s, %s, %s)"
val = (kodWydzialu, numerKWx, cyfraKontrolnaX, exist)
mycursor.execute(sql, val)
mydb.commit()
exception(browser)
time.sleep(random.random() + 1)
przyciskWydrukZupelny = browser.find_element_by_id("przyciskWydrukZupelny")
time.sleep(random.random() + 1)
browser.execute_script("$(arguments[0]).trigger('click');", przyciskWydrukZupelny)
# przyciskWydrukZupelny.click()
print "Pobieranie tresci 1/7 (podstawowe informacje)..."
try:
WebDriverWait(browser, 60).until(
EC.presence_of_element_located((By.XPATH, "//input[@value='OKLADKA']"))
)
print "Pobieranie tresci 2/7 (okladka)... początek treści KW"
time.sleep(1)
okladka = browser.find_element_by_id('contentDzialu')
okladkaVal = browser.execute_script("return $(arguments[0]).html();", okladka)
okladkaFile = open(path + kodWydzialu + numerKWx + cyfraKontrolnaX + "_okladka.dat", "w+")
okladkaFile.write(okladkaVal)
okladkaFile.close()
print "Pobieranie tresci 3/7 (oznaczenie nieruchomości)..."
dzialIO = browser.find_element_by_xpath("//input[@value='Dział I-O']")
time.sleep(random.random())
browser.execute_script("$(arguments[0]).trigger('click');", dzialIO)
WebDriverWait(browser, 60).until(
EC.presence_of_element_located(
(By.XPATH, "//*[contains(text(), 'DZIAŁ I-O - OZNACZENIE NIERUCHOMOŚCI')]"))
)
dIO = browser.find_element_by_id('contentDzialu')
dIOVal = browser.execute_script("return $(arguments[0]).html();", dIO)
dIOFile = open(path + kodWydzialu + numerKWx + cyfraKontrolnaX + "_adres.dat", "w+")
dIOFile.write(dIOVal)
dIOFile.close()
print "Pobieranie tresci 4/7 (spis praw zw. z wlasn.)..."
dzialIS = browser.find_element_by_xpath("//input[@value='Dział I-Sp']")
time.sleep(random.random())
browser.execute_script("$(arguments[0]).trigger('click');", dzialIS)
WebDriverWait(browser, 60).until(
EC.presence_of_element_located(
(By.XPATH, "//*[contains(text(), 'DZIAŁ I-SP - SPIS PRAW ZWIĄZANYCH Z WŁASNOŚCIĄ')]"))
)
dIS = browser.find_element_by_id('contentDzialu')
dISVal = browser.execute_script("return $(arguments[0]).html();", dIS)
dISFile = open(path + kodWydzialu + numerKWx + cyfraKontrolnaX + "_prawa.dat", "w+")
dISFile.write(dISVal)
dISFile.close()
print "Pobieranie tresci 5/7 (wlasnosc)..."
dzialII = browser.find_element_by_xpath("//input[@value='Dział II']")
time.sleep(random.random())
browser.execute_script("$(arguments[0]).trigger('click');", dzialII)
WebDriverWait(browser, 60).until(
EC.presence_of_element_located((By.XPATH, "//*[contains(text(), 'DZIAŁ II - WŁASNOŚĆ')]"))
)
dII = browser.find_element_by_id('contentDzialu')
dIIVal = browser.execute_script("return $(arguments[0]).html();", dII)
dIIFile = open(path + kodWydzialu + numerKWx + cyfraKontrolnaX + "_wlasc.dat", "w+")
dIIFile.write(dIIVal)
dIIFile.close()
print "Pobieranie tresci 6/7 (prawa, roszczenia i ogran.)..."
dzialIII = browser.find_element_by_xpath("//input[@value='Dział III']")
time.sleep(random.random())
browser.execute_script("$(arguments[0]).trigger('click');", dzialIII)
WebDriverWait(browser, 60).until(
EC.presence_of_element_located(
(By.XPATH, "//*[contains(text(), 'DZIAŁ III - PRAWA, ROSZCZENIA I OGRANICZENIA')]"))
)
dIII = browser.find_element_by_id('contentDzialu')
dIIIVal = browser.execute_script("return $(arguments[0]).html();", dIII)
dIIIFile = open(path + kodWydzialu + numerKWx + cyfraKontrolnaX + "_roszcz.dat", "w+")
dIIIFile.write(dIIIVal)
dIIIFile.close()
print "Pobieranie tresci 7/7 (hipoteka)..."
dzialIV = browser.find_element_by_xpath("//input[@value='Dział IV']")
time.sleep(random.random())
browser.execute_script("$(arguments[0]).trigger('click');", dzialIV)
WebDriverWait(browser, 60).until(
EC.presence_of_element_located((By.XPATH, "//*[contains(text(), 'DZIAŁ IV - HIPOTEKA')]"))
)
dIV = browser.find_element_by_id('contentDzialu')
dIVVal = browser.execute_script("return $(arguments[0]).html();", dIV)
dIVFile = open(path + kodWydzialu + numerKWx + cyfraKontrolnaX + "_hipo.dat", "w+")
dIVFile.write(dIVVal)
dIVFile.close()
print "Zapisywanie do bazy danych..."
sql = "INSERT INTO `ekw` (sad, numer, ck) VALUES(%s, %s, %s)"
val = (kodWydzialu, numerKWx, cyfraKontrolnaX)
mycursor.execute(sql, val)
mydb.commit()
# print(browser.page_source)
exception(browser)
except:
exception(browser)
except:
print "Błąd krytyczny: nie udało się wczytać strony/CAPTCHA(?)"
exception(browser)
except:
exception(browser)
| []
| []
| [
"SOCKS_SERVER",
"SOCKS_VERSION"
]
| [] | ["SOCKS_SERVER", "SOCKS_VERSION"] | python | 2 | 0 | |
ediscs_catalog.py | from statsmodels.formula.api import ols,rlm
from astropy import coordinates
from astropy.table import Table
from astropy import units as u
import pdb, shlex, os, shutil, pandas, statsmodels.api as sm, numpy as np
import matplotlib.pyplot as plt, matplotlib.cm as cm, photcheck as pc, re
import subprocess as sp, labbe_depth as lb, pyfits as pf, random, pandas as pd
from astropysics import obstools as obs
from astropy.stats.funcs import biweight_location as bl
from scipy.optimize import curve_fit
import threedhst.eazyPy as eazy
megaLocation='/Volumes/BAHAMUT/megacat.v5.7.fits'
plt.rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"]
#plt.rc('ps',usedistiller='xpdf')
#-----------------------------------
# GENERAL NOTES
#
# Required files:
# Photometry from SExtractor for the WFI, MOSAIC, and
# NEWFIRM observations
#
# Dennis Just's "VLT.starflag.dat" file for each cluster
#
# FORS photometry to solve for WFI zeropoints
#-----------------------------------
#Constants
vega2AB={'bctio':-0.09949,'bkpno':-0.10712,'v':0.01850,'r':0.19895,'i':0.42143,'k':1.84244}
medCterms={'VRv':-0.151,'VRr':0.0245,'VIv':-0.0725,'VIi':0.1465,'RIr':0.015,'RIi':0.238}
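#Median color-term slopes, held fixed by the fixed* helper functions below while fitting zeropoints.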
#Fields for FITS catalogs
colNames=('field','ids','ra','dec','x','y','ebv','fwhmR','fB1','fB1err','fB2','fB2err','fB3','fB3err','fBiso',
'fBisoerr','fBauto','fBautoerr','fV1','fV1err','fV2','fV2err','fV3','fV3err','fViso','fVisoerr',
'fVauto','fVautoerr','fR1','fR1err','fR2','fR2err','fR3','fR3err','fRiso','fRisoerr','fRauto',
'fRautoerr','fI1','fI1err','fI2','fI2err','fI3','fI3err','fIiso','fIisoerr','fIauto','fIautoerr',
'fz1','fz1err','fz2','fz2err','fz3','fz3err','fziso','fzisoerr','fzauto','fzautoerr','fK1','fK1err',
'fK2','fK2err','fK3','fK3err','fKiso','fKisoerr','fKauto','fKautoerr','zLDP5_7','Q5_7',
'class_StarR','iso_area','major_ax','minor_ax','theta','sexflagB','sexflagV','sexflagR','sexflagI',
'sexflagz','sexflagK','wK')
colNames70=('field','ids','ra','dec','x','y','ebv','fwhmR','fB1','fB1err','fB2','fB2err','fB3','fB3err','fBiso',
'fBisoerr','fBauto','fBautoerr','fV1','fV1err','fV2','fV2err','fV3','fV3err','fViso','fVisoerr',
'fVauto','fVautoerr','fR1','fR1err','fR2','fR2err','fR3','fR3err','fRiso','fRisoerr','fRauto',
'fRautoerr','fI1','fI1err','fI2','fI2err','fI3','fI3err','fIiso','fIisoerr','fIauto','fIautoerr',
'fz1','fz1err','fz2','fz2err','fz3','fz3err','fziso','fzisoerr','fzauto','fzautoerr','fK1','fK1err',
'fK2','fK2err','fK3','fK3err','fKiso','fKisoerr','fKauto','fKautoerr','zLDP5_7','Q5_7','zphot',
'zphot_errUP','zphot_errLO','class_StarR','iso_area','major_ax','minor_ax','theta','sexflagB',
'sexflagV','sexflagR','sexflagI','sexflagz','sexflagK','wK')
#Flux and error column names
fluxNames = [x for x in list(colNames) if re.match('(?!.*err)f[BVRIzK].',x)]
errNames = [x for x in list(colNames) if re.match('(?=.*err)f[BVRIzK].',x)]
#Set up data types for FITS catalogs. All values should be floats except the SExtractor flags (int), LDP quality (int)
#field names (str), and WFI ID names (str)
Fdtype=('f;'*len(colNames)).split(';')[:-1]
for x in range(len(Fdtype)):
if ('field' in colNames[x]) or ('ids' in colNames[x]):
Fdtype[x] = 'S'
if (colNames[x] == 'Q5_7') or ('sexflag' in colNames[x]):
Fdtype[x] = 'i'
Fdtype = tuple(Fdtype)
Fdtype70=('f;'*len(colNames70)).split(';')[:-1]
for x in range(len(Fdtype70)):
if ('field' in colNames70[x]) or ('ids' in colNames70[x]):
Fdtype70[x] = 'S'
if (colNames70[x] == 'Q5_7') or ('sexflag' in colNames70[x]):
Fdtype70[x] = 'i'
Fdtype70 = tuple(Fdtype70)
#-----------------------------------
#Define a series of functions that are for fitting the zeropoints by comparing the WFI and FORS observations
#of stars using a constant color term, but allowing the zeropoint to vary. A separate function was necessary
#for each color (e.g., V-R, R-I, etc.) to fix the slope to different values unique to each color.
def fixedVRv(x,zp):
return (medCterms['VRv']*x)+zp
def fixedVRr(x,zp):
return (medCterms['VRr']*x)+zp
def fixedVIv(x,zp):
return (medCterms['VIv']*x)+zp
def fixedVIi(x,zp):
return (medCterms['VIi']*x)+zp
def fixedRIr(x,zp):
return (medCterms['RIr']*x)+zp
def fixedRIi(x,zp):
return (medCterms['RIi']*x)+zp
#-----------------------------------
def randomSample(catalog,filters,output1='randomFirst.fits',output2='randomSecond.fits',
classStar=0.3,Q=4):
"""
PURPOSE: Generate a random sample of galaxies from the photometric catalog of a single cluster.
    \tAlso generate a second FITS file of the other galaxies not in the random sample.
INPUTS:
\tcatalog - FITS photometric catalog
\tfilters - string list of the filter names (e.g., BVRIzK)
\toutput1 - Name of random sample FITS catalog
\toutput2 - Name of FITS catalog with galaxies NOT in the random sample
\tclassStar - SExtractor class_star value to perform cut on (selects objects with class_star < this value)
\tQ - LDP quality flag for selecting sources
RETURNS: None.
"""
select = 'class_StarR < '+str(classStar)+' & Q5_7 == '+str(Q)+' & '+' & '.join(['sexflag'+x+' == 0' for x in filters])
data = Table.read(catalog).to_pandas()
trimData = data.query(select)
idx=xrange(len(trimData))
sample = random.sample(idx,int(np.ceil(len(trimData)/2.)))
inverse = []
for x in idx:
if x not in sample:
inverse.append(x)
first = trimData.iloc[sample]
second = trimData.iloc[inverse]
fitsCols = list(trimData.columns.values)
firstDict = {colNames[x]:first[fitsCols[x]].values for x in range(len(fitsCols))}
secondDict = {colNames[x]:second[fitsCols[x]].values for x in range(len(fitsCols))}
galsDict = {colNames[x]:trimData[fitsCols[x]].values for x in range(len(fitsCols))}
Table(firstDict, names=colNames, dtype=Fdtype).write(output1,format='fits',overwrite=True)
Table(secondDict, names=colNames, dtype=Fdtype).write(output2,format='fits',overwrite=True)
Table(galsDict, names=colNames, dtype=Fdtype).write('galaxies.fits',format='fits',overwrite=True)
#-----------------------------------
def mkZPoffs(b=0.0,v=0.0,r=0.0,i=0.0,z=0.0,k=0.0,kpno=False):
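    """
    PURPOSE: Write a 'zphot.zeropoint' file of multiplicative flux offsets, 10**(-0.4*dm), for each
    \tband with a nonzero magnitude offset; the B band maps to filter code F1 (CTIO) or F2 (KPNO).
    """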
f=open('zphot.zeropoint','w')
bvrizk=[b,v,r,i,z,k]
fcodes=['F','F3','F4','F5','F6','F7']
if kpno == False:
fcodes[0]=fcodes[0]+'1'
else:
fcodes[0]=fcodes[0]+'2'
for x in range(len(bvrizk)):
if bvrizk[x] > 0.0:
offset = 10.0**(-0.4*bvrizk[x])
f.write(fcodes[x]+'\t'+str(offset)+'\n')
f.close()
#-----------------------------------
def update70(fitscat,clname,zphot='OUTPUT/photz.zout',output='v7.0.fits',zpvoff=0.0,zproff=0.0,zpioff=0.0,best=True,
offFile=''):
if best == True:
if offFile == '':
offFile=os.environ['EDISCS']+'/files/images/wfi_zeropoints.dat'
offsets=pd.read_table(offFile,delim_whitespace=True,
header=None,names=['CLUSTER','FILTER','C1','S1','C2','S2','FLAG','OFFSET','KEEP'],
comment='#',index_col=None)
        #Look up this cluster's VRI zeropoint offsets by the cluster name passed in as clname.
        (zpvoff, zproff, zpioff) = (offsets[offsets['CLUSTER'].str.contains(clname) & offsets['FILTER'].str.contains('V')]['OFFSET'].values[0],
                                    offsets[offsets['CLUSTER'].str.contains(clname) & offsets['FILTER'].str.contains('R')]['OFFSET'].values[0],
                                    offsets[offsets['CLUSTER'].str.contains(clname) & offsets['FILTER'].str.contains('I')]['OFFSET'].values[0])
catalog = Table.read(fitscat).to_pandas()
z=eazy.catIO.Readfile(zphot)
#Add in the zphot values and their associated uncertainties
index=range(len(catalog))
catalog['index']=index
catalog['zphot']=z.z_m2[catalog['index']].tolist()
catalog['zphot_errUP']=z.u68[catalog['index']].tolist()-catalog['zphot']
catalog['zphot_errLO']=catalog['zphot']-z.l68[catalog['index']].tolist()
catalog.drop('index', axis=1, inplace=True)
#Update the VRI photometry with the offsets (if necessary) and preserve the
#signal-to-noise ratio
offsets=[zpvoff,zproff,zpioff]
filters=['V','R','I']
apertures=['1','2','3','auto','iso']
for x in range(len(offsets)):
if offsets[x] != 0.0:
for y in apertures:
filtap = filters[x]+y
snratio = catalog['f'+filtap]/catalog['f'+filtap+'err']
catalog['f'+filtap] = catalog['f'+filtap] * (10.0**(-0.4*offsets[x]))
catalog['f'+filtap+'err'] = catalog['f'+filtap] / snratio
#Write the final FITS file
final = {colNames70[x]:catalog[colNames70[x]].values for x in range(len(colNames70))}
Table(final, names=colNames70, dtype=Fdtype70).write(output,format='fits',overwrite=True)
#-----------------------------------
#def updatecat(catalog,fieldName,outname,megacat='/Volumes/BAHAMUT/megacat.v5.7.fits'):
#
# hdu = pf.open(catalog)
# oldData = hdu[1].data
# field=[fieldName for x in range(len(oldData))]
#
# (filters, apertures) = (['B','V','R','I','z','K'], ['1','2','3','iso','auto'])
# for filt in filters:
# for ap in apertures:
# fluxes = 'f'+filt+ap
# bad = np.where(np.log10(np.abs(oldData[fluxes])) < -6.)
# oldData[fluxes][bad] = -77
#
# (zLDP,Q)=(np.zeros(len(oldData))-99,np.zeros(len(oldData))-99)
#
# megatab=pf.open('/Users/tyler/megacat.v5.7.fits')
# megadat=megatab[1].data
# (mzldp,mq,megaRA,megaDec)=(megadat['zldp'],megadat['q'],megadat['RA'],megadat['DEC'])
#
# wfiSky=coordinates.SkyCoord(ra=oldData['ra']*u.degree, dec=oldData['dec']*u.degree)
# megaSky=coordinates.SkyCoord(ra=megaRA*u.degree, dec=megaDec*u.degree)
# (idx,d2d,_)=wfiSky.match_to_catalog_sky(megaSky)
# match=np.where(d2d.arcsec < 0.5)
# zLDP[match]=mzldp[idx][match]
# Q[match]=mq[idx][match]
#
# final={'field':field,'ids':oldData['ids'],'ra':oldData['ra'],'dec':oldData['dec'],'x':oldData['x'],'y':oldData['y'],'ebv':oldData['ebv'],
# 'fwhmR':oldData['fwhmR'],
# 'fB1':oldData['fB1'],'fB1err':oldData['fB1err'],'fB2':oldData['fB2'],'fB2err':oldData['fB2err'],'fB3':oldData['fB3'],'fB3err':oldData['fB3err'],
# 'fBiso':oldData['fBiso'],'fBisoerr':oldData['fBisoerr'],'fBauto':oldData['fBauto'],'fBautoerr':oldData['fBautoerr'],'fV1':oldData['fV1'],
# 'fV1err':oldData['fV1err'],'fV2':oldData['fV2'],'fV2err':oldData['fV2err'],'fV3':oldData['fV3'],'fV3err':oldData['fV3err'],'fViso':oldData['fViso'],
# 'fVisoerr':oldData['fVisoerr'],'fVauto':oldData['fVauto'],'fVautoerr':oldData['fVautoerr'],'fR1':oldData['fR1'],'fR1err':oldData['fR1err'],
# 'fR2':oldData['fR2'],'fR2err':oldData['fR2err'],'fR3':oldData['fR3'],'fR3err':oldData['fR3err'],'fRiso':oldData['fRiso'],'fRisoerr':oldData['fRisoerr']
# ,'fRauto':oldData['fRauto'],'fRautoerr':oldData['fRautoerr'],'fI1':oldData['fI1'],'fI1err':oldData['fI1err'],'fI2':oldData['fI2'],
# 'fI2err':oldData['fI2err'],'fI3':oldData['fI3'],'fI3err':oldData['fI3err'],'fIiso':oldData['fIiso'],'fIisoerr':oldData['fIisoerr'],
# 'fIauto':oldData['fIauto'],'fIautoerr':oldData['fIautoerr'],'fz1':oldData['fz1'],'fz1err':oldData['fz1err'],'fz2':oldData['fz2'],
# 'fz2err':oldData['fz2err'],'fz3':oldData['fz3'],'fz3err':oldData['fz3err'],'fziso':oldData['fziso'],'fzisoerr':oldData['fzisoerr'],
# 'fzauto':oldData['fzauto'],'fzautoerr':oldData['fzautoerr'],'fK1':oldData['fK1'],'fK1err':oldData['fK1err'],'fK2':oldData['fK2'],
# 'fK2err':oldData['fK2err'],'fK3':oldData['fK3'],'fK3err':oldData['fK3err'],'fKiso':oldData['fKiso'],'fKisoerr':oldData['fKisoerr'],
# 'fKauto':oldData['fKauto'],'fKautoerr':oldData['fKautoerr'],'zLDP':zLDP,'Q':Q,'starB':oldData['starB'],'starV':oldData['starV'],
# 'starR':oldData['starR'],
# 'starI':oldData['starI'],'starz':oldData['starz'],'starK':oldData['starK'],'sexflagB':oldData['sexflagB'],'sexflagV':oldData['sexflagV'],
# 'sexflagR':oldData['sexflagR'],'sexflagI':oldData['sexflagI'],'sexflagz':oldData['sexflagz'],'sexflagK':oldData['sexflagK']}
#
# tab = Table(final, names=('field','ids','ra','dec','x','y','ebv','fwhmR','fB1','fB1err','fB2',
# 'fB2err','fB3','fB3err','fBiso','fBisoerr',
# 'fBauto','fBautoerr','fV1','fV1err','fV2','fV2err','fV3','fV3err','fViso','fVisoerr','fVauto',
# 'fVautoerr','fR1','fR1err','fR2','fR2err','fR3','fR3err','fRiso','fRisoerr','fRauto',
# 'fRautoerr','fI1','fI1err','fI2','fI2err','fI3','fI3err','fIiso','fIisoerr','fIauto',
# 'fIautoerr','fz1','fz1err','fz2','fz2err','fz3','fz3err','fziso','fzisoerr','fzauto',
# 'fzautoerr','fK1','fK1err','fK2','fK2err','fK3','fK3err','fKiso','fKisoerr','fKauto',
# 'fKautoerr','zLDP','Q','starB','starV','starR','starI','starz','starK','sexflagB','sexflagV','sexflagR','sexflagI',
# 'sexflagz','sexflagK'))
# tab.write(outname, format='fits', overwrite=True)
#-----------------------------------
def matchxy(x1,y1,x2,y2,tol=0.1):
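    """
    PURPOSE: For each (x1, y1) position, find the nearest (x2, y2) counterpart and keep its index
    \tif it lies within tol (in the input coordinate units).
    RETURNS: Array of matching indices into the second coordinate list.
    """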
match=[]
for i in range(len(x1)):
cdt=(x1[i],y1[i])
dist=np.sqrt((cdt[0]-x2)**2.+(cdt[1]-y2)**2.)
if np.min(dist) <= tol:
match.append(np.where(dist == np.min(dist))[0][0])
return np.array(match)
#-----------------------------------
def backZP(flux,mag):
"""PURPOSE: Backout the zeropoint for a source knowing its flux in detector units and its physical magnitude
INPUTS:
\tflux - flux in detector units (counts or counts/second)
\tmag - magnitude in physical units (note that if this is in AB mag, the result includes the AB conversion factor)
RETURNS: The zeropoint to convert between detector flux and physical magnitude
"""
return 2.5*np.log10(flux)+mag
#-----------------------------------
#def getSmoothFactor(rcat,xcat,class_star=0.0,border=1500.,pixscale=0.238,save=False):
#
# rangeMagR=[-15.,-12.]
# rangeMagX=[-10.,-5.]
#
# (rflux,rfwhm,starR)=np.loadtxt(rcat,usecols=(6,9,14),unpack=True,comments='#')
# (xcdt,ycdt,xflux,xfwhm,starX)=np.loadtxt(xcat,usecols=(2,3,6,9,14),unpack=True,comments='#')
#
# (rmags,xmags)=(-2.5*np.log10(rflux),-2.5*np.log10(xflux))
# goodX=np.where((xfwhm > 0.) & (xcdt < np.max(xcdt)-border) &
# (xcdt > np.min(xcdt)+border) & (ycdt < np.max(ycdt)-border) &
# (ycdt > np.min(ycdt)+border))
# goodR=np.where(starR >= class_star)
#
# plt.scatter(rmags,rfwhm*3600.,alpha=0.05,color='r')
# plt.scatter(xmags[goodX],xfwhm[goodX]*3600.,alpha=0.05,color='b')
#
# (rfwhm,xfwhm)=(rfwhm*3600.,xfwhm*3600.)
# (rfwhmSub,xfwhmSub)=(rfwhm[np.where((rmags >= rangeMagR[0]) & (rmags <= rangeMagR[1]) & (starR >= class_star))],
# xfwhm[np.where((xmags >= rangeMagX[0]) & (xmags <= rangeMagX[1]) & (starX >= class_star) & (xcdt < np.max(xcdt)-border) &
# (xcdt > np.min(xcdt)+border) & (ycdt < np.max(ycdt)-border) &
# (ycdt > np.min(ycdt)+border))])
#
# (avgR,avgX)=(bl(rfwhmSub),bl(xfwhmSub))
#
# xx=[-100,100]
# yy1=[avgR,avgR]
# yy2=[avgX,avgX]
# plt.plot(xx,yy1,'k-')
# plt.plot(xx,yy2,'k--')
# plt.axis([-20,-1,0.5,15])
#
# (pR,pX)=(avgR/pixscale,avgX/pixscale)
# sig=np.sqrt((pX**2.)-(pR**2.))/2.355
# print 'Avg. R = '+str(avgR)+' arcsec\nAvg. X = '+str(avgX)+' arcsec\nSigma smooth factor: '+str(sig)+' pixels'
#
# if save == True:
# plt.savefig('seeing_comp.pdf',format='pdf',dpi=6000.)
# else:
# plt.show()
#-----------------------------------
def addquad(xerr,orerr,nrerr,xflux,orflux,nrflux):
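    """
    PURPOSE: Rescale orflux by the flux ratio xflux/nrflux and propagate the three fractional
    \terrors in quadrature for the rescaled value.
    """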
value=(xflux/nrflux)*orflux
return np.abs(value*np.sqrt((xerr/xflux)**2. + (nrerr/nrflux)**2. + (orerr/orflux)**2.))
#-----------------------------------
def photscript(listfile,clname,photfile='photscript'):
"""
    PURPOSE: Generate a script (to be executed with 'source' from the command line) that will run SExtractor on
\tall input images in dual-image mode (using the R-band for detection) for a cluster.
INPUTS:
\tlistfile - two columns of ?img (where ? is the filter letter) and image file names
\tclname - name of the cluster (e.g., cl1354)
\tphotfile - name of the output script file (optional)
RETURNS: None.
"""
(keys,files)=np.loadtxt(listfile,usecols=(0,1),unpack=True,dtype={'names':('keys','files'), 'formats':('S5','S30')})
imgs={}
for x in range(len(keys)):
imgs[keys[x]]=files[x]
outfile=open('photscript','w')
string1='sex -c ediscs.sex -BACKPHOTO_TYPE "GLOBAL" -CATALOG_NAME '
string2=' -CHECKIMAGE_TYPE "-BACKGROUND" -CHECKIMAGE_NAME '
finalString=''
finalString=finalString+string1+clname+'_r.cat'+' -CHECKIMAGE_TYPE "-BACKGROUND,SEGMENTATION" -CHECKIMAGE_NAME "' \
+imgs['rimg'][:-5]+'_bkgsub.fits'+','+imgs['rimg'][:-5]+'_segmap.fits" '+imgs['rimg']+','+imgs['rimg']+'\n'
if 'bimg' in keys:
finalString=finalString+string1+clname+'_b.cat'+string2+imgs['bimg'][:-5]+'_bkgsub.fits '+imgs['rimg']+','+imgs['bimg']+'\n'
if 'vimg' in keys:
finalString=finalString+string1+clname+'_v.cat'+string2+imgs['vimg'][:-5]+'_bkgsub.fits '+imgs['rimg']+','+imgs['vimg']+'\n'
if 'iimg' in keys:
finalString=finalString+string1+clname+'_i.cat'+string2+imgs['iimg'][:-5]+'_bkgsub.fits '+imgs['rimg']+','+imgs['iimg']+'\n'
if 'zimg' in keys:
finalString=finalString+string1+clname+'_z.cat'+string2+imgs['zimg'][:-5]+'_bkgsub.fits '+imgs['rimg']+','+imgs['zimg']+'\n'
if 'kimg' in keys:
finalString=finalString+string1+clname+'_k.cat'+string2+imgs['kimg'][:-5]+'_bkgsub.fits '+imgs['rimg']+','+imgs['kimg']+'\n'
if 'rbimg' in keys:
finalString=finalString+string1+clname+'_rb.cat'+string2+imgs['rbimg'][:-5]+'_bkgsub.fits '+imgs['rimg']+','+imgs['rbimg']+'\n'
if 'rkimg' in keys:
finalString=finalString+string1+clname+'_rk.cat'+string2+imgs['rkimg'][:-5]+'_bkgsub.fits '+imgs['rimg']+','+imgs['rkimg']+'\n'
out=open(photfile,'w')
out.write(finalString)
out.close()
#-----------------------------------
#def flags(flux,err,x,y,xmin=0,xmax=1e6,ymin=0,ymax=1e6):
#
# for j in range(len(flux)):
# if flux[j] == -99.0:
# err[j] = -99.0
# if np.log10(np.abs(flux[j])) > 4.0:
# flux[j] = -77.0
# err[j] = -77.0
# if np.log10(np.abs(flux[j])) < -8.0:
# flux[j] = -66.0
# err[j] = -66.0
# if x[j] < xmin:
# flux[j] = -55.0
# err[j] = -55.0
# if x[j] > xmax:
# flux[j] = -55.0
# err[j] = -55.0
# if y[j] < ymin:
# flux[j] = -55.0
# err[j] = -55.0
# if y[j] > ymax:
# flux[j] = -55.0
# err[j] = -55.0
#
# return flux,err
#-----------------------------------
#def fixData(data,flag=-88.0):
#
# keys=data.keys()
# for x in keys:
# if x != 'ids':
# nan=np.where(np.isnan(data[x]))
# inf=np.where(np.isinf(data[x]))
# data[x][nan]=flag
# data[x][inf]=flag
#
# return data
#-----------------------------------
def sigfunc(N,s,a,b):
"""
    PURPOSE: Function from equation 3 of Labbé et al. (2003), AJ, 125, 1107.
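    \tIn that parameterization, N is the linear aperture size (square root of the number of pixels),
    \ts is the pixel-to-pixel rms, and (a, b) are the fitted coefficients.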
"""
return (N*s*(a+(b*N)))
#-----------------------------------
#def binAvgData(x, y, npts, minimum = -999999.0, maximum = 9999999.0):
#
# use = np.where((x > minimum) & (x < maximum))
# x, y = x[use], y[use]
# sort = np.argsort(x)
# xs, ys = x[sort], y[sort]
# xm, ym = np.array([]), np.array([])
# i = 0
# for j in range(int(np.ceil(len(x)/npts))):
# if i+npts <= len(x):
# xm = np.append(xm, np.mean(xs[i:i+npts]))
# ym = np.append(ym, np.mean(ys[i:i+npts]))
# else:
# xm = np.append(xm, np.mean(xs[i:]))
# ym = np.append(ym, np.mean(ys[i:]))
# i = i + npts
#
# return (xm, ym)
#-----------------------------------
#def binAvgDataFixed(x, y, width, minimum = -999999.0, maximum = 9999999.0):
#
# use = np.where((x > minimum) & (x < maximum))
# x, y = x[use], y[use]
# sort = np.argsort(x)
# xs, ys = x[sort], y[sort]
# xm, ym = np.array([]), np.array([])
# i = np.min(x)
# while i + width < np.max(x):
# xm = np.append(xm, np.mean(xs[np.where((xs > i) & (xs <= i+width))]))
# ym = np.append(ym, np.mean(ys[np.where((xs > i) & (xs <= i+width))]))
# i = i + width
#
# return (xm, ym)
#-----------------------------------
def getEBV(ra,dec,path='/Users/tyler/Downloads/'):
"""
PURPOSE: For a given RA and Dec, lookup the E(B-V) value from the Schlegel dust maps.
INPUTS:
\tra - Right ascension (deg; J2000)
\tdec - Declination (deg; J2000)
RETURNS: E(B-V)
"""
if os.path.exists('SFD_dust_4096_ngp.fits') == False:
shutil.copy(path+'SFD_dust_4096_ngp.fits','.')
if os.path.exists('SFD_dust_4096_sgp.fits') == False:
shutil.copy(path+'SFD_dust_4096_sgp.fits','.')
galcoord = coordinates.SkyCoord(ra=ra*u.degree, dec=dec*u.degree, frame='fk5').galactic
    (longitude, latitude) = (galcoord.l.degree, galcoord.b.degree)
    ebv = obs.get_SFD_dust(longitude, latitude)
return ebv
#-----------------------------------
def calflux(flux, zp, abconv = 0.0):
"""
PURPOSE: Converts an instrumental flux into a physical one in flux space to preserve negative values.
INPUTS:
\tflux - instrumental flux
\tzp - zeropoint to convert instrumental to calibrated flux
\tabconv - conversion factor from Vega to AB magnitudes (optional)
RETURNS: Flux in uJy
"""
return ((flux)/(10.0**(0.4*(zp + abconv)))) * 3631.0 * 1e6
#-----------------------------------
def flux2mag(flux, zp, abconv = 0.0):
return -2.5 * np.log10(flux) + zp + abconv
#-----------------------------------
def mag2flux(mag, ab2ujy=False):
if ab2ujy == False:
return 10.0**(-0.4 * mag)
else:
return 10.0**(-0.4 * mag) * 3631.0 * 1e6
#-----------------------------------
#def ab2ujy(mag):
#
# jy = 10.0**(-0.4 * mag) * 3631.0
#
# return jy * 1e6
#-----------------------------------
#def ujy2abmag(flux):
#
# return -2.5 * np.log10(flux) + 23.9
#-----------------------------------
def seeingCorr(rr, rm, mm, outfile='seeingcorr.dat'):
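    """
    PURPOSE: Apply a multiplicative seeing correction, rescaling mm by the flux ratio rr/rm, and
    \toptionally write the (input, corrected) flux pairs to outfile.
    """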
res=(mm*rr)/rm
if outfile != '':
out=open(outfile,'w')
for x in range(len(mm)):
out.write(str(mm[x])+'\t'+str(res[x])+'\n')
out.close()
return res
#-----------------------------------
def zpWFI(ra, dec, v, r, i, v3, r3, i3, starR, photref = 'fors.dat', tol=0.01, synth=False, plot=False,
vr1=-99, vi1=-99, ri1=-99, show=False, classStar=0.9):
"""
Notes: Adapted from Dennis Just's "bvriz.pro" IDL code
"""
# (wmin, starflag) = np.loadtxt('VLT.starflag.dat', usecols=(0, 1), unpack = True, comments = '#')
(forsRA, forsDec, forsV, forsR, forsI, wmin, starflag) = np.loadtxt(photref, usecols = (1, 2, 9, 10, 11, 16, 17),
unpack = True, comments = '#')
#Identify stars based on Dennis's criteria. Create new arrays of only the coordinates
#and photometry of the stars from the FORS and WFI data. Convert the FORS RA from hours
#to degrees for matching. (starflag = 0 = galaxies, 1 = stars)
stars_fors = np.where((wmin > 0.2) & (starflag == 1) & (forsR <= 22.0) & (forsR > 16.5)
& (forsV > 16.5) & (forsI > 16.5))
star_wfi = np.where((starR >= classStar))
(sforsRA, sforsDec, sforsV, sforsR, sforsI) = (forsRA[stars_fors], forsDec[stars_fors],
forsV[stars_fors], forsR[stars_fors],
forsI[stars_fors])
(swfiRA, swfiDec, swfiV, swfiR, swfiI) = (ra[star_wfi], dec[star_wfi], v3[star_wfi],
r3[star_wfi], i3[star_wfi])
sforsRAh = sforsRA*15.0
#Match coords here!
forsCat=coordinates.SkyCoord(ra=sforsRAh*u.degree, dec=sforsDec*u.degree)
wfiCat=coordinates.SkyCoord(ra=swfiRA*u.degree, dec=swfiDec*u.degree)
idx, d2d, _ = wfiCat.match_to_catalog_sky(forsCat)
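    #Keep only star pairs matched to within 1 arcsec on the sky.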
match = np.where(d2d.arcsec <= 1.0)
(sforsVmatch,sforsRmatch,sforsImatch)=(sforsV[idx][match], sforsR[idx][match],
sforsI[idx][match])
#Create initial guesses of the zeropoints, then fit the zeropoints to the
#(V-R) and (R-I) colors using an orthogonal least squares (OLS) regression
#to protect against outliers.
(zpV, zpR, zpI) = (sforsVmatch + (2.5 * np.log10(swfiV[match])),
sforsRmatch + (2.5 * np.log10(swfiR[match])),
sforsImatch + (2.5 * np.log10(swfiI[match])))
vicolor=sforsVmatch-sforsImatch
vrcolor=sforsVmatch-sforsRmatch
ricolor=sforsRmatch-sforsImatch
#For the V-R color, exclude stars with V-R >= 0.7 for clusters with synthesized R-band magnitudes
(vrrange,virange,rirange)=(np.where((vrcolor > 0.) & (vrcolor < 2.5) & (zpV > 22.5) & (zpR > 22.5) & (zpI > 22.5)),
np.where((vicolor > 0.) & (vicolor < 2.5) & (zpV > 22.5) & (zpR > 22.5) & (zpI > 22.5)),
np.where((ricolor > 0.) & (ricolor < 2.5) & (zpV > 22.5) & (zpR > 22.5) & (zpI > 22.5)))
# if synth == True:
# goodvr = np.where((vrcolor > 0.0) & (vrcolor < 2.5))
# (zpVvr, zpRvr, vr) = (zpV[goodvr], zpR[goodvr], vrcolor[goodvr])
# vfit0=curve_fit(fixedVRv,vr,zpVvr)[0][0]
# rfit0=curve_fit(fixedVRr,vr,zpRvr)[0][0]
# else:
# goodvr = np.where((vrcolor > 0.) & (vrcolor < 2.5))
# (zpVvr, zpRvr, vr) = (zpV[goodvr], zpR[goodvr], vrcolor[goodvr])
# vfit0=curve_fit(fixedVRv,vr,zpVvr)[0][0]
# rfit0=curve_fit(fixedVRr,vr,zpRvr)[0][0]
(zpVvi, zpVvr, zpRri, zpRvr, zpIri, zpIvi) = (zpV[virange], zpV[vrrange], zpR[rirange], zpR[vrrange], zpI[rirange], zpI[virange])
(vr,ri,vi) = (vrcolor[vrrange],ricolor[rirange],vicolor[virange])
vfit0=curve_fit(fixedVRv,vr,zpVvr)[0][0]
rfit0=curve_fit(fixedVRr,vr,zpRvr)[0][0]
vfit20=curve_fit(fixedVIv,vi,zpVvi)[0][0]
rfit20=curve_fit(fixedRIr,ri,zpRri)[0][0]
ifit0=curve_fit(fixedVIi,vi,zpIvi)[0][0]
ifit20=curve_fit(fixedRIi,ri,zpIri)[0][0]
if show == True:
print '\n\tFITTED ZEROPOINT INTERCEPTS:\n'
print '\tV (vr) intercept: '+str(vfit0)
print '\tR (vr) intercept: '+str(rfit0)
print '\tV (vi) intercept: '+str(vfit20)
print '\tI (vi) intercept: '+str(ifit0)
print '\tR (ri) intercept: '+str(rfit20)
print '\tI (ri) intercept: '+str(ifit20)+'\n'
pdb.set_trace()
if plot == True:
plt.plot(vi,zpIvi,'ro')
plt.plot(vi,zpVvi,'bo')
plt.plot(vr,zpRvr,'ko')
xx=np.array([-100.0,100.0])
yy=ifit0+(xx*medCterms['VIi'])
yy2=vfit20+(xx*medCterms['VIv'])
yy3=rfit0+(xx*medCterms['VRr'])
plt.plot(xx,yy,'r--')
plt.plot(xx,yy2,'b--')
plt.plot(xx,yy3,'k--')
plt.axis([0,2.5,np.min([ifit0,vfit20,rfit0])-0.5,np.max([ifit0,vfit20,rfit0])+1.])
plt.xlabel('(V-I)')
plt.ylabel('ZP')
plt.savefig('slopecheck.pdf', format='pdf', dpi=6000)
plt.close()
if ((type(vr1) is int) | (type(vi1) is int) | (type(ri1) is int)):
#Iterate over the WFI photometry (using 20 iterations, following Dennis's code)
#to solve for the zeropoint of each source. When there is no V-band data, use
#the (R-I) color (and the associated fit); otherwise use the (V-R) and (V-I)
#colors.
(nv,nr,ni)=(0,0,0)
(zpV0,zpR0,zpI0)=(vfit0+(0.5*medCterms['VRv']), rfit0+(0.5*medCterms['VRr']), ifit0+(0.5*medCterms['VIi']))
(vmag,rmag,imag)=(np.empty(len(starR)),np.empty(len(starR)),np.empty(len(starR)))
(zpv,zpr,zpi)=(np.empty(len(starR)),np.empty(len(starR)),np.empty(len(starR)))
rpflag=np.empty(len(starR))
for j in range(len(starR)):
(zpv[j], zpr[j], zpi[j]) = (zpV0, zpR0, zpI0)
(ii, rdiff, vdiff, idiff) = (0, 1.0, 1.0, 1.0)
if v[j] > 0:
(zpcomp,compmag) = (zpV0,flux2mag(v[j],zpV0))
else:
(zpcomp,compmag) = (zpI0,flux2mag(i[j],zpI0))
oldr=flux2mag(r[j],zpr[j])
rmag[j]=oldr
while (ii <= 20) and (np.abs(rdiff) > tol):
if v[j] > 0:
rpflag[j]=ii
zpr[j]=medCterms['VRr']*(compmag-rmag[j])+rfit0
zpcomp=medCterms['VRv']*(compmag-rmag[j])+vfit0
rmag[j]=flux2mag(r[j],zpr[j])
compmag=flux2mag(v[j],zpcomp)
elif ((v[j] <= 0) & (i[j] > 0)):
rpflag[j]=-1
zpr[j]=medCterms['RIr']*(rmag[j]-compmag)+rfit20
zpcomp=medCterms['RIi']*(rmag[j]-compmag)+ifit20
rmag[j]=flux2mag(r[j],zpr[j])
compmag=flux2mag(i[j],zpcomp)
else:
rpflag[j]=-1
rmag[j]=flux2mag(r[j],zpr[j])
rdiff=oldr-rmag[j]
oldr=rmag[j]
ii += 1
ii = 0
if v[j] > 0:
oldv=flux2mag(v[j],zpv[j])
vmag[j]=oldv
compmag=rmag[j]
while (ii <= 20) and (np.abs(vdiff) > tol):
zpv[j]=medCterms['VRv']*(vmag[j]-compmag)+vfit0
vmag[j]=flux2mag(v[j],zpv[j])
vdiff=oldv-vmag[j]
oldv=vmag[j]
ii += 1
else:
vmag[j]=-99
ii = 0
if i[j] > 0:
oldi=flux2mag(i[j],zpi[j])
imag[j]=oldi
if v[j] > 0:
(fit0, fit1, compmag, cfit0, cfit1) = (ifit0, medCterms['VIi'], vmag[j], vfit20, medCterms['VIv'])
else:
(fit0, fit1, compmag, cfit0, cfit1) = (ifit20, medCterms['RIi'], rmag[j], rfit20, medCterms['RIr'])
while (ii <= 20) and (np.abs(idiff) > tol):
zpi[j]=fit1*(compmag-imag[j])+fit0
imag[j]=flux2mag(i[j],zpi[j])
idiff=oldi-imag[j]
oldi=imag[j]
ii += 1
else:
imag[j]=-99
else:
(vmag,rmag,imag)=(np.empty(len(starR)),np.empty(len(starR)),np.empty(len(starR)))
(zpv,zpr,zpi)=(np.empty(len(starR)),np.empty(len(starR)),np.empty(len(starR)))
#For the case where the colors are supplied (e.g., the 1" colors are passed in for
#larger apertures so that the colors are uniform across the zeropoint calculations)
for j in range(len(starR)):
if ((v[j] > 0) & (i[j] > 0)):
zpv[j]=fixedVRv(vr1[j],vfit0)
zpr[j]=fixedVRr(vr1[j],rfit0)
zpi[j]=fixedVIi(vi1[j],ifit0)
vmag[j]=flux2mag(v[j],zpv[j])
rmag[j]=flux2mag(r[j],zpr[j])
imag[j]=flux2mag(i[j],zpi[j])
elif ((v[j] <= 0) & (i[j] > 0)):
vmag[j]=-99
zpr[j]=fixedRIr(ri1[j],rfit20)
zpi[j]=fixedRIi(ri1[j],ifit20)
rmag[j]=flux2mag(r[j],zpr[j])
imag[j]=flux2mag(i[j],zpi[j])
elif ((v[j] <= 0) & (i[j] <= 0)):
vmag[j]=-99
imag[j]=-99
zpr[j]=fixedVRr(0.5,rfit0)
rmag[j]=flux2mag(r[j],zpr[j])
#Convert the Vega magnitudes to AB magnitudes and uJy fluxes. Return the uJy fluxes
#and the AB magnitude zeropoints.
(vmagab, rmagab, imagab) = (vmag + vega2AB['v'], rmag + vega2AB['r'], imag + vega2AB['i'])
(vab, rab, iab) = (mag2flux(vmagab, ab2ujy=True), mag2flux(rmagab, ab2ujy=True), mag2flux(imagab, ab2ujy=True))
return (vab, rab, iab, zpv + vega2AB['v'], zpr + vega2AB['r'], zpi + vega2AB['i'])
#-----------------------------------
def mergeLists(b="",v="",r="",i="",z="",k="",rimg="",zpb=0.0,zpk=0.0,null=-99):
"""
"""
(foo1,foo2)=np.loadtxt(r, usecols = (0,1), unpack=True, comments= '#')
nsrcs=len(foo1)
nullArr = np.zeros(nsrcs) + null
columnsR = (2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
columnsX = (4, 5, 6, 7, 8, 15)
if b != "":
(b1, b2, b3, biso, bauto, sexflagB) = np.loadtxt(b, usecols = columnsX,
unpack = True, comments = '#')
else:
(b1, b2, b3, biso, bauto, sexflagB) = (np.zeros(nsrcs)+null, np.zeros(nsrcs)+null, np.zeros(nsrcs)+null,
np.zeros(nsrcs)+null, np.zeros(nsrcs)+null, np.zeros(nsrcs)+null)
if v != "":
(v1, v2, v3, viso, vauto, sexflagV) = np.loadtxt(v, usecols = columnsX, unpack = True, comments = '#')
else:
(v1, v2, v3, viso, vauto, sexflagV) = (np.zeros(nsrcs)+null, np.zeros(nsrcs)+null, np.zeros(nsrcs)+null,
np.zeros(nsrcs)+null, np.zeros(nsrcs)+null, np.zeros(nsrcs)+null)
(x, y, r1, r2, r3, riso, rauto, fwhmR, kronR, aimage,
bimage, theta, isoarea, starR, sexflagR) = np.loadtxt(r, usecols = columnsR, unpack = True, comments = '#')
fwhmR = fwhmR * 3600.0
majorax = kronR * aimage
minorax = kronR * bimage
if i != "":
(i1, i2, i3, iiso, iauto, sexflagI) = np.loadtxt(i, usecols = columnsX, unpack = True, comments = '#')
else:
(i1, i2, i3, iiso, iauto, sexflagI) = (np.zeros(nsrcs)+null, np.zeros(nsrcs)+null, np.zeros(nsrcs)+null,
np.zeros(nsrcs)+null, np.zeros(nsrcs)+null, np.zeros(nsrcs)+null)
if z != "":
(z1, z2, z3, ziso, zauto, sexflagz) = np.loadtxt(z, usecols = columnsX, unpack = True, comments = '#')
else:
(z1, z2, z3, ziso, zauto, sexflagz) = (np.zeros(nsrcs)+null, np.zeros(nsrcs)+null, np.zeros(nsrcs)+null,
np.zeros(nsrcs)+null, np.zeros(nsrcs)+null, np.zeros(nsrcs)+null)
if k != "":
(k1, k2, k3, kiso, kauto, sexflagK) = np.loadtxt(k, usecols = columnsX, unpack = True, comments = '#')
else:
(k1, k2, k3, kiso, kauto, sexflagK) = (np.zeros(nsrcs)+null, np.zeros(nsrcs)+null, np.zeros(nsrcs)+null,
np.zeros(nsrcs)+null, np.zeros(nsrcs)+null, np.zeros(nsrcs)+null)
#Convert (x,y) into RA and Dec
np.savetxt('dummy.foobar', np.c_[(x, y)])
sp.Popen('xy2sky -d '+rimg+' @dummy.foobar > dummy2.foobar', shell=True).wait()
(ra, dec) = np.loadtxt('dummy2.foobar', usecols = (0, 1), unpack = True)
os.remove('dummy.foobar')
os.remove('dummy2.foobar')
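#Note: xy2sky shells out to WCSTools. An in-process alternative (sketch, assuming
#astropy.wcs is available and rimg carries a valid WCS solution) would be:
# from astropy.wcs import WCS
# w = WCS(pf.open(rimg)[0].header)
# ra, dec = w.all_pix2world(x, y, 1)  #1 = FITS-style 1-based pixel origin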
data = {'ra':ra, 'dec':dec, 'x':x, 'y':y, 'b1':b1, 'b2':b2, 'b3':b3, 'biso':biso, 'bauto':bauto,
'sexflagB':sexflagB, 'v1':v1, 'v2':v2, 'v3':v3, 'viso':viso, 'vauto':vauto, 'sexflagV':sexflagV,
'r1':r1, 'r2':r2, 'r3':r3, 'riso':riso, 'rauto':rauto, 'starR':starR, 'sexflagR':sexflagR, 'i1':i1, 'i2':i2,
'i3':i3, 'iiso':iiso, 'iauto':iauto, 'sexflagI':sexflagI, 'z1':z1, 'z2':z2, 'z3':z3, 'ziso':ziso,
'zauto':zauto, 'sexflagz':sexflagz, 'k1':k1, 'k2':k2, 'k3':k3, 'kiso':kiso, 'kauto':kauto,
'sexflagK':sexflagK, 'fwhmR':fwhmR, 'kron_a':majorax, 'kron_b':minorax, 'isoarea':isoarea,
'theta':theta}
return data
#-----------------------------------
def updateBCat(b='',r='',rb='',zpb='',imglist='imglist',rsegmap='',oldcat='',null=-99,xmin=0.,xmax=1e6,
ymin=0.,ymax=1e6,clname='',pixscale=0.238,errborder=50.,includeLDP=False):
abconvb = vega2AB['bkpno']
(foo1,foo2)=np.loadtxt(r, usecols = (0,1), unpack=True, comments= '#')
nsrcs=len(foo1)
nullArr = np.zeros(nsrcs) + null
(keys,files)=np.loadtxt(imglist,usecols=(0,1),unpack=True,dtype={'names':('keys','files'), 'formats':('S4','S30')})
imgs={}
for x in range(len(keys)):
imgs[keys[x]]=files[x]
bbkgsub=imgs['bimg'][:-5]+'_bkgsub.fits'
hdu = pf.open(oldcat)
oldData = hdu[1].data
if includeLDP == True:
#Add in LDP redshifts
megatab=pf.open('/Users/tyler/megacat.v5.7.fits')
megadat=megatab[1].data
(mzldp,mq)=(megadat['zldp'],megadat['q'])
wfiSky=coordinates.SkyCoord(ra=oldData['ra']*u.degree, dec=oldData['dec']*u.degree)
megaSky=coordinates.SkyCoord(ra=megadat['ra']*u.degree, dec=megadat['dec']*u.degree)
idx, d2d, _ = megaSky.match_to_catalog_sky(wfiSky)
match = np.where(d2d.arcsec <= 0.5)
(oldData['zLDP'][idx[match]],oldData['Q'][idx[match]])=(mzldp[match],mq[match])
(oldRx,oldRy) = (oldData['x'],oldData['y'])
tmpData = mergeLists(b=b,r=r,zpb=zpb,null=null,rimg=imgs['rimg'])
res=matchxy(tmpData['x'],tmpData['y'],oldRx,oldRy)
newData = {'b1':tmpData['b1'][res], 'b2':tmpData['b2'][res], 'b3':tmpData['b3'][res],
'biso':tmpData['biso'][res], 'bauto':tmpData['bauto'][res],
'r1':tmpData['r1'][res], 'r2':tmpData['r2'][res], 'r3':tmpData['r3'][res],
'riso':tmpData['riso'][res], 'rauto':tmpData['rauto'][res],
'starB':tmpData['starB'][res], 'sexflagB':tmpData['sexflagB'][res],
'fwhmB':tmpData['fwhmB'][res]}
#Convert the B-band fluxes from SExtractor into uJy fluxes.
#Update the dictionary appropriately. Correct
#the B-band data for seeing if its seeing is worse than the worst
#WFI seeing image.
photcols = (4, 5, 6, 7, 8)
(b1m, b2m, b3m, bisom, bautom) = (calflux(newData['b1'], zpb, abconv = abconvb),
calflux(newData['b2'], zpb, abconv = abconvb),
calflux(newData['b3'], zpb, abconv = abconvb),
calflux(newData['biso'], zpb, abconv = abconvb),
calflux(newData['bauto'], zpb, abconv = abconvb))
(b1c, b2c, b3c, bisoc, bautoc) = (newData['b1'], newData['b2'], newData['b3'],
newData['biso'], newData['bauto'])
# pdb.set_trace()
#Calculate uncertainties
print '\n\tCalculating uncertainties...\n'
(auton,ison,n1,n2,n3)=(np.sqrt(np.pi*tmpData['kron_a'][res]*tmpData['kron_b'][res]),
np.sqrt(tmpData['isoarea'][res]),np.sqrt(np.pi*(1.0/pixscale)**2.0),
np.sqrt(np.pi*(2.0/pixscale)**2.0),np.sqrt(np.pi*(3.0/pixscale)**2.0))
berrpars = lb.main(bbkgsub, rsegmap, outplot='bdepth.pdf', clname=clname, pixscale=pixscale,
border=errborder, persec=False, aprange=[0.5,2.0],maxrange=500.)
bautoerr=(sigfunc(auton,berrpars[0],berrpars[1],berrpars[2])/bautoc)
bisoerr=(sigfunc(ison,berrpars[0],berrpars[1],berrpars[2])/bisoc)
b1err=(sigfunc(n1,berrpars[0],berrpars[1],berrpars[2])/b1c)
b2err=(sigfunc(n2,berrpars[0],berrpars[1],berrpars[2])/b2c)
b3err=(sigfunc(n3,berrpars[0],berrpars[1],berrpars[2])/b3c)
print '\tB-band done\n'
#Correct for seeing (if necessary)
if rb != "":
(rb1, rb2, rb3, rbiso, rbauto) = np.loadtxt(rb, usecols = photcols, unpack = True, comments = '#')
(r1zp, r2zp, r3zp, risozp, rautozp) = (backZP(newData['r1'],flux2mag(oldData['fR1'],23.9)),
backZP(newData['r2'],flux2mag(oldData['fR2'],23.9)),
backZP(newData['r3'],flux2mag(oldData['fR3'],23.9)),
backZP(newData['riso'],flux2mag(oldData['fRiso'],23.9)),
backZP(newData['rauto'],flux2mag(oldData['fRauto'],23.9)))
(rb1m, rb2m , rb3m, rbisom, rbautom) = (calflux(rb1[res], r1zp), calflux(rb2[res], r2zp),calflux(rb3[res], r3zp),
calflux(rbiso[res], risozp),calflux(rbauto[res], rautozp))
#Calculate errors again! >_<
rbkgsub=imgs['simg'][:-5]+'_bkgsub.fits'
rerrpars = lb.main(rbkgsub, rsegmap, outplot='r_smooth_depth.pdf', clname=clname, pixscale=pixscale,
border=errborder, persec=False, aprange=[0.5,2.0],maxrange=500.)
rautoerr=(sigfunc(auton,rerrpars[0],rerrpars[1],rerrpars[2])/rb1[res])*rbautom
risoerr=(sigfunc(ison,rerrpars[0],rerrpars[1],rerrpars[2])/rbiso[res])*rbisom
r1err=(sigfunc(n1,rerrpars[0],rerrpars[1],rerrpars[2])/rb1[res])*rb1m
r2err=(sigfunc(n2,rerrpars[0],rerrpars[1],rerrpars[2])/rb2[res])*rb2m
r3err=(sigfunc(n3,rerrpars[0],rerrpars[1],rerrpars[2])/rb3[res])*rb3m
print '\tSmoothed R-band done\n'
# pdb.set_trace()
(b1mc, b2mc, b3mc, bisomc, bautomc) = (seeingCorr(oldData['fR1'], rb1m, b1m), seeingCorr(oldData['fR2'], rb2m, b2m),
seeingCorr(oldData['fR3'], rb3m, b3m), seeingCorr(oldData['fRiso'], rbisom, bisom),
seeingCorr(oldData['fRauto'], rbautom, bautom))
(b1ecorr,b2ecorr,b3ecorr,bisoecorr,bautoecorr) = (b1err*b1mc, b2err*b2mc, b3err*b3mc,
bisoerr*bisomc, bautoerr*bautomc)
(newData['b1e'],newData['b2e'],newData['b3e'],
newData['bisoe'],newData['bautoe']) = (addquad(b1ecorr,oldData['fR1err'],r1err,b1mc,oldData['fR1'],rb1m),
addquad(b2ecorr,oldData['fR2err'],r2err,b2mc,oldData['fR2'],rb2m),
addquad(b3ecorr,oldData['fR3err'],r3err,b3mc,oldData['fR3'],rb3m),
addquad(bisoecorr,oldData['fRisoerr'],risoerr,bisomc,oldData['fRiso'],rbisom),
addquad(bautoecorr,oldData['fRautoerr'],rautoerr,bautomc,oldData['fRauto'],rbautom))
else:
(newData['b1e'], newData['b2e'], newData['b3e'],
newData['bisoe'], newData['bautoe']) = (b1err*newData['b1'], b2err*newData['b2'],
b3err*newData['b3'], bisoerr*newData['biso'],
bautoerr*newData['bauto'])
(b1mc, b2mc, b3mc, bisomc, bautomc) = (b1m, b2m, b3m, bisom, bautom)
(newData['b1'], newData['b2'], newData['b3'],
newData['biso'], newData['bauto']) = (b1mc, b2mc, b3mc, bisomc, bautomc)
outname=clname+'_catalogB_v7.0.fits'
# final=oldData
# (final['fwhmB'],final['fB1'],final['fB1err'],final['fB2'],
# final['fB2err'],final['fB3'],final['fB3err'],final['fBiso'],
# final['fBisoerr'],final['fBauto'],final['fBautoerr'],
# final['starB'],final['sexflagB']) = (newData['fwhmB'],newData['b1'],newData['b1e'],newData['b2'],newData['b2e'],
# newData['b3'],newData['b3e'],newData['biso'],newData['bisoe'],
# newData['bauto'],newData['bautoe'],newData['starB'],newData['sexflagB'])
final={'ids':oldData['ids'],'ra':oldData['ra'],'dec':oldData['dec'],'x':oldData['x'],'y':oldData['y'],'ebv':oldData['ebv'],'fwhmB':oldData['fwhmB'],
'fwhmV':oldData['fwhmV'],'fwhmR':oldData['fwhmR'],'fwhmI':oldData['fwhmI'],'fwhmz':oldData['fwhmz'],'fwhmK':oldData['fwhmK'],
'fB1':newData['b1'],'fB1err':newData['b1e'],'fB2':newData['b2'],'fB2err':newData['b2e'],'fB3':newData['b3'],'fB3err':newData['b3e'],
'fBiso':newData['biso'],'fBisoerr':newData['bisoe'],'fBauto':newData['bauto'],'fBautoerr':newData['bautoe'],'fV1':oldData['fV1'],
'fV1err':oldData['fV1err'],'fV2':oldData['fV2'],'fV2err':oldData['fV2err'],'fV3':oldData['fV3'],'fV3err':oldData['fV3err'],'fViso':oldData['fViso'],
'fVisoerr':oldData['fVisoerr'],'fVauto':oldData['fVauto'],'fVautoerr':oldData['fVautoerr'],'fR1':oldData['fR1'],'fR1err':oldData['fR1err'],
'fR2':oldData['fR2'],'fR2err':oldData['fR2err'],'fR3':oldData['fR3'],'fR3err':oldData['fR3err'],'fRiso':oldData['fRiso'],'fRisoerr':oldData['fRisoerr']
,'fRauto':oldData['fRauto'],'fRautoerr':oldData['fRautoerr'],'fI1':oldData['fI1'],'fI1err':oldData['fI1err'],'fI2':oldData['fI2'],
'fI2err':oldData['fI2err'],'fI3':oldData['fI3'],'fI3err':oldData['fI3err'],'fIiso':oldData['fIiso'],'fIisoerr':oldData['fIisoerr'],
'fIauto':oldData['fIauto'],'fIautoerr':oldData['fIautoerr'],'fz1':oldData['fz1'],'fz1err':oldData['fz1err'],'fz2':oldData['fz2'],
'fz2err':oldData['fz2err'],'fz3':oldData['fz3'],'fz3err':oldData['fz3err'],'fziso':oldData['fziso'],'fzisoerr':oldData['fzisoerr'],
'fzauto':oldData['fzauto'],'fzautoerr':oldData['fzautoerr'],'fK1':oldData['fK1'],'fK1err':oldData['fK1err'],'fK2':oldData['fK2'],
'fK2err':oldData['fK2err'],'fK3':oldData['fK3'],'fK3err':oldData['fK3err'],'fKiso':oldData['fKiso'],'fKisoerr':oldData['fKisoerr'],
'fKauto':oldData['fKauto'],'fKautoerr':oldData['fKautoerr'],'zLDP':oldData['zLDP'],'Q':oldData['Q'],'starB':newData['starB'],'starV':oldData['starV'],
'starR':oldData['starR'],
'starI':oldData['starI'],'starz':oldData['starz'],'starK':oldData['starK'],'sexflagB':newData['sexflagB'],'sexflagV':oldData['sexflagV'],
'sexflagR':oldData['sexflagR'],'sexflagI':oldData['sexflagI'],'sexflagz':oldData['sexflagz'],'sexflagK':oldData['sexflagK']}
# s=['ids','ra','dec','x','y','ebv','fwhmB','fwhmV','fwhmR','fwhmI','fwhmz','fwhmK','fB1','fB1err','fB2',
# 'fB2err','fB3','fB3err','fBiso','fBisoerr',
# 'fBauto','fBautoerr','fV1','fV1err','fV2','fV2err','fV3','fV3err','fViso','fVisoerr','fVauto',
# 'fVautoerr','fR1','fR1err','fR2','fR2err','fR3','fR3err','fRiso','fRisoerr','fRauto',
# 'fRautoerr','fI1','fI1err','fI2','fI2err','fI3','fI3err','fIiso','fIisoerr','fIauto',
# 'fIautoerr','fz1','fz1err','fz2','fz2err','fz3','fz3err','fziso','fzisoerr','fzauto',
# 'fzautoerr','fK1','fK1err','fK2','fK2err','fK3','fK3err','fKiso','fKisoerr','fKauto',
# 'fKautoerr','zLDP','Q','starB','starV','starR','starI','starz','starK','sexflagB','sexflagV','sexflagR','sexflagI',
# 'sexflagz','sexflagK']
# pdb.set_trace()
#Save the dictionary as a FITS table
tab = Table(final, names=('ids','ra','dec','x','y','ebv','fwhmB','fwhmV','fwhmR','fwhmI','fwhmz','fwhmK','fB1','fB1err','fB2',
'fB2err','fB3','fB3err','fBiso','fBisoerr',
'fBauto','fBautoerr','fV1','fV1err','fV2','fV2err','fV3','fV3err','fViso','fVisoerr','fVauto',
'fVautoerr','fR1','fR1err','fR2','fR2err','fR3','fR3err','fRiso','fRisoerr','fRauto',
'fRautoerr','fI1','fI1err','fI2','fI2err','fI3','fI3err','fIiso','fIisoerr','fIauto',
'fIautoerr','fz1','fz1err','fz2','fz2err','fz3','fz3err','fziso','fzisoerr','fzauto',
'fzautoerr','fK1','fK1err','fK2','fK2err','fK3','fK3err','fKiso','fKisoerr','fKauto',
'fKautoerr','zLDP','Q','starB','starV','starR','starI','starz','starK','sexflagB','sexflagV','sexflagR','sexflagI',
'sexflagz','sexflagK'))
tab.write(outname, format='fits', overwrite=True)
#-----------------------------------
def updateKCat(k='',r='',rk='',zpk='',imglist='imglist',rsegmap='',oldcat='',null=-99,xmin=0.,xmax=1e6,
ymin=0.,ymax=1e6,clname='',pixscale=0.238,errborder=50.,expmap=''):
abconvk = vega2AB['k']
(foo1,foo2)=np.loadtxt(r, usecols = (0,1), unpack=True, comments= '#')
nsrcs=len(foo1)
nullArr = np.zeros(nsrcs) + null
(keys,files)=np.loadtxt(imglist,usecols=(0,1),unpack=True,dtype={'names':('keys','files'), 'formats':('S4','S30')})
imgs={}
for x in range(len(keys)):
imgs[keys[x]]=files[x]
bbkgsub=imgs['kimg'][:-5]+'_bkgsub.fits'
hdu = pf.open(oldcat)
oldData = hdu[1].data
(oldRx,oldRy) = (oldData['x'],oldData['y'])
tmpData = mergeLists(k=k,r=r,zpk=zpk,null=null,rimg=imgs['rimg'])
res=matchxy(oldRx,oldRy,tmpData['x'],tmpData['y'])
newData = {'k1':tmpData['k1'][res], 'k2':tmpData['k2'][res], 'k3':tmpData['k3'][res],
'kiso':tmpData['kiso'][res], 'kauto':tmpData['kauto'][res],
'r1':tmpData['r1'][res], 'r2':tmpData['r2'][res], 'r3':tmpData['r3'][res],
'riso':tmpData['riso'][res], 'rauto':tmpData['rauto'][res],
'starK':tmpData['starK'][res], 'sexflagK':tmpData['sexflagK'][res],
'fwhmK':tmpData['fwhmK'][res]}
#Convert the K-band fluxes from SExtractor into uJy fluxes.
#Update the dictionary appropriately. Correct
#the K-band data for seeing if its seeing is worse than the worst
#WFI seeing image.
photcols = (4, 5, 6, 7, 8)
(k1m, k2m, k3m, kisom, kautom) = (calflux(newData['k1'], zpk, abconv = abconvk),
calflux(newData['k2'], zpk, abconv = abconvk),
calflux(newData['k3'], zpk, abconv = abconvk),
calflux(newData['kiso'], zpk, abconv = abconvk),
calflux(newData['kauto'], zpk, abconv = abconvk))
(k1c, k2c, k3c, kisoc, kautoc) = (newData['k1'], newData['k2'], newData['k3'],
newData['kiso'], newData['kauto'])
#Calculate uncertainties
print '\n\tCalculating uncertainties...\n'
(auton,ison,n1,n2,n3)=(np.sqrt(np.pi*tmpData['kron_a'][res]*tmpData['kron_b'][res]),
np.sqrt(tmpData['isoarea'][res]),np.sqrt(np.pi*(1.0/pixscale)**2.0),
np.sqrt(np.pi*(2.0/pixscale)**2.0),np.sqrt(np.pi*(3.0/pixscale)**2.0))
kerrpars = lb.main(bbkgsub, rsegmap, outplot='kdepth.pdf', clname=clname, pixscale=pixscale,
border=errborder, persec=False, aprange=[0.5,2.0],maxrange=500.,
expmap=expmap)
kautoerr=(sigfunc(auton,kerrpars[0],kerrpars[1],kerrpars[2])/kautoc)
kisoerr=(sigfunc(ison,kerrpars[0],kerrpars[1],kerrpars[2])/kisoc)
k1err=(sigfunc(n1,kerrpars[0],kerrpars[1],kerrpars[2])/k1c)
k2err=(sigfunc(n2,kerrpars[0],kerrpars[1],kerrpars[2])/k2c)
k3err=(sigfunc(n3,kerrpars[0],kerrpars[1],kerrpars[2])/k3c)
print '\tK-band done\n'
#Correct for seeing (if necessary)
if rk != "":
(rk1, rk2, rk3, rkiso, rkauto) = np.loadtxt(rk, usecols = photcols, unpack = True, comments = '#')
(r1zp, r2zp, r3zp, risozp, rautozp) = (backZP(newData['r1'],flux2mag(oldData['fR1'],23.9)),
backZP(newData['r2'],flux2mag(oldData['fR2'],23.9)),
backZP(newData['r3'],flux2mag(oldData['fR3'],23.9)),
backZP(newData['riso'],flux2mag(oldData['fRiso'],23.9)),
backZP(newData['rauto'],flux2mag(oldData['fRauto'],23.9)))
(rk1m, rk2m , rk3m, rkisom, rkautom) = (calflux(rk1[res], r1zp), calflux(rk2[res], r2zp),calflux(rk3[res], r3zp),
calflux(rkiso[res], risozp),calflux(rkauto[res], rautozp))
#Calculate errors again! >_<
rbkgsub=imgs['simg'][:-5]+'_bkgsub.fits'
rerrpars = lb.main(rbkgsub, rsegmap, outplot='r_smooth_depth.pdf', clname=clname, pixscale=pixscale,
border=errborder, persec=False, aprange=[0.5,2.0],maxrange=500.)
rautoerr=(sigfunc(auton,rerrpars[0],rerrpars[1],rerrpars[2])/rk1[res])*rkautom
risoerr=(sigfunc(ison,rerrpars[0],rerrpars[1],rerrpars[2])/rkiso[res])*rkisom
r1err=(sigfunc(n1,rerrpars[0],rerrpars[1],rerrpars[2])/rk1[res])*rk1m
r2err=(sigfunc(n2,rerrpars[0],rerrpars[1],rerrpars[2])/rk2[res])*rk2m
r3err=(sigfunc(n3,rerrpars[0],rerrpars[1],rerrpars[2])/rk3[res])*rk3m
print '\tSmoothed R-band done\n'
(k1mc, k2mc, k3mc, kisomc, kautomc) = (seeingCorr(oldData['fR1'], rk1m, k1m), seeingCorr(oldData['fR2'], rk2m, k2m),
seeingCorr(oldData['fR3'], rk3m, k3m), seeingCorr(oldData['fRiso'], rkisom, kisom),
seeingCorr(oldData['fRauto'], rkautom, kautom))
(k1ecorr,k2ecorr,k3ecorr,kisoecorr,kautoecorr) = (k1err*k1mc, k2err*k2mc, k3err*k3mc,
kisoerr*kisomc, kautoerr*kautomc)
(newData['k1e'],newData['k2e'],newData['k3e'],
newData['kisoe'],newData['kautoe']) = (addquad(k1ecorr,oldData['fR1err'],r1err,k1mc,oldData['fR1'],rk1m),
addquad(k2ecorr,oldData['fR2err'],r2err,k2mc,oldData['fR2'],rk2m),
addquad(k3ecorr,oldData['fR3err'],r3err,k3mc,oldData['fR3'],rk3m),
addquad(kisoecorr,oldData['fRisoerr'],risoerr,kisomc,oldData['fRiso'],rkisom),
addquad(kautoecorr,oldData['fRautoerr'],rautoerr,kautomc,oldData['fRauto'],rkautom))
else:
(newData['k1e'], newData['k2e'], newData['k3e'],
newData['kisoe'], newData['kautoe']) = (k1err*newData['k1'], k2err*newData['k2'],
k3err*newData['k3'], kisoerr*newData['kiso'],
kautoerr*newData['kauto'])
(k1mc, k2mc, k3mc, kisomc, kautomc) = (k1m, k2m, k3m, kisom, kautom)
(newData['k1'], newData['k2'], newData['k3'],
newData['kiso'], newData['kauto']) = (k1mc, k2mc, k3mc, kisomc, kautomc)
#Scale uncertainties by the exposure map
expdata=pf.open(expmap)[0].data
expmax=np.max(expdata)
expvalues=expdata[tmpData['y'][res].astype(int),tmpData['x'][res].astype(int)]
scales=1./np.sqrt(expvalues/expmax)
(newData['k1e'],newData['k2e'],newData['k3e'],
newData['kisoe'],newData['kautoe']) = (newData['k1e']*scales, newData['k2e']*scales, newData['k3e']*scales,
newData['kisoe']*scales, newData['kautoe']*scales)
outname=clname+'_catalogK_v7.0.fits'
#Check if old catalog has more sources than K-band (weird problem, but crept up) and remove spurious sources (should only be 1 or 2 max)
if len(oldData['x']) > len(res):
use=matchxy(tmpData['x'],tmpData['y'],oldData['x'],oldData['y'])
else:
use=np.arange(len(oldData['x']))
final={'ids':oldData['ids'][use],'ra':oldData['ra'][use],'dec':oldData['dec'][use],'x':oldData['x'][use],'y':oldData['y'][use],'ebv':oldData['ebv'][use],'fwhmB':oldData['fwhmB'][use],
'fwhmV':oldData['fwhmV'][use],'fwhmR':oldData['fwhmR'][use],'fwhmI':oldData['fwhmI'][use],'fwhmz':oldData['fwhmz'][use],'fwhmK':newData['fwhmK'],
'fB1':oldData['fB1'][use],'fB1err':oldData['fB1err'][use],'fB2':oldData['fB2'][use],'fB2err':oldData['fB2err'][use],'fB3':oldData['fB3'][use],'fB3err':oldData['fB3err'][use],
'fBiso':oldData['fBiso'][use],'fBisoerr':oldData['fBisoerr'][use],'fBauto':oldData['fBauto'][use],'fBautoerr':oldData['fBautoerr'][use],'fV1':oldData['fV1'][use],
'fV1err':oldData['fV1err'][use],'fV2':oldData['fV2'][use],'fV2err':oldData['fV2err'][use],'fV3':oldData['fV3'][use],'fV3err':oldData['fV3err'][use],'fViso':oldData['fViso'][use],
'fVisoerr':oldData['fVisoerr'][use],'fVauto':oldData['fVauto'][use],'fVautoerr':oldData['fVautoerr'][use],'fR1':oldData['fR1'][use],'fR1err':oldData['fR1err'][use],
'fR2':oldData['fR2'][use],'fR2err':oldData['fR2err'][use],'fR3':oldData['fR3'][use],'fR3err':oldData['fR3err'][use],'fRiso':oldData['fRiso'][use],'fRisoerr':oldData['fRisoerr'][use]
,'fRauto':oldData['fRauto'][use],'fRautoerr':oldData['fRautoerr'][use],'fI1':oldData['fI1'][use],'fI1err':oldData['fI1err'][use],'fI2':oldData['fI2'][use],
'fI2err':oldData['fI2err'][use],'fI3':oldData['fI3'][use],'fI3err':oldData['fI3err'][use],'fIiso':oldData['fIiso'][use],'fIisoerr':oldData['fIisoerr'][use],
'fIauto':oldData['fIauto'][use],'fIautoerr':oldData['fIautoerr'][use],'fz1':oldData['fz1'][use],'fz1err':oldData['fz1err'][use],'fz2':oldData['fz2'][use],
'fz2err':oldData['fz2err'][use],'fz3':oldData['fz3'][use],'fz3err':oldData['fz3err'][use],'fziso':oldData['fziso'][use],'fzisoerr':oldData['fzisoerr'][use],
'fzauto':oldData['fzauto'][use],'fzautoerr':oldData['fzautoerr'][use],'fK1':newData['k1'],'fK1err':newData['k1e'],'fK2':newData['k2'],
'fK2err':newData['k2e'],'fK3':newData['k3'],'fK3err':newData['k3e'],'fKiso':newData['kiso'],'fKisoerr':newData['kisoe'],
'fKauto':newData['kauto'],'fKautoerr':newData['kautoe'],'zLDP':oldData['zLDP'][use],'Q':oldData['Q'][use],'starB':oldData['starB'][use],'starV':oldData['starV'][use],
'starR':oldData['starR'][use],
'starI':oldData['starI'][use],'starz':oldData['starz'][use],'starK':newData['starK'],'sexflagB':oldData['sexflagB'][use],'sexflagV':oldData['sexflagV'][use],
'sexflagR':oldData['sexflagR'][use],'sexflagI':oldData['sexflagI'][use],'sexflagz':oldData['sexflagz'][use],'sexflagK':newData['sexflagK']}
tab = Table(final, names=('ids','ra','dec','x','y','ebv','fwhmB','fwhmV','fwhmR','fwhmI','fwhmz','fwhmK','fB1','fB1err','fB2',
'fB2err','fB3','fB3err','fBiso','fBisoerr',
'fBauto','fBautoerr','fV1','fV1err','fV2','fV2err','fV3','fV3err','fViso','fVisoerr','fVauto',
'fVautoerr','fR1','fR1err','fR2','fR2err','fR3','fR3err','fRiso','fRisoerr','fRauto',
'fRautoerr','fI1','fI1err','fI2','fI2err','fI3','fI3err','fIiso','fIisoerr','fIauto',
'fIautoerr','fz1','fz1err','fz2','fz2err','fz3','fz3err','fziso','fzisoerr','fzauto',
'fzautoerr','fK1','fK1err','fK2','fK2err','fK3','fK3err','fKiso','fKisoerr','fKauto',
'fKautoerr','zLDP','Q','starB','starV','starR','starI','starz','starK','sexflagB','sexflagV','sexflagR','sexflagI',
'sexflagz','sexflagK'))
tab.write(outname, format='fits', overwrite=True)
#-----------------------------------
def main(b="", v="", r="", i="", z="", k="", rb="", rk="", imglist='', rsegmap="",
zpb=0.0, zpk=0.0, zpz=0.0, null=-99, kpno = False, clname="",pixscale=0.238,
outprefix='',idname='',synth=False, megacat=megaLocation,maxz=100.,
xmin=-99,xmax=-99,ymin=-99,ymax=-99,errborder=50.0,errs=True,wkimg='',fors='',classStar=0.9):
"""
"""
(keys,files)=np.loadtxt(imglist,usecols=(0,1),unpack=True,dtype={'names':('keys','files'), 'formats':('S5','S30')})
imgs={}
for x in range(len(keys)):
imgs[keys[x]]=files[x]
if 'bimg' in imgs:
bbkgsub=imgs['bimg'][:-5]+'_bkgsub.fits'
if 'vimg' in imgs:
vbkgsub=imgs['vimg'][:-5]+'_bkgsub.fits'
if 'rimg' in imgs:
rbkgsub=imgs['rimg'][:-5]+'_bkgsub.fits'
if 'iimg' in imgs:
ibkgsub=imgs['iimg'][:-5]+'_bkgsub.fits'
if 'zimg' in imgs:
zbkgsub=imgs['zimg'][:-5]+'_bkgsub.fits'
if 'kimg' in imgs:
kbkgsub=imgs['kimg'][:-5]+'_bkgsub.fits'
if 'rbimg' in imgs:
rbbkgsub=imgs['rbimg'][:-5]+'_bkgsub.fits'
if 'rkimg' in imgs:
rkbkgsub=imgs['rkimg'][:-5]+'_bkgsub.fits'
if idname == '':
idname = raw_input('Please enter an ID name for sources: ')
if outprefix == '':
outname=clname+'_catalog.fits'
else:
outname=outprefix+'.fits'
if kpno == True:
abconvb = vega2AB['bkpno']
else:
abconvb = vega2AB['bctio']
#Merge the SExtractor photometry from different filters into one dictionary
print '\n---------\nMERGING CATALOGS\n---------\n'
data=mergeLists(b=b, v=v, r=r, i=i, z=z, k=k, rimg=imgs['rimg'], zpb=zpb, zpk=zpk, null=null)
(foo1,foo2)=np.loadtxt(r, usecols = (0,1), unpack=True, comments= '#')
nsrcs=len(foo1)
#Add in LDP redshifts
megatab=pf.open(megacat)
megadat=megatab[1].data
(mzldp,mqual)=(megadat['zldp'],megadat['q'])
wfiSky=coordinates.SkyCoord(ra=data['ra']*u.degree, dec=data['dec']*u.degree)
megaSky=coordinates.SkyCoord(ra=megadat['ra']*u.degree, dec=megadat['dec']*u.degree)
idx, d2d, _ = wfiSky.match_to_catalog_sky(megaSky)
match = np.where(d2d.arcsec <= 0.5)
nullArr = np.zeros(len(data['ra'])) - 99.0
wzldp = np.zeros(len(data['ra'])) - 99.0
wqual = np.zeros(len(data['ra'])) - 99.0
wzldp[match] = mzldp[idx][match]
wqual[match] = mqual[idx][match]
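#(match_to_catalog_sky returns, for every WFI source, the index of its nearest
#MegaCat neighbor; the 0.5" cut keeps only confident matches, so unmatched
#sources retain the -99 sentinel in wzldp/wqual.)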
#Calibrate the WFI data off of the FORS photometry and update the 'data' dictionary
#with the new fluxes. Add in the AB zeropoints for the WFI data. Includes r = 1, 2, 3"
#circular apertures, isophotal photometry, and the SExtractor 'auto' apertures.
print '\n---------\nCALIBRATING WFI PHOTOMETRY\n---------\n'
#Save the counts to pass on to the data dictionary
(data['v1c'], data['v2c'], data['v3c'], data['visoc'],
data['vautoc'], data['r1c'], data['r2c'], data['r3c'],
data['risoc'], data['rautoc'], data['i1c'], data['i2c'],
data['i3c'], data['iisoc'], data['iautoc']) = (data['v1'], data['v2'], data['v3'], data['viso'], data['vauto'],
data['r1'], data['r2'], data['r3'], data['riso'], data['rauto'],
data['i1'], data['i2'], data['i3'], data['iiso'], data['iauto'])
#Calibrate the photometry in the different apertures (1", 2", 3", ISO, AUTO)
(v1f, r1f, i1f, v1zp, r1zp, i1zp) = zpWFI(data['ra'], data['dec'], data['v1'], data['r1'], data['i1'], data['v3'],
data['r3'], data['i3'], data['starR'],synth=synth,photref=fors,plot=True,show=True,
classStar=classStar)
#Calculate (V-R), (V-I), and (R-I) colors in 1" apertures to pass to the remaining
#WFI calibration calls for larger apertures.
(v1m,r1m,i1m) = (flux2mag(v1f,zp=23.9), flux2mag(r1f,zp=23.9), flux2mag(i1f,zp=23.9))
(vr1,vi1,ri1) = (v1m-r1m, v1m-i1m, r1m-i1m)
print '\t r=1" done\n'
(v2f, r2f, i2f, v2zp, r2zp, i2zp) = zpWFI(data['ra'], data['dec'], data['v2'], data['r2'], data['i2'], data['v3'],
data['r3'], data['i3'], data['starR'],synth=synth,vr1=vr1,vi1=vi1,ri1=ri1,photref=fors,
classStar=classStar)
print '\t r=2" done\n'
(v3f, r3f, i3f, v3zp, r3zp, i3zp) = zpWFI(data['ra'], data['dec'], data['v3'], data['r3'], data['i3'], data['v3'],
data['r3'], data['i3'], data['starR'],synth=synth,vr1=vr1,vi1=vi1,ri1=ri1,photref=fors,
classStar=classStar)
print '\t r=3" done\n'
(visof, risof, iisof, visozp, risozp, iisozp) = zpWFI(data['ra'], data['dec'], data['viso'], data['riso'], data['iiso'],
data['v3'], data['r3'], data['i3'], data['starR'],synth=synth,
vr1=vr1,vi1=vi1,ri1=ri1,photref=fors,classStar=classStar)
print '\t r=iso done\n'
(vautof, rautof, iautof, vautozp, rautozp, iautozp) = zpWFI(data['ra'], data['dec'], data['vauto'], data['rauto'],
data['iauto'], data['v3'], data['r3'], data['i3'],
data['starR'],synth=synth,vr1=vr1,vi1=vi1,ri1=ri1,photref=fors,
classStar=classStar)
print '\t r=auto done\n'
#Save calibrated fluxes into the data dictionary
(data['v1'], data['v2'], data['v3'], data['viso'],
data['vauto'], data['r1'], data['r2'], data['r3'],
data['riso'], data['rauto'], data['i1'], data['i2'],
data['i3'], data['iiso'], data['iauto']) = (v1f, v2f, v3f, visof, vautof, r1f, r2f, r3f, risof, rautof, i1f,
i2f, i3f, iisof, iautof)
#Convert the B- and K-band fluxes from SExtractor into uJy fluxes.
#Update the 'data' dictionary appropriately. Correct the B- and K-band
#data for seeing if their seeing is worse than the worst WFI seeing image.
photcols = (4, 5, 6, 7, 8)
if b != "":
if rb != "":
(Rb1, Rb2, Rb3, Rbiso, Rbauto) = np.loadtxt(rb, usecols = photcols, unpack = True, comments = '#')
(data['b1'], data['b2'], data['b3'], data['biso'], data['bauto']) = (seeingCorr(data['b1'],data['r1c'],Rb1),
seeingCorr(data['b2'],data['r2c'],Rb2),
seeingCorr(data['b3'],data['r3c'],Rb3),
seeingCorr(data['biso'],data['risoc'],Rbiso),
seeingCorr(data['bauto'],data['rautoc'],Rbauto))
(data['b1c'],data['b2c'],data['b3c'],data['bisoc'],data['bautoc']) = (data['b1'], data['b2'], data['b3'], data['biso'], data['bauto'])
(data['b1'], data['b2'], data['b3'], data['biso'], data['bauto']) = (calflux(data['b1'], zpb, abconv = abconvb),
calflux(data['b2'], zpb, abconv = abconvb),
calflux(data['b3'], zpb, abconv = abconvb),
calflux(data['biso'], zpb, abconv = abconvb),
calflux(data['bauto'], zpb, abconv = abconvb))
if k != "":
if rk != "":
(Rk1, Rk2, Rk3, Rkiso, Rkauto) = np.loadtxt(rk, usecols = photcols, unpack = True, comments = '#')
(data['k1'], data['k2'], data['k3'], data['kiso'], data['kauto']) = (seeingCorr(data['k1'],data['r1c'],Rk1),
seeingCorr(data['k2'],data['r2c'],Rk2),
seeingCorr(data['k3'],data['r3c'],Rk3),
seeingCorr(data['kiso'],data['risoc'],Rkiso),
seeingCorr(data['kauto'],data['rautoc'],Rkauto))
(data['k1c'],data['k2c'],data['k3c'],data['kisoc'],data['kautoc']) = (data['k1'], data['k2'], data['k3'], data['kiso'], data['kauto'])
(data['k1'], data['k2'], data['k3'], data['kiso'], data['kauto']) = (calflux(data['k1'], zpk, abconv = vega2AB['k']),
calflux(data['k2'], zpk, abconv = vega2AB['k']),
calflux(data['k3'], zpk, abconv = vega2AB['k']),
calflux(data['kiso'], zpk, abconv = vega2AB['k']),
calflux(data['kauto'], zpk, abconv = vega2AB['k']))
# if k != "":
# (k1m, k2m, k3m, kisom, kautom) = (calflux(data['k1'], zpk, abconv = vega2AB['k']), calflux(data['k2'], zpk, abconv = vega2AB['k']),
# calflux(data['k3'], zpk, abconv = vega2AB['k']), calflux(data['kiso'], zpk, abconv = vega2AB['k']),
# calflux(data['kauto'], zpk, abconv = vega2AB['k']))
# (k1c, k2c, k3c, kisoc, kautoc) = (data['k1'], data['k2'], data['k3'], data['kiso'], data['kauto'])
# (data['k1c'],data['k2c'],data['k3c'],data['kisoc'],data['kautoc']) = (k1c, k2c, k3c, kisoc, kautoc)
#Correct for seeing if necessary
# if rk != '':
# (rk1, rk2, rk3, rkiso, rkauto) = np.loadtxt(rk, usecols = photcols, unpack = True, comments = '#')
# (rk1m, rk2m , rk3m, rkisom, rkautom) = (calflux(rk1, data['r1zp'], vega2AB['r']), calflux(rk2, data['r2zp'], vega2AB['r']),
# calflux(rk3, data['r3zp'], vega2AB['r']), calflux(rkiso, data['r2zp'], vega2AB['r']),
# calflux(rkauto, data['rauto'], vega2AB['r']))
# (k1mc, k2mc, k3mc, kisomc, kautomc) = (seeingCorr(data['r1'], rk1m, k1m), seeingCorr(data['r2'], rk2m, k1m),
# seeingCorr(data['r3'], rk3m, k3m), seeingCorr(data['riso'], rkisom, kisom),
# seeingCorr(data['rauto'], rkautom, kautom))
# (data['k1e'],data['k2e'],data['k3e'],data['kisoe'],data['kautoe']) = (addquad(data['k1e'],data['r1e'],k1mc,data['r1']),
# addquad(data['k2e'],data['r2e'],k2mc,data['r2']),
# addquad(data['k3e'],data['r3e'],k3mc,data['r3']),
# addquad(data['kisoe'],data['risoe'],kisomc,data['riso']),
# addquad(data['kautoe'],data['rautoe'],kauotmc,
# data['rauto']))
# else:
# (k1mc, k2mc, k3mc, kisomc, kautomc) = (k1m, k2m, k3m, kisom, kautom)
# (k1f, k2f, k3f, kisof, kautof) = (mag2flux(k1mc, ab2ujy=True), mag2flux(k2mc, ab2ujy=True), mag2flux(k3mc, ab2ujy=True),
# mag2flux(kisomc, ab2ujy=True), mag2flux(kautomc, ab2ujy=True))
# (data['k1'], data['k2'], data['k3'], data['kiso'], data['kauto']) = (k1f, k2f, k3f, kisof, kautof)
#Convert z-band from SExtractor to uJy fluxes. Note that the z-band observations were calibrated using
#Smith standards in AB magnitudes, so no AB conversion is necessary.
if z != "":
(data['z1c'],data['z2c'],data['z3c'],data['zisoc'],data['zautoc']) = (data['z1'], data['z2'], data['z3'], data['ziso'], data['zauto'])
(data['z1'], data['z2'], data['z3'],data['ziso'], data['zauto']) = (calflux(data['z1'], zpz, abconv = 0.0), calflux(data['z2'], zpz, abconv = 0.0),
calflux(data['z3'], zpz, abconv = 0.0), calflux(data['ziso'], zpz, abconv = 0.0),
calflux(data['zauto'], zpz, abconv = 0.0))
#Compute the errors based on Labbe et al. using empty apertures to determine the noise in the
#sky background.
print '\n---------\nCOMPUTING ERRORS\n---------\n'
(auton,ison,n1,n2,n3)=(np.sqrt(np.pi*data['kron_a']*data['kron_b']),np.sqrt(data['isoarea']),
np.sqrt(np.pi*(1.0/pixscale)**2.0),np.sqrt(np.pi*(2.0/pixscale)**2.0),
np.sqrt(np.pi*(3.0/pixscale)**2.0))
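#(In this scheme the sky noise in an aperture covering N pixels is modeled as a
#smooth function of sqrt(N), with parameters fit by lb.main from empty-aperture
#fluxes; the exact form lives in sigfunc. A common Labbe-style parameterization
#is sigma(N) ~ sigma1*a*N**b -- stated here as an assumption, for orientation only.)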
filts = 'bvrizk'
if errs == True:
for x in filts:
if x+'img' in imgs:
errpars = lb.main(imgs[x+'img'][:-5]+'_bkgsub.fits', rsegmap, outplot=x+'depth.pdf', clname=clname,
pixscale=pixscale, border = errborder, aprange=[0.5,2.0],maxrange=500.)
data[x+'autoerr'] = (sigfunc(auton, errpars[0], errpars[1], errpars[2])/data[x+'autoc'])*data[x+'auto']
data[x+'isoerr'] = (sigfunc(ison, errpars[0], errpars[1], errpars[2])/data[x+'isoc']) *data[x+'iso']
data[x+'1err'] = (sigfunc(n1, errpars[0], errpars[1], errpars[2])/data[x+'1c']) *data[x+'1']
data[x+'2err'] = (sigfunc(n2, errpars[0], errpars[1], errpars[2])/data[x+'2c']) *data[x+'2']
data[x+'3err'] = (sigfunc(n3, errpars[0], errpars[1], errpars[2])/data[x+'3c']) *data[x+'3']
print '\t '+x.upper()+'-band errors done...\n' if 'z' not in x else '\t '+x+'-band errors done...\n'
else:
for y in ['auto','iso','1','2','3']:
data[x+y+'err'] = np.zeros(len(data['ra'])) - 99.0
else:
for x in filts:
for y in ['auto','iso','1','2','3']:
data[x+y+'err'] = np.zeros(len(data['ra'])) - 99.0
# if 'bimg' in imgs:
# berrpars = lb.main(bbkgsub, rsegmap, outplot='bdepth.pdf', clname=clname, pixscale=pixscale,
# border=errborder, persec=False, aprange=[0.5,2.0],maxrange=500.)
# bautoerr=(sigfunc(auton,berrpars[0],berrpars[1],berrpars[2])/bautoc)*data['bauto']
# bisoerr=(sigfunc(ison,berrpars[0],berrpars[1],berrpars[2])/bisoc)*data['biso']
# b1err=(sigfunc(n1,berrpars[0],berrpars[1],berrpars[2])/b1c)*data['b1']
# b2err=(sigfunc(n2,berrpars[0],berrpars[1],berrpars[2])/b2c)*data['b2']
# b3err=(sigfunc(n3,berrpars[0],berrpars[1],berrpars[2])/b3c)*data['b3']
# print '\t B-band done\n'
# else:
# (berrpars,bautoerr,bisoerr,b1err,b2err,b3err)=(nullArr,nullArr,nullArr,nullArr,nullArr,nullArr)
#
# if 'zimg' in imgs:
# zerrpars = lb.main(zbkgsub, rsegmap, outplot='zdepth.pdf', clname=clname, pixscale=pixscale,
# border=errborder, persec=False, aprange=[0.5,2.0],maxrange=500.)
# zautoerr=(sigfunc(auton,zerrpars[0],zerrpars[1],zerrpars[2])/zautoc)*data['zauto']
# zisoerr=(sigfunc(ison,zerrpars[0],zerrpars[1],zerrpars[2])/zisoc)*data['ziso']
# z1err=(sigfunc(n1,zerrpars[0],zerrpars[1],zerrpars[2])/z1c)*data['z1']
# z2err=(sigfunc(n2,zerrpars[0],zerrpars[1],zerrpars[2])/z2c)*data['z2']
# z3err=(sigfunc(n3,zerrpars[0],zerrpars[1],zerrpars[2])/z3c)*data['z3']
# print '\t z-band done\n'
# else:
# (zerrpars,zautoerr,zisoerr,z1err,z2err,z3err)=(nullArr,nullArr,nullArr,nullArr,nullArr,nullArr)
#
# if 'kimg' in imgs:
# kerrpars = lb.main(kbkgsub, rsegmap, outplot='kdepth.pdf', clname=clname, pixscale=pixscale,
# border=errborder, persec=True, aprange=[0.5,2.0])
# kautoerr=(sigfunc(auton,kerrpars[0],kerrpars[1],kerrpars[2])/kautoc)*data['kauto']
# kisoerr=(sigfunc(ison,kerrpars[0],kerrpars[1],kerrpars[2])/kisoc)*data['kiso']
# k1err=(sigfunc(n1,kerrpars[0],kerrpars[1],kerrpars[2])/k1c)*data['k1']
# k2err=(sigfunc(n2,kerrpars[0],kerrpars[1],kerrpars[2])/k2c)*data['k2']
# k3err=(sigfunc(n3,kerrpars[0],kerrpars[1],kerrpars[2])/k3c)*data['k3']
# print '\t K-band done\n'
# else:
# (kerrpars,kautoerr,kisoerr,k1err,k2err,k3err)=(nullArr,nullArr,nullArr,nullArr,nullArr,nullArr)
#Get K-band weight map values at central pixel of each object (value is normalized to the maximum in the weight map).
#Also update the uncertainties in the K-band by adjusting for the weight.
wk=np.zeros(nsrcs) - 99.0
if wkimg != '':
expdata=pf.open(wkimg)[0].data
expmax=np.max(expdata)
expvalues=expdata[data['y'].astype(int),data['x'].astype(int)]
wk=(expvalues/expmax)
kerrscale=1./np.sqrt(wk)
data['k1err'] = data['k1err'] * kerrscale
data['k2err'] = data['k2err'] * kerrscale
data['k3err'] = data['k3err'] * kerrscale
data['kautoerr'] = data['kautoerr'] * kerrscale
data['kisoerr'] = data['kisoerr'] * kerrscale
#If correcting B- and K-band for worse seeing than WFI, add errors in quadrature with the R-band uncertainties
if rb != '':
(data['Rb1'], data['Rb2'], data['Rb3'], data['Rbiso'], data['Rbauto']) = (calflux(Rb1,zp=r1zp), calflux(Rb2,zp=r2zp),
calflux(Rb3,zp=r3zp), calflux(Rbiso,zp=risozp),
calflux(Rbauto,zp=rautozp))
errpars = lb.main(imgs['rbimg'][:-5]+'_bkgsub.fits', rsegmap, outplot='rbdepth.pdf', clname=clname,
pixscale=pixscale, border = errborder, aprange=[0.5,2.0],maxrange=500.)
data['Rbautoerr'] = (sigfunc(auton, errpars[0], errpars[1], errpars[2])/Rbauto)*data['Rbauto']
data['Rbisoerr'] = (sigfunc(ison, errpars[0], errpars[1], errpars[2])/Rbiso) *data['Rbiso']
data['Rb1err'] = (sigfunc(n1, errpars[0], errpars[1], errpars[2])/Rb1) *data['Rb1']
data['Rb2err'] = (sigfunc(n2, errpars[0], errpars[1], errpars[2])/Rb2) *data['Rb2']
data['Rb3err'] = (sigfunc(n3, errpars[0], errpars[1], errpars[2])/Rb3) *data['Rb3']
aper = ['1','2','3','auto','iso']
for x in aper:
data['b'+x+'err'] = addquad(data['b'+x+'err'],data['r'+x+'err'],data['Rb'+x+'err'],data['b'+x],data['r'+x],data['Rb'+x])
if rk != '':
(data['Rk1'], data['Rk2'], data['Rk3'], data['Rkiso'], data['Rkauto']) = (calflux(Rk1,zp=r1zp), calflux(Rk2,zp=r2zp),
calflux(Rk3,zp=r3zp), calflux(Rkiso,zp=risozp),
calflux(Rkauto,zp=rautozp))
errpars = lb.main(imgs['rkimg'][:-5]+'_bkgsub.fits', rsegmap, outplot='rkdepth.pdf', clname=clname,
pixscale=pixscale, border = errborder, aprange=[0.5,2.0],maxrange=500.)
data['Rkautoerr'] = (sigfunc(auton, errpars[0], errpars[1], errpars[2])/Rkauto)*data['Rkauto']
data['Rkisoerr'] = (sigfunc(ison, errpars[0], errpars[1], errpars[2])/Rkiso) *data['Rkiso']
data['Rk1err'] = (sigfunc(n1, errpars[0], errpars[1], errpars[2])/Rk1) *data['Rk1']
data['Rk2err'] = (sigfunc(n2, errpars[0], errpars[1], errpars[2])/Rk2) *data['Rk2']
data['Rk3err'] = (sigfunc(n3, errpars[0], errpars[1], errpars[2])/Rk3) *data['Rk3']
aper = ['1','2','3','auto','iso']
for x in aper:
data['k'+x+'err'] = addquad(data['k'+x+'err'],data['r'+x+'err'],data['Rk'+x+'err'],data['k'+x],data['r'+x],data['Rk'+x])
#Look up the E(B-V) values from the Schlegel dust maps
data['ebv']=getEBV(data['ra'],data['dec'])
#Set up field and ID name arrays
ids = ['WFI_'+idname+'_'+str(int(x+1)) for x in range(len(data['ra']))]
field = [clname for x in range(len(data['ra']))]
#Construct the actual catalog
cat = {'field':field,'ids':ids,'ra':data['ra'],'dec':data['dec'],'x':data['x'],'y':data['y'],'ebv':data['ebv'],'fwhmR':data['fwhmR'],
'fB1':data['b1'],'fB1err':data['b1err'],'fB2':data['b2'],'fB2err':data['b2err'],'fB3':data['b3'],'fB3err':data['b3err'],
'fBiso':data['biso'],'fBisoerr':data['bisoerr'],'fBauto':data['bauto'],'fBautoerr':data['bautoerr'],
'fV1':data['v1'],'fV1err':data['v1err'],'fV2':data['v2'],'fV2err':data['v2err'],'fV3':data['v3'],'fV3err':data['v3err'],
'fViso':data['viso'],'fVisoerr':data['visoerr'],'fVauto':data['vauto'],'fVautoerr':data['vautoerr'],
'fR1':data['r1'],'fR1err':data['r1err'],'fR2':data['r2'],'fR2err':data['r2err'],'fR3':data['r3'],'fR3err':data['r3err'],
'fRiso':data['riso'],'fRisoerr':data['risoerr'],'fRauto':data['rauto'],'fRautoerr':data['rautoerr'],
'fI1':data['i1'],'fI1err':data['i1err'],'fI2':data['i2'],'fI2err':data['i2err'],'fI3':data['i3'],'fI3err':data['i3err'],
'fIiso':data['iiso'],'fIisoerr':data['iisoerr'],'fIauto':data['iauto'],'fIautoerr':data['iautoerr'],
'fz1':data['z1'],'fz1err':data['z1err'],'fz2':data['z2'],'fz2err':data['z2err'],'fz3':data['z3'],'fz3err':data['z3err'],
'fziso':data['ziso'],'fzisoerr':data['zisoerr'],'fzauto':data['zauto'],'fzautoerr':data['zautoerr'],
'fK1':data['k1'],'fK1err':data['k1err'],'fK2':data['k2'],'fK2err':data['k2err'],'fK3':data['k3'],'fK3err':data['k3err'],
'fKiso':data['kiso'],'fKisoerr':data['kisoerr'],'fKauto':data['kauto'],'fKautoerr':data['kautoerr'],
'zLDP5_7':wzldp,'Q5_7':wqual,'class_StarR':data['starR'],'iso_area':ison**2.0,'major_ax':data['kron_a'],
'minor_ax':data['kron_b'],'theta':data['theta'],'sexflagB':data['sexflagB'],'sexflagV':data['sexflagV'],
'sexflagR':data['sexflagR'],'sexflagI':data['sexflagI'],'sexflagz':data['sexflagz'],'sexflagK':data['sexflagK'],'wK':wk}
#Search for and replace NaNs and +/- inf in the catalog. Replace with flag value -88
for x in colNames:
if ('field' not in x) and ('ids' not in x):
nan=np.where(np.isnan(cat[x]))
inf=np.where(np.isinf(cat[x]))
cat[x][nan]=-88
cat[x][inf]=-88
#Search for and replace bad flux values (log10(abs(flux)) < -8 or > 4). Replace with flag value -77
for x in range(len(fluxNames)):
test=np.log10(np.abs(cat[fluxNames[x]]))
bad=np.where((test < -8.) | (test > 4.))
cat[fluxNames[x]][bad]=-77
cat[errNames[x]][bad] =-77
#Search for where K-band weight map is zero and set fluxes and errors to flag value -77
(kfluxes,kerrs)=(['fK1','fK2','fK3','fKiso','fKauto'],['fK1err','fK2err','fK3err','fKisoerr','fKautoerr'])
for x in range(len(kfluxes)):
bad=np.where(cat['wK'] == 0.)
cat[kfluxes[x]][bad]=-77
cat[kerrs[x]][bad] =-77
#Turn the catalog into a Pandas dataframe and select only sources within the X and Y bounds.
xmin = np.min(data['x']) if xmin == -99 else xmin
xmax = np.max(data['x']) if xmax == -99 else xmax
ymin = np.min(data['y']) if ymin == -99 else ymin
ymax = np.max(data['y']) if ymax == -99 else ymax
catDF = pd.DataFrame.from_dict(cat)
final=catDF.query('x > '+str(xmin)+' & x < '+str(xmax)+' & y > '+str(ymin)+' & y < '+str(ymax))
#Save the dictionary as a FITS table
tab = Table({colNames[x]:final[colNames[x]].values for x in range(len(colNames))},
names=colNames, dtype=Fdtype).write(outname, format='fits', overwrite=True)
| [] | [] | ["EDISCS"] | [] | ["EDISCS"] | python | 1 | 0 |
src/modules/collector/tools/net/net_test.go | package net
import (
"fmt"
"os"
"runtime"
"testing"
"github.com/didi/nightingale/src/modules/collector/tools/internal/common"
)
func TestAddrString(t *testing.T) {
v := Addr{IP: "192.168.0.1", Port: 8000}
s := fmt.Sprintf("%v", v)
if s != "{\"ip\":\"192.168.0.1\",\"port\":8000}" {
t.Errorf("Addr string is invalid: %v", v)
}
}
func TestNetIOCountersStatString(t *testing.T) {
v := IOCountersStat{
Name: "test",
BytesSent: 100,
}
e := `{"name":"test","bytesSent":100,"bytesRecv":0,"packetsSent":0,"packetsRecv":0,"errin":0,"errout":0,"dropin":0,"dropout":0,"fifoin":0,"fifoout":0}`
if e != fmt.Sprintf("%v", v) {
t.Errorf("NetIOCountersStat string is invalid: %v", v)
}
}
func TestNetProtoCountersStatString(t *testing.T) {
v := ProtoCountersStat{
Protocol: "tcp",
Stats: map[string]int64{
"MaxConn": -1,
"ActiveOpens": 4000,
"PassiveOpens": 3000,
},
}
e := `{"protocol":"tcp","stats":{"ActiveOpens":4000,"MaxConn":-1,"PassiveOpens":3000}}`
if e != fmt.Sprintf("%v", v) {
t.Errorf("NetProtoCountersStat string is invalid: %v", v)
}
}
func TestNetConnectionStatString(t *testing.T) {
v := ConnectionStat{
Fd: 10,
Family: 10,
Type: 10,
Uids: []int32{10, 10},
}
e := `{"fd":10,"family":10,"type":10,"localaddr":{"ip":"","port":0},"remoteaddr":{"ip":"","port":0},"status":"","uids":[10,10],"pid":0}`
if e != fmt.Sprintf("%v", v) {
t.Errorf("NetConnectionStat string is invalid: %v", v)
}
}
func TestNetIOCountersAll(t *testing.T) {
v, err := IOCounters(false)
per, err := IOCounters(true)
if err != nil {
t.Errorf("Could not get NetIOCounters: %v", err)
}
if len(v) != 1 {
t.Errorf("Could not get NetIOCounters: %v", v)
}
if v[0].Name != "all" {
t.Errorf("Invalid NetIOCounters: %v", v)
}
var pr uint64
for _, p := range per {
pr += p.PacketsRecv
}
if v[0].PacketsRecv != pr {
t.Errorf("invalid sum value: %v, %v", v[0].PacketsRecv, pr)
}
}
func TestNetIOCountersPerNic(t *testing.T) {
v, err := IOCounters(true)
if err != nil {
t.Errorf("Could not get NetIOCounters: %v", err)
}
if len(v) == 0 {
t.Errorf("Could not get NetIOCounters: %v", v)
}
for _, vv := range v {
if vv.Name == "" {
t.Errorf("Invalid NetIOCounters: %v", vv)
}
}
}
func TestGetNetIOCountersAll(t *testing.T) {
n := []IOCountersStat{
{
Name: "a",
BytesRecv: 10,
PacketsRecv: 10,
},
{
Name: "b",
BytesRecv: 10,
PacketsRecv: 10,
Errin: 10,
},
}
ret, err := getIOCountersAll(n)
if err != nil {
t.Error(err)
}
if len(ret) != 1 {
t.Errorf("invalid return count")
}
if ret[0].Name != "all" {
t.Errorf("invalid return name")
}
if ret[0].BytesRecv != 20 {
t.Errorf("invalid count bytesrecv")
}
if ret[0].Errin != 10 {
t.Errorf("invalid count errin")
}
}
func TestNetInterfaces(t *testing.T) {
v, err := Interfaces()
if err != nil {
t.Errorf("Could not get NetInterfaceStat: %v", err)
}
if len(v) == 0 {
t.Errorf("Could not get NetInterfaceStat: %v", err)
}
for _, vv := range v {
if vv.Name == "" {
t.Errorf("Invalid NetInterface: %v", vv)
}
}
}
func TestNetProtoCountersStatsAll(t *testing.T) {
v, err := ProtoCounters(nil)
if err != nil {
t.Fatalf("Could not get NetProtoCounters: %v", err)
}
if len(v) == 0 {
t.Fatalf("Could not get NetProtoCounters: %v", err)
}
for _, vv := range v {
if vv.Protocol == "" {
t.Errorf("Invalid NetProtoCountersStat: %v", vv)
}
if len(vv.Stats) == 0 {
t.Errorf("Invalid NetProtoCountersStat: %v", vv)
}
}
}
func TestNetProtoCountersStats(t *testing.T) {
v, err := ProtoCounters([]string{"tcp", "ip"})
if err != nil {
t.Fatalf("Could not get NetProtoCounters: %v", err)
}
if len(v) == 0 {
t.Fatalf("Could not get NetProtoCounters: %v", err)
}
if len(v) != 2 {
t.Fatalf("Go incorrect number of NetProtoCounters: %v", err)
}
for _, vv := range v {
if vv.Protocol != "tcp" && vv.Protocol != "ip" {
t.Errorf("Invalid NetProtoCountersStat: %v", vv)
}
if len(vv.Stats) == 0 {
t.Errorf("Invalid NetProtoCountersStat: %v", vv)
}
}
}
func TestNetConnections(t *testing.T) {
if ci := os.Getenv("CI"); ci != "" { // skip if test on drone.io
return
}
v, err := Connections("inet")
if err != nil {
t.Errorf("could not get NetConnections: %v", err)
}
if len(v) == 0 {
t.Errorf("could not get NetConnections: %v", v)
}
for _, vv := range v {
if vv.Family == 0 {
t.Errorf("invalid NetConnections: %v", vv)
}
}
}
func TestNetFilterCounters(t *testing.T) {
if ci := os.Getenv("CI"); ci != "" { // skip if test on drone.io
return
}
if runtime.GOOS == "linux" {
// some test environments do not have this path.
if !common.PathExists("/proc/sys/net/netfilter/nf_conntrack_count") {
t.SkipNow()
}
}
v, err := FilterCounters()
if err != nil {
t.Errorf("could not get NetConnections: %v", err)
}
if len(v) == 0 {
t.Errorf("could not get NetConnections: %v", v)
}
for _, vv := range v {
if vv.ConnTrackMax == 0 {
t.Errorf("nf_conntrackMax needs to be greater than zero: %v", vv)
}
}
}
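// Usage sketch (illustrative; error handling elided). With this package imported
// as "net", the aggregated counters exercised above can be read like so:
//
//	counters, _ := net.IOCounters(false) // pernic=false -> single "all" entry
//	fmt.Printf("recv=%d sent=%d\n", counters[0].BytesRecv, counters[0].BytesSent)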
| ["\"CI\"", "\"CI\""] | [] | ["CI"] | [] | ["CI"] | go | 1 | 0 |
discordbot.py | from discord.ext import commands
import os
bot = commands.Bot(command_prefix='/')
token = os.environ['DISCORD_BOT_TOKEN']
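# The token is read from the environment rather than hard-coded. Set it before
# running, e.g. (shell, illustrative): export DISCORD_BOT_TOKEN="your-token-here"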
@bot.event
async def on_message(message):
if message.author.bot:
return
if message.content == ('$mjoha'):
await message.channel.send("<@588392127919161357>"+'おはよう:heart:')
if message.content == ('$mjlove'):
await message.channel.send("<@588392127919161357>"+'大好き:heart:')
if message.content == ('$mjbut'):
await message.channel.send("<@588392127919161357>"+'嫌い!:rage: ')
if message.content == ('$mjwakeup'):
await message.channel.send("<@588392127919161357>"+'起きて:heart:')
if message.content == ('$mjyabai'):
await message.channel.send("<@588392127919161357>"+'含み損すごい:heart:')
if message.content == ('$mjome'):
await message.channel.send("<@588392127919161357>"+'大好き:heart:')
if message.content == ('$uwaki'):
await message.channel.send("<@588392127919161357>"+'あっちに逝こうか:heart:')
if message.content == ('$mjsongiri'):
await message.channel.send("<@588392127919161357>"+'残念損切り:heart:')
if message.content == ('おめでとう'):
await message.channel.send('私も嬉しい:heart:')
if message.content == ('MJさんは?'):
await message.channel.send('ドイツ:heart:')
if message.content == ('利確どこ?'):
await message.channel.send('私も知りたい:heart:')
if message.content == ("<@588392127919161357>"):
await message.channel.send("<@588392127919161357>" +'呼んでるわよ?:heart:')
if message.content == (":99_gbpjpy:"):
await message.channel.send("<@588392127919161357>" +':99_gbpjpy: のおーだ知りたいなぁ:heart:')
if message.content == (":99_usdjpy:"):
await message.channel.send("<@588392127919161357>" +':99_usdjpy: のおーだ知りたいなぁ:heart:')
if message.content == ('れんかぶす'):
await message.channel.send("<@588392127919161357>"+'はかっこいいよ:heart:')
if message.content == ('てんが利確どこ?'):
await message.channel.send("<@522237932606717968>"+'早よ言え:heart:')
if message.content == ('れんか'):
await message.channel.send('https://cdn.discordapp.com/attachments/650134943623675904/652116436332052500/image0.jpg' +' ぶ~す:heart:')
if message.content == ('アンちゃん、おはよ!'):
await message.channel.send('おはよぅ:heart:')
@bot.command()
async def ping(ctx):
await ctx.send('pong')
bot.run(token)
| [] | [] | ["DISCORD_BOT_TOKEN"] | [] | ["DISCORD_BOT_TOKEN"] | python | 1 | 0 |
test/e2e/scalenodes.go | package e2e
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"os"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/Azure/go-autorest/autorest/to"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/retry"
"github.com/Azure/ARO-RP/pkg/util/ready"
)
var _ = Describe("Scale nodes", func() {
// hack: do this before we scale down, because it takes a while for the
// nodes to settle after scale down
Specify("node count should match the cluster resource and nodes should be ready", func() {
ctx := context.Background()
oc, err := clients.OpenshiftClusters.Get(ctx, os.Getenv("RESOURCEGROUP"), os.Getenv("CLUSTER"))
Expect(err).NotTo(HaveOccurred())
var expectedNodeCount int = 3 // for masters
for _, wp := range *oc.WorkerProfiles {
expectedNodeCount += int(*wp.Count)
}
nodes, err := clients.Kubernetes.CoreV1().Nodes().List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
var nodeCount int32
for _, node := range nodes.Items {
if ready.NodeIsReady(&node) {
nodeCount++
} else {
for _, c := range node.Status.Conditions {
log.Warnf("node %s status %s", node.Name, c.String())
}
}
}
Expect(nodeCount).To(BeEquivalentTo(expectedNodeCount))
})
Specify("nodes should scale up and down", func() {
mss, err := clients.MachineAPI.MachineV1beta1().MachineSets("openshift-machine-api").List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(mss.Items).NotTo(BeEmpty())
err = scale(mss.Items[0].Name, 1)
Expect(err).NotTo(HaveOccurred())
err = waitForScale(mss.Items[0].Name)
Expect(err).NotTo(HaveOccurred())
err = scale(mss.Items[0].Name, -1)
Expect(err).NotTo(HaveOccurred())
err = waitForScale(mss.Items[0].Name)
Expect(err).NotTo(HaveOccurred())
})
})
func scale(name string, delta int32) error {
return retry.RetryOnConflict(retry.DefaultRetry, func() error {
ms, err := clients.MachineAPI.MachineV1beta1().MachineSets("openshift-machine-api").Get(name, metav1.GetOptions{})
if err != nil {
return err
}
if ms.Spec.Replicas == nil {
ms.Spec.Replicas = to.Int32Ptr(1)
}
*ms.Spec.Replicas += delta
_, err = clients.MachineAPI.MachineV1beta1().MachineSets(ms.Namespace).Update(ms)
return err
})
}
func waitForScale(name string) error {
return wait.PollImmediate(10*time.Second, 30*time.Minute, func() (bool, error) {
ms, err := clients.MachineAPI.MachineV1beta1().MachineSets("openshift-machine-api").Get(name, metav1.GetOptions{})
if err != nil {
return false, err
}
if ms.Spec.Replicas == nil {
ms.Spec.Replicas = to.Int32Ptr(1)
}
return ms.Status.ObservedGeneration == ms.Generation &&
ms.Status.AvailableReplicas == *ms.Spec.Replicas, nil
})
}
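// Usage sketch (illustrative; the MachineSet name below is hypothetical):
//
//	if err := scale("cluster-worker-eastus1", 1); err == nil {
//		_ = waitForScale("cluster-worker-eastus1")
//	}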
| ["\"RESOURCEGROUP\"", "\"CLUSTER\""] | [] | ["RESOURCEGROUP", "CLUSTER"] | [] | ["RESOURCEGROUP", "CLUSTER"] | go | 2 | 0 |
mottak/web/webapp/src/main/java/no/nav/foreldrepenger/fordel/web/app/konfig/EnvKonfigVerdiProvider.java | package no.nav.foreldrepenger.fordel.web.app.konfig;
import java.util.Properties;
import javax.enterprise.context.ApplicationScoped;
import no.nav.vedtak.konfig.KonfigVerdi;
import no.nav.vedtak.konfig.PropertiesKonfigVerdiProvider;
@ApplicationScoped
public class EnvKonfigVerdiProvider extends PropertiesKonfigVerdiProvider {
public EnvKonfigVerdiProvider() {
super(getEnv());
}
@Override
public int getPrioritet() {
return 20;
}
@Override
public <V> V getVerdi(String key, KonfigVerdi.Converter<V> converter) {
V verdi = super.getVerdi(key, converter);
if (verdi == null) {
verdi = super.getVerdi(upperKey(key), converter);
}
if(verdi==null) {
verdi = super.getVerdi(endpointUrlKey(key), converter);
}
return verdi;
}
@Override
public boolean harVerdi(String key) {
return super.harVerdi(key) || super.harVerdi(upperKey(key)) || super.harVerdi(endpointUrlKey(key));
}
private String endpointUrlKey(String key) {
// hack: bridges the difference between the NAIS and SKYA environments for endpoints
return key == null ? null : upperKey(key).replaceAll("_URL$", "_ENDPOINTURL");
}
private String upperKey(String key) {
// hack: bridges the difference between the NAIS and SKYA environments (upper vs. lower case and '_' instead of '.')
return key == null ? null : key.toUpperCase().replace('.', '_');
}
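// Example (illustrative): a lookup for "min.tjeneste.url" is tried verbatim,
// then as "MIN_TJENESTE_URL" (upperKey), and finally as
// "MIN_TJENESTE_ENDPOINTURL" (endpointUrlKey).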
private static Properties getEnv() {
Properties props = new Properties();
props.putAll(System.getenv());
return props;
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 |
src/core/config/config.go | // Copyright 2018 Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package config provide config for core api and other modules
// Before accessing user settings, need to call Load()
// For system settings, no need to call Load()
package config
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
"github.com/goharbor/harbor/src/common"
comcfg "github.com/goharbor/harbor/src/common/config"
"github.com/goharbor/harbor/src/common/models"
"github.com/goharbor/harbor/src/common/secret"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/promgr"
"github.com/goharbor/harbor/src/core/promgr/pmsdriver"
"github.com/goharbor/harbor/src/core/promgr/pmsdriver/admiral"
"github.com/goharbor/harbor/src/core/promgr/pmsdriver/local"
)
const (
defaultKeyPath = "/etc/core/key"
defaultTokenFilePath = "/etc/core/token/tokens.properties"
defaultRegistryTokenPrivateKeyPath = "/etc/core/private_key.pem"
)
var (
// SecretStore manages secrets
SecretStore *secret.Store
// GlobalProjectMgr is initialized based on the deploy mode
GlobalProjectMgr promgr.ProjectManager
keyProvider comcfg.KeyProvider
// AdmiralClient is initialized only under integration deploy mode
// and can be passed to project manager as a parameter
AdmiralClient *http.Client
// TokenReader is used in integration mode to read token
TokenReader admiral.TokenReader
// defined as a var for testing.
defaultCACertPath = "/etc/core/ca/ca.crt"
cfgMgr *comcfg.CfgManager
)
// Init configurations
func Init() error {
// init key provider
initKeyProvider()
cfgMgr = comcfg.NewDBCfgManager()
log.Info("init secret store")
// init secret store
initSecretStore()
log.Info("init project manager based on deploy mode")
// init project manager based on deploy mode
if err := initProjectManager(); err != nil {
log.Errorf("Failed to initialise project manager, error: %v", err)
return err
}
return nil
}
// InitWithSettings init config with predefined configs, and optionally overwrite the keyprovider
func InitWithSettings(cfgs map[string]interface{}, kp ...comcfg.KeyProvider) {
Init()
cfgMgr = comcfg.NewInMemoryManager()
cfgMgr.UpdateConfig(cfgs)
if len(kp) > 0 {
keyProvider = kp[0]
}
}
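// initKeyProvider creates the file-based encryption key provider, honoring
// the KEY_PATH environment variable with a built-in default path.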
func initKeyProvider() {
path := os.Getenv("KEY_PATH")
if len(path) == 0 {
path = defaultKeyPath
}
log.Infof("key path: %s", path)
keyProvider = comcfg.NewFileKeyProvider(path)
}
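// initSecretStore registers the jobservice secret so that internal requests
// from jobservice can be authenticated.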
func initSecretStore() {
m := map[string]string{}
m[JobserviceSecret()] = secret.JobserviceUser
SecretStore = secret.NewStore(m)
}
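// initProjectManager picks the project manager driver: Admiral-backed when
// deployed with Admiral, otherwise the local database driver.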
func initProjectManager() error {
var driver pmsdriver.PMSDriver
if WithAdmiral() {
log.Debugf("Initialising Admiral client with certificate: %s", defaultCACertPath)
content, err := ioutil.ReadFile(defaultCACertPath)
if err != nil {
return err
}
pool := x509.NewCertPool()
if ok := pool.AppendCertsFromPEM(content); !ok {
			return fmt.Errorf("failed to append cert content into cert pool")
}
AdmiralClient = &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSClientConfig: &tls.Config{
RootCAs: pool,
},
},
}
// integration with admiral
log.Info("initializing the project manager based on PMS...")
path := os.Getenv("SERVICE_TOKEN_FILE_PATH")
if len(path) == 0 {
path = defaultTokenFilePath
}
log.Infof("service token file path: %s", path)
TokenReader = &admiral.FileTokenReader{
Path: path,
}
driver = admiral.NewDriver(AdmiralClient, AdmiralEndpoint(), TokenReader)
} else {
// standalone
log.Info("initializing the project manager based on local database...")
driver = local.NewDriver()
}
GlobalProjectMgr = promgr.NewDefaultProjectManager(driver, true)
return nil
}
// GetCfgManager return the current config manager
func GetCfgManager() *comcfg.CfgManager {
if cfgMgr == nil {
return comcfg.NewDBCfgManager()
}
return cfgMgr
}
// Load configurations
func Load() error {
return cfgMgr.Load()
}
// Upload save all system configurations
func Upload(cfg map[string]interface{}) error {
return cfgMgr.UpdateConfig(cfg)
}
// GetSystemCfg returns the system configurations
func GetSystemCfg() (map[string]interface{}, error) {
sysCfg := cfgMgr.GetAll()
if len(sysCfg) == 0 {
		return nil, errors.New("cannot load system config, the database might be down")
}
return sysCfg, nil
}
// AuthMode ...
func AuthMode() (string, error) {
err := cfgMgr.Load()
if err != nil {
log.Errorf("failed to load config, error %v", err)
return "db_auth", err
}
return cfgMgr.Get(common.AUTHMode).GetString(), nil
}
// TokenPrivateKeyPath returns the path to the key for signing token for registry
func TokenPrivateKeyPath() string {
path := os.Getenv("TOKEN_PRIVATE_KEY_PATH")
if len(path) == 0 {
path = defaultRegistryTokenPrivateKeyPath
}
return path
}
// LDAPConf returns the setting of ldap server
func LDAPConf() (*models.LdapConf, error) {
err := cfgMgr.Load()
if err != nil {
return nil, err
}
return &models.LdapConf{
LdapURL: cfgMgr.Get(common.LDAPURL).GetString(),
LdapSearchDn: cfgMgr.Get(common.LDAPSearchDN).GetString(),
LdapSearchPassword: cfgMgr.Get(common.LDAPSearchPwd).GetString(),
LdapBaseDn: cfgMgr.Get(common.LDAPBaseDN).GetString(),
LdapUID: cfgMgr.Get(common.LDAPUID).GetString(),
LdapFilter: cfgMgr.Get(common.LDAPFilter).GetString(),
LdapScope: cfgMgr.Get(common.LDAPScope).GetInt(),
LdapConnectionTimeout: cfgMgr.Get(common.LDAPTimeout).GetInt(),
LdapVerifyCert: cfgMgr.Get(common.LDAPVerifyCert).GetBool(),
}, nil
}
// LDAPGroupConf returns the setting of ldap group search
func LDAPGroupConf() (*models.LdapGroupConf, error) {
err := cfgMgr.Load()
if err != nil {
return nil, err
}
return &models.LdapGroupConf{
LdapGroupBaseDN: cfgMgr.Get(common.LDAPGroupBaseDN).GetString(),
LdapGroupFilter: cfgMgr.Get(common.LDAPGroupSearchFilter).GetString(),
LdapGroupNameAttribute: cfgMgr.Get(common.LDAPGroupAttributeName).GetString(),
LdapGroupSearchScope: cfgMgr.Get(common.LDAPGroupSearchScope).GetInt(),
LdapGroupAdminDN: cfgMgr.Get(common.LDAPGroupAdminDn).GetString(),
LdapGroupMembershipAttribute: cfgMgr.Get(common.LDAPGroupMembershipAttribute).GetString(),
}, nil
}
// TokenExpiration returns the token expiration time (in minute)
func TokenExpiration() (int, error) {
return cfgMgr.Get(common.TokenExpiration).GetInt(), nil
}
// RobotTokenDuration returns the token expiration time of robot account (in minute)
func RobotTokenDuration() int {
return cfgMgr.Get(common.RobotTokenDuration).GetInt()
}
// ExtEndpoint returns the external URL of Harbor: protocol://host:port
func ExtEndpoint() (string, error) {
return cfgMgr.Get(common.ExtEndpoint).GetString(), nil
}
// ExtURL returns the external URL: host:port
func ExtURL() (string, error) {
endpoint, err := ExtEndpoint()
if err != nil {
log.Errorf("failed to load config, error %v", err)
}
l := strings.Split(endpoint, "://")
	if len(l) > 1 {
return l[1], nil
}
return endpoint, nil
}
// SecretKey returns the secret key to encrypt the password of target
func SecretKey() (string, error) {
return keyProvider.Get(nil)
}
// SelfRegistration returns the enablement of self registration
func SelfRegistration() (bool, error) {
return cfgMgr.Get(common.SelfRegistration).GetBool(), nil
}
// RegistryURL ...
func RegistryURL() (string, error) {
return cfgMgr.Get(common.RegistryURL).GetString(), nil
}
// InternalJobServiceURL returns jobservice URL for internal communication between Harbor containers
func InternalJobServiceURL() string {
return strings.TrimSuffix(cfgMgr.Get(common.JobServiceURL).GetString(), "/")
}
// InternalCoreURL returns the local harbor core url
func InternalCoreURL() string {
return strings.TrimSuffix(cfgMgr.Get(common.CoreURL).GetString(), "/")
}
// LocalCoreURL returns the local harbor core url
func LocalCoreURL() string {
return cfgMgr.Get(common.CoreLocalURL).GetString()
}
// InternalTokenServiceEndpoint returns token service endpoint for internal communication between Harbor containers
func InternalTokenServiceEndpoint() string {
return InternalCoreURL() + "/service/token"
}
// InternalNotaryEndpoint returns notary server endpoint for internal communication between Harbor containers
// This is currently a conventional value and may be inaccessible when Harbor is not deployed with Notary.
func InternalNotaryEndpoint() string {
return cfgMgr.Get(common.NotaryURL).GetString()
}
// InitialAdminPassword returns the initial password for administrator
func InitialAdminPassword() (string, error) {
return cfgMgr.Get(common.AdminInitialPassword).GetString(), nil
}
// OnlyAdminCreateProject returns the flag to restrict that only sys admin can create project
func OnlyAdminCreateProject() (bool, error) {
return cfgMgr.Get(common.ProjectCreationRestriction).GetString() == common.ProCrtRestrAdmOnly, nil
}
// Email returns email server settings
func Email() (*models.Email, error) {
err := cfgMgr.Load()
if err != nil {
return nil, err
}
return &models.Email{
Host: cfgMgr.Get(common.EmailHost).GetString(),
Port: cfgMgr.Get(common.EmailPort).GetInt(),
Username: cfgMgr.Get(common.EmailUsername).GetString(),
Password: cfgMgr.Get(common.EmailPassword).GetString(),
SSL: cfgMgr.Get(common.EmailSSL).GetBool(),
From: cfgMgr.Get(common.EmailFrom).GetString(),
Identity: cfgMgr.Get(common.EmailIdentity).GetString(),
Insecure: cfgMgr.Get(common.EmailInsecure).GetBool(),
}, nil
}
// Database returns database settings
func Database() (*models.Database, error) {
database := &models.Database{}
database.Type = cfgMgr.Get(common.DatabaseType).GetString()
postgresql := &models.PostGreSQL{
Host: cfgMgr.Get(common.PostGreSQLHOST).GetString(),
Port: cfgMgr.Get(common.PostGreSQLPort).GetInt(),
Username: cfgMgr.Get(common.PostGreSQLUsername).GetString(),
Password: cfgMgr.Get(common.PostGreSQLPassword).GetString(),
Database: cfgMgr.Get(common.PostGreSQLDatabase).GetString(),
SSLMode: cfgMgr.Get(common.PostGreSQLSSLMode).GetString(),
MaxIdleConns: cfgMgr.Get(common.PostGreSQLMaxIdleConns).GetInt(),
MaxOpenConns: cfgMgr.Get(common.PostGreSQLMaxOpenConns).GetInt(),
}
database.PostGreSQL = postgresql
return database, nil
}
// CoreSecret returns a secret identifying harbor-core when communicating with
// other components
func CoreSecret() string {
return os.Getenv("CORE_SECRET")
}
// JobserviceSecret returns a secret identifying Jobservice when communicating with
// other components
// TODO replace it with method of SecretStore
func JobserviceSecret() string {
return os.Getenv("JOBSERVICE_SECRET")
}
// WithNotary returns a bool value to indicate if Harbor's deployed with Notary
func WithNotary() bool {
return cfgMgr.Get(common.WithNotary).GetBool()
}
// WithClair returns a bool value to indicate if Harbor's deployed with Clair
func WithClair() bool {
return cfgMgr.Get(common.WithClair).GetBool()
}
// ClairEndpoint returns the end point of clair instance, by default it's the one deployed within Harbor.
func ClairEndpoint() string {
return cfgMgr.Get(common.ClairURL).GetString()
}
// ClairDB return Clair db info
func ClairDB() (*models.PostGreSQL, error) {
clairDB := &models.PostGreSQL{
Host: cfgMgr.Get(common.ClairDBHost).GetString(),
Port: cfgMgr.Get(common.ClairDBPort).GetInt(),
Username: cfgMgr.Get(common.ClairDBUsername).GetString(),
Password: cfgMgr.Get(common.ClairDBPassword).GetString(),
Database: cfgMgr.Get(common.ClairDB).GetString(),
SSLMode: cfgMgr.Get(common.ClairDBSSLMode).GetString(),
}
return clairDB, nil
}
// AdmiralEndpoint returns the URL of admiral, if Harbor is not deployed with admiral it should return an empty string.
func AdmiralEndpoint() string {
if cfgMgr.Get(common.AdmiralEndpoint).GetString() == "NA" {
return ""
}
return cfgMgr.Get(common.AdmiralEndpoint).GetString()
}
// ScanAllPolicy returns the policy which controls the scan all.
func ScanAllPolicy() models.ScanAllPolicy {
var res models.ScanAllPolicy
log.Infof("Scan all policy %v", cfgMgr.Get(common.ScanAllPolicy).GetString())
if err := json.Unmarshal([]byte(cfgMgr.Get(common.ScanAllPolicy).GetString()), &res); err != nil {
log.Errorf("Failed to unmarshal the value in configuration for Scan All policy, error: %v, returning the default policy", err)
return models.DefaultScanAllPolicy
}
return res
}
// WithAdmiral returns a bool to indicate if Harbor's deployed with admiral.
func WithAdmiral() bool {
return len(AdmiralEndpoint()) > 0
}
// UAASettings returns the UAASettings to access UAA service.
func UAASettings() (*models.UAASettings, error) {
err := cfgMgr.Load()
if err != nil {
return nil, err
}
us := &models.UAASettings{
Endpoint: cfgMgr.Get(common.UAAEndpoint).GetString(),
ClientID: cfgMgr.Get(common.UAAClientID).GetString(),
ClientSecret: cfgMgr.Get(common.UAAClientSecret).GetString(),
VerifyCert: cfgMgr.Get(common.UAAVerifyCert).GetBool(),
}
return us, nil
}
// ReadOnly returns a bool indicating whether Harbor is in read-only mode.
func ReadOnly() bool {
return cfgMgr.Get(common.ReadOnly).GetBool()
}
// WithChartMuseum returns a bool to indicate if chartmuseum is deployed with Harbor.
func WithChartMuseum() bool {
return cfgMgr.Get(common.WithChartMuseum).GetBool()
}
// GetChartMuseumEndpoint returns the endpoint of the chartmuseum service;
// otherwise a non-nil error is returned
func GetChartMuseumEndpoint() (string, error) {
chartEndpoint := strings.TrimSpace(cfgMgr.Get(common.ChartRepoURL).GetString())
if len(chartEndpoint) == 0 {
return "", errors.New("empty chartmuseum endpoint")
}
return chartEndpoint, nil
}
// GetRedisOfRegURL returns the URL of Redis used by registry
func GetRedisOfRegURL() string {
return os.Getenv("_REDIS_URL_REG")
}
// GetPortalURL returns the URL of portal
func GetPortalURL() string {
url := os.Getenv("PORTAL_URL")
if len(url) == 0 {
return common.DefaultPortalURL
}
return url
}
// GetRegistryCtlURL returns the URL of registryctl
func GetRegistryCtlURL() string {
url := os.Getenv("REGISTRYCTL_URL")
if len(url) == 0 {
return common.DefaultRegistryCtlURL
}
return url
}
// GetClairHealthCheckServerURL returns the URL of
// the health check server of Clair
func GetClairHealthCheckServerURL() string {
url := os.Getenv("CLAIR_HEALTH_CHECK_SERVER_URL")
if len(url) == 0 {
return common.DefaultClairHealthCheckServerURL
}
return url
}
// HTTPAuthProxySetting returns the setting of HTTP Auth proxy. the settings are only meaningful when the auth_mode is
// set to http_auth
func HTTPAuthProxySetting() (*models.HTTPAuthProxy, error) {
if err := cfgMgr.Load(); err != nil {
return nil, err
}
return &models.HTTPAuthProxy{
Endpoint: cfgMgr.Get(common.HTTPAuthProxyEndpoint).GetString(),
TokenReviewEndpoint: cfgMgr.Get(common.HTTPAuthProxyTokenReviewEndpoint).GetString(),
VerifyCert: cfgMgr.Get(common.HTTPAuthProxyVerifyCert).GetBool(),
SkipSearch: cfgMgr.Get(common.HTTPAuthProxySkipSearch).GetBool(),
}, nil
}
// OIDCSetting returns the setting of OIDC provider, currently there's only one OIDC provider allowed for Harbor and it's
// only effective when auth_mode is set to oidc_auth
func OIDCSetting() (*models.OIDCSetting, error) {
if err := cfgMgr.Load(); err != nil {
return nil, err
}
scopeStr := cfgMgr.Get(common.OIDCScope).GetString()
extEndpoint := strings.TrimSuffix(cfgMgr.Get(common.ExtEndpoint).GetString(), "/")
scope := []string{}
for _, s := range strings.Split(scopeStr, ",") {
scope = append(scope, strings.TrimSpace(s))
}
return &models.OIDCSetting{
Name: cfgMgr.Get(common.OIDCName).GetString(),
Endpoint: cfgMgr.Get(common.OIDCEndpoint).GetString(),
VerifyCert: cfgMgr.Get(common.OIDCVerifyCert).GetBool(),
ClientID: cfgMgr.Get(common.OIDCCLientID).GetString(),
ClientSecret: cfgMgr.Get(common.OIDCClientSecret).GetString(),
GroupsClaim: cfgMgr.Get(common.OIDCGroupsClaim).GetString(),
RedirectURL: extEndpoint + common.OIDCCallbackPath,
Scope: scope,
}, nil
}
// NotificationEnable returns a bool indicating whether notifications are enabled in Harbor
func NotificationEnable() bool {
return cfgMgr.Get(common.NotificationEnable).GetBool()
}
// QuotaPerProjectEnable returns a bool indicating whether per-project quota is enabled in Harbor
func QuotaPerProjectEnable() bool {
return cfgMgr.Get(common.QuotaPerProjectEnable).GetBool()
}
// QuotaSetting returns the setting of quota.
func QuotaSetting() (*models.QuotaSetting, error) {
if err := cfgMgr.Load(); err != nil {
return nil, err
}
return &models.QuotaSetting{
CountPerProject: cfgMgr.Get(common.CountPerProject).GetInt64(),
StoragePerProject: cfgMgr.Get(common.StoragePerProject).GetInt64(),
}, nil
}
| [
"\"KEY_PATH\"",
"\"SERVICE_TOKEN_FILE_PATH\"",
"\"TOKEN_PRIVATE_KEY_PATH\"",
"\"CORE_SECRET\"",
"\"JOBSERVICE_SECRET\"",
"\"_REDIS_URL_REG\"",
"\"PORTAL_URL\"",
"\"REGISTRYCTL_URL\"",
"\"CLAIR_HEALTH_CHECK_SERVER_URL\""
]
| []
| [
"_REDIS_URL_REG",
"REGISTRYCTL_URL",
"PORTAL_URL",
"CORE_SECRET",
"SERVICE_TOKEN_FILE_PATH",
"TOKEN_PRIVATE_KEY_PATH",
"CLAIR_HEALTH_CHECK_SERVER_URL",
"KEY_PATH",
"JOBSERVICE_SECRET"
]
| [] | ["_REDIS_URL_REG", "REGISTRYCTL_URL", "PORTAL_URL", "CORE_SECRET", "SERVICE_TOKEN_FILE_PATH", "TOKEN_PRIVATE_KEY_PATH", "CLAIR_HEALTH_CHECK_SERVER_URL", "KEY_PATH", "JOBSERVICE_SECRET"] | go | 9 | 0 | |
models/baseline_models/egnn.py | import torch
import torch.nn as nn
from torch_scatter import scatter_sum
from models.utils import GaussianSmearing, MLP
class EnBaseLayer(nn.Module):
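    """E(n)-equivariant message-passing layer: updates node features h (and,
    optionally, coordinates x) from squared pairwise distances, following the
    EGNN update equations referenced in the forward pass."""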
def __init__(self, hidden_dim, edge_feat_dim, num_r_gaussian, update_x=True, act_fn='relu', norm=False):
super().__init__()
self.r_min = 0.
self.r_max = 10. ** 2
self.hidden_dim = hidden_dim
self.num_r_gaussian = num_r_gaussian
self.edge_feat_dim = edge_feat_dim
self.update_x = update_x
self.act_fn = act_fn
self.norm = norm
if num_r_gaussian > 1:
self.r_expansion = GaussianSmearing(self.r_min, self.r_max, num_gaussians=num_r_gaussian)
self.edge_mlp = MLP(2 * hidden_dim + edge_feat_dim + num_r_gaussian, hidden_dim, hidden_dim,
num_layer=2, norm=norm, act_fn=act_fn, act_last=True)
self.edge_inf = nn.Sequential(nn.Linear(hidden_dim, 1), nn.Sigmoid())
if self.update_x:
self.x_mlp = MLP(hidden_dim, 1, hidden_dim, num_layer=2, norm=norm, act_fn=act_fn)
self.node_mlp = MLP(2 * hidden_dim, hidden_dim, hidden_dim, num_layer=2, norm=norm, act_fn=act_fn)
def forward(self, G, h, x):
"""Forward pass of the linear layer
Args:
h: dict of node-features
x: coordinates
G: minibatch of (homo)graphs
Returns:
tensor with new features [B, n_points, n_features_out]
"""
edge_index = G.edges()
src, dst = edge_index
if self.edge_feat_dim > 0:
edge_feat = G.edata['w'] # shape: [#edges_in_batch, #bond_types]
else:
edge_feat = None
rel_x = x[dst] - x[src]
r = torch.sum(rel_x ** 2, -1, keepdim=True)
if self.num_r_gaussian > 1:
r_feat = self.r_expansion(r)
else:
r_feat = r
hi, hj = h[dst], h[src]
# \phi_e in Eq(3)
if edge_feat is None:
mij = self.edge_mlp(torch.cat([r_feat, hi, hj], -1))
else:
mij = self.edge_mlp(torch.cat([edge_feat, r_feat, hi, hj], -1))
eij = self.edge_inf(mij)
mi = scatter_sum(mij * eij, dst, dim=0, dim_size=h.shape[0])
# h update in Eq(6)
h = h + self.node_mlp(torch.cat([mi, h], -1))
if self.update_x:
# x update in Eq(4)
xi, xj = x[dst], x[src]
            # dim_size keeps delta_x aligned with x even if some nodes have no incoming edges
            delta_x = scatter_sum((xi - xj) * self.x_mlp(mij), dst, dim=0, dim_size=x.shape[0])
x = x + delta_x
return h, x
class EnEquiNetwork(nn.Module):
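    """Stack of EnBaseLayer blocks forming an E(n)-equivariant network over a
    graph; returns final coordinates plus the per-layer coordinate (and,
    optionally, feature) trajectories."""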
def __init__(self, num_layers, hidden_dim, edge_feat_dim, num_r_gaussian,
update_x=True, act_fn='relu', norm=False):
super().__init__()
# Build the network
self.num_layers = num_layers
self.hidden_dim = hidden_dim
self.edge_feat_dim = edge_feat_dim
self.num_r_gaussian = num_r_gaussian
self.update_x = update_x
self.act_fn = act_fn
self.norm = norm
self.net = self._build_network()
def _build_network(self):
# Equivariant layers
layers = []
for l_idx in range(self.num_layers):
layer = EnBaseLayer(self.hidden_dim, self.edge_feat_dim, self.num_r_gaussian,
update_x=self.update_x, act_fn=self.act_fn, norm=self.norm)
layers.append(layer)
return nn.ModuleList(layers)
def forward(self, G, return_h=False):
x = G.ndata['x']
h = G.ndata['f'].squeeze(-1)
all_x = [x]
all_h = [h]
for l_idx, layer in enumerate(self.net):
h, x = layer(G, h, x)
all_x.append(x)
all_h.append(h)
if return_h:
return x, all_x, all_h
else:
return x, all_x
| []
| []
| []
| [] | [] | python | null | null | null |
v1/messages/generic_test.go | // Copyright 2019 go-gtp authors. All rights reserved.
// Use of this source code is governed by a MIT-style license that can be
// found in the LICENSE file.
package messages_test
import (
"testing"
"github.com/wmnsk/go-gtp/v1/messages"
"github.com/wmnsk/go-gtp/v1/testutils"
)
func TestGeneric(t *testing.T) {
cases := []testutils.TestCase{
{
Description: "Normal",
Structured: messages.NewGeneric(messages.MsgTypeEchoRequest, 0, 0),
Serialized: []byte{
0x32, 0x01, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
},
},
}
testutils.Run(t, cases, func(b []byte) (testutils.Serializable, error) {
v, err := messages.ParseGeneric(b)
if err != nil {
return nil, err
}
v.Payload = nil
return v, nil
})
}
| []
| []
| []
| [] | [] | go | null | null | null |
functions/Connect.go | package functions
import (
"encoding/base64"
"fmt"
"os"
"github.com/amimof/huego"
"github.com/spf13/viper"
)
// ConnectHUE connects to the HUE bridge and returns the API token and bridge handle.
func ConnectHUE() (token string, bridge *huego.Bridge) {
user := viper.GetString("hueconfig.user")
token64, err := base64.StdEncoding.DecodeString(viper.GetString("hueconfig.token"))
token = string(token64)
if err != nil {
panic(err)
}
hostip := viper.GetString("hueconfig.ip")
if token == "" {
token = os.Getenv("HUE_TOKEN")
user = os.Getenv("HUE_USER")
}
if user == "" {
fmt.Print("No HUEUSER env set, enter username: ")
fmt.Scanln(&user)
if user != "" {
viper.SetDefault("hueconfig.user", user)
viper.WriteConfig()
}
}
if hostip == "" {
bridge, _ = huego.Discover()
bconfig, err := bridge.GetConfig()
if err == nil {
hostip = bconfig.IPAddress
fmt.Println(hostip)
}
} else {
bridge = huego.New(hostip, user)
}
if token == "" {
token, _ = bridge.CreateUser(user)
if token != "" {
viper.SetDefault("hueconfig.token", base64.StdEncoding.EncodeToString([]byte(token)))
viper.WriteConfig()
}
}
bridge.Login(token)
return token, bridge
}
| [
"\"HUE_TOKEN\"",
"\"HUE_USER\""
]
| []
| [
"HUE_USER",
"HUE_TOKEN"
]
| [] | ["HUE_USER", "HUE_TOKEN"] | go | 2 | 0 | |
pkg/ibmcloudprovider/volume_provider.go | /**
* Copyright 2021 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package ibmcloudprovider ...
package ibmcloudprovider
import (
"fmt"
"os"
"time"
"github.com/IBM/ibm-csi-common/pkg/utils"
"github.com/IBM/ibmcloud-volume-interface/config"
"github.com/IBM/ibmcloud-volume-interface/lib/provider"
"github.com/IBM/ibmcloud-volume-interface/provider/local"
provider_util "github.com/IBM/ibmcloud-volume-vpc/block/utils"
vpcconfig "github.com/IBM/ibmcloud-volume-vpc/block/vpcconfig"
"github.com/IBM/ibmcloud-volume-vpc/common/registry"
"go.uber.org/zap"
"golang.org/x/net/context"
)
// IBMCloudStorageProvider Provider
type IBMCloudStorageProvider struct {
ProviderName string
ProviderConfig *config.Config
Registry registry.Providers
ClusterInfo *utils.ClusterInfo
}
var _ CloudProviderInterface = &IBMCloudStorageProvider{}
// NewIBMCloudStorageProvider ...
func NewIBMCloudStorageProvider(configPath string, logger *zap.Logger) (*IBMCloudStorageProvider, error) {
logger.Info("NewIBMCloudStorageProvider-Reading provider configuration...")
// Load config file
conf, err := config.ReadConfig(configPath, logger)
if err != nil {
logger.Fatal("Error loading configuration")
return nil, err
}
	// Keep only the date part of VPC_API_VERSION: for "2019-07-02T00:00:00.000Z" VPC needs just "2019-07-02"
dateTime, err := time.Parse(time.RFC3339, conf.VPC.APIVersion)
if err == nil {
conf.VPC.APIVersion = fmt.Sprintf("%d-%02d-%02d", dateTime.Year(), dateTime.Month(), dateTime.Day())
} else {
logger.Warn("Failed to parse VPC_API_VERSION, setting default value")
conf.VPC.APIVersion = "2020-07-02" // setting default values
}
var clusterInfo *utils.ClusterInfo
logger.Info("Fetching clusterInfo")
	if (conf.IKS != nil && conf.IKS.Enabled) || os.Getenv("IKS_ENABLED") == "True" {
clusterInfo, err = utils.NewClusterInfo(logger)
if err != nil {
logger.Fatal("Unable to load ClusterInfo", local.ZapError(err))
return nil, err
}
logger.Info("Fetched clusterInfo..")
if conf.Bluemix.Encryption || conf.VPC.Encryption {
// api Key if encryption is enabled
logger.Info("Creating NewAPIKeyImpl...")
apiKeyImp, err := utils.NewAPIKeyImpl(logger)
if err != nil {
logger.Fatal("Unable to create API key getter", local.ZapError(err))
return nil, err
}
logger.Info("Created NewAPIKeyImpl...")
err = apiKeyImp.UpdateIAMKeys(conf)
if err != nil {
logger.Fatal("Unable to get API key", local.ZapError(err))
return nil, err
}
}
}
// Update the CSRF Token
if conf.Bluemix.PrivateAPIRoute != "" {
conf.Bluemix.CSRFToken = string([]byte{}) // TODO~ Need to remove it
}
if conf.API == nil {
conf.API = &config.APIConfig{
PassthroughSecret: string([]byte{}), // // TODO~ Need to remove it
}
}
vpcBlockConfig := &vpcconfig.VPCBlockConfig{
VPCConfig: conf.VPC,
IKSConfig: conf.IKS,
APIConfig: conf.API,
ServerConfig: conf.Server,
}
// Prepare provider registry
registry, err := provider_util.InitProviders(vpcBlockConfig, logger)
if err != nil {
logger.Fatal("Error configuring providers", local.ZapError(err))
}
var providerName string
if isRunningInIKS() && conf.IKS.Enabled {
providerName = conf.IKS.IKSBlockProviderName
} else if conf.VPC.Enabled {
providerName = conf.VPC.VPCBlockProviderName
}
cloudProvider := &IBMCloudStorageProvider{
ProviderName: providerName,
ProviderConfig: conf,
Registry: registry,
ClusterInfo: clusterInfo,
}
logger.Info("Successfully read provider configuration")
return cloudProvider, nil
}
func isRunningInIKS() bool {
return true //TODO Check the master KUBE version
}
// GetProviderSession ...
func (icp *IBMCloudStorageProvider) GetProviderSession(ctx context.Context, logger *zap.Logger) (provider.Session, error) {
logger.Info("IBMCloudStorageProvider-GetProviderSession...")
if icp.ProviderConfig.API == nil {
icp.ProviderConfig.API = &config.APIConfig{
PassthroughSecret: string([]byte{}), // // TODO~ Need to remove it
}
}
vpcBlockConfig := &vpcconfig.VPCBlockConfig{
VPCConfig: icp.ProviderConfig.VPC,
IKSConfig: icp.ProviderConfig.IKS,
APIConfig: icp.ProviderConfig.API,
ServerConfig: icp.ProviderConfig.Server,
}
session, isFatal, err := provider_util.OpenProviderSessionWithContext(ctx, vpcBlockConfig, icp.Registry, icp.ProviderName, logger)
if err != nil || isFatal {
logger.Error("Failed to get provider session", zap.Reflect("Error", err))
return nil, err
}
// Instantiate CloudProvider
logger.Info("Successfully got the provider session", zap.Reflect("ProviderName", session.ProviderName()))
return session, nil
}
// GetConfig ...
func (icp *IBMCloudStorageProvider) GetConfig() *config.Config {
return icp.ProviderConfig
}
// GetClusterInfo ...
func (icp *IBMCloudStorageProvider) GetClusterInfo() *utils.ClusterInfo {
return icp.ClusterInfo
}
| [
"\"IKS_ENABLED\""
]
| []
| [
"IKS_ENABLED"
]
| [] | ["IKS_ENABLED"] | go | 1 | 0 | |
cmd/util.go | // Copyright 2016-2018, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"bytes"
"context"
"encoding/json"
"fmt"
"os"
"os/exec"
"os/signal"
"path/filepath"
"sort"
"strconv"
"strings"
multierror "github.com/hashicorp/go-multierror"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
survey "gopkg.in/AlecAivazis/survey.v1"
surveycore "gopkg.in/AlecAivazis/survey.v1/core"
git "gopkg.in/src-d/go-git.v4"
"github.com/pulumi/pulumi/pkg/backend"
"github.com/pulumi/pulumi/pkg/backend/display"
"github.com/pulumi/pulumi/pkg/backend/filestate"
"github.com/pulumi/pulumi/pkg/backend/httpstate"
"github.com/pulumi/pulumi/pkg/backend/state"
"github.com/pulumi/pulumi/pkg/diag/colors"
"github.com/pulumi/pulumi/pkg/engine"
"github.com/pulumi/pulumi/pkg/util/cancel"
"github.com/pulumi/pulumi/pkg/util/ciutil"
"github.com/pulumi/pulumi/pkg/util/cmdutil"
"github.com/pulumi/pulumi/pkg/util/contract"
"github.com/pulumi/pulumi/pkg/util/gitutil"
"github.com/pulumi/pulumi/pkg/util/logging"
"github.com/pulumi/pulumi/pkg/util/tracing"
"github.com/pulumi/pulumi/pkg/workspace"
)
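// hasDebugCommands reports whether hidden debug commands are enabled via the
// PULUMI_DEBUG_COMMANDS environment variable.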
func hasDebugCommands() bool {
return cmdutil.IsTruthy(os.Getenv("PULUMI_DEBUG_COMMANDS"))
}
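// useLegacyDiff reports whether the legacy diff algorithm is enabled via the
// PULUMI_ENABLE_LEGACY_DIFF environment variable.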
func useLegacyDiff() bool {
return cmdutil.IsTruthy(os.Getenv("PULUMI_ENABLE_LEGACY_DIFF"))
}
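// currentBackend resolves the backend for the configured cloud URL: the local
// file state backend for file URLs, otherwise a login to the HTTP state backend.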
func currentBackend(opts display.Options) (backend.Backend, error) {
url, err := workspace.GetCurrentCloudURL()
if err != nil {
return nil, errors.Wrapf(err, "could not get cloud url")
}
if filestate.IsFileStateBackendURL(url) {
return filestate.New(cmdutil.Diag(), url)
}
return httpstate.Login(commandContext(), cmdutil.Diag(), url, opts)
}
// This is used to control the contents of the tracing header.
var tracingHeader = os.Getenv("PULUMI_TRACING_HEADER")
func commandContext() context.Context {
ctx := context.Background()
if cmdutil.IsTracingEnabled() {
if cmdutil.TracingRootSpan != nil {
ctx = opentracing.ContextWithSpan(ctx, cmdutil.TracingRootSpan)
}
tracingOptions := tracing.Options{
PropagateSpans: true,
TracingHeader: tracingHeader,
}
ctx = tracing.ContextWithOptions(ctx, tracingOptions)
}
return ctx
}
// createStack creates a stack with the given name, and optionally selects it as the current.
func createStack(
b backend.Backend, stackRef backend.StackReference, opts interface{}, setCurrent bool,
secretsProvider string) (backend.Stack, error) {
	// As part of creating the stack, we also need to configure the secrets provider for the stack. Today, we only
	// have to do this configuration step when you are using the passphrase provider (which is used for all filestate
	// stacks as well as httpstate stacks that opted into this by passing --secrets-provider passphrase
	// while initializing a stack). The only other supported provider today (the provider that uses the Pulumi service)
	// does not need to be initialized explicitly, as creating the stack inside the Pulumi service does this.
if _, ok := b.(filestate.Backend); ok || secretsProvider == "passphrase" {
		if _, passphraseErr := newPassphraseSecretsManager(stackRef.Name(), stackConfigFile); passphraseErr != nil {
			return nil, passphraseErr
}
} else if secretsProvider != "" && secretsProvider != "default" {
// All other non-default secrets providers are handled by the cloud secrets provider which
// uses a URL schema to identify the provider
if _, secretsErr := newCloudSecretsManager(stackRef.Name(), stackConfigFile, secretsProvider); secretsErr != nil {
return nil, secretsErr
}
}
stack, err := b.CreateStack(commandContext(), stackRef, opts)
if err != nil {
// If it's a StackAlreadyExistsError, don't wrap it.
if _, ok := err.(*backend.StackAlreadyExistsError); ok {
return nil, err
}
return nil, errors.Wrapf(err, "could not create stack")
}
if setCurrent {
if err = state.SetCurrentStack(stack.Ref().String()); err != nil {
return nil, err
}
}
return stack, nil
}
// requireStack will require that a stack exists. If stackName is blank, the currently selected stack from
// the workspace is returned. If no stack with either the given name, or a currently selected stack, exists,
// and we are in an interactive terminal, the user will be prompted to create a new stack.
func requireStack(
stackName string, offerNew bool, opts display.Options, setCurrent bool) (backend.Stack, error) {
if stackName == "" {
return requireCurrentStack(offerNew, opts, setCurrent)
}
b, err := currentBackend(opts)
if err != nil {
return nil, err
}
stackRef, err := b.ParseStackReference(stackName)
if err != nil {
return nil, err
}
stack, err := b.GetStack(commandContext(), stackRef)
if err != nil {
return nil, err
}
if stack != nil {
return stack, err
}
// No stack was found. If we're in a terminal, prompt to create one.
if offerNew && cmdutil.Interactive() {
fmt.Printf("The stack '%s' does not exist.\n", stackName)
fmt.Printf("\n")
_, err = cmdutil.ReadConsole("If you would like to create this stack now, please press <ENTER>, otherwise " +
"press ^C")
if err != nil {
return nil, err
}
return createStack(b, stackRef, nil, setCurrent, "")
}
return nil, errors.Errorf("no stack named '%s' found", stackName)
}
func requireCurrentStack(offerNew bool, opts display.Options, setCurrent bool) (backend.Stack, error) {
// Search for the current stack.
b, err := currentBackend(opts)
if err != nil {
return nil, err
}
stack, err := state.CurrentStack(commandContext(), b)
if err != nil {
return nil, err
} else if stack != nil {
return stack, nil
}
// If no current stack exists, and we are interactive, prompt to select or create one.
return chooseStack(b, offerNew, opts, setCurrent)
}
// chooseStack will prompt the user to choose amongst the full set of stacks in the given backend. If offerNew is
// true, then the option to create an entirely new stack is provided and will create one as desired.
func chooseStack(
b backend.Backend, offerNew bool, opts display.Options, setCurrent bool) (backend.Stack, error) {
// Prepare our error in case we need to issue it. Bail early if we're not interactive.
var chooseStackErr string
if offerNew {
chooseStackErr = "no stack selected; please use `pulumi stack select` or `pulumi stack init` to choose one"
} else {
chooseStackErr = "no stack selected; please use `pulumi stack select` to choose one"
}
if !cmdutil.Interactive() {
return nil, errors.New(chooseStackErr)
}
proj, err := workspace.DetectProject()
if err != nil {
return nil, err
}
// List stacks as available options.
var options []string
summaries, err := b.ListStacks(commandContext(), &proj.Name)
if err != nil {
return nil, errors.Wrapf(err, "could not query backend for stacks")
}
for _, summary := range summaries {
name := summary.Name().String()
options = append(options, name)
}
sort.Strings(options)
// If we are offering to create a new stack, add that to the end of the list.
const newOption = "<create a new stack>"
if offerNew {
options = append(options, newOption)
} else if len(options) == 0 {
// If no options are available, we can't offer a choice!
return nil, errors.New("this command requires a stack, but there are none")
}
// If a stack is already selected, make that the default.
var current string
currStack, currErr := state.CurrentStack(commandContext(), b)
contract.IgnoreError(currErr)
if currStack != nil {
current = currStack.Ref().String()
}
// Customize the prompt a little bit (and disable color since it doesn't match our scheme).
surveycore.DisableColor = true
surveycore.QuestionIcon = ""
surveycore.SelectFocusIcon = opts.Color.Colorize(colors.BrightGreen + ">" + colors.Reset)
message := "\rPlease choose a stack"
if offerNew {
message += ", or create a new one:"
} else {
message += ":"
}
message = opts.Color.Colorize(colors.SpecPrompt + message + colors.Reset)
var option string
if err = survey.AskOne(&survey.Select{
Message: message,
Options: options,
Default: current,
}, &option, nil); err != nil {
return nil, errors.New(chooseStackErr)
}
if option == newOption {
hint := "Please enter your desired stack name."
if b.SupportsOrganizations() {
hint += "\nTo create a stack in an organization, " +
"use the format <org-name>/<stack-name> (e.g. `acmecorp/dev`)"
}
stackName, readErr := cmdutil.ReadConsole(hint)
if readErr != nil {
return nil, readErr
}
stackRef, parseErr := b.ParseStackReference(stackName)
if parseErr != nil {
return nil, parseErr
}
return createStack(b, stackRef, nil, setCurrent, "")
}
// With the stack name selected, look it up from the backend.
stackRef, err := b.ParseStackReference(option)
if err != nil {
return nil, errors.Wrap(err, "parsing selected stack")
}
stack, err := b.GetStack(commandContext(), stackRef)
if err != nil {
return nil, errors.Wrap(err, "getting selected stack")
}
// If setCurrent is true, we'll persist this choice so it'll be used for future CLI operations.
if setCurrent {
if err = state.SetCurrentStack(stackRef.String()); err != nil {
return nil, err
}
}
return stack, nil
}
// projType represents the various types of Pulumi project. All Pulumi projects are denoted by a
// Pulumi.yaml in the root of the workspace.
type projType string
const (
// pulumiAppProj is a Pulumi application project.
pulumiAppProj projType = "pulumi-app"
// pulumiPolicyProj is a Pulumi resource policy project.
pulumiPolicyProj projType = "pulumi-policy"
)
// readProject attempts to detect and read a project of type `projType` for the current workspace.
// If the project is successfully detected and read, it is returned along with the path to its
// containing directory, which will be used as the root of the project's Pulumi program.
func readProject(projType projType) (*workspace.Project, string, error) {
pwd, err := os.Getwd()
if err != nil {
return nil, "", err
}
// Now that we got here, we have a path, so we will try to load it.
path, err := workspace.DetectProjectPathFrom(pwd)
if err != nil {
return nil, "", errors.Wrapf(err, "failed to find current Pulumi project because of "+
"an error when searching for the Pulumi.yaml file (searching upwards from %s)", pwd)
} else if path == "" {
return nil, "", errReadProjNoPulumiYAML(projType, pwd)
}
proj, err := workspace.LoadProject(path)
if err != nil {
return nil, "", errors.Wrapf(err, "failed to load Pulumi project located at %q", path)
}
return proj, filepath.Dir(path), nil
}
func errReadProjNoPulumiYAML(projType projType, pwd string) error {
switch projType {
case pulumiPolicyProj:
return fmt.Errorf("no Pulumi.yaml project file found (searching upwards from %s)", pwd)
default:
return fmt.Errorf(
"no Pulumi.yaml project file found (searching upwards from %s). If you have not "+
"created a project yet, use `pulumi new` to do so", pwd)
}
}
// anyWriter is an io.Writer that will set itself to `true` iff any call to `anyWriter.Write` is made with a
// non-zero-length slice. This can be used to determine whether or not any data was ever written to the writer.
type anyWriter bool
func (w *anyWriter) Write(d []byte) (int, error) {
if len(d) > 0 {
*w = true
}
return len(d), nil
}
// isGitWorkTreeDirty returns true if the work tree for the current directory's repository is dirty.
func isGitWorkTreeDirty(repoRoot string) (bool, error) {
gitBin, err := exec.LookPath("git")
if err != nil {
return false, err
}
gitStatusCmd := exec.Command(gitBin, "status", "--porcelain", "-z")
var anyOutput anyWriter
var stderr bytes.Buffer
gitStatusCmd.Dir = repoRoot
gitStatusCmd.Stdout = &anyOutput
gitStatusCmd.Stderr = &stderr
if err = gitStatusCmd.Run(); err != nil {
if ee, ok := err.(*exec.ExitError); ok {
ee.Stderr = stderr.Bytes()
}
return false, errors.Wrapf(err, "'git status' failed")
}
return bool(anyOutput), nil
}
// getUpdateMetadata returns an UpdateMetadata object, with optional data about the environment
// performing the update.
func getUpdateMetadata(msg, root string) (*backend.UpdateMetadata, error) {
m := &backend.UpdateMetadata{
Message: msg,
Environment: make(map[string]string),
}
if err := addGitMetadata(root, m); err != nil {
logging.V(3).Infof("errors detecting git metadata: %s", err)
}
addCIMetadataToEnvironment(m.Environment)
return m, nil
}
// addGitMetadata populates the environment metadata bag with Git-related values.
func addGitMetadata(repoRoot string, m *backend.UpdateMetadata) error {
var allErrors *multierror.Error
// Gather git-related data as appropriate. (Returns nil, nil if no repo found.)
repo, err := gitutil.GetGitRepository(repoRoot)
if err != nil {
return errors.Wrapf(err, "detecting Git repository")
}
if repo == nil {
return nil
}
if err := AddGitRemoteMetadataToMap(repo, m.Environment); err != nil {
allErrors = multierror.Append(allErrors, err)
}
if err := addGitCommitMetadata(repo, repoRoot, m); err != nil {
allErrors = multierror.Append(allErrors, err)
}
return allErrors.ErrorOrNil()
}
// AddGitRemoteMetadataToMap reads the given git repo and adds its metadata to the given map bag.
func AddGitRemoteMetadataToMap(repo *git.Repository, env map[string]string) error {
var allErrors *multierror.Error
// Get the remote URL for this repo.
remoteURL, err := gitutil.GetGitRemoteURL(repo, "origin")
if err != nil {
return errors.Wrap(err, "detecting Git remote URL")
}
if remoteURL == "" {
return nil
}
// Check if the remote URL is a GitHub or a GitLab URL.
if err := addVCSMetadataToEnvironment(remoteURL, env); err != nil {
allErrors = multierror.Append(allErrors, err)
}
return allErrors.ErrorOrNil()
}
func addVCSMetadataToEnvironment(remoteURL string, env map[string]string) error {
// GitLab, Bitbucket, Azure DevOps etc. repo slug if applicable.
// We don't require a cloud-hosted VCS, so swallow errors.
vcsInfo, err := gitutil.TryGetVCSInfo(remoteURL)
if err != nil {
return errors.Wrap(err, "detecting VCS project information")
}
env[backend.VCSRepoOwner] = vcsInfo.Owner
env[backend.VCSRepoName] = vcsInfo.Repo
env[backend.VCSRepoKind] = vcsInfo.Kind
return nil
}
func addGitCommitMetadata(repo *git.Repository, repoRoot string, m *backend.UpdateMetadata) error {
// When running in a CI/CD environment, the current git repo may be running from a
	// detached HEAD and may not have the latest commit message. We fall back to
// CI-system specific environment variables when possible.
ciVars := ciutil.DetectVars()
// Commit at HEAD
head, err := repo.Head()
if err != nil {
return errors.Wrap(err, "getting repository HEAD")
}
hash := head.Hash()
m.Environment[backend.GitHead] = hash.String()
commit, commitErr := repo.CommitObject(hash)
if commitErr != nil {
return errors.Wrap(commitErr, "getting HEAD commit info")
}
// If in detached head, will be "HEAD", and fallback to use value from CI/CD system if possible.
// Otherwise, the value will be like "refs/heads/master".
headName := head.Name().String()
if headName == "HEAD" && ciVars.BranchName != "" {
headName = ciVars.BranchName
}
if headName != "HEAD" {
m.Environment[backend.GitHeadName] = headName
}
// If there is no message set manually, default to the Git commit's title.
msg := strings.TrimSpace(commit.Message)
if msg == "" && ciVars.CommitMessage != "" {
msg = ciVars.CommitMessage
}
if m.Message == "" {
m.Message = gitCommitTitle(msg)
}
// Store committer and author information.
m.Environment[backend.GitCommitter] = commit.Committer.Name
m.Environment[backend.GitCommitterEmail] = commit.Committer.Email
m.Environment[backend.GitAuthor] = commit.Author.Name
m.Environment[backend.GitAuthorEmail] = commit.Author.Email
// If the worktree is dirty, set a bit, as this could be a mistake.
isDirty, err := isGitWorkTreeDirty(repoRoot)
if err != nil {
return errors.Wrapf(err, "checking git worktree dirty state")
}
m.Environment[backend.GitDirty] = strconv.FormatBool(isDirty)
return nil
}
// gitCommitTitle turns a commit message into its title, simply by taking the first line.
func gitCommitTitle(s string) string {
if ixCR := strings.Index(s, "\r"); ixCR != -1 {
s = s[:ixCR]
}
if ixLF := strings.Index(s, "\n"); ixLF != -1 {
s = s[:ixLF]
}
return s
}
// addCIMetadataToEnvironment populates the environment metadata bag with CI/CD-related values.
func addCIMetadataToEnvironment(env map[string]string) {
// Add the key/value pair to env, if there actually is a value.
addIfSet := func(key, val string) {
if val != "" {
env[key] = val
}
}
// Use our built-in CI/CD detection logic.
vars := ciutil.DetectVars()
if vars.Name == "" {
return
}
env[backend.CISystem] = string(vars.Name)
addIfSet(backend.CIBuildID, vars.BuildID)
addIfSet(backend.CIBuildType, vars.BuildType)
addIfSet(backend.CIBuildURL, vars.BuildURL)
addIfSet(backend.CIPRHeadSHA, vars.SHA)
addIfSet(backend.CIPRNumber, vars.PRNumber)
}
type cancellationScope struct {
context *cancel.Context
sigint chan os.Signal
done chan bool
}
func (s *cancellationScope) Context() *cancel.Context {
return s.context
}
func (s *cancellationScope) Close() {
signal.Stop(s.sigint)
close(s.sigint)
<-s.done
}
type cancellationScopeSource int
var cancellationScopes = backend.CancellationScopeSource(cancellationScopeSource(0))
func (cancellationScopeSource) NewScope(events chan<- engine.Event, isPreview bool) backend.CancellationScope {
cancelContext, cancelSource := cancel.NewContext(context.Background())
c := &cancellationScope{
context: cancelContext,
sigint: make(chan os.Signal),
done: make(chan bool),
}
go func() {
for range c.sigint {
// If we haven't yet received a SIGINT, call the cancellation func. Otherwise call the termination
// func.
if cancelContext.CancelErr() == nil {
message := "^C received; cancelling. If you would like to terminate immediately, press ^C again.\n"
if !isPreview {
message += colors.BrightRed + "Note that terminating immediately may lead to orphaned resources " +
"and other inconsistencies.\n" + colors.Reset
}
events <- engine.Event{
Type: engine.StdoutColorEvent,
Payload: engine.StdoutEventPayload{
Message: message,
Color: colors.Always,
},
}
cancelSource.Cancel()
} else {
message := colors.BrightRed + "^C received; terminating" + colors.Reset
events <- engine.Event{
Type: engine.StdoutColorEvent,
Payload: engine.StdoutEventPayload{
Message: message,
Color: colors.Always,
},
}
cancelSource.Terminate()
}
}
close(c.done)
}()
signal.Notify(c.sigint, os.Interrupt)
return c
}
// printJSON simply prints out some object, formatted as JSON, using standard indentation.
func printJSON(v interface{}) error {
out, err := json.MarshalIndent(v, "", " ")
if err != nil {
return err
}
fmt.Println(string(out))
return nil
}
// updateFlagsToOptions ensures that the given update flags represent a valid combination. If so, an UpdateOptions
// is returned with a nil-error; otherwise, the non-nil error contains information about why the combination is invalid.
func updateFlagsToOptions(interactive, skipPreview, yes bool) (backend.UpdateOptions, error) {
if !interactive && !yes {
return backend.UpdateOptions{},
errors.New("--yes must be passed in non-interactive mode")
}
return backend.UpdateOptions{
AutoApprove: yes,
SkipPreview: skipPreview,
}, nil
}
| [
"\"PULUMI_DEBUG_COMMANDS\"",
"\"PULUMI_ENABLE_LEGACY_DIFF\"",
"\"PULUMI_TRACING_HEADER\""
]
| []
| [
"PULUMI_DEBUG_COMMANDS",
"PULUMI_TRACING_HEADER",
"PULUMI_ENABLE_LEGACY_DIFF"
]
| [] | ["PULUMI_DEBUG_COMMANDS", "PULUMI_TRACING_HEADER", "PULUMI_ENABLE_LEGACY_DIFF"] | go | 3 | 0 | |
docs/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
import os
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from recommonmark.parser import CommonMarkParser
# -- Project information -----------------------------------------------------
project = u'devilbox'
copyright = u'2018, cytopia'
author = u'cytopia'
# The short X.Y version
version = u''
# The full version, including alpha/beta/rc tags
release = u''
# -- General configuration ---------------------------------------------------
# on_rtd is whether we are on readthedocs.org; this line of code is grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo'
]
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = '.rst'
# Exclude Markdown files for now
#source_suffix = ['.rst', '.md']
#source_parsers = {
# '.md': CommonMarkParser,
#}
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
#html_theme = 'sphinx_rtd_theme'
html_theme = 'default'
html_logo = 'img/logo.png'
# From:
# * https://github.com/snide/sphinx_rtd_theme#using-this-theme-locally-then-building-on-read-the-docs
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# https://sphinx-rtd-theme.readthedocs.io/en/latest/configuring.html#project-wide-configuration
html_theme_options = {
'canonical_url': '',
'analytics_id': '',
'logo_only': False,
'display_version': True,
'prev_next_buttons_location': 'bottom',
#'style_external_links': False,
#'vcs_pageview_mode': '',
# Toc options
'collapse_navigation': False,
'sticky_navigation': True,
'navigation_depth': 5,
#'includehidden': True,
#'titles_only': False
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
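# Register the custom stylesheet with the Sphinx application at setup time.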
def setup(app):
app.add_stylesheet('css/custom.css')
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'devilboxdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'devilbox.tex', u'devilbox Documentation',
u'cytopia', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'devilbox', u'devilbox Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'devilbox', u'devilbox Documentation',
author, 'devilbox', 'One line description of project.',
'Miscellaneous'),
]
| []
| []
| [
"READTHEDOCS"
]
| [] | ["READTHEDOCS"] | python | 1 | 0 | |
manager/manager.go | // Copyright 2018 The Dimios Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package manager
import (
"context"
"fmt"
"net/http"
"time"
"os"
"github.com/bborbe/io/util"
"github.com/seibert-media/dimios/hook"
"github.com/seibert-media/dimios/whitelist"
"github.com/bborbe/http/client_builder"
"github.com/bborbe/teamvault-utils/connector"
"github.com/bborbe/teamvault-utils/model"
"github.com/bborbe/teamvault-utils/parser"
"github.com/golang/glog"
"github.com/pkg/errors"
"github.com/seibert-media/dimios/apply"
"github.com/seibert-media/dimios/change"
"github.com/seibert-media/dimios/finder"
"github.com/seibert-media/dimios/k8s"
file_provider "github.com/seibert-media/dimios/k8s/file"
remote_provider "github.com/seibert-media/dimios/k8s/remote"
k8s_discovery "k8s.io/client-go/discovery"
k8s_dynamic "k8s.io/client-go/dynamic"
k8s_rest "k8s.io/client-go/rest"
k8s_clientcmd "k8s.io/client-go/tools/clientcmd"
// Required for using GCP auth
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
)
// Manager is the main application package
type Manager struct {
Staging bool
TemplateDirectory string
TeamvaultURL string
TeamvaultUser string
TeamvaultPassword string
TeamvaultConfigPath string
Namespaces string
Whitelist string
Kubeconfig string
Webhook bool
Port int
}
// ReadTeamvaultConfig from path
func (m *Manager) ReadTeamvaultConfig() error {
teamvaultConfigPath := model.TeamvaultConfigPath(m.TeamvaultConfigPath)
if teamvaultConfigPath.Exists() {
teamvaultConfig, err := teamvaultConfigPath.Parse()
if err != nil {
glog.V(2).Infof("parse teamvault config failed: %v", err)
return err
}
m.TeamvaultURL = teamvaultConfig.Url.String()
m.TeamvaultUser = teamvaultConfig.User.String()
m.TeamvaultPassword = teamvaultConfig.Password.String()
}
return nil
}
// Validate if all Manager values are set correctly
func (m *Manager) Validate() error {
if len(m.TemplateDirectory) == 0 {
return fmt.Errorf("template directory missing")
}
if len(m.Namespaces) == 0 {
return fmt.Errorf("namespace missing")
}
if len(m.Kubeconfig) == 0 && os.Getenv("KUBERNETES_SERVICE_HOST") == "" && os.Getenv("KUBERNETES_SERVICE_PORT") == "" {
return fmt.Errorf("kubeconfig missing")
}
if len(m.TeamvaultURL) == 0 && !m.Staging {
return fmt.Errorf("teamvault url missing")
}
if len(m.TeamvaultUser) == 0 && !m.Staging {
return fmt.Errorf("teamvault user missing")
}
if len(m.TeamvaultPassword) == 0 && !m.Staging {
return fmt.Errorf("teamvault password missing")
}
return nil
}
// Run Manager
func (m *Manager) Run(ctx context.Context) error {
glog.V(0).Info("dimios started")
discovery, dynamicPool, err := m.createClients()
if err != nil {
return err
}
fileProvider := file_provider.New(
file_provider.TemplateDirectory(m.TemplateDirectory),
m.createTeamvaultConfigParser(),
)
remoteProvider := remote_provider.New(
discovery,
dynamicPool,
)
applier := apply.New(
m.Staging,
discovery,
dynamicPool,
)
getter := finder.New(
fileProvider,
remoteProvider,
k8s.NamespacesFromCommaSeperatedList(m.Namespaces),
whitelist.ByString(m.Whitelist),
)
syncer := &change.Syncer{
Applier: applier,
Getter: getter,
}
if m.Webhook {
server := &http.Server{
Addr: fmt.Sprintf(":%d", m.Port),
Handler: hook.NewHandler(syncer),
}
glog.V(1).Infof("start webserver on port %d", m.Port)
return server.ListenAndServe()
}
glog.V(1).Infof("run sync")
if err := syncer.Run(ctx); err != nil {
glog.V(0).Infof("dimios failed: %v", err)
return err
}
glog.V(0).Info("dimios completed successful")
return nil
}
func createConfig(kubeconfig string) (*k8s_rest.Config, error) {
if len(kubeconfig) > 0 {
glog.V(4).Infof("create kube config from flags")
kubeconfig, err := util.NormalizePath(kubeconfig)
if err != nil {
return nil, errors.Wrap(err, "normalize path failed")
}
return k8s_clientcmd.BuildConfigFromFlags("", kubeconfig)
}
glog.V(4).Infof("create in cluster kube config")
return k8s_rest.InClusterConfig()
}
func (m *Manager) createClients() (*k8s_discovery.DiscoveryClient, k8s_dynamic.ClientPool, error) {
cfg, err := createConfig(m.Kubeconfig)
if err != nil {
return nil, nil, errors.Wrap(err, "create clientConfig failed")
}
discovery, err := k8s_discovery.NewDiscoveryClientForConfig(cfg)
if err != nil {
return nil, nil, errors.Wrap(err, "creating k8s_discovery client failed")
}
dynamicPool := k8s_dynamic.NewDynamicClientPool(cfg)
return discovery, dynamicPool, nil
}
func (m *Manager) createTeamvaultConfigParser() parser.Parser {
return parser.New(m.createTeamvaultConnector())
}
// createTeamvaultConnector returns a dummy connector in staging mode,
// otherwise a real HTTP-backed Teamvault connector.
func (m *Manager) createTeamvaultConnector() connector.Connector {
if m.Staging {
return connector.NewDummy()
}
httpClient := client_builder.New().WithTimeout(5 * time.Second).Build()
return connector.New(
httpClient.Do,
model.TeamvaultUrl(m.TeamvaultURL),
model.TeamvaultUser(m.TeamvaultUser),
model.TeamvaultPassword(m.TeamvaultPassword),
)
}
| [
"\"KUBERNETES_SERVICE_HOST\"",
"\"KUBERNETES_SERVICE_PORT\""
]
| []
| [
"KUBERNETES_SERVICE_HOST",
"KUBERNETES_SERVICE_PORT"
]
| [] | ["KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT"] | go | 2 | 0 | |
EduOne/EduOne/wsgi.py | """
WSGI config for EduOne project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'EduOne.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
webapp/ENV/lib/python3.6/site-packages/distributed/client.py | from __future__ import print_function, division, absolute_import
import atexit
from collections import Iterator, defaultdict
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures._base import DoneAndNotDoneFutures, CancelledError
from contextlib import contextmanager
import copy
from datetime import timedelta
import errno
from functools import partial
from glob import glob
import itertools
import json
import logging
from numbers import Number, Integral
import os
import sys
import uuid
import threading
import six
import socket
import warnings
import weakref
import dask
from dask.base import tokenize, normalize_token, collections_to_dsk
from dask.core import flatten, get_dependencies
from dask.compatibility import apply, unicode
from dask.context import _globals
try:
from cytoolz import first, groupby, merge, valmap, keymap
except ImportError:
from toolz import first, groupby, merge, valmap, keymap
try:
from dask.delayed import single_key
except ImportError:
single_key = first
from tornado import gen
from tornado.gen import TimeoutError
from tornado.locks import Event, Condition
from tornado.ioloop import IOLoop
from tornado.queues import Queue
from .batched import BatchedSend
from .utils_comm import (WrappedKey, unpack_remotedata, pack_data,
scatter_to_workers, gather_from_workers)
from .cfexecutor import ClientExecutor
from .compatibility import Queue as pyQueue, Empty, isqueue, html_escape
from .config import config
from .core import connect, rpc, clean_exception, CommClosedError
from .metrics import time
from .node import Node
from .protocol import to_serialize
from .protocol.pickle import dumps, loads
from .publish import Datasets
from .security import Security
from .sizeof import sizeof
from .threadpoolexecutor import rejoin
from .worker import dumps_task, get_client, get_worker, secede
from .utils import (All, sync, funcname, ignoring, queue_to_iterator,
tokey, log_errors, str_graph, key_split, format_bytes, asciitable,
thread_state, no_default, PeriodicCallback, LoopRunner,
parse_timedelta, shutting_down)
from .versions import get_versions
logger = logging.getLogger(__name__)
_global_client = [None]
def _get_global_client():
wr = _global_client[0]
return wr and wr()
def _set_global_client(c):
_global_client[0] = weakref.ref(c) if c is not None else None
class Future(WrappedKey):
""" A remotely running computation
A Future is a local proxy to a result running on a remote worker. A user
manages future objects in the local Python process to determine what
happens in the larger cluster.
Parameters
----------
key: str, or tuple
Key of remote data to which this future refers
client: Client
Client that should own this future. Defaults to _get_global_client()
inform: bool
Do we inform the scheduler that we need an update on this future
Examples
--------
Futures typically emerge from Client computations
>>> my_future = client.submit(add, 1, 2) # doctest: +SKIP
We can track the progress and results of a future
>>> my_future # doctest: +SKIP
<Future: status: finished, key: add-8f6e709446674bad78ea8aeecfee188e>
We can get the result or the exception and traceback from the future
>>> my_future.result() # doctest: +SKIP
See Also
--------
Client: Creates futures
"""
_cb_executor = None
_cb_executor_pid = None
def __init__(self, key, client=None, inform=True, state=None):
self.key = key
self._cleared = False
tkey = tokey(key)
self.client = client or _get_global_client()
self.client._inc_ref(tkey)
self._generation = self.client.generation
if tkey in self.client.futures:
self._state = self.client.futures[tkey]
else:
self._state = self.client.futures[tkey] = FutureState()
if inform:
self.client._send_to_scheduler({'op': 'client-desires-keys',
'keys': [tokey(key)],
'client': self.client.id})
if state is not None:
try:
handler = self.client._state_handlers[state]
except KeyError:
pass
else:
handler(key=key)
@property
def executor(self):
return self.client
@property
def status(self):
return self._state.status
def done(self):
""" Is the computation complete? """
return self._state.done()
def result(self, timeout=None):
""" Wait until computation completes, gather result to local process.
If *timeout* seconds are elapsed before returning, a
``dask.distributed.TimeoutError`` is raised.
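Examples
--------
A minimal sketch; ``inc`` is a placeholder function, not part of this
module:
>>> future = client.submit(inc, 10)  # doctest: +SKIP
>>> future.result(timeout=5)  # doctest: +SKIP
11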
"""
if self.client.asynchronous:
return self.client.sync(self._result, callback_timeout=timeout)
# shorten error traceback
result = self.client.sync(self._result, callback_timeout=timeout,
raiseit=False)
if self.status == 'error':
six.reraise(*result)
elif self.status == 'cancelled':
raise result
else:
return result
@gen.coroutine
def _result(self, raiseit=True):
yield self._state.wait()
if self.status == 'error':
exc = clean_exception(self._state.exception,
self._state.traceback)
if raiseit:
six.reraise(*exc)
else:
raise gen.Return(exc)
elif self.status == 'cancelled':
exception = CancelledError(self.key)
if raiseit:
raise exception
else:
raise gen.Return(exception)
else:
result = yield self.client._gather([self])
raise gen.Return(result[0])
@gen.coroutine
def _exception(self):
yield self._state.wait()
if self.status == 'error':
raise gen.Return(self._state.exception)
else:
raise gen.Return(None)
def exception(self, timeout=None, **kwargs):
""" Return the exception of a failed task
If *timeout* seconds are elapsed before returning, a
``dask.distributed.TimeoutError`` is raised.
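Examples
--------
A sketch; ``div`` here is a placeholder for any function that raises:
>>> future = client.submit(div, 1, 0)  # doctest: +SKIP
>>> future.exception()  # doctest: +SKIP
ZeroDivisionError('division by zero')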
See Also
--------
Future.traceback
"""
return self.client.sync(self._exception, callback_timeout=timeout,
**kwargs)
def add_done_callback(self, fn):
""" Call callback on future when callback has finished
The callback ``fn`` should take the future as its only argument. This
will be called regardless of if the future completes successfully,
errs, or is cancelled
The callback is executed in a separate thread.
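Examples
--------
A sketch; ``inc`` is a placeholder function:
>>> def report(fut):
...     print(fut.key, fut.status)
>>> future = client.submit(inc, 1)  # doctest: +SKIP
>>> future.add_done_callback(report)  # doctest: +SKIP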
"""
cls = Future
if cls._cb_executor is None or cls._cb_executor_pid != os.getpid():
cls._cb_executor = ThreadPoolExecutor(1)
cls._cb_executor_pid = os.getpid()
def execute_callback(fut):
try:
fn(fut)
except BaseException:
logger.exception("Error in callback %s of %s:", fn, fut)
self.client.loop.add_callback(done_callback, self,
partial(cls._cb_executor.submit, execute_callback))
def cancel(self, **kwargs):
""" Cancel request to run this future
See Also
--------
Client.cancel
"""
return self.client.cancel([self], **kwargs)
def cancelled(self):
""" Returns True if the future has been cancelled """
return self._state.status == 'cancelled'
@gen.coroutine
def _traceback(self):
yield self._state.wait()
if self.status == 'error':
raise gen.Return(self._state.traceback)
else:
raise gen.Return(None)
def traceback(self, timeout=None, **kwargs):
""" Return the traceback of a failed task
This returns a traceback object. You can inspect this object using the
``traceback`` module. Alternatively if you call ``future.result()``
this traceback will accompany the raised exception.
If *timeout* seconds are elapsed before returning, a
``dask.distributed.TimeoutError`` is raised.
Examples
--------
>>> import traceback # doctest: +SKIP
>>> tb = future.traceback() # doctest: +SKIP
>>> tb = traceback.format_tb(tb) # doctest: +SKIP
[...]
See Also
--------
Future.exception
"""
return self.client.sync(self._traceback, callback_timeout=timeout,
**kwargs)
@property
def type(self):
return self._state.type
def release(self, _in_destructor=False):
# NOTE: this method can be called from different threads
# (see e.g. Client.get() or Future.__del__())
if not self._cleared and self.client.generation == self._generation:
self._cleared = True
self.client.loop.add_callback(self.client._dec_ref, tokey(self.key))
def __getstate__(self):
return (self.key, self.client.scheduler.address)
def __setstate__(self, state):
key, address = state
c = get_client(address)
Future.__init__(self, key, c)
c._send_to_scheduler({'op': 'update-graph', 'tasks': {},
'keys': [tokey(self.key)], 'client': c.id})
def __del__(self):
try:
self.release()
except RuntimeError: # closed event loop
pass
def __repr__(self):
if self.type:
try:
typ = self.type.__name__
except AttributeError:
typ = str(self.type)
return '<Future: status: %s, type: %s, key: %s>' % (self.status,
typ, self.key)
else:
return '<Future: status: %s, key: %s>' % (self.status, self.key)
def _repr_html_(self):
text = '<b>Future: %s</b> ' % html_escape(key_split(self.key))
text += ('<font color="gray">status: </font>'
'<font color="%(color)s">%(status)s</font>, ') % {
'status': self.status,
'color': 'red' if self.status == 'error' else 'black'}
if self.type:
try:
typ = self.type.__name__
except AttributeError:
typ = str(self.type)
text += '<font color="gray">type: </font>%s, ' % typ
text += '<font color="gray">key: </font>%s' % html_escape(str(self.key))
return text
def __await__(self):
return self.result().__await__()
class FutureState(object):
"""A Future's internal state.
This is shared between all Futures with the same key and client.
"""
__slots__ = ('_event', 'status', 'type', 'exception', 'traceback')
def __init__(self):
self._event = None
self.status = 'pending'
self.type = None
def _get_event(self):
# Can't create Event eagerly in constructor as it can fetch
# its IOLoop from the wrong thread
# (https://github.com/tornadoweb/tornado/issues/2189)
event = self._event
if event is None:
event = self._event = Event()
return event
def cancel(self):
self.status = 'cancelled'
self._get_event().set()
def finish(self, type=None):
self.status = 'finished'
self._get_event().set()
if type is not None:
self.type = type
def lose(self):
self.status = 'lost'
self._get_event().clear()
def set_error(self, exception, traceback):
if isinstance(exception, bytes):
try:
exception = loads(exception)
except TypeError:
exception = Exception("Undeserializable exception", exception)
if traceback:
if isinstance(traceback, bytes):
try:
traceback = loads(traceback)
except (TypeError, AttributeError):
traceback = None
else:
traceback = None
self.status = 'error'
self.exception = exception
self.traceback = traceback
self._get_event().set()
def done(self):
return self._event is not None and self._event.is_set()
def reset(self):
self.status = 'pending'
if self._event is not None:
self._event.clear()
@gen.coroutine
def wait(self, timeout=None):
yield self._get_event().wait(timeout)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.status)
@gen.coroutine
def done_callback(future, callback):
""" Coroutine that waits on future, then calls callback """
while future.status == 'pending':
yield future._state.wait()
callback(future)
@partial(normalize_token.register, Future)
def normalize_future(f):
return [f.key, type(f)]
class AllExit(Exception):
"""Custom exception class to exit All(...) early.
"""
class Client(Node):
""" Connect to and drive computation on a distributed Dask cluster
The Client connects users to a dask.distributed compute cluster. It
provides an asynchronous user interface around functions and futures. This
class resembles executors in ``concurrent.futures`` but also allows
``Future`` objects within ``submit/map`` calls.
Parameters
----------
address: string, or Cluster
This can be the address of a ``Scheduler`` server like a string
``'127.0.0.1:8786'`` or a cluster object like ``LocalCluster()``
timeout: int
Timeout duration for initial connection to the scheduler
set_as_default: bool (True)
Claim this scheduler as the global dask scheduler
scheduler_file: string (optional)
Path to a file with scheduler information if available
security: (optional)
Optional security information
asynchronous: bool (False by default)
Set to True if using this client within async/await functions or within
Tornado gen.coroutines. Otherwise this should remain False for normal
use.
name: string (optional)
Gives the client a name that will be included in logs generated on
the scheduler for matters relating to this client
heartbeat_interval: int
Time in milliseconds between heartbeats to scheduler
Examples
--------
Provide cluster's scheduler node address on initialization:
>>> client = Client('127.0.0.1:8786') # doctest: +SKIP
Use ``submit`` method to send individual computations to the cluster
>>> a = client.submit(add, 1, 2) # doctest: +SKIP
>>> b = client.submit(add, 10, 20) # doctest: +SKIP
Continue using submit or map on results to build up larger computations
>>> c = client.submit(add, a, b) # doctest: +SKIP
Gather results with the ``gather`` method.
>>> client.gather(c) # doctest: +SKIP
33
See Also
--------
distributed.scheduler.Scheduler: Internal scheduler
"""
def __init__(self, address=None, loop=None, timeout=no_default,
set_as_default=True, scheduler_file=None,
security=None, asynchronous=False,
name=None, heartbeat_interval=None, **kwargs):
if timeout == no_default:
timeout = config.get('connect-timeout', '10s')
if timeout is not None:
timeout = parse_timedelta(timeout, 's')
self._timeout = timeout
self.futures = dict()
self.refcount = defaultdict(lambda: 0)
self.coroutines = []
if name is None and 'client-name' in config:
name = config['client-name']
self.id = type(self).__name__ + ('-' + name + '-' if name else '-') + str(uuid.uuid1(clock_seq=os.getpid()))
self.generation = 0
self.status = 'newly-created'
self._pending_msg_buffer = []
self.extensions = {}
self.scheduler_file = scheduler_file
self._startup_kwargs = kwargs
self.cluster = None
self.scheduler = None
self._scheduler_identity = {}
self._lock = threading.Lock()
self._refcount_lock = threading.Lock()
self.datasets = Datasets(self)
# Communication
self.security = security or Security()
self.scheduler_comm = None
assert isinstance(self.security, Security)
self.connection_args = self.security.get_connection_args('client')
self._connecting_to_scheduler = False
self._asynchronous = asynchronous
self._should_close_loop = not loop
self._loop_runner = LoopRunner(loop=loop, asynchronous=asynchronous)
self.loop = self._loop_runner.loop
if heartbeat_interval is None:
heartbeat_interval = config.get('client-heartbeat-interval', 5000)
heartbeat_interval = parse_timedelta(heartbeat_interval, default='ms')
self._periodic_callbacks = dict()
self._periodic_callbacks['scheduler-info'] = PeriodicCallback(
self._update_scheduler_info, 2000, io_loop=self.loop
)
self._periodic_callbacks['heartbeat'] = PeriodicCallback(
self._heartbeat,
heartbeat_interval * 1000,
io_loop=self.loop
)
if address is None and 'scheduler-address' in config:
address = config['scheduler-address']
if address:
logger.info("Config value `scheduler-address` found: %s",
address)
if isinstance(address, rpc):
self.scheduler = address
elif hasattr(address, "scheduler_address"):
# It's a LocalCluster or LocalCluster-compatible object
self.cluster = address
self._start_arg = address
if set_as_default:
self._previous_get = _globals.get('get')
dask.set_options(get=self.get)
self._previous_shuffle = _globals.get('shuffle')
dask.set_options(shuffle='tasks')
self._handlers = {
'key-in-memory': self._handle_key_in_memory,
'lost-data': self._handle_lost_data,
'cancelled-key': self._handle_cancelled_key,
'task-erred': self._handle_task_erred,
'restart': self._handle_restart,
'error': self._handle_error
}
self._state_handlers = {
'memory': self._handle_key_in_memory,
'lost': self._handle_lost_data,
'erred': self._handle_task_erred
}
super(Client, self).__init__(connection_args=self.connection_args,
io_loop=self.loop)
self.start(timeout=timeout)
from distributed.recreate_exceptions import ReplayExceptionClient
ReplayExceptionClient(self)
@classmethod
def current(cls):
""" Return global client if one exists, otherwise raise ValueError """
return default_client()
@property
def asynchronous(self):
""" Are we running in the event loop?
This is true if the user signaled that we might be when creating the
client as in the following::
client = Client(asynchronous=True)
However, we override this expectation if we can definitively tell that
we are running from a thread that is not the event loop. This is
common when calling get_client() from within a worker task. Even
though the client was originally created in asynchronous mode we may
find ourselves in contexts when it is better to operate synchronously.
"""
return self._asynchronous and self.loop is IOLoop.current()
def sync(self, func, *args, **kwargs):
asynchronous = kwargs.pop('asynchronous', None)
if asynchronous or self.asynchronous or getattr(thread_state, 'asynchronous', False):
callback_timeout = kwargs.pop('callback_timeout', None)
future = func(*args, **kwargs)
if callback_timeout is not None:
future = gen.with_timeout(timedelta(seconds=callback_timeout),
future)
return future
else:
return sync(self.loop, func, *args, **kwargs)
def __repr__(self):
# Note: avoid doing I/O here...
info = self._scheduler_identity
addr = info.get('address')
if addr:
workers = info.get('workers', {})
nworkers = len(workers)
ncores = sum(w['ncores'] for w in workers.values())
return '<%s: scheduler=%r processes=%d cores=%d>' % (
self.__class__.__name__, addr, nworkers, ncores)
elif self.scheduler is not None:
return '<%s: scheduler=%r>' % (
self.__class__.__name__, self.scheduler.address)
else:
return '<%s: not connected>' % (self.__class__.__name__,)
def _repr_html_(self):
if self.cluster and hasattr(self.cluster, 'scheduler'):
info = self.cluster.scheduler.identity()
elif (self._loop_runner.is_started() and
self.scheduler and
not (self.asynchronous and self.loop is IOLoop.current())):
info = sync(self.loop, self.scheduler.identity)
else:
info = False
if self.scheduler is not None:
text = ("<h3>Client</h3>\n"
"<ul>\n"
" <li><b>Scheduler: </b>%s\n") % self.scheduler.address
else:
text = ("<h3>Client</h3>\n"
"<ul>\n"
" <li><b>Scheduler: not connected</b>\n")
if info and 'bokeh' in info['services']:
protocol, rest = self.scheduler.address.split('://')
port = info['services']['bokeh']
if protocol == 'inproc':
host = 'localhost'
else:
host = rest.split(':')[0]
template = config.get('diagnostics-link', 'http://{host}:{port}/status')
address = template.format(host=host, port=port, **os.environ)
text += " <li><b>Dashboard: </b><a href='%(web)s' target='_blank'>%(web)s</a>\n" % {'web': address}
text += "</ul>\n"
if info:
workers = len(info['workers'])
cores = sum(w['ncores'] for w in info['workers'].values())
memory = sum(w['memory_limit'] for w in info['workers'].values())
memory = format_bytes(memory)
text2 = ("<h3>Cluster</h3>\n"
"<ul>\n"
" <li><b>Workers: </b>%d</li>\n"
" <li><b>Cores: </b>%d</li>\n"
" <li><b>Memory: </b>%s</li>\n"
"</ul>\n") % (workers, cores, memory)
return ('<table style="border: 2px solid white;">\n'
'<tr>\n'
'<td style="vertical-align: top; border: 0px solid white">\n%s</td>\n'
'<td style="vertical-align: top; border: 0px solid white">\n%s</td>\n'
'</tr>\n</table>') % (text, text2)
else:
return text
def start(self, **kwargs):
""" Start scheduler running in separate thread """
if self.status != 'newly-created':
return
self._loop_runner.start()
_set_global_client(self)
self.status = 'connecting'
if self.asynchronous:
self._started = self._start(**kwargs)
else:
sync(self.loop, self._start, **kwargs)
def __await__(self):
return self._started.__await__()
def _send_to_scheduler_safe(self, msg):
if self.status in ('running', 'closing'):
try:
self.scheduler_comm.send(msg)
except CommClosedError:
if self.status == 'running':
raise
elif self.status in ('connecting', 'newly-created'):
self._pending_msg_buffer.append(msg)
def _send_to_scheduler(self, msg):
if self.status in ('running', 'closing', 'connecting', 'newly-created'):
self.loop.add_callback(self._send_to_scheduler_safe, msg)
else:
raise Exception("Tried sending message after closing. Status: %s\n"
"Message: %s" % (self.status, msg))
@gen.coroutine
def _start(self, timeout=no_default, **kwargs):
if timeout == no_default:
timeout = self._timeout
if timeout is not None:
timeout = parse_timedelta(timeout, 's')
address = self._start_arg
if self.cluster is not None:
# Ensure the cluster is started (no-op if already running)
try:
yield self.cluster._start()
except AttributeError: # Some clusters don't have this method
pass
except Exception:
logger.info("Tried to start cluster and received an error. "
"Proceeding.", exc_info=True)
address = self.cluster.scheduler_address
elif self.scheduler_file is not None:
while not os.path.exists(self.scheduler_file):
yield gen.sleep(0.01)
for i in range(10):
try:
with open(self.scheduler_file) as f:
cfg = json.load(f)
address = cfg['address']
break
except (ValueError, KeyError): # JSON file not yet flushed
yield gen.sleep(0.01)
elif self._start_arg is None:
from .deploy import LocalCluster
try:
self.cluster = LocalCluster(loop=self.loop, asynchronous=True,
**self._startup_kwargs)
yield self.cluster
except (OSError, socket.error) as e:
if e.errno != errno.EADDRINUSE:
raise
# The default port was taken, use a random one
self.cluster = LocalCluster(scheduler_port=0, loop=self.loop,
asynchronous=True,
**self._startup_kwargs)
yield self.cluster
# Wait for all workers to be ready
# XXX should be a LocalCluster method instead
while (not self.cluster.workers or
len(self.cluster.scheduler.workers) < len(self.cluster.workers)):
yield gen.sleep(0.01)
address = self.cluster.scheduler_address
if self.scheduler is None:
self.scheduler = rpc(address, timeout=timeout,
connection_args=self.connection_args)
self.scheduler_comm = None
yield self._ensure_connected(timeout=timeout)
for pc in self._periodic_callbacks.values():
pc.start()
self.coroutines.append(self._handle_report())
raise gen.Return(self)
@gen.coroutine
def _reconnect(self, timeout=0.1):
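# The scheduler connection dropped: cancel all outstanding futures,
# then retry _ensure_connected (sleeping `timeout` seconds between
# attempts on EnvironmentError) until the client is connected again.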
with log_errors():
assert self.scheduler_comm.comm.closed()
self.status = 'connecting'
self.scheduler_comm = None
for st in self.futures.values():
st.cancel()
self.futures.clear()
while self.status == 'connecting':
try:
yield self._ensure_connected()
break
except EnvironmentError:
yield gen.sleep(timeout)
@gen.coroutine
def _ensure_connected(self, timeout=None):
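# Connect to the scheduler unless a connection already exists or is in
# progress: open a comm, register this client, wait for the
# 'stream-start' handshake, wrap the comm in a BatchedSend, and flush
# any messages buffered while disconnected.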
if (self.scheduler_comm and not self.scheduler_comm.closed() or
self._connecting_to_scheduler or self.scheduler is None):
return
self._connecting_to_scheduler = True
try:
comm = yield connect(self.scheduler.address, timeout=timeout,
connection_args=self.connection_args)
yield self._update_scheduler_info()
yield comm.write({'op': 'register-client',
'client': self.id,
'reply': False})
finally:
self._connecting_to_scheduler = False
msg = yield comm.read()
assert len(msg) == 1
assert msg[0]['op'] == 'stream-start'
bcomm = BatchedSend(interval='10ms', loop=self.loop)
bcomm.start(comm)
self.scheduler_comm = bcomm
_set_global_client(self)
self.status = 'running'
for msg in self._pending_msg_buffer:
self._send_to_scheduler(msg)
del self._pending_msg_buffer[:]
logger.debug("Started scheduling coroutines. Synchronized")
@gen.coroutine
def _update_scheduler_info(self):
if self.status not in ('running', 'connecting'):
return
try:
self._scheduler_identity = yield self.scheduler.identity()
except EnvironmentError:
logger.debug("Not able to query scheduler for identity")
def _heartbeat(self):
if self.scheduler_comm:
self.scheduler_comm.send({'op': 'heartbeat'})
def __enter__(self):
if not self._loop_runner.is_started():
self.start()
return self
@gen.coroutine
def __aenter__(self):
yield self._started
raise gen.Return(self)
@gen.coroutine
def __aexit__(self, typ, value, traceback):
yield self._close()
def __exit__(self, type, value, traceback):
self.close()
def __del__(self):
self.close()
def _inc_ref(self, key):
with self._refcount_lock:
self.refcount[key] += 1
def _dec_ref(self, key):
with self._refcount_lock:
self.refcount[key] -= 1
if self.refcount[key] == 0:
del self.refcount[key]
self._release_key(key)
def _release_key(self, key):
""" Release key from distributed memory """
logger.debug("Release key %s", key)
st = self.futures.pop(key, None)
if st is not None:
st.cancel()
if self.status != 'closed':
self._send_to_scheduler({'op': 'client-releases-keys',
'keys': [key],
'client': self.id})
@gen.coroutine
def _handle_report(self):
""" Listen to scheduler """
with log_errors():
try:
while True:
if self.scheduler_comm is None:
break
try:
msgs = yield self.scheduler_comm.comm.read()
except CommClosedError:
if self.status == 'running':
logger.info("Client report stream closed to scheduler")
logger.info("Reconnecting...")
self.status = 'connecting'
yield self._reconnect()
continue
else:
break
if not isinstance(msgs, list):
msgs = [msgs]
breakout = False
for msg in msgs:
logger.debug("Client receives message %s", msg)
if 'status' in msg and 'error' in msg['status']:
six.reraise(*clean_exception(**msg))
op = msg.pop('op')
if op == 'close' or op == 'stream-closed':
breakout = True
break
try:
handler = self._handlers[op]
handler(**msg)
except Exception as e:
logger.exception(e)
if breakout:
break
except CancelledError:
pass
def _handle_key_in_memory(self, key=None, type=None, workers=None):
state = self.futures.get(key)
if state is not None:
if type and not state.type: # Type exists and not yet set
try:
type = loads(type)
except Exception:
type = None
# Here, `type` may be a str if actual type failed
# serializing in Worker
else:
type = None
state.finish(type)
def _handle_lost_data(self, key=None):
state = self.futures.get(key)
if state is not None:
state.lose()
def _handle_cancelled_key(self, key=None):
state = self.futures.get(key)
if state is not None:
state.cancel()
def _handle_task_erred(self, key=None, exception=None, traceback=None):
state = self.futures.get(key)
if state is not None:
state.set_error(exception, traceback)
def _handle_restart(self):
logger.info("Receive restart signal from scheduler")
for state in self.futures.values():
state.cancel()
self.futures.clear()
with ignoring(AttributeError):
self._restart_event.set()
def _handle_error(self, exception=None):
logger.warning("Scheduler exception:")
logger.exception(exception)
@gen.coroutine
def _close(self, fast=False):
""" Send close signal and wait until scheduler completes """
self.status = 'closing'
with log_errors():
for pc in self._periodic_callbacks.values():
pc.stop()
self._scheduler_identity = {}
with ignoring(AttributeError):
dask.set_options(get=self._previous_get)
with ignoring(AttributeError):
dask.set_options(shuffle=self._previous_shuffle)
if self.get == _globals.get('get'):
del _globals['get']
if self.status == 'closed':
raise gen.Return()
if self.scheduler_comm and self.scheduler_comm.comm and not self.scheduler_comm.comm.closed():
self._send_to_scheduler({'op': 'close-stream'})
yield self.scheduler_comm.close()
for key in list(self.futures):
self._release_key(key=key)
if self._start_arg is None:
with ignoring(AttributeError):
yield self.cluster._close()
self.status = 'closed'
if _get_global_client() is self:
_set_global_client(None)
coroutines = set(self.coroutines)
for f in self.coroutines:
# cancel() works on asyncio futures (Tornado 5)
# but is a no-op on Tornado futures
f.cancel()
if f.cancelled():
coroutines.remove(f)
del self.coroutines[:]
if not fast:
with ignoring(TimeoutError):
yield gen.with_timeout(timedelta(seconds=2),
list(coroutines))
with ignoring(AttributeError):
self.scheduler.close_rpc()
self.scheduler = None
self.status = 'closed'
_shutdown = _close
def close(self, timeout=no_default):
""" Close this client
Clients will also close automatically when your Python session ends
If you started a client without arguments like ``Client()`` then this
will also close the local cluster that was started at the same time.
See Also
--------
Client.restart
"""
if timeout == no_default:
timeout = self._timeout * 2
# XXX handling of self.status here is not thread-safe
if self.status == 'closed':
return
self.status = 'closing'
if self.asynchronous:
future = self._close()
if timeout:
future = gen.with_timeout(timedelta(seconds=timeout), future)
return future
if self._start_arg is None:
with ignoring(AttributeError):
self.cluster.close()
sync(self.loop, self._close, fast=True)
assert self.status == 'closed'
if self._should_close_loop and not shutting_down():
self._loop_runner.stop()
with ignoring(AttributeError):
dask.set_options(get=self._previous_get)
with ignoring(AttributeError):
dask.set_options(shuffle=self._previous_shuffle)
if self.get == _globals.get('get'):
del _globals['get']
def shutdown(self, *args, **kwargs):
""" Deprecated, see close instead
This was deprecated because "shutdown" was sometimes confusingly
thought to refer to the cluster rather than the client
"""
warnings.warn("Shutdown is deprecated. Please use close instead")
return self.close(*args, **kwargs)
def get_executor(self, **kwargs):
"""
Return a concurrent.futures Executor for submitting tasks on this Client
Parameters
----------
**kwargs:
Any submit()- or map()- compatible arguments, such as
`workers` or `resources`.
Returns
-------
An Executor object that's fully compatible with the concurrent.futures
API.
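Examples
--------
A sketch; ``inc`` is a placeholder function:
>>> e = client.get_executor(pure=False)  # doctest: +SKIP
>>> future = e.submit(inc, 1)  # doctest: +SKIP
>>> future.result()  # doctest: +SKIP
2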
"""
return ClientExecutor(self, **kwargs)
def submit(self, func, *args, **kwargs):
""" Submit a function application to the scheduler
Parameters
----------
func: callable
*args:
**kwargs:
pure: bool (defaults to True)
Whether or not the function is pure. Set ``pure=False`` for
impure functions like ``np.random.random``.
workers: set, iterable of sets
A set of worker hostnames on which computations may be performed.
Leave empty to default to all workers (common case)
key: str
Unique identifier for the task. Defaults to function-name and hash
allow_other_workers: bool (defaults to False)
Used with `workers`. Indicates whether or not the computations
may be performed on workers that are not in the `workers` set(s).
retries: int (default to 0)
Number of allowed automatic retries if the task fails
priority: Number
Optional prioritization of task. Zero is default.
Higher priorities take precedence
fifo_timeout: str timedelta (default '100ms')
Allowed amount of time between calls to consider the same priority
Examples
--------
>>> c = client.submit(add, a, b) # doctest: +SKIP
Returns
-------
Future
See Also
--------
Client.map: Submit on many arguments at once
"""
if not callable(func):
raise TypeError("First input to submit must be a callable function")
key = kwargs.pop('key', None)
pure = kwargs.pop('pure', True)
workers = kwargs.pop('workers', None)
resources = kwargs.pop('resources', None)
retries = kwargs.pop('retries', None)
priority = kwargs.pop('priority', 0)
fifo_timeout = kwargs.pop('fifo_timeout', '100ms')
allow_other_workers = kwargs.pop('allow_other_workers', False)
if allow_other_workers not in (True, False, None):
raise TypeError("allow_other_workers= must be True or False")
if key is None:
if pure:
key = funcname(func) + '-' + tokenize(func, kwargs, *args)
else:
key = funcname(func) + '-' + str(uuid.uuid4())
skey = tokey(key)
with self._lock:
if skey in self.futures:
return Future(key, self, inform=False)
if allow_other_workers and workers is None:
raise ValueError("Only use allow_other_workers= if using workers=")
if isinstance(workers, six.string_types + (Number,)):
workers = [workers]
if workers is not None:
restrictions = {skey: workers}
loose_restrictions = [skey] if allow_other_workers else []
else:
restrictions = {}
loose_restrictions = []
if kwargs:
dsk = {skey: (apply, func, list(args), kwargs)}
else:
dsk = {skey: (func,) + tuple(args)}
futures = self._graph_to_futures(dsk, [skey], restrictions,
loose_restrictions, priority={skey: 0},
user_priority=priority,
resources={skey: resources} if resources else None,
retries=retries,
fifo_timeout=fifo_timeout)
logger.debug("Submit %s(...), %s", funcname(func), key)
return futures[skey]
def _threaded_map(self, q_out, func, qs_in, **kwargs):
""" Internal function for mapping Queue """
if isqueue(qs_in[0]):
get = pyQueue.get
elif isinstance(qs_in[0], Iterator):
get = next
else:
raise NotImplementedError()
while True:
try:
args = [get(q) for q in qs_in]
except StopIteration as e:
q_out.put(e)
break
f = self.submit(func, *args, **kwargs)
q_out.put(f)
def map(self, func, *iterables, **kwargs):
""" Map a function on a sequence of arguments
Arguments can be normal objects or Futures
Parameters
----------
func: callable
iterables: Iterables, Iterators, or Queues
key: str, list
Prefix for task names if string. Explicit names if list.
pure: bool (defaults to True)
Whether or not the function is pure. Set ``pure=False`` for
impure functions like ``np.random.random``.
workers: set, iterable of sets
A set of worker hostnames on which computations may be performed.
Leave empty to default to all workers (common case)
retries: int (default to 0)
Number of allowed automatic retries if a task fails
priority: Number
Optional prioritization of task. Zero is default.
Higher priorities take precedence
fifo_timeout: str timedelta (default '100ms')
Allowed amount of time between calls to consider the same priority
Examples
--------
>>> L = client.map(func, sequence) # doctest: +SKIP
Returns
-------
List, iterator, or Queue of futures, depending on the type of the
inputs.
See also
--------
Client.submit: Submit a single function
"""
if not callable(func):
raise TypeError("First input to map must be a callable function")
if (all(map(isqueue, iterables)) or
all(isinstance(i, Iterator) for i in iterables)):
maxsize = kwargs.pop('maxsize', 0)
q_out = pyQueue(maxsize=maxsize)
t = threading.Thread(target=self._threaded_map,
name="Threaded map()",
args=(q_out, func, iterables),
kwargs=kwargs)
t.daemon = True
t.start()
if isqueue(iterables[0]):
return q_out
else:
return queue_to_iterator(q_out)
key = kwargs.pop('key', None)
key = key or funcname(func)
pure = kwargs.pop('pure', True)
workers = kwargs.pop('workers', None)
retries = kwargs.pop('retries', None)
resources = kwargs.pop('resources', None)
user_priority = kwargs.pop('priority', 0)
allow_other_workers = kwargs.pop('allow_other_workers', False)
fifo_timeout = kwargs.pop('fifo_timeout', '100ms')
if allow_other_workers and workers is None:
raise ValueError("Only use allow_other_workers= if using workers=")
iterables = list(zip(*zip(*iterables)))
if isinstance(key, list):
keys = key
else:
if pure:
keys = [key + '-' + tokenize(func, kwargs, *args)
for args in zip(*iterables)]
else:
uid = str(uuid.uuid4())
keys = [key + '-' + uid + '-' + str(i)
for i in range(min(map(len, iterables)))] if iterables else []
if not kwargs:
dsk = {key: (func,) + args
for key, args in zip(keys, zip(*iterables))}
else:
dsk = {key: (apply, func, (tuple, list(args)), kwargs)
for key, args in zip(keys, zip(*iterables))}
if isinstance(workers, six.string_types + (Number,)):
workers = [workers]
if isinstance(workers, (list, set)):
if workers and isinstance(first(workers), (list, set)):
if len(workers) != len(keys):
raise ValueError("You only provided %d worker restrictions"
" for a sequence of length %d" % (len(workers), len(keys)))
restrictions = dict(zip(keys, workers))
else:
restrictions = {k: workers for k in keys}
elif workers is None:
restrictions = {}
else:
raise TypeError("Workers must be a list or set of workers or None")
if allow_other_workers not in (True, False, None):
raise TypeError("allow_other_workers= must be True or False")
if allow_other_workers is True:
loose_restrictions = set(keys)
else:
loose_restrictions = set()
priority = dict(zip(keys, range(len(keys))))
if resources:
resources = {k: resources for k in keys}
else:
resources = None
futures = self._graph_to_futures(dsk, keys, restrictions,
loose_restrictions,
priority=priority,
resources=resources,
retries=retries,
user_priority=user_priority,
fifo_timeout=fifo_timeout)
logger.debug("map(%s, ...)", funcname(func))
return [futures[tokey(k)] for k in keys]
@gen.coroutine
def _gather(self, futures, errors='raise', direct=None, local_worker=None):
futures2, keys = unpack_remotedata(futures, byte_keys=True)
keys = [tokey(key) for key in keys]
bad_data = dict()
if direct is None:
try:
w = get_worker()
except Exception:
direct = False
else:
if w.scheduler.address == self.scheduler.address:
direct = True
@gen.coroutine
def wait(k):
""" Want to stop the All(...) early if we find an error """
st = self.futures[k]
yield st.wait()
if st.status != 'finished' and errors == 'raise':
raise AllExit()
while True:
logger.debug("Waiting on futures to clear before gather")
with ignoring(AllExit):
yield All([wait(key) for key in keys if key in self.futures])
failed = ('error', 'cancelled')
exceptions = set()
bad_keys = set()
for key in keys:
if (key not in self.futures or
self.futures[key].status in failed):
exceptions.add(key)
if errors == 'raise':
try:
st = self.futures[key]
exception = st.exception
traceback = st.traceback
except (AttributeError, KeyError):
six.reraise(CancelledError,
CancelledError(key),
None)
else:
six.reraise(type(exception),
exception,
traceback)
if errors == 'skip':
bad_keys.add(key)
bad_data[key] = None
else:
raise ValueError("Bad value, `errors=%s`" % errors)
keys = [k for k in keys if k not in bad_keys]
data = {}
if local_worker: # look inside local worker
data.update({k: local_worker.data[k]
for k in keys
if k in local_worker.data})
keys = [k for k in keys if k not in data]
if direct or local_worker: # gather directly from workers
who_has = yield self.scheduler.who_has(keys=keys)
data2, missing_keys, missing_workers = yield gather_from_workers(
who_has, rpc=self.rpc, close=False)
response = {'status': 'OK', 'data': data2}
if missing_keys:
keys2 = [key for key in keys if key not in data2]
response = yield self.scheduler.gather(keys=keys2)
if response['status'] == 'OK':
response['data'].update(data2)
else: # ask scheduler to gather data for us
response = yield self.scheduler.gather(keys=keys)
if response['status'] == 'error':
log = logger.warning if errors == 'raise' else logger.debug
log("Couldn't gather %s keys, rescheduling %s", (len(response['keys']), response['keys']))
for key in response['keys']:
self._send_to_scheduler({'op': 'report-key',
'key': key})
for key in response['keys']:
self.futures[key].reset()
else:
break
if bad_data and errors == 'skip' and isinstance(futures2, list):
futures2 = [f for f in futures2 if f not in bad_data]
data.update(response['data'])
result = pack_data(futures2, merge(data, bad_data))
raise gen.Return(result)
def _threaded_gather(self, qin, qout, **kwargs):
""" Internal function for gathering Queue """
while True:
L = [qin.get()]
while qin.empty():
try:
L.append(qin.get_nowait())
except Empty:
break
results = self.gather(L, **kwargs)
for item in results:
qout.put(item)
def gather(self, futures, errors='raise', maxsize=0, direct=None,
asynchronous=None):
""" Gather futures from distributed memory
Accepts a future, nested container of futures, iterator, or queue.
The return type will match the input type.
Parameters
----------
futures: Collection of futures
This can be a possibly nested collection of Future objects.
Collections can be lists, sets, iterators, queues or dictionaries
errors: string
Either 'raise' or 'skip' if we should raise if a future has erred
or skip its inclusion in the output collection
maxsize: int
If the input is a queue then this produces an output queue with a
maximum size.
Returns
-------
results: a collection of the same type as the input, but now with
gathered results rather than futures
Examples
--------
>>> from operator import add # doctest: +SKIP
>>> c = Client('127.0.0.1:8787') # doctest: +SKIP
>>> x = c.submit(add, 1, 2) # doctest: +SKIP
>>> c.gather(x) # doctest: +SKIP
3
>>> c.gather([x, [x], x]) # support lists and dicts # doctest: +SKIP
[3, [3], 3]
>>> seq = c.gather(iter([x, x])) # support iterators # doctest: +SKIP
>>> next(seq) # doctest: +SKIP
3
See Also
--------
Client.scatter: Send data out to cluster
"""
if isqueue(futures):
qout = pyQueue(maxsize=maxsize)
t = threading.Thread(target=self._threaded_gather,
name="Threaded gather()",
args=(futures, qout),
kwargs={'errors': errors, 'direct': direct})
t.daemon = True
t.start()
return qout
elif isinstance(futures, Iterator):
return (self.gather(f, errors=errors, direct=direct)
for f in futures)
else:
if hasattr(thread_state, 'execution_state'): # within worker task
local_worker = thread_state.execution_state['worker']
else:
local_worker = None
return self.sync(self._gather, futures, errors=errors,
direct=direct, local_worker=local_worker,
asynchronous=asynchronous)
@gen.coroutine
def _scatter(self, data, workers=None, broadcast=False, direct=None,
local_worker=None, timeout=no_default, hash=True):
if timeout == no_default:
timeout = self._timeout
if isinstance(workers, six.string_types + (Number,)):
workers = [workers]
if isinstance(data, dict) and not all(isinstance(k, (bytes, unicode))
for k in data):
d = yield self._scatter(keymap(tokey, data), workers, broadcast)
raise gen.Return({k: d[tokey(k)] for k in data})
if isinstance(data, type(range(0))):
data = list(data)
input_type = type(data)
names = False
unpack = False
if isinstance(data, Iterator):
data = list(data)
if isinstance(data, (set, frozenset)):
data = list(data)
if not isinstance(data, (dict, list, tuple, set, frozenset)):
unpack = True
data = [data]
if isinstance(data, (list, tuple)):
if hash:
names = [type(x).__name__ + '-' + tokenize(x) for x in data]
else:
names = [type(x).__name__ + '-' + uuid.uuid4().hex for x in data]
data = dict(zip(names, data))
assert isinstance(data, dict)
types = valmap(type, data)
if direct is None:
try:
w = get_worker()
except Exception:
direct = False
else:
if w.scheduler.address == self.scheduler.address:
direct = True
if local_worker: # running within task
local_worker.update_data(data=data, report=False)
yield self.scheduler.update_data(
who_has={key: [local_worker.address] for key in data},
nbytes=valmap(sizeof, data),
client=self.id)
else:
data2 = valmap(to_serialize, data)
if direct:
ncores = None
start = time()
while not ncores:
if ncores is not None:
yield gen.sleep(0.1)
if time() > start + timeout:
raise gen.TimeoutError("No valid workers found")
ncores = yield self.scheduler.ncores(workers=workers)
if not ncores:
raise ValueError("No valid workers")
_, who_has, nbytes = yield scatter_to_workers(ncores, data2,
report=False,
rpc=self.rpc)
yield self.scheduler.update_data(who_has=who_has,
nbytes=nbytes,
client=self.id)
else:
yield self.scheduler.scatter(data=data2, workers=workers,
client=self.id,
broadcast=broadcast)
out = {k: Future(k, self, inform=False) for k in data}
for key, typ in types.items():
self.futures[key].finish(type=typ)
if direct and broadcast:
n = None if broadcast is True else broadcast
yield self._replicate(list(out.values()), workers=workers, n=n)
if issubclass(input_type, (list, tuple, set, frozenset)):
out = input_type(out[k] for k in names)
if unpack:
assert len(out) == 1
out = list(out.values())[0]
raise gen.Return(out)
def _threaded_scatter(self, q_or_i, qout, **kwargs):
""" Internal function for scattering Iterable/Queue data """
while True:
if isqueue(q_or_i):
L = [q_or_i.get()]
while not q_or_i.empty():
try:
L.append(q_or_i.get_nowait())
except Empty:
break
else:
try:
L = [next(q_or_i)]
except StopIteration as e:
qout.put(e)
break
futures = self.scatter(L, **kwargs)
for future in futures:
qout.put(future)
def scatter(self, data, workers=None, broadcast=False, direct=None,
hash=True, maxsize=0, timeout=no_default, asynchronous=None):
""" Scatter data into distributed memory
This moves data from the local client process into the workers of the
distributed scheduler. Note that it is often better to submit jobs to
your workers to have them load the data rather than loading data
locally and then scattering it out to them.
Parameters
----------
data: list, iterator, dict, Queue, or object
Data to scatter out to workers. Output type matches input type.
workers: list of tuples (optional)
Optionally constrain locations of data.
Specify workers as hostname/port pairs, e.g. ``('127.0.0.1', 8787)``.
broadcast: bool (defaults to False)
Whether to send each data element to all workers.
By default we round-robin based on number of cores.
direct: bool (defaults to automatically check)
Send data directly to workers, bypassing the central scheduler
This avoids burdening the scheduler but assumes that the client is
able to talk directly with the workers.
maxsize: int (optional)
Maximum size of queue if using queues, 0 implies infinite
hash: bool (optional)
Whether or not to hash data to determine key.
If False then this uses a random key
Returns
-------
List, dict, iterator, or queue of futures matching the type of input.
Examples
--------
>>> c = Client('127.0.0.1:8787') # doctest: +SKIP
>>> c.scatter(1) # doctest: +SKIP
<Future: status: finished, key: c0a8a20f903a4915b94db8de3ea63195>
>>> c.scatter([1, 2, 3]) # doctest: +SKIP
[<Future: status: finished, key: c0a8a20f903a4915b94db8de3ea63195>,
<Future: status: finished, key: 58e78e1b34eb49a68c65b54815d1b158>,
<Future: status: finished, key: d3395e15f605bc35ab1bac6341a285e2>]
>>> c.scatter({'x': 1, 'y': 2, 'z': 3}) # doctest: +SKIP
{'x': <Future: status: finished, key: x>,
'y': <Future: status: finished, key: y>,
'z': <Future: status: finished, key: z>}
Constrain location of data to subset of workers
>>> c.scatter([1, 2, 3], workers=[('hostname', 8788)]) # doctest: +SKIP
Handle streaming sequences of data with iterators or queues
>>> seq = c.scatter(iter([1, 2, 3])) # doctest: +SKIP
>>> next(seq) # doctest: +SKIP
<Future: status: finished, key: c0a8a20f903a4915b94db8de3ea63195>,
Broadcast data to all workers
>>> [future] = c.scatter([element], broadcast=True) # doctest: +SKIP
See Also
--------
Client.gather: Gather data back to local process
"""
if timeout == no_default:
timeout = self._timeout
if isqueue(data) or isinstance(data, Iterator):
logger.debug("Starting thread for streaming data")
qout = pyQueue(maxsize=maxsize)
t = threading.Thread(target=self._threaded_scatter,
name="Threaded scatter()",
args=(data, qout),
kwargs={'workers': workers,
'broadcast': broadcast})
t.daemon = True
t.start()
if isqueue(data):
return qout
else:
return queue_to_iterator(qout)
else:
if hasattr(thread_state, 'execution_state'): # within worker task
local_worker = thread_state.execution_state['worker']
else:
local_worker = None
return self.sync(self._scatter, data, workers=workers,
broadcast=broadcast, direct=direct,
local_worker=local_worker, timeout=timeout,
asynchronous=asynchronous, hash=hash)
@gen.coroutine
def _cancel(self, futures, force=False):
keys = list({tokey(f.key) for f in futures_of(futures)})
yield self.scheduler.cancel(keys=keys, client=self.id, force=force)
for k in keys:
st = self.futures.pop(k, None)
if st is not None:
st.cancel()
def cancel(self, futures, asynchronous=None, force=False):
"""
Cancel running futures
This stops future tasks from being scheduled if they have not yet run
and deletes them if they have already run. After calling, this result
and all dependent results will no longer be accessible
Parameters
----------
futures: list of Futures
force: boolean (False)
Cancel this future even if other clients desire it
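Examples
--------
A sketch; ``slow`` is a placeholder function:
>>> futures = client.map(slow, range(10))  # doctest: +SKIP
>>> client.cancel(futures)  # doctest: +SKIP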
"""
return self.sync(self._cancel, futures, asynchronous=asynchronous,
force=force)
@gen.coroutine
def _publish_dataset(self, **kwargs):
with log_errors():
coroutines = []
for name, data in kwargs.items():
keys = [tokey(f.key) for f in futures_of(data)]
coroutines.append(self.scheduler.publish_put(keys=keys,
name=tokey(name), data=dumps(data), client=self.id))
yield coroutines
def publish_dataset(self, **kwargs):
"""
Publish named datasets to scheduler
This stores a named reference to a dask collection or list of futures
on the scheduler. These references are available to other Clients
which can download the collection or futures with ``get_dataset``.
Datasets are not immediately computed. You may wish to call
``Client.persist`` prior to publishing a dataset.
Parameters
----------
kwargs: dict
named collections to publish on the scheduler
Examples
--------
Publishing client:
>>> df = dd.read_csv('s3://...') # doctest: +SKIP
>>> df = c.persist(df) # doctest: +SKIP
>>> c.publish_dataset(my_dataset=df) # doctest: +SKIP
Receiving client:
>>> c.list_datasets() # doctest: +SKIP
['my_dataset']
>>> df2 = c.get_dataset('my_dataset') # doctest: +SKIP
Returns
-------
None
See Also
--------
Client.list_datasets
Client.get_dataset
Client.unpublish_dataset
Client.persist
"""
return self.sync(self._publish_dataset, **kwargs)
def unpublish_dataset(self, name, **kwargs):
"""
Remove named datasets from scheduler
Examples
--------
>>> c.list_datasets() # doctest: +SKIP
['my_dataset']
>>> c.unpublish_dataset('my_dataset') # doctest: +SKIP
>>> c.list_datasets() # doctest: +SKIP
[]
See Also
--------
Client.publish_dataset
"""
return self.sync(self.scheduler.publish_delete, name=name, **kwargs)
def list_datasets(self, **kwargs):
"""
List named datasets available on the scheduler
See Also
--------
Client.publish_dataset
Client.get_dataset
"""
return self.sync(self.scheduler.publish_list, **kwargs)
@gen.coroutine
def _get_dataset(self, name):
out = yield self.scheduler.publish_get(name=name, client=self.id)
if out is None:
raise KeyError("Dataset '%s' not found" % name)
with temp_default_client(self):
data = loads(out['data'])
raise gen.Return(data)
def get_dataset(self, name, **kwargs):
"""
Get named dataset from the scheduler
See Also
--------
Client.publish_dataset
Client.list_datasets
"""
return self.sync(self._get_dataset, tokey(name), **kwargs)
@gen.coroutine
def _run_on_scheduler(self, function, *args, **kwargs):
response = yield self.scheduler.run_function(function=dumps(function),
args=dumps(args),
kwargs=dumps(kwargs))
if response['status'] == 'error':
six.reraise(*clean_exception(**response))
else:
raise gen.Return(response['result'])
def run_on_scheduler(self, function, *args, **kwargs):
""" Run a function on the scheduler process
This is typically used for live debugging. The function should take a
keyword argument ``dask_scheduler=``, which will be given the scheduler
object itself.
Examples
--------
>>> def get_number_of_tasks(dask_scheduler=None):
... return len(dask_scheduler.tasks)
>>> client.run_on_scheduler(get_number_of_tasks) # doctest: +SKIP
100
See Also
--------
Client.run: Run a function on all workers
Client.start_ipython_scheduler: Start an IPython session on scheduler
"""
return self.sync(self._run_on_scheduler, function, *args,
**kwargs)
@gen.coroutine
def _run(self, function, *args, **kwargs):
nanny = kwargs.pop('nanny', False)
workers = kwargs.pop('workers', None)
responses = yield self.scheduler.broadcast(msg=dict(op='run',
function=dumps(function),
args=dumps(args),
kwargs=dumps(kwargs)),
workers=workers, nanny=nanny)
results = {}
for key, resp in responses.items():
if resp['status'] == 'OK':
results[key] = resp['result']
elif resp['status'] == 'error':
six.reraise(*clean_exception(**resp))
raise gen.Return(results)
def run(self, function, *args, **kwargs):
"""
Run a function on all workers outside of task scheduling system
This calls a function on all currently known workers immediately,
blocks until those results come back, and returns the results
as a dictionary keyed by worker address. This method
is generally used for side effects, such as collecting diagnostic
information or installing libraries.
If your function takes an input argument named ``dask_worker`` then
that variable will be populated with the worker itself.
Parameters
----------
function: callable
*args: arguments for remote function
**kwargs: keyword arguments for remote function
workers: list
Workers on which to run the function. Defaults to all known workers.
Examples
--------
>>> c.run(os.getpid) # doctest: +SKIP
{'192.168.0.100:9000': 1234,
'192.168.0.101:9000': 4321,
'192.168.0.102:9000': 5555}
Restrict computation to particular workers with the ``workers=``
keyword argument.
>>> c.run(os.getpid, workers=['192.168.0.100:9000',
... '192.168.0.101:9000']) # doctest: +SKIP
{'192.168.0.100:9000': 1234,
'192.168.0.101:9000': 4321}
>>> def get_status(dask_worker):
... return dask_worker.status
>>> c.run(get_status) # doctest: +SKIP
{'192.168.0.100:9000': 'running',
'192.168.0.101:9000': 'running'}
"""
return self.sync(self._run, function, *args, **kwargs)
@gen.coroutine
def _run_coroutine(self, function, *args, **kwargs):
workers = kwargs.pop('workers', None)
wait = kwargs.pop('wait', True)
responses = yield self.scheduler.broadcast(msg=dict(op='run_coroutine',
function=dumps(function),
args=dumps(args),
kwargs=dumps(kwargs),
wait=wait),
workers=workers)
if not wait:
raise gen.Return(None)
else:
results = {}
for key, resp in responses.items():
if resp['status'] == 'OK':
results[key] = resp['result']
elif resp['status'] == 'error':
six.reraise(*clean_exception(**resp))
raise gen.Return(results)
def run_coroutine(self, function, *args, **kwargs):
"""
Spawn a coroutine on all workers.
This spawns a coroutine on all currently known workers and then waits
for the coroutine on each worker. The coroutines' results are returned
as a dictionary keyed by worker address.
Parameters
----------
function: a coroutine function
(typically a function wrapped in gen.coroutine or
a Python 3.5+ async function)
*args: arguments for remote function
**kwargs: keyword arguments for remote function
wait: boolean (default True)
Whether to wait for coroutines to end.
workers: list
Workers on which to run the function. Defaults to all known workers.
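Examples
--------
A sketch using tornado-style coroutines (``gen`` as imported at the top
of this module); the worker addresses shown are illustrative:
>>> @gen.coroutine
... def wait_then_add(x, y):
...     yield gen.sleep(0.1)
...     raise gen.Return(x + y)
>>> c.run_coroutine(wait_then_add, 1, 2)  # doctest: +SKIP
{'192.168.0.100:9000': 3, '192.168.0.101:9000': 3}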
"""
return self.sync(self._run_coroutine, function, *args, **kwargs)
def _graph_to_futures(self, dsk, keys, restrictions=None,
loose_restrictions=None, priority=None,
user_priority=0, resources=None, retries=None,
fifo_timeout=0):
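# Internal: translate a raw task graph into Future objects. Futures
# already known to the client are inlined, remote data references are
# unpacked, tasks are serialized, and a single 'update-graph' message
# is sent to the scheduler.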
with self._lock:
keyset = set(keys)
flatkeys = list(map(tokey, keys))
futures = {key: Future(key, self, inform=False) for key in keyset}
values = {k for k, v in dsk.items() if isinstance(v, Future)
and k not in keyset}
if values:
dsk = dask.optimization.inline(dsk, keys=values)
d = {k: unpack_remotedata(v) for k, v in dsk.items()}
extra_keys = set.union(*[v[1] for v in d.values()]) if d else set()
dsk2 = str_graph({k: v[0] for k, v in d.items()}, extra_keys)
dsk3 = {k: v for k, v in dsk2.items() if k is not v}
if restrictions:
restrictions = keymap(tokey, restrictions)
restrictions = valmap(list, restrictions)
if loose_restrictions is not None:
loose_restrictions = list(map(tokey, loose_restrictions))
dependencies = {tokey(k): set(map(tokey, v[1])) for k, v in d.items()}
for s in dependencies.values():
for v in s:
if v not in self.futures:
raise CancelledError(v)
for k, v in dsk3.items():
dependencies[k] |= get_dependencies(dsk3, task=v)
if priority is None:
dependencies2 = {key: {dep for dep in deps if dep in dependencies}
for key, deps in dependencies.items()}
priority = dask.order.order(dsk3, dependencies2)
if isinstance(retries, Number) and retries > 0:
retries = {k: retries for k in dsk3}
self._send_to_scheduler({'op': 'update-graph',
'tasks': valmap(dumps_task, dsk3),
'dependencies': valmap(list, dependencies),
'keys': list(flatkeys),
'restrictions': restrictions or {},
'loose_restrictions': loose_restrictions,
'priority': priority,
'user_priority': user_priority,
'resources': resources,
'submitting_task': getattr(thread_state, 'key', None),
'retries': retries,
'fifo_timeout': fifo_timeout})
return futures
def get(self, dsk, keys, restrictions=None, loose_restrictions=None,
resources=None, sync=True, asynchronous=None, direct=None,
retries=None, priority=0, fifo_timeout='60s', **kwargs):
""" Compute dask graph
Parameters
----------
dsk: dict
keys: object, or nested lists of objects
restrictions: dict (optional)
A mapping of {key: {set of worker hostnames}} that restricts where
jobs can take place
retries: int (default to 0)
Number of allowed automatic retries if computing a result fails
priority: Number
Optional prioritization of task. Zero is default.
Higher priorities take precedence
sync: bool (optional)
Returns Futures if False or concrete values if True (default).
direct: bool
Gather results directly from workers
Examples
--------
>>> from operator import add # doctest: +SKIP
>>> c = Client('127.0.0.1:8787') # doctest: +SKIP
>>> c.get({'x': (add, 1, 2)}, 'x') # doctest: +SKIP
3
See Also
--------
Client.compute: Compute asynchronous collections
"""
futures = self._graph_to_futures(dsk, set(flatten([keys])),
restrictions, loose_restrictions,
resources=resources,
fifo_timeout=fifo_timeout,
retries=retries,
user_priority=priority,
)
packed = pack_data(keys, futures)
if sync:
if getattr(thread_state, 'key', False):
try:
secede()
should_rejoin = True
except Exception:
should_rejoin = False
try:
results = self.gather(packed, asynchronous=asynchronous,
direct=direct)
finally:
for f in futures.values():
f.release()
if getattr(thread_state, 'key', False) and should_rejoin:
rejoin()
return results
return packed
def _optimize_insert_futures(self, dsk, keys):
""" Replace known keys in dask graph with Futures
When given a Dask graph that might have overlapping keys with our known
results we replace the values of that graph with futures. This can be
used as an optimization to avoid recomputation.
This returns the same graph if unchanged but a new graph if any changes
were necessary.
"""
changed = False
for key in list(dsk):
if tokey(key) in self.futures:
if not changed:
changed = True
dsk = dict(dsk)
dsk[key] = Future(key, self, inform=False)
if changed:
dsk, _ = dask.optimization.cull(dsk, keys)
return dsk
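# Illustrative: if key 'x' already has a known future, a graph like
# {'x': (inc, 1), 'y': (add, 'x', 10)} is copied with
# {'x': Future('x')} substituted in, and then culled down to ``keys``.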
def normalize_collection(self, collection):
"""
Replace collection's tasks by already existing futures if they exist
This normalizes the tasks within a collections task graph against the
known futures within the scheduler. It returns a copy of the
collection with a task graph that includes the overlapping futures.
Examples
--------
>>> len(x.__dask_graph__()) # x is a dask collection with 100 tasks # doctest: +SKIP
100
>>> set(client.futures).intersection(x.__dask_graph__()) # some overlap exists # doctest: +SKIP
10
>>> x = client.normalize_collection(x) # doctest: +SKIP
>>> len(x.__dask_graph__()) # smaller computational graph # doctest: +SKIP
20
See Also
--------
Client.persist: trigger computation of collection's tasks
"""
with self._lock:
dsk = self._optimize_insert_futures(
collection.__dask_graph__(),
collection.__dask_keys__())
if dsk is collection.__dask_graph__():
return collection
else:
return redict_collection(collection, dsk)
def compute(self, collections, sync=False, optimize_graph=True,
workers=None, allow_other_workers=False, resources=None,
retries=0, priority=0, fifo_timeout='60s', **kwargs):
""" Compute dask collections on cluster
Parameters
----------
collections: iterable of dask objects or single dask object
Collections like dask.array or dataframe or dask.value objects
sync: bool (optional)
Returns Futures if False (default) or concrete values if True
optimize_graph: bool
Whether or not to optimize the underlying graphs
workers: str, list, dict
Which workers can run which parts of the computation
If a string or list then the output collections will run on the listed
workers, but other sub-computations can run anywhere
If a dict then keys should be (tuples of) collections and values
should be addresses or lists.
allow_other_workers: bool, list
If True then all restrictions in workers= are considered loose
If a list then only the keys for the listed collections are loose
retries: int (default to 0)
Number of allowed automatic retries if computing a result fails
priority: Number
Optional prioritization of task. Zero is default.
Higher priorities take precedence
fifo_timeout: timedelta str (defaults to '60s')
Allowed amount of time between calls to consider the same priority
**kwargs:
Options to pass to the graph optimize calls
Returns
-------
List of Futures if input is a sequence, or a single future otherwise
Examples
--------
>>> from dask import delayed
>>> from operator import add
>>> x = delayed(add)(1, 2)
>>> y = delayed(add)(x, x)
>>> xx, yy = client.compute([x, y]) # doctest: +SKIP
>>> xx # doctest: +SKIP
<Future: status: finished, key: add-8f6e709446674bad78ea8aeecfee188e>
>>> xx.result() # doctest: +SKIP
3
>>> yy.result() # doctest: +SKIP
6
Also support single arguments
>>> xx = client.compute(x) # doctest: +SKIP
See Also
--------
Client.get: Normal synchronous dask.get function
"""
if isinstance(collections, (list, tuple, set, frozenset)):
singleton = False
else:
collections = [collections]
singleton = True
traverse = kwargs.pop('traverse', True)
if traverse:
collections = tuple(dask.delayed(a)
if isinstance(a, (list, set, tuple, dict, Iterator))
else a for a in collections)
variables = [a for a in collections if dask.is_dask_collection(a)]
dsk = self.collections_to_dsk(variables, optimize_graph, **kwargs)
names = ['finalize-%s' % tokenize(v) for v in variables]
dsk2 = {}
for i, (name, v) in enumerate(zip(names, variables)):
func, extra_args = v.__dask_postcompute__()
keys = v.__dask_keys__()
if func is single_key and len(keys) == 1 and not extra_args:
names[i] = keys[0]
else:
dsk2[name] = (func, keys) + extra_args
restrictions, loose_restrictions = self.get_restrictions(collections,
workers, allow_other_workers)
if resources:
resources = self._expand_resources(resources,
all_keys=itertools.chain(dsk, dsk2))
if retries:
retries = self._expand_retries(retries,
all_keys=itertools.chain(dsk, dsk2))
else:
retries = None
if not isinstance(priority, Number):
priority = {k: p for c, p in priority.items()
for k in self._expand_key(c)}
futures_dict = self._graph_to_futures(merge(dsk2, dsk), names,
restrictions, loose_restrictions,
resources=resources,
retries=retries,
user_priority=priority,
fifo_timeout=fifo_timeout)
i = 0
futures = []
for arg in collections:
if dask.is_dask_collection(arg):
futures.append(futures_dict[names[i]])
i += 1
else:
futures.append(arg)
if sync:
result = self.gather(futures)
else:
result = futures
if singleton:
return first(result)
else:
return result
def persist(self, collections, optimize_graph=True, workers=None,
allow_other_workers=None, resources=None, retries=None,
priority=0, fifo_timeout='60s', **kwargs):
""" Persist dask collections on cluster
Starts computation of the collection on the cluster in the background.
Provides a new dask collection that is semantically identical to the
previous one, but now based off of futures currently in execution.
Parameters
----------
collections: sequence or single dask object
Collections like dask.array or dataframe or dask.value objects
optimize_graph: bool
Whether or not to optimize the underlying graphs
workers: str, list, dict
Which workers can run which parts of the computation
If a string or list then the output collections will run on the listed
workers, but other sub-computations can run anywhere
If a dict then keys should be (tuples of) collections and values
should be addresses or lists.
allow_other_workers: bool, list
If True then all restrictions in workers= are considered loose
If a list then only the keys for the listed collections are loose
retries: int (default to 0)
Number of allowed automatic retries if computing a result fails
priority: Number
Optional prioritization of task. Zero is default.
Higher priorities take precedence
fifo_timeout: timedelta str (defaults to '60s')
Allowed amount of time between calls to consider the same priority
kwargs:
Options to pass to the graph optimize calls
Returns
-------
List of collections, or single collection, depending on type of input.
Examples
--------
>>> xx = client.persist(x) # doctest: +SKIP
>>> xx, yy = client.persist([x, y]) # doctest: +SKIP
See Also
--------
Client.compute
"""
if isinstance(collections, (tuple, list, set, frozenset)):
singleton = False
else:
singleton = True
collections = [collections]
assert all(map(dask.is_dask_collection, collections))
dsk = self.collections_to_dsk(collections, optimize_graph, **kwargs)
names = {k for c in collections for k in flatten(c.__dask_keys__())}
restrictions, loose_restrictions = self.get_restrictions(collections,
workers, allow_other_workers)
if resources:
resources = self._expand_resources(resources,
all_keys=itertools.chain(dsk, names))
if retries:
retries = self._expand_retries(retries,
all_keys=itertools.chain(dsk, names))
else:
retries = None
if not isinstance(priority, Number):
priority = {k: p for c, p in priority.items()
for k in self._expand_key(c)}
futures = self._graph_to_futures(dsk, names, restrictions,
loose_restrictions,
resources=resources,
retries=retries,
user_priority=priority,
fifo_timeout=fifo_timeout)
postpersists = [c.__dask_postpersist__() for c in collections]
result = [func({k: futures[k] for k in flatten(c.__dask_keys__())}, *args)
for (func, args), c in zip(postpersists, collections)]
if singleton:
return first(result)
else:
return result
@gen.coroutine
def _upload_environment(self, zipfile):
name = os.path.split(zipfile)[1]
yield self._upload_large_file(zipfile, name)
def unzip(dask_worker=None):
from distributed.utils import log_errors
import zipfile
import shutil
with log_errors():
a = os.path.join(dask_worker.worker_dir, name)
b = os.path.join(dask_worker.local_dir, name)
c = os.path.dirname(b)
shutil.move(a, b)
with zipfile.ZipFile(b) as f:
f.extractall(path=c)
for fn in glob(os.path.join(c, name[:-4], 'bin', '*')):
st = os.stat(fn)
os.chmod(fn, st.st_mode | 64) # chmod u+x fn
assert os.path.exists(os.path.join(c, name[:-4]))
return c
yield self._run(unzip, nanny=True)
raise gen.Return(name[:-4])
def upload_environment(self, name, zipfile):
# ``_upload_environment`` takes only the zipfile; the environment name
# is derived from the zipfile's basename.
return self.sync(self._upload_environment, zipfile)
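# Illustrative usage (assumes ``myenv.zip`` unpacks to a ``myenv/``
# directory with executables under ``myenv/bin``):
#
#     client.upload_environment('myenv', 'myenv.zip')
#
# The zipfile is scattered to every worker, unpacked there, and its
# bin/ entries are marked executable.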
@gen.coroutine
def _restart(self, timeout=no_default):
if timeout == no_default:
timeout = self._timeout * 2
self._send_to_scheduler({'op': 'restart', 'timeout': timeout})
self._restart_event = Event()
try:
yield self._restart_event.wait(self.loop.time() + timeout)
except gen.TimeoutError:
logger.error("Restart timed out after %f seconds", timeout)
pass
self.generation += 1
with self._refcount_lock:
self.refcount.clear()
raise gen.Return(self)
def restart(self, **kwargs):
""" Restart the distributed network
This kills all active work, deletes all data on the network, and
restarts the worker processes.
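Examples
--------
An illustrative call; this blocks until the workers have restarted.
>>> c.restart() # doctest: +SKIP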
"""
return self.sync(self._restart, **kwargs)
@gen.coroutine
def _upload_file(self, filename, raise_on_error=True):
with open(filename, 'rb') as f:
data = f.read()
_, fn = os.path.split(filename)
d = yield self.scheduler.broadcast(msg={'op': 'upload_file',
'filename': fn,
'data': to_serialize(data)})
if any(v['status'] == 'error' for v in d.values()):
exceptions = [loads(v['exception']) for v in d.values()
if v['status'] == 'error']
if raise_on_error:
raise exceptions[0]
else:
raise gen.Return(exceptions[0])
assert all(len(data) == v['nbytes'] for v in d.values())
@gen.coroutine
def _upload_large_file(self, local_filename, remote_filename=None):
if remote_filename is None:
remote_filename = os.path.split(local_filename)[1]
with open(local_filename, 'rb') as f:
data = f.read()
[future] = yield self._scatter([data])
key = future.key
yield self._replicate(future)
def dump_to_file(dask_worker=None):
if not os.path.isabs(remote_filename):
fn = os.path.join(dask_worker.local_dir, remote_filename)
else:
fn = remote_filename
with open(fn, 'wb') as f:
f.write(dask_worker.data[key])
return len(dask_worker.data[key])
response = yield self._run(dump_to_file)
assert all(len(data) == v for v in response.values())
def upload_file(self, filename, **kwargs):
""" Upload local package to workers
This sends a local file up to all worker nodes. This file is placed
into a temporary directory on Python's system path so any .py, .egg
or .zip files will be importable.
Parameters
----------
filename: string
Filename of .py, .egg or .zip file to send to workers
Examples
--------
>>> client.upload_file('mylibrary.egg') # doctest: +SKIP
>>> from mylibrary import myfunc # doctest: +SKIP
>>> L = c.map(myfunc, seq) # doctest: +SKIP
"""
result = self.sync(self._upload_file, filename,
raise_on_error=self.asynchronous, **kwargs)
if isinstance(result, Exception):
raise result
else:
return result
@gen.coroutine
def _rebalance(self, futures=None, workers=None):
yield _wait(futures)
keys = list({tokey(f.key) for f in self.futures_of(futures)})
result = yield self.scheduler.rebalance(keys=keys, workers=workers)
assert result['status'] == 'OK'
def rebalance(self, futures=None, workers=None, **kwargs):
""" Rebalance data within network
Move data between workers to roughly balance memory burden. This
either affects a subset of the keys/workers or the entire network,
depending on keyword arguments.
This operation is generally not well tested against normal operation of
the scheduler. It is not recommended to use it while waiting on
computations.
Parameters
----------
futures: list, optional
A list of futures to balance, defaults all data
workers: list, optional
A list of workers on which to balance, defaults to all workers
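Examples
--------
An illustrative sketch; ``futures`` is assumed to be a list of futures
that have already completed on the cluster.
>>> c.rebalance() # balance all data # doctest: +SKIP
>>> c.rebalance(futures) # balance only these keys # doctest: +SKIP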
"""
return self.sync(self._rebalance, futures, workers, **kwargs)
@gen.coroutine
def _replicate(self, futures, n=None, workers=None, branching_factor=2):
futures = self.futures_of(futures)
yield _wait(futures)
keys = {tokey(f.key) for f in futures}
yield self.scheduler.replicate(keys=list(keys), n=n, workers=workers,
branching_factor=branching_factor)
def replicate(self, futures, n=None, workers=None, branching_factor=2,
**kwargs):
""" Set replication of futures within network
Copy data onto many workers. This helps to broadcast frequently
accessed data and it helps to improve resilience.
This performs a tree copy of the data throughout the network
individually on each piece of data. This operation blocks until
complete. It does not guarantee replication of data to future workers.
Parameters
----------
futures: list of futures
Futures we wish to replicate
n: int, optional
Number of processes on the cluster on which to replicate the data.
Defaults to all.
workers: list of worker addresses
Workers on which we want to restrict the replication.
Defaults to all.
branching_factor: int, optional
The number of workers that can copy data in each generation
Examples
--------
>>> x = c.submit(func, *args) # doctest: +SKIP
>>> c.replicate([x]) # send to all workers # doctest: +SKIP
>>> c.replicate([x], n=3) # send to three workers # doctest: +SKIP
>>> c.replicate([x], workers=['alice', 'bob']) # send to specific # doctest: +SKIP
>>> c.replicate([x], n=1, workers=['alice', 'bob']) # send to one of specific workers # doctest: +SKIP
>>> c.replicate([x], n=1) # reduce replications # doctest: +SKIP
See also
--------
Client.rebalance
"""
return self.sync(self._replicate, futures, n=n, workers=workers,
branching_factor=branching_factor, **kwargs)
def ncores(self, workers=None, **kwargs):
""" The number of threads/cores available on each worker node
Parameters
----------
workers: list (optional)
A list of workers that we care about specifically.
Leave empty to receive information about all workers.
Examples
--------
>>> c.ncores() # doctest: +SKIP
{'192.168.1.141:46784': 8,
'192.167.1.142:47548': 8,
'192.167.1.143:47329': 8,
'192.167.1.144:37297': 8}
See Also
--------
Client.who_has
Client.has_what
"""
if (isinstance(workers, tuple)
and all(isinstance(i, (str, tuple)) for i in workers)):
workers = list(workers)
if workers is not None and not isinstance(workers, (list, set)):
workers = [workers]
return self.sync(self.scheduler.ncores, workers=workers, **kwargs)
def who_has(self, futures=None, **kwargs):
""" The workers storing each future's data
Parameters
----------
futures: list (optional)
A list of futures, defaults to all data
Examples
--------
>>> x, y, z = c.map(inc, [1, 2, 3]) # doctest: +SKIP
>>> wait([x, y, z]) # doctest: +SKIP
>>> c.who_has() # doctest: +SKIP
{'inc-1c8dd6be1c21646c71f76c16d09304ea': ['192.168.1.141:46784'],
'inc-1e297fc27658d7b67b3a758f16bcf47a': ['192.168.1.141:46784'],
'inc-fd65c238a7ea60f6a01bf4c8a5fcf44b': ['192.168.1.141:46784']}
>>> c.who_has([x, y]) # doctest: +SKIP
{'inc-1c8dd6be1c21646c71f76c16d09304ea': ['192.168.1.141:46784'],
'inc-1e297fc27658d7b67b3a758f16bcf47a': ['192.168.1.141:46784']}
See Also
--------
Client.has_what
Client.ncores
"""
if futures is not None:
futures = self.futures_of(futures)
keys = list(map(tokey, {f.key for f in futures}))
else:
keys = None
return self.sync(self.scheduler.who_has, keys=keys, **kwargs)
def has_what(self, workers=None, **kwargs):
""" Which keys are held by which workers
This returns the keys of the data that are held in each worker's
memory.
Parameters
----------
workers: list (optional)
A list of worker addresses, defaults to all
Examples
--------
>>> x, y, z = c.map(inc, [1, 2, 3]) # doctest: +SKIP
>>> wait([x, y, z]) # doctest: +SKIP
>>> c.has_what() # doctest: +SKIP
{'192.168.1.141:46784': ['inc-1c8dd6be1c21646c71f76c16d09304ea',
'inc-fd65c238a7ea60f6a01bf4c8a5fcf44b',
'inc-1e297fc27658d7b67b3a758f16bcf47a']}
See Also
--------
Client.who_has
Client.ncores
Client.processing
"""
if (isinstance(workers, tuple)
and all(isinstance(i, (str, tuple)) for i in workers)):
workers = list(workers)
if workers is not None and not isinstance(workers, (list, set)):
workers = [workers]
return self.sync(self.scheduler.has_what, workers=workers, **kwargs)
def stacks(self, workers=None):
""" The task queues on each worker
Parameters
----------
workers: list (optional)
A list of worker addresses, defaults to all
Examples
--------
>>> x, y, z = c.map(inc, [1, 2, 3]) # doctest: +SKIP
>>> c.stacks() # doctest: +SKIP
{'192.168.1.141:46784': ['inc-1c8dd6be1c21646c71f76c16d09304ea',
'inc-fd65c238a7ea60f6a01bf4c8a5fcf44b',
'inc-1e297fc27658d7b67b3a758f16bcf47a']}
See Also
--------
Client.processing
Client.who_has
Client.has_what
Client.ncores
"""
if (isinstance(workers, tuple)
and all(isinstance(i, (str, tuple)) for i in workers)):
workers = list(workers)
if workers is not None and not isinstance(workers, (list, set)):
workers = [workers]
return sync(self.loop, self.scheduler.stacks, workers=workers)
def processing(self, workers=None):
""" The tasks currently running on each worker
Parameters
----------
workers: list (optional)
A list of worker addresses, defaults to all
Examples
--------
>>> x, y, z = c.map(inc, [1, 2, 3]) # doctest: +SKIP
>>> c.processing() # doctest: +SKIP
{'192.168.1.141:46784': ['inc-1c8dd6be1c21646c71f76c16d09304ea',
'inc-fd65c238a7ea60f6a01bf4c8a5fcf44b',
'inc-1e297fc27658d7b67b3a758f16bcf47a']}
See Also
--------
Client.stacks
Client.who_has
Client.has_what
Client.ncores
"""
if (isinstance(workers, tuple)
and all(isinstance(i, (str, tuple)) for i in workers)):
workers = list(workers)
if workers is not None and not isinstance(workers, (list, set)):
workers = [workers]
return valmap(set, sync(self.loop, self.scheduler.processing,
workers=workers))
def nbytes(self, keys=None, summary=True, **kwargs):
""" The bytes taken up by each key on the cluster
This is as measured by ``sys.getsizeof`` which may not accurately
reflect the true cost.
Parameters
----------
keys: list (optional)
A list of keys, defaults to all keys
summary: boolean, (optional)
Summarize keys into key types
Examples
--------
>>> x, y, z = c.map(inc, [1, 2, 3]) # doctest: +SKIP
>>> c.nbytes(summary=False) # doctest: +SKIP
{'inc-1c8dd6be1c21646c71f76c16d09304ea': 28,
'inc-1e297fc27658d7b67b3a758f16bcf47a': 28,
'inc-fd65c238a7ea60f6a01bf4c8a5fcf44b': 28}
>>> c.nbytes(summary=True) # doctest: +SKIP
{'inc': 84}
See Also
--------
Client.who_has
"""
return self.sync(self.scheduler.nbytes, keys=keys,
summary=summary, **kwargs)
def call_stack(self, futures=None, keys=None):
""" The actively running call stack of all relevant keys
You can specify data of interest either by providing futures or
collections in the ``futures=`` keyword or a list of explicit keys in
the ``keys=`` keyword. If neither are provided then all call stacks
will be returned.
Parameters
----------
futures: list (optional)
List of futures, defaults to all data
keys: list (optional)
List of key names, defaults to all data
Examples
--------
>>> df = dd.read_parquet(...).persist() # doctest: +SKIP
>>> client.call_stack(df) # call on collections # doctest: +SKIP
>>> client.call_stack() # Or call with no arguments for all activity # doctest: +SKIP
"""
keys = keys or []
if futures is not None:
futures = self.futures_of(futures)
keys += list(map(tokey, {f.key for f in futures}))
return self.sync(self.scheduler.call_stack, keys=keys or None)
def profile(self, key=None, start=None, stop=None, workers=None, merge_workers=True):
""" Collect statistical profiling information about recent work
Parameters
----------
key: str
Key prefix to select, this is typically a function name like 'inc'
Leave as None to collect all data
start: time
stop: time
workers: list
List of workers to restrict profile information
Examples
--------
>>> client.profile() # doctest: +SKIP
"""
if isinstance(workers, six.string_types + (Number,)):
workers = [workers]
return self.sync(self.scheduler.profile, key=key, workers=workers,
merge_workers=merge_workers, start=start, stop=stop)
def scheduler_info(self, **kwargs):
""" Basic information about the workers in the cluster
Examples
--------
>>> c.scheduler_info() # doctest: +SKIP
{'id': '2de2b6da-69ee-11e6-ab6a-e82aea155996',
'services': {},
'type': 'Scheduler',
'workers': {'127.0.0.1:40575': {'active': 0,
'last-seen': 1472038237.4845693,
'name': '127.0.0.1:40575',
'services': {},
'stored': 0,
'time-delay': 0.0061032772064208984}}}
"""
self.sync(self._update_scheduler_info)
return self._scheduler_identity
def write_scheduler_file(self, scheduler_file):
""" Write the scheduler information to a json file.
This facilitates easy sharing of scheduler information using a file
system. The scheduler file can be used to instantiate a second Client
using the same scheduler.
Parameters
----------
scheduler_file: str
Path at which to write the scheduler file.
Examples
--------
>>> client = Client() # doctest: +SKIP
>>> client.write_scheduler_file('scheduler.json') # doctest: +SKIP
# connect to previous client's scheduler
>>> client2 = Client(scheduler_file='scheduler.json') # doctest: +SKIP
"""
if self.scheduler_file:
raise ValueError('Scheduler file already set')
else:
self.scheduler_file = scheduler_file
with open(self.scheduler_file, 'w') as f:
json.dump(self.scheduler_info(), f, indent=2)
def get_metadata(self, keys, default=no_default):
""" Get arbitrary metadata from scheduler
See set_metadata for the full docstring with examples
Parameters
----------
keys: key or list
Key to access. If a list then gets within a nested collection
default: optional
If the key does not exist then return this value instead.
If not provided then this raises a KeyError if the key is not
present
See also
--------
Client.set_metadata
"""
if not isinstance(keys, list):
keys = [keys]
return self.sync(self.scheduler.get_metadata, keys=keys,
default=default)
def get_scheduler_logs(self, n=None):
""" Get logs from scheduler
Parameters
----------
n: int
Number of logs to retrieve. Maxes out at 10000 by default,
configurable in config.yaml::log-length
Returns
-------
Logs in reversed order (newest first)
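Examples
--------
An illustrative call:
>>> client.get_scheduler_logs(n=5) # five most recent entries # doctest: +SKIP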
"""
return self.sync(self.scheduler.logs, n=n)
def get_worker_logs(self, n=None, workers=None):
""" Get logs from workers
Parameters
----------
n: int
Number of logs to retrieve. Maxes out at 10000 by default,
configurable in config.yaml::log-length
workers: iterable
List of worker addresses to retrieve. Gets all workers by default.
Returns
-------
Dictionary mapping worker address to logs.
Logs are returned in reversed order (newest first)
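Examples
--------
An illustrative call; the worker address is hypothetical.
>>> client.get_worker_logs(n=5, workers=['tcp://192.168.0.100:9000']) # doctest: +SKIP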
"""
return self.sync(self.scheduler.worker_logs, n=n, workers=workers)
def retire_workers(self, workers=None, close_workers=True, **kwargs):
""" Retire certain workers on the scheduler
See dask.distributed.Scheduler.retire_workers for the full docstring.
Examples
--------
You can get information about active workers using the following:
>>> workers = client.scheduler_info()['workers']
From that list you may want to select some workers to close
>>> client.retire_workers(workers=['tcp://address:port', ...])
See Also
--------
dask.distributed.Scheduler.retire_workers
"""
return self.sync(self.scheduler.retire_workers, workers=workers,
close_workers=close_workers, **kwargs)
def set_metadata(self, key, value):
""" Set arbitrary metadata in the scheduler
This allows you to store small amounts of data on the central scheduler
process for administrative purposes. Data should be msgpack
serializable (ints, strings, lists, dicts)
If the key corresponds to a task then that key will be cleaned up when
the task is forgotten by the scheduler.
If the key is a list then it will be assumed that you want to index
into a nested dictionary structure using those keys. For example if
you call the following::
>>> client.set_metadata(['a', 'b', 'c'], 123)
Then this is the same as setting
>>> scheduler.task_metadata['a']['b']['c'] = 123
The lower level dictionaries will be created on demand.
Examples
--------
>>> client.set_metadata('x', 123) # doctest: +SKIP
>>> client.get_metadata('x') # doctest: +SKIP
123
>>> client.set_metadata(['x', 'y'], 123) # doctest: +SKIP
>>> client.get_metadata('x') # doctest: +SKIP
{'y': 123}
>>> client.set_metadata(['x', 'w', 'z'], 456) # doctest: +SKIP
>>> client.get_metadata('x') # doctest: +SKIP
{'y': 123, 'w': {'z': 456}}
>>> client.get_metadata(['x', 'w']) # doctest: +SKIP
{'z': 456}
See Also
--------
get_metadata
"""
if not isinstance(key, list):
key = [key]
return self.sync(self.scheduler.set_metadata, keys=key, value=value)
def get_versions(self, check=False):
""" Return version info for the scheduler, all workers and myself
Parameters
----------
check : boolean, default False
raise ValueError if all required & optional packages
do not match
Examples
--------
>>> c.get_versions() # doctest: +SKIP
"""
client = get_versions()
try:
scheduler = sync(self.loop, self.scheduler.versions)
except KeyError:
scheduler = None
workers = sync(self.loop, self.scheduler.broadcast, msg={'op': 'versions'})
result = {'scheduler': scheduler, 'workers': workers, 'client': client}
if check:
# we care about the required & optional packages matching
def to_packages(d):
return dict(sum(d['packages'].values(), []))
client_versions = to_packages(result['client'])
versions = [('scheduler', to_packages(result['scheduler']))]
versions.extend((w, to_packages(d))
for w, d in sorted(workers.items()))
mismatched = defaultdict(list)
for name, vers in versions:
for pkg, cv in client_versions.items():
v = vers.get(pkg, 'MISSING')
if cv != v:
mismatched[pkg].append((name, v))
if mismatched:
errs = []
for pkg, versions in sorted(mismatched.items()):
rows = [('client', client_versions[pkg])]
rows.extend(versions)
errs.append("%s\n%s" % (pkg, asciitable(['', 'version'], rows)))
raise ValueError("Mismatched versions found\n"
"\n"
"%s" % ('\n\n'.join(errs)))
return result
def futures_of(self, futures):
return futures_of(futures, client=self)
def start_ipython(self, *args, **kwargs):
raise Exception("Method moved to start_ipython_workers")
@gen.coroutine
def _start_ipython_workers(self, workers):
if workers is None:
workers = yield self.scheduler.ncores()
responses = yield self.scheduler.broadcast(
msg=dict(op='start_ipython'), workers=workers,
)
raise gen.Return((workers, responses))
def start_ipython_workers(self, workers=None, magic_names=False,
qtconsole=False, qtconsole_args=None):
""" Start IPython kernels on workers
Parameters
----------
workers: list (optional)
A list of worker addresses, defaults to all
magic_names: str or list(str) (optional)
If defined, register IPython magics with these names for
executing code on the workers. If the string has an asterisk then expand
the asterisk into 0, 1, ..., n for n workers
qtconsole: bool (optional)
If True, launch a Jupyter QtConsole connected to the worker(s).
qtconsole_args: list(str) (optional)
Additional arguments to pass to the qtconsole on startup.
Examples
--------
>>> info = c.start_ipython_workers() # doctest: +SKIP
>>> %remote info['192.168.1.101:5752'] worker.data # doctest: +SKIP
{'x': 1, 'y': 100}
>>> c.start_ipython_workers('192.168.1.101:5752', magic_names='w') # doctest: +SKIP
>>> %w worker.data # doctest: +SKIP
{'x': 1, 'y': 100}
>>> c.start_ipython_workers('192.168.1.101:5752', qtconsole=True) # doctest: +SKIP
Add an asterisk * in magic names to add one magic per worker
>>> c.start_ipython_workers(magic_names='w_*') # doctest: +SKIP
>>> %w_0 worker.data # doctest: +SKIP
{'x': 1, 'y': 100}
>>> %w_1 worker.data # doctest: +SKIP
{'z': 5}
Returns
-------
iter_connection_info: list
List of connection_info dicts containing info necessary
to connect Jupyter clients to the workers.
See Also
--------
Client.start_ipython_scheduler: start ipython on the scheduler
"""
if isinstance(workers, six.string_types + (Number,)):
workers = [workers]
(workers, info_dict) = sync(self.loop, self._start_ipython_workers, workers)
if magic_names and isinstance(magic_names, six.string_types):
if '*' in magic_names:
magic_names = [magic_names.replace('*', str(i))
for i in range(len(workers))]
else:
magic_names = [magic_names]
if 'IPython' in sys.modules:
from ._ipython_utils import register_remote_magic
register_remote_magic()
if magic_names:
from ._ipython_utils import register_worker_magic
for worker, magic_name in zip(workers, magic_names):
connection_info = info_dict[worker]
register_worker_magic(connection_info, magic_name)
if qtconsole:
from ._ipython_utils import connect_qtconsole
for worker, connection_info in info_dict.items():
name = 'dask-' + worker.replace(':', '-').replace('/', '-')
connect_qtconsole(connection_info, name=name,
extra_args=qtconsole_args,
)
return info_dict
def start_ipython_scheduler(self, magic_name='scheduler_if_ipython',
qtconsole=False, qtconsole_args=None):
""" Start IPython kernel on the scheduler
Parameters
----------
magic_name: str or None (optional)
If defined, register IPython magic with this name for
executing code on the scheduler.
If not defined, register %scheduler magic if IPython is running.
qtconsole: bool (optional)
If True, launch a Jupyter QtConsole connected to the worker(s).
qtconsole_args: list(str) (optional)
Additional arguments to pass to the qtconsole on startup.
Examples
--------
>>> c.start_ipython_scheduler() # doctest: +SKIP
>>> %scheduler scheduler.processing # doctest: +SKIP
{'127.0.0.1:3595': {'inc-1', 'inc-2'},
'127.0.0.1:53589': {'inc-2', 'add-5'}}
>>> c.start_ipython_scheduler(qtconsole=True) # doctest: +SKIP
Returns
-------
connection_info: dict
connection_info dict containing info necessary
to connect Jupyter clients to the scheduler.
See Also
--------
Client.start_ipython_workers: Start IPython on the workers
"""
info = sync(self.loop, self.scheduler.start_ipython)
if magic_name == 'scheduler_if_ipython':
# default to %scheduler if in IPython, no magic otherwise
in_ipython = False
if 'IPython' in sys.modules:
from IPython import get_ipython
in_ipython = bool(get_ipython())
if in_ipython:
magic_name = 'scheduler'
else:
magic_name = None
if magic_name:
from ._ipython_utils import register_worker_magic
register_worker_magic(info, magic_name)
if qtconsole:
from ._ipython_utils import connect_qtconsole
connect_qtconsole(info, name='dask-scheduler',
extra_args=qtconsole_args,)
return info
@classmethod
def _expand_key(cls, k):
"""
Expand a user-provided task key specification, e.g. in a resources
or retries dictionary.
"""
if not isinstance(k, tuple):
k = (k,)
for kk in k:
if dask.is_dask_collection(kk):
for kkk in kk.__dask_keys__():
yield tokey(kkk)
else:
yield tokey(kk)
@classmethod
def _expand_retries(cls, retries, all_keys):
"""
Expand the user-provided "retries" specification
to a {task key: Integral} dictionary.
"""
if retries and isinstance(retries, dict):
result = {name: value
for key, value in retries.items()
for name in cls._expand_key(key)}
elif isinstance(retries, Integral):
# Each task unit may potentially fail, allow retrying all of them
result = {name: retries for name in all_keys}
else:
raise TypeError("`retries` should be an integer or dict, got %r"
% (type(retries,)))
return keymap(tokey, result)
@classmethod
def _expand_resources(cls, resources, all_keys):
"""
Expand the user-provided "resources" specification
to a {task key: {resource name: Number}} dictionary.
"""
# Resources can either be a single dict such as {'GPU': 2},
# indicating a requirement for all keys, or a nested dict
# such as {'x': {'GPU': 1}, 'y': {'SSD': 4}} indicating
# per-key requirements
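# e.g. (illustrative): {'GPU': 2} expands to {key: {'GPU': 2}} for every
# key in ``all_keys``, while {'x': {'GPU': 1}} applies only to the keys
# expanded from 'x'.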
if not isinstance(resources, dict):
raise TypeError("`resources` should be a dict, got %r"
% (type(resources,)))
per_key_reqs = {}
global_reqs = {}
all_keys = list(all_keys)
for k, v in resources.items():
if isinstance(v, dict):
# It's a per-key requirement
per_key_reqs.update((kk, v) for kk in cls._expand_key(k))
else:
# It's a global requirement
global_reqs.update((kk, {k: v}) for kk in all_keys)
if global_reqs and per_key_reqs:
raise ValueError("cannot have both per-key and all-key requirements "
"in resources dict %r" % (resources,))
return global_reqs or per_key_reqs
@classmethod
def get_restrictions(cls, collections, workers, allow_other_workers):
""" Get restrictions from inputs to compute/persist """
if isinstance(workers, (str, tuple, list)):
workers = {tuple(collections): workers}
if isinstance(workers, dict):
restrictions = {}
for colls, ws in workers.items():
if isinstance(ws, str):
ws = [ws]
if dask.is_dask_collection(colls):
keys = flatten(colls.__dask_keys__())
else:
keys = list({k for c in flatten(colls)
for k in flatten(c.__dask_keys__())})
restrictions.update({k: ws for k in keys})
else:
restrictions = {}
if allow_other_workers is True:
loose_restrictions = list(restrictions)
elif allow_other_workers:
loose_restrictions = list({k for c in flatten(allow_other_workers)
for k in c.__dask_keys__()})
else:
loose_restrictions = []
return restrictions, loose_restrictions
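# Illustrative: get_restrictions([x], workers='tcp://a:8786',
# allow_other_workers=True) pins every key of ``x`` to that one address
# and marks all of those restrictions as loose.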
@staticmethod
def collections_to_dsk(collections, *args, **kwargs):
return collections_to_dsk(collections, *args, **kwargs)
class Executor(Client):
""" Deprecated: see Client """
def __init__(self, *args, **kwargs):
warnings.warn("Executor has been renamed to Client")
super(Executor, self).__init__(*args, **kwargs)
def CompatibleExecutor(*args, **kwargs):
raise Exception("This has been moved to the Client.get_executor() method")
@gen.coroutine
def _wait(fs, timeout=None, return_when='ALL_COMPLETED'):
if timeout is not None and not isinstance(timeout, Number):
raise TypeError("timeout= keyword received a non-numeric value.\n"
"Beware that wait expects a list of values\n"
" Bad: wait(x, y, z)\n"
" Good: wait([x, y, z])")
fs = futures_of(fs)
if return_when == 'ALL_COMPLETED':
future = All({f._state.wait() for f in fs})
if timeout is not None:
future = gen.with_timeout(timedelta(seconds=timeout), future)
yield future
done, not_done = set(fs), set()
cancelled = [f.key for f in done
if f.status == 'cancelled']
if cancelled:
raise CancelledError(cancelled)
else:
raise NotImplementedError("Only return_when='ALL_COMPLETED' supported")
raise gen.Return(DoneAndNotDoneFutures(done, not_done))
ALL_COMPLETED = 'ALL_COMPLETED'
def wait(fs, timeout=None, return_when='ALL_COMPLETED'):
""" Wait until all futures are complete
Parameters
----------
fs: list of futures
timeout: number, optional
Time in seconds after which to raise a ``dask.distributed.TimeoutError``
Returns
-------
Named tuple of completed, not completed
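Examples
--------
An illustrative sketch; ``client`` and ``inc`` are assumed to exist.
>>> futures = client.map(inc, range(3)) # doctest: +SKIP
>>> done, not_done = wait(futures) # doctest: +SKIP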
"""
client = default_client()
result = client.sync(_wait, fs, timeout=timeout, return_when=return_when)
return result
@gen.coroutine
def _as_completed(fs, queue):
fs = futures_of(fs)
groups = groupby(lambda f: f.key, fs)
firsts = [v[0] for v in groups.values()]
wait_iterator = gen.WaitIterator(*[f._state.wait() for f in firsts])
while not wait_iterator.done():
yield wait_iterator.next()
# TODO: handle case of restarted futures
future = firsts[wait_iterator.current_index]
for f in groups[future.key]:
queue.put_nowait(f)
@gen.coroutine
def _first_completed(futures):
""" Return a single completed future
See Also:
_as_completed
"""
q = Queue()
yield _as_completed(futures, q)
result = yield q.get()
raise gen.Return(result)
class as_completed(object):
"""
Return futures in the order in which they complete
This returns an iterator that yields the input future objects in the order
in which they complete. Calling ``next`` on the iterator will block until
the next future completes, irrespective of order.
Additionally, you can also add more futures to this object during
computation with the ``.add`` method
Examples
--------
>>> x, y, z = client.map(inc, [1, 2, 3]) # doctest: +SKIP
>>> for future in as_completed([x, y, z]): # doctest: +SKIP
... print(future.result()) # doctest: +SKIP
3
2
4
Add more futures during computation
>>> x, y, z = client.map(inc, [1, 2, 3]) # doctest: +SKIP
>>> ac = as_completed([x, y, z]) # doctest: +SKIP
>>> for future in ac: # doctest: +SKIP
... print(future.result()) # doctest: +SKIP
... if random.random() < 0.5: # doctest: +SKIP
... ac.add(c.submit(double, future)) # doctest: +SKIP
4
2
8
3
6
12
24
Optionally wait until the result has been gathered as well
>>> ac = as_completed([x, y, z], with_results=True) # doctest: +SKIP
>>> for future, result in ac: # doctest: +SKIP
... print(result) # doctest: +SKIP
2
4
3
"""
def __init__(self, futures=None, loop=None, with_results=False):
if futures is None:
futures = []
self.futures = defaultdict(lambda: 0)
self.queue = pyQueue()
self.lock = threading.Lock()
self.loop = loop or default_client().loop
self.condition = Condition()
self.thread_condition = threading.Condition()
self.with_results = with_results
if futures:
self.update(futures)
def _notify(self):
self.condition.notify()
with self.thread_condition:
self.thread_condition.notify()
@gen.coroutine
def track_future(self, future):
try:
yield _wait(future)
except CancelledError:
del self.futures[future]
if not self.futures:
self._notify()
return
if self.with_results:
result = yield future._result()
with self.lock:
self.futures[future] -= 1
if not self.futures[future]:
del self.futures[future]
if self.with_results:
self.queue.put_nowait((future, result))
else:
self.queue.put_nowait(future)
self._notify()
def update(self, futures):
""" Add multiple futures to the collection.
The added futures will emit from the iterator once they finish"""
with self.lock:
for f in futures:
if not isinstance(f, Future):
raise TypeError("Input must be a future, got %s" % f)
self.futures[f] += 1
self.loop.add_callback(self.track_future, f)
def add(self, future):
""" Add a future to the collection
This future will emit from the iterator once it finishes
"""
self.update((future,))
def is_empty(self):
"""Return True if there are no waiting futures, False otherwise"""
return not self.count()
def count(self):
""" Return the number of futures yet to be returned
This includes both the number of futures still computing, as well as
those that are finished, but have not yet been returned from this
iterator.
"""
with self.lock:
return len(self.futures) + len(self.queue.queue)
def __iter__(self):
return self
def __aiter__(self):
return self
def __next__(self):
while self.queue.empty():
if self.is_empty():
raise StopIteration()
with self.thread_condition:
self.thread_condition.wait(timeout=0.100)
return self.queue.get()
@gen.coroutine
def __anext__(self):
if not self.futures and self.queue.empty():
raise StopAsyncIteration
while self.queue.empty():
if not self.futures:
raise StopAsyncIteration
yield self.condition.wait()
raise gen.Return(self.queue.get())
next = __next__
def next_batch(self, block=True):
""" Get next batch of futures from as_completed iterator
Parameters
----------
block: bool, optional
If True then wait until we have some result, otherwise return
immediately, even with an empty list. Defaults to True.
Examples
--------
>>> ac = as_completed(futures) # doctest: +SKIP
>>> client.gather(ac.next_batch()) # doctest: +SKIP
[4, 1, 3]
>>> client.gather(ac.next_batch(block=False)) # doctest: +SKIP
[]
Returns
-------
List of futures or (future, result) tuples
"""
if block:
batch = [next(self)]
else:
batch = []
while not self.queue.empty():
batch.append(self.queue.get())
return batch
def batches(self):
"""
Yield all finished futures at once rather than one-by-one
This returns an iterator of lists of futures or lists of
(future, result) tuples rather than individual futures or individual
(future, result) tuples. It will yield these as soon as possible
without waiting.
Examples
--------
>>> for batch in as_completed(futures).batches(): # doctest: +SKIP
... results = client.gather(batch)
... print(results)
[4, 2]
[1, 3, 7]
[5]
[6]
"""
while True:
try:
yield self.next_batch(block=True)
except StopIteration:
return
def AsCompleted(*args, **kwargs):
raise Exception("This has moved to as_completed")
def default_client(c=None):
""" Return a client if one has started """
c = c or _get_global_client()
if c:
return c
else:
raise ValueError("No clients found\n"
"Start a client and point it to the scheduler address\n"
" from distributed import Client\n"
" client = Client('ip-addr-of-scheduler:8786')\n")
def ensure_default_get(client):
if _globals['get'] != client.get:
print("Setting global dask scheduler to use distributed")
dask.set_options(get=client.get)
def redict_collection(c, dsk):
from dask.delayed import Delayed
if isinstance(c, Delayed):
return Delayed(c.key, [dsk])
else:
cc = copy.copy(c)
cc.dask = dsk
return cc
def futures_of(o, client=None):
""" Future objects in a collection """
stack = [o]
futures = set()
while stack:
x = stack.pop()
if type(x) in (tuple, set, list):
stack.extend(x)
if type(x) is dict:
stack.extend(x.values())
if isinstance(x, Future):
futures.add(x)
if dask.is_dask_collection(x):
stack.extend(x.__dask_graph__().values())
if client is not None:
bad = {f for f in futures if f.cancelled()}
if bad:
raise CancelledError(bad)
return list(futures)
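# Illustrative sketch (assumes a connected client ``c`` and a function
# ``inc``): futures_of walks nested containers and dask graphs, returning
# the unique Future objects found inside.
#
#     futures = c.map(inc, range(3))
#     futures_of({'a': futures[0], 'rest': [futures[1], (futures[2],)]})
#     # -> a list of the three futures, in no particular order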
def fire_and_forget(obj):
""" Run tasks at least once, even if we release the futures
Under normal operation Dask will not run any tasks for which there is not
an active future (this avoids unnecessary work in many situations).
However sometimes you want to just fire off a task, not track its future,
and expect it to finish eventually. You can use this function on a future
or collection of futures to ask Dask to complete the task even if no active
client is tracking it.
The results will not be kept in memory after the task completes (unless
there is an active future) so this is only useful for tasks that depend on
side effects.
Parameters
----------
obj: Future, list, dict, dask collection
The futures that you want to run at least once
Examples
--------
>>> fire_and_forget(client.submit(func, *args)) # doctest: +SKIP
"""
futures = futures_of(obj)
for future in futures:
future.client._send_to_scheduler({'op': 'client-desires-keys',
'keys': [tokey(future.key)],
'client': 'fire-and-forget'})
@contextmanager
def temp_default_client(c):
""" Set the default client for the duration of the context
Parameters
----------
c : Client
This is what default_client() will return within the with-block.
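Examples
--------
An illustrative sketch assuming two connected clients ``c1`` and ``c2``,
with ``c1`` currently the default.
>>> with temp_default_client(c2): # doctest: +SKIP
...     assert default_client() is c2
>>> assert default_client() is c1 # doctest: +SKIP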
"""
old_exec = default_client()
_set_global_client(c)
try:
yield
finally:
_set_global_client(old_exec)
def _close_global_client():
"""
Force close of global client. This cleans up when a client
wasn't closed explicitly, e.g. interactive sessions.
"""
c = _get_global_client()
if c is not None:
c._should_close_loop = False
c.close(timeout=2)
atexit.register(_close_global_client)