filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence
---|---|---|---|---|---|---|---|---|---|---|
pkg/executor/executor.go
|
// Copyright 2020 The SQLFlow Authors. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"bufio"
"bytes"
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"strings"
"sync"
"sqlflow.org/sqlflow/pkg/codegen/optimize"
"sqlflow.org/sqlflow/pkg/codegen/pai"
"sqlflow.org/sqlflow/pkg/codegen/tensorflow"
"sqlflow.org/sqlflow/pkg/codegen/xgboost"
"sqlflow.org/sqlflow/pkg/database"
"sqlflow.org/sqlflow/pkg/ir"
"sqlflow.org/sqlflow/pkg/model"
"sqlflow.org/sqlflow/pkg/pipe"
pb "sqlflow.org/sqlflow/pkg/proto"
)
var rePyDiagnosis = regexp.MustCompile("sqlflow_submitter.tensorflow.diag.SQLFlowDiagnostic: (.*)")
// Figures contains analyzed figures as strings
type Figures struct {
Image string
Text string
}
// Executor calls the code generator to generate a submitter program and executes it.
type Executor interface {
Setup(*pipe.Writer, *database.DB, string, string, *pb.Session)
ExecuteQuery(*ir.NormalStmt) error
ExecuteTrain(*ir.TrainStmt) error
ExecutePredict(*ir.PredictStmt) error
ExecuteExplain(*ir.ExplainStmt) error
ExecuteEvaluate(*ir.EvaluateStmt) error
ExecuteShowTrain(*ir.ShowTrainStmt) error
ExecuteOptimize(*ir.OptimizeStmt) error
ExecuteRun(*ir.RunStmt) error
GetTrainStmtFromModel() bool
}
// New returns a proper Executor according to the configuration in environment variables.
func New(executor string) Executor {
if executor == "" {
executor = os.Getenv("SQLFLOW_submitter")
}
switch executor {
case "default":
return &pythonExecutor{}
case "pai":
return &paiExecutor{&pythonExecutor{}}
case "alisa":
return &alisaExecutor{&pythonExecutor{}}
// TODO(typhoonzero): add executors like alps, elasticdl
default:
return &pythonExecutor{}
}
}
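// exampleNewFromEnv is a hedged usage sketch, not part of the original file: it shows how
// an executor could be selected through the SQLFLOW_submitter environment variable. The
// "pai" value below is purely illustrative.
func exampleNewFromEnv() Executor {
	os.Setenv("SQLFLOW_submitter", "pai")
	// Passing an empty string makes New fall back to the environment variable.
	return New("")
}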
type logChanWriter struct {
wr *pipe.Writer
m sync.Mutex
buf bytes.Buffer
prev string
}
// Run interprets the SQLFlow IR.
// TODO(yancey1989): this is a temporary way to decouple the executor from the ir package;
// per the discussion in https://github.com/sql-machine-learning/sqlflow/issues/2494,
// SQLFlow will generate target code instead of interpreting an IR.
func Run(it Executor, stmt ir.SQLFlowStmt) error {
	switch v := stmt.(type) {
	case *ir.TrainStmt:
		return it.ExecuteTrain(v)
	case *ir.PredictStmt:
		return it.ExecutePredict(v)
	case *ir.ExplainStmt:
		return it.ExecuteExplain(v)
	case *ir.EvaluateStmt:
		return it.ExecuteEvaluate(v)
	case *ir.OptimizeStmt:
		return it.ExecuteOptimize(v)
	case *ir.RunStmt:
		return it.ExecuteRun(v)
	case *ir.NormalStmt:
		return it.ExecuteQuery(v)
	case *ir.ShowTrainStmt:
		return it.ExecuteShowTrain(v)
	default:
		return fmt.Errorf("unregistered SQLFlow IR type: %s", v)
	}
}
func (cw *logChanWriter) Write(p []byte) (n int, err error) {
// Both cmd.Stdout and cmd.Stderr are writing to cw
cw.m.Lock()
defer cw.m.Unlock()
n, err = cw.buf.Write(p)
if err != nil {
return n, err
}
for {
line, err := cw.buf.ReadString('\n')
cw.prev = cw.prev + line
// ReadString returns err != nil if and only if the returned data
// does not end in delim.
if err != nil {
break
}
if err := cw.wr.Write(cw.prev); err != nil {
return len(cw.prev), err
}
cw.prev = ""
}
return n, nil
}
func (cw *logChanWriter) Close() {
if len(cw.prev) > 0 {
cw.wr.Write(cw.prev)
cw.prev = ""
}
}
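// exampleLogChanWriter is a hedged sketch, not part of the original file, showing the
// line-buffering behavior above: partial chunks are accumulated in prev until a newline
// arrives, and only complete lines are forwarded to the pipe writer.
func exampleLogChanWriter(w *pipe.Writer) {
	cw := &logChanWriter{wr: w}
	defer cw.Close()
	cw.Write([]byte("epoch 1: loss=")) // buffered; nothing is forwarded yet
	cw.Write([]byte("0.42\n"))         // the newline flushes "epoch 1: loss=0.42\n"
}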
type pythonExecutor struct {
Writer *pipe.Writer
Db *database.DB
ModelDir string
Cwd string
Session *pb.Session
}
func (s *pythonExecutor) Setup(w *pipe.Writer, db *database.DB, modelDir string, cwd string, session *pb.Session) {
// cwd is used to store train scripts and save output models.
s.Writer, s.Db, s.ModelDir, s.Cwd, s.Session = w, db, modelDir, cwd, session
}
func (s *pythonExecutor) SaveModel(cl *ir.TrainStmt) error {
m := model.New(s.Cwd, cl.OriginalSQL)
modelURI := cl.Into
if s.ModelDir != "" {
modelURI = fmt.Sprintf("file://%s/%s", s.ModelDir, cl.Into)
}
return m.Save(modelURI, s.Session)
}
func (s *pythonExecutor) runProgram(program string, logStderr bool) error {
cmd := sqlflowCmd(s.Cwd, s.Db.DriverName)
cmd.Stdin = bytes.NewBufferString(program)
errorLog, e := s.runCommand(cmd, nil, logStderr)
if e != nil {
// return the diagnostic message
sub := rePyDiagnosis.FindStringSubmatch(errorLog)
if len(sub) == 2 {
return fmt.Errorf("%s", sub[1])
}
// if no diagnostic message, return the full stack trace
return fmt.Errorf("failed: %v\n%sGenerated Code:%[2]s\n%s\n%[2]sOutput%[2]s\n%[4]v", e, "==========", program, errorLog)
}
return nil
}
func (s *pythonExecutor) runCommand(cmd *exec.Cmd, context map[string]string, logStderr bool) (string, error) {
cw := &logChanWriter{wr: s.Writer}
defer cw.Close()
for k, v := range context {
os.Setenv(k, v)
}
var stderr bytes.Buffer
var stdout bytes.Buffer
if logStderr {
w := io.MultiWriter(cw, &stderr)
wStdout := bufio.NewWriter(&stdout)
cmd.Stdout, cmd.Stderr = wStdout, w
} else {
w := io.MultiWriter(cw, &stdout)
wStderr := bufio.NewWriter(&stderr)
cmd.Stdout, cmd.Stderr = w, wStderr
}
if e := cmd.Run(); e != nil {
return stderr.String(), e
}
return ``, nil
}
func (s *pythonExecutor) ExecuteQuery(stmt *ir.NormalStmt) error {
return runNormalStmt(s.Writer, string(*stmt), s.Db)
}
func (s *pythonExecutor) ExecuteTrain(cl *ir.TrainStmt) (e error) {
var code string
if cl.GetModelKind() == ir.XGBoost {
if code, e = xgboost.Train(cl, s.Session); e != nil {
return e
}
} else {
if code, e = tensorflow.Train(cl, s.Session); e != nil {
return e
}
}
if e := s.runProgram(code, false); e != nil {
return e
}
return s.SaveModel(cl)
}
func (s *pythonExecutor) ExecutePredict(cl *ir.PredictStmt) (e error) {
// NOTE(typhoonzero): model is already loaded under s.Cwd
if e = createPredictionResultTable(cl, s.Db, s.Session); e != nil {
return e
}
var code string
if cl.TrainStmt.GetModelKind() == ir.XGBoost {
if code, e = xgboost.Pred(cl, s.Session); e != nil {
return e
}
} else {
if code, e = tensorflow.Pred(cl, s.Session); e != nil {
return e
}
}
return s.runProgram(code, false)
}
func (s *pythonExecutor) ExecuteExplain(cl *ir.ExplainStmt) error {
// NOTE(typhoonzero): model is already loaded under s.Cwd
var code string
var err error
db, err := database.OpenAndConnectDB(s.Session.DbConnStr)
if err != nil {
return err
}
defer db.Close()
if cl.TrainStmt.GetModelKind() == ir.XGBoost {
code, err = xgboost.Explain(cl, s.Session)
// TODO(typhoonzero): deal with XGBoost model explain result table creation.
} else {
code, err = tensorflow.Explain(cl, s.Session)
if cl.Into != "" {
err := createExplainResultTable(db, cl, cl.Into, pai.ModelTypeTF, cl.TrainStmt.Estimator)
if err != nil {
return err
}
}
}
if err != nil {
return err
}
if err = s.runProgram(code, false); err != nil {
return err
}
img, err := readExplainResult(path.Join(s.Cwd, "summary.png"))
if err != nil {
return err
}
termFigure, err := ioutil.ReadFile(path.Join(s.Cwd, "summary.txt"))
if err != nil {
return err
}
s.Writer.Write(Figures{img, string(termFigure)})
return nil
}
func (s *pythonExecutor) ExecuteEvaluate(cl *ir.EvaluateStmt) error {
// NOTE(typhoonzero): model is already loaded under s.Cwd
var code string
var err error
if cl.TrainStmt.GetModelKind() == ir.XGBoost {
code, err = xgboost.Evaluate(cl, s.Session)
if err != nil {
return err
}
} else {
code, err = tensorflow.Evaluate(cl, s.Session)
if err != nil {
return err
}
}
if cl.Into != "" {
// create evaluation result table
db, err := database.OpenAndConnectDB(s.Session.DbConnStr)
if err != nil {
return err
}
defer db.Close()
// default always output evaluation loss
metricNames := []string{"loss"}
metricsAttr, ok := cl.Attributes["validation.metrics"]
if ok {
metricsList := strings.Split(metricsAttr.(string), ",")
metricNames = append(metricNames, metricsList...)
}
err = createEvaluationResultTable(db, cl.Into, metricNames)
if err != nil {
return err
}
}
if err = s.runProgram(code, false); err != nil {
return err
}
return nil
}
func generateOptFlowOptimizeCodeAndExecute(cl *ir.OptimizeStmt, submitter *pythonExecutor, session *pb.Session, cwd string, dbName string, tableName string, isPai bool) error {
// Generate optimization code
runnerFileName := "custom_optimize_runner"
runnerCode, submitCode, err := optimize.GenerateOptFlowOptimizeCode(cl, session, dbName, tableName,
runnerFileName)
if err != nil {
return err
}
// Write the runner code to cwd for submission
runnerFilePath := fmt.Sprintf("%s/%s.py", cwd, runnerFileName)
err = ioutil.WriteFile(runnerFilePath, []byte(runnerCode), 0644)
if err != nil {
return err
}
if isPai {
err = copyPythonPackage("sqlflow_submitter", cwd)
if err != nil {
return err
}
}
// Note: OptFlow submit API logs on stderr but not stdout
if err = submitter.runProgram(submitCode, true); err != nil {
return err
}
return nil
}
func (s *pythonExecutor) ExecuteOptimize(cl *ir.OptimizeStmt) error {
// TODO(sneaxiy): to be implemented
return fmt.Errorf("ExecuteOptimize is not supported in default submitter")
}
func (s *pythonExecutor) ExecuteRun(runStmt *ir.RunStmt) error {
if len(runStmt.Parameters) == 0 {
return fmt.Errorf("Parameters shouldn't be empty")
}
context := map[string]string{
"SQLFLOW_TO_RUN_SELECT": runStmt.Select,
"SQLFLOW_TO_RUN_INTO": runStmt.Into,
}
// The first parameter is the program name
program := runStmt.Parameters[0]
fileExtension := filepath.Ext(program)
if len(fileExtension) == 0 {
// If the file extension is empty, it's an executable binary.
// Build the command
cmd := exec.Command(program, runStmt.Parameters[1:]...)
cmd.Dir = s.Cwd
_, e := s.runCommand(cmd, context, false)
return e
} else if strings.EqualFold(fileExtension, ".py") {
// If the first parameter is a Python program
if _, e := os.Stat(program); e != nil {
return fmt.Errorf("Failed to get the python file %s", program)
}
// Build the command
cmd := exec.Command("python", runStmt.Parameters...)
cmd.Dir = s.Cwd
_, e := s.runCommand(cmd, context, false)
return e
} else {
// TODO(brightcoder01): Implement the execution of programs built with other scripting languages.
return fmt.Errorf("executables other than Python programs are not supported yet")
}
}
func createEvaluationResultTable(db *database.DB, tableName string, metricNames []string) error {
dropStmt := fmt.Sprintf(`DROP TABLE IF EXISTS %s;`, tableName)
var e error
if _, e = db.Exec(dropStmt); e != nil {
return fmt.Errorf("failed executing %s: %q", dropStmt, e)
}
columnDef := ""
columnDefList := []string{}
if db.DriverName == "mysql" {
for _, mn := range metricNames {
columnDefList = append(columnDefList,
fmt.Sprintf("%s VARCHAR(255)", mn))
}
} else {
// Hive, MaxCompute
for _, mn := range metricNames {
columnDefList = append(columnDefList,
fmt.Sprintf("%s STRING", mn))
}
}
columnDef = strings.Join(columnDefList, ",")
createStmt := fmt.Sprintf(`CREATE TABLE IF NOT EXISTS %s (%s);`, tableName, columnDef)
if _, e = db.Exec(createStmt); e != nil {
return fmt.Errorf("failed executing %s: %q", createStmt, e)
}
return nil
}
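// exampleEvaluationDDL is a hedged illustration, not part of the original file, of the DDL
// assembled above: for a MySQL driver and metricNames of ["loss", "accuracy"], the CREATE
// statement would be roughly the string returned here. The table name is hypothetical.
func exampleEvaluationDDL() string {
	return "CREATE TABLE IF NOT EXISTS my_eval_result (loss VARCHAR(255),accuracy VARCHAR(255));"
}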
func readExplainResult(target string) (string, error) {
r, err := os.Open(target)
if err != nil {
return "", err
}
defer r.Close()
body, err := ioutil.ReadAll(r)
if err != nil {
return "", err
}
img := base64.StdEncoding.EncodeToString(body)
return fmt.Sprintf("<div align='center'><img src='data:image/png;base64,%s' /></div>", img), nil
}
func (s *pythonExecutor) GetTrainStmtFromModel() bool { return true }
func (s *pythonExecutor) ExecuteShowTrain(showTrain *ir.ShowTrainStmt) error {
model, err := model.Load(showTrain.ModelName, "", s.Db)
if err != nil {
s.Writer.Write("Load model meta " + showTrain.ModelName + " failed.")
return err
}
header := make(map[string]interface{})
header["columnNames"] = []string{"Model", "Train Statement"}
s.Writer.Write(header)
s.Writer.Write([]interface{}{showTrain.ModelName, strings.TrimSpace(model.TrainSelect)})
return nil
}
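// exampleRunNormalStmt is a hedged end-to-end sketch, not part of the original file: the
// writer, database connection, session, and working directory are assumed to be supplied
// by the caller, and the SQL text is illustrative.
func exampleRunNormalStmt(w *pipe.Writer, db *database.DB, sess *pb.Session, cwd string) error {
	sub := New("default")
	sub.Setup(w, db, "" /* modelDir */, cwd, sess)
	stmt := ir.NormalStmt("SELECT 1;")
	return Run(sub, &stmt)
}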
| ["\"SQLFLOW_submitter\""] | [] | ["SQLFLOW_submitter"] | [] | ["SQLFLOW_submitter"] | go | 1 | 0 | |
benchmark_smac_trie_test.go
|
// Copyright Piero de Salvia.
// All Rights Reserved
package smac
import (
"math/rand"
"os"
"testing"
)
func init() {
initBenchmark()
goPath := os.Getenv("GOPATH")
wordFile := goPath + "/src/github.com/pierods/smac/demo/allwords.txt"
benchAlphabet := "abcdefghijklmnopqrstuvwxyz1234567890'/&\""
autoComplete, err := NewAutoCompleteTrieF(benchAlphabet, wordFile, 0, 0)
if err != nil {
os.Exit(-1)
}
AcTrie = autoComplete
}
var AcTrie AutoCompleteTrie
func BenchmarkTrieCompleteWords(b *testing.B) {
for i := 0; i < b.N; i++ {
w := testWords[rand.Intn(wordsInTestData)]
AcTrie.Complete(w)
}
}
func BenchmarkTriePrefixes(b *testing.B) {
for i := 0; i < b.N; i++ {
p := testPrefixes[rand.Intn(prefixesInTestData)]
AcTrie.Complete(p)
}
}
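// A hedged note, not part of the original file: these benchmarks would typically be run
// with the standard Go tooling, e.g.
//
//	go test -bench=. -benchmem
//
// from the package directory, with demo/allwords.txt present under $GOPATH as expected
// by init() above.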
| ["\"GOPATH\""] | [] | ["GOPATH"] | [] | ["GOPATH"] | go | 1 | 0 | |
config.py
|
import os
class Config:
NEWS_SOURCES_BASE_URL = 'https://newsapi.org/v2/sources?language=en&category={}&apiKey={}'
ARTICLES_BASE_URL = 'https://newsapi.org/v2/everything?language=en&sources={}&apiKey={}'
NEWS_API_KEY = os.environ.get('NEWS_API_KEY')
@staticmethod
def init_app(app):
pass
class ProdConfig(Config):
pass
class DevConfig(Config):
DEBUG = True
config_options = {
'development':DevConfig,
'production':ProdConfig
}
| [] | [] | ["NEWS_API_KEY"] | [] | ["NEWS_API_KEY"] | python | 1 | 0 | |
test/e2e/framework/util.go
|
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
goruntime "runtime"
"sort"
"strconv"
"strings"
"sync"
"syscall"
"text/tabwriter"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
"k8s.io/kubernetes/pkg/api"
apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
batch "k8s.io/kubernetes/pkg/apis/batch/v1"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/typed/discovery"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/controller"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/runtime/schema"
sshutil "k8s.io/kubernetes/pkg/ssh"
"k8s.io/kubernetes/pkg/types"
uexec "k8s.io/kubernetes/pkg/util/exec"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/system"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/version"
"k8s.io/kubernetes/pkg/watch"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
testutils "k8s.io/kubernetes/test/utils"
"github.com/blang/semver"
"golang.org/x/crypto/ssh"
"golang.org/x/net/websocket"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
gomegatypes "github.com/onsi/gomega/types"
)
const (
// How long to wait for the pod to be listable
PodListTimeout = time.Minute
// Initial pod start can be delayed O(minutes) by slow docker pulls
// TODO: Make this 30 seconds once #4566 is resolved.
PodStartTimeout = 5 * time.Minute
// How long to wait for the pod to no longer be running
podNoLongerRunningTimeout = 30 * time.Second
// If there are any orphaned namespaces to clean up, this test is running
// on a long-lived cluster. A long wait here is preferable to spurious test
// failures caused by leaked resources from a previous test run.
NamespaceCleanupTimeout = 15 * time.Minute
// Some pods can take much longer to get ready due to volume attach/detach latency.
slowPodStartTimeout = 15 * time.Minute
// How long to wait for a service endpoint to be resolvable.
ServiceStartTimeout = 1 * time.Minute
// How often to Poll pods, nodes and claims.
Poll = 2 * time.Second
// service accounts are provisioned after namespace creation
// a service account is required to support pod creation in a namespace as part of admission control
ServiceAccountProvisionTimeout = 2 * time.Minute
// How long to try single API calls (like 'get' or 'list'). Used to prevent
// transient failures from failing tests.
// TODO: client should not apply this timeout to Watch calls. Increased from 30s until that is fixed.
SingleCallTimeout = 5 * time.Minute
// How long nodes have to be "ready" when a test begins. They should already
// be "ready" before the test starts, so this is small.
NodeReadyInitialTimeout = 20 * time.Second
// How long pods have to be "ready" when a test begins.
PodReadyBeforeTimeout = 5 * time.Minute
// How long pods have to become scheduled onto nodes
podScheduledBeforeTimeout = PodListTimeout + (20 * time.Second)
podRespondingTimeout = 2 * time.Minute
ServiceRespondingTimeout = 2 * time.Minute
EndpointRegisterTimeout = time.Minute
// How long claims have to become dynamically provisioned
ClaimProvisionTimeout = 5 * time.Minute
// When these values are updated, also update cmd/kubelet/app/options/options.go
currentPodInfraContainerImageName = "gcr.io/google_containers/pause"
currentPodInfraContainerImageVersion = "3.0"
// How long each node is given during a process that restarts all nodes
// before the test is considered failed. (Note that the total time to
// restart all nodes will be this number times the number of nodes.)
RestartPerNodeTimeout = 5 * time.Minute
// How often to Poll the status of a restart.
RestartPoll = 20 * time.Second
// How long a node is allowed to become "Ready" after it is restarted before
// the test is considered failed.
RestartNodeReadyAgainTimeout = 5 * time.Minute
// How long a pod is allowed to become "running" and "ready" after a node
// restart before test is considered failed.
RestartPodReadyAgainTimeout = 5 * time.Minute
// Number of times we want to retry Updates in case of conflict
UpdateRetries = 5
// Number of objects that gc can delete in a second.
// GC issues 2 requests for a single delete.
gcThroughput = 10
// TODO(justinsb): Avoid hardcoding this.
awsMasterIP = "172.20.0.9"
// Default time to wait for nodes to become schedulable.
// Set so high for scale tests.
NodeSchedulableTimeout = 4 * time.Hour
)
var (
// Label allocated to the image puller static pod that runs on each node
// before e2es.
ImagePullerLabels = map[string]string{"name": "e2e-image-puller"}
// For parsing Kubectl version for version-skewed testing.
gitVersionRegexp = regexp.MustCompile("GitVersion:\"(v.+?)\"")
// Slice of regexps for names of pods that have to be running to consider a Node "healthy"
requiredPerNodePods = []*regexp.Regexp{
regexp.MustCompile(".*kube-proxy.*"),
regexp.MustCompile(".*fluentd-elasticsearch.*"),
regexp.MustCompile(".*node-problem-detector.*"),
}
)
type Address struct {
internalIP string
externalIP string
hostname string
}
// GetServerArchitecture fetches the architecture of the cluster's apiserver.
func GetServerArchitecture(c clientset.Interface) string {
arch := ""
sVer, err := c.Discovery().ServerVersion()
if err != nil || sVer.Platform == "" {
// If we failed to get the server version for some reason, default to amd64.
arch = "amd64"
} else {
// Split the platform string into OS and Arch separately.
// The platform string may for example be "linux/amd64", "linux/arm" or "windows/amd64".
osArchArray := strings.Split(sVer.Platform, "/")
arch = osArchArray[1]
}
return arch
}
// GetPauseImageName fetches the pause image name for the same architecture as the apiserver.
func GetPauseImageName(c clientset.Interface) string {
return currentPodInfraContainerImageName + "-" + GetServerArchitecture(c) + ":" + currentPodInfraContainerImageVersion
}
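// examplePauseImageName is a hedged illustration, not part of the original file: for an
// amd64 apiserver, GetPauseImageName above would yield the string returned here, i.e.
// "gcr.io/google_containers/pause-amd64:3.0".
func examplePauseImageName() string {
	return currentPodInfraContainerImageName + "-amd64:" + currentPodInfraContainerImageVersion
}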
// GetPauseImageNameForHostArch fetches the pause image name for the same architecture the test is running on.
// TODO: move this function to the test/utils
func GetPauseImageNameForHostArch() string {
return currentPodInfraContainerImageName + "-" + goruntime.GOARCH + ":" + currentPodInfraContainerImageVersion
}
// SubResource proxy should have been functional in v1.0.0, but SubResource
// proxy via tunneling is known to be broken in v1.0. See
// https://github.com/kubernetes/kubernetes/pull/15224#issuecomment-146769463
//
// TODO(ihmccreery): remove once we don't care about v1.0 anymore, (tentatively
// in v1.3).
var SubResourcePodProxyVersion = version.MustParse("v1.1.0")
var subResourceServiceAndNodeProxyVersion = version.MustParse("v1.2.0")
func GetServicesProxyRequest(c clientset.Interface, request *restclient.Request) (*restclient.Request, error) {
subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c.Discovery())
if err != nil {
return nil, err
}
if subResourceProxyAvailable {
return request.Resource("services").SubResource("proxy"), nil
}
return request.Prefix("proxy").Resource("services"), nil
}
// unique identifier of the e2e run
var RunId = uuid.NewUUID()
type CreateTestingNSFn func(baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error)
type ContainerFailures struct {
status *v1.ContainerStateTerminated
Restarts int
}
func GetMasterHost() string {
masterUrl, err := url.Parse(TestContext.Host)
ExpectNoError(err)
return masterUrl.Host
}
func nowStamp() string {
return time.Now().Format(time.StampMilli)
}
func log(level string, format string, args ...interface{}) {
fmt.Fprintf(GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...)
}
func Logf(format string, args ...interface{}) {
log("INFO", format, args...)
}
func Failf(format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
log("INFO", msg)
Fail(nowStamp()+": "+msg, 1)
}
func Skipf(format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
log("INFO", msg)
Skip(nowStamp() + ": " + msg)
}
func SkipUnlessNodeCountIsAtLeast(minNodeCount int) {
if TestContext.CloudConfig.NumNodes < minNodeCount {
Skipf("Requires at least %d nodes (not %d)", minNodeCount, TestContext.CloudConfig.NumNodes)
}
}
func SkipUnlessNodeCountIsAtMost(maxNodeCount int) {
if TestContext.CloudConfig.NumNodes <= maxNodeCount {
Skipf("Requires at most %d nodes (not %d)", maxNodeCount, TestContext.CloudConfig.NumNodes)
}
}
func SkipUnlessAtLeast(value int, minValue int, message string) {
if value < minValue {
Skipf(message)
}
}
func SkipIfProviderIs(unsupportedProviders ...string) {
if ProviderIs(unsupportedProviders...) {
Skipf("Not supported for providers %v (found %s)", unsupportedProviders, TestContext.Provider)
}
}
func SkipUnlessProviderIs(supportedProviders ...string) {
if !ProviderIs(supportedProviders...) {
Skipf("Only supported for providers %v (not %s)", supportedProviders, TestContext.Provider)
}
}
func SkipUnlessNodeOSDistroIs(supportedNodeOsDistros ...string) {
if !NodeOSDistroIs(supportedNodeOsDistros...) {
Skipf("Only supported for node OS distro %v (not %s)", supportedNodeOsDistros, TestContext.NodeOSDistro)
}
}
func SkipIfContainerRuntimeIs(runtimes ...string) {
for _, runtime := range runtimes {
if runtime == TestContext.ContainerRuntime {
Skipf("Not supported under container runtime %s", runtime)
}
}
}
func ProviderIs(providers ...string) bool {
for _, provider := range providers {
if strings.ToLower(provider) == strings.ToLower(TestContext.Provider) {
return true
}
}
return false
}
func NodeOSDistroIs(supportedNodeOsDistros ...string) bool {
for _, distro := range supportedNodeOsDistros {
if strings.ToLower(distro) == strings.ToLower(TestContext.NodeOSDistro) {
return true
}
}
return false
}
func SkipUnlessServerVersionGTE(v semver.Version, c discovery.ServerVersionInterface) {
gte, err := ServerVersionGTE(v, c)
if err != nil {
Failf("Failed to get server version: %v", err)
}
if !gte {
Skipf("Not supported for server versions before %q", v)
}
}
// Detects whether the federation namespace exists in the underlying cluster
func SkipUnlessFederated(c clientset.Interface) {
federationNS := os.Getenv("FEDERATION_NAMESPACE")
if federationNS == "" {
federationNS = "federation"
}
_, err := c.Core().Namespaces().Get(federationNS)
if err != nil {
if apierrs.IsNotFound(err) {
Skipf("Could not find federation namespace %s: skipping federated test", federationNS)
} else {
Failf("Unexpected error getting namespace: %v", err)
}
}
}
func SkipIfMissingResource(clientPool dynamic.ClientPool, gvr schema.GroupVersionResource, namespace string) {
dynamicClient, err := clientPool.ClientForGroupVersionResource(gvr)
if err != nil {
Failf("Unexpected error getting dynamic client for %v: %v", gvr.GroupVersion(), err)
}
apiResource := metav1.APIResource{Name: gvr.Resource, Namespaced: true}
_, err = dynamicClient.Resource(&apiResource, namespace).List(&v1.ListOptions{})
if err != nil {
// not all resources support list, so we ignore those
if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) {
Skipf("Could not find %s resource, skipping test: %#v", gvr, err)
}
Failf("Unexpected error getting %v: %v", gvr, err)
}
}
// ProvidersWithSSH are those providers where each node is accessible with SSH
var ProvidersWithSSH = []string{"gce", "gke", "aws"}
// providersWithMasterSSH are those providers where master node is accessible with SSH
var providersWithMasterSSH = []string{"gce", "gke", "kubemark", "aws"}
type podCondition func(pod *v1.Pod) (bool, error)
// logPodStates logs basic info of provided pods for debugging.
func logPodStates(pods []v1.Pod) {
// Find maximum widths for pod, node, and phase strings for column printing.
maxPodW, maxNodeW, maxPhaseW, maxGraceW := len("POD"), len("NODE"), len("PHASE"), len("GRACE")
for i := range pods {
pod := &pods[i]
if len(pod.ObjectMeta.Name) > maxPodW {
maxPodW = len(pod.ObjectMeta.Name)
}
if len(pod.Spec.NodeName) > maxNodeW {
maxNodeW = len(pod.Spec.NodeName)
}
if len(pod.Status.Phase) > maxPhaseW {
maxPhaseW = len(pod.Status.Phase)
}
}
// Increase widths by one to separate by a single space.
maxPodW++
maxNodeW++
maxPhaseW++
maxGraceW++
// Log pod info. * does space padding, - makes them left-aligned.
Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
maxPodW, "POD", maxNodeW, "NODE", maxPhaseW, "PHASE", maxGraceW, "GRACE", "CONDITIONS")
for _, pod := range pods {
grace := ""
if pod.DeletionGracePeriodSeconds != nil {
grace = fmt.Sprintf("%ds", *pod.DeletionGracePeriodSeconds)
}
Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
maxPodW, pod.ObjectMeta.Name, maxNodeW, pod.Spec.NodeName, maxPhaseW, pod.Status.Phase, maxGraceW, grace, pod.Status.Conditions)
}
Logf("") // Final empty line helps for readability.
}
// errorBadPodsStates creates an error message with basic info about bad pods, for debugging.
func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration) string {
errStr := fmt.Sprintf("%d / %d pods in namespace %q are NOT in %s state in %v\n", len(badPods), desiredPods, ns, desiredState, timeout)
// Print bad pod info only if there are at most 10 bad pods.
if len(badPods) > 10 {
return errStr + "There are too many bad pods. Please check log for details."
}
buf := bytes.NewBuffer(nil)
w := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0)
fmt.Fprintln(w, "POD\tNODE\tPHASE\tGRACE\tCONDITIONS")
for _, badPod := range badPods {
grace := ""
if badPod.DeletionGracePeriodSeconds != nil {
grace = fmt.Sprintf("%ds", *badPod.DeletionGracePeriodSeconds)
}
podInfo := fmt.Sprintf("%s\t%s\t%s\t%s\t%s",
badPod.ObjectMeta.Name, badPod.Spec.NodeName, badPod.Status.Phase, grace, badPod.Status.Conditions)
fmt.Fprintln(w, podInfo)
}
w.Flush()
return errStr + buf.String()
}
// WaitForPodsSuccess waits till all pods matching the given selector enter
// the Success state. The caller is expected to only invoke this method once the
// pods have been created.
func WaitForPodsSuccess(c clientset.Interface, ns string, successPodLabels map[string]string, timeout time.Duration) error {
successPodSelector := labels.SelectorFromSet(successPodLabels)
start, badPods, desiredPods := time.Now(), []v1.Pod{}, 0
if wait.PollImmediate(30*time.Second, timeout, func() (bool, error) {
podList, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: successPodSelector.String()})
if err != nil {
Logf("Error getting pods in namespace %q: %v", ns, err)
return false, nil
}
if len(podList.Items) == 0 {
Logf("Waiting for pods to enter Success, but no pods in %q match label %v", ns, successPodLabels)
return true, nil
}
badPods = []v1.Pod{}
desiredPods = len(podList.Items)
for _, pod := range podList.Items {
if pod.Status.Phase != v1.PodSucceeded {
badPods = append(badPods, pod)
}
}
successPods := len(podList.Items) - len(badPods)
Logf("%d / %d pods in namespace %q are in Success state (%d seconds elapsed)",
successPods, len(podList.Items), ns, int(time.Since(start).Seconds()))
if len(badPods) == 0 {
return true, nil
}
return false, nil
}) != nil {
logPodStates(badPods)
LogPodsWithLabels(c, ns, successPodLabels, Logf)
return errors.New(errorBadPodsStates(badPods, desiredPods, ns, "SUCCESS", timeout))
}
return nil
}
var ReadyReplicaVersion = version.MustParse("v1.4.0")
// WaitForPodsRunningReady waits up to timeout to ensure that all pods in
// namespace ns are either running and ready, or failed but controlled by a
// controller. Also, it ensures that at least minPods are running and
// ready. It has separate behavior from other 'wait for' pods functions in
// that it requests the list of pods on every iteration. This is useful, for
// example, in cluster startup, because the number of pods increases while
// waiting.
// If ignoreLabels is not empty, pods matching this selector are ignored and
// this function waits for minPods to enter Running/Ready and for all pods
// matching ignoreLabels to enter Success phase. Otherwise an error is returned
// even if there are minPods pods, some of which are in Running/Ready
// and some in Success. This is to allow the client to decide if "Success"
// means "Ready" or not.
func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods int32, timeout time.Duration, ignoreLabels map[string]string) error {
// This can be removed when we no longer have 1.3 servers running with upgrade tests.
hasReadyReplicas, err := ServerVersionGTE(ReadyReplicaVersion, c.Discovery())
if err != nil {
Logf("Error getting the server version: %v", err)
return err
}
ignoreSelector := labels.SelectorFromSet(ignoreLabels)
start := time.Now()
Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
timeout, minPods, ns)
wg := sync.WaitGroup{}
wg.Add(1)
var waitForSuccessError error
badPods := []v1.Pod{}
desiredPods := 0
go func() {
waitForSuccessError = WaitForPodsSuccess(c, ns, ignoreLabels, timeout)
wg.Done()
}()
if wait.PollImmediate(Poll, timeout, func() (bool, error) {
// We get the new list of pods, replication controllers, and
// replica sets in every iteration because more pods come
// online during startup and we want to ensure they are also
// checked.
replicas, replicaOk := int32(0), int32(0)
if hasReadyReplicas {
rcList, err := c.Core().ReplicationControllers(ns).List(v1.ListOptions{})
if err != nil {
Logf("Error getting replication controllers in namespace '%s': %v", ns, err)
return false, nil
}
for _, rc := range rcList.Items {
replicas += *rc.Spec.Replicas
replicaOk += rc.Status.ReadyReplicas
}
rsList, err := c.Extensions().ReplicaSets(ns).List(v1.ListOptions{})
if err != nil {
Logf("Error getting replication sets in namespace %q: %v", ns, err)
return false, nil
}
for _, rs := range rsList.Items {
replicas += *rs.Spec.Replicas
replicaOk += rs.Status.ReadyReplicas
}
}
podList, err := c.Core().Pods(ns).List(v1.ListOptions{})
if err != nil {
Logf("Error getting pods in namespace '%s': %v", ns, err)
return false, nil
}
nOk := int32(0)
badPods = []v1.Pod{}
desiredPods = len(podList.Items)
for _, pod := range podList.Items {
if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(pod.Labels)) {
Logf("%v in state %v, ignoring", pod.Name, pod.Status.Phase)
continue
}
if res, err := testutils.PodRunningReady(&pod); res && err == nil {
nOk++
} else {
if pod.Status.Phase != v1.PodFailed {
Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase)
badPods = append(badPods, pod)
} else if _, ok := pod.Annotations[v1.CreatedByAnnotation]; !ok {
Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name)
badPods = append(badPods, pod)
}
//ignore failed pods that are controlled by some controller
}
}
Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)",
nOk, len(podList.Items), ns, int(time.Since(start).Seconds()))
if hasReadyReplicas {
Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk)
}
if replicaOk == replicas && nOk >= minPods && len(badPods) == 0 {
return true, nil
}
logPodStates(badPods)
return false, nil
}) != nil {
return errors.New(errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout))
}
wg.Wait()
if waitForSuccessError != nil {
return waitForSuccessError
}
return nil
}
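// exampleWaitForSystemPods is a hedged sketch, not part of the original file, of a typical
// call: wait for at least one running-and-ready pod in kube-system while ignoring the
// image puller pods. The namespace and minimum pod count are illustrative.
func exampleWaitForSystemPods(c clientset.Interface) error {
	return WaitForPodsRunningReady(c, "kube-system", int32(1), PodStartTimeout, ImagePullerLabels)
}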
func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string, logFunc func(format string, args ...interface{})) {
for _, container := range pod.Spec.Containers {
if strings.Contains(container.Name, containerNameSubstr) {
// Contains() matches all strings if substr is empty
logs, err := GetPodLogs(c, pod.Namespace, pod.Name, container.Name)
if err != nil {
logs, err = getPreviousPodLogs(c, pod.Namespace, pod.Name, container.Name)
if err != nil {
logFunc("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container.Name, err)
}
}
By(fmt.Sprintf("Logs of %v/%v:%v on node %v", pod.Namespace, pod.Name, container.Name, pod.Spec.NodeName))
logFunc("%s : STARTLOG\n%s\nENDLOG for container %v:%v:%v", containerNameSubstr, logs, pod.Namespace, pod.Name, container.Name)
}
}
}
func LogFailedContainers(c clientset.Interface, ns string, logFunc func(format string, args ...interface{})) {
podList, err := c.Core().Pods(ns).List(v1.ListOptions{})
if err != nil {
logFunc("Error getting pods in namespace '%s': %v", ns, err)
return
}
logFunc("Running kubectl logs on non-ready containers in %v", ns)
for _, pod := range podList.Items {
if res, err := testutils.PodRunningReady(&pod); !res || err != nil {
kubectlLogPod(c, pod, "", Logf)
}
}
}
func LogPodsWithLabels(c clientset.Interface, ns string, match map[string]string, logFunc func(format string, args ...interface{})) {
podList, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()})
if err != nil {
logFunc("Error getting pods in namespace %q: %v", ns, err)
return
}
logFunc("Running kubectl logs on pods with labels %v in %v", match, ns)
for _, pod := range podList.Items {
kubectlLogPod(c, pod, "", logFunc)
}
}
func LogContainersInPodsWithLabels(c clientset.Interface, ns string, match map[string]string, containerSubstr string, logFunc func(format string, args ...interface{})) {
podList, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()})
if err != nil {
Logf("Error getting pods in namespace %q: %v", ns, err)
return
}
for _, pod := range podList.Items {
kubectlLogPod(c, pod, containerSubstr, logFunc)
}
}
// DeleteNamespaces deletes all namespaces that match the given delete and skip filters.
// Filter is by simple strings.Contains; first skip filter, then delete filter.
// Returns the list of deleted namespaces or an error.
func DeleteNamespaces(c clientset.Interface, deleteFilter, skipFilter []string) ([]string, error) {
By("Deleting namespaces")
nsList, err := c.Core().Namespaces().List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
var deleted []string
var wg sync.WaitGroup
OUTER:
for _, item := range nsList.Items {
if skipFilter != nil {
for _, pattern := range skipFilter {
if strings.Contains(item.Name, pattern) {
continue OUTER
}
}
}
if deleteFilter != nil {
var shouldDelete bool
for _, pattern := range deleteFilter {
if strings.Contains(item.Name, pattern) {
shouldDelete = true
break
}
}
if !shouldDelete {
continue OUTER
}
}
wg.Add(1)
deleted = append(deleted, item.Name)
go func(nsName string) {
defer wg.Done()
defer GinkgoRecover()
Expect(c.Core().Namespaces().Delete(nsName, nil)).To(Succeed())
Logf("namespace : %v api call to delete is complete ", nsName)
}(item.Name)
}
wg.Wait()
return deleted, nil
}
func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeout time.Duration) error {
By("Waiting for namespaces to vanish")
nsMap := map[string]bool{}
for _, ns := range namespaces {
nsMap[ns] = true
}
// Now poll until all namespaces have been eradicated.
return wait.Poll(2*time.Second, timeout,
func() (bool, error) {
nsList, err := c.Core().Namespaces().List(v1.ListOptions{})
if err != nil {
return false, err
}
for _, item := range nsList.Items {
if _, ok := nsMap[item.Name]; ok {
return false, nil
}
}
return true, nil
})
}
func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountName string, timeout time.Duration) error {
w, err := c.Core().ServiceAccounts(ns).Watch(v1.SingleObject(v1.ObjectMeta{Name: serviceAccountName}))
if err != nil {
return err
}
_, err = watch.Until(timeout, w, conditions.ServiceAccountHasSecrets)
return err
}
func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeout time.Duration, condition podCondition) error {
Logf("Waiting up to %[1]v for pod %[2]s status to be %[3]s", timeout, podName, desc)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pod, err := c.Core().Pods(ns).Get(podName)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("Pod %q in namespace %q disappeared. Error: %v", podName, ns, err)
return err
}
// Aligning this text makes it much more readable
Logf("Get pod %[1]s in namespace '%[2]s' failed, ignoring for %[3]v. Error: %[4]v",
podName, ns, Poll, err)
continue
}
done, err := condition(pod)
if done {
return err
}
Logf("Waiting for pod %[1]s in namespace '%[2]s' status to be '%[3]s'"+
"(found phase: %[4]q, readiness: %[5]t) (%[6]v elapsed)",
podName, ns, desc, pod.Status.Phase, testutils.PodReady(pod), time.Since(start))
}
return fmt.Errorf("gave up waiting for pod '%s' to be '%s' after %v", podName, desc, timeout)
}
// WaitForMatchPodsCondition finds matching pods based on the input ListOptions,
// then waits and checks whether all matching pods satisfy the given podCondition.
func WaitForMatchPodsCondition(c clientset.Interface, opts v1.ListOptions, desc string, timeout time.Duration, condition podCondition) error {
Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pods, err := c.Core().Pods(v1.NamespaceAll).List(opts)
if err != nil {
return err
}
conditionNotMatch := []string{}
for _, pod := range pods.Items {
done, err := condition(&pod)
if done && err != nil {
return fmt.Errorf("Unexpected error: %v", err)
}
if !done {
conditionNotMatch = append(conditionNotMatch, format.Pod(&pod))
}
}
if len(conditionNotMatch) <= 0 {
return err
}
Logf("%d pods are not %s", len(conditionNotMatch), desc)
}
return fmt.Errorf("gave up waiting for matching pods to be '%s' after %v", desc, timeout)
}
// WaitForDefaultServiceAccountInNamespace waits for the default service account to be provisioned
// the default service account is what is associated with pods when they do not specify a service account
// as a result, pods are not able to be provisioned in a namespace until the service account is provisioned
func WaitForDefaultServiceAccountInNamespace(c clientset.Interface, namespace string) error {
return waitForServiceAccountInNamespace(c, namespace, "default", ServiceAccountProvisionTimeout)
}
// WaitForFederationApiserverReady waits for the federation apiserver to be ready.
// It tests the readiness by sending a GET request and expecting a non error response.
func WaitForFederationApiserverReady(c *federation_release_1_5.Clientset) error {
return wait.PollImmediate(time.Second, 1*time.Minute, func() (bool, error) {
_, err := c.Federation().Clusters().List(v1.ListOptions{})
if err != nil {
return false, nil
}
return true, nil
})
}
// WaitForPersistentVolumePhase waits for a PersistentVolume to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pv, err := c.Core().PersistentVolumes().Get(pvName)
if err != nil {
Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err)
continue
} else {
if pv.Status.Phase == phase {
Logf("PersistentVolume %s found and phase=%s (%v)", pvName, phase, time.Since(start))
return nil
} else {
Logf("PersistentVolume %s found but phase is %s instead of %s.", pvName, pv.Status.Phase, phase)
}
}
}
return fmt.Errorf("PersistentVolume %s not in phase %s within %v", pvName, phase, timeout)
}
// WaitForPersistentVolumeDeleted waits for a PersistentVolume to get deleted or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolume %s to get deleted", timeout, pvName)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pv, err := c.Core().PersistentVolumes().Get(pvName)
if err == nil {
Logf("PersistentVolume %s found and phase=%s (%v)", pvName, pv.Status.Phase, time.Since(start))
continue
} else {
if apierrs.IsNotFound(err) {
Logf("PersistentVolume %s was removed", pvName)
return nil
} else {
Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err)
}
}
}
return fmt.Errorf("PersistentVolume %s still exists within %v", pvName, timeout)
}
// WaitForPersistentVolumeClaimPhase waits for a PersistentVolumeClaim to be in a specific phase or until timeout occurs, whichever comes first.
func WaitForPersistentVolumeClaimPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolumeClaim %s to have phase %s", timeout, pvcName, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pvc, err := c.Core().PersistentVolumeClaims(ns).Get(pvcName)
if err != nil {
Logf("Get persistent volume claim %s in failed, ignoring for %v: %v", pvcName, Poll, err)
continue
} else {
if pvc.Status.Phase == phase {
Logf("PersistentVolumeClaim %s found and phase=%s (%v)", pvcName, phase, time.Since(start))
return nil
} else {
Logf("PersistentVolumeClaim %s found but phase is %s instead of %s.", pvcName, pvc.Status.Phase, phase)
}
}
}
return fmt.Errorf("PersistentVolumeClaim %s not in phase %s within %v", pvcName, phase, timeout)
}
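// exampleWaitForClaimBound is a hedged sketch, not part of the original file: it waits for
// a hypothetical claim to reach the Bound phase, reusing the framework's Poll interval and
// the ClaimProvisionTimeout defined above.
func exampleWaitForClaimBound(c clientset.Interface, ns string) error {
	return WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, "example-claim", Poll, ClaimProvisionTimeout)
}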
// CreateTestingNS should be used by every test, note that we append a common prefix to the provided test name.
// Please see NewFramework instead of using this directly.
func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error) {
if labels == nil {
labels = map[string]string{}
}
labels["e2e-run"] = string(RunId)
namespaceObj := &v1.Namespace{
ObjectMeta: v1.ObjectMeta{
GenerateName: fmt.Sprintf("e2e-tests-%v-", baseName),
Namespace: "",
Labels: labels,
},
Status: v1.NamespaceStatus{},
}
// Be robust about making the namespace creation call.
var got *v1.Namespace
if err := wait.PollImmediate(Poll, 30*time.Second, func() (bool, error) {
var err error
got, err = c.Core().Namespaces().Create(namespaceObj)
if err != nil {
Logf("Unexpected error while creating namespace: %v", err)
return false, nil
}
return true, nil
}); err != nil {
return nil, err
}
if TestContext.VerifyServiceAccount {
if err := WaitForDefaultServiceAccountInNamespace(c, got.Name); err != nil {
// Even if we fail to create serviceAccount in the namespace,
// we have successfully created the namespace.
// So, return the created namespace.
return got, err
}
}
return got, nil
}
// CheckTestingNSDeletedExcept checks whether all existing e2e-based namespaces are in the Terminating state
// and waits until they are finally deleted. It ignores the namespace passed in as skip.
func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error {
// TODO: Since we don't have support for bulk resource deletion in the API,
// while deleting a namespace we are deleting all objects from that namespace
// one by one (one deletion == one API call). This basically exposes us to
// throttling - currently controller-manager has a limit of max 20 QPS.
// Once #10217 is implemented and used in namespace-controller, deleting all
// object from a given namespace should be much faster and we will be able
// to lower this timeout.
// However, now Density test is producing ~26000 events and Load capacity test
// is producing ~35000 events, thus assuming there are no other requests it will
// take ~30 minutes to fully delete the namespace. Thus I'm setting it to 60
// minutes to avoid any timeouts here.
timeout := 60 * time.Minute
Logf("Waiting for terminating namespaces to be deleted...")
for start := time.Now(); time.Since(start) < timeout; time.Sleep(15 * time.Second) {
namespaces, err := c.Core().Namespaces().List(v1.ListOptions{})
if err != nil {
Logf("Listing namespaces failed: %v", err)
continue
}
terminating := 0
for _, ns := range namespaces.Items {
if strings.HasPrefix(ns.ObjectMeta.Name, "e2e-tests-") && ns.ObjectMeta.Name != skip {
if ns.Status.Phase == v1.NamespaceActive {
return fmt.Errorf("Namespace %s is active", ns.ObjectMeta.Name)
}
terminating++
}
}
if terminating == 0 {
return nil
}
}
return fmt.Errorf("Waiting for terminating namespaces to be deleted timed out")
}
// deleteNS deletes the provided namespace, waits for it to be completely deleted, and then checks
// whether there are any pods remaining in a non-terminating state.
func deleteNS(c clientset.Interface, clientPool dynamic.ClientPool, namespace string, timeout time.Duration) error {
if err := c.Core().Namespaces().Delete(namespace, nil); err != nil {
return err
}
// wait for namespace to delete or timeout.
err := wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
if _, err := c.Core().Namespaces().Get(namespace); err != nil {
if apierrs.IsNotFound(err) {
return true, nil
}
Logf("Error while waiting for namespace to be terminated: %v", err)
return false, nil
}
return false, nil
})
// verify there is no more remaining content in the namespace
remainingContent, cerr := hasRemainingContent(c, clientPool, namespace)
if cerr != nil {
return cerr
}
// if content remains, let's dump information about the namespace, and system for flake debugging.
remainingPods := 0
missingTimestamp := 0
if remainingContent {
// log information about namespace, and set of namespaces in api server to help flake detection
logNamespace(c, namespace)
logNamespaces(c, namespace)
// if we can, check if there were pods remaining with no timestamp.
remainingPods, missingTimestamp, _ = countRemainingPods(c, namespace)
}
// a timeout waiting for namespace deletion happened!
if err != nil {
// some content remains in the namespace
if remainingContent {
// pods remain
if remainingPods > 0 {
// but they were all undergoing deletion (kubelet is probably culprit)
if missingTimestamp == 0 {
return fmt.Errorf("namespace %v was not deleted with limit: %v, pods remaining: %v, pods missing deletion timestamp: %v", namespace, err, remainingPods, missingTimestamp)
}
// pods remained, but were not undergoing deletion (namespace controller is probably culprit)
return fmt.Errorf("namespace %v was not deleted with limit: %v, pods remaining: %v", namespace, err, remainingPods)
}
// other content remains (namespace controller is probably screwed up)
return fmt.Errorf("namespace %v was not deleted with limit: %v, namespaced content other than pods remain", namespace, err)
}
// no remaining content, but namespace was not deleted (namespace controller is probably wedged)
return fmt.Errorf("namespace %v was not deleted with limit: %v, namespace is empty but is not yet removed", namespace, err)
}
return nil
}
// logNamespaces logs the number of namespaces by phase
// namespace is the namespace the test was operating against that failed to delete so it can be grepped in logs
func logNamespaces(c clientset.Interface, namespace string) {
namespaceList, err := c.Core().Namespaces().List(v1.ListOptions{})
if err != nil {
Logf("namespace: %v, unable to list namespaces: %v", namespace, err)
return
}
numActive := 0
numTerminating := 0
for _, namespace := range namespaceList.Items {
if namespace.Status.Phase == v1.NamespaceActive {
numActive++
} else {
numTerminating++
}
}
Logf("namespace: %v, total namespaces: %v, active: %v, terminating: %v", namespace, len(namespaceList.Items), numActive, numTerminating)
}
// logNamespace logs detail about a namespace
func logNamespace(c clientset.Interface, namespace string) {
ns, err := c.Core().Namespaces().Get(namespace)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("namespace: %v no longer exists", namespace)
return
}
Logf("namespace: %v, unable to get namespace due to error: %v", namespace, err)
return
}
Logf("namespace: %v, DeletionTimetamp: %v, Finalizers: %v, Phase: %v", ns.Name, ns.DeletionTimestamp, ns.Spec.Finalizers, ns.Status.Phase)
}
// countRemainingPods queries the server to count number of remaining pods, and number of pods that had a missing deletion timestamp.
func countRemainingPods(c clientset.Interface, namespace string) (int, int, error) {
// check for remaining pods
pods, err := c.Core().Pods(namespace).List(v1.ListOptions{})
if err != nil {
return 0, 0, err
}
// nothing remains!
if len(pods.Items) == 0 {
return 0, 0, nil
}
// stuff remains, log about it
logPodStates(pods.Items)
// check if there were any pods with missing deletion timestamp
numPods := len(pods.Items)
missingTimestamp := 0
for _, pod := range pods.Items {
if pod.DeletionTimestamp == nil {
missingTimestamp++
}
}
return numPods, missingTimestamp, nil
}
// hasRemainingContent checks if there is remaining content in the namespace via API discovery
func hasRemainingContent(c clientset.Interface, clientPool dynamic.ClientPool, namespace string) (bool, error) {
// some tests generate their own framework.Client rather than the default
// TODO: ensure every test call has a configured clientPool
if clientPool == nil {
return false, nil
}
// find out what content is supported on the server
resources, err := c.Discovery().ServerPreferredNamespacedResources()
if err != nil {
return false, err
}
groupVersionResources, err := discovery.GroupVersionResources(resources)
if err != nil {
return false, err
}
// TODO: temporary hack for https://github.com/kubernetes/kubernetes/issues/31798
ignoredResources := sets.NewString("bindings")
contentRemaining := false
// dump how many of resource type is on the server in a log.
for gvr := range groupVersionResources {
// get a client for this group version...
dynamicClient, err := clientPool.ClientForGroupVersionResource(gvr)
if err != nil {
// not all resource types support list, so some errors here are normal depending on the resource type.
Logf("namespace: %s, unable to get client - gvr: %v, error: %v", namespace, gvr, err)
continue
}
// get the api resource
apiResource := metav1.APIResource{Name: gvr.Resource, Namespaced: true}
// TODO: temporary hack for https://github.com/kubernetes/kubernetes/issues/31798
if ignoredResources.Has(apiResource.Name) {
Logf("namespace: %s, resource: %s, ignored listing per whitelist", namespace, apiResource.Name)
continue
}
obj, err := dynamicClient.Resource(&apiResource, namespace).List(&v1.ListOptions{})
if err != nil {
// not all resources support list, so we ignore those
if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) {
continue
}
return false, err
}
unstructuredList, ok := obj.(*runtime.UnstructuredList)
if !ok {
return false, fmt.Errorf("namespace: %s, resource: %s, expected *runtime.UnstructuredList, got %#v", namespace, apiResource.Name, obj)
}
if len(unstructuredList.Items) > 0 {
Logf("namespace: %s, resource: %s, items remaining: %v", namespace, apiResource.Name, len(unstructuredList.Items))
contentRemaining = true
}
}
return contentRemaining, nil
}
func ContainerInitInvariant(older, newer runtime.Object) error {
oldPod := older.(*v1.Pod)
newPod := newer.(*v1.Pod)
if len(oldPod.Spec.InitContainers) == 0 {
return nil
}
if len(oldPod.Spec.InitContainers) != len(newPod.Spec.InitContainers) {
return fmt.Errorf("init container list changed")
}
if oldPod.UID != newPod.UID {
return fmt.Errorf("two different pods exist in the condition: %s vs %s", oldPod.UID, newPod.UID)
}
if err := initContainersInvariants(oldPod); err != nil {
return err
}
if err := initContainersInvariants(newPod); err != nil {
return err
}
oldInit, _, _ := podInitialized(oldPod)
newInit, _, _ := podInitialized(newPod)
if oldInit && !newInit {
// TODO: we may in the future enable resetting PodInitialized = false if the kubelet needs to restart it
// from scratch
return fmt.Errorf("pod cannot be initialized and then regress to not being initialized")
}
return nil
}
func podInitialized(pod *v1.Pod) (ok bool, failed bool, err error) {
allInit := true
initFailed := false
for _, s := range pod.Status.InitContainerStatuses {
switch {
case initFailed && s.State.Waiting == nil:
return allInit, initFailed, fmt.Errorf("container %s is after a failed container but isn't waiting", s.Name)
case allInit && s.State.Waiting == nil:
return allInit, initFailed, fmt.Errorf("container %s is after an initializing container but isn't waiting", s.Name)
case s.State.Terminated == nil:
allInit = false
case s.State.Terminated.ExitCode != 0:
allInit = false
initFailed = true
case !s.Ready:
return allInit, initFailed, fmt.Errorf("container %s initialized but isn't marked as ready", s.Name)
}
}
return allInit, initFailed, nil
}
func initContainersInvariants(pod *v1.Pod) error {
allInit, initFailed, err := podInitialized(pod)
if err != nil {
return err
}
if !allInit || initFailed {
for _, s := range pod.Status.ContainerStatuses {
if s.State.Waiting == nil || s.RestartCount != 0 {
return fmt.Errorf("container %s is not waiting but initialization not complete", s.Name)
}
if s.State.Waiting.Reason != "PodInitializing" {
return fmt.Errorf("container %s should have reason PodInitializing: %s", s.Name, s.State.Waiting.Reason)
}
}
}
_, c := v1.GetPodCondition(&pod.Status, v1.PodInitialized)
if c == nil {
return fmt.Errorf("pod does not have initialized condition")
}
if c.LastTransitionTime.IsZero() {
return fmt.Errorf("PodInitialized condition should always have a transition time")
}
switch {
case c.Status == v1.ConditionUnknown:
return fmt.Errorf("PodInitialized condition should never be Unknown")
case c.Status == v1.ConditionTrue && (initFailed || !allInit):
return fmt.Errorf("PodInitialized condition was True but all not all containers initialized")
case c.Status == v1.ConditionFalse && (!initFailed && allInit):
return fmt.Errorf("PodInitialized condition was False but all containers initialized")
}
return nil
}
type InvariantFunc func(older, newer runtime.Object) error
func CheckInvariants(events []watch.Event, fns ...InvariantFunc) error {
errs := sets.NewString()
for i := range events {
j := i + 1
if j >= len(events) {
continue
}
for _, fn := range fns {
if err := fn(events[i].Object, events[j].Object); err != nil {
errs.Insert(err.Error())
}
}
}
if errs.Len() > 0 {
return fmt.Errorf("invariants violated:\n* %s", strings.Join(errs.List(), "\n* "))
}
return nil
}
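// exampleCheckInitInvariants is an illustrative sketch, not part of the framework:
// it shows how a test that has already collected watch events for a pod (the
// events slice and its source are assumed) might verify the init-container
// invariants defined above.
func exampleCheckInitInvariants(events []watch.Event) error {
// ContainerInitInvariant is applied to every consecutive pair of events.
return CheckInvariants(events, ContainerInitInvariant)
}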
// Waits default amount of time (PodStartTimeout) for the specified pod to become running.
// Returns an error if timeout occurs first, or the pod goes into a failed state.
func WaitForPodRunningInNamespace(c clientset.Interface, pod *v1.Pod) error {
// this short-circuit is needed for cases when we pass a list of pods instead
// of newly created pod (e.g. VerifyPods) which means we are getting already
// running pod for which waiting does not make sense and will always fail
if pod.Status.Phase == v1.PodRunning {
return nil
}
return waitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, pod.ResourceVersion, PodStartTimeout)
}
// Waits default amount of time (PodStartTimeout) for the specified pod to become running.
// Returns an error if timeout occurs first, or the pod goes into a failed state.
func WaitForPodNameRunningInNamespace(c clientset.Interface, podName, namespace string) error {
return waitTimeoutForPodRunningInNamespace(c, podName, namespace, "", PodStartTimeout)
}
// Waits an extended amount of time (slowPodStartTimeout) for the specified pod to become running.
// The resourceVersion is used when Watching object changes, it tells since when we care
// about changes to the pod. Returns an error if timeout occurs first, or the pod goes into a failed state.
func waitForPodRunningInNamespaceSlow(c clientset.Interface, podName, namespace, resourceVersion string) error {
return waitTimeoutForPodRunningInNamespace(c, podName, namespace, resourceVersion, slowPodStartTimeout)
}
func waitTimeoutForPodRunningInNamespace(c clientset.Interface, podName, namespace, resourceVersion string, timeout time.Duration) error {
w, err := c.Core().Pods(namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
if err != nil {
return err
}
_, err = watch.Until(timeout, w, conditions.PodRunning)
return err
}
// Waits default amount of time (podNoLongerRunningTimeout) for the specified pod to stop running.
// Returns an error if timeout occurs first.
func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace, resourceVersion string) error {
return WaitTimeoutForPodNoLongerRunningInNamespace(c, podName, namespace, resourceVersion, podNoLongerRunningTimeout)
}
func WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace, resourceVersion string, timeout time.Duration) error {
w, err := c.Core().Pods(namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
if err != nil {
return err
}
_, err = watch.Until(timeout, w, conditions.PodCompleted)
return err
}
func waitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace, resourceVersion string, timeout time.Duration) error {
w, err := c.Core().Pods(namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
if err != nil {
return err
}
_, err = watch.Until(timeout, w, conditions.PodRunningAndReady)
return err
}
// WaitForPodNotPending returns an error if it took too long for the pod to go out of pending state.
// The resourceVersion is used when Watching object changes, it tells since when we care
// about changes to the pod.
func WaitForPodNotPending(c clientset.Interface, ns, podName, resourceVersion string) error {
w, err := c.Core().Pods(ns).Watch(v1.SingleObject(v1.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
if err != nil {
return err
}
_, err = watch.Until(PodStartTimeout, w, conditions.PodNotPending)
return err
}
// waitForPodTerminatedInNamespace returns an error if it took too long for the pod
// to terminate or if the pod terminated with an unexpected reason.
func waitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, namespace string) error {
return WaitForPodCondition(c, namespace, podName, "terminated due to deadline exceeded", PodStartTimeout, func(pod *v1.Pod) (bool, error) {
if pod.Status.Phase == v1.PodFailed {
if pod.Status.Reason == reason {
return true, nil
} else {
return true, fmt.Errorf("Expected pod %v in namespace %v to be terminated with reason %v, got reason: %v", podName, namespace, reason, pod.Status.Reason)
}
}
return false, nil
})
}
// waitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
func waitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName string, namespace string, timeout time.Duration) error {
return WaitForPodCondition(c, namespace, podName, "success or failure", timeout, func(pod *v1.Pod) (bool, error) {
if pod.Spec.RestartPolicy == v1.RestartPolicyAlways {
return true, fmt.Errorf("pod %q will never terminate with a succeeded state since its restart policy is Always", podName)
}
switch pod.Status.Phase {
case v1.PodSucceeded:
By("Saw pod success")
return true, nil
case v1.PodFailed:
return true, fmt.Errorf("pod %q failed with status: %+v", podName, pod.Status)
default:
return false, nil
}
})
}
// WaitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or ran longer than PodStartTimeout.
func WaitForPodSuccessInNamespace(c clientset.Interface, podName string, namespace string) error {
return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, PodStartTimeout)
}
// WaitForPodSuccessInNamespaceSlow returns nil if the pod reached state success, or an error if it reached failure or ran longer than slowPodStartTimeout.
func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, namespace string) error {
return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, slowPodStartTimeout)
}
// waitForRCPodOnNode returns the pod from the given replication controller (described by rcName) which is scheduled on the given node.
// In case of failure or too long waiting time, an error is returned.
func waitForRCPodOnNode(c clientset.Interface, ns, rcName, node string) (*v1.Pod, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName}))
var p *v1.Pod = nil
err := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
Logf("Waiting for pod %s to appear on node %s", rcName, node)
options := v1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(ns).List(options)
if err != nil {
return false, err
}
for _, pod := range pods.Items {
if pod.Spec.NodeName == node {
Logf("Pod %s found on node %s", pod.Name, node)
p = &pod
return true, nil
}
}
return false, nil
})
return p, err
}
// WaitForRCToStabilize waits till the RC has a matching generation/replica count between spec and status.
func WaitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.Duration) error {
options := v1.ListOptions{FieldSelector: fields.Set{
"metadata.name": name,
"metadata.namespace": ns,
}.AsSelector().String()}
w, err := c.Core().ReplicationControllers(ns).Watch(options)
if err != nil {
return err
}
_, err = watch.Until(timeout, w, func(event watch.Event) (bool, error) {
switch event.Type {
case watch.Deleted:
return false, apierrs.NewNotFound(schema.GroupResource{Resource: "replicationcontrollers"}, "")
}
switch rc := event.Object.(type) {
case *v1.ReplicationController:
if rc.Name == name && rc.Namespace == ns &&
rc.Generation <= rc.Status.ObservedGeneration &&
*(rc.Spec.Replicas) == rc.Status.Replicas {
return true, nil
}
Logf("Waiting for rc %s to stabilize, generation %v observed generation %v spec.replicas %d status.replicas %d",
name, rc.Generation, rc.Status.ObservedGeneration, *(rc.Spec.Replicas), rc.Status.Replicas)
}
return false, nil
})
return err
}
func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
return wait.PollImmediate(interval, timeout, func() (bool, error) {
Logf("Waiting for pod %s to disappear", podName)
options := v1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(ns).List(options)
if err != nil {
return false, err
}
found := false
for _, pod := range pods.Items {
if pod.Name == podName {
Logf("Pod %s still exists", podName)
found = true
break
}
}
if !found {
Logf("Pod %s no longer exists", podName)
return true, nil
}
return false, nil
})
}
// WaitForRCPodToDisappear returns nil if the pod from the given replication controller (described by rcName) no longer exists.
// In case of failure or too long waiting time, an error is returned.
func WaitForRCPodToDisappear(c clientset.Interface, ns, rcName, podName string) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName}))
// NodeController evicts pod after 5 minutes, so we need timeout greater than that to observe effects.
// The grace period must be set to 0 on the pod for it to be deleted during the partition.
// Otherwise, it goes to the 'Terminating' state till the kubelet confirms deletion.
return WaitForPodToDisappear(c, ns, podName, label, 20*time.Second, 10*time.Minute)
}
// WaitForService waits until the service appears (exist == true), or disappears (exist == false)
func WaitForService(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
_, err := c.Core().Services(namespace).Get(name)
switch {
case err == nil:
if !exist {
return false, nil
}
Logf("Service %s in namespace %s found.", name, namespace)
return true, nil
case apierrs.IsNotFound(err):
if exist {
return false, nil
}
Logf("Service %s in namespace %s disappeared.", name, namespace)
return true, nil
default:
Logf("Get service %s in namespace %s failed: %v", name, namespace, err)
return false, nil
}
})
if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
return fmt.Errorf("error waiting for service %s/%s %s: %v", namespace, name, stateMsg[exist], err)
}
return nil
}
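// exampleWaitForService is an illustrative sketch, not part of the framework:
// it waits up to two minutes, polling every two seconds, for a hypothetical
// service "my-svc" to appear in the "default" namespace.
func exampleWaitForService(c clientset.Interface) error {
// exist == true means "wait for the service to appear".
return WaitForService(c, "default", "my-svc", true, 2*time.Second, 2*time.Minute)
}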
// WaitForServiceEndpointsNum waits until the number of endpoints that implement the service equals expectNum.
func WaitForServiceEndpointsNum(c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error {
return wait.Poll(interval, timeout, func() (bool, error) {
Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum)
list, err := c.Core().Endpoints(namespace).List(v1.ListOptions{})
if err != nil {
return false, err
}
for _, e := range list.Items {
if e.Name == serviceName && countEndpointsNum(&e) == expectNum {
return true, nil
}
}
return false, nil
})
}
func countEndpointsNum(e *v1.Endpoints) int {
num := 0
for _, sub := range e.Subsets {
num += len(sub.Addresses)
}
return num
}
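// exampleWaitForEndpoints is an illustrative sketch, not part of the framework:
// it waits until the hypothetical service "my-svc" in "default" is backed by
// exactly three ready endpoint addresses, checking every five seconds for up to
// three minutes.
func exampleWaitForEndpoints(c clientset.Interface) error {
return WaitForServiceEndpointsNum(c, "default", "my-svc", 3, 5*time.Second, 3*time.Minute)
}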
// WaitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false)
func WaitForReplicationController(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
_, err := c.Core().ReplicationControllers(namespace).Get(name)
if err != nil {
Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err)
return !exist, nil
} else {
Logf("ReplicationController %s in namespace %s found.", name, namespace)
return exist, nil
}
})
if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
return fmt.Errorf("error waiting for ReplicationController %s/%s %s: %v", namespace, name, stateMsg[exist], err)
}
return nil
}
func WaitForEndpoint(c clientset.Interface, ns, name string) error {
for t := time.Now(); time.Since(t) < EndpointRegisterTimeout; time.Sleep(Poll) {
endpoint, err := c.Core().Endpoints(ns).Get(name)
Expect(err).NotTo(HaveOccurred())
if len(endpoint.Subsets) == 0 || len(endpoint.Subsets[0].Addresses) == 0 {
Logf("Endpoint %s/%s is not ready yet", ns, name)
continue
} else {
return nil
}
}
return fmt.Errorf("Failed to get endpoints for %s/%s", ns, name)
}
// podProxyResponseChecker is a context for checking pod responses by issuing GETs to them
// (via the API proxy) and verifying that they answer with their own pod name.
type podProxyResponseChecker struct {
c clientset.Interface
ns string
label labels.Selector
controllerName string
respondName bool // Whether the pod should respond with its own name.
pods *v1.PodList
}
func PodProxyResponseChecker(c clientset.Interface, ns string, label labels.Selector, controllerName string, respondName bool, pods *v1.PodList) podProxyResponseChecker {
return podProxyResponseChecker{c, ns, label, controllerName, respondName, pods}
}
// CheckAllResponses issues GETs to all pods in the context and verifies that they
// reply with their own pod name.
func (r podProxyResponseChecker) CheckAllResponses() (done bool, err error) {
successes := 0
options := v1.ListOptions{LabelSelector: r.label.String()}
currentPods, err := r.c.Core().Pods(r.ns).List(options)
Expect(err).NotTo(HaveOccurred())
for i, pod := range r.pods.Items {
// Check that the replica list remains unchanged, otherwise we have problems.
if !isElementOf(pod.UID, currentPods) {
return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. Current replica set: %v", pod.UID, currentPods)
}
subResourceProxyAvailable, err := ServerVersionGTE(SubResourcePodProxyVersion, r.c.Discovery())
if err != nil {
return false, err
}
var body []byte
if subResourceProxyAvailable {
body, err = r.c.Core().RESTClient().Get().
Namespace(r.ns).
Resource("pods").
SubResource("proxy").
Name(string(pod.Name)).
Do().
Raw()
} else {
body, err = r.c.Core().RESTClient().Get().
Prefix("proxy").
Namespace(r.ns).
Resource("pods").
Name(string(pod.Name)).
Do().
Raw()
}
if err != nil {
Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
continue
}
// The response checker expects the pod's name unless !respondName, in
// which case it just checks for a non-empty response.
got := string(body)
what := ""
if r.respondName {
what = "expected"
want := pod.Name
if got != want {
Logf("Controller %s: Replica %d [%s] expected response %q but got %q",
r.controllerName, i+1, pod.Name, want, got)
continue
}
} else {
what = "non-empty"
if len(got) == 0 {
Logf("Controller %s: Replica %d [%s] expected non-empty response",
r.controllerName, i+1, pod.Name)
continue
}
}
successes++
Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far",
r.controllerName, what, i+1, pod.Name, got, successes, len(r.pods.Items))
}
if successes < len(r.pods.Items) {
return false, nil
}
return true, nil
}
// ServerVersionGTE returns true if the server version is greater than or equal
// to v.
//
// TODO(18726): This should be incorporated into client.VersionInterface.
func ServerVersionGTE(v semver.Version, c discovery.ServerVersionInterface) (bool, error) {
serverVersion, err := c.ServerVersion()
if err != nil {
return false, fmt.Errorf("Unable to get server version: %v", err)
}
sv, err := version.Parse(serverVersion.GitVersion)
if err != nil {
return false, fmt.Errorf("Unable to parse server version %q: %v", serverVersion.GitVersion, err)
}
return sv.GTE(v), nil
}
func SkipUnlessKubectlVersionGTE(v semver.Version) {
gte, err := KubectlVersionGTE(v)
if err != nil {
Failf("Failed to get kubectl version: %v", err)
}
if !gte {
Skipf("Not supported for kubectl versions before %q", v)
}
}
// KubectlVersionGTE returns true if the kubectl version is greater than or
// equal to v.
func KubectlVersionGTE(v semver.Version) (bool, error) {
kv, err := KubectlVersion()
if err != nil {
return false, err
}
return kv.GTE(v), nil
}
// KubectlVersion gets the version of kubectl that's currently being used (see
// --kubectl-path in e2e.go to use an alternate kubectl).
func KubectlVersion() (semver.Version, error) {
output := RunKubectlOrDie("version", "--client")
matches := gitVersionRegexp.FindStringSubmatch(output)
if len(matches) != 2 {
return semver.Version{}, fmt.Errorf("Could not find kubectl version in output %v", output)
}
// Don't use the full match, as it contains "GitVersion:\"" and a
// trailing "\"". Just use the submatch.
return version.Parse(matches[1])
}
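// exampleSkipOnOldKubectl is an illustrative sketch, not part of the framework:
// a test that relies on a newer kubectl feature could guard itself like this.
// The version 1.5.0 is hypothetical, and the struct literal assumes the vendored
// semver package exposes Major/Minor/Patch fields.
func exampleSkipOnOldKubectl() {
SkipUnlessKubectlVersionGTE(semver.Version{Major: 1, Minor: 5, Patch: 0})
}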
func PodsResponding(c clientset.Interface, ns, name string, wantName bool, pods *v1.PodList) error {
By("trying to dial each unique pod")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
return wait.PollImmediate(Poll, podRespondingTimeout, PodProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses)
}
func PodsCreated(c clientset.Interface, ns, name string, replicas int32) (*v1.PodList, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
return PodsCreatedByLabel(c, ns, name, replicas, label)
}
func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32, label labels.Selector) (*v1.PodList, error) {
timeout := 2 * time.Minute
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
options := v1.ListOptions{LabelSelector: label.String()}
// List the pods, making sure we observe all the replicas.
pods, err := c.Core().Pods(ns).List(options)
if err != nil {
return nil, err
}
created := []v1.Pod{}
for _, pod := range pods.Items {
if pod.DeletionTimestamp != nil {
continue
}
created = append(created, pod)
}
Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas)
if int32(len(created)) == replicas {
pods.Items = created
return pods, nil
}
}
return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas)
}
func podsRunning(c clientset.Interface, pods *v1.PodList) []error {
// Wait for the pods to enter the running state. Waiting loops until the pods
// are running so non-running pods cause a timeout for this test.
By("ensuring each pod is running")
e := []error{}
errorChan := make(chan error)
for _, pod := range pods.Items {
go func(p v1.Pod) {
errorChan <- WaitForPodRunningInNamespace(c, &p)
}(pod)
}
for range pods.Items {
err := <-errorChan
if err != nil {
e = append(e, err)
}
}
return e
}
func VerifyPods(c clientset.Interface, ns, name string, wantName bool, replicas int32) error {
pods, err := PodsCreated(c, ns, name, replicas)
if err != nil {
return err
}
e := podsRunning(c, pods)
if len(e) > 0 {
return fmt.Errorf("failed to wait for pods running: %v", e)
}
err = PodsResponding(c, ns, name, wantName, pods)
if err != nil {
return fmt.Errorf("failed to wait for pods responding: %v", err)
}
return nil
}
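// exampleVerifyPods is an illustrative sketch, not part of the framework: it
// checks that a hypothetical replication controller named "frontend" has three
// running pods, each responding with its own name via the API proxy.
func exampleVerifyPods(c clientset.Interface, ns string) error {
// wantName == true makes the response check require each pod to echo its own name.
return VerifyPods(c, ns, "frontend", true, 3)
}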
func ServiceResponding(c clientset.Interface, ns, name string) error {
By(fmt.Sprintf("trying to dial the service %s.%s via the proxy", ns, name))
return wait.PollImmediate(Poll, ServiceRespondingTimeout, func() (done bool, err error) {
proxyRequest, errProxy := GetServicesProxyRequest(c, c.Core().RESTClient().Get())
if errProxy != nil {
Logf("Failed to get services proxy request: %v:", errProxy)
return false, nil
}
body, err := proxyRequest.Namespace(ns).
Name(name).
Do().
Raw()
if err != nil {
Logf("Failed to GET from service %s: %v:", name, err)
return false, nil
}
got := string(body)
if len(got) == 0 {
Logf("Service %s: expected non-empty response", name)
return false, err // stop polling
}
Logf("Service %s: found nonempty answer: %s", name, got)
return true, nil
})
}
func restclientConfig(kubeContext string) (*clientcmdapi.Config, error) {
Logf(">>> kubeConfig: %s\n", TestContext.KubeConfig)
if TestContext.KubeConfig == "" {
return nil, fmt.Errorf("KubeConfig must be specified to load client config")
}
c, err := clientcmd.LoadFromFile(TestContext.KubeConfig)
if err != nil {
return nil, fmt.Errorf("error loading KubeConfig: %v", err.Error())
}
if kubeContext != "" {
Logf(">>> kubeContext: %s\n", kubeContext)
c.CurrentContext = kubeContext
}
return c, nil
}
type ClientConfigGetter func() (*restclient.Config, error)
func LoadConfig() (*restclient.Config, error) {
if TestContext.NodeE2E {
// This is a node e2e test, apply the node e2e configuration
return &restclient.Config{Host: TestContext.Host}, nil
}
c, err := restclientConfig(TestContext.KubeContext)
if err != nil {
return nil, err
}
return clientcmd.NewDefaultClientConfig(*c, &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: TestContext.Host}}).ClientConfig()
}
func LoadFederatedConfig(overrides *clientcmd.ConfigOverrides) (*restclient.Config, error) {
c, err := restclientConfig(federatedKubeContext)
if err != nil {
return nil, fmt.Errorf("error creating federation client config: %v", err.Error())
}
cfg, err := clientcmd.NewDefaultClientConfig(*c, overrides).ClientConfig()
if cfg != nil {
//TODO(colhom): this is only here because https://github.com/kubernetes/kubernetes/issues/25422
cfg.NegotiatedSerializer = api.Codecs
}
if err != nil {
return cfg, fmt.Errorf("error creating federation client config: %v", err.Error())
}
return cfg, nil
}
func LoadFederationClientset_1_5() (*federation_release_1_5.Clientset, error) {
config, err := LoadFederatedConfig(&clientcmd.ConfigOverrides{})
if err != nil {
return nil, err
}
c, err := federation_release_1_5.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("error creating federation clientset: %v", err.Error())
}
return c, nil
}
func LoadInternalClientset() (*internalclientset.Clientset, error) {
config, err := LoadConfig()
if err != nil {
return nil, fmt.Errorf("error creating client: %v", err.Error())
}
return internalclientset.NewForConfig(config)
}
func LoadClientset() (*clientset.Clientset, error) {
config, err := LoadConfig()
if err != nil {
return nil, fmt.Errorf("error creating client: %v", err.Error())
}
return clientset.NewForConfig(config)
}
// randomSuffix provides a random string to append to pods, services, and rcs.
// TODO: Allow service names to have the same form as names
// for pods and replication controllers so we don't
// need to use such a function and can instead
// use the UUID utility function.
func randomSuffix() string {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
return strconv.Itoa(r.Int() % 10000)
}
func ExpectNoError(err error, explain ...interface{}) {
if err != nil {
Logf("Unexpected error occurred: %v", err)
}
ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...)
}
func ExpectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interface{}) {
var err error
for i := 0; i < maxRetries; i++ {
err = fn()
if err == nil {
return
}
Logf("(Attempt %d of %d) Unexpected error occurred: %v", i+1, maxRetries, err)
}
ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...)
}
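// exampleRetryFlakyGet is an illustrative sketch, not part of the framework:
// it wraps a call that may fail transiently so the test only fails after three
// attempts. The pod name "flaky-pod" is hypothetical.
func exampleRetryFlakyGet(c clientset.Interface, ns string) {
ExpectNoErrorWithRetries(func() error {
_, err := c.Core().Pods(ns).Get("flaky-pod")
return err
}, 3, "failed to get flaky-pod after retries")
}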
// Cleanup stops everything created from filePath in namespace ns and checks that everything matching the selectors in the given namespace is correctly stopped.
func Cleanup(filePath, ns string, selectors ...string) {
By("using delete to clean up resources")
var nsArg string
if ns != "" {
nsArg = fmt.Sprintf("--namespace=%s", ns)
}
RunKubectlOrDie("delete", "--grace-period=0", "-f", filePath, nsArg)
AssertCleanup(ns, selectors...)
}
// Asserts that cleanup of a namespace wrt selectors occurred.
func AssertCleanup(ns string, selectors ...string) {
var nsArg string
if ns != "" {
nsArg = fmt.Sprintf("--namespace=%s", ns)
}
for _, selector := range selectors {
resources := RunKubectlOrDie("get", "rc,svc", "-l", selector, "--no-headers", nsArg)
if resources != "" {
Failf("Resources left running after stop:\n%s", resources)
}
pods := RunKubectlOrDie("get", "pods", "-l", selector, nsArg, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}")
if pods != "" {
Failf("Pods left unterminated after stop:\n%s", pods)
}
}
}
// validatorFn is the function which individual tests will implement.
// we may want it to return more than just an error, at some point.
type validatorFn func(c clientset.Interface, podID string) error
// ValidateController is a generic mechanism for testing RC's that are running.
// It takes a container name, a test name, and a validator function which is plugged in by a specific test.
// "containername": this is grepped for.
// "containerImage" : this is the name of the image we expect to be launched. Not to confuse w/ images (kitten.jpg) which are validated.
// "testname": which gets bubbled up to the logging/failure messages if errors happen.
// "validator" function: This function is given a podID and a client, and it can do some specific validations that way.
func ValidateController(c clientset.Interface, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {
getPodsTemplate := "--template={{range.items}}{{.metadata.name}} {{end}}"
// NB: kubectl adds the "exists" function to the standard template functions.
// This lets us check to see if the "running" entry exists for each of the containers
// we care about. Exists will never return an error and it's safe to check a chain of
// things, any one of which may not exist. In the below template, all of info,
// containername, and running might be nil, so the normal index function isn't very
// helpful.
// This template is unit-tested in kubectl, so if you change it, update the unit test.
// You can read about the syntax here: http://golang.org/pkg/text/template/.
getContainerStateTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "%s") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}`, containername)
getImageTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if eq .name "%s"}}{{.image}}{{end}}{{end}}{{end}}`, containername)
By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) //testname should be selector
waitLoop:
for start := time.Now(); time.Since(start) < PodStartTimeout; time.Sleep(5 * time.Second) {
getPodsOutput := RunKubectlOrDie("get", "pods", "-o", "template", getPodsTemplate, "-l", testname, fmt.Sprintf("--namespace=%v", ns))
pods := strings.Fields(getPodsOutput)
if numPods := len(pods); numPods != replicas {
By(fmt.Sprintf("Replicas for %s: expected=%d actual=%d", testname, replicas, numPods))
continue
}
var runningPods []string
for _, podID := range pods {
running := RunKubectlOrDie("get", "pods", podID, "-o", "template", getContainerStateTemplate, fmt.Sprintf("--namespace=%v", ns))
if running != "true" {
Logf("%s is created but not running", podID)
continue waitLoop
}
currentImage := RunKubectlOrDie("get", "pods", podID, "-o", "template", getImageTemplate, fmt.Sprintf("--namespace=%v", ns))
if currentImage != containerImage {
Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage)
continue waitLoop
}
// Call the generic validator function here.
// This might validate for example, that (1) getting a url works and (2) url is serving correct content.
if err := validator(c, podID); err != nil {
Logf("%s is running right image but validator function failed: %v", podID, err)
continue waitLoop
}
Logf("%s is verified up and running", podID)
runningPods = append(runningPods, podID)
}
// If we reach here, then all our checks passed.
if len(runningPods) == replicas {
return
}
}
// Reaching here means that one or more checks failed multiple times. Assuming it's not a race condition, something is broken.
Failf("Timed out after %v seconds waiting for %s pods to reach valid state", PodStartTimeout.Seconds(), testname)
}
// KubectlCmd runs the kubectl executable through the wrapper script.
func KubectlCmd(args ...string) *exec.Cmd {
defaultArgs := []string{}
// Reference a --server option so tests can run anywhere.
if TestContext.Host != "" {
defaultArgs = append(defaultArgs, "--"+clientcmd.FlagAPIServer+"="+TestContext.Host)
}
if TestContext.KubeConfig != "" {
defaultArgs = append(defaultArgs, "--"+clientcmd.RecommendedConfigPathFlag+"="+TestContext.KubeConfig)
// Reference the KubeContext
if TestContext.KubeContext != "" {
defaultArgs = append(defaultArgs, "--"+clientcmd.FlagContext+"="+TestContext.KubeContext)
}
} else {
if TestContext.CertDir != "" {
defaultArgs = append(defaultArgs,
fmt.Sprintf("--certificate-authority=%s", filepath.Join(TestContext.CertDir, "ca.crt")),
fmt.Sprintf("--client-certificate=%s", filepath.Join(TestContext.CertDir, "kubecfg.crt")),
fmt.Sprintf("--client-key=%s", filepath.Join(TestContext.CertDir, "kubecfg.key")))
}
}
kubectlArgs := append(defaultArgs, args...)
//We allow users to specify path to kubectl, so you can test either "kubectl" or "cluster/kubectl.sh"
//and so on.
cmd := exec.Command(TestContext.KubectlPath, kubectlArgs...)
//caller will invoke this and wait on it.
return cmd
}
// kubectlBuilder is used to build, customize and execute a kubectl Command.
// Add more functions to customize the builder as needed.
type kubectlBuilder struct {
cmd *exec.Cmd
timeout <-chan time.Time
}
func NewKubectlCommand(args ...string) *kubectlBuilder {
b := new(kubectlBuilder)
b.cmd = KubectlCmd(args...)
return b
}
func (b *kubectlBuilder) WithEnv(env []string) *kubectlBuilder {
b.cmd.Env = env
return b
}
func (b *kubectlBuilder) WithTimeout(t <-chan time.Time) *kubectlBuilder {
b.timeout = t
return b
}
func (b kubectlBuilder) WithStdinData(data string) *kubectlBuilder {
b.cmd.Stdin = strings.NewReader(data)
return &b
}
func (b kubectlBuilder) WithStdinReader(reader io.Reader) *kubectlBuilder {
b.cmd.Stdin = reader
return &b
}
func (b kubectlBuilder) ExecOrDie() string {
str, err := b.Exec()
Logf("stdout: %q", str)
// In case of i/o timeout error, try talking to the apiserver again after 2s before dying.
// Note that we're still dying after retrying so that we can get visibility to triage it further.
if isTimeout(err) {
Logf("Hit i/o timeout error, talking to the server 2s later to see if it's temporary.")
time.Sleep(2 * time.Second)
retryStr, retryErr := RunKubectl("version")
Logf("stdout: %q", retryStr)
Logf("err: %v", retryErr)
}
Expect(err).NotTo(HaveOccurred())
return str
}
func isTimeout(err error) bool {
switch err := err.(type) {
case net.Error:
if err.Timeout() {
return true
}
case *url.Error:
if err, ok := err.Err.(net.Error); ok && err.Timeout() {
return true
}
}
return false
}
func (b kubectlBuilder) Exec() (string, error) {
var stdout, stderr bytes.Buffer
cmd := b.cmd
cmd.Stdout, cmd.Stderr = &stdout, &stderr
Logf("Running '%s %s'", cmd.Path, strings.Join(cmd.Args[1:], " ")) // skip arg[0] as it is printed separately
if err := cmd.Start(); err != nil {
return "", fmt.Errorf("error starting %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err)
}
errCh := make(chan error, 1)
go func() {
errCh <- cmd.Wait()
}()
select {
case err := <-errCh:
if err != nil {
var rc int = 127
if ee, ok := err.(*exec.ExitError); ok {
Logf("rc: %d", rc)
rc = int(ee.Sys().(syscall.WaitStatus).ExitStatus())
}
return "", uexec.CodeExitError{
Err: fmt.Errorf("error running %v:\nCommand stdout:\n%v\nstderr:\n%v\nerror:\n%v\n", cmd, cmd.Stdout, cmd.Stderr, err),
Code: rc,
}
}
case <-b.timeout:
b.cmd.Process.Kill()
return "", fmt.Errorf("timed out waiting for command %v:\nCommand stdout:\n%v\nstderr:\n%v\n", cmd, cmd.Stdout, cmd.Stderr)
}
Logf("stderr: %q", stderr.String())
return stdout.String(), nil
}
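// exampleKubectlWithTimeout is an illustrative sketch, not part of the framework:
// it builds a kubectl invocation that is killed if it has not finished within
// thirty seconds, returning stdout or an error.
func exampleKubectlWithTimeout() (string, error) {
return NewKubectlCommand("get", "pods", "--all-namespaces").
WithTimeout(time.After(30 * time.Second)).
Exec()
}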
// RunKubectlOrDie is a convenience wrapper over kubectlBuilder
func RunKubectlOrDie(args ...string) string {
return NewKubectlCommand(args...).ExecOrDie()
}
// RunKubectl is a convenience wrapper over kubectlBuilder
func RunKubectl(args ...string) (string, error) {
return NewKubectlCommand(args...).Exec()
}
// RunKubectlOrDieInput is a convenience wrapper over kubectlBuilder that takes input to stdin
func RunKubectlOrDieInput(data string, args ...string) string {
return NewKubectlCommand(args...).WithStdinData(data).ExecOrDie()
}
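// exampleCreateFromStdin is an illustrative sketch, not part of the framework:
// it feeds a manifest held in memory to "kubectl create -f -" in a hypothetical
// namespace; the manifest contents are assumed to be defined by the caller.
func exampleCreateFromStdin(manifest, ns string) string {
return RunKubectlOrDieInput(manifest, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
}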
func StartCmdAndStreamOutput(cmd *exec.Cmd) (stdout, stderr io.ReadCloser, err error) {
stdout, err = cmd.StdoutPipe()
if err != nil {
return
}
stderr, err = cmd.StderrPipe()
if err != nil {
return
}
Logf("Asynchronously running '%s %s'", cmd.Path, strings.Join(cmd.Args, " "))
err = cmd.Start()
return
}
// Rough equivalent of ctrl+c for cleaning up processes. Intended to be run in defer.
func TryKill(cmd *exec.Cmd) {
if err := cmd.Process.Kill(); err != nil {
Logf("ERROR failed to kill command %v! The process may leak", cmd)
}
}
// testContainerOutputMatcher runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using the given matcher.
func (f *Framework) testContainerOutputMatcher(scenarioName string,
pod *v1.Pod,
containerIndex int,
expectedOutput []string,
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) {
By(fmt.Sprintf("Creating a pod to test %v", scenarioName))
if containerIndex < 0 || containerIndex >= len(pod.Spec.Containers) {
Failf("Invalid container index: %d", containerIndex)
}
ExpectNoError(f.MatchContainerOutput(pod, pod.Spec.Containers[containerIndex].Name, expectedOutput, matcher))
}
// MatchContainerOutput creates a pod and waits for all of its containers to exit with success.
// It then checks each string in expectedOutput against the output of the specified container using the given matcher.
func (f *Framework) MatchContainerOutput(
pod *v1.Pod,
containerName string,
expectedOutput []string,
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) error {
podClient := f.PodClient()
ns := f.Namespace.Name
createdPod := podClient.Create(pod)
defer func() {
By("delete the pod")
podClient.DeleteSync(createdPod.Name, &v1.DeleteOptions{}, podNoLongerRunningTimeout)
}()
// Wait for client pod to complete.
if err := WaitForPodSuccessInNamespace(f.ClientSet, createdPod.Name, ns); err != nil {
return fmt.Errorf("expected pod %q success: %v", pod.Name, err)
}
// Grab its logs. Get host first.
podStatus, err := podClient.Get(createdPod.Name)
if err != nil {
return fmt.Errorf("failed to get pod status: %v", err)
}
Logf("Trying to get logs from node %s pod %s container %s: %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
// Sometimes the actual containers take a second to get started, try to get logs for 60s
logs, err := GetPodLogs(f.ClientSet, ns, podStatus.Name, containerName)
if err != nil {
Logf("Failed to get logs from node %q pod %q container %q. %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
return fmt.Errorf("failed to get logs from %s for %s: %v", podStatus.Name, containerName, err)
}
for _, expected := range expectedOutput {
m := matcher(expected)
matches, err := m.Match(logs)
if err != nil {
return fmt.Errorf("expected %q in container output: %v", expected, err)
} else if !matches {
return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs))
}
}
return nil
}
func RunDeployment(config testutils.DeploymentConfig) error {
By(fmt.Sprintf("creating deployment %s in namespace %s", config.Name, config.Namespace))
config.NodeDumpFunc = DumpNodeDebugInfo
config.ContainerDumpFunc = LogFailedContainers
return testutils.RunDeployment(config)
}
func RunReplicaSet(config testutils.ReplicaSetConfig) error {
By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace))
config.NodeDumpFunc = DumpNodeDebugInfo
config.ContainerDumpFunc = LogFailedContainers
return testutils.RunReplicaSet(config)
}
func RunRC(config testutils.RCConfig) error {
By(fmt.Sprintf("creating replication controller %s in namespace %s", config.Name, config.Namespace))
config.NodeDumpFunc = DumpNodeDebugInfo
config.ContainerDumpFunc = LogFailedContainers
return testutils.RunRC(config)
}
type EventsLister func(opts v1.ListOptions, ns string) (*v1.EventList, error)
func DumpEventsInNamespace(eventsLister EventsLister, namespace string) {
By(fmt.Sprintf("Collecting events from namespace %q.", namespace))
events, err := eventsLister(v1.ListOptions{}, namespace)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Found %d events.", len(events.Items)))
// Sort events by their first timestamp
sortedEvents := events.Items
if len(sortedEvents) > 1 {
sort.Sort(byFirstTimestamp(sortedEvents))
}
for _, e := range sortedEvents {
Logf("At %v - event for %v: %v %v: %v", e.FirstTimestamp, e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
}
// Note that we don't wait for any Cleanup to propagate, which means
// that if you delete a bunch of pods right before ending your test,
// you may or may not see the killing/deletion/Cleanup events.
}
func DumpAllNamespaceInfo(c clientset.Interface, namespace string) {
DumpEventsInNamespace(func(opts v1.ListOptions, ns string) (*v1.EventList, error) {
return c.Core().Events(ns).List(opts)
}, namespace)
// If cluster is large, then the following logs are basically useless, because:
// 1. it takes tens of minutes or hours to grab all of them
// 2. there are so many of them that working with them is mostly impossible
// So we dump them only if the cluster is relatively small.
maxNodesForDump := 20
if nodes, err := c.Core().Nodes().List(v1.ListOptions{}); err == nil {
if len(nodes.Items) <= maxNodesForDump {
dumpAllPodInfo(c)
dumpAllNodeInfo(c)
} else {
Logf("skipping dumping cluster info - cluster too large")
}
} else {
Logf("unable to fetch node list: %v", err)
}
}
// byFirstTimestamp sorts a slice of events by first timestamp, using their involvedObject's name as a tie breaker.
type byFirstTimestamp []v1.Event
func (o byFirstTimestamp) Len() int { return len(o) }
func (o byFirstTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o byFirstTimestamp) Less(i, j int) bool {
if o[i].FirstTimestamp.Equal(o[j].FirstTimestamp) {
return o[i].InvolvedObject.Name < o[j].InvolvedObject.Name
}
return o[i].FirstTimestamp.Before(o[j].FirstTimestamp)
}
func dumpAllPodInfo(c clientset.Interface) {
pods, err := c.Core().Pods("").List(v1.ListOptions{})
if err != nil {
Logf("unable to fetch pod debug info: %v", err)
}
logPodStates(pods.Items)
}
func dumpAllNodeInfo(c clientset.Interface) {
// It should be OK to list unschedulable Nodes here.
nodes, err := c.Core().Nodes().List(v1.ListOptions{})
if err != nil {
Logf("unable to fetch node list: %v", err)
return
}
names := make([]string, len(nodes.Items))
for ix := range nodes.Items {
names[ix] = nodes.Items[ix].Name
}
DumpNodeDebugInfo(c, names, Logf)
}
func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(fmt string, args ...interface{})) {
for _, n := range nodeNames {
logFunc("\nLogging node info for node %v", n)
node, err := c.Core().Nodes().Get(n)
if err != nil {
logFunc("Error getting node info %v", err)
}
logFunc("Node Info: %v", node)
logFunc("\nLogging kubelet events for node %v", n)
for _, e := range getNodeEvents(c, n) {
logFunc("source %v type %v message %v reason %v first ts %v last ts %v, involved obj %+v",
e.Source, e.Type, e.Message, e.Reason, e.FirstTimestamp, e.LastTimestamp, e.InvolvedObject)
}
logFunc("\nLogging pods the kubelet thinks is on node %v", n)
podList, err := GetKubeletPods(c, n)
if err != nil {
logFunc("Unable to retrieve kubelet pods for node %v", n)
continue
}
for _, p := range podList.Items {
logFunc("%v started at %v (%d+%d container statuses recorded)", p.Name, p.Status.StartTime, len(p.Status.InitContainerStatuses), len(p.Status.ContainerStatuses))
for _, c := range p.Status.InitContainerStatuses {
logFunc("\tInit container %v ready: %v, restart count %v",
c.Name, c.Ready, c.RestartCount)
}
for _, c := range p.Status.ContainerStatuses {
logFunc("\tContainer %v ready: %v, restart count %v",
c.Name, c.Ready, c.RestartCount)
}
}
HighLatencyKubeletOperations(c, 10*time.Second, n, logFunc)
// TODO: Log node resource info
}
}
// getNodeEvents returns kubelet events from the given node. This includes kubelet
// restart and node unhealthy events. Note that listing events like this will mess
// with latency metrics, beware of calling it during a test.
func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event {
selector := fields.Set{
"involvedObject.kind": "Node",
"involvedObject.name": nodeName,
"involvedObject.namespace": v1.NamespaceAll,
"source": "kubelet",
}.AsSelector().String()
options := v1.ListOptions{FieldSelector: selector}
events, err := c.Core().Events(api.NamespaceSystem).List(options)
if err != nil {
Logf("Unexpected error retrieving node events %v", err)
return []v1.Event{}
}
return events.Items
}
// waitListSchedulableNodesOrDie is a wrapper around listing nodes supporting retries.
func waitListSchedulableNodesOrDie(c clientset.Interface) *v1.NodeList {
var nodes *v1.NodeList
var err error
if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
nodes, err = c.Core().Nodes().List(v1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
return err == nil, nil
}) != nil {
ExpectNoError(err, "Timed out while listing nodes for e2e cluster.")
}
return nodes
}
// A node is schedulable if:
// 1) it doesn't have the "unschedulable" field set
// 2) its Ready condition is set to true
// 3) it doesn't have the NetworkUnavailable condition set to true
func isNodeSchedulable(node *v1.Node) bool {
nodeReady := IsNodeConditionSetAsExpected(node, v1.NodeReady, true)
networkReady := IsNodeConditionUnset(node, v1.NodeNetworkUnavailable) ||
IsNodeConditionSetAsExpectedSilent(node, v1.NodeNetworkUnavailable, false)
return !node.Spec.Unschedulable && nodeReady && networkReady
}
// Test whether a fake pod can be scheduled on "node", given its current taints.
func isNodeUntainted(node *v1.Node) bool {
fakePod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: "fake-not-scheduled",
Namespace: "fake-not-scheduled",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fake-not-scheduled",
Image: "fake-not-scheduled",
},
},
},
}
nodeInfo := schedulercache.NewNodeInfo()
nodeInfo.SetNode(node)
fit, _, err := predicates.PodToleratesNodeTaints(fakePod, nil, nodeInfo)
if err != nil {
Failf("Can't test predicates for node %s: %v", node.Name, err)
return false
}
return fit
}
// GetReadySchedulableNodesOrDie addresses the common use case of getting nodes you can do work on.
// 1) Needs to be schedulable.
// 2) Needs to be ready.
// If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *v1.NodeList) {
nodes = waitListSchedulableNodesOrDie(c)
// previous tests may have caused failures of some nodes. Let's skip
// 'Not Ready' nodes, just in case (there is no need to fail the test).
FilterNodes(nodes, func(node v1.Node) bool {
return isNodeSchedulable(&node) && isNodeUntainted(&node)
})
return nodes
}
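// exampleCountSchedulableNodes is an illustrative sketch, not part of the
// framework: it decides whether a test that needs at least two schedulable,
// untainted nodes can run against the current cluster.
func exampleCountSchedulableNodes(c clientset.Interface) bool {
nodes := GetReadySchedulableNodesOrDie(c)
return len(nodes.Items) >= 2
}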
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, TestContext.AllowedNotReadyNodes)
var notSchedulable []*v1.Node
return wait.PollImmediate(30*time.Second, timeout, func() (bool, error) {
notSchedulable = nil
opts := v1.ListOptions{
ResourceVersion: "0",
FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
}
nodes, err := c.Core().Nodes().List(opts)
if err != nil {
Logf("Unexpected error listing nodes: %v", err)
// Ignore the error here - it will be retried.
return false, nil
}
for i := range nodes.Items {
node := &nodes.Items[i]
if !isNodeSchedulable(node) {
notSchedulable = append(notSchedulable, node)
}
}
// Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready,
// to make it possible e.g. for incorrect deployment of some small percentage
// of nodes (which we allow in cluster validation). Some nodes that are not
// provisioned correctly at startup will never become ready (e.g. when something
// won't install correctly), so we can't expect them to be ready at any point.
//
// However, we only allow non-ready nodes with some specific reasons.
if len(notSchedulable) > 0 {
Logf("Unschedulable nodes:")
for i := range notSchedulable {
Logf("-> %s Ready=%t Network=%t",
notSchedulable[i].Name,
IsNodeConditionSetAsExpected(notSchedulable[i], v1.NodeReady, true),
IsNodeConditionSetAsExpected(notSchedulable[i], v1.NodeNetworkUnavailable, false))
}
}
if len(notSchedulable) > TestContext.AllowedNotReadyNodes {
return false, nil
}
return allowedNotReadyReasons(notSchedulable), nil
})
}
func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, labelValue string) {
ExpectNoError(testutils.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
}
func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) {
By("verifying the node has the label " + labelKey + " " + labelValue)
node, err := c.Core().Nodes().Get(nodeName)
ExpectNoError(err)
Expect(node.Labels[labelKey]).To(Equal(labelValue))
}
// RemoveLabelOffNode is for cleaning up labels temporarily added to node,
// won't fail if target label doesn't exist or has been removed.
func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string) {
By("removing the label " + labelKey + " off the node " + nodeName)
ExpectNoError(testutils.RemoveLabelOffNode(c, nodeName, []string{labelKey}))
By("verifying the node doesn't have the label " + labelKey)
ExpectNoError(testutils.VerifyLabelsRemoved(c, nodeName, []string{labelKey}))
}
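// exampleLabelRoundTrip is an illustrative sketch, not part of the framework:
// it shows the add / verify / remove pattern a scheduling test might use around
// a hypothetical "e2e-test" label.
func exampleLabelRoundTrip(c clientset.Interface, nodeName string) {
AddOrUpdateLabelOnNode(c, nodeName, "e2e-test", "true")
ExpectNodeHasLabel(c, nodeName, "e2e-test", "true")
RemoveLabelOffNode(c, nodeName, "e2e-test")
}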
func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint v1.Taint) {
for attempt := 0; attempt < UpdateRetries; attempt++ {
node, err := c.Core().Nodes().Get(nodeName)
ExpectNoError(err)
nodeTaints, err := v1.GetTaintsFromNodeAnnotations(node.Annotations)
ExpectNoError(err)
var newTaints []v1.Taint
updated := false
for _, existingTaint := range nodeTaints {
if taint.MatchTaint(existingTaint) {
newTaints = append(newTaints, taint)
updated = true
continue
}
newTaints = append(newTaints, existingTaint)
}
if !updated {
newTaints = append(newTaints, taint)
}
taintsData, err := json.Marshal(newTaints)
ExpectNoError(err)
if node.Annotations == nil {
node.Annotations = make(map[string]string)
}
node.Annotations[v1.TaintsAnnotationKey] = string(taintsData)
_, err = c.Core().Nodes().Update(node)
if err != nil {
if !apierrs.IsConflict(err) {
ExpectNoError(err)
} else {
Logf("Conflict when trying to add/update taint %v to %v", taint, nodeName)
}
} else {
break
}
time.Sleep(100 * time.Millisecond)
}
}
func taintExists(taints []v1.Taint, taintToFind v1.Taint) bool {
for _, taint := range taints {
if taint.MatchTaint(taintToFind) {
return true
}
}
return false
}
func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint v1.Taint) {
By("verifying the node has the taint " + taint.ToString())
node, err := c.Core().Nodes().Get(nodeName)
ExpectNoError(err)
nodeTaints, err := v1.GetTaintsFromNodeAnnotations(node.Annotations)
ExpectNoError(err)
if len(nodeTaints) == 0 || !taintExists(nodeTaints, taint) {
Failf("Failed to find taint %s on node %s", taint.ToString(), nodeName)
}
}
func deleteTaint(oldTaints []v1.Taint, taintToDelete v1.Taint) ([]v1.Taint, error) {
newTaints := []v1.Taint{}
found := false
for _, oldTaint := range oldTaints {
if oldTaint.MatchTaint(taintToDelete) {
found = true
continue
}
newTaints = append(newTaints, oldTaint)
}
if !found {
return nil, fmt.Errorf("taint %s not found.", taintToDelete.ToString())
}
return newTaints, nil
}
// RemoveTaintOffNode is for cleaning up taints temporarily added to node,
// won't fail if target taint doesn't exist or has been removed.
func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint v1.Taint) {
By("removing the taint " + taint.ToString() + " off the node " + nodeName)
for attempt := 0; attempt < UpdateRetries; attempt++ {
node, err := c.Core().Nodes().Get(nodeName)
ExpectNoError(err)
nodeTaints, err := v1.GetTaintsFromNodeAnnotations(node.Annotations)
ExpectNoError(err)
if len(nodeTaints) == 0 {
return
}
if !taintExists(nodeTaints, taint) {
return
}
newTaints, err := deleteTaint(nodeTaints, taint)
ExpectNoError(err)
if len(newTaints) == 0 {
delete(node.Annotations, v1.TaintsAnnotationKey)
} else {
taintsData, err := json.Marshal(newTaints)
ExpectNoError(err)
node.Annotations[v1.TaintsAnnotationKey] = string(taintsData)
}
_, err = c.Core().Nodes().Update(node)
if err != nil {
if !apierrs.IsConflict(err) {
ExpectNoError(err)
} else {
Logf("Conflict when trying to add/update taint %s to node %v", taint.ToString(), nodeName)
}
} else {
break
}
time.Sleep(100 * time.Millisecond)
}
nodeUpdated, err := c.Core().Nodes().Get(nodeName)
ExpectNoError(err)
By("verifying the node doesn't have the taint " + taint.ToString())
taintsGot, err := v1.GetTaintsFromNodeAnnotations(nodeUpdated.Annotations)
ExpectNoError(err)
if taintExists(taintsGot, taint) {
Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
}
}
func getScalerForKind(internalClientset internalclientset.Interface, kind schema.GroupKind) (kubectl.Scaler, error) {
switch kind {
case api.Kind("ReplicationController"):
return kubectl.ScalerFor(api.Kind("ReplicationController"), internalClientset)
case extensionsinternal.Kind("ReplicaSet"):
return kubectl.ScalerFor(extensionsinternal.Kind("ReplicaSet"), internalClientset)
case extensionsinternal.Kind("Deployment"):
return kubectl.ScalerFor(extensionsinternal.Kind("Deployment"), internalClientset)
default:
return nil, fmt.Errorf("Unsupported kind for getting Scaler: %v", kind)
}
}
func ScaleResource(
clientset clientset.Interface,
internalClientset internalclientset.Interface,
ns, name string,
size uint,
wait bool,
kind schema.GroupKind,
) error {
By(fmt.Sprintf("Scaling %v %s in namespace %s to %d", kind, name, ns, size))
scaler, err := getScalerForKind(internalClientset, kind)
if err != nil {
return err
}
waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
if err = scaler.Scale(ns, name, size, nil, waitForScale, waitForReplicas); err != nil {
return fmt.Errorf("error while scaling RC %s to %d replicas: %v", name, size, err)
}
if !wait {
return nil
}
return WaitForControlledPodsRunning(clientset, ns, name, kind)
}
// Wait up to 10 minutes for pods to become Running.
func WaitForControlledPodsRunning(c clientset.Interface, ns, name string, kind schema.GroupKind) error {
rtObject, err := getRuntimeObjectForKind(c, kind, ns, name)
if err != nil {
return err
}
selector, err := getSelectorFromRuntimeObject(rtObject)
if err != nil {
return err
}
err = testutils.WaitForPodsWithLabelRunning(c, ns, selector)
if err != nil {
return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", name, err)
}
return nil
}
func ScaleRC(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error {
return ScaleResource(clientset, internalClientset, ns, name, size, wait, api.Kind("ReplicationController"))
}
func ScaleDeployment(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error {
return ScaleResource(clientset, internalClientset, ns, name, size, wait, extensionsinternal.Kind("Deployment"))
}
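// exampleScaleRCDown is an illustrative sketch, not part of the framework: it
// scales a hypothetical replication controller "frontend" down to one replica
// and waits for the remaining pods to be running.
func exampleScaleRCDown(c clientset.Interface, internal internalclientset.Interface, ns string) error {
return ScaleRC(c, internal, ns, "frontend", 1, true)
}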
// Returns true if all the specified pods are scheduled, else returns false.
func podsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (bool, error) {
PodStore := testutils.NewPodStore(c, ns, label, fields.Everything())
defer PodStore.Stop()
pods := PodStore.List()
if len(pods) == 0 {
return false, nil
}
for _, pod := range pods {
if pod.Spec.NodeName == "" {
return false, nil
}
}
return true, nil
}
// Wait until all matching pods are scheduled and at least one
// matching pod exists. Return the list of matching pods.
func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
err = wait.PollImmediate(Poll, podScheduledBeforeTimeout,
func() (bool, error) {
pods, err = WaitForPodsWithLabel(c, ns, label)
if err != nil {
return false, err
}
for _, pod := range pods.Items {
if pod.Spec.NodeName == "" {
return false, nil
}
}
return true, nil
})
return pods, err
}
// Wait up to PodListTimeout to get pods with a certain label.
func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
for t := time.Now(); time.Since(t) < PodListTimeout; time.Sleep(Poll) {
options := v1.ListOptions{LabelSelector: label.String()}
pods, err = c.Core().Pods(ns).List(options)
Expect(err).NotTo(HaveOccurred())
if len(pods.Items) > 0 {
break
}
}
if pods == nil || len(pods.Items) == 0 {
err = fmt.Errorf("Timeout while waiting for pods with label %v", label)
}
return
}
// Wait for the exact number of matching pods to become running and ready.
// Return the list of matching pods.
func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label labels.Selector, num int, timeout time.Duration) (pods *v1.PodList, err error) {
var current int
err = wait.Poll(Poll, timeout,
func() (bool, error) {
pods, err := WaitForPodsWithLabel(c, ns, label)
if err != nil {
Logf("Failed to list pods: %v", err)
return false, nil
}
current = 0
for _, pod := range pods.Items {
if flag, err := testutils.PodRunningReady(&pod); err == nil && flag {
current++
}
}
if current != num {
Logf("Got %v pods running and ready, expect: %v", current, num)
return false, nil
}
return true, nil
})
return pods, err
}
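// exampleWaitForLabeledPods is an illustrative sketch, not part of the framework:
// it waits up to five minutes for three pods carrying the hypothetical label
// app=web to be running and ready.
func exampleWaitForLabeledPods(c clientset.Interface, ns string) (*v1.PodList, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{"app": "web"}))
return WaitForPodsWithLabelRunningReady(c, ns, label, 3, 5*time.Minute)
}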
func getRuntimeObjectForKind(c clientset.Interface, kind schema.GroupKind, ns, name string) (runtime.Object, error) {
switch kind {
case api.Kind("ReplicationController"):
return c.Core().ReplicationControllers(ns).Get(name)
case extensionsinternal.Kind("ReplicaSet"):
return c.Extensions().ReplicaSets(ns).Get(name)
case extensionsinternal.Kind("Deployment"):
return c.Extensions().Deployments(ns).Get(name)
default:
return nil, fmt.Errorf("Unsupported kind when getting runtime object: %v", kind)
}
}
func deleteResource(c clientset.Interface, kind schema.GroupKind, ns, name string, deleteOption *v1.DeleteOptions) error {
switch kind {
case api.Kind("ReplicationController"):
return c.Core().ReplicationControllers(ns).Delete(name, deleteOption)
case extensionsinternal.Kind("ReplicaSet"):
return c.Extensions().ReplicaSets(ns).Delete(name, deleteOption)
case extensionsinternal.Kind("Deployment"):
return c.Extensions().Deployments(ns).Delete(name, deleteOption)
default:
return fmt.Errorf("Unsupported kind when deleting: %v", kind)
}
}
func getSelectorFromRuntimeObject(obj runtime.Object) (labels.Selector, error) {
switch typed := obj.(type) {
case *v1.ReplicationController:
return labels.SelectorFromSet(typed.Spec.Selector), nil
case *extensions.ReplicaSet:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
case *extensions.Deployment:
return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
default:
return nil, fmt.Errorf("Unsupported kind when getting selector: %v", obj)
}
}
func getReplicasFromRuntimeObject(obj runtime.Object) (int32, error) {
switch typed := obj.(type) {
case *v1.ReplicationController:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *extensions.ReplicaSet:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
case *extensions.Deployment:
if typed.Spec.Replicas != nil {
return *typed.Spec.Replicas, nil
}
return 0, nil
default:
return -1, fmt.Errorf("Unsupported kind when getting number of replicas: %v", obj)
}
}
func getReaperForKind(internalClientset internalclientset.Interface, kind schema.GroupKind) (kubectl.Reaper, error) {
switch kind {
case api.Kind("ReplicationController"):
return kubectl.ReaperFor(api.Kind("ReplicationController"), internalClientset)
case extensionsinternal.Kind("ReplicaSet"):
return kubectl.ReaperFor(extensionsinternal.Kind("ReplicaSet"), internalClientset)
case extensionsinternal.Kind("Deployment"):
return kubectl.ReaperFor(extensionsinternal.Kind("Deployment"), internalClientset)
default:
return nil, fmt.Errorf("Unsupported kind: %v", kind)
}
}
// DeleteResourceAndPods deletes a given resource and all pods it spawned
func DeleteResourceAndPods(clientset clientset.Interface, internalClientset internalclientset.Interface, kind schema.GroupKind, ns, name string) error {
By(fmt.Sprintf("deleting %v %s in namespace %s", kind, name, ns))
rtObject, err := getRuntimeObjectForKind(clientset, kind, ns, name)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("%v %s not found: %v", kind, name, err)
return nil
}
return err
}
selector, err := getSelectorFromRuntimeObject(rtObject)
if err != nil {
return err
}
reaper, err := getReaperForKind(internalClientset, kind)
if err != nil {
return err
}
ps, err := podStoreForSelector(clientset, ns, selector)
if err != nil {
return err
}
defer ps.Stop()
startTime := time.Now()
err = reaper.Stop(ns, name, 0, nil)
if apierrs.IsNotFound(err) {
Logf("%v %s was already deleted: %v", kind, name, err)
return nil
}
if err != nil {
return fmt.Errorf("error while stopping %v: %s: %v", kind, name, err)
}
deleteTime := time.Now().Sub(startTime)
Logf("Deleting %v %s took: %v", kind, name, deleteTime)
err = waitForPodsInactive(ps, 10*time.Millisecond, 10*time.Minute)
if err != nil {
return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
}
terminatePodTime := time.Now().Sub(startTime) - deleteTime
Logf("Terminating %v %s pods took: %v", kind, name, terminatePodTime)
// this is to relieve namespace controller's pressure when deleting the
// namespace after a test.
err = waitForPodsGone(ps, 10*time.Second, 10*time.Minute)
if err != nil {
return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
}
return nil
}
func DeleteRCAndPods(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error {
return DeleteResourceAndPods(clientset, internalClientset, api.Kind("ReplicationController"), ns, name)
}
// DeleteResourceAndWaitForGC deletes only given resource and waits for GC to delete the pods.
func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns, name string) error {
By(fmt.Sprintf("deleting %v %s in namespace %s, will wait for the garbage collector to delete the pods", kind, name, ns))
rtObject, err := getRuntimeObjectForKind(c, kind, ns, name)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("%v %s not found: %v", kind, name, err)
return nil
}
return err
}
selector, err := getSelectorFromRuntimeObject(rtObject)
if err != nil {
return err
}
replicas, err := getReplicasFromRuntimeObject(rtObject)
if err != nil {
return err
}
ps, err := podStoreForSelector(c, ns, selector)
if err != nil {
return err
}
defer ps.Stop()
startTime := time.Now()
falseVar := false
deleteOption := &v1.DeleteOptions{OrphanDependents: &falseVar}
err = deleteResource(c, kind, ns, name, deleteOption)
if err != nil && apierrs.IsNotFound(err) {
Logf("%v %s was already deleted: %v", kind, name, err)
return nil
}
if err != nil {
return err
}
deleteTime := time.Now().Sub(startTime)
Logf("Deleting %v %s took: %v", kind, name, deleteTime)
var interval, timeout time.Duration
switch {
case replicas < 100:
interval = 100 * time.Millisecond
case replicas < 1000:
interval = 1 * time.Second
default:
interval = 10 * time.Second
}
if replicas < 5000 {
timeout = 10 * time.Minute
} else {
timeout = time.Duration(replicas/gcThroughput) * time.Second
// gcThroughput is pretty strict now, add a bit more to it
timeout = timeout + 3*time.Minute
}
err = waitForPodsInactive(ps, interval, timeout)
if err != nil {
return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
}
terminatePodTime := time.Now().Sub(startTime) - deleteTime
Logf("Terminating %v %s pods took: %v", kind, name, terminatePodTime)
err = waitForPodsGone(ps, interval, 10*time.Minute)
if err != nil {
return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
}
return nil
}
// DeleteRCAndWaitForGC deletes only the Replication Controller and waits for GC to delete the pods.
func DeleteRCAndWaitForGC(c clientset.Interface, ns, name string) error {
return DeleteResourceAndWaitForGC(c, api.Kind("ReplicationController"), ns, name)
}
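// Hypothetical sketch (not part of the original file) contrasting the two deletion
// helpers, assuming c/internalC are the typed/internal clientsets and ns/name
// identify an existing ReplicationController:
//
//	// Reaper-based deletion: scales the RC down and waits for its pods itself.
//	ExpectNoError(DeleteRCAndPods(c, internalC, ns, name))
//
//	// GC-based deletion: deletes only the RC and waits for the garbage
//	// collector to remove the pods.
//	ExpectNoError(DeleteRCAndWaitForGC(c, ns, name))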
// podStoreForSelector creates a PodStore that monitors pods from given namespace matching given selector.
// It waits until the reflector does a List() before returning.
func podStoreForSelector(c clientset.Interface, ns string, selector labels.Selector) (*testutils.PodStore, error) {
ps := testutils.NewPodStore(c, ns, selector, fields.Everything())
err := wait.Poll(1*time.Second, 2*time.Minute, func() (bool, error) {
if len(ps.Reflector.LastSyncResourceVersion()) != 0 {
return true, nil
}
return false, nil
})
return ps, err
}
// waitForPodsInactive waits until there are no active pods left in the PodStore.
// This is to make a fair comparison of deletion time between DeleteRCAndPods
// and DeleteRCAndWaitForGC, because the RC controller decreases status.replicas
// when the pod is inactive.
func waitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration) error {
return wait.PollImmediate(interval, timeout, func() (bool, error) {
pods := ps.List()
for _, pod := range pods {
if controller.IsPodActive(pod) {
return false, nil
}
}
return true, nil
})
}
// waitForPodsGone waits until there are no pods left in the PodStore.
func waitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) error {
return wait.PollImmediate(interval, timeout, func() (bool, error) {
if pods := ps.List(); len(pods) == 0 {
return true, nil
}
return false, nil
})
}
// Delete a ReplicaSet and all pods it spawned
func DeleteReplicaSet(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error {
By(fmt.Sprintf("deleting ReplicaSet %s in namespace %s", name, ns))
rc, err := clientset.Extensions().ReplicaSets(ns).Get(name)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("ReplicaSet %s was already deleted: %v", name, err)
return nil
}
return err
}
reaper, err := kubectl.ReaperFor(extensionsinternal.Kind("ReplicaSet"), internalClientset)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("ReplicaSet %s was already deleted: %v", name, err)
return nil
}
return err
}
startTime := time.Now()
err = reaper.Stop(ns, name, 0, nil)
if apierrs.IsNotFound(err) {
Logf("ReplicaSet %s was already deleted: %v", name, err)
return nil
}
deleteRSTime := time.Now().Sub(startTime)
Logf("Deleting RS %s took: %v", name, deleteRSTime)
if err == nil {
err = waitForReplicaSetPodsGone(clientset, rc)
}
terminatePodTime := time.Now().Sub(startTime) - deleteRSTime
Logf("Terminating ReplicaSet %s pods took: %v", name, terminatePodTime)
return err
}
// waitForReplicaSetPodsGone waits until there are no pods reported under a
// ReplicaSet selector (because the pods have completed termination).
func waitForReplicaSetPodsGone(c clientset.Interface, rs *extensions.ReplicaSet) error {
return wait.PollImmediate(Poll, 2*time.Minute, func() (bool, error) {
selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
ExpectNoError(err)
options := v1.ListOptions{LabelSelector: selector.String()}
if pods, err := c.Core().Pods(rs.Namespace).List(options); err == nil && len(pods.Items) == 0 {
return true, nil
}
return false, nil
})
}
// Waits for the deployment status to become valid (i.e. max unavailable and max surge aren't violated anymore).
// Note that the status should stay valid at all times except shortly after a scaling event or right after the deployment is created.
// To verify that the deployment status is valid and wait for the rollout to finish, use WaitForDeploymentStatus instead.
func WaitForDeploymentStatusValid(c clientset.Interface, d *extensions.Deployment) error {
var (
oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet
newRS *extensions.ReplicaSet
deployment *extensions.Deployment
reason string
)
err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
var err error
deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name)
if err != nil {
return false, err
}
oldRSs, allOldRSs, newRS, err = deploymentutil.GetAllReplicaSets(deployment, c)
if err != nil {
return false, err
}
if newRS == nil {
// New RS hasn't been created yet.
reason = "new replica set hasn't been created yet"
Logf(reason)
return false, nil
}
allRSs = append(oldRSs, newRS)
// The old/new ReplicaSets need to contain the pod-template-hash label
for i := range allRSs {
if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
reason = "all replica sets need to contain the pod-template-hash label"
Logf(reason)
return false, nil
}
}
totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
maxCreated := *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)
if totalCreated > maxCreated {
reason = fmt.Sprintf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
Logf(reason)
return false, nil
}
minAvailable := deploymentutil.MinAvailable(deployment)
if deployment.Status.AvailableReplicas < minAvailable {
reason = fmt.Sprintf("total pods available: %d, less than the min required: %d", deployment.Status.AvailableReplicas, minAvailable)
Logf(reason)
return false, nil
}
// When the deployment status and its underlying resources reach the desired state, we're done
if deployment.Status.Replicas == *(deployment.Spec.Replicas) &&
deployment.Status.UpdatedReplicas == *(deployment.Spec.Replicas) &&
deployment.Status.AvailableReplicas == *(deployment.Spec.Replicas) {
return true, nil
}
reason = fmt.Sprintf("deployment status: %#v", deployment.Status)
Logf(reason)
return false, nil
})
if err == wait.ErrWaitTimeout {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfDeployment(c, deployment)
err = fmt.Errorf("%s", reason)
}
if err != nil {
return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.Name, err)
}
return nil
}
// Waits for the deployment to reach desired state.
// Returns an error if the deployment's rolling update strategy (max unavailable or max surge) is broken at any time.
func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment) error {
var (
oldRSs, allOldRSs, allRSs []*extensions.ReplicaSet
newRS *extensions.ReplicaSet
deployment *extensions.Deployment
)
err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
var err error
deployment, err = c.Extensions().Deployments(d.Namespace).Get(d.Name)
if err != nil {
return false, err
}
oldRSs, allOldRSs, newRS, err = deploymentutil.GetAllReplicaSets(deployment, c)
if err != nil {
return false, err
}
if newRS == nil {
// New RS hasn't been created yet.
return false, nil
}
allRSs = append(oldRSs, newRS)
// The old/new ReplicaSets need to contain the pod-template-hash label
for i := range allRSs {
if !labelsutil.SelectorHasLabel(allRSs[i].Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
return false, nil
}
}
totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
maxCreated := *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)
if totalCreated > maxCreated {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfDeployment(c, deployment)
return false, fmt.Errorf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
}
minAvailable := deploymentutil.MinAvailable(deployment)
if deployment.Status.AvailableReplicas < minAvailable {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfDeployment(c, deployment)
return false, fmt.Errorf("total pods available: %d, less than the min required: %d", deployment.Status.AvailableReplicas, minAvailable)
}
// When the deployment status and its underlying resources reach the desired state, we're done
if deployment.Status.Replicas == *(deployment.Spec.Replicas) &&
deployment.Status.UpdatedReplicas == *(deployment.Spec.Replicas) {
return true, nil
}
return false, nil
})
if err == wait.ErrWaitTimeout {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfDeployment(c, deployment)
}
if err != nil {
return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.Name, err)
}
return nil
}
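// Hypothetical sketch (not in the original file) of how the two deployment waiters
// differ, assuming c is a clientset.Interface and d a deployment that was just
// created or scaled by the calling test:
//
//	// Fails fast if max surge / max unavailable are ever violated during rollout.
//	ExpectNoError(WaitForDeploymentStatus(c, d))
//
//	// Tolerates a transiently invalid status right after creation or scaling and
//	// only requires the status to eventually become valid.
//	ExpectNoError(WaitForDeploymentStatusValid(c, d))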
// WaitForDeploymentUpdatedReplicasLTE waits for the given deployment to be observed by the controller and to have at least minUpdatedReplicas updatedReplicas.
func WaitForDeploymentUpdatedReplicasLTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int, desiredGeneration int64) error {
err := wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
if err != nil {
return false, err
}
if deployment.Status.ObservedGeneration >= desiredGeneration && deployment.Status.UpdatedReplicas >= int32(minUpdatedReplicas) {
return true, nil
}
return false, nil
})
if err != nil {
return fmt.Errorf("error waiting for deployment %s to have at least %d updpatedReplicas: %v", deploymentName, minUpdatedReplicas, err)
}
return nil
}
// WaitForDeploymentRollbackCleared waits until the given deployment has either started rolling back or does not need to roll back.
// Note that rollback should be cleared shortly, so we only wait for 1 minute here to fail early.
func WaitForDeploymentRollbackCleared(c clientset.Interface, ns, deploymentName string) error {
err := wait.Poll(Poll, 1*time.Minute, func() (bool, error) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
if err != nil {
return false, err
}
// Rollback is not set, or has already been kicked off and cleared
if deployment.Spec.RollbackTo == nil {
return true, nil
}
return false, nil
})
if err != nil {
return fmt.Errorf("error waiting for deployment %s rollbackTo to be cleared: %v", deploymentName, err)
}
return nil
}
// WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image.
// Note that deployment revision and its new RS revision should be updated shortly, so we only wait for 1 minute here to fail early.
func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string) error {
var deployment *extensions.Deployment
var newRS *extensions.ReplicaSet
var reason string
err := wait.Poll(Poll, 1*time.Minute, func() (bool, error) {
var err error
deployment, err = c.Extensions().Deployments(ns).Get(deploymentName)
if err != nil {
return false, err
}
// The new ReplicaSet needs to be non-nil and contain the pod-template-hash label
newRS, err = deploymentutil.GetNewReplicaSet(deployment, c)
if err != nil {
return false, err
}
if newRS == nil {
reason = fmt.Sprintf("New replica set for deployment %q is yet to be created", deployment.Name)
Logf(reason)
return false, nil
}
if !labelsutil.SelectorHasLabel(newRS.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) {
reason = fmt.Sprintf("New replica set %q doesn't have DefaultDeploymentUniqueLabelKey", newRS.Name)
Logf(reason)
return false, nil
}
// Check revision of this deployment, and of the new replica set of this deployment
if deployment.Annotations == nil || deployment.Annotations[deploymentutil.RevisionAnnotation] != revision {
reason = fmt.Sprintf("Deployment %q doesn't have the required revision set", deployment.Name)
Logf(reason)
return false, nil
}
if deployment.Spec.Template.Spec.Containers[0].Image != image {
reason = fmt.Sprintf("Deployment %q doesn't have the required image set", deployment.Name)
Logf(reason)
return false, nil
}
if newRS.Annotations == nil || newRS.Annotations[deploymentutil.RevisionAnnotation] != revision {
reason = fmt.Sprintf("New replica set %q doesn't have the required revision set", newRS.Name)
Logf(reason)
return false, nil
}
if newRS.Spec.Template.Spec.Containers[0].Image != image {
reason = fmt.Sprintf("New replica set %q doesn't have the required image set", newRS.Name)
Logf(reason)
return false, nil
}
return true, nil
})
if err == wait.ErrWaitTimeout {
logReplicaSetsOfDeployment(deployment, nil, newRS)
err = fmt.Errorf("%s", reason)
}
if newRS == nil {
return fmt.Errorf("deployment %q failed to create new replica set", deploymentName)
}
if err != nil {
return fmt.Errorf("error waiting for deployment %q (got %s / %s) and new replica set %q (got %s / %s) revision and image to match expectation (expected %s / %s): %v", deploymentName, deployment.Annotations[deploymentutil.RevisionAnnotation], deployment.Spec.Template.Spec.Containers[0].Image, newRS.Name, newRS.Annotations[deploymentutil.RevisionAnnotation], newRS.Spec.Template.Spec.Containers[0].Image, revision, image, err)
}
return nil
}
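// Hypothetical usage sketch (not part of the original file): after updating a
// deployment's container image, wait for revision "2" and the new image to be
// rolled out, assuming c, ns, deploymentName and newImage come from the caller.
//
//	err := WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", newImage)
//	ExpectNoError(err)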
func WaitForOverlappingAnnotationMatch(c clientset.Interface, ns, deploymentName, expected string) error {
return wait.Poll(Poll, 1*time.Minute, func() (bool, error) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
if err != nil {
return false, err
}
if deployment.Annotations[deploymentutil.OverlapAnnotation] == expected {
return true, nil
}
return false, nil
})
}
// CheckNewRSAnnotations checks if the new RS's annotations are as expected
func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, expectedAnnotations map[string]string) error {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
if err != nil {
return err
}
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c)
if err != nil {
return err
}
for k, v := range expectedAnnotations {
// Skip checking revision annotations
if k != deploymentutil.RevisionAnnotation && v != newRS.Annotations[k] {
return fmt.Errorf("Expected new RS annotations = %+v, got %+v", expectedAnnotations, newRS.Annotations)
}
}
return nil
}
func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
options := v1.ListOptions{LabelSelector: label.String()}
return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
pods, err := c.Core().Pods(ns).List(options)
if err != nil {
return false, nil
}
for _, pod := range pods.Items {
if !deploymentutil.IsPodAvailable(&pod, int32(minReadySeconds), time.Now()) {
return false, nil
}
}
return true, nil
})
}
// Waits for the deployment to clean up old replica sets.
func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
if err != nil {
return false, err
}
_, oldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c)
if err != nil {
return false, err
}
return len(oldRSs) == desiredRSNum, nil
})
}
func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) {
Logf("Deployment: %+v. Selector = %+v", *deployment, deployment.Spec.Selector)
for i := range allOldRSs {
Logf("All old ReplicaSets (%d/%d) of deployment %s: %+v. Selector = %+v", i+1, len(allOldRSs), deployment.Name, *allOldRSs[i], allOldRSs[i].Spec.Selector)
}
if newRS != nil {
Logf("New ReplicaSet of deployment %s: %+v. Selector = %+v", deployment.Name, *newRS, newRS.Spec.Selector)
} else {
Logf("New ReplicaSet of deployment %s is nil.", deployment.Name)
}
}
func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error {
return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) { return c.Extensions().Deployments(ns).Get(deploymentName) }, desiredGeneration, Poll, 1*time.Minute)
}
func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType extensions.DeploymentConditionType) error {
var conditions []extensions.DeploymentCondition
pollErr := wait.PollImmediate(time.Second, 1*time.Minute, func() (bool, error) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
if err != nil {
return false, err
}
conditions = deployment.Status.Conditions
cond := deploymentutil.GetDeploymentCondition(deployment.Status, condType)
return cond != nil && cond.Reason == reason, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("deployment %q never updated with the desired condition and reason: %v", deploymentName, conditions)
}
return pollErr
}
func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment) {
minReadySeconds := deployment.Spec.MinReadySeconds
podList, err := deploymentutil.ListPods(deployment,
func(namespace string, options v1.ListOptions) (*v1.PodList, error) {
return c.Core().Pods(namespace).List(options)
})
if err != nil {
Logf("Failed to list pods of deployment %s: %v", deployment.Name, err)
return
}
for _, pod := range podList.Items {
availability := "not available"
if deploymentutil.IsPodAvailable(&pod, minReadySeconds, time.Now()) {
availability = "available"
}
Logf("Pod %s is %s: %+v", pod.Name, availability, pod)
}
}
// Waits for the number of events on the given object to reach a desired count.
func WaitForEvents(c clientset.Interface, ns string, objOrRef runtime.Object, desiredEventsCount int) error {
return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
events, err := c.Core().Events(ns).Search(objOrRef)
if err != nil {
return false, fmt.Errorf("error in listing events: %s", err)
}
eventsCount := len(events.Items)
if eventsCount == desiredEventsCount {
return true, nil
}
if eventsCount < desiredEventsCount {
return false, nil
}
// Number of events has exceeded the desired count.
return false, fmt.Errorf("number of events has exceeded the desired count, eventsCount: %d, desiredCount: %d", eventsCount, desiredEventsCount)
})
}
// Waits for the number of events on the given object to be at least a desired count.
func WaitForPartialEvents(c clientset.Interface, ns string, objOrRef runtime.Object, atLeastEventsCount int) error {
return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
events, err := c.Core().Events(ns).Search(objOrRef)
if err != nil {
return false, fmt.Errorf("error in listing events: %s", err)
}
eventsCount := len(events.Items)
if eventsCount >= atLeastEventsCount {
return true, nil
}
return false, nil
})
}
type updateDeploymentFunc func(d *extensions.Deployment)
func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDeploymentFunc) (deployment *extensions.Deployment, err error) {
deployments := c.Extensions().Deployments(namespace)
var updateErr error
pollErr := wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
if deployment, err = deployments.Get(name); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(deployment)
if deployment, err = deployments.Update(deployment); err == nil {
Logf("Updating deployment %s", name)
return true, nil
}
updateErr = err
return false, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("couldn't apply the provided updated to deployment %q: %v", name, updateErr)
}
return deployment, pollErr
}
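// Hypothetical sketch (not in the original file) of the applyUpdate closure
// pattern: the closure mutates the freshly fetched object and the helper retries
// the push on conflicts. Assumes c, ns, name and newImage are provided by the
// calling test.
//
//	deployment, err := UpdateDeploymentWithRetries(c, ns, name, func(d *extensions.Deployment) {
//		d.Spec.Template.Spec.Containers[0].Image = newImage
//	})
//	ExpectNoError(err)
//	Logf("updated deployment generation: %d", deployment.Generation)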
type updateRsFunc func(d *extensions.ReplicaSet)
func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateRsFunc) (*extensions.ReplicaSet, error) {
var rs *extensions.ReplicaSet
var updateErr error
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
var err error
if rs, err = c.Extensions().ReplicaSets(namespace).Get(name); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(rs)
if rs, err = c.Extensions().ReplicaSets(namespace).Update(rs); err == nil {
Logf("Updating replica set %q", name)
return true, nil
}
updateErr = err
return false, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("couldn't apply the provided updated to replicaset %q: %v", name, updateErr)
}
return rs, pollErr
}
type updateRcFunc func(d *v1.ReplicationController)
func UpdateReplicationControllerWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateRcFunc) (*v1.ReplicationController, error) {
var rc *v1.ReplicationController
var updateErr error
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
var err error
if rc, err = c.Core().ReplicationControllers(namespace).Get(name); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(rc)
if rc, err = c.Core().ReplicationControllers(namespace).Update(rc); err == nil {
Logf("Updating replication controller %q", name)
return true, nil
}
updateErr = err
return false, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("couldn't apply the provided updated to rc %q: %v", name, updateErr)
}
return rc, pollErr
}
type updateStatefulSetFunc func(*apps.StatefulSet)
func UpdateStatefulSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateStatefulSetFunc) (statefulSet *apps.StatefulSet, err error) {
statefulSets := c.Apps().StatefulSets(namespace)
var updateErr error
pollErr := wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
if statefulSet, err = statefulSets.Get(name); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(statefulSet)
if statefulSet, err = statefulSets.Update(statefulSet); err == nil {
Logf("Updating stateful set %s", name)
return true, nil
}
updateErr = err
return false, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("couldn't apply the provided updated to stateful set %q: %v", name, updateErr)
}
return statefulSet, pollErr
}
type updateJobFunc func(*batch.Job)
func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateJobFunc) (job *batch.Job, err error) {
jobs := c.Batch().Jobs(namespace)
var updateErr error
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
if job, err = jobs.Get(name); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(job)
if job, err = jobs.Update(job); err == nil {
Logf("Updating job %s", name)
return true, nil
}
updateErr = err
return false, nil
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("couldn't apply the provided updated to job %q: %v", name, updateErr)
}
return job, pollErr
}
// NodeAddresses returns the first address of the given type of each node.
func NodeAddresses(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string {
hosts := []string{}
for _, n := range nodelist.Items {
for _, addr := range n.Status.Addresses {
// Use the first external IP address we find on the node, and
// use at most one per node.
// TODO(roberthbailey): Use the "preferred" address for the node, once
// such a thing is defined (#2462).
if addr.Type == addrType {
hosts = append(hosts, addr.Address)
break
}
}
}
return hosts
}
// NodeSSHHosts returns SSH-able host names for all schedulable nodes - this excludes master node.
// It returns an error if it can't find an external IP for every node, though it still returns all
// hosts that it found in that case.
func NodeSSHHosts(c clientset.Interface) ([]string, error) {
nodelist := waitListSchedulableNodesOrDie(c)
// TODO(roberthbailey): Use the "preferred" address for the node, once such a thing is defined (#2462).
hosts := NodeAddresses(nodelist, v1.NodeExternalIP)
// Error if any node didn't have an external IP.
if len(hosts) != len(nodelist.Items) {
return hosts, fmt.Errorf(
"only found %d external IPs on nodes, but found %d nodes. Nodelist: %v",
len(hosts), len(nodelist.Items), nodelist)
}
sshHosts := make([]string, 0, len(hosts))
for _, h := range hosts {
sshHosts = append(sshHosts, net.JoinHostPort(h, "22"))
}
return sshHosts, nil
}
type SSHResult struct {
User string
Host string
Cmd string
Stdout string
Stderr string
Code int
}
// SSH synchronously SSHs to a node running on provider and runs cmd. If there
// is no error performing the SSH, the stdout, stderr, and exit code are
// returned.
func SSH(cmd, host, provider string) (SSHResult, error) {
result := SSHResult{Host: host, Cmd: cmd}
// Get a signer for the provider.
signer, err := GetSigner(provider)
if err != nil {
return result, fmt.Errorf("error getting signer for provider %s: '%v'", provider, err)
}
// RunSSHCommand will default to Getenv("USER") if user == "", but we're
// defaulting here as well for logging clarity.
result.User = os.Getenv("KUBE_SSH_USER")
if result.User == "" {
result.User = os.Getenv("USER")
}
stdout, stderr, code, err := sshutil.RunSSHCommand(cmd, result.User, host, signer)
result.Stdout = stdout
result.Stderr = stderr
result.Code = code
return result, err
}
func LogSSHResult(result SSHResult) {
remote := fmt.Sprintf("%s@%s", result.User, result.Host)
Logf("ssh %s: command: %s", remote, result.Cmd)
Logf("ssh %s: stdout: %q", remote, result.Stdout)
Logf("ssh %s: stderr: %q", remote, result.Stderr)
Logf("ssh %s: exit code: %d", remote, result.Code)
}
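// Hypothetical usage sketch (not part of the original file): run a command on a
// node over SSH and log the full result, assuming host is "<external-ip>:22".
//
//	result, err := SSH("uptime", host, TestContext.Provider)
//	LogSSHResult(result)
//	if err != nil || result.Code != 0 {
//		Failf("uptime failed: err=%v, exit code=%d", err, result.Code)
//	}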
func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*SSHResult, error) {
Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
if a.Type == v1.NodeExternalIP {
host = a.Address + ":22"
break
}
}
if host == "" {
return nil, fmt.Errorf("couldn't find external IP address for node %s", node.Name)
}
Logf("SSH %q on %s(%s)", cmd, node.Name, host)
result, err := SSH(cmd, host, provider)
LogSSHResult(result)
if result.Code != 0 || err != nil {
return nil, fmt.Errorf("failed running %q: %v (exit code %d)",
cmd, err, result.Code)
}
return &result, nil
}
func IssueSSHCommand(cmd, provider string, node *v1.Node) error {
_, err := IssueSSHCommandWithResult(cmd, provider, node)
return err
}
// NewHostExecPodSpec returns the pod spec of hostexec pod
func NewHostExecPodSpec(ns, name string) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
Namespace: ns,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "hostexec",
Image: "gcr.io/google_containers/hostexec:1.2",
ImagePullPolicy: v1.PullIfNotPresent,
},
},
HostNetwork: true,
SecurityContext: &v1.PodSecurityContext{},
},
}
return pod
}
// RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
// inside of a shell.
func RunHostCmd(ns, name, cmd string) (string, error) {
return RunKubectl("exec", fmt.Sprintf("--namespace=%v", ns), name, "--", "/bin/sh", "-c", cmd)
}
// RunHostCmdOrDie calls RunHostCmd and dies on error.
func RunHostCmdOrDie(ns, name, cmd string) string {
stdout, err := RunHostCmd(ns, name, cmd)
Logf("stdout: %v", stdout)
ExpectNoError(err)
return stdout
}
// LaunchHostExecPod launches a hostexec pod in the given namespace and waits
// until it's Running
func LaunchHostExecPod(client clientset.Interface, ns, name string) *v1.Pod {
hostExecPod := NewHostExecPodSpec(ns, name)
pod, err := client.Core().Pods(ns).Create(hostExecPod)
ExpectNoError(err)
err = WaitForPodRunningInNamespace(client, pod)
ExpectNoError(err)
return pod
}
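// Hypothetical usage sketch (not part of the original file): launch a hostexec
// pod and run a shell command from the node's network namespace, assuming
// f.ClientSet and f.Namespace.Name come from the calling test's framework.
//
//	pod := LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "hostexec")
//	out := RunHostCmdOrDie(pod.Namespace, pod.Name, "ip route show")
//	Logf("node routes: %s", out)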
// GetSigner returns an ssh.Signer for the provider ("gce", etc.) that can be
// used to SSH to their nodes.
func GetSigner(provider string) (ssh.Signer, error) {
// Get the directory in which SSH keys are located.
keydir := filepath.Join(os.Getenv("HOME"), ".ssh")
// Select the key itself to use. When implementing more providers here,
// please also add them to any SSH tests that are disabled because of signer
// support.
keyfile := ""
switch provider {
case "gce", "gke", "kubemark":
keyfile = "google_compute_engine"
case "aws":
// If there is an env. variable override, use that.
aws_keyfile := os.Getenv("AWS_SSH_KEY")
if len(aws_keyfile) != 0 {
return sshutil.MakePrivateKeySignerFromFile(aws_keyfile)
}
// Otherwise revert to home dir
keyfile = "kube_aws_rsa"
case "vagrant":
keyfile := os.Getenv("VAGRANT_SSH_KEY")
if len(keyfile) != 0 {
return sshutil.MakePrivateKeySignerFromFile(keyfile)
}
return nil, fmt.Errorf("VAGRANT_SSH_KEY env variable should be provided")
default:
return nil, fmt.Errorf("GetSigner(...) not implemented for %s", provider)
}
key := filepath.Join(keydir, keyfile)
return sshutil.MakePrivateKeySignerFromFile(key)
}
// CheckPodsRunningReady returns whether all pods whose names are listed in
// podNames in namespace ns are running and ready, using c and waiting at most
// timeout.
func CheckPodsRunningReady(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReady, "running and ready")
}
// CheckPodsRunningReadyOrSucceeded returns whether all pods whose names are
// listed in podNames in namespace ns are running and ready, or succeeded; using
// c and waiting at most timeout.
func CheckPodsRunningReadyOrSucceeded(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReadyOrSucceeded, "running and ready, or succeeded")
}
// CheckPodsCondition returns whether all pods whose names are listed in podNames
// in namespace ns are in the condition, using c and waiting at most timeout.
func CheckPodsCondition(c clientset.Interface, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool {
np := len(podNames)
Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames)
result := make(chan bool, len(podNames))
for ix := range podNames {
// Launch off pod readiness checkers.
go func(name string) {
err := WaitForPodCondition(c, ns, name, desc, timeout, condition)
result <- err == nil
}(podNames[ix])
}
// Wait for them all to finish.
success := true
// TODO(a-robinson): Change to `for range` syntax and remove logging once we
// support only Go >= 1.4.
for _, podName := range podNames {
if !<-result {
Logf("Pod %[1]s failed to be %[2]s.", podName, desc)
success = false
}
}
Logf("Wanted all %d pods to be %s. Result: %t. Pods: %v", np, desc, success, podNames)
return success
}
// WaitForNodeToBeReady returns whether node name is ready within timeout.
func WaitForNodeToBeReady(c clientset.Interface, name string, timeout time.Duration) bool {
return WaitForNodeToBe(c, name, v1.NodeReady, true, timeout)
}
// WaitForNodeToBeNotReady returns whether node name is not ready (i.e. the
// readiness condition is anything but ready, e.g. false or unknown) within
// timeout.
func WaitForNodeToBeNotReady(c clientset.Interface, name string, timeout time.Duration) bool {
return WaitForNodeToBe(c, name, v1.NodeReady, false, timeout)
}
func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue, silent bool) bool {
// Check the node readiness condition (logging all).
for _, cond := range node.Status.Conditions {
// Ensure that the condition type and the status matches as desired.
if cond.Type == conditionType {
if (cond.Status == v1.ConditionTrue) == wantTrue {
return true
} else {
if !silent {
Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
}
return false
}
}
}
if !silent {
Logf("Couldn't find condition %v on node %v", conditionType, node.Name)
}
return false
}
func IsNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool {
return isNodeConditionSetAsExpected(node, conditionType, wantTrue, false)
}
func IsNodeConditionSetAsExpectedSilent(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool {
return isNodeConditionSetAsExpected(node, conditionType, wantTrue, true)
}
func IsNodeConditionUnset(node *v1.Node, conditionType v1.NodeConditionType) bool {
for _, cond := range node.Status.Conditions {
if cond.Type == conditionType {
return false
}
}
return true
}
// WaitForNodeToBe returns whether node "name's" condition state matches wantTrue
// within timeout. If wantTrue is true, it will ensure the node condition status
// is ConditionTrue; if it's false, it ensures the node condition is in any state
// other than ConditionTrue (e.g. not true or unknown).
func WaitForNodeToBe(c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
node, err := c.Core().Nodes().Get(name)
if err != nil {
Logf("Couldn't get node %s", name)
continue
}
if IsNodeConditionSetAsExpected(node, conditionType, wantTrue) {
return true
}
}
Logf("Node %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout)
return false
}
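// Hypothetical usage sketch (not part of the original file): after disrupting a
// node, wait for it to drop out of Ready and then come back, assuming c and node
// are provided by the calling test.
//
//	if !WaitForNodeToBeNotReady(c, node.Name, 2*time.Minute) {
//		Failf("node %s never became not-ready", node.Name)
//	}
//	if !WaitForNodeToBeReady(c, node.Name, 5*time.Minute) {
//		Failf("node %s never became ready again", node.Name)
//	}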
// Checks whether not-ready nodes can be ignored while checking if all nodes are
// ready (we allow e.g. for incorrect provisioning of some small percentage of nodes
// while validating cluster, and those nodes may never become healthy).
// Currently we allow only for:
// - CNI plugins not yet present on the node
// TODO: we should extend it for other reasons.
func allowedNotReadyReasons(nodes []*v1.Node) bool {
for _, node := range nodes {
index, condition := v1.GetNodeCondition(&node.Status, v1.NodeReady)
if index == -1 ||
!strings.Contains(condition.Message, "could not locate kubenet required CNI plugins") {
return false
}
}
return true
}
// Checks whether all registered nodes are ready.
// TODO: we should change the AllNodesReady call in AfterEach to WaitForAllNodesHealthy,
// and figure out how to do it in a configurable way, as we can't expect all setups to run
// default test add-ons.
func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, TestContext.AllowedNotReadyNodes)
var notReady []*v1.Node
err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.Core().Nodes().List(v1.ListOptions{})
if err != nil {
return false, err
}
for i := range nodes.Items {
node := &nodes.Items[i]
if !IsNodeConditionSetAsExpected(node, v1.NodeReady, true) {
notReady = append(notReady, node)
}
}
// Framework allows for <TestContext.AllowedNotReadyNodes> nodes to be non-ready,
// to make it possible e.g. for incorrect deployment of some small percentage
// of nodes (which we allow in cluster validation). Some nodes that are not
// provisioned correctly at startup will never become ready (e.g. when something
// won't install correctly), so we can't expect them to be ready at any point.
//
// However, we only allow non-ready nodes with some specific reasons.
if len(notReady) > TestContext.AllowedNotReadyNodes {
return false, nil
}
return allowedNotReadyReasons(notReady), nil
})
if err != nil && err != wait.ErrWaitTimeout {
return err
}
if len(notReady) > TestContext.AllowedNotReadyNodes || !allowedNotReadyReasons(notReady) {
return fmt.Errorf("Not ready nodes: %#v", notReady)
}
return nil
}
// checks whether all registered nodes are ready and all required Pods are running on them.
func WaitForAllNodesHealthy(c clientset.Interface, timeout time.Duration) error {
Logf("Waiting up to %v for all nodes to be ready", timeout)
var notReady []v1.Node
var missingPodsPerNode map[string][]string
err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.Core().Nodes().List(v1.ListOptions{ResourceVersion: "0"})
if err != nil {
return false, err
}
for _, node := range nodes.Items {
if !IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) {
notReady = append(notReady, node)
}
}
pods, err := c.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{ResourceVersion: "0"})
if err != nil {
return false, err
}
systemPodsPerNode := make(map[string][]string)
for _, pod := range pods.Items {
if pod.Namespace == api.NamespaceSystem && pod.Status.Phase == v1.PodRunning {
if pod.Spec.NodeName != "" {
systemPodsPerNode[pod.Spec.NodeName] = append(systemPodsPerNode[pod.Spec.NodeName], pod.Name)
}
}
}
missingPodsPerNode = make(map[string][]string)
for _, node := range nodes.Items {
if !system.IsMasterNode(node.Name) {
for _, requiredPod := range requiredPerNodePods {
foundRequired := false
for _, presentPod := range systemPodsPerNode[node.Name] {
if requiredPod.MatchString(presentPod) {
foundRequired = true
break
}
}
if !foundRequired {
missingPodsPerNode[node.Name] = append(missingPodsPerNode[node.Name], requiredPod.String())
}
}
}
}
return len(notReady) == 0 && len(missingPodsPerNode) == 0, nil
})
if err != nil && err != wait.ErrWaitTimeout {
return err
}
if len(notReady) > 0 {
return fmt.Errorf("Not ready nodes: %v", notReady)
}
if len(missingPodsPerNode) > 0 {
return fmt.Errorf("Not running system Pods: %v", missingPodsPerNode)
}
return nil
}
// Filters nodes in NodeList in place, removing nodes that do not
// satisfy the given condition
// TODO: consider merging with pkg/client/cache.NodeLister
func FilterNodes(nodeList *v1.NodeList, fn func(node v1.Node) bool) {
var l []v1.Node
for _, node := range nodeList.Items {
if fn(node) {
l = append(l, node)
}
}
nodeList.Items = l
}
// ParseKVLines parses output that looks like lines containing "<key>: <val>"
// and returns <val> if <key> is found. Otherwise, it returns the empty string.
func ParseKVLines(output, key string) string {
delim := ":"
key = key + delim
for _, line := range strings.Split(output, "\n") {
pieces := strings.SplitAfterN(line, delim, 2)
if len(pieces) != 2 {
continue
}
k, v := pieces[0], pieces[1]
if k == key {
return strings.TrimSpace(v)
}
}
return ""
}
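// Hypothetical sketch (not in the original file) of the input ParseKVLines
// expects and what it returns:
//
//	out := "Name: foo\nNamespace: kube-system\n"
//	ns := ParseKVLines(out, "Namespace") // "kube-system"
//	missing := ParseKVLines(out, "UID")  // ""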
func RestartKubeProxy(host string) error {
// TODO: Make it work for all providers.
if !ProviderIs("gce", "gke", "aws") {
return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
}
// kubelet will restart the kube-proxy since it's running in a static pod
Logf("Killing kube-proxy on node %v", host)
result, err := SSH("sudo pkill kube-proxy", host, TestContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
return fmt.Errorf("couldn't restart kube-proxy: %v", err)
}
// wait for kube-proxy to come back up
sshCmd := "sudo /bin/sh -c 'pgrep kube-proxy | wc -l'"
err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
Logf("Waiting for kubeproxy to come back up with %v on %v", sshCmd, host)
result, err := SSH(sshCmd, host, TestContext.Provider)
if err != nil {
return false, err
}
if result.Code != 0 {
LogSSHResult(result)
return false, fmt.Errorf("failed to run command, exited %d", result.Code)
}
if result.Stdout == "0\n" {
return false, nil
}
Logf("kube-proxy is back up.")
return true, nil
})
if err != nil {
return fmt.Errorf("kube-proxy didn't recover: %v", err)
}
return nil
}
func RestartApiserver(c discovery.ServerVersionInterface) error {
// TODO: Make it work for all providers.
if !ProviderIs("gce", "gke", "aws") {
return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
}
if ProviderIs("gce", "aws") {
return sshRestartMaster()
}
// GKE doesn't allow ssh access, so use a same-version master
// upgrade to teardown/recreate master.
v, err := c.ServerVersion()
if err != nil {
return err
}
return masterUpgradeGKE(v.GitVersion[1:]) // strip leading 'v'
}
func sshRestartMaster() error {
if !ProviderIs("gce", "aws") {
return fmt.Errorf("unsupported provider: %s", TestContext.Provider)
}
var command string
if ProviderIs("gce") {
command = "sudo docker ps | grep /kube-apiserver | cut -d ' ' -f 1 | xargs sudo docker kill"
} else {
command = "sudo /etc/init.d/kube-apiserver restart"
}
Logf("Restarting master via ssh, running: %v", command)
result, err := SSH(command, GetMasterHost()+":22", TestContext.Provider)
if err != nil || result.Code != 0 {
LogSSHResult(result)
return fmt.Errorf("couldn't restart apiserver: %v", err)
}
return nil
}
func WaitForApiserverUp(c clientset.Interface) error {
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
body, err := c.Core().RESTClient().Get().AbsPath("/healthz").Do().Raw()
if err == nil && string(body) == "ok" {
return nil
}
}
return fmt.Errorf("waiting for apiserver timed out")
}
// WaitForClusterSize waits until the cluster has desired size and there is no not-ready nodes in it.
// By cluster size we mean number of Nodes excluding Master Node.
func WaitForClusterSize(c clientset.Interface, size int, timeout time.Duration) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
nodes, err := c.Core().Nodes().List(v1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
Logf("Failed to list nodes: %v", err)
continue
}
numNodes := len(nodes.Items)
// Filter out not-ready nodes.
FilterNodes(nodes, func(node v1.Node) bool {
return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
})
numReady := len(nodes.Items)
if numNodes == size && numReady == size {
Logf("Cluster has reached the desired size %d", size)
return nil
}
Logf("Waiting for cluster size %d, current size %d, not ready nodes %d", size, numNodes, numNodes-numReady)
}
return fmt.Errorf("timeout waiting %v for cluster size to be %d", timeout, size)
}
func GenerateMasterRegexp(prefix string) string {
return prefix + "(-...)?"
}
// waitForMasters waits until the cluster has the desired number of ready masters in it.
func WaitForMasters(masterPrefix string, c clientset.Interface, size int, timeout time.Duration) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
nodes, err := c.Core().Nodes().List(v1.ListOptions{})
if err != nil {
Logf("Failed to list nodes: %v", err)
continue
}
// Filter out nodes that are not master replicas
FilterNodes(nodes, func(node v1.Node) bool {
res, err := regexp.Match(GenerateMasterRegexp(masterPrefix), ([]byte)(node.Name))
if err != nil {
Logf("Failed to match regexp to node name: %v", err)
return false
}
return res
})
numNodes := len(nodes.Items)
// Filter out not-ready nodes.
FilterNodes(nodes, func(node v1.Node) bool {
return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
})
numReady := len(nodes.Items)
if numNodes == size && numReady == size {
Logf("Cluster has reached the desired number of masters %d", size)
return nil
}
Logf("Waiting for the number of masters %d, current %d, not ready master nodes %d", size, numNodes, numNodes-numReady)
}
return fmt.Errorf("timeout waiting %v for the number of masters to be %d", timeout, size)
}
// GetHostExternalAddress gets the node for a pod and returns the first External
// address. Returns an error if the node the pod is on doesn't have an External
// address.
func GetHostExternalAddress(client clientset.Interface, p *v1.Pod) (externalAddress string, err error) {
node, err := client.Core().Nodes().Get(p.Spec.NodeName)
if err != nil {
return "", err
}
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeExternalIP {
if address.Address != "" {
externalAddress = address.Address
break
}
}
}
if externalAddress == "" {
err = fmt.Errorf("No external address for pod %v on node %v",
p.Name, p.Spec.NodeName)
}
return
}
type extractRT struct {
http.Header
}
func (rt *extractRT) RoundTrip(req *http.Request) (*http.Response, error) {
rt.Header = req.Header
return &http.Response{}, nil
}
// headersForConfig extracts any http client logic necessary for the provided
// config.
func headersForConfig(c *restclient.Config) (http.Header, error) {
extract := &extractRT{}
rt, err := restclient.HTTPWrappersForConfig(c, extract)
if err != nil {
return nil, err
}
if _, err := rt.RoundTrip(&http.Request{}); err != nil {
return nil, err
}
return extract.Header, nil
}
// OpenWebSocketForURL constructs a websocket connection to the provided URL, using the client
// config, with the specified protocols.
func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []string) (*websocket.Conn, error) {
tlsConfig, err := restclient.TLSConfigFor(config)
if err != nil {
return nil, fmt.Errorf("failed to create tls config: %v", err)
}
if tlsConfig != nil {
url.Scheme = "wss"
if !strings.Contains(url.Host, ":") {
url.Host += ":443"
}
} else {
url.Scheme = "ws"
if !strings.Contains(url.Host, ":") {
url.Host += ":80"
}
}
headers, err := headersForConfig(config)
if err != nil {
return nil, fmt.Errorf("failed to load http headers: %v", err)
}
cfg, err := websocket.NewConfig(url.String(), "http://localhost")
if err != nil {
return nil, fmt.Errorf("failed to create websocket config: %v", err)
}
cfg.Header = headers
cfg.TlsConfig = tlsConfig
cfg.Protocol = protocols
return websocket.DialConfig(cfg)
}
// getIngressAddress returns the ips/hostnames associated with the Ingress.
func getIngressAddress(client clientset.Interface, ns, name string) ([]string, error) {
ing, err := client.Extensions().Ingresses(ns).Get(name)
if err != nil {
return nil, err
}
addresses := []string{}
for _, a := range ing.Status.LoadBalancer.Ingress {
if a.IP != "" {
addresses = append(addresses, a.IP)
}
if a.Hostname != "" {
addresses = append(addresses, a.Hostname)
}
}
return addresses, nil
}
// WaitForIngressAddress waits for the Ingress to acquire an address.
func WaitForIngressAddress(c clientset.Interface, ns, ingName string, timeout time.Duration) (string, error) {
var address string
err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
ipOrNameList, err := getIngressAddress(c, ns, ingName)
if err != nil || len(ipOrNameList) == 0 {
Logf("Waiting for Ingress %v to acquire IP, error %v", ingName, err)
return false, nil
}
address = ipOrNameList[0]
return true, nil
})
return address, err
}
// Looks for the given string in the log of a specific pod container
func LookForStringInLog(ns, podName, container, expectedString string, timeout time.Duration) (result string, err error) {
return LookForString(expectedString, timeout, func() string {
return RunKubectlOrDie("logs", podName, container, fmt.Sprintf("--namespace=%v", ns))
})
}
// Looks for the given string in a file in a specific pod container
func LookForStringInFile(ns, podName, container, file, expectedString string, timeout time.Duration) (result string, err error) {
return LookForString(expectedString, timeout, func() string {
return RunKubectlOrDie("exec", podName, "-c", container, fmt.Sprintf("--namespace=%v", ns), "--", "cat", file)
})
}
// Looks for the given string in the output of a command executed in a specific pod container
func LookForStringInPodExec(ns, podName string, command []string, expectedString string, timeout time.Duration) (result string, err error) {
return LookForString(expectedString, timeout, func() string {
// use the first container
args := []string{"exec", podName, fmt.Sprintf("--namespace=%v", ns), "--"}
args = append(args, command...)
return RunKubectlOrDie(args...)
})
}
// Looks for the given string in the output of fn, repeatedly calling fn until
// the timeout is reached or the string is found. Returns the last result and an
// error if the string was not found.
func LookForString(expectedString string, timeout time.Duration, fn func() string) (result string, err error) {
for t := time.Now(); time.Since(t) < timeout; time.Sleep(Poll) {
result = fn()
if strings.Contains(result, expectedString) {
return
}
}
err = fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedString, result)
return
}
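// Hypothetical usage sketch (not part of the original file): wait for a pod's
// container to log a readiness marker, assuming ns, podName and the container
// name come from the calling test.
//
//	_, err := LookForStringInLog(ns, podName, "nginx", "start worker processes", 2*time.Minute)
//	ExpectNoError(err)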
// getSvcNodePort returns the node port for the given service:port.
func getSvcNodePort(client clientset.Interface, ns, name string, svcPort int) (int, error) {
svc, err := client.Core().Services(ns).Get(name)
if err != nil {
return 0, err
}
for _, p := range svc.Spec.Ports {
if p.Port == int32(svcPort) {
if p.NodePort != 0 {
return int(p.NodePort), nil
}
}
}
return 0, fmt.Errorf(
"No node port found for service %v, port %v", name, svcPort)
}
// GetNodePortURL returns the url to a nodeport Service.
func GetNodePortURL(client clientset.Interface, ns, name string, svcPort int) (string, error) {
nodePort, err := getSvcNodePort(client, ns, name, svcPort)
if err != nil {
return "", err
}
// This list of nodes must not include the master, which is marked
// unschedulable, since the master doesn't run kube-proxy. Without
// kube-proxy NodePorts won't work.
var nodes *v1.NodeList
if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
nodes, err = client.Core().Nodes().List(v1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
return err == nil, nil
}) != nil {
return "", err
}
if len(nodes.Items) == 0 {
return "", fmt.Errorf("Unable to list nodes in cluster.")
}
for _, node := range nodes.Items {
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeExternalIP {
if address.Address != "" {
return fmt.Sprintf("http://%v:%v", address.Address, nodePort), nil
}
}
}
}
return "", fmt.Errorf("Failed to find external address for service %v", name)
}
// ScaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till
// none are running, otherwise it does what a synchronous scale operation would do.
func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalclientset.Interface, ns string, l map[string]string, replicas uint) error {
listOpts := v1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l)).String()}
rcs, err := clientset.Core().ReplicationControllers(ns).List(listOpts)
if err != nil {
return err
}
if len(rcs.Items) == 0 {
return fmt.Errorf("RC with labels %v not found in ns %v", l, ns)
}
Logf("Scaling %v RCs with labels %v in ns %v to %v replicas.", len(rcs.Items), l, ns, replicas)
for _, labelRC := range rcs.Items {
name := labelRC.Name
if err := ScaleRC(clientset, internalClientset, ns, name, replicas, false); err != nil {
return err
}
rc, err := clientset.Core().ReplicationControllers(ns).Get(name)
if err != nil {
return err
}
if replicas == 0 {
ps, err := podStoreForSelector(clientset, rc.Namespace, labels.SelectorFromSet(rc.Spec.Selector))
if err != nil {
return err
}
defer ps.Stop()
if err = waitForPodsGone(ps, 10*time.Second, 10*time.Minute); err != nil {
return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
}
} else {
if err := testutils.WaitForPodsWithLabelRunning(
clientset, ns, labels.SelectorFromSet(labels.Set(rc.Spec.Selector))); err != nil {
return err
}
}
}
return nil
}
// TODO(random-liu): Change this to be a member function of the framework.
func GetPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, false)
}
func getPreviousPodLogs(c clientset.Interface, namespace, podName, containerName string) (string, error) {
return getPodLogsInternal(c, namespace, podName, containerName, true)
}
// utility function for gomega Eventually
func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName string, previous bool) (string, error) {
logs, err := c.Core().RESTClient().Get().
Resource("pods").
Namespace(namespace).
Name(podName).SubResource("log").
Param("container", containerName).
Param("previous", strconv.FormatBool(previous)).
Do().
Raw()
if err != nil {
return "", err
}
if strings.Contains(string(logs), "Internal Error") {
return "", fmt.Errorf("Fetched log contains \"Internal Error\": %q.", string(logs))
}
return string(logs), err
}
// EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created
// are actually cleaned up. Currently only implemented for GCE/GKE.
func EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
return ensureGCELoadBalancerResourcesDeleted(ip, portRange)
}
return nil
}
func ensureGCELoadBalancerResourcesDeleted(ip, portRange string) error {
gceCloud, ok := TestContext.CloudConfig.Provider.(*gcecloud.GCECloud)
if !ok {
return fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", TestContext.CloudConfig.Provider)
}
project := TestContext.CloudConfig.ProjectID
region, err := gcecloud.GetGCERegion(TestContext.CloudConfig.Zone)
if err != nil {
return fmt.Errorf("could not get region for zone %q: %v", TestContext.CloudConfig.Zone, err)
}
return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
service := gceCloud.GetComputeService()
list, err := service.ForwardingRules.List(project, region).Do()
if err != nil {
return false, err
}
for ix := range list.Items {
item := list.Items[ix]
if item.PortRange == portRange && item.IPAddress == ip {
Logf("found a load balancer: %v", item)
return false, nil
}
}
return true, nil
})
}
// The following helper functions can block/unblock network from source
// host to destination host by manipulating iptable rules.
// This function assumes it can ssh to the source host.
//
// Caution:
// It is recommended to pass IP addresses rather than hostnames: using hostnames
// forces iptables to do a DNS lookup to resolve the name to an IP address, which
// slows down the test and causes it to fail if DNS is absent or broken.
//
// Suggested usage pattern:
// func foo() {
// ...
// defer UnblockNetwork(from, to)
// BlockNetwork(from, to)
// ...
// }
//
func BlockNetwork(from string, to string) {
Logf("block network traffic from %s to %s", from, to)
iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to)
dropCmd := fmt.Sprintf("sudo iptables --insert %s", iptablesRule)
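	// For example (illustrative IP), for to="10.240.0.5" this executes:
	//   sudo iptables --insert OUTPUT --destination 10.240.0.5 --jump REJECT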
if result, err := SSH(dropCmd, from, TestContext.Provider); result.Code != 0 || err != nil {
LogSSHResult(result)
Failf("Unexpected error: %v", err)
}
}
func UnblockNetwork(from string, to string) {
Logf("Unblock network traffic from %s to %s", from, to)
iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump REJECT", to)
undropCmd := fmt.Sprintf("sudo iptables --delete %s", iptablesRule)
	// The undrop command may fail if the rule has never been created.
	// In such a case we just lose 30 seconds, but the cluster is healthy.
	// However, if the rule had been created and removing it failed, the node is
	// broken and not coming back. Subsequent tests will run on fewer nodes (some
	// of the tests may fail). Manual intervention is required in such a case
	// (recreating the cluster solves the problem too).
err := wait.Poll(time.Millisecond*100, time.Second*30, func() (bool, error) {
result, err := SSH(undropCmd, from, TestContext.Provider)
if result.Code == 0 && err == nil {
return true, nil
}
LogSSHResult(result)
if err != nil {
Logf("Unexpected error: %v", err)
}
return false, nil
})
if err != nil {
Failf("Failed to remove the iptable REJECT rule. Manual intervention is "+
"required on host %s: remove rule %s, if exists", from, iptablesRule)
}
}
func isElementOf(podUID types.UID, pods *v1.PodList) bool {
for _, pod := range pods.Items {
if pod.UID == podUID {
return true
}
}
return false
}
func CheckRSHashLabel(rs *extensions.ReplicaSet) error {
if len(rs.Labels[extensions.DefaultDeploymentUniqueLabelKey]) == 0 ||
len(rs.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey]) == 0 ||
len(rs.Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey]) == 0 {
return fmt.Errorf("unexpected RS missing required pod-hash-template: %+v, selector = %+v, template = %+v", rs, rs.Spec.Selector, rs.Spec.Template)
}
return nil
}
func CheckPodHashLabel(pods *v1.PodList) error {
invalidPod := ""
for _, pod := range pods.Items {
if len(pod.Labels[extensions.DefaultDeploymentUniqueLabelKey]) == 0 {
if len(invalidPod) == 0 {
invalidPod = "unexpected pods missing required pod-hash-template:"
}
invalidPod = fmt.Sprintf("%s %+v;", invalidPod, pod)
}
}
if len(invalidPod) > 0 {
return fmt.Errorf("%s", invalidPod)
}
return nil
}
// timeout for proxy requests.
const proxyTimeout = 2 * time.Minute
// NodeProxyRequest performs a get on a node proxy endpoint given the nodename and rest client.
func NodeProxyRequest(c clientset.Interface, node, endpoint string) (restclient.Result, error) {
// proxy tends to hang in some cases when Node is not ready. Add an artificial timeout for this call.
// This will leak a goroutine if proxy hangs. #22165
subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c.Discovery())
if err != nil {
return restclient.Result{}, err
}
var result restclient.Result
finished := make(chan struct{})
go func() {
if subResourceProxyAvailable {
result = c.Core().RESTClient().Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
Suffix(endpoint).
Do()
} else {
result = c.Core().RESTClient().Get().
Prefix("proxy").
Resource("nodes").
Name(fmt.Sprintf("%v:%v", node, ports.KubeletPort)).
Suffix(endpoint).
Do()
}
finished <- struct{}{}
}()
select {
case <-finished:
return result, nil
case <-time.After(proxyTimeout):
return restclient.Result{}, nil
}
}
// GetKubeletPods retrieves the list of pods on the kubelet
func GetKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) {
return getKubeletPods(c, node, "pods")
}
// GetKubeletRunningPods retrieves the list of running pods on the kubelet. The
// returned pods include the necessary information (e.g., UID, name, namespace for
// pods/containers), but do not contain the full spec.
func GetKubeletRunningPods(c clientset.Interface, node string) (*v1.PodList, error) {
return getKubeletPods(c, node, "runningpods")
}
func getKubeletPods(c clientset.Interface, node, resource string) (*v1.PodList, error) {
result := &v1.PodList{}
client, err := NodeProxyRequest(c, node, resource)
if err != nil {
return &v1.PodList{}, err
}
if err = client.Into(result); err != nil {
return &v1.PodList{}, err
}
return result, nil
}
// LaunchWebserverPod launches a pod serving http on port 8080 to act
// as the target for networking connectivity checks. The ip address
// of the created pod will be returned if the pod is launched
// successfully.
func LaunchWebserverPod(f *Framework, podName, nodeName string) (ip string) {
containerName := fmt.Sprintf("%s-container", podName)
port := 8080
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: containerName,
Image: "gcr.io/google_containers/porter:cd5cb5791ebaa8641955f0e8c2a9bed669b1eaab",
Env: []v1.EnvVar{{Name: fmt.Sprintf("SERVE_PORT_%d", port), Value: "foo"}},
Ports: []v1.ContainerPort{{ContainerPort: int32(port)}},
},
},
NodeName: nodeName,
RestartPolicy: v1.RestartPolicyNever,
},
}
podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
_, err := podClient.Create(pod)
ExpectNoError(err)
ExpectNoError(f.WaitForPodRunning(podName))
createdPod, err := podClient.Get(podName)
ExpectNoError(err)
ip = fmt.Sprintf("%s:%d", createdPod.Status.PodIP, port)
Logf("Target pod IP:port is %s", ip)
return
}
// CheckConnectivityToHost launches a pod running wget on the
// specified node to test connectivity to the specified host. An
// error will be returned if the host is not reachable from the pod.
func CheckConnectivityToHost(f *Framework, nodeName, podName, host string, timeout int) error {
contName := fmt.Sprintf("%s-container", podName)
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: contName,
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"wget", fmt.Sprintf("--timeout=%d", timeout), "-s", host},
},
},
NodeName: nodeName,
RestartPolicy: v1.RestartPolicyNever,
},
}
podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
_, err := podClient.Create(pod)
if err != nil {
return err
}
err = WaitForPodSuccessInNamespace(f.ClientSet, podName, f.Namespace.Name)
if err != nil {
logs, logErr := GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, contName)
if logErr != nil {
Logf("Warning: Failed to get logs from pod %q: %v", pod.Name, logErr)
} else {
Logf("pod %s/%s \"wget\" logs:\n%s", f.Namespace.Name, pod.Name, logs)
}
}
return err
}
// CoreDump SSHs to the master and all nodes and dumps their logs into dir.
// It shells out to cluster/log-dump.sh to accomplish this.
func CoreDump(dir string) {
cmd := exec.Command(path.Join(TestContext.RepoRoot, "cluster", "log-dump.sh"), dir)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
Logf("Error running cluster/log-dump.sh: %v", err)
}
}
func UpdatePodWithRetries(client clientset.Interface, ns, name string, update func(*v1.Pod)) (*v1.Pod, error) {
for i := 0; i < 3; i++ {
pod, err := client.Core().Pods(ns).Get(name)
if err != nil {
return nil, fmt.Errorf("Failed to get pod %q: %v", name, err)
}
update(pod)
pod, err = client.Core().Pods(ns).Update(pod)
if err == nil {
return pod, nil
}
if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) {
return nil, fmt.Errorf("Failed to update pod %q: %v", name, err)
}
}
return nil, fmt.Errorf("Too many retries updating Pod %q", name)
}
func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*v1.Pod, error) {
pods, err := c.Core().Pods(ns).List(v1.ListOptions{})
if err != nil {
return []*v1.Pod{}, err
}
ignoreSelector := labels.SelectorFromSet(ignoreLabels)
filtered := []*v1.Pod{}
	for i := range pods.Items {
		p := &pods.Items[i]
		if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(p.Labels)) {
			continue
		}
		// Append the address of the slice element rather than of a loop variable,
		// so that every entry in filtered points to a distinct Pod.
		filtered = append(filtered, p)
	}
return filtered, nil
}
// RunCmd runs cmd using args and returns its stdout and stderr. It also outputs
// cmd's stdout and stderr to their respective OS streams.
func RunCmd(command string, args ...string) (string, string, error) {
Logf("Running %s %v", command, args)
var bout, berr bytes.Buffer
cmd := exec.Command(command, args...)
// We also output to the OS stdout/stderr to aid in debugging in case cmd
// hangs and never returns before the test gets killed.
//
// This creates some ugly output because gcloud doesn't always provide
// newlines.
cmd.Stdout = io.MultiWriter(os.Stdout, &bout)
cmd.Stderr = io.MultiWriter(os.Stderr, &berr)
err := cmd.Run()
stdout, stderr := bout.String(), berr.String()
if err != nil {
return "", "", fmt.Errorf("error running %s %v; got error %v, stdout %q, stderr %q",
command, args, err, stdout, stderr)
}
return stdout, stderr, nil
}
// retryCmd runs cmd using args and retries it for up to SingleCallTimeout if
// it returns an error. It returns stdout and stderr.
func retryCmd(command string, args ...string) (string, string, error) {
var err error
stdout, stderr := "", ""
wait.Poll(Poll, SingleCallTimeout, func() (bool, error) {
stdout, stderr, err = RunCmd(command, args...)
if err != nil {
Logf("Got %v", err)
return false, nil
}
return true, nil
})
return stdout, stderr, err
}
// GetPodsScheduled returns the Pods that are scheduled and those that are not yet scheduled, excluding Pods on master nodes.
func GetPodsScheduled(masterNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
for _, pod := range pods.Items {
if !masterNodes.Has(pod.Spec.NodeName) {
if pod.Spec.NodeName != "" {
_, scheduledCondition := v1.GetPodCondition(&pod.Status, v1.PodScheduled)
Expect(scheduledCondition != nil).To(Equal(true))
Expect(scheduledCondition.Status).To(Equal(v1.ConditionTrue))
scheduledPods = append(scheduledPods, pod)
} else {
_, scheduledCondition := v1.GetPodCondition(&pod.Status, v1.PodScheduled)
Expect(scheduledCondition != nil).To(Equal(true))
Expect(scheduledCondition.Status).To(Equal(v1.ConditionFalse))
if scheduledCondition.Reason == "Unschedulable" {
notScheduledPods = append(notScheduledPods, pod)
}
}
}
}
return
}
// WaitForStableCluster waits until all existing pods are scheduled and returns their number.
func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
timeout := 10 * time.Minute
startTime := time.Now()
allPods, err := c.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{})
ExpectNoError(err)
	// The API server also returns Pods that have already succeeded or failed. We need to filter them out.
currentPods := make([]v1.Pod, 0, len(allPods.Items))
for _, pod := range allPods.Items {
if pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
currentPods = append(currentPods, pod)
}
}
allPods.Items = currentPods
scheduledPods, currentlyNotScheduledPods := GetPodsScheduled(masterNodes, allPods)
for len(currentlyNotScheduledPods) != 0 {
time.Sleep(2 * time.Second)
allPods, err := c.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{})
ExpectNoError(err)
scheduledPods, currentlyNotScheduledPods = GetPodsScheduled(masterNodes, allPods)
if startTime.Add(timeout).Before(time.Now()) {
Failf("Timed out after %v waiting for stable cluster.", timeout)
break
}
}
return len(scheduledPods)
}
// GetMasterAndWorkerNodesOrDie returns the set of master node names and a list of schedulable, untainted worker nodes.
func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *v1.NodeList) {
nodes := &v1.NodeList{}
masters := sets.NewString()
all, _ := c.Core().Nodes().List(v1.ListOptions{})
for _, n := range all.Items {
if system.IsMasterNode(n.Name) {
masters.Insert(n.Name)
} else if isNodeSchedulable(&n) && isNodeUntainted(&n) {
nodes.Items = append(nodes.Items, n)
}
}
return masters, nodes
}
func ListNamespaceEvents(c clientset.Interface, ns string) error {
ls, err := c.Core().Events(ns).List(v1.ListOptions{})
if err != nil {
return err
}
for _, event := range ls.Items {
glog.Infof("Event(%#v): type: '%v' reason: '%v' %v", event.InvolvedObject, event.Type, event.Reason, event.Message)
}
return nil
}
// E2ETestNodePreparer implements the testutils.TestNodePreparer interface, which
// is used to create/modify Nodes before running a test.
type E2ETestNodePreparer struct {
client clientset.Interface
	// Specifies how many nodes should be modified using the given strategy.
	// Only one strategy can be applied to a single Node, so there needs to
	// be at least <sum of Counts> Nodes in the cluster.
countToStrategy []testutils.CountToStrategy
nodeToAppliedStrategy map[string]testutils.PrepareNodeStrategy
}
func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy []testutils.CountToStrategy) testutils.TestNodePreparer {
return &E2ETestNodePreparer{
client: client,
countToStrategy: countToStrategy,
nodeToAppliedStrategy: make(map[string]testutils.PrepareNodeStrategy),
}
}
func (p *E2ETestNodePreparer) PrepareNodes() error {
nodes := GetReadySchedulableNodesOrDie(p.client)
	numTemplates := 0
	for _, v := range p.countToStrategy {
		// Sum the requested node counts, not the slice indices.
		numTemplates += v.Count
	}
	if numTemplates > len(nodes.Items) {
		return fmt.Errorf("can't prepare Nodes: requested %d Nodes but only %d exist", numTemplates, len(nodes.Items))
	}
index := 0
sum := 0
for _, v := range p.countToStrategy {
sum += v.Count
for ; index < sum; index++ {
if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {
glog.Errorf("Aborting node preparation: %v", err)
return err
}
p.nodeToAppliedStrategy[nodes.Items[index].Name] = v.Strategy
}
}
return nil
}
func (p *E2ETestNodePreparer) CleanupNodes() error {
var encounteredError error
nodes := GetReadySchedulableNodesOrDie(p.client)
for i := range nodes.Items {
var err error
name := nodes.Items[i].Name
strategy, found := p.nodeToAppliedStrategy[name]
if found {
if err = testutils.DoCleanupNode(p.client, name, strategy); err != nil {
glog.Errorf("Skipping cleanup of Node: failed update of %v: %v", name, err)
encounteredError = err
}
}
}
return encounteredError
}
func CleanupGCEResources(loadBalancerName string) (err error) {
gceCloud, ok := TestContext.CloudConfig.Provider.(*gcecloud.GCECloud)
if !ok {
return fmt.Errorf("failed to convert CloudConfig.Provider to GCECloud: %#v", TestContext.CloudConfig.Provider)
}
gceCloud.DeleteFirewall(loadBalancerName)
gceCloud.DeleteForwardingRule(loadBalancerName)
gceCloud.DeleteGlobalStaticIP(loadBalancerName)
hc, _ := gceCloud.GetHttpHealthCheck(loadBalancerName)
gceCloud.DeleteTargetPool(loadBalancerName, hc)
return nil
}
// getMaster populates the externalIP, internalIP and hostname fields of the master.
// If any of these is unavailable, it is set to "".
func getMaster(c clientset.Interface) Address {
master := Address{}
// Populate the internal IP.
eps, err := c.Core().Endpoints(v1.NamespaceDefault).Get("kubernetes")
if err != nil {
Failf("Failed to get kubernetes endpoints: %v", err)
}
	if len(eps.Subsets) != 1 || len(eps.Subsets[0].Addresses) != 1 {
		Failf("Expected exactly 1 endpoint address for the kubernetes service, got: %+v", eps)
	}
master.internalIP = eps.Subsets[0].Addresses[0].IP
// Populate the external IP/hostname.
url, err := url.Parse(TestContext.Host)
if err != nil {
Failf("Failed to parse hostname: %v", err)
}
if net.ParseIP(url.Host) != nil {
// TODO: Check that it is external IP (not having a reserved IP address as per RFC1918).
master.externalIP = url.Host
} else {
master.hostname = url.Host
}
return master
}
// GetMasterAddress returns the hostname/external IP/internal IP as appropriate for e2e tests on a particular provider;
// this is the address of the interface used for communication with the kubelet.
func GetMasterAddress(c clientset.Interface) string {
master := getMaster(c)
switch TestContext.Provider {
case "gce", "gke":
return master.externalIP
case "aws":
return awsMasterIP
default:
Failf("This test is not supported for provider %s and should be disabled", TestContext.Provider)
}
return ""
}
// GetNodeExternalIP returns the node's external IP concatenated with port 22 for SSH,
// e.g. 1.2.3.4:22
func GetNodeExternalIP(node *v1.Node) string {
Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
if a.Type == v1.NodeExternalIP {
host = a.Address + ":22"
break
}
}
if host == "" {
Failf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses)
}
return host
}
|
[
"\"FEDERATION_NAMESPACE\"",
"\"KUBE_SSH_USER\"",
"\"USER\"",
"\"HOME\"",
"\"AWS_SSH_KEY\"",
"\"VAGRANT_SSH_KEY\""
] |
[] |
[
"VAGRANT_SSH_KEY",
"AWS_SSH_KEY",
"KUBE_SSH_USER",
"USER",
"FEDERATION_NAMESPACE",
"HOME"
] |
[]
|
["VAGRANT_SSH_KEY", "AWS_SSH_KEY", "KUBE_SSH_USER", "USER", "FEDERATION_NAMESPACE", "HOME"]
|
go
| 6 | 0 | |
python/TumblingWindow/tumbling-windows.py
|
# -*- coding: utf-8 -*-
"""
tumbling-windows.py
~~~~~~~~~~~~~~~~~~~
This module:
1. Creates a table environment
2. Creates a source table from a Kinesis Data Stream
3. Creates a sink table writing to a Kinesis Data Stream
4. Queries from the Source Table and
creates a tumbling window over 10 seconds to calculate the cumulative price over the window.
5. These tumbling window results are inserted into the Sink table.
"""
from pyflink.table import EnvironmentSettings, StreamTableEnvironment
from pyflink.table.window import Tumble
import os
import json
# 1. Creates a Table Environment
env_settings = (
EnvironmentSettings.new_instance().in_streaming_mode().use_blink_planner().build()
)
table_env = StreamTableEnvironment.create(environment_settings=env_settings)
statement_set = table_env.create_statement_set()
APPLICATION_PROPERTIES_FILE_PATH = "/etc/flink/application_properties.json" # on kda
# set the IS_LOCAL env var in your local environment to run outside Kinesis Data Analytics
is_local = bool(os.environ.get("IS_LOCAL"))
if is_local:
    # only for local runs: override the properties file path and point pipeline.jars at your local connector jar(s), delimited by a semicolon (;)
APPLICATION_PROPERTIES_FILE_PATH = "application_properties.json" # local
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
table_env.get_config().get_configuration().set_string(
"pipeline.jars",
"file:///" + CURRENT_DIR + "/lib/amazon-kinesis-sql-connector-flink-2.0.3.jar",
)
def get_application_properties():
if os.path.isfile(APPLICATION_PROPERTIES_FILE_PATH):
with open(APPLICATION_PROPERTIES_FILE_PATH, "r") as file:
contents = file.read()
properties = json.loads(contents)
return properties
else:
print('A file at "{}" was not found'.format(APPLICATION_PROPERTIES_FILE_PATH))
def property_map(props, property_group_id):
for prop in props:
if prop["PropertyGroupId"] == property_group_id:
return prop["PropertyMap"]
def create_table(table_name, stream_name, region, stream_initpos):
return """ CREATE TABLE {0} (
ticker VARCHAR(6),
price DOUBLE,
event_time TIMESTAMP(3),
WATERMARK FOR event_time AS event_time - INTERVAL '5' SECOND
)
PARTITIONED BY (ticker)
WITH (
'connector' = 'kinesis',
'stream' = '{1}',
'aws.region' = '{2}',
'scan.stream.initpos' = '{3}',
'sink.partitioner-field-delimiter' = ';',
'sink.producer.collection-max-count' = '100',
'format' = 'json',
'json.timestamp-format.standard' = 'ISO-8601'
) """.format(
table_name, stream_name, region, stream_initpos
)
def perform_tumbling_window_aggregation(input_table_name):
# use SQL Table in the Table API
input_table = table_env.from_path(input_table_name)
tumbling_window_table = (
input_table.window(
Tumble.over("10.seconds").on("event_time").alias("ten_second_window")
)
.group_by("ticker, ten_second_window")
.select("ticker, price.sum as price, ten_second_window.end as event_time")
)
return tumbling_window_table
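# The tumbling-window aggregation above is roughly equivalent to this SQL
# (illustrative only):
#   SELECT ticker,
#          SUM(price) AS price,
#          TUMBLE_END(event_time, INTERVAL '10' SECOND) AS event_time
#   FROM input_table
#   GROUP BY ticker, TUMBLE(event_time, INTERVAL '10' SECOND)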
def main():
# Application Property Keys
input_property_group_key = "consumer.config.0"
producer_property_group_key = "producer.config.0"
input_stream_key = "input.stream.name"
input_region_key = "aws.region"
input_starting_position_key = "flink.stream.initpos"
output_stream_key = "output.stream.name"
output_region_key = "aws.region"
# tables
input_table_name = "input_table"
output_table_name = "output_table"
# get application properties
props = get_application_properties()
input_property_map = property_map(props, input_property_group_key)
output_property_map = property_map(props, producer_property_group_key)
input_stream = input_property_map[input_stream_key]
input_region = input_property_map[input_region_key]
stream_initpos = input_property_map[input_starting_position_key]
output_stream = output_property_map[output_stream_key]
output_region = output_property_map[output_region_key]
# 2. Creates a source table from a Kinesis Data Stream
table_env.execute_sql(
create_table(input_table_name, input_stream, input_region, stream_initpos)
)
# 3. Creates a sink table writing to a Kinesis Data Stream
table_env.execute_sql(
create_table(output_table_name, output_stream, output_region, stream_initpos)
)
# 4. Queries from the Source Table and creates a tumbling window over 10 seconds to calculate the cumulative price
# over the window.
tumbling_window_table = perform_tumbling_window_aggregation(input_table_name)
# 5. These tumbling windows are inserted into the sink table
tumbling_window_table.execute_insert(output_table_name).wait()
statement_set.execute()
if __name__ == "__main__":
main()
|
[] |
[] |
[
"IS_LOCAL"
] |
[]
|
["IS_LOCAL"]
|
python
| 1 | 0 | |
main.go
|
package gcfexample
import (
"cloud.google.com/go/logging"
"context"
"contrib.go.opencensus.io/exporter/stackdriver"
"contrib.go.opencensus.io/exporter/stackdriver/propagation"
"fmt"
"go.opencensus.io/trace"
"google.golang.org/genproto/googleapis/api/monitoredres"
"math/rand"
"net/http"
"os"
"sync"
)
var (
logger *logging.Logger
once sync.Once
)
// configFunc sets the global configuration; it's overridden in tests.
var configFunc = defaultConfigFunc
type StructureLogExample struct {
ThingOne string `json:"thing_one"`
BatchSize int `json:"batch_size"`
}
func Gcfexample(w http.ResponseWriter, r *http.Request) {
once.Do(func() {
if err := configFunc(); err != nil {
panic(err)
}
})
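	// sync.Once guarantees configFunc runs at most once per function instance;
	// warm invocations reuse the already-initialized package-level logger and exporters.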
defer logger.Flush()
	// pseudo-random number between 0 and 5
	batchAttempt := int64(rand.Intn(6))
ctx := r.Context()
var span *trace.Span
httpFormat := &propagation.HTTPFormat{}
sc, ok := httpFormat.SpanContextFromRequest(r)
if ok {
ctx, span = trace.StartSpanWithRemoteParent(ctx, "helloworld", sc,
trace.WithSampler(trace.ProbabilitySampler(.10)),
trace.WithSpanKind(trace.SpanKindServer),
)
defer span.End()
}
logger.Log(logging.Entry{
Payload: "Handling new HTTP request",
Severity: logging.Info,
})
logger.Log(logging.Entry{
Payload: StructureLogExample{ThingOne: "dafoolyouare", BatchSize: int(batchAttempt)},
Severity: logging.Info,
Labels: map[string]string{
"rsc": "3711",
"r": "2138",
"gri": "1908",
"adg": "912",
},
})
projectId := os.Getenv("GCP_PROJECT")
_, err := createCustomMetric(projectId, "custom.googleapis.com/dataops/gcfexample/ametric")
if err != nil {
logger.Log(logging.Entry{
Payload: fmt.Sprintf("Unable to create MetricDescription %v", err),
Severity: logging.Error,
Labels: map[string]string{
"rsc": "3711",
"r": "2138",
"gri": "1908",
"adg": "912",
},
})
}
err = writeTimeSeriesValue(projectId, "custom.googleapis.com/dataops/gcfexample/ametric")
if err != nil {
logger.Log(logging.Entry{
Payload: fmt.Sprintf("writeTimeSeriesValue failed %v", err),
Severity: logging.Error,
Labels: map[string]string{
"rsc": "3711",
"r": "2138",
"gri": "1908",
"adg": "912",
},
})
}
w.Write([]byte(fmt.Sprintf("016 Batch Attempts = %d", batchAttempt)))
}
func defaultConfigFunc() error {
var err error
projectId := os.Getenv("GCP_PROJECT")
if projectId == "" {
return fmt.Errorf("GCP_PROJECT environment variable unset or missing")
}
functionName := os.Getenv("FUNCTION_NAME")
if functionName == "" {
return fmt.Errorf("FUNCTION_NAME environment variable unset or missing")
}
region := os.Getenv("FUNCTION_REGION")
if region == "" {
return fmt.Errorf("FUNCTION_REGION environment variable unset or missing")
}
stackdriverExporter, err := stackdriver.NewExporter(stackdriver.Options{ProjectID: projectId})
if err != nil {
return err
}
trace.RegisterExporter(stackdriverExporter)
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
client, err := logging.NewClient(context.Background(), projectId)
if err != nil {
return err
}
monitoredResource := monitoredres.MonitoredResource{
Type: "cloud_function",
Labels: map[string]string{
"function_name": functionName,
"region": region,
},
}
commonResource := logging.CommonResource(&monitoredResource)
logger = client.Logger(functionName, commonResource)
return nil
}
|
[
"\"GCP_PROJECT\"",
"\"GCP_PROJECT\"",
"\"FUNCTION_NAME\"",
"\"FUNCTION_REGION\""
] |
[] |
[
"FUNCTION_NAME",
"GCP_PROJECT",
"FUNCTION_REGION"
] |
[]
|
["FUNCTION_NAME", "GCP_PROJECT", "FUNCTION_REGION"]
|
go
| 3 | 0 | |
boto/auth.py
|
# Copyright 2010 Google Inc.
# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Handles authentication required to AWS and GS
"""
import base64
import boto
import boto.auth_handler
import boto.exception
import boto.plugin
import boto.utils
import copy
import datetime
from email.utils import formatdate
import hmac
import os
import posixpath
from boto.compat import urllib, encodebytes, parse_qs_safe, urlparse, six
from boto.auth_handler import AuthHandler
from boto.exception import BotoClientError
from boto.utils import get_utf8able_str
try:
from hashlib import sha1 as sha
from hashlib import sha256 as sha256
except ImportError:
import sha
sha256 = None
# Region detection strings to determine if SigV2 should be used
# by default
S3_AUTH_DETECT = [
'-ap-northeast-1',
'.ap-northeast-1',
'-ap-southeast-1',
'.ap-southeast-1',
'-ap-southeast-2',
'.ap-southeast-2',
'-eu-west-1',
'.eu-west-1',
'-external-1',
'.external-1',
'-sa-east-1',
'.sa-east-1',
'-us-east-1',
'.us-east-1',
'-us-gov-west-1',
'.us-gov-west-1',
'-us-west-1',
'.us-west-1',
'-us-west-2',
'.us-west-2'
]
SIGV4_DETECT = [
'.cn-',
# In eu-central and ap-northeast-2 we support both host styles for S3
'.eu-central',
'-eu-central',
'.ap-northeast-2',
'-ap-northeast-2',
'.ap-south-1',
'-ap-south-1',
'.us-east-2',
'-us-east-2',
'-ca-central',
'.ca-central',
'.eu-west-2',
'-eu-west-2',
]
class HmacKeys(object):
"""Key based Auth handler helper."""
def __init__(self, host, config, provider, anon=False):
if provider.access_key is None or provider.secret_key is None:
if not anon:
raise boto.auth_handler.NotReadyToAuthenticate()
else:
self._hmac = None
self._hmac_256 = None
self.host = host
self.update_provider(provider)
def update_provider(self, provider):
self._provider = provider
if self._provider.secret_key: # Anonymous handler has no key.
self._hmac = hmac.new(self._provider.secret_key.encode('utf-8'),
digestmod=sha)
if sha256:
self._hmac_256 = hmac.new(
self._provider.secret_key.encode('utf-8'), digestmod=sha256)
else:
self._hmac_256 = None
def algorithm(self):
if self._hmac_256:
return 'HmacSHA256'
else:
return 'HmacSHA1'
def _get_hmac(self):
if self._hmac_256:
digestmod = sha256
else:
digestmod = sha
return hmac.new(self._provider.secret_key.encode('utf-8'),
digestmod=digestmod)
def sign_string(self, string_to_sign):
new_hmac = self._get_hmac()
new_hmac.update(string_to_sign.encode('utf-8'))
return encodebytes(new_hmac.digest()).decode('utf-8').strip()
def __getstate__(self):
pickled_dict = copy.copy(self.__dict__)
del pickled_dict['_hmac']
del pickled_dict['_hmac_256']
return pickled_dict
def __setstate__(self, dct):
self.__dict__ = dct
self.update_provider(self._provider)
class AnonAuthHandler(AuthHandler, HmacKeys):
"""
Implements Anonymous requests.
"""
capability = ['anon']
def __init__(self, host, config, provider):
AuthHandler.__init__(self, host, config, provider)
HmacKeys.__init__(self, host, config, provider, anon=True)
def add_auth(self, http_request, **kwargs):
pass
class HmacAuthV1Handler(AuthHandler, HmacKeys):
""" Implements the HMAC request signing used by S3 and GS."""
capability = ['hmac-v1', 's3']
def __init__(self, host, config, provider):
AuthHandler.__init__(self, host, config, provider)
HmacKeys.__init__(self, host, config, provider)
self._hmac_256 = None
def update_provider(self, provider):
super(HmacAuthV1Handler, self).update_provider(provider)
self._hmac_256 = None
def add_auth(self, http_request, **kwargs):
headers = http_request.headers
method = http_request.method
auth_path = http_request.auth_path
if 'Date' not in headers:
headers['Date'] = formatdate(usegmt=True)
if self._provider.security_token:
key = self._provider.security_token_header
headers[key] = self._provider.security_token
string_to_sign = boto.utils.canonical_string(method, auth_path,
headers, None,
self._provider)
boto.log.debug('StringToSign:\n%s' % string_to_sign)
b64_hmac = self.sign_string(string_to_sign)
auth_hdr = self._provider.auth_header
auth = ("%s %s:%s" % (auth_hdr, self._provider.access_key, b64_hmac))
boto.log.debug('Signature:\n%s' % auth)
headers['Authorization'] = auth
class HmacAuthV2Handler(AuthHandler, HmacKeys):
"""
Implements the simplified HMAC authorization used by CloudFront.
"""
capability = ['hmac-v2', 'cloudfront']
def __init__(self, host, config, provider):
AuthHandler.__init__(self, host, config, provider)
HmacKeys.__init__(self, host, config, provider)
self._hmac_256 = None
def update_provider(self, provider):
super(HmacAuthV2Handler, self).update_provider(provider)
self._hmac_256 = None
def add_auth(self, http_request, **kwargs):
headers = http_request.headers
if 'Date' not in headers:
headers['Date'] = formatdate(usegmt=True)
if self._provider.security_token:
key = self._provider.security_token_header
headers[key] = self._provider.security_token
b64_hmac = self.sign_string(headers['Date'])
auth_hdr = self._provider.auth_header
headers['Authorization'] = ("%s %s:%s" %
(auth_hdr,
self._provider.access_key, b64_hmac))
class HmacAuthV3Handler(AuthHandler, HmacKeys):
"""Implements the new Version 3 HMAC authorization used by Route53."""
capability = ['hmac-v3', 'route53', 'ses']
def __init__(self, host, config, provider):
AuthHandler.__init__(self, host, config, provider)
HmacKeys.__init__(self, host, config, provider)
def add_auth(self, http_request, **kwargs):
headers = http_request.headers
if 'Date' not in headers:
headers['Date'] = formatdate(usegmt=True)
if self._provider.security_token:
key = self._provider.security_token_header
headers[key] = self._provider.security_token
b64_hmac = self.sign_string(headers['Date'])
s = "AWS3-HTTPS AWSAccessKeyId=%s," % self._provider.access_key
s += "Algorithm=%s,Signature=%s" % (self.algorithm(), b64_hmac)
headers['X-Amzn-Authorization'] = s
class HmacAuthV3HTTPHandler(AuthHandler, HmacKeys):
"""
Implements the new Version 3 HMAC authorization used by DynamoDB.
"""
capability = ['hmac-v3-http']
def __init__(self, host, config, provider):
AuthHandler.__init__(self, host, config, provider)
HmacKeys.__init__(self, host, config, provider)
def headers_to_sign(self, http_request):
"""
Select the headers from the request that need to be included
in the StringToSign.
"""
headers_to_sign = {'Host': self.host}
for name, value in http_request.headers.items():
lname = name.lower()
if lname.startswith('x-amz'):
headers_to_sign[name] = value
return headers_to_sign
def canonical_headers(self, headers_to_sign):
"""
Return the headers that need to be included in the StringToSign
in their canonical form by converting all header keys to lower
case, sorting them in alphabetical order and then joining
them into a string, separated by newlines.
"""
l = sorted(['%s:%s' % (n.lower().strip(),
headers_to_sign[n].strip()) for n in headers_to_sign])
return '\n'.join(l)
def string_to_sign(self, http_request):
"""
Return the canonical StringToSign as well as a dict
containing the original version of all headers that
were included in the StringToSign.
"""
headers_to_sign = self.headers_to_sign(http_request)
canonical_headers = self.canonical_headers(headers_to_sign)
string_to_sign = '\n'.join([http_request.method,
http_request.auth_path,
'',
canonical_headers,
'',
http_request.body])
return string_to_sign, headers_to_sign
def add_auth(self, req, **kwargs):
"""
Add AWS3 authentication to a request.
:type req: :class`boto.connection.HTTPRequest`
:param req: The HTTPRequest object.
"""
# This could be a retry. Make sure the previous
# authorization header is removed first.
if 'X-Amzn-Authorization' in req.headers:
del req.headers['X-Amzn-Authorization']
req.headers['X-Amz-Date'] = formatdate(usegmt=True)
if self._provider.security_token:
req.headers['X-Amz-Security-Token'] = self._provider.security_token
string_to_sign, headers_to_sign = self.string_to_sign(req)
boto.log.debug('StringToSign:\n%s' % string_to_sign)
hash_value = sha256(string_to_sign.encode('utf-8')).digest()
b64_hmac = self.sign_string(hash_value)
s = "AWS3 AWSAccessKeyId=%s," % self._provider.access_key
s += "Algorithm=%s," % self.algorithm()
s += "SignedHeaders=%s," % ';'.join(headers_to_sign)
s += "Signature=%s" % b64_hmac
req.headers['X-Amzn-Authorization'] = s
class HmacAuthV4Handler(AuthHandler, HmacKeys):
"""
Implements the new Version 4 HMAC authorization.
"""
capability = ['hmac-v4']
def __init__(self, host, config, provider,
service_name=None, region_name=None):
AuthHandler.__init__(self, host, config, provider)
HmacKeys.__init__(self, host, config, provider)
# You can set the service_name and region_name to override the
# values which would otherwise come from the endpoint, e.g.
# <service>.<region>.amazonaws.com.
self.service_name = service_name
self.region_name = region_name
def _sign(self, key, msg, hex=False):
if not isinstance(key, bytes):
key = key.encode('utf-8')
if hex:
sig = hmac.new(key, msg.encode('utf-8'), sha256).hexdigest()
else:
sig = hmac.new(key, msg.encode('utf-8'), sha256).digest()
return sig
def headers_to_sign(self, http_request):
"""
Select the headers from the request that need to be included
in the StringToSign.
"""
host_header_value = self.host_header(self.host, http_request)
if http_request.headers.get('Host'):
host_header_value = http_request.headers['Host']
headers_to_sign = {'Host': host_header_value}
for name, value in http_request.headers.items():
lname = name.lower()
if lname.startswith('x-amz'):
if isinstance(value, bytes):
value = value.decode('utf-8')
headers_to_sign[name] = value
return headers_to_sign
def host_header(self, host, http_request):
port = http_request.port
secure = http_request.protocol == 'https'
if ((port == 80 and not secure) or (port == 443 and secure)):
return host
return '%s:%s' % (host, port)
def query_string(self, http_request):
parameter_names = sorted(http_request.params.keys())
pairs = []
for pname in parameter_names:
pval = get_utf8able_str(http_request.params[pname])
pairs.append(urllib.parse.quote(pname, safe=''.encode('ascii')) +
'=' +
urllib.parse.quote(pval, safe='-_~'.encode('ascii')))
return '&'.join(pairs)
def canonical_query_string(self, http_request):
# POST requests pass parameters in through the
# http_request.body field.
if http_request.method == 'POST':
return ""
l = []
for param in sorted(http_request.params):
value = get_utf8able_str(http_request.params[param])
l.append('%s=%s' % (urllib.parse.quote(param, safe='-_.~'),
urllib.parse.quote(value, safe='-_.~')))
return '&'.join(l)
def canonical_headers(self, headers_to_sign):
"""
Return the headers that need to be included in the StringToSign
in their canonical form by converting all header keys to lower
case, sorting them in alphabetical order and then joining
them into a string, separated by newlines.
"""
canonical = []
for header in headers_to_sign:
c_name = header.lower().strip()
raw_value = str(headers_to_sign[header])
if '"' in raw_value:
c_value = raw_value.strip()
else:
c_value = ' '.join(raw_value.strip().split())
canonical.append('%s:%s' % (c_name, c_value))
return '\n'.join(sorted(canonical))
def signed_headers(self, headers_to_sign):
l = ['%s' % n.lower().strip() for n in headers_to_sign]
l = sorted(l)
return ';'.join(l)
def canonical_uri(self, http_request):
path = http_request.auth_path
# Normalize the path
        # on Windows, normpath('/') becomes '\\', so we change it back to '/'
normalized = posixpath.normpath(path).replace('\\', '/')
# Then urlencode whatever's left.
encoded = urllib.parse.quote(normalized)
if len(path) > 1 and path.endswith('/'):
encoded += '/'
return encoded
def payload(self, http_request):
body = http_request.body
# If the body is a file like object, we can use
# boto.utils.compute_hash, which will avoid reading
# the entire body into memory.
if hasattr(body, 'seek') and hasattr(body, 'read'):
return boto.utils.compute_hash(body, hash_algorithm=sha256)[0]
elif not isinstance(body, bytes):
body = body.encode('utf-8')
return sha256(body).hexdigest()
def canonical_request(self, http_request):
cr = [http_request.method.upper()]
cr.append(self.canonical_uri(http_request))
cr.append(self.canonical_query_string(http_request))
headers_to_sign = self.headers_to_sign(http_request)
cr.append(self.canonical_headers(headers_to_sign) + '\n')
cr.append(self.signed_headers(headers_to_sign))
cr.append(self.payload(http_request))
return '\n'.join(cr)
def scope(self, http_request):
scope = [self._provider.access_key]
scope.append(http_request.timestamp)
scope.append(http_request.region_name)
scope.append(http_request.service_name)
scope.append('aws4_request')
return '/'.join(scope)
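    # The value returned here is used as the Credential= component of the
    # Authorization header and looks like (illustrative values):
    #   AKIDEXAMPLE/20150830/us-east-1/service/aws4_request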
def split_host_parts(self, host):
return host.split('.')
def determine_region_name(self, host):
parts = self.split_host_parts(host)
if self.region_name is not None:
region_name = self.region_name
elif len(parts) > 1:
if parts[1] == 'us-gov':
region_name = 'us-gov-west-1'
else:
if len(parts) == 3:
region_name = 'us-east-1'
else:
region_name = parts[1]
else:
region_name = parts[0]
return region_name
def determine_service_name(self, host):
parts = self.split_host_parts(host)
if self.service_name is not None:
service_name = self.service_name
else:
service_name = parts[0]
return service_name
def credential_scope(self, http_request):
scope = []
http_request.timestamp = http_request.headers['X-Amz-Date'][0:8]
scope.append(http_request.timestamp)
# The service_name and region_name either come from:
# * The service_name/region_name attrs or (if these values are None)
# * parsed from the endpoint <service>.<region>.amazonaws.com.
region_name = self.determine_region_name(http_request.host)
service_name = self.determine_service_name(http_request.host)
http_request.service_name = service_name
http_request.region_name = region_name
scope.append(http_request.region_name)
scope.append(http_request.service_name)
scope.append('aws4_request')
return '/'.join(scope)
def string_to_sign(self, http_request, canonical_request):
"""
Return the canonical StringToSign as well as a dict
containing the original version of all headers that
were included in the StringToSign.
"""
sts = ['AWS4-HMAC-SHA256']
sts.append(http_request.headers['X-Amz-Date'])
sts.append(self.credential_scope(http_request))
sts.append(sha256(canonical_request.encode('utf-8')).hexdigest())
return '\n'.join(sts)
def signature(self, http_request, string_to_sign):
key = self._provider.secret_key
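        # SigV4 signing-key derivation, as implemented below:
        #   kDate    = HMAC("AWS4" + secret_key, <YYYYMMDD>)
        #   kRegion  = HMAC(kDate, region)
        #   kService = HMAC(kRegion, service)
        #   kSigning = HMAC(kService, "aws4_request")
        # The final signature is HexEncode(HMAC(kSigning, string_to_sign)).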
k_date = self._sign(('AWS4' + key).encode('utf-8'),
http_request.timestamp)
k_region = self._sign(k_date, http_request.region_name)
k_service = self._sign(k_region, http_request.service_name)
k_signing = self._sign(k_service, 'aws4_request')
return self._sign(k_signing, string_to_sign, hex=True)
def add_auth(self, req, **kwargs):
"""
Add AWS4 authentication to a request.
:type req: :class`boto.connection.HTTPRequest`
:param req: The HTTPRequest object.
"""
# This could be a retry. Make sure the previous
# authorization header is removed first.
if 'X-Amzn-Authorization' in req.headers:
del req.headers['X-Amzn-Authorization']
now = datetime.datetime.utcnow()
req.headers['X-Amz-Date'] = now.strftime('%Y%m%dT%H%M%SZ')
if self._provider.security_token:
req.headers['X-Amz-Security-Token'] = self._provider.security_token
qs = self.query_string(req)
qs_to_post = qs
        # We do not want to include any params that were "mangled" into the
        # request params when performing s3-sigv4, since they do not belong in
        # the body of a POST for some requests. "Mangled" refers to items from
        # the URL's query string being merged into the request params: those
        # params end up in the body of the request, even though the URL's query
        # string does not belong there. ``unmangled_req`` is the request as it
        # was before the mangling, and this kwarg is only passed for s3-sigv4.
if 'unmangled_req' in kwargs:
qs_to_post = self.query_string(kwargs['unmangled_req'])
if qs_to_post and req.method == 'POST':
# Stash request parameters into post body
# before we generate the signature.
req.body = qs_to_post
req.headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
req.headers['Content-Length'] = str(len(req.body))
else:
# Safe to modify req.path here since
# the signature will use req.auth_path.
req.path = req.path.split('?')[0]
if qs:
# Don't insert the '?' unless there's actually a query string
req.path = req.path + '?' + qs
canonical_request = self.canonical_request(req)
boto.log.debug('CanonicalRequest:\n%s' % canonical_request)
string_to_sign = self.string_to_sign(req, canonical_request)
boto.log.debug('StringToSign:\n%s' % string_to_sign)
signature = self.signature(req, string_to_sign)
boto.log.debug('Signature:\n%s' % signature)
headers_to_sign = self.headers_to_sign(req)
l = ['AWS4-HMAC-SHA256 Credential=%s' % self.scope(req)]
l.append('SignedHeaders=%s' % self.signed_headers(headers_to_sign))
l.append('Signature=%s' % signature)
req.headers['Authorization'] = ','.join(l)
class S3HmacAuthV4Handler(HmacAuthV4Handler, AuthHandler):
"""
Implements a variant of Version 4 HMAC authorization specific to S3.
"""
capability = ['hmac-v4-s3']
def __init__(self, *args, **kwargs):
super(S3HmacAuthV4Handler, self).__init__(*args, **kwargs)
if self.region_name:
self.region_name = self.clean_region_name(self.region_name)
def clean_region_name(self, region_name):
if region_name.startswith('s3-'):
return region_name[3:]
return region_name
def canonical_uri(self, http_request):
# S3 does **NOT** do path normalization that SigV4 typically does.
# Urlencode the path, **NOT** ``auth_path`` (because vhosting).
path = urllib.parse.urlparse(http_request.path)
# Because some quoting may have already been applied, let's back it out.
unquoted = urllib.parse.unquote(path.path)
# Requote, this time addressing all characters.
encoded = urllib.parse.quote(unquoted, safe='/~')
return encoded
def canonical_query_string(self, http_request):
        # Note that, unlike the parent handler, we do not return an empty
        # string for POST requests; query strings in the URL are included in
        # the canonical query string.
l = []
for param in sorted(http_request.params):
value = get_utf8able_str(http_request.params[param])
l.append('%s=%s' % (urllib.parse.quote(param, safe='-_.~'),
urllib.parse.quote(value, safe='-_.~')))
return '&'.join(l)
def host_header(self, host, http_request):
port = http_request.port
secure = http_request.protocol == 'https'
if ((port == 80 and not secure) or (port == 443 and secure)):
return http_request.host
return '%s:%s' % (http_request.host, port)
def headers_to_sign(self, http_request):
"""
Select the headers from the request that need to be included
in the StringToSign.
"""
host_header_value = self.host_header(self.host, http_request)
headers_to_sign = {'Host': host_header_value}
for name, value in http_request.headers.items():
lname = name.lower()
# Hooray for the only difference! The main SigV4 signer only does
# ``Host`` + ``x-amz-*``. But S3 wants pretty much everything
# signed, except for authorization itself.
if lname not in ['authorization']:
headers_to_sign[name] = value
return headers_to_sign
def determine_region_name(self, host):
# S3's different format(s) of representing region/service from the
# rest of AWS makes this hurt too.
#
# Possible domain formats:
# - s3.amazonaws.com (Classic)
# - s3-us-west-2.amazonaws.com (Specific region)
# - bukkit.s3.amazonaws.com (Vhosted Classic)
# - bukkit.s3-ap-northeast-1.amazonaws.com (Vhosted specific region)
# - s3.cn-north-1.amazonaws.com.cn - (Beijing region)
# - bukkit.s3.cn-north-1.amazonaws.com.cn - (Vhosted Beijing region)
parts = self.split_host_parts(host)
if self.region_name is not None:
region_name = self.region_name
else:
# Classic URLs - s3-us-west-2.amazonaws.com
if len(parts) == 3:
region_name = self.clean_region_name(parts[0])
# Special-case for Classic.
if region_name == 's3':
region_name = 'us-east-1'
else:
# Iterate over the parts in reverse order.
for offset, part in enumerate(reversed(parts)):
part = part.lower()
# Look for the first thing starting with 's3'.
# Until there's a ``.s3`` TLD, we should be OK. :P
if part == 's3':
# If it's by itself, the region is the previous part.
region_name = parts[-offset]
# Unless it's Vhosted classic
if region_name == 'amazonaws':
region_name = 'us-east-1'
break
elif part.startswith('s3-'):
region_name = self.clean_region_name(part)
break
return region_name
def determine_service_name(self, host):
# Should this signing mechanism ever be used for anything else, this
# will fail. Consider utilizing the logic from the parent class should
# you find yourself here.
return 's3'
def mangle_path_and_params(self, req):
"""
Returns a copy of the request object with fixed ``auth_path/params``
attributes from the original.
"""
modified_req = copy.copy(req)
        # Unlike most other services, in S3 ``req.params`` isn't the only
        # source of query string parameters.
# Because of the ``query_args``, we may already have a query string
# **ON** the ``path/auth_path``.
# Rip them apart, so the ``auth_path/params`` can be signed
# appropriately.
parsed_path = urllib.parse.urlparse(modified_req.auth_path)
modified_req.auth_path = parsed_path.path
if modified_req.params is None:
modified_req.params = {}
else:
            # To keep the original request object untouched we must copy the
            # params dictionary, because the shallow copy of the request still
            # refers to the original request's params dictionary.
copy_params = req.params.copy()
modified_req.params = copy_params
raw_qs = parsed_path.query
existing_qs = parse_qs_safe(
raw_qs,
keep_blank_values=True
)
        # ``parse_qs`` returns a list for every value; unwrap single-element
        # lists so we only keep a list when multiple values were actually provided.
for key, value in existing_qs.items():
if isinstance(value, (list, tuple)):
if len(value) == 1:
existing_qs[key] = value[0]
modified_req.params.update(existing_qs)
return modified_req
def payload(self, http_request):
if http_request.headers.get('x-amz-content-sha256'):
return http_request.headers['x-amz-content-sha256']
return super(S3HmacAuthV4Handler, self).payload(http_request)
def add_auth(self, req, **kwargs):
if 'x-amz-content-sha256' not in req.headers:
if '_sha256' in req.headers:
req.headers['x-amz-content-sha256'] = req.headers.pop('_sha256')
else:
req.headers['x-amz-content-sha256'] = self.payload(req)
updated_req = self.mangle_path_and_params(req)
return super(S3HmacAuthV4Handler, self).add_auth(updated_req,
unmangled_req=req,
**kwargs)
def presign(self, req, expires, iso_date=None):
"""
Presign a request using SigV4 query params. Takes in an HTTP request
and an expiration time in seconds and returns a URL.
http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
"""
if iso_date is None:
iso_date = datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%SZ')
region = self.determine_region_name(req.host)
service = self.determine_service_name(req.host)
params = {
'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',
'X-Amz-Credential': '%s/%s/%s/%s/aws4_request' % (
self._provider.access_key,
iso_date[:8],
region,
service
),
'X-Amz-Date': iso_date,
'X-Amz-Expires': expires,
'X-Amz-SignedHeaders': 'host'
}
if self._provider.security_token:
params['X-Amz-Security-Token'] = self._provider.security_token
headers_to_sign = self.headers_to_sign(req)
l = sorted(['%s' % n.lower().strip() for n in headers_to_sign])
params['X-Amz-SignedHeaders'] = ';'.join(l)
req.params.update(params)
cr = self.canonical_request(req)
# We need to replace the payload SHA with a constant
cr = '\n'.join(cr.split('\n')[:-1]) + '\nUNSIGNED-PAYLOAD'
# Date header is expected for string_to_sign, but unused otherwise
req.headers['X-Amz-Date'] = iso_date
sts = self.string_to_sign(req, cr)
signature = self.signature(req, sts)
# Add signature to params now that we have it
req.params['X-Amz-Signature'] = signature
return '%s://%s%s?%s' % (req.protocol, req.host, req.path,
urllib.parse.urlencode(req.params))
class STSAnonHandler(AuthHandler):
"""
Provides pure query construction (no actual signing).
    Used for making anonymous STS requests for operations like
``assume_role_with_web_identity``.
"""
capability = ['sts-anon']
def _escape_value(self, value):
# This is changed from a previous version because this string is
# being passed to the query string and query strings must
# be url encoded. In particular STS requires the saml_response to
# be urlencoded when calling assume_role_with_saml.
return urllib.parse.quote(value)
def _build_query_string(self, params):
keys = list(params.keys())
keys.sort(key=lambda x: x.lower())
pairs = []
for key in keys:
val = get_utf8able_str(params[key])
pairs.append(key + '=' + self._escape_value(get_utf8able_str(val)))
return '&'.join(pairs)
def add_auth(self, http_request, **kwargs):
headers = http_request.headers
qs = self._build_query_string(
http_request.params
)
boto.log.debug('query_string in body: %s' % qs)
headers['Content-Type'] = 'application/x-www-form-urlencoded'
# This will be a POST so the query string should go into the body
# as opposed to being in the uri
http_request.body = qs
class QuerySignatureHelper(HmacKeys):
"""
    Helper for query-signature based Auth handlers.
    Concrete subclasses need to implement the _calc_signature method.
"""
def add_auth(self, http_request, **kwargs):
headers = http_request.headers
params = http_request.params
params['AWSAccessKeyId'] = self._provider.access_key
params['SignatureVersion'] = self.SignatureVersion
params['Timestamp'] = boto.utils.get_ts()
qs, signature = self._calc_signature(
http_request.params, http_request.method,
http_request.auth_path, http_request.host)
boto.log.debug('query_string: %s Signature: %s' % (qs, signature))
if http_request.method == 'POST':
headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
http_request.body = qs + '&Signature=' + urllib.parse.quote_plus(signature)
http_request.headers['Content-Length'] = str(len(http_request.body))
else:
http_request.body = ''
# if this is a retried request, the qs from the previous try will
# already be there, we need to get rid of that and rebuild it
http_request.path = http_request.path.split('?')[0]
http_request.path = (http_request.path + '?' + qs +
'&Signature=' + urllib.parse.quote_plus(signature))
class QuerySignatureV0AuthHandler(QuerySignatureHelper, AuthHandler):
"""Provides Signature V0 Signing"""
SignatureVersion = 0
capability = ['sign-v0']
def _calc_signature(self, params, *args):
boto.log.debug('using _calc_signature_0')
hmac = self._get_hmac()
s = params['Action'] + params['Timestamp']
hmac.update(s.encode('utf-8'))
        keys = sorted(params.keys(), key=lambda x: x.lower())
pairs = []
for key in keys:
val = get_utf8able_str(params[key])
pairs.append(key + '=' + urllib.parse.quote(val))
qs = '&'.join(pairs)
return (qs, base64.b64encode(hmac.digest()))
class QuerySignatureV1AuthHandler(QuerySignatureHelper, AuthHandler):
"""
Provides Query Signature V1 Authentication.
"""
SignatureVersion = 1
capability = ['sign-v1', 'mturk']
def __init__(self, *args, **kw):
QuerySignatureHelper.__init__(self, *args, **kw)
AuthHandler.__init__(self, *args, **kw)
self._hmac_256 = None
def _calc_signature(self, params, *args):
boto.log.debug('using _calc_signature_1')
hmac = self._get_hmac()
keys = list(params.keys())
keys.sort(key=lambda x: x.lower())
pairs = []
for key in keys:
hmac.update(key.encode('utf-8'))
val = get_utf8able_str(params[key]).encode('utf-8')
hmac.update(val)
pairs.append(key + '=' + urllib.parse.quote(val))
qs = '&'.join(pairs)
return (qs, base64.b64encode(hmac.digest()))
class QuerySignatureV2AuthHandler(QuerySignatureHelper, AuthHandler):
"""Provides Query Signature V2 Authentication."""
SignatureVersion = 2
capability = ['sign-v2', 'ec2', 'ec2', 'emr', 'fps', 'ecs',
'sdb', 'iam', 'rds', 'sns', 'sqs', 'cloudformation']
def _calc_signature(self, params, verb, path, server_name):
boto.log.debug('using _calc_signature_2')
string_to_sign = '%s\n%s\n%s\n' % (verb, server_name.lower(), path)
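        # The final SigV2 string to sign (after the query string is appended
        # below) has the form:
        #   <HTTP verb>\n<lower-cased host>\n<path>\n<canonicalized query string>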
hmac = self._get_hmac()
params['SignatureMethod'] = self.algorithm()
if self._provider.security_token:
params['SecurityToken'] = self._provider.security_token
keys = sorted(params.keys())
pairs = []
for key in keys:
val = get_utf8able_str(params[key]).encode('utf-8')
pairs.append(urllib.parse.quote(key, safe='') + '=' +
urllib.parse.quote(val, safe='-_~'))
qs = '&'.join(pairs)
boto.log.debug('query string: %s' % qs)
string_to_sign += qs
boto.log.debug('string_to_sign: %s' % string_to_sign)
hmac.update(string_to_sign.encode('utf-8'))
b64 = base64.b64encode(hmac.digest())
boto.log.debug('len(b64)=%d' % len(b64))
boto.log.debug('base64 encoded digest: %s' % b64)
return (qs, b64)
class POSTPathQSV2AuthHandler(QuerySignatureV2AuthHandler, AuthHandler):
"""
    Query Signature V2 Authentication that relocates the signed query string
    into the path and allows POST requests with arbitrary Content-Types.
"""
capability = ['mws']
def add_auth(self, req, **kwargs):
req.params['AWSAccessKeyId'] = self._provider.access_key
req.params['SignatureVersion'] = self.SignatureVersion
req.params['Timestamp'] = boto.utils.get_ts()
qs, signature = self._calc_signature(req.params, req.method,
req.auth_path, req.host)
boto.log.debug('query_string: %s Signature: %s' % (qs, signature))
if req.method == 'POST':
req.headers['Content-Length'] = str(len(req.body))
req.headers['Content-Type'] = req.headers.get('Content-Type',
'text/plain')
else:
req.body = ''
# if this is a retried req, the qs from the previous try will
# already be there, we need to get rid of that and rebuild it
req.path = req.path.split('?')[0]
req.path = (req.path + '?' + qs +
'&Signature=' + urllib.parse.quote_plus(signature))
def get_auth_handler(host, config, provider, requested_capability=None):
"""Finds an AuthHandler that is ready to authenticate.
Lists through all the registered AuthHandlers to find one that is willing
to handle for the requested capabilities, config and provider.
:type host: string
:param host: The name of the host
:type config:
:param config:
:type provider:
:param provider:
Returns:
An implementation of AuthHandler.
Raises:
boto.exception.NoAuthHandlerFound
"""
ready_handlers = []
auth_handlers = boto.plugin.get_plugin(AuthHandler, requested_capability)
for handler in auth_handlers:
try:
ready_handlers.append(handler(host, config, provider))
except boto.auth_handler.NotReadyToAuthenticate:
pass
if not ready_handlers:
checked_handlers = auth_handlers
names = [handler.__name__ for handler in checked_handlers]
raise boto.exception.NoAuthHandlerFound(
'No handler was ready to authenticate. %d handlers were checked.'
' %s '
'Check your credentials' % (len(names), str(names)))
# We select the last ready auth handler that was loaded, to allow users to
# customize how auth works in environments where there are shared boto
# config files (e.g., /etc/boto.cfg and ~/.boto): The more general,
# system-wide shared configs should be loaded first, and the user's
# customizations loaded last. That way, for example, the system-wide
# config might include a plugin_directory that includes a service account
# auth plugin shared by all users of a Google Compute Engine instance
# (allowing sharing of non-user data between various services), and the
# user could override this with a .boto config that includes user-specific
# credentials (for access to user data).
return ready_handlers[-1]
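# Illustrative sketch (not part of boto): how calling code might pick a signer.
# The capability list is an example; config and provider are whatever the
# caller already holds (e.g. boto.config and a boto.provider.Provider).
def _example_pick_handler(host, config, provider):
    """Return a ready handler for SigV4-capable services."""
    return get_auth_handler(host, config, provider,
                            requested_capability=['hmac-v4'])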
def detect_potential_sigv4(func):
def _wrapper(self):
if os.environ.get('EC2_USE_SIGV4', False):
return ['hmac-v4']
if boto.config.get('ec2', 'use-sigv4', False):
return ['hmac-v4']
if hasattr(self, 'region'):
# If you're making changes here, you should also check
# ``boto/iam/connection.py``, as several things there are also
# endpoint-related.
if getattr(self.region, 'endpoint', ''):
for test in SIGV4_DETECT:
if test in self.region.endpoint:
return ['hmac-v4']
return func(self)
return _wrapper
def detect_potential_s3sigv4(func):
def _wrapper(self):
if os.environ.get('S3_USE_SIGV4', False):
return ['hmac-v4-s3']
if boto.config.get('s3', 'use-sigv4', False):
return ['hmac-v4-s3']
if not hasattr(self, 'host'):
return func(self)
# Keep the old explicit logic in case somebody was adding to the list.
for test in SIGV4_DETECT:
if test in self.host:
return ['hmac-v4-s3']
# Use default for non-aws hosts. Adding a url scheme is necessary if
# not present for urlparse to properly function.
host = self.host
        if not (self.host.startswith('http://') or
                self.host.startswith('https://')):
host = 'https://' + host
netloc = urlparse(host).netloc
if not (netloc.endswith('amazonaws.com') or
netloc.endswith('amazonaws.com.cn')):
return func(self)
# Use the default for the global endpoint
if netloc.endswith('s3.amazonaws.com'):
return func(self)
# Use the default for regions that support sigv4 and sigv2
if any(test in self.host for test in S3_AUTH_DETECT):
return func(self)
# Use anonymous if enabled.
if hasattr(self, 'anon') and self.anon:
return func(self)
# Default to sigv4 for aws hosts outside of regions that are known
# to support sigv2
return ['hmac-v4-s3']
return _wrapper
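# Illustrative sketch (not part of boto): the detection decorators above are
# meant to wrap a connection's _required_auth_capability method. The class and
# its base capability below are hypothetical.
class _ExampleS3Connection(object):
    host = 's3.eu-central-1.amazonaws.com'
    @detect_potential_s3sigv4
    def _required_auth_capability(self):
        # The wrapper upgrades this to ['hmac-v4-s3'] for SigV4-only regions
        # such as eu-central-1, or when S3_USE_SIGV4 is set.
        return ['s3']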
|
[] |
[] |
[
"EC2_USE_SIGV4",
"S3_USE_SIGV4"
] |
[]
|
["EC2_USE_SIGV4", "S3_USE_SIGV4"]
|
python
| 2 | 0 | |
app/config_example.py
|
import os
#Flask settings
os.environ['SECRET_KEY'] = '22222'
os.environ['UPLOADED_PHOTOS_DEST'] = "app/upload_photos"
#Flask-Mail settings
os.environ['MAIL_USERNAME'] = '[email protected]'
os.environ['MAIL_PASSWORD'] = 'password'
os.environ['MAIL_DEFAULT_SENDER'] = '[email protected]'
#Admin user default
os.environ['ADMIN_USER'] = 'user'
os.environ['ADMIN_EMAIL'] = '[email protected]'
os.environ['ADMIN_PASSWORD'] = 'password'
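# Illustrative sketch (assumption, not part of this example config): how a
# Flask settings object might read back the variables exported above.
class ExampleConfig(object):
    SECRET_KEY = os.environ.get('SECRET_KEY')
    UPLOADED_PHOTOS_DEST = os.environ.get('UPLOADED_PHOTOS_DEST')
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    MAIL_DEFAULT_SENDER = os.environ.get('MAIL_DEFAULT_SENDER')
    ADMIN_USER = os.environ.get('ADMIN_USER')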
|
[] |
[] |
[
"UPLOADED_PHOTOS_DEST",
"MAIL_PASSWORD",
"ADMIN_USER",
"ADMIN_EMAIL",
"MAIL_DEFAULT_SENDER",
"SECRET_KEY",
"MAIL_USERNAME",
"ADMIN_PASSWORD"
] |
[]
|
["UPLOADED_PHOTOS_DEST", "MAIL_PASSWORD", "ADMIN_USER", "ADMIN_EMAIL", "MAIL_DEFAULT_SENDER", "SECRET_KEY", "MAIL_USERNAME", "ADMIN_PASSWORD"]
|
python
| 8 | 0 | |
misc/ng_php_conf_hooks.py
|
import os, re, shutil
# #
try:
internalIp = os.environ['OPENSHIFT_DIY_IP']
except:
try:
from subprocess import check_output
import subprocess
# ip = check_output(["dig", "+short", "@resolver1.opendns.com",
# "myip.opendns.com"]).decode().strip()
st = """/sbin/ifconfig |grep -B1 "inet addr" |awk '{ if ( $1 == "inet" ) { print $2 } else if ( $2 == "Link" ) { printf "%s:" ,$1 } }' |awk -F: '{ print $1 ": " $3 }'"""
output = subprocess.Popen([st], stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
ip2 = output.communicate()[0]
        print ip2.rstrip()
internalIp = ip2.split('lo:')[1].replace(' ', '')
print internalIp
except:
        print 'We could not find the internal IP of your app or system; ' \
              'this probably happened because this script expects a Linux system and you are running Windows'
        print '\n So we fall back to ip="127.0.0.1" as the internal IP for nginx to listen on !!!!!!!!'
internalIp = '127.0.0.1'
try:
CurrentDir=os.path.dirname(os.path.realpath(__file__))
Parent_Dir=os.path.abspath(os.path.join(CurrentDir, '..'))
except:
CurrentDir=os.getcwd()
Parent_Dir=os.path.abspath(os.path.join(CurrentDir, '..'))
try:
#internalIp = os.environ['OPENSHIFT_DIY_IP']
internalPort = os.environ['OPENSHIFT_DIY_PORT']
OPENSHIFT_HOMEDIR=os.environ['OPENSHIFT_HOMEDIR']
runtimeDir = os.environ['OPENSHIFT_HOMEDIR'] + "/app-root/runtime"
Destination = os.environ['OPENSHIFT_HOMEDIR'] + "/app-root/runtime/repo"
Bash_File=os.environ['OPENSHIFT_HOMEDIR']
repoDir=Destination+'/www'
nginx_dir=os.environ['OPENSHIFT_HOMEDIR']+"/app-root/runtime/srv/nginx"
except:
print """we could not to find out enviroment parameters like:\n
#internalIp = os.environ['OPENSHIFT_DIY_IP']
internalPort = os.environ['OPENSHIFT_DIY_PORT']
OPENSHIFT_HOMEDIR=os.environ['OPENSHIFT_HOMEDIR']
runtimeDir = os.environ['OPENSHIFT_HOMEDIR'] + "/app-root/runtime"
Destination = os.environ['OPENSHIFT_HOMEDIR'] + "/app-root/runtime/repo"
Bash_File=os.environ['OPENSHIFT_HOMEDIR']
repoDir=Destination+'/www'
nginx_dir=os.environ['OPENSHIFT_HOMEDIR']+"/app-root/runtime/srv/nginx"""
try:
OPENSHIFT_HOMEDIR=os.environ['OPENSHIFT_HOMEDIR']
Gear_DNS=os.environ['OPENSHIFT_GEAR_DNS']
except:
internalPort = '8080'
os.environ['OPENSHIFT_HOMEDIR'] = Parent_Dir
OPENSHIFT_HOMEDIR=os.environ['OPENSHIFT_HOMEDIR']
runtimeDir = os.environ['OPENSHIFT_HOMEDIR'] + "/app-root/runtime"
# Destination = os.environ['OPENSHIFT_HOMEDIR'] + "/app-root/runtime/repo"
Destination= os.path.abspath(os.path.join(Parent_Dir, '..'))
Bash_File=os.environ['OPENSHIFT_HOMEDIR']
repoDir=Destination+'/www'
nginx_dir=os.environ['OPENSHIFT_HOMEDIR']+"/app-root/runtime/srv/nginx"
Gear_DNS='127.0.0.1'
www_Gear_DNS='www'+Gear_DNS
print Parent_Dir
from sys import platform as _platform
if _platform == "linux" or _platform == "linux2":
# linux
print 'we are using linux '
elif _platform == "darwin":
# OS X
print 'we are using OS X '
elif _platform == "win32":
# Windows...
    print 'we are using Windows '
Parent_Dir = Parent_Dir.replace('\\', '/')
repoDir = repoDir.replace('\\', '/')
Bash_File = Bash_File.replace('\\', '/')
runtimeDir = runtimeDir.replace('\\', '/')
Destination=Destination.replace('\\','/')
## Copy the contents of file_pattern into file_target if they differ, and
# create file_target first if it does not exist
def replace(file_pattern,file_target):
# Read contents from file_target as a single string
if os.path.isfile(file_target):
pass
else:
file_pattern2 = open(file_pattern, 'r')
pattern = file_pattern2.readlines()
file_pattern2.close()
file_handle2= open(file_target, 'wb')
file_handle2.writelines(pattern)
file_handle2.close()
file_handle = open(file_target, 'r')
# file_string1 = file_handle.read()
file_string = file_handle.readlines()
file_handle.close()
file_pattern2 = open(file_pattern, 'r')
pattern = file_pattern2.readlines()
file_pattern2.close()
file_handle2= open(file_target, 'a+b')
i=-1
t=-1
for line in range(i+1, len(pattern)):
I_S=0
for j in range(t+1, len(file_string)):
if pattern[line] in file_string[j] :
I_S=1
break
else:
pass
if I_S==0 :
file_handle2.writelines(pattern[line])
file_handle2.close()
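## Illustrative sketch (assumption, not part of the original script): sync one
## template into the deployed tree with replace(); the filename is hypothetical.
def sync_template_example(template_name):
    src = Parent_Dir + '/misc/templates/' + template_name
    dst = Destination + '/' + template_name
    replace(src, dst)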
## copy new files and strings to destination
for root, dirs, files in os.walk(Parent_Dir):
curent_path0=root.split(Parent_Dir)[1]+'/'
curent_path=curent_path0.replace('\\','/')
root=root.replace('\\','/')
for dir2 in dirs:
if os.path.isdir(Destination+ curent_path+dir2):
pass
else:
if not os.path.isdir(Destination):os.mkdir(Destination)
os.mkdir(Destination+ curent_path+dir2)
for file2 in files:
if os.path.isfile(Destination+ curent_path+file2):
path = os.path.join(root, file2)
size_source = os.stat(path.replace('\\','/')).st_size # in bytes
size_target=os.stat(Destination+ curent_path+file2).st_size
if size_source != size_target:
replace(Parent_Dir+curent_path+file2,Destination+ curent_path+file2)
else:
replace(Parent_Dir+curent_path+file2,Destination+ curent_path+file2)
#replace(Parent_Dir+"/misc/templates/bash_profile.tpl",Bash_File+'/app-root/data/.bash_profile')
try:
f = open(Destination + '/misc/templates/php-fpm.conf.default', 'r')
conf = f.read().replace('{{OPENSHIFT_INTERNAL_IP}}', internalIp).replace('9000','25641').replace('{{OPENSHIFT_REPO_DIR}}', repoDir).replace('{{OPENSHIFT_RUNTIME_DIR}}', runtimeDir)
f.close()
f = open(runtimeDir + '/srv/php/etc/php-fpm.conf', 'w')
f.write(conf)
f.close()
except:pass
try:
f = open(Destination + '/misc/templates/php.ini.tpl', 'r')
conf = f.read().replace('{{OPENSHIFT_INTERNAL_IP}}', internalIp).replace('8081',internalPort).replace('{{OPENSHIFT_REPO_DIR}}', repoDir).replace('{{OPENSHIFT_RUNTIME_DIR}}', runtimeDir)
f.close()
f = open(runtimeDir + '/srv/php/etc/apache2/php.ini', 'w')
f.write(conf)
f.close()
except:pass
f = open(Destination + '/misc/templates/nginx.conf.tpl', 'r')
conf = f.read().replace('{{OPENSHIFT_INTERNAL_IP}}', internalIp)\
.replace('{{OPENSHIFT_INTERNAL_PORT}}',internalPort)
conf=conf.replace('{{OPENSHIFT_HOMEDIR}}',OPENSHIFT_HOMEDIR).replace('{{OPENSHIFT_REPO_DIR}}', repoDir)
conf=conf.replace('{{OPENSHIFT_RUNTIME_DIR}}', runtimeDir).replace('{{OPENSHIFT_GEAR_DNS}}', Gear_DNS)\
.replace('{{www.OPENSHIFT_GEAR_DNS}}', www_Gear_DNS).replace('{{NGINX_DIR}}', nginx_dir )
f.close()
f = open(runtimeDir + '/srv/nginx/conf/nginx.conf', 'w')
#f = open(Destination + '/nginx.conf', 'w')
f.write(conf)
f.close()
|
[] |
[] |
[
"OPENSHIFT_HOMEDIR",
"OPENSHIFT_GEAR_DNS",
"OPENSHIFT_DIY_IP",
"OPENSHIFT_DIY_PORT"
] |
[]
|
["OPENSHIFT_HOMEDIR", "OPENSHIFT_GEAR_DNS", "OPENSHIFT_DIY_IP", "OPENSHIFT_DIY_PORT"]
|
python
| 4 | 0 | |
docs/conf.py
|
import os
import re
import sys
from os import path
sys.path.insert(0, path.abspath('..'))
project = 'Finetuner'
slug = re.sub(r'\W+', '-', project.lower())
author = 'Jina AI'
copyright = 'Jina AI Limited. All rights reserved.'
source_suffix = ['.rst', '.md']
master_doc = 'index'
language = 'en'
repo_dir = '../'
try:
if 'JINA_VERSION' not in os.environ:
pkg_name = 'finetuner'
libinfo_py = path.join(repo_dir, pkg_name, '__init__.py')
libinfo_content = open(libinfo_py, 'r').readlines()
version_line = [
l.strip() for l in libinfo_content if l.startswith('__version__')
][0]
exec(version_line)
else:
__version__ = os.environ['JINA_VERSION']
except FileNotFoundError:
__version__ = '0.0.0'
version = __version__
release = __version__
templates_path = ['_templates']
exclude_patterns = [
'_build',
'Thumbs.db',
'.DS_Store',
'tests',
'page_templates',
'.github',
]
pygments_style = 'rainbow_dash'
html_theme = 'furo'
base_url = '/'
html_baseurl = 'https://finetuner.jina.ai'
sitemap_url_scheme = '{link}'
sitemap_locales = [None]
sitemap_filename = "sitemap.xml"
html_theme_options = {
'light_logo': 'logo-light.svg',
'dark_logo': 'logo-dark.svg',
"sidebar_hide_name": True,
"light_css_variables": {
"color-brand-primary": "#009191",
"color-brand-content": "#009191",
},
"dark_css_variables": {
"color-brand-primary": "#FBCB67",
"color-brand-content": "#FBCB67",
},
}
html_static_path = ['_static']
html_extra_path = ['html_extra']
html_css_files = [
'main.css',
'docbot.css',
'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0-beta2/css/all.min.css',
]
html_js_files = []
htmlhelp_basename = slug
html_show_sourcelink = False
html_favicon = '_static/favicon.png'
latex_documents = [(master_doc, f'{slug}.tex', project, author, 'manual')]
man_pages = [(master_doc, slug, project, [author], 1)]
texinfo_documents = [
(master_doc, slug, project, author, slug, project, 'Miscellaneous')
]
epub_title = project
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
extensions = [
'sphinx.ext.autodoc',
'sphinx_autodoc_typehints',
'sphinx.ext.viewcode',
'sphinx.ext.coverage',
'sphinxcontrib.apidoc',
'sphinxarg.ext',
'sphinx_markdown_tables',
'sphinx_copybutton',
'sphinx_sitemap',
'sphinx.ext.intersphinx',
'sphinxext.opengraph',
'notfound.extension',
'myst_parser',
'sphinx_design',
'sphinx_inline_tabs',
'sphinx_multiversion',
]
myst_enable_extensions = ['colon_fence', 'dollarmath']
myst_dmath_double_inline = True
# -- Custom 404 page
# sphinx-notfound-page
# https://github.com/readthedocs/sphinx-notfound-page
notfound_context = {
'title': 'Page Not Found',
'body': '''
<h1>Page Not Found</h1>
<p>Oops, we couldn't find that page. </p>
<p>You can try using the search box or check our menu on the left hand side of this page.</p>
<p>If neither of those options work, please create a Github issue ticket <a href="https://github.com/jina-ai/finetuner/">here</a>, and one of our team will respond.</p>
''',
}
notfound_no_urls_prefix = True
apidoc_module_dir = repo_dir
apidoc_output_dir = 'api'
apidoc_excluded_paths = ['tests', 'legacy', 'hub', 'toy*', 'setup.py']
apidoc_separate_modules = True
apidoc_extra_args = ['-t', 'template/']
autodoc_member_order = 'bysource'
autodoc_mock_imports = [
'argparse',
'numpy',
'np',
'tensorflow',
'torch',
'scipy',
'keras',
'paddle',
]
autoclass_content = 'both'
set_type_checking_flag = False
html_last_updated_fmt = ''
nitpicky = True
nitpick_ignore = [('py:class', 'type')]
linkcheck_ignore = [
# Avoid link check on local uri
'http://0.0.0.0:*',
'pods/encode.yml',
'https://github.com/jina-ai/jina/commit/*',
'.github/*',
'extra-requirements.txt',
    'fastentrypoints.py',
    '../../101',
'../../102',
'http://www.twinsun.com/tz/tz-link.htm', # Broken link from pytz library
'https://urllib3.readthedocs.io/en/latest/contrib.html#google-app-engine', # Broken link from urllib3 library
'https://linuxize.com/post/how-to-add-swap-space-on-ubuntu-20-04/',
# This link works but gets 403 error on linkcheck
]
linkcheck_timeout = 20
linkcheck_retries = 2
linkcheck_anchors = False
ogp_site_url = 'https://finetuner.jina.ai/'
ogp_image = 'https://finetuner.jina.ai/_static/banner.png'
ogp_use_first_image = False
ogp_description_length = 300
ogp_type = 'website'
ogp_site_name = f'Finetuner {os.environ.get("SPHINX_MULTIVERSION_VERSION", version)} Documentation'
ogp_custom_meta_tags = [
'<meta name="twitter:card" content="summary_large_image">',
'<meta name="twitter:site" content="@JinaAI_">',
'<meta name="twitter:creator" content="@JinaAI_">',
'<meta name="description" content="Finetuner allows one to finetune any deep neural network for better embedding on search tasks.">',
'<meta property="og:description" content="Finetuner allows one to finetune any deep neural network for better embedding on search tasks.">',
'''
<!-- Global site tag (gtag.js) - Google Analytics -->
<script async src="https://www.googletagmanager.com/gtag/js?id=G-1ESRNDCK35"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'G-1ESRNDCK35');
</script>
<!-- Place this tag in your head or just before your close body tag. -->
<script async defer src="https://buttons.github.io/buttons.js"></script>
<script async defer src="https://cdn.jsdelivr.net/npm/[email protected]"></script>
''',
]
def smv_config(string: str):
return r'^{}$'.format(string.strip().replace(' ', '|'))
html_context = {
'latest_finetuner_version': os.environ.get('LATEST_FINETUNER_VERSION', 'main')
}
smv_tag_whitelist = smv_config(os.environ.get('SMV_TAG_WHITELIST', 'v2.4.7'))
smv_branch_whitelist = smv_config(os.environ.get('SMV_BRANCH_WHITELIST', 'main'))
smv_remote_whitelist = None
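# Illustrative note (assumption): smv_config turns the space-separated
# whitelists above into the anchored regex that sphinx-multiversion expects,
# e.g. smv_config('v2.4.7 v2.4.8') evaluates to r'^v2.4.7|v2.4.8$'.
_smv_config_example = smv_config('v2.4.7 v2.4.8')  # -> '^v2.4.7|v2.4.8$'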
def set_qa_server_address(app):
# This sets the server address to <qa-bot>
server_address = app.config['server_address']
js_text = """
document.addEventListener("DOMContentLoaded", function() {
document.querySelector("qa-bot").setAttribute("server", "%s");
});
""" % server_address
app.add_js_file(None, body=js_text)
def setup(app):
from sphinx.domains.python import PyField
from sphinx.util.docfields import Field
from sphinx.locale import _
app.add_object_type(
'confval',
'confval',
objname='configuration value',
indextemplate='pair: %s; configuration value',
doc_field_types=[
PyField(
'type',
label=_('Type'),
has_arg=False,
names=('type',),
bodyrolename='class',
),
Field(
'default',
label=_('Default'),
has_arg=False,
names=('default',),
),
],
)
app.add_config_value(
name='server_address',
default=os.getenv(
'FINETUNER_DOCSBOT_SERVER', 'https://finetuner-docsbot.jina.ai'
),
rebuild='',
)
app.connect('builder-inited', set_qa_server_address)
|
[] |
[] |
[
"SPHINX_MULTIVERSION_VERSION",
"FINETUNER_DOCSBOT_SERVER",
"LATEST_FINETUNER_VERSION",
"JINA_VERSION",
"SMV_TAG_WHITELIST",
"SMV_BRANCH_WHITELIST"
] |
[]
|
["SPHINX_MULTIVERSION_VERSION", "FINETUNER_DOCSBOT_SERVER", "LATEST_FINETUNER_VERSION", "JINA_VERSION", "SMV_TAG_WHITELIST", "SMV_BRANCH_WHITELIST"]
|
python
| 6 | 0 | |
SheetMe.py
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "oschub.settings")
import django
django.setup()
import gspread
from google.oauth2 import service_account
from eventreg.models import EventUserData, Event
from accounts.models import MailList
import datetime
from decouple import config
import json
# creates a spreadSheet.
def createSpreadSheet(mailList, title="NewSpreadsheet"):
try:
global createdNewSpreadSheet
if not createdNewSpreadSheet:
sheet = service.create(title)
print("[$] SpreadSheet ID: " + str(sheet.id))
for index, emailid in enumerate(mailList):
# Commented code cause Ownership Access error
# if index == 0:
# sheet.share(emailid, perm_type="user", role="owner")
# else:
sheet.share(emailid, perm_type="user", role="writer", notify=True)
print("Shared sheet to " + emailid)
createdNewSpreadSheet = True
except gspread.exceptions.APIError as error:
print("API Error: Trying Again !!")
print(error)
createSpreadSheet(mailList, title) # If API error then try again
def createSheet(title="EventName", row="10000", col="25"):
try:
global createdNewSpreadSheet
sheet = service.open("Events") # opens the file "Events"
print("[x] Found spreadsheet 'Events' ")
if createdNewSpreadSheet:
sheet.add_worksheet(title, rows=row, cols=col)
tmp = sheet.get_worksheet(0)
sheet.del_worksheet(tmp)
print(f"[!] Renamed default Sheet1 to {title}")
createdNewSpreadSheet = False
else:
sheet.add_worksheet(title, rows=row, cols=col)
print("[x] Added sheet - " + title)
worksheet = sheet.worksheet(title)
worksheet.append_row(["Reg No", "Name", "Email", "Registered", "Attended"])
worksheet.format(
"A1:E1", {"horizontalAlignment": "CENTER", "textFormat": {"bold": True}}
)
print(f"[x] Added Header data to the sheet {title}")
return worksheet
except gspread.exceptions.SpreadsheetNotFound:
print('[!] "Events" SpreadSheet not found, attempting to create a new one')
createSpreadSheet(admin_mail, "Events")
createSheet(title)
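# Illustrative sketch (assumption, not part of this script): create a sheet for
# a finished event and append one attendee row; the values are hypothetical.
def exampleAddSingleRow():
    ws = createSheet("Intro to Git Workshop")
    ws.append_row(["20BCE1234", "Jane Doe", "[email protected]", "Yes", "No"])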
def getCompletedEvents():
# Filtering out the events that are over
events = Event.objects.all().filter(
eventDate__lt=datetime.date.today()
) # gets the events with date before today
eventlist = []
for event in events:
eventlist.append(event.eventName.replace(':', '|'))
events = Event.objects.filter(eventDate=datetime.date.today()).filter(
eventEndTime__lt=datetime.datetime.now().strftime("%H:%M:%S")
)
for event in events:
eventlist.append(event.eventName.replace(':', '|'))
return eventlist
def updateData():
admin_mail_latest = getAdminMail()
event_list = getCompletedEvents()
# If spreadsheet not found then make a new one
try:
sheet = service.open("Events")
except gspread.exceptions.SpreadsheetNotFound:
print('[!] "Events" SpreadSheet not found, attempting to create a new one')
createSpreadSheet(admin_mail, "Events")
sheet = service.open("Events")
# sharing the sheet once again to share the file with newly added user
for email_id in admin_mail_latest:
if email_id not in admin_mail:
sheet.share(email_id, perm_type="user", role="writer", notify=True)
print("Shared sheet to " + email_id)
# get all the available worksheets
worksheet = sheet.worksheets()
sheetList = []
for work in worksheet:
sheetList.append(work.title)
# getting user data for the events that are over
for event in event_list:
studentList = []
if event in sheetList:
print(f"[!] Skipping the Sheet, the worksheet {event} already exists !!")
else:
students = EventUserData.objects.filter(eventName__eventName=event.replace('|', ':'))
for student in students:
studentList.append(
[
student.studentReg,
student.studentName,
student.studentEmail,
"Yes" if student.studentRegistered else "No",
"Yes" if student.studentCheckedIn else "No",
]
)
worksheet = createSheet(event)
worksheet.batch_update(
[{"range": f"A2:E{len(studentList) + 1}", "values": studentList}]
)
print("[x] Added user data set to sheet " + event)
def getAdminMail():
admin_mail = []
mailList = MailList.objects.all()
for mail in mailList:
admin_mail.append(mail.email)
return admin_mail
def delAllSpreadsheet():
for spreadsheet in service.openall():
service.del_spreadsheet(spreadsheet.id)
print("deleted " + spreadsheet.title + " || " + spreadsheet.id)
# CAUTION: transferring ownership raised an API error (see the commented-out code in createSpreadSheet), so every email in the list is given writer access.
createdNewSpreadSheet = False
admin_mail = getAdminMail()
SCOPE = [
"https://spreadsheets.google.com/feeds",
"https://www.googleapis.com/auth/spreadsheets",
"https://www.googleapis.com/auth/drive.file",
"https://www.googleapis.com/auth/drive",
]
credential_info = json.loads(config("CREDENTIALS"))
credential = service_account.Credentials.from_service_account_info(credential_info, scopes=SCOPE)
service = gspread.authorize(credential)
if __name__ == "__main__":
# Use the following method to update data to the google spreadsheet
updateData()
# Use the following method to delete all the existing spreadsheets of the bot account
# delAllSpreadsheet()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
individual/ch5/igweb/shared/templatefuncs/funcs.go
|
package templatefuncs
import (
"os"
"strconv"
"time"
"go.isomorphicgo.org/go/isokit"
)
func RubyDate(t time.Time) string {
layout := time.RubyDate
return t.Format(layout)
}
func UnixTime(t time.Time) string {
return strconv.FormatInt(t.Unix(), 10)
}
func IsProduction() bool {
if isokit.OperatingEnvironment() == isokit.ServerEnvironment {
return os.Getenv("IGWEB_MODE") == "production"
} else {
return false
}
}
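// Illustrative sketch (assumption, not part of the book's code): gating
// behaviour on IsProduction, which in turn reads the IGWEB_MODE environment
// variable on the server side.
func ExampleModeBanner() string {
	if IsProduction() {
		return "running in production mode"
	}
	return "running in development mode"
}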
|
[
"\"IGWEB_MODE\""
] |
[] |
[
"IGWEB_MODE"
] |
[]
|
["IGWEB_MODE"]
|
go
| 1 | 0 | |
backend/manage.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
################################################################################
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'server.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
################################################################################
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
google/appengine/ext/datastore_admin/backup_handler.py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Handler for data backup operation.
Generic datastore admin console transfers control to ConfirmBackupHandler
after selection of entities. The ConfirmBackupHandler confirms the user's
choice and backup name, then transfers control to DoBackupHandler.
DoBackupHandler starts the backup mappers and displays a confirmation page.
This module also contains the actual mapper code for backing up the data.
"""
from __future__ import with_statement
import cStringIO
import datetime
import itertools
import logging
import os
import random
import re
import time
import urllib
import xml.dom.minidom
from google.appengine.datastore import entity_pb
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import app_identity
from google.appengine.api import blobstore as blobstore_api
from google.appengine.api import capabilities
from google.appengine.api import datastore
from google.appengine.api import datastore_types
from google.appengine.api import files
from google.appengine.api import taskqueue
from google.appengine.api import urlfetch
from google.appengine.api.files import records
from google.appengine.api.taskqueue import taskqueue_service_pb
from google.appengine.datastore import datastore_query
from google.appengine.datastore import datastore_rpc
from google.appengine.ext import blobstore
from google.appengine.ext import db
from google.appengine.ext import deferred
from google.appengine.ext import webapp
from google.appengine.ext.datastore_admin import backup_pb2
from google.appengine.ext.datastore_admin import config
from google.appengine.ext.datastore_admin import utils
from google.appengine.runtime import apiproxy_errors
try:
from google.appengine.ext.mapreduce import context
from google.appengine.ext.mapreduce import datastore_range_iterators as db_iters
from google.appengine.ext.mapreduce import input_readers
from google.appengine.ext.mapreduce import json_util
from google.appengine.ext.mapreduce import operation as op
from google.appengine.ext.mapreduce import output_writers
except ImportError:
from google.appengine._internal.mapreduce import context
from google.appengine._internal.mapreduce import datastore_range_iterators as db_iters
from google.appengine._internal.mapreduce import input_readers
from google.appengine._internal.mapreduce import json_util
from google.appengine._internal.mapreduce import operation as op
from google.appengine._internal.mapreduce import output_writers
try:
from google.appengine.ext.datastore_admin import services_client
except ImportError:
pass
XSRF_ACTION = 'backup'
BUCKET_PATTERN = (r'^([a-zA-Z0-9]+([\-_]+[a-zA-Z0-9]+)*)'
r'(\.([a-zA-Z0-9]+([\-_]+[a-zA-Z0-9]+)*))*$')
MAX_BUCKET_LEN = 222
MIN_BUCKET_LEN = 3
MAX_BUCKET_SEGMENT_LEN = 63
NUM_KINDS_DEFERRED_THRESHOLD = 10
MAX_BLOBS_PER_DELETE = 500
TEST_WRITE_FILENAME_PREFIX = 'datastore_backup_write_test'
MAX_KEYS_LIST_SIZE = 100
MAX_TEST_FILENAME_TRIES = 10
MEANING_TO_PRIMITIVE_TYPE = {
entity_pb.Property.GD_WHEN: backup_pb2.EntitySchema.DATE_TIME,
entity_pb.Property.GD_RATING: backup_pb2.EntitySchema.RATING,
entity_pb.Property.ATOM_LINK: backup_pb2.EntitySchema.LINK,
entity_pb.Property.ATOM_CATEGORY: backup_pb2.EntitySchema.CATEGORY,
entity_pb.Property.GD_PHONENUMBER: backup_pb2.EntitySchema.PHONE_NUMBER,
entity_pb.Property.GD_POSTALADDRESS: backup_pb2.EntitySchema.POSTAL_ADDRESS,
entity_pb.Property.GD_EMAIL: backup_pb2.EntitySchema.EMAIL,
entity_pb.Property.GD_IM: backup_pb2.EntitySchema.IM_HANDLE,
entity_pb.Property.BLOBKEY: backup_pb2.EntitySchema.BLOB_KEY,
entity_pb.Property.TEXT: backup_pb2.EntitySchema.TEXT,
entity_pb.Property.BLOB: backup_pb2.EntitySchema.BLOB,
entity_pb.Property.BYTESTRING: backup_pb2.EntitySchema.SHORT_BLOB
}
class ConfirmBackupHandler(webapp.RequestHandler):
"""Handler to deal with requests from the admin console to backup data."""
SUFFIX = 'confirm_backup'
@classmethod
def Render(cls, handler):
"""Rendering method that can be called by main.py.
Args:
handler: the webapp.RequestHandler invoking the method
"""
kinds = handler.request.get_all('kind')
sizes_known, size_total, remainder = utils.ParseKindsAndSizes(kinds)
notreadonly_warning = capabilities.CapabilitySet(
'datastore_v3', capabilities=['write']).is_enabled()
blob_warning = bool(blobstore.BlobInfo.all().count(1))
template_params = {
'run_as_a_service': handler.request.get('run_as_a_service'),
'form_target': DoBackupHandler.SUFFIX,
'kind_list': kinds,
'remainder': remainder,
'sizes_known': sizes_known,
'size_total': size_total,
'queues': None,
'datastore_admin_home': utils.GenerateHomeUrl(handler.request),
'namespaces': get_namespaces(handler.request.get('namespace', None)),
'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION),
'notreadonly_warning': notreadonly_warning,
'blob_warning': blob_warning,
'backup_name': 'datastore_backup_%s' % time.strftime('%Y_%m_%d')
}
utils.RenderToResponse(handler, 'confirm_backup.html', template_params)
def get_namespaces(selected_namespace):
namespaces = [('--All--', '*', selected_namespace is None)]
for ns in datastore.Query('__namespace__', keys_only=True).Run():
ns_name = ns.name() or ''
namespaces.append((ns_name or '--Default--',
ns_name,
ns_name == selected_namespace))
return namespaces
class ConfirmDeleteBackupHandler(webapp.RequestHandler):
"""Handler to confirm admin console requests to delete a backup copy."""
SUFFIX = 'confirm_delete_backup'
@classmethod
def Render(cls, handler):
"""Rendering method that can be called by main.py.
Args:
handler: the webapp.RequestHandler invoking the method
"""
requested_backup_ids = handler.request.get_all('backup_id')
backups = []
gs_warning = False
if requested_backup_ids:
for backup in db.get(requested_backup_ids):
if backup:
backups.append(backup)
gs_warning |= backup.filesystem == files.GS_FILESYSTEM
template_params = {
'form_target': DoBackupDeleteHandler.SUFFIX,
'datastore_admin_home': utils.GenerateHomeUrl(handler.request),
'backups': backups,
'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION),
'gs_warning': gs_warning,
'run_as_a_service': handler.request.get('run_as_a_service'),
}
utils.RenderToResponse(handler, 'confirm_delete_backup.html',
template_params)
class ConfirmAbortBackupHandler(webapp.RequestHandler):
"""Handler to confirm admin console requests to abort a backup copy."""
SUFFIX = 'confirm_abort_backup'
@classmethod
def Render(cls, handler):
"""Rendering method that can be called by main.py.
Args:
handler: the webapp.RequestHandler invoking the method
"""
requested_backup_ids = handler.request.get_all('backup_id')
backups = []
if requested_backup_ids:
for backup in db.get(requested_backup_ids):
if backup:
backups.append(backup)
template_params = {
'form_target': DoBackupAbortHandler.SUFFIX,
'datastore_admin_home': utils.GenerateHomeUrl(handler.request),
'backups': backups,
'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION),
'run_as_a_service': handler.request.get('run_as_a_service'),
}
utils.RenderToResponse(handler, 'confirm_abort_backup.html',
template_params)
class ConfirmRestoreFromBackupHandler(webapp.RequestHandler):
"""Handler to confirm admin console requests to restore from backup."""
SUFFIX = 'confirm_restore_from_backup'
@classmethod
def Render(cls, handler, default_backup_id=None,
default_delete_backup_after_restore=False):
"""Rendering method that can be called by main.py.
Args:
handler: the webapp.RequestHandler invoking the method
default_backup_id: default value for handler.request
default_delete_backup_after_restore: default value for handler.request
"""
backup_id = handler.request.get('backup_id', default_backup_id)
backup = db.get(backup_id) if backup_id else None
notreadonly_warning = capabilities.CapabilitySet(
'datastore_v3', capabilities=['write']).is_enabled()
original_app_warning = backup.original_app
if os.getenv('APPLICATION_ID') == original_app_warning:
original_app_warning = None
template_params = {
'form_target': DoBackupRestoreHandler.SUFFIX,
'queues': None,
'datastore_admin_home': utils.GenerateHomeUrl(handler.request),
'backup': backup,
'delete_backup_after_restore': handler.request.get(
'delete_backup_after_restore', default_delete_backup_after_restore),
'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION),
'notreadonly_warning': notreadonly_warning,
'original_app_warning': original_app_warning,
'run_as_a_service': handler.request.get('run_as_a_service'),
}
utils.RenderToResponse(handler, 'confirm_restore_from_backup.html',
template_params)
class ConfirmBackupImportHandler(webapp.RequestHandler):
"""Handler to import backup information."""
SUFFIX = 'backup_information'
@classmethod
def Render(cls, handler):
"""Rendering method that can be called by main.py.
Args:
handler: the webapp.RequestHandler invoking the method
"""
gs_handle = handler.request.get('gs_handle')
error = None if gs_handle else 'Google Cloud Storage path is missing'
other_backup_info_files = []
selected_backup_info_file = None
backup_info_specified = False
if not error:
try:
gs_handle = gs_handle.rstrip()
bucket_name, prefix = parse_gs_handle(gs_handle)
validate_gs_bucket_name(bucket_name)
if not is_accessible_bucket_name(bucket_name):
raise BackupValidationError(
'Bucket "%s" is not accessible' % bucket_name)
if prefix.endswith('.backup_info'):
prefix = prefix[0:prefix.rfind('/')]
backup_info_specified = True
elif prefix and not prefix.endswith('/'):
prefix += '/'
for backup_info_file in list_bucket_files(bucket_name, prefix):
backup_info_path = '/gs/%s/%s' % (bucket_name, backup_info_file)
if backup_info_specified and backup_info_path == gs_handle:
selected_backup_info_file = backup_info_path
elif (backup_info_file.endswith('.backup_info')
and backup_info_file.count('.') == 1):
other_backup_info_files.append(backup_info_path)
except Exception, ex:
error = 'Failed to read bucket: %s' % ex.message
logging.exception(ex.message)
template_params = {
'error': error,
'form_target': DoBackupImportHandler.SUFFIX,
'datastore_admin_home': utils.GenerateHomeUrl(handler.request),
'selected_backup_info_file': selected_backup_info_file,
'other_backup_info_files': other_backup_info_files,
'backup_info_specified': backup_info_specified,
'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION),
'run_as_a_service': handler.request.get('run_as_a_service'),
}
utils.RenderToResponse(handler, 'confirm_backup_import.html',
template_params)
class BackupInformationHandler(webapp.RequestHandler):
"""Handler to display backup information."""
SUFFIX = 'backup_information'
@classmethod
def Render(cls, handler):
"""Rendering method that can be called by main.py.
Args:
handler: the webapp.RequestHandler invoking the method
"""
backup_ids = handler.request.get_all('backup_id')
template_params = {
'backups': db.get(backup_ids),
'datastore_admin_home': utils.GenerateHomeUrl(handler.request),
'run_as_a_service': handler.request.get('run_as_a_service'),
}
utils.RenderToResponse(handler, 'backup_information.html', template_params)
class BaseDoHandler(webapp.RequestHandler):
"""Base class for all Do*Handlers."""
MAPREDUCE_DETAIL = config.MAPREDUCE_PATH + '/detail?mapreduce_id='
def get(self):
"""Handler for get requests to datastore_admin backup operations.
Status of executed jobs is displayed.
"""
jobs = self.request.get_all('job')
remote_job = self.request.get('remote_job')
tasks = self.request.get_all('task')
error = self.request.get('error', '')
xsrf_error = self.request.get('xsrf_error', '')
template_params = {
'job_list': jobs,
'remote_job': remote_job,
'task_list': tasks,
'mapreduce_detail': self.MAPREDUCE_DETAIL,
'error': error,
'xsrf_error': xsrf_error,
'datastore_admin_home': utils.GenerateHomeUrl(self.request),
}
utils.RenderToResponse(self, self._get_html_page, template_params)
@property
def _get_html_page(self):
"""Return the name of the HTML page for HTTP/GET requests."""
raise NotImplementedError
@property
def _get_post_html_page(self):
"""Return the name of the HTML page for HTTP/POST requests."""
raise NotImplementedError
def _ProcessPostRequest(self):
"""Process the HTTP/POST request and return the result as parametrs."""
raise NotImplementedError
def _GetBasicMapperParams(self):
namespace = self.request.get('namespace', None)
if namespace == '*':
namespace = None
return {'namespace': namespace}
def SendRedirect(self, path=None, params=()):
"""Send a redirect response."""
run_as_a_service = self.request.get('run_as_a_service')
if run_as_a_service:
params = list(params)
params.append(('run_as_a_service', True))
dest = config.BASE_PATH
if path:
dest = '%s/%s' % (dest, path)
if params:
dest = '%s?%s' % (dest, urllib.urlencode(params))
self.redirect(dest)
def post(self):
"""Handler for post requests to datastore_admin/backup.do.
Redirects to the get handler after processing the request.
"""
token = self.request.get('xsrf_token')
if not utils.ValidateXsrfToken(token, XSRF_ACTION):
parameters = [('xsrf_error', '1')]
else:
try:
parameters = self._ProcessPostRequest()
except Exception, e:
error = self._HandleException(e)
parameters = [('error', error)]
self.SendRedirect(self._get_post_html_page, parameters)
def _HandleException(self, e):
"""Make exception handling overridable by tests.
Args:
e: The exception to handle.
Returns:
The exception error string.
"""
logging.exception(e.message)
return '%s: %s' % (type(e), e.message)
class BackupValidationError(utils.Error):
"""Raised upon backup request validation."""
def _perform_backup(run_as_a_service, kinds, selected_namespace,
filesystem, gs_bucket_name, backup,
queue, mapper_params, max_jobs):
"""Triggers backup mapper jobs.
Args:
run_as_a_service: True if backup should be done via admin-jobs
kinds: a sequence of kind names
selected_namespace: The selected namespace or None for all
filesystem: files.BLOBSTORE_FILESYSTEM or files.GS_FILESYSTEM
or None to default to blobstore
gs_bucket_name: the GS file system bucket in which to store the backup
when using the GS file system, and otherwise ignored
backup: the backup name
queue: the task queue for the backup task
mapper_params: the mapper parameters
max_jobs: if backup needs more jobs than this, defer them
Returns:
The job or task ids.
Raises:
BackupValidationError: On validation error.
Exception: On other error.
"""
BACKUP_COMPLETE_HANDLER = __name__ + '.BackupCompleteHandler'
BACKUP_HANDLER = __name__ + '.BackupEntity.map'
INPUT_READER = __name__ + '.DatastoreEntityProtoInputReader'
OUTPUT_WRITER = output_writers.__name__ + '.FileRecordsOutputWriter'
if run_as_a_service:
if not gs_bucket_name:
raise BackupValidationError('Bucket name missing.')
gs_bucket_name = validate_and_canonicalize_gs_bucket(gs_bucket_name)
datastore_admin_service = services_client.DatastoreAdminClient()
description = 'Remote backup job: %s' % backup
remote_job_id = datastore_admin_service.create_backup(
description, backup, gs_bucket_name, selected_namespace, kinds)
return [('remote_job', remote_job_id)]
queue = queue or os.environ.get('HTTP_X_APPENGINE_QUEUENAME', 'default')
if queue[0] == '_':
queue = 'default'
if not filesystem:
filesystem = files.BLOBSTORE_FILESYSTEM
if filesystem == files.GS_FILESYSTEM:
if not gs_bucket_name:
raise BackupValidationError('Bucket name missing.')
gs_bucket_name = validate_and_canonicalize_gs_bucket(gs_bucket_name)
elif filesystem == files.BLOBSTORE_FILESYSTEM:
pass
else:
raise BackupValidationError('Unknown filesystem "%s".' % filesystem)
backup_info = None
job_operation = None
job_name = 'datastore_backup_%s_%%(kind)s' % re.sub(r'[^\w]', '_', backup)
try:
job_operation = utils.StartOperation('Backup: %s' % backup)
backup_info = BackupInformation(parent=job_operation)
backup_info.filesystem = filesystem
backup_info.name = backup
backup_info.kinds = kinds
if selected_namespace is not None:
backup_info.namespaces = [selected_namespace]
backup_info.put(force_writes=True)
mapreduce_params = {
'done_callback_handler': BACKUP_COMPLETE_HANDLER,
'backup_info_pk': str(backup_info.key()),
'force_ops_writes': True,
}
mapper_params = dict(mapper_params)
mapper_params['filesystem'] = filesystem
if filesystem == files.GS_FILESYSTEM:
mapper_params['gs_bucket_name'] = gs_bucket_name
if len(kinds) <= max_jobs:
return [('job', job) for job in _run_map_jobs(
job_operation.key(), backup_info.key(), kinds, job_name,
BACKUP_HANDLER, INPUT_READER, OUTPUT_WRITER,
mapper_params, mapreduce_params, queue)]
else:
retry_options = taskqueue.TaskRetryOptions(task_retry_limit=1)
deferred_task = deferred.defer(_run_map_jobs_deferred,
backup, job_operation.key(),
backup_info.key(), kinds, job_name,
BACKUP_HANDLER, INPUT_READER,
OUTPUT_WRITER, mapper_params,
mapreduce_params, queue, _queue=queue,
_url=config.DEFERRED_PATH,
_retry_options=retry_options)
return [('task', deferred_task.name)]
except Exception:
logging.exception('Failed to start a datastore backup job[s] for "%s".',
backup)
if backup_info:
delete_backup_info(backup_info)
if job_operation:
job_operation.status = utils.DatastoreAdminOperation.STATUS_FAILED
job_operation.put(force_writes=True)
raise
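# Illustrative sketch (assumption, not part of the SDK): keyword values a
# handler might pass to _perform_backup for a small blobstore-backed backup.
# All literal values below are hypothetical.
def _example_backup_call():
  return _perform_backup(run_as_a_service=False,
                         kinds=['Greeting'],
                         selected_namespace=None,
                         filesystem=files.BLOBSTORE_FILESYSTEM,
                         gs_bucket_name=None,
                         backup='example_backup_2014_01_01',
                         queue='default',
                         mapper_params={'namespace': None},
                         max_jobs=10)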
class BackupLinkHandler(webapp.RequestHandler):
"""Handler to deal with requests to the backup link to backup data."""
SUFFIX = 'backup.create'
def get(self):
"""Handler for get requests to datastore_admin/backup.create."""
self.post()
def post(self):
"""Handler for post requests to datastore_admin/backup.create."""
try:
if ('X-AppEngine-TaskName' not in self.request.headers and
'X-AppEngine-Cron' not in self.request.headers):
logging.critical('Scheduled backups must be started via task queue or '
'cron.')
self.response.set_status(403)
return
backup_prefix = self.request.get('name')
if not backup_prefix:
if self.request.headers.get('X-AppEngine-Cron'):
backup_prefix = 'cron-'
else:
backup_prefix = 'link-'
backup_prefix_with_date = backup_prefix + time.strftime('%Y_%m_%d')
backup_name = backup_prefix_with_date
backup_suffix_counter = 1
while BackupInformation.name_exists(backup_name):
backup_suffix_counter += 1
backup_name = backup_prefix_with_date + '-' + str(backup_suffix_counter)
kinds = self.request.get_all('kind')
if not kinds:
self.errorResponse('Backup must include at least one kind.')
return
for kind in kinds:
if not utils.IsKindNameVisible(kind):
self.errorResponse('Invalid kind %s.' % kind)
return
namespace = self.request.get('namespace', None)
if namespace == '*':
namespace = None
mapper_params = {'namespace': namespace}
_perform_backup(self.request.get('run_as_a_service', False),
kinds,
namespace,
self.request.get('filesystem'),
self.request.get('gs_bucket_name'),
backup_name,
self.request.get('queue'),
mapper_params,
1000000)
except Exception, e:
self.errorResponse(e.message)
def errorResponse(self, message):
logging.error('Could not create backup via link: %s', message)
self.response.set_status(400, message)
class DatastoreEntityProtoInputReader(input_readers.RawDatastoreInputReader):
"""An input reader which yields datastore entity proto for a kind."""
_KEY_RANGE_ITER_CLS = db_iters.KeyRangeEntityProtoIterator
class DoBackupHandler(BaseDoHandler):
"""Handler to deal with requests from the admin console to backup data."""
SUFFIX = 'backup.do'
_get_html_page = 'do_backup.html'
_get_post_html_page = SUFFIX
def _ProcessPostRequest(self):
"""Triggers backup mapper jobs and returns their ids."""
try:
backup = self.request.get('backup_name').strip()
if not backup:
raise BackupValidationError('Unspecified backup name.')
if BackupInformation.name_exists(backup):
raise BackupValidationError('Backup "%s" already exists.' % backup)
mapper_params = self._GetBasicMapperParams()
backup_result = _perform_backup(self.request.get('run_as_a_service',
False),
self.request.get_all('kind'),
mapper_params.get('namespace'),
self.request.get('filesystem'),
self.request.get('gs_bucket_name'),
backup,
self.request.get('queue'),
mapper_params,
10)
return backup_result
except Exception, e:
logging.exception(e.message)
return [('error', e.message)]
def _run_map_jobs_deferred(backup_name, job_operation_key, backup_info_key,
kinds, job_name, backup_handler, input_reader,
output_writer, mapper_params, mapreduce_params,
queue):
backup_info = BackupInformation.get(backup_info_key)
if backup_info:
try:
_run_map_jobs(job_operation_key, backup_info_key, kinds, job_name,
backup_handler, input_reader, output_writer, mapper_params,
mapreduce_params, queue)
except BaseException:
logging.exception('Failed to start a datastore backup job[s] for "%s".',
backup_name)
delete_backup_info(backup_info)
else:
logging.info('Missing backup info, can not start backup jobs for "%s"',
backup_name)
def _run_map_jobs(job_operation_key, backup_info_key, kinds, job_name,
backup_handler, input_reader, output_writer, mapper_params,
mapreduce_params, queue):
"""Creates backup/restore MR jobs for the given operation.
Args:
job_operation_key: a key of utils.DatastoreAdminOperation entity.
backup_info_key: a key of BackupInformation entity.
kinds: a list of kinds to run the M/R for.
job_name: the M/R job name prefix.
backup_handler: M/R job completion handler.
input_reader: M/R input reader.
output_writer: M/R output writer.
mapper_params: custom parameters to pass to mapper.
mapreduce_params: dictionary parameters relevant to the whole job.
queue: the name of the queue that will be used by the M/R.
Returns:
Ids of all started mapper jobs as list of strings.
"""
backup_info = BackupInformation.get(backup_info_key)
if not backup_info:
return []
jobs = utils.RunMapForKinds(
job_operation_key,
kinds,
job_name,
backup_handler,
input_reader,
output_writer,
mapper_params,
mapreduce_params,
queue_name=queue)
backup_info.active_jobs = jobs
backup_info.put(force_writes=True)
return jobs
def get_backup_files(backup_info, selected_kinds=None):
"""Returns the backup filenames for selected kinds or all if None/Empty."""
if backup_info.blob_files:
return backup_info.blob_files
else:
kinds_backup_files = backup_info.get_kind_backup_files(selected_kinds)
return list(itertools.chain(*(
kind_backup_files.files for kind_backup_files in kinds_backup_files)))
def delete_backup_files(filesystem, backup_files):
if backup_files:
if filesystem == files.BLOBSTORE_FILESYSTEM:
blob_keys = []
for fname in backup_files:
blob_key = files.blobstore.get_blob_key(fname)
if blob_key:
blob_keys.append(blob_key)
if len(blob_keys) == MAX_BLOBS_PER_DELETE:
blobstore_api.delete(blob_keys)
blob_keys = []
if blob_keys:
blobstore_api.delete(blob_keys)
def delete_backup_info(backup_info, delete_files=True):
"""Deletes a backup including its associated files and other metadata."""
if backup_info.blob_files:
delete_backup_files(backup_info.filesystem, backup_info.blob_files)
backup_info.delete(force_writes=True)
else:
kinds_backup_files = tuple(backup_info.get_kind_backup_files())
if delete_files:
delete_backup_files(backup_info.filesystem, itertools.chain(*(
kind_backup_files.files for kind_backup_files in kinds_backup_files)))
db.delete(kinds_backup_files + (backup_info,), force_writes=True)
class DoBackupDeleteHandler(BaseDoHandler):
"""Handler to deal with datastore admin requests to delete backup data."""
SUFFIX = 'backup_delete.do'
def get(self):
self.post()
def post(self):
"""Handler for post requests to datastore_admin/backup_delete.do.
Deletes are executed and user is redirected to the base-path handler.
"""
backup_ids = self.request.get_all('backup_id')
token = self.request.get('xsrf_token')
params = ()
if backup_ids and utils.ValidateXsrfToken(token, XSRF_ACTION):
try:
for backup_info in db.get(backup_ids):
if backup_info:
delete_backup_info(backup_info)
except Exception, e:
logging.exception('Failed to delete datastore backup.')
params = [('error', e.message)]
self.SendRedirect(params=params)
class DoBackupAbortHandler(BaseDoHandler):
"""Handler to deal with datastore admin requests to abort pending backups."""
SUFFIX = 'backup_abort.do'
def get(self):
self.post()
def post(self):
"""Handler for post requests to datastore_admin/backup_abort.do.
Abort is executed and user is redirected to the base-path handler.
"""
backup_ids = self.request.get_all('backup_id')
token = self.request.get('xsrf_token')
params = ()
if backup_ids and utils.ValidateXsrfToken(token, XSRF_ACTION):
try:
for backup_info in db.get(backup_ids):
if backup_info:
operation = backup_info.parent()
if operation.parent_key():
job_id = str(operation.parent_key())
datastore_admin_service = services_client.DatastoreAdminClient()
datastore_admin_service.abort_backup(job_id)
else:
utils.AbortAdminOperation(operation.key())
delete_backup_info(backup_info)
except Exception, e:
logging.exception('Failed to abort pending datastore backup.')
params = [('error', e.message)]
self.SendRedirect(params=params)
class DoBackupRestoreHandler(BaseDoHandler):
"""Handler to restore backup data.
Deals with requests from the admin console.
"""
SUFFIX = 'backup_restore.do'
BACKUP_RESTORE_HANDLER = __name__ + '.RestoreEntity.map'
RESTORE_COMPLETE_HANDLER = __name__ + '.RestoreCompleteHandler'
INPUT_READER = input_readers.__name__ + '.RecordsReader'
_get_html_page = 'do_restore_from_backup.html'
_get_post_html_page = SUFFIX
def _ProcessPostRequest(self):
"""Triggers backup restore mapper jobs and returns their ids."""
backup_id = self.request.get('backup_id')
if not backup_id:
return [('error', 'Unspecified Backup.')]
backup = db.get(db.Key(backup_id))
if not backup:
return [('error', 'Invalid Backup id.')]
if backup.gs_handle:
if not is_readable_gs_handle(backup.gs_handle):
return [('error', 'Backup not readable')]
kinds = set(self.request.get_all('kind'))
if not (backup.blob_files or kinds):
return [('error', 'No kinds were selected')]
backup_kinds = set(backup.kinds)
difference = kinds.difference(backup_kinds)
if difference:
return [('error', 'Backup does not have kind[s] %s' %
', '.join(difference))]
if self.request.get('run_as_a_service', False):
if backup.filesystem != files.GS_FILESYSTEM:
return [('error',
'Restore as a service is only available for GS backups')]
datastore_admin_service = services_client.DatastoreAdminClient()
description = 'Remote restore job: %s' % backup.name
remote_job_id = datastore_admin_service.restore_from_backup(
description, backup_id, list(kinds))
return [('remote_job', remote_job_id)]
queue = self.request.get('queue')
job_name = 'datastore_backup_restore_%s' % re.sub(r'[^\w]', '_',
backup.name)
job_operation = None
try:
operation_name = 'Restoring %s from backup: %s' % (
', '.join(kinds) if kinds else 'all', backup.name)
job_operation = utils.StartOperation(operation_name)
mapper_params = self._GetBasicMapperParams()
kinds = list(kinds) if len(backup_kinds) != len(kinds) else []
mapper_params['files'] = get_backup_files(backup, kinds)
mapper_params['kind_filter'] = kinds
mapper_params['original_app'] = backup.original_app
mapreduce_params = {
'backup_name': backup.name,
'force_ops_writes': True,
}
shard_count = min(max(utils.MAPREDUCE_MIN_SHARDS,
len(mapper_params['files'])),
utils.MAPREDUCE_MAX_SHARDS)
job = utils.StartMap(job_operation.key(), job_name,
self.BACKUP_RESTORE_HANDLER, self.INPUT_READER, None,
mapper_params, mapreduce_params, queue_name=queue,
shard_count=shard_count)
return [('job', job)]
except Exception:
logging.exception('Failed to start a restore from backup job "%s".',
job_name)
if job_operation:
job_operation.status = utils.DatastoreAdminOperation.STATUS_FAILED
job_operation.put(force_writes=True)
raise
class DoBackupImportHandler(BaseDoHandler):
"""Handler to deal with datastore admin requests to import backup info."""
SUFFIX = 'import_backup.do'
def get(self):
self.post()
def post(self):
"""Handler for post requests to datastore_admin/import_backup.do.
Import is executed and user is redirected to the base-path handler.
"""
gs_handle = self.request.get('gs_handle')
token = self.request.get('xsrf_token')
error = None
if gs_handle and utils.ValidateXsrfToken(token, XSRF_ACTION):
try:
bucket_name, path = parse_gs_handle(gs_handle)
file_content = get_gs_object(bucket_name, path)
entities = parse_backup_info_file(file_content)
original_backup_info = entities.next()
entity = datastore.Entity(BackupInformation.kind())
entity.update(original_backup_info)
backup_info = BackupInformation.from_entity(entity)
if original_backup_info.key().app() != os.getenv('APPLICATION_ID'):
backup_info.original_app = original_backup_info.key().app()
def tx():
backup_info.put(force_writes=True)
kind_files_models = []
for entity in entities:
kind_files = backup_info.create_kind_backup_files(
entity.key().name(), entity['files'])
kind_files_models.append(kind_files)
db.put(kind_files_models, force_writes=True)
db.run_in_transaction(tx)
backup_id = str(backup_info.key())
except Exception, e:
logging.exception('Failed to Import datastore backup information.')
error = e.message
if error:
self.SendRedirect(params=[('error', error)])
elif self.request.get('Restore'):
ConfirmRestoreFromBackupHandler.Render(
self, default_backup_id=backup_id,
default_delete_backup_after_restore=True)
else:
self.SendRedirect()
class BackupInformation(db.Model):
"""An entity to keep information on a datastore backup."""
name = db.StringProperty()
kinds = db.StringListProperty()
namespaces = db.StringListProperty()
filesystem = db.StringProperty(default=files.BLOBSTORE_FILESYSTEM)
start_time = db.DateTimeProperty(auto_now_add=True)
active_jobs = db.StringListProperty()
completed_jobs = db.StringListProperty()
complete_time = db.DateTimeProperty(default=None)
blob_files = db.StringListProperty()
original_app = db.StringProperty(default=None)
gs_handle = db.TextProperty(default=None)
destination = db.StringProperty()
@classmethod
def kind(cls):
return utils.BACKUP_INFORMATION_KIND
@classmethod
def name_exists(cls, backup_name):
query = BackupInformation.all(keys_only=True)
query.filter('name =', backup_name)
return query.get() is not None
def create_kind_backup_files_key(self, kind):
return db.Key.from_path(KindBackupFiles.kind(), kind, parent=self.key())
def create_kind_backup_files(self, kind, kind_files):
return KindBackupFiles(key=self.create_kind_backup_files_key(kind),
files=kind_files)
def get_kind_backup_files(self, kinds=None):
if kinds:
return db.get([self.create_kind_backup_files_key(kind) for kind in kinds])
else:
return KindBackupFiles.all().ancestor(self).run()
class KindBackupFiles(db.Model):
"""An entity to keep files information per kind for a backup.
  A key for this model should be created using the kind as its name and the
  associated BackupInformation entity as its parent.
"""
files = db.StringListProperty(indexed=False)
@property
def backup_kind(self):
return self.key().name()
@classmethod
def kind(cls):
return utils.BACKUP_INFORMATION_FILES_KIND
def BackupCompleteHandler(operation, job_id, mapreduce_state):
"""Updates BackupInformation record for a completed mapper job."""
mapreduce_spec = mapreduce_state.mapreduce_spec
filenames = mapreduce_spec.mapper.output_writer_class().get_filenames(
mapreduce_state)
_perform_backup_complete(operation,
job_id,
mapreduce_spec.mapper.params['entity_kind'],
mapreduce_spec.params['backup_info_pk'],
mapreduce_spec.mapper.params.get('gs_bucket_name'),
filenames,
mapreduce_spec.params.get('done_callback_queue'))
@db.transactional
def _perform_backup_complete(
operation, job_id, kind, backup_info_pk, gs_bucket_name, filenames, queue):
backup_info = BackupInformation.get(backup_info_pk)
if backup_info:
if job_id in backup_info.active_jobs:
backup_info.active_jobs.remove(job_id)
backup_info.completed_jobs = list(
set(backup_info.completed_jobs + [job_id]))
if backup_info.filesystem == files.BLOBSTORE_FILESYSTEM:
filenames = drop_empty_files(filenames)
kind_backup_files = backup_info.get_kind_backup_files([kind])[0]
if kind_backup_files:
kind_backup_files.files = list(set(kind_backup_files.files + filenames))
else:
kind_backup_files = backup_info.create_kind_backup_files(kind, filenames)
db.put((backup_info, kind_backup_files), force_writes=True)
if operation.status == utils.DatastoreAdminOperation.STATUS_COMPLETED:
deferred.defer(finalize_backup_info, backup_info.key(),
gs_bucket_name,
_url=config.DEFERRED_PATH,
_queue=queue,
_transactional=True)
else:
logging.warn('BackupInfo was not found for %s', backup_info_pk)
def finalize_backup_info(backup_info_pk, gs_bucket):
"""Finalize the state of BackupInformation and creates info file for GS."""
def get_backup_info():
return BackupInformation.get(backup_info_pk)
backup_info = db.run_in_transaction(get_backup_info)
if backup_info:
complete_time = datetime.datetime.now()
backup_info.complete_time = complete_time
gs_handle = None
if backup_info.filesystem == files.GS_FILESYSTEM:
gs_handle = BackupInfoWriter(gs_bucket).write(backup_info)[0]
def set_backup_info_with_finalize_info():
backup_info = get_backup_info()
backup_info.complete_time = complete_time
backup_info.gs_handle = gs_handle
backup_info.put(force_writes=True)
db.run_in_transaction(set_backup_info_with_finalize_info)
logging.info('Backup %s completed', backup_info.name)
else:
logging.warn('Backup %s could not be found', backup_info_pk)
def parse_backup_info_file(content):
"""Returns entities iterator from a backup_info file content."""
reader = records.RecordsReader(cStringIO.StringIO(content))
version = reader.read()
if version != '1':
raise IOError('Unsupported version')
return (datastore.Entity.FromPb(record) for record in reader)
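# Illustrative sketch (assumption, not part of the SDK): consuming the entity
# iterator returned by parse_backup_info_file; the first entity is the
# BackupInformation record, followed by one entity per backed-up kind.
def _example_log_backup_info(content):
  for entity in parse_backup_info_file(content):
    logging.info('backup_info entity of kind %s', entity.kind())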
@db.non_transactional
def drop_empty_files(filenames):
"""Deletes empty files and returns filenames minus the deleted ones."""
non_empty_filenames = []
empty_file_keys = []
blobs_info = blobstore.BlobInfo.get(
[files.blobstore.get_blob_key(fn) for fn in filenames])
for filename, blob_info in itertools.izip(filenames, blobs_info):
if blob_info:
if blob_info.size > 0:
non_empty_filenames.append(filename)
else:
empty_file_keys.append(blob_info.key())
blobstore_api.delete(empty_file_keys)
return non_empty_filenames
class BackupInfoWriter(object):
"""A class for writing Datastore backup metadata files."""
def __init__(self, gs_bucket):
"""Construct a BackupInfoWriter.
Args:
gs_bucket: Required string for the target GS bucket.
"""
self.__gs_bucket = gs_bucket
def write(self, backup_info):
"""Write the metadata files for the given backup_info.
As a side effect, updates the backup_info in-memory entity object with the
gs_handle to the Backup info filename. This is not saved to the datastore.
Args:
backup_info: Required BackupInformation.
Returns:
A list with Backup info filename followed by Kind info filenames.
"""
fn = self._write_backup_info(backup_info)
return [fn] + self._write_kind_info(backup_info)
def _generate_filename(self, backup_info, suffix):
key_str = str(backup_info.key()).replace('/', '_')
return '/gs/%s/%s%s' % (self.__gs_bucket, key_str, suffix)
def _write_backup_info(self, backup_info):
"""Writes a backup_info_file.
Args:
backup_info: Required BackupInformation.
Returns:
Backup info filename.
"""
filename = self._generate_filename(backup_info, '.backup_info')
backup_info.gs_handle = filename
info_file = files.open(files.gs.create(filename), 'a', exclusive_lock=True)
try:
with records.RecordsWriter(info_file) as writer:
writer.write('1')
writer.write(db.model_to_protobuf(backup_info).SerializeToString())
for kind_files in backup_info.get_kind_backup_files():
writer.write(db.model_to_protobuf(kind_files).SerializeToString())
finally:
info_file.close(finalize=True)
return filename
def _write_kind_info(self, backup_info):
"""Writes type information schema for each kind in backup_info.
Args:
backup_info: Required BackupInformation.
Returns:
A list with all created filenames.
"""
def get_backup_files_tx():
kind_backup_files_list = []
for kind_backup_files in backup_info.get_kind_backup_files():
kind_backup_files_list.append(kind_backup_files)
return kind_backup_files_list
kind_backup_files_list = db.run_in_transaction(get_backup_files_tx)
filenames = []
for kind_backup_files in kind_backup_files_list:
backup = self._create_kind_backup(backup_info, kind_backup_files)
filename = self._generate_filename(
backup_info, '.%s.backup_info' % kind_backup_files.backup_kind)
self._write_kind_backup_info_file(filename, backup)
filenames.append(filename)
return filenames
def _create_kind_backup(self, backup_info, kind_backup_files):
"""Creates and populate a backup_pb2.Backup."""
backup = backup_pb2.Backup()
backup.backup_info.backup_name = backup_info.name
backup.backup_info.start_timestamp = datastore_types.DatetimeToTimestamp(
backup_info.start_time)
backup.backup_info.end_timestamp = datastore_types.DatetimeToTimestamp(
backup_info.complete_time)
kind = kind_backup_files.backup_kind
kind_info = backup.kind_info.add()
kind_info.kind = kind
kind_info.entity_schema.kind = kind
kind_info.file.extend(kind_backup_files.files)
entity_type_info = EntityTypeInfo(kind=kind)
for sharded_aggregation in SchemaAggregationResult.load(
backup_info.key(), kind):
if sharded_aggregation.is_partial:
kind_info.is_partial = True
if sharded_aggregation.entity_type_info:
entity_type_info.merge(sharded_aggregation.entity_type_info)
entity_type_info.populate_entity_schema(kind_info.entity_schema)
return backup
@classmethod
def _write_kind_backup_info_file(cls, filename, backup):
"""Writes a kind backup_info.
Args:
filename: The name of the file to be created as string.
backup: apphosting.ext.datastore_admin.Backup proto.
"""
f = files.open(files.gs.create(filename), 'a', exclusive_lock=True)
try:
f.write(backup.SerializeToString())
finally:
f.close(finalize=True)
class PropertyTypeInfo(json_util.JsonMixin):
"""Type information for an entity property."""
def __init__(self, name, is_repeated=False, primitive_types=None,
embedded_entities=None):
"""Construct a PropertyTypeInfo instance.
Args:
name: The name of the property as a string.
is_repeated: A boolean that indicates if the property is repeated.
primitive_types: Optional list of PrimitiveType integer values.
embedded_entities: Optional list of EntityTypeInfo.
"""
self.__name = name
self.__is_repeated = is_repeated
self.__primitive_types = set(primitive_types) if primitive_types else set()
self.__embedded_entities = {}
for entity in embedded_entities or ():
if entity.kind in self.__embedded_entities:
self.__embedded_entities[entity.kind].merge(entity)
else:
self.__embedded_entities[entity.kind] = entity
@property
def name(self):
return self.__name
@property
def is_repeated(self):
return self.__is_repeated
@property
def primitive_types(self):
return self.__primitive_types
def embedded_entities_kind_iter(self):
return self.__embedded_entities.iterkeys()
def get_embedded_entity(self, kind):
return self.__embedded_entities.get(kind)
def merge(self, other):
"""Merge a PropertyTypeInfo with this instance.
Args:
other: Required PropertyTypeInfo to merge.
Returns:
True if anything was changed. False otherwise.
Raises:
ValueError: if property names do not match.
TypeError: if other is not instance of PropertyTypeInfo.
"""
if not isinstance(other, PropertyTypeInfo):
raise TypeError('Expected PropertyTypeInfo, was %r' % (other,))
if other.__name != self.__name:
raise ValueError('Property names mismatch (%s, %s)' %
(self.__name, other.__name))
changed = False
if other.__is_repeated and not self.__is_repeated:
self.__is_repeated = True
changed = True
if not other.__primitive_types.issubset(self.__primitive_types):
self.__primitive_types = self.__primitive_types.union(
other.__primitive_types)
changed = True
for kind, other_embedded_entity in other.__embedded_entities.iteritems():
embedded_entity = self.__embedded_entities.get(kind)
if embedded_entity:
changed = embedded_entity.merge(other_embedded_entity) or changed
else:
self.__embedded_entities[kind] = other_embedded_entity
changed = True
return changed
def populate_entity_schema_field(self, entity_schema):
"""Add an populate a Field to the given entity_schema.
Args:
entity_schema: apphosting.ext.datastore_admin.EntitySchema proto.
"""
if not (self.__primitive_types or self.__embedded_entities):
return
field = entity_schema.field.add()
field.name = self.__name
field_type = field.type.add()
field_type.is_list = self.__is_repeated
field_type.primitive_type.extend(self.__primitive_types)
for embedded_entity in self.__embedded_entities.itervalues():
embedded_entity_schema = field_type.embedded_schema.add()
embedded_entity.populate_entity_schema(embedded_entity_schema)
def to_json(self):
json = dict()
json['name'] = self.__name
json['is_repeated'] = self.__is_repeated
json['primitive_types'] = list(self.__primitive_types)
json['embedded_entities'] = [e.to_json() for e in
self.__embedded_entities.itervalues()]
return json
@classmethod
def from_json(cls, json):
return cls(json['name'], json['is_repeated'], json.get('primitive_types'),
[EntityTypeInfo.from_json(entity_json) for entity_json
in json.get('embedded_entities')])
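# Round-trip sketch for the JSON serialization above (the property name and
# type below are illustrative only):
#
#   info = PropertyTypeInfo('price', primitive_types=[backup_pb2.EntitySchema.FLOAT])
#   assert PropertyTypeInfo.from_json(info.to_json()).name == 'price'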
class EntityTypeInfo(json_util.JsonMixin):
"""Type information for an entity."""
def __init__(self, kind=None, properties=None):
"""Construct an EntityTypeInfo instance.
Args:
kind: An optional kind name as string.
properties: An optional list of PropertyTypeInfo.
"""
self.__kind = kind
self.__properties = {}
for property_type_info in properties or ():
if property_type_info.name in self.__properties:
self.__properties[property_type_info.name].merge(property_type_info)
else:
self.__properties[property_type_info.name] = property_type_info
@property
def kind(self):
return self.__kind
def properties_name_iter(self):
return self.__properties.iterkeys()
def get_property(self, name):
return self.__properties.get(name)
def merge(self, other):
"""Merge an EntityTypeInfo with this instance.
Args:
other: Required EntityTypeInfo to merge.
Returns:
True if anything was changed. False otherwise.
Raises:
ValueError: if kinds do not match.
TypeError: if other is not instance of EntityTypeInfo.
"""
if not isinstance(other, EntityTypeInfo):
raise TypeError('Expected EntityTypeInfo, was %r' % (other,))
if other.__kind != self.__kind:
raise ValueError('Kinds mismatch (%s, %s)' % (self.__kind, other.__kind))
changed = False
for name, other_property in other.__properties.iteritems():
self_property = self.__properties.get(name)
if self_property:
changed = self_property.merge(other_property) or changed
else:
self.__properties[name] = other_property
changed = True
return changed
def populate_entity_schema(self, entity_schema):
"""Populates the given entity_schema with values from this instance.
Args:
entity_schema: apphosting.ext.datastore_admin.EntitySchema proto.
"""
if self.__kind:
entity_schema.kind = self.__kind
for property_type_info in self.__properties.itervalues():
property_type_info.populate_entity_schema_field(entity_schema)
def to_json(self):
return {
'kind': self.__kind,
'properties': [p.to_json() for p in self.__properties.itervalues()]
}
@classmethod
def from_json(cls, json):
kind = json.get('kind')
properties_json = json.get('properties')
if properties_json:
return cls(kind, [PropertyTypeInfo.from_json(p) for p in properties_json])
else:
return cls(kind)
@classmethod
def create_from_entity_proto(cls, entity_proto):
"""Creates and populates an EntityTypeInfo from an EntityProto."""
properties = [cls.__get_property_type_info(property_proto) for
property_proto in itertools.chain(
entity_proto.property_list(),
entity_proto.raw_property_list())]
kind = utils.get_kind_from_entity_pb(entity_proto)
return cls(kind, properties)
@classmethod
def __get_property_type_info(cls, property_proto):
"""Returns the type mapping for the provided property."""
name = property_proto.name()
is_repeated = bool(property_proto.multiple())
primitive_type = None
entity_type = None
if property_proto.has_meaning():
primitive_type = MEANING_TO_PRIMITIVE_TYPE.get(property_proto.meaning())
if primitive_type is None:
value = property_proto.value()
if value.has_int64value():
primitive_type = backup_pb2.EntitySchema.INTEGER
elif value.has_booleanvalue():
primitive_type = backup_pb2.EntitySchema.BOOLEAN
elif value.has_stringvalue():
if property_proto.meaning() == entity_pb.Property.ENTITY_PROTO:
entity_proto = entity_pb.EntityProto()
try:
entity_proto.ParsePartialFromString(value.stringvalue())
except Exception:
pass
else:
entity_type = EntityTypeInfo.create_from_entity_proto(entity_proto)
else:
primitive_type = backup_pb2.EntitySchema.STRING
elif value.has_doublevalue():
primitive_type = backup_pb2.EntitySchema.FLOAT
elif value.has_pointvalue():
primitive_type = backup_pb2.EntitySchema.GEO_POINT
elif value.has_uservalue():
primitive_type = backup_pb2.EntitySchema.USER
elif value.has_referencevalue():
primitive_type = backup_pb2.EntitySchema.REFERENCE
return PropertyTypeInfo(
name, is_repeated,
(primitive_type,) if primitive_type is not None else None,
(entity_type,) if entity_type else None)
class SchemaAggregationResult(db.Model):
"""Persistent aggregated type information for a kind.
An instance can be retrieved via the load method or created
using the create method. An instance aggregates all type information
for all seen embedded_entities via the merge method, and is persisted when
needed using the model's put method.
"""
entity_type_info = json_util.JsonProperty(
EntityTypeInfo, default=EntityTypeInfo(), indexed=False)
is_partial = db.BooleanProperty(default=False)
def merge(self, other):
"""Merge a SchemaAggregationResult or an EntityTypeInfo with this instance.
Args:
other: Required SchemaAggregationResult or EntityTypeInfo to merge.
Returns:
True if anything was changed. False otherwise.
"""
if self.is_partial:
return False
if isinstance(other, SchemaAggregationResult):
other = other.entity_type_info
return self.entity_type_info.merge(other)
@classmethod
def _get_parent_key(cls, backup_id, kind_name):
return datastore_types.Key.from_path('Kind', kind_name, parent=backup_id)
@classmethod
def create(cls, backup_id, kind_name, shard_id):
"""Create SchemaAggregationResult instance.
Args:
backup_id: Required BackupInformation Key.
kind_name: Required kind name as string.
shard_id: Required shard id as string.
Returns:
A new SchemaAggregationResult instance.
"""
parent = cls._get_parent_key(backup_id, kind_name)
return SchemaAggregationResult(
key_name=shard_id, parent=parent,
entity_type_info=EntityTypeInfo(kind=kind_name))
@classmethod
def load(cls, backup_id, kind_name, shard_id=None):
"""Retrieve SchemaAggregationResult from the Datastore.
Args:
backup_id: Required BackupInformation Key.
kind_name: Required kind name as string.
shard_id: Optional shard id as string.
Returns:
SchemaAggregationResult iterator or an entity if shard_id not None.
"""
parent = cls._get_parent_key(backup_id, kind_name)
if shard_id:
key = datastore_types.Key.from_path(cls.kind(), shard_id, parent=parent)
return SchemaAggregationResult.get(key)
else:
return db.Query(cls).ancestor(parent).run()
@classmethod
def kind(cls):
return utils.BACKUP_INFORMATION_KIND_TYPE_INFO
class SchemaAggregationPool(object):
"""An MR pool to aggregation type information per kind."""
def __init__(self, backup_id, kind, shard_id):
"""Construct SchemaAggregationPool instance.
Args:
backup_id: Required BackupInformation Key.
kind: Required kind name as string.
shard_id: Required shard id as string.
"""
self.__backup_id = backup_id
self.__kind = kind
self.__shard_id = shard_id
self.__aggregation = SchemaAggregationResult.load(backup_id, kind, shard_id)
if not self.__aggregation:
self.__aggregation = SchemaAggregationResult.create(backup_id, kind,
shard_id)
self.__needs_save = True
else:
self.__needs_save = False
def merge(self, entity_type_info):
"""Merge EntityTypeInfo into aggregated type information."""
if self.__aggregation.merge(entity_type_info):
self.__needs_save = True
def flush(self):
"""Save aggregated type information to the datastore if changed."""
if self.__needs_save:
def update_aggregation_tx():
aggregation = SchemaAggregationResult.load(
self.__backup_id, self.__kind, self.__shard_id)
if aggregation:
if aggregation.merge(self.__aggregation):
aggregation.put(force_writes=True)
self.__aggregation = aggregation
else:
self.__aggregation.put(force_writes=True)
def mark_aggregation_as_partial_tx():
aggregation = SchemaAggregationResult.load(
self.__backup_id, self.__kind, self.__shard_id)
if aggregation is None:
aggregation = SchemaAggregationResult.create(
self.__backup_id, self.__kind, self.__shard_id)
aggregation.is_partial = True
aggregation.put(force_writes=True)
self.__aggregation = aggregation
try:
db.run_in_transaction(update_aggregation_tx)
except apiproxy_errors.RequestTooLargeError:
db.run_in_transaction(mark_aggregation_as_partial_tx)
self.__needs_save = False
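# Sketch of the pool's per-shard lifecycle ('backup_key', 'proto' and the kind
# name are placeholders; in practice AggregateSchema below registers the pool
# and the mapreduce context flushes it):
#
#   pool = SchemaAggregationPool(backup_key, 'Order', shard_id)
#   pool.merge(EntityTypeInfo.create_from_entity_proto(proto))
#   pool.flush()  # persists the aggregation only if something changed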
class AggregateSchema(op.Operation):
"""An MR Operation to aggregation type information for a kind.
This operation will register an MR pool, SchemaAggregationPool, if
one is not already registered and will invoke the pool's merge operation
per entity. The pool is responsible for keeping a persistent state of
type aggregation using the sharded db model, SchemaAggregationResult.
"""
def __init__(self, entity_proto):
self.__entity_info = EntityTypeInfo.create_from_entity_proto(entity_proto)
def __call__(self, ctx):
pool = ctx.get_pool('schema_aggregation_pool')
if not pool:
backup_id = datastore_types.Key(
context.get().mapreduce_spec.params['backup_info_pk'])
pool = SchemaAggregationPool(
backup_id, self.__entity_info.kind, ctx.shard_id)
ctx.register_pool('schema_aggregation_pool', pool)
pool.merge(self.__entity_info)
class BackupEntity(object):
"""A class which dumps the entity to the writer."""
def map(self, entity_proto):
"""Backup entity map handler.
Args:
entity_proto: An instance of entity_pb.EntityProto.
Yields:
A serialized entity_pb.EntityProto as a string
"""
yield entity_proto.SerializeToString()
yield AggregateSchema(entity_proto)
class RestoreEntity(object):
"""A class which restore the entity to datastore."""
def __init__(self):
self.initialized = False
self.kind_filter = None
self.app_id = None
def initialize(self):
"""Initialize a restore mapper instance."""
if self.initialized:
return
mapper_params = get_mapper_params_from_context()
kind_filter = mapper_params.get('kind_filter')
self.kind_filter = set(kind_filter) if kind_filter else None
original_app = mapper_params.get('original_app')
target_app = os.getenv('APPLICATION_ID')
if original_app and target_app != original_app:
self.app_id = target_app
self.initialized = True
def map(self, record):
"""Restore entity map handler.
Args:
record: A serialized entity_pb.EntityProto.
Yields:
An operation.db.Put for the mapped entity.
"""
self.initialize()
pb = entity_pb.EntityProto(contents=record)
if self.app_id:
utils.FixKeys(pb, self.app_id)
if not self.kind_filter or (
utils.get_kind_from_entity_pb(pb) in self.kind_filter):
yield utils.Put(pb)
if self.app_id:
yield utils.ReserveKey(datastore_types.Key._FromPb(pb.key()))
def get_mapper_params_from_context():
"""Get mapper params from MR context. Split out for ease of testing."""
return context.get().mapreduce_spec.mapper.params
def validate_gs_bucket_name(bucket_name):
"""Validate the format of the given bucket_name.
Validation rules are based on:
https://developers.google.com/storage/docs/bucketnaming#requirements
Args:
bucket_name: The bucket name to validate.
Raises:
BackupValidationError: If the bucket name is invalid.
"""
if len(bucket_name) > MAX_BUCKET_LEN:
raise BackupValidationError(
'Bucket name length should not be longer than %d' % MAX_BUCKET_LEN)
if len(bucket_name) < MIN_BUCKET_LEN:
raise BackupValidationError(
'Bucket name length should not be shorter than %d' % MIN_BUCKET_LEN)
if bucket_name.lower().startswith('goog'):
raise BackupValidationError(
'Bucket name should not start with a "goog" prefix')
bucket_elements = bucket_name.split('.')
for bucket_element in bucket_elements:
if len(bucket_element) > MAX_BUCKET_SEGMENT_LEN:
raise BackupValidationError(
'Segment length of bucket name should not be longer than %d' %
MAX_BUCKET_SEGMENT_LEN)
if not re.match(BUCKET_PATTERN, bucket_name):
raise BackupValidationError('Invalid bucket name "%s"' % bucket_name)
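# Illustrative examples for the validation above:
#
#   validate_gs_bucket_name('my-backup-bucket')   # passes silently
#   validate_gs_bucket_name('goog-reserved')      # raises BackupValidationError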
def is_accessible_bucket_name(bucket_name):
"""Returns True if the application has access to the specified bucket."""
scope = config.GoogleApiScope('devstorage.read_write')
bucket_url = config.GsBucketURL(bucket_name)
auth_token, _ = app_identity.get_access_token(scope)
result = urlfetch.fetch(bucket_url, method=urlfetch.HEAD, headers={
'Authorization': 'OAuth %s' % auth_token,
'x-goog-api-version': '2'})
return result and result.status_code == 200
def verify_bucket_writable(bucket_name):
"""Verify the application can write to the specified bucket.
Args:
bucket_name: The bucket to verify.
Raises:
BackupValidationError: If the bucket is not writable.
"""
path = '/gs/%s' % bucket_name
try:
file_names = files.gs.listdir(path,
{'prefix': TEST_WRITE_FILENAME_PREFIX,
'max_keys': MAX_KEYS_LIST_SIZE})
except (files.InvalidParameterError, files.PermissionDeniedError):
raise BackupValidationError('Bucket "%s" not accessible' % bucket_name)
except files.InvalidFileNameError:
raise BackupValidationError('Bucket "%s" does not exist' % bucket_name)
file_name = '%s/%s.tmp' % (path, TEST_WRITE_FILENAME_PREFIX)
file_name_try = 0
while True:
if file_name_try >= MAX_TEST_FILENAME_TRIES:
return
if file_name not in file_names:
break
gen = random.randint(0, 9999)
file_name = '%s/%s_%s.tmp' % (path, TEST_WRITE_FILENAME_PREFIX, gen)
file_name_try += 1
try:
test_file = files.open(files.gs.create(file_name), 'a', exclusive_lock=True)
try:
test_file.write('test')
finally:
test_file.close(finalize=True)
except files.PermissionDeniedError:
raise BackupValidationError('Bucket "%s" is not writable' % bucket_name)
try:
files.delete(file_name)
except (files.InvalidArgumentError, files.InvalidFileNameError, IOError):
logging.warn('Failed to delete test file %s', file_name)
def is_readable_gs_handle(gs_handle):
"""Return True if the application can read the specified gs_handle."""
try:
with files.open(gs_handle) as bak_file:
bak_file.read(1)
except files.PermissionDeniedError:
return False
return True
def parse_gs_handle(gs_handle):
"""Splits [/gs/]?bucket_name[/folder]*[/file]? to (bucket_name, path | '')."""
if gs_handle.startswith('/'):
filesystem = gs_handle[1:].split('/', 1)[0]
if filesystem == 'gs':
gs_handle = gs_handle[4:]
else:
raise BackupValidationError('Unsupported filesystem: %s' % filesystem)
tokens = gs_handle.split('/', 1)
return (tokens[0], '') if len(tokens) == 1 else tuple(tokens)
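# Examples of the split performed above (handles are illustrative):
#
#   parse_gs_handle('/gs/my-bucket/backups/b1.backup_info')
#       -> ('my-bucket', 'backups/b1.backup_info')
#   parse_gs_handle('my-bucket')
#       -> ('my-bucket', '')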
def validate_and_canonicalize_gs_bucket(gs_bucket_name):
bucket_name, path = parse_gs_handle(gs_bucket_name)
gs_bucket_name = ('%s/%s' % (bucket_name, path)).rstrip('/')
validate_gs_bucket_name(bucket_name)
verify_bucket_writable(bucket_name)
return gs_bucket_name
def list_bucket_files(bucket_name, prefix, max_keys=1000):
"""Returns a listing of of a bucket that matches the given prefix."""
scope = config.GoogleApiScope('devstorage.read_only')
bucket_url = config.GsBucketURL(bucket_name)
url = bucket_url + '?'
query = [('max-keys', max_keys)]
if prefix:
query.append(('prefix', prefix))
url += urllib.urlencode(query)
auth_token, _ = app_identity.get_access_token(scope)
result = urlfetch.fetch(url, method=urlfetch.GET, headers={
'Authorization': 'OAuth %s' % auth_token,
'x-goog-api-version': '2'})
if result and result.status_code == 200:
doc = xml.dom.minidom.parseString(result.content)
return [node.childNodes[0].data for node in doc.getElementsByTagName('Key')]
raise BackupValidationError('Request to Google Cloud Storage failed')
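# Usage sketch (bucket name and prefix are illustrative):
#
#   keys = list_bucket_files('my-backup-bucket', prefix='datastore_backup_')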
def get_gs_object(bucket_name, path):
"""Returns a listing of of a bucket that matches the given prefix."""
scope = config.GoogleApiScope('devstorage.read_only')
bucket_url = config.GsBucketURL(bucket_name)
url = bucket_url + path
auth_token, _ = app_identity.get_access_token(scope)
result = urlfetch.fetch(url, method=urlfetch.GET, headers={
'Authorization': 'OAuth %s' % auth_token,
'x-goog-api-version': '2'})
if result and result.status_code == 200:
return result.content
if result and result.status_code == 403:
raise BackupValidationError(
'Requested path %s is not accessible/access denied' % url)
if result and result.status_code == 404:
raise BackupValidationError('Requested path %s was not found' % url)
raise BackupValidationError('Error encountered accessing requested path %s' %
url)
def get_queue_names(app_id=None, max_rows=100):
"""Returns a list with all non-special queue names for app_id."""
rpc = apiproxy_stub_map.UserRPC('taskqueue')
request = taskqueue_service_pb.TaskQueueFetchQueuesRequest()
response = taskqueue_service_pb.TaskQueueFetchQueuesResponse()
if app_id:
request.set_app_id(app_id)
request.set_max_rows(max_rows)
queues = ['default']
try:
rpc.make_call('FetchQueues', request, response)
rpc.check_success()
for queue in response.queue_list():
if (queue.mode() == taskqueue_service_pb.TaskQueueMode.PUSH and
not queue.queue_name().startswith('__') and
queue.queue_name() != 'default'):
queues.append(queue.queue_name())
except Exception:
logging.exception('Failed to get queue names.')
return queues
def handlers_list(base_path):
return [
(r'%s/%s' % (base_path, BackupLinkHandler.SUFFIX),
BackupLinkHandler),
(r'%s/%s' % (base_path, ConfirmBackupHandler.SUFFIX),
ConfirmBackupHandler),
(r'%s/%s' % (base_path, DoBackupHandler.SUFFIX), DoBackupHandler),
(r'%s/%s' % (base_path, DoBackupRestoreHandler.SUFFIX),
DoBackupRestoreHandler),
(r'%s/%s' % (base_path, DoBackupDeleteHandler.SUFFIX),
DoBackupDeleteHandler),
(r'%s/%s' % (base_path, DoBackupAbortHandler.SUFFIX),
DoBackupAbortHandler),
(r'%s/%s' % (base_path, DoBackupImportHandler.SUFFIX),
DoBackupImportHandler),
]
|
[] |
[] |
[
"HTTP_X_APPENGINE_QUEUENAME",
"APPLICATION_ID"
] |
[]
|
["HTTP_X_APPENGINE_QUEUENAME", "APPLICATION_ID"]
|
python
| 2 | 0 | |
flash/text/classification/data.py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from datasets import DatasetDict, load_dataset
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from torch import Tensor
from transformers import AutoTokenizer, default_data_collator
from transformers.modeling_outputs import SequenceClassifierOutput
from flash.core.classification import ClassificationState
from flash.data.auto_dataset import AutoDataset
from flash.data.data_module import DataModule
from flash.data.process import Postprocess, Preprocess
class TextClassificationPreprocess(Preprocess):
def __init__(
self,
input: str,
backbone: str,
max_length: int,
target: str,
filetype: str,
train_file: Optional[str] = None,
label_to_class_mapping: Optional[Dict[str, int]] = None,
):
"""
This class contains the preprocessing logic for text classification.
Args:
# tokenizer: Hugging Face Tokenizer. # TODO: Add back a tokenizer argument and make backbone optional?
input: The field storing the text to be classified.
backbone: Name of the Hugging Face backbone whose tokenizer is used.
max_length: Maximum number of tokens within a single sentence.
target: The field storing the class id of the associated text.
filetype: .csv or .json format type.
train_file: Path to the training data, used to compute ``label_to_class_mapping`` when it is not given.
label_to_class_mapping: Dictionary mapping target labels to class indexes.
"""
super().__init__()
if label_to_class_mapping is None:
if train_file is not None:
label_to_class_mapping = self.get_label_to_class_mapping(train_file, target, filetype)
else:
raise MisconfigurationException(
"Either ``label_to_class_mapping`` or ``train_file`` needs to be provided"
)
self.backbone = backbone
self.tokenizer = AutoTokenizer.from_pretrained(backbone, use_fast=True)
self.input = input
self.filetype = filetype
self.max_length = max_length
self.label_to_class_mapping = label_to_class_mapping
self.target = target
self._tokenize_fn = partial(
self._tokenize_fn,
tokenizer=self.tokenizer,
input=self.input,
max_length=self.max_length,
truncation=True,
padding="max_length"
)
class_to_label_mapping = ['CLASS_UNKNOWN'] * (max(self.label_to_class_mapping.values()) + 1)
for label, cls in self.label_to_class_mapping.items():
class_to_label_mapping[cls] = label
self.set_state(ClassificationState(class_to_label_mapping))
def get_state_dict(self) -> Dict[str, Any]:
return {
"input": self.input,
"backbone": self.backbone,
"max_length": self.max_length,
"target": self.target,
"filetype": self.filetype,
"label_to_class_mapping": self.label_to_class_mapping,
}
@classmethod
def load_state_dict(cls, state_dict: Dict[str, Any], strict: bool):
return cls(**state_dict)
def per_batch_transform(self, batch: Any) -> Any:
if "labels" not in batch:
# todo: understand why an extra dimension has been added.
if batch["input_ids"].dim() == 3:
batch["input_ids"] = batch["input_ids"].squeeze(0)
return batch
@staticmethod
def _tokenize_fn(
ex: Union[Dict[str, str], str],
tokenizer=None,
input: str = None,
max_length: int = None,
**kwargs
) -> Dict[str, Any]:
"""This function is used to tokenize sentences using the provided tokenizer."""
if isinstance(ex, dict):
ex = ex[input]
return tokenizer(ex, max_length=max_length, **kwargs)
def collate(self, samples: Any) -> Tensor:
"""Override to convert a set of samples to a batch"""
if isinstance(samples, dict):
samples = [samples]
return default_data_collator(samples)
def _transform_label(self, ex: Dict[str, str]):
ex[self.target] = self.label_to_class_mapping[ex[self.target]]
return ex
@staticmethod
def get_label_to_class_mapping(file: str, target: str, filetype: str) -> Dict[str, int]:
data_files = {'train': file}
dataset_dict = load_dataset(filetype, data_files=data_files)
label_to_class_mapping = {v: k for k, v in enumerate(list(sorted(list(set(dataset_dict['train'][target])))))}
return label_to_class_mapping
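# Sketch of the mapping built above: for a target column containing the labels
# ['spam', 'ham', 'spam'] (illustrative values), the result is {'ham': 0, 'spam': 1}.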
def load_data(
self,
filepath: str,
dataset: AutoDataset,
columns: Union[List[str], Tuple[str]] = ("input_ids", "attention_mask", "labels"),
use_full: bool = True
):
data_files = {}
stage = dataset.running_stage.value
data_files[stage] = str(filepath)
# FLASH_TESTING is set in the CI to run faster.
if use_full and os.getenv("FLASH_TESTING", "0") == "0":
dataset_dict = load_dataset(self.filetype, data_files=data_files)
else:
# used for debugging. Avoid processing the entire dataset # noqa E265
dataset_dict = DatasetDict({
stage: load_dataset(self.filetype, data_files=data_files, split=[f'{stage}[:20]'])[0]
})
dataset_dict = dataset_dict.map(self._tokenize_fn, batched=True)
# convert labels to ids
if not self.predicting:
dataset_dict = dataset_dict.map(self._transform_label)
dataset_dict = dataset_dict.map(self._tokenize_fn, batched=True)
# Hugging Face models expect target to be named ``labels``.
if not self.predicting and self.target != "labels":
dataset_dict.rename_column_(self.target, "labels")
dataset_dict.set_format("torch", columns=columns)
if not self.predicting:
dataset.num_classes = len(self.label_to_class_mapping)
return dataset_dict[stage]
def predict_load_data(self, sample: Any, dataset: AutoDataset):
if isinstance(sample, str) and os.path.isfile(sample) and sample.endswith(".csv"):
return self.load_data(sample, dataset, columns=["input_ids", "attention_mask"])
else:
if isinstance(sample, str):
sample = [sample]
if isinstance(sample, list) and all(isinstance(s, str) for s in sample):
return [self._tokenize_fn(s) for s in sample]
else:
raise MisconfigurationException("Currently, only a single sentence or a list of sentences is supported")
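# Prediction inputs can therefore be either a CSV file path or raw sentences,
# e.g. (illustrative): preprocess.predict_load_data(["some text to classify"], dataset)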
class TextClassificationPostProcess(Postprocess):
def per_batch_transform(self, batch: Any) -> Any:
if isinstance(batch, SequenceClassifierOutput):
batch = batch.logits
return super().per_batch_transform(batch)
class TextClassificationData(DataModule):
"""Data Module for text classification tasks"""
preprocess_cls = TextClassificationPreprocess
postprocess_cls = TextClassificationPostProcess
@property
def num_classes(self) -> int:
return len(self._preprocess.label_to_class_mapping)
@classmethod
def from_files(
cls,
train_file: Optional[str],
input: Optional[str] = 'input',
target: Optional[str] = 'labels',
filetype: str = "csv",
backbone: str = "prajjwal1/bert-tiny",
val_file: Optional[str] = None,
test_file: Optional[str] = None,
predict_file: Optional[str] = None,
max_length: int = 128,
label_to_class_mapping: Optional[dict] = None,
batch_size: int = 16,
num_workers: Optional[int] = None,
preprocess: Optional[Preprocess] = None,
postprocess: Optional[Postprocess] = None,
) -> 'TextClassificationData':
"""Creates a TextClassificationData object from files.
Args:
train_file: Path to training data.
input: The field storing the text to be classified.
target: The field storing the class id of the associated text.
filetype: .csv or .json
backbone: Tokenizer backbone to use, can use any HuggingFace tokenizer.
val_file: Path to validation data.
test_file: Path to test data.
batch_size: The batch size to use for parallel loading. Defaults to 16.
num_workers: The number of workers to use for parallelized loading.
Defaults to None which equals the number of available CPU threads,
or 0 for Darwin platform.
Returns:
TextClassificationData: The constructed data module.
Examples::
train_df = pd.read_csv("train_data.csv")
tab_data = TabularData.from_df(train_df, target="fraud",
num_cols=["account_value"],
cat_cols=["account_type"])
"""
preprocess = preprocess or cls.preprocess_cls(
input,
backbone,
max_length,
target,
filetype,
train_file,
label_to_class_mapping,
)
postprocess = postprocess or cls.postprocess_cls()
return cls.from_load_data_inputs(
train_load_data_input=train_file,
val_load_data_input=val_file,
test_load_data_input=test_file,
predict_load_data_input=predict_file,
batch_size=batch_size,
num_workers=num_workers,
preprocess=preprocess,
postprocess=postprocess,
)
@classmethod
def from_file(
cls,
predict_file: str,
input: str,
backbone="bert-base-cased",
filetype="csv",
max_length: int = 128,
label_to_class_mapping: Optional[dict] = None,
batch_size: int = 16,
num_workers: Optional[int] = None,
preprocess: Optional[Preprocess] = None,
postprocess: Optional[Postprocess] = None,
) -> 'TextClassificationData':
"""Creates a TextClassificationData object from files.
Args:
predict_file: Path to training data.
input: The field storing the text to be classified.
filetype: .csv or .json
backbone: Tokenizer backbone to use, can use any HuggingFace tokenizer.
batch_size: The batch size to use for parallel loading. Defaults to 16.
num_workers: The number of workers to use for parallelized loading.
Defaults to None which equals the number of available CPU threads,
or 0 for Darwin platform.
"""
return cls.from_files(
None,
input=input,
target=None,
filetype=filetype,
backbone=backbone,
val_file=None,
test_file=None,
predict_file=predict_file,
max_length=max_length,
label_to_class_mapping=label_to_class_mapping,
batch_size=batch_size,
num_workers=num_workers,
preprocess=preprocess,
postprocess=postprocess,
)
|
[] |
[] |
[
"FLASH_TESTING"
] |
[]
|
["FLASH_TESTING"]
|
python
| 1 | 0 | |
app.py
|
from flask import Flask
import os
app = Flask(__name__)
port = os.getenv('PORT', '5000')
@app.route("/")
def hello():
return "Hello World!"
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(port))
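# Example (sketch): override the default port via the environment, e.g.
#   PORT=8080 python app.py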
|
[] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
python
| 1 | 0 | |
cmd/addrserver/addrserver.go
|
// Copyright 2020 Demian Harvill
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Command line gRPC server for MServiceAddrbook
package main
import (
"fmt"
"io"
"net"
"os"
"strconv"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/gaterace/addrbook/pkg/addrauth"
"github.com/gaterace/addrbook/pkg/addrservice"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"github.com/kylelemons/go-gypsy/yaml"
"database/sql"
_ "github.com/go-sql-driver/mysql"
)
func main() {
configPath := os.Getenv("ADDR_CONF")
if configPath == "" {
configPath = "conf.yaml"
}
config, err := yaml.ReadFile(configPath)
if err != nil {
fmt.Printf("configuration not found: " + configPath)
os.Exit(1)
}
log_file, _ := config.Get("log_file")
cert_file, _ := config.Get("cert_file")
key_file, _ := config.Get("key_file")
tls, _ := config.GetBool("tls")
port, _ := config.GetInt("port")
db_user, _ := config.Get("db_user")
db_pwd, _ := config.Get("db_pwd")
db_transport, _ := config.Get("db_transport")
jwt_pub_file, _ := config.Get("jwt_pub_file")
var logWriter io.Writer
if log_file == "" {
logWriter = os.Stderr
} else {
logfile, _ := os.OpenFile(log_file, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
defer logfile.Close()
logWriter = logfile
}
logger := log.NewLogfmtLogger(log.NewSyncWriter(logWriter))
logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
level.Info(logger).Log("log_file", log_file)
level.Info(logger).Log("cert_file", cert_file)
level.Info(logger).Log("key_file", key_file)
level.Info(logger).Log("tls", tls)
level.Info(logger).Log("port", port)
level.Info(logger).Log("db_user", db_user)
level.Info(logger).Log("db_transport", db_transport)
level.Info(logger).Log("jwt_pub_file", jwt_pub_file)
if port == 0 {
port = 50057
}
listen_port := ":" + strconv.Itoa(int(port))
lis, err := net.Listen("tcp", listen_port)
if err != nil {
level.Error(logger).Log("what", "net.listen", "error", err)
os.Exit(1)
}
var opts []grpc.ServerOption
if tls {
creds, err := credentials.NewServerTLSFromFile(cert_file, key_file)
if err != nil {
level.Error(logger).Log("what", "Failed to generate credentials", "error", err)
os.Exit(1)
}
opts = []grpc.ServerOption{grpc.Creds(creds)}
}
s := grpc.NewServer(opts...)
addrService := addrservice.NewAddrService()
sqlDb, err := SetupDatabaseConnections(db_user, db_pwd, db_transport)
if err != nil {
level.Error(logger).Log("what", "SetupDatabaseConnections", "error", err)
os.Exit(1)
}
addrService.SetLogger(logger)
addrService.SetDatabaseConnection(sqlDb)
addrAuth := addrauth.NewAddrAuth(addrService)
addrAuth.SetLogger(logger)
addrAuth.SetPublicKey(jwt_pub_file)
addrAuth.SetDatabaseConnection(sqlDb)
err = addrAuth.NewApiServer(s)
if err != nil {
level.Error(logger).Log("what", "NewApiServer", "error", err)
os.Exit(1)
}
level.Info(logger).Log("msg", "starting grpc server")
err = s.Serve(lis)
if err != nil {
level.Error(logger).Log("what", "Serve", "error", err)
}
level.Info(logger).Log("msg", "shutting down grpc server")
}
// Helper to set up the database connection.
func SetupDatabaseConnections(db_user string, db_pwd string, db_transport string) (*sql.DB, error) {
var sqlDb *sql.DB
endpoint := db_user + ":" + db_pwd + "@" + db_transport + "/addrbook"
var err error
sqlDb, err = sql.Open("mysql", endpoint)
if err == nil {
err = sqlDb.Ping()
if err != nil {
sqlDb = nil
}
}
return sqlDb, err
}
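// Example conf.yaml consumed by main() above (all values are placeholders):
//
//   log_file: /var/log/addrserver.log
//   cert_file: /etc/addrbook/cert.pem
//   key_file: /etc/addrbook/key.pem
//   tls: true
//   port: 50057
//   db_user: addruser
//   db_pwd: secret
//   db_transport: tcp(127.0.0.1:3306)
//   jwt_pub_file: /etc/addrbook/jwt_public.pem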
|
[
"\"ADDR_CONF\""
] |
[] |
[
"ADDR_CONF"
] |
[]
|
["ADDR_CONF"]
|
go
| 1 | 0 | |
tests/test_main.py
|
import os
import subprocess
import sys
from django.test import TestCase
from django.core.exceptions import ImproperlyConfigured
from unittest.mock import patch
from configurations.importer import ConfigurationImporter
ROOT_DIR = os.path.dirname(os.path.dirname(__file__))
TEST_PROJECT_DIR = os.path.join(ROOT_DIR, 'test_project')
class MainTests(TestCase):
def test_simple(self):
from tests.settings import main
self.assertEqual(main.ATTRIBUTE_SETTING, True)
self.assertEqual(main.PROPERTY_SETTING, 1)
self.assertEqual(main.METHOD_SETTING, 2)
self.assertEqual(main.LAMBDA_SETTING, 3)
self.assertNotEqual(main.PRISTINE_LAMBDA_SETTING, 4)
self.assertTrue(lambda: callable(main.PRISTINE_LAMBDA_SETTING))
self.assertNotEqual(main.PRISTINE_FUNCTION_SETTING, 5)
self.assertTrue(lambda: callable(main.PRISTINE_FUNCTION_SETTING))
self.assertEqual(main.ALLOWED_HOSTS, ['base'])
self.assertEqual(main.PRE_SETUP_TEST_SETTING, 6)
self.assertRaises(AttributeError, lambda: main.POST_SETUP_TEST_SETTING)
self.assertEqual(main.Test.POST_SETUP_TEST_SETTING, 7)
def test_global_arrival(self):
from django.conf import settings
self.assertEqual(settings.PROPERTY_SETTING, 1)
self.assertRaises(AttributeError, lambda: settings._PRIVATE_SETTING)
self.assertNotEqual(settings.PRISTINE_LAMBDA_SETTING, 4)
self.assertTrue(lambda: callable(settings.PRISTINE_LAMBDA_SETTING))
self.assertNotEqual(settings.PRISTINE_FUNCTION_SETTING, 5)
self.assertTrue(lambda: callable(settings.PRISTINE_FUNCTION_SETTING))
self.assertEqual(settings.PRE_SETUP_TEST_SETTING, 6)
@patch.dict(os.environ, clear=True, DJANGO_CONFIGURATION='Test')
def test_empty_module_var(self):
self.assertRaises(ImproperlyConfigured, ConfigurationImporter)
@patch.dict(os.environ, clear=True,
DJANGO_SETTINGS_MODULE='tests.settings.main')
def test_empty_class_var(self):
self.assertRaises(ImproperlyConfigured, ConfigurationImporter)
def test_global_settings(self):
from configurations.base import Configuration
self.assertIn('dictConfig', Configuration.LOGGING_CONFIG)
self.assertEqual(repr(Configuration),
"<Configuration 'configurations.base.Configuration'>")
def test_deprecated_settings_but_set_by_user(self):
from tests.settings.main import TestWithDefaultSetExplicitely
TestWithDefaultSetExplicitely.setup()
self.assertEqual(TestWithDefaultSetExplicitely.DEFAULT_AUTO_FIELD,
"django.db.models.BigAutoField")
def test_repr(self):
from tests.settings.main import Test
self.assertEqual(repr(Test),
"<Configuration 'tests.settings.main.Test'>")
@patch.dict(os.environ, clear=True,
DJANGO_SETTINGS_MODULE='tests.settings.main',
DJANGO_CONFIGURATION='Test')
def test_initialization(self):
importer = ConfigurationImporter()
self.assertEqual(importer.module, 'tests.settings.main')
self.assertEqual(importer.name, 'Test')
self.assertEqual(
repr(importer),
"<ConfigurationImporter for 'tests.settings.main.Test'>")
@patch.dict(os.environ, clear=True,
DJANGO_SETTINGS_MODULE='tests.settings.inheritance',
DJANGO_CONFIGURATION='Inheritance')
def test_initialization_inheritance(self):
importer = ConfigurationImporter()
self.assertEqual(importer.module,
'tests.settings.inheritance')
self.assertEqual(importer.name, 'Inheritance')
@patch.dict(os.environ, clear=True,
DJANGO_SETTINGS_MODULE='tests.settings.main',
DJANGO_CONFIGURATION='NonExisting')
@patch.object(sys, 'argv', ['python', 'manage.py', 'test',
'--settings=tests.settings.main',
'--configuration=Test'])
def test_configuration_option(self):
importer = ConfigurationImporter(check_options=False)
self.assertEqual(importer.module, 'tests.settings.main')
self.assertEqual(importer.name, 'NonExisting')
importer = ConfigurationImporter(check_options=True)
self.assertEqual(importer.module, 'tests.settings.main')
self.assertEqual(importer.name, 'Test')
def test_configuration_argument_in_cli(self):
"""
Verify that the --configuration option has been added to management
commands
"""
proc = subprocess.Popen(['django-cadmin', 'test', '--help'],
stdout=subprocess.PIPE)
self.assertIn('--configuration', proc.communicate()[0].decode('utf-8'))
proc = subprocess.Popen(['django-cadmin', 'runserver', '--help'],
stdout=subprocess.PIPE)
self.assertIn('--configuration', proc.communicate()[0].decode('utf-8'))
def test_configuration_argument_in_runypy_cli(self):
"""
Verify that the --configuration option has been added to management
commands when using the -m entry point
"""
proc = subprocess.Popen(
[sys.executable, '-m', 'configurations', 'test', '--help'],
stdout=subprocess.PIPE,
)
self.assertIn('--configuration', proc.communicate()[0].decode('utf-8'))
proc = subprocess.Popen(
[sys.executable, '-m', 'configurations', 'runserver', '--help'],
stdout=subprocess.PIPE,
)
self.assertIn('--configuration', proc.communicate()[0].decode('utf-8'))
def test_django_setup_only_called_once(self):
proc = subprocess.Popen(
[sys.executable, os.path.join(os.path.dirname(__file__),
'setup_test.py')],
stdout=subprocess.PIPE)
res = proc.communicate()
stdout = res[0].decode('utf-8')
self.assertIn('setup_1', stdout)
self.assertIn('setup_2', stdout)
self.assertIn('setup_done', stdout)
self.assertEqual(proc.returncode, 0)
def test_utils_reraise(self):
from configurations.utils import reraise
class CustomException(Exception):
pass
with self.assertRaises(CustomException) as cm:
try:
raise CustomException
except Exception as exc:
reraise(exc, "Couldn't setup configuration", None)
self.assertEqual(cm.exception.args, ("Couldn't setup configuration: ",))
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
cloudprober/grpc_gcp_prober/prober.py
|
"""Main method of the cloudprober as an entrypoint to execute probes."""
import os
import argparse
import sys
import traceback
import firestore_probes
import grpc
import grpc_gcp
import pkg_resources
import spanner_probes
from stackdriver_util import StackdriverUtil
from google import auth
import google.auth.transport.grpc as transport_grpc
from google.auth.transport.requests import Request
from google.cloud.firestore_v1beta1.proto import firestore_pb2_grpc
import google.protobuf.text_format
from google.cloud.spanner_v1.proto import spanner_pb2_grpc
_OAUTH_SCOPE = 'https://www.googleapis.com/auth/cloud-platform'
_SPANNER_TARGET = os.environ['SPANNER_TARGET']
_FIRESTORE_TARGET = os.environ['FIRESTORE_TARGET']
def _get_args():
"""Retrieves arguments passed in while executing the main method.
Returns:
An object containing all the values for each argument parsed in.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--api', type=str, help='define the cloud api to probe')
parser.add_argument('--extension',
type=bool,
help='options to use grpc-gcp extension')
return parser.parse_args()
def _secure_authorized_channel(credentials,
request,
target,
ssl_credentials=None,
**kwargs):
metadata_plugin = transport_grpc.AuthMetadataPlugin(credentials, request)
# Create a set of grpc.CallCredentials using the metadata plugin.
google_auth_credentials = grpc.metadata_call_credentials(metadata_plugin)
if ssl_credentials is None:
ssl_credentials = grpc.ssl_channel_credentials()
# Combine the ssl credentials and the authorization credentials.
composite_credentials = grpc.composite_channel_credentials(
ssl_credentials, google_auth_credentials)
return grpc_gcp.secure_channel(target, composite_credentials, **kwargs)
def _get_stub_channel(target, use_extension=False):
cred, _ = auth.default([_OAUTH_SCOPE])
if not use_extension:
return _secure_authorized_channel(cred, Request(), target)
config = grpc_gcp.api_config_from_text_pb(
pkg_resources.resource_string(__name__, 'spanner.grpc.config'))
options = [(grpc_gcp.API_CONFIG_CHANNEL_ARG, config)]
return _secure_authorized_channel(cred, Request(), target, options=options)
def _execute_probe(api, use_extension=False):
"""Execute a probe function given certain Cloud api and probe name.
Args:
api: the name of the api provider, e.g. "spanner", "firestore".
use_extension: option to use grpc-gcp extension when creating channel.
Raises:
NotImplementedError: An error occurred when api does not match any records.
"""
util = StackdriverUtil(api)
if api == 'spanner':
channel = _get_stub_channel(_SPANNER_TARGET, use_extension)
stub = spanner_pb2_grpc.SpannerStub(channel)
probe_functions = spanner_probes.PROBE_FUNCTIONS
elif api == 'firestore':
channel = _get_stub_channel(_FIRESTORE_TARGET)
stub = firestore_pb2_grpc.FirestoreStub(channel)
probe_functions = firestore_probes.PROBE_FUNCTIONS
else:
raise NotImplementedError('gRPC prober is not implemented for %s !' % api)
total = len(probe_functions)
success = 0
# Execute all probes for given api
for probe_name in probe_functions:
probe_function = probe_functions[probe_name]
try:
probe_function(stub)
success += 1
except Exception: # pylint: disable=broad-except
# report any kind of exception to Stackdriver
util.report_error(traceback.format_exc())
if success == total:
util.set_success(True)
# Summarize metrics
util.output_metrics()
# Fail this probe if any function fails
if success != total:
sys.exit(1)
if __name__ == '__main__':
args = _get_args()
_execute_probe(args.api, args.extension)
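# Example invocation (sketch; the environment variables must point at real
# gRPC endpoints):
#
#   SPANNER_TARGET=spanner.googleapis.com:443 \
#   FIRESTORE_TARGET=firestore.googleapis.com:443 \
#   python prober.py --api=spanner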
|
[] |
[] |
[
"SPANNER_TARGET",
"FIRESTORE_TARGET"
] |
[]
|
["SPANNER_TARGET", "FIRESTORE_TARGET"]
|
python
| 2 | 0 | |
flex/whitebox/server_test.go
|
// Copyright (c) 2019 Couchbase, Inc.
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package whitebox
import (
"encoding/json"
"fmt"
"os"
"reflect"
"testing"
"github.com/blevesearch/bleve/v2"
"github.com/blevesearch/bleve/v2/analysis"
"github.com/blevesearch/bleve/v2/analysis/datetime/flexible"
"github.com/blevesearch/bleve/v2/mapping"
"github.com/blevesearch/bleve/v2/registry"
"github.com/couchbase/query/datastore"
"github.com/couchbase/query/errors"
"github.com/couchbase/query/expression"
"github.com/couchbase/query/expression/parser"
"github.com/couchbase/query/parser/n1ql"
"github.com/couchbase/query/plannerbase"
"github.com/couchbase/query/server"
"github.com/couchbase/n1fty/flex"
"github.com/couchbase/n1fty/util"
)
// These tests seem to break in buildbot environment.
// To execute these tests in a local dev environment...
//
// WHITEBOX=y go test ./flex/whitebox
//
func checkSkipTest(t *testing.T) bool {
if os.Getenv("WHITEBOX") == "y" {
return false
}
// Print the hint before skipping; t.Skip stops the test function immediately.
fmt.Println("use WHITEBOX=y environment variable to enable")
t.Skip()
return true
}
func init() {
// Needed for BleveToCondFlexIndexes() to work on dynamic indexes.
registry.RegisterDateTimeParser("disabled",
func(config map[string]interface{}, cache *registry.Cache) (analysis.DateTimeParser, error) {
return flexible.New(nil), nil // With no layouts, "disabled" always returns an error.
})
}
func initIndexesById(t *testing.T, m map[string]*Index) map[string]*Index {
for id, idx := range m {
if idx.IndexMapping == nil {
continue
}
j, err := json.Marshal(idx.IndexMapping)
if err != nil {
t.Fatalf("initIndexesById, json.Marshal, err: %v", err)
}
idx.IndexMapping = bleve.NewIndexMapping()
err = json.Unmarshal(j, &idx.IndexMapping)
if err != nil {
t.Fatalf("initIndexesById, json.Unmarshal, err: %v", err)
}
if idx.CondFlexIndexes == nil {
cfis, err := flex.BleveToCondFlexIndexes("", "", idx.IndexMapping, nil, "", "")
if err != nil {
t.Fatalf("initIndexesById, id: %v, BleveToCondFlexIndexes err: %v",
id, err)
return nil
}
if len(cfis) != 1 {
t.Fatalf("initIndexesById, id: %v, BleveToCondFlexIndexes len != 1, got: %+v",
id, cfis)
return nil
}
idx.CondFlexIndexes = cfis
}
util.SetIndexMapping(idx.Name(), &util.MappingDetails{
UUID: id,
SourceName: idx.SourceName,
IMapping: idx.IndexMapping,
})
}
return m
}
func emitExpr(t *testing.T, e expression.Expression) {
fmt.Printf("==========\n")
fmt.Printf("e: %+v\n", e)
f, ok := e.(expression.Function)
if !ok {
return
}
fmt.Printf(" f.Name(): %v\n", f.Name())
dnf := plannerbase.NewDNF(e, false, true)
eDNF, err := dnf.Map(e)
if err != nil {
t.Errorf("did not expect dnf err: %v", err)
}
fmt.Printf(" eDNF: %+v\n", eDNF)
}
func TestParse(t *testing.T) {
e, _ := parser.Parse("object_pairs('$1') AND x > 1 AND 1 < x")
emitExpr(t, e)
e, _ = parser.Parse("x = 1")
emitExpr(t, e)
e, _ = parser.Parse("x > 10 AND y > 20 AND (z > 30 OR w > 40)")
emitExpr(t, e)
e, _ = parser.Parse("x > 10 AND y > 20 AND (z > 30 OR w > 40) AND (zz > 300 OR ww > 400)")
emitExpr(t, e)
e, _ = parser.Parse("(x > 10 AND y > 20) AND (z > 30 OR w > 40) AND (zz > 300 OR ww > 400)")
emitExpr(t, e)
e, _ = parser.Parse("(x > 10 AND y > 20) AND (z > 30 OR w > 40) AND (zz > 300 OR ww > 400) OR true")
emitExpr(t, e)
e, _ = parser.Parse("x > 10 AND y > 20 AND IFNULL(z > 30 OR w > 40, NULL)")
emitExpr(t, e)
e, _ = parser.Parse("x > 10 AND y > 20 AND IFNULL(NULL, NULL)")
emitExpr(t, e)
e, _ = parser.Parse("x > 10 AND x > 11 AND x > 12 AND x > 13 AND x > 14")
emitExpr(t, e)
}
func TestParseSelectFrom(t *testing.T) {
s, err := n1ql.ParseStatement("SELECT * FROM b WHERE b.x > 10")
if err != nil {
t.Errorf("got err: %v", err)
}
fmt.Printf("==========\n")
fmt.Printf("s: %+v\n", s)
s, err = n1ql.ParseStatement("SELECT * FROM b JOIN c ON b.f = c.f WHERE b.x > 10")
if err != nil {
t.Errorf("got err: %v", err)
}
fmt.Printf("==========\n")
fmt.Printf("s: %+v\n", s)
s, err = n1ql.ParseStatement("SELECT * FROM b JOIN c ON b.f = c.f LET z = c.x WHERE b.x > 10 AND z > 10")
if err != nil {
t.Errorf("got err: %v", err)
}
fmt.Printf("==========\n")
fmt.Printf("s: %+v\n", s)
}
func TestSelect1(t *testing.T) {
if checkSkipTest(t) {
return
}
s, err := NewServer("./", nil)
if err != nil {
t.Fatalf("did not expect err: %v", err)
}
r, err := ExecuteStatement(s, "select 1", nil, nil)
if err != nil {
t.Errorf("did not expect err: %v", err)
}
fmt.Printf("r: %+v\n", r)
r, err = ExecuteStatement(s, "select 1 + 2 as three", nil, nil)
if err != nil {
t.Errorf("did not expect err: %v", err)
}
fmt.Printf("r: %+v\n", r)
}
func TestSelectStarFromDataEmpty(t *testing.T) {
if checkSkipTest(t) {
return
}
c := MakeWrapCallbacksForIndexType(datastore.IndexType("FTS"), nil)
s, err := NewServer("./", c)
if err != nil {
t.Fatalf("did not expect err: %v", err)
}
r, err := ExecuteStatement(s, "select * from data:empty", nil, nil)
if err != nil {
t.Errorf("did not expect err: %v", err)
}
fmt.Printf("r: %+v\n", r)
}
func TestSelectStarFromData1Doc(t *testing.T) {
if checkSkipTest(t) {
return
}
c := MakeWrapCallbacksForIndexType(datastore.IndexType("FTS"), nil)
s, err := NewServer("./", c)
if err != nil {
t.Fatalf("did not expect err: %v", err)
}
r, err := ExecuteStatement(s, "select * from data:`1doc`", nil, nil)
if err != nil {
t.Errorf("did not expect err: %v", err)
}
fmt.Printf("r: %+v\n", r)
}
func TestSearchWithEmptyIndexes(t *testing.T) {
if checkSkipTest(t) {
return
}
c := MakeWrapCallbacksForIndexType(datastore.IndexType("FTS"), nil)
s, err := NewServer("./", c)
if err != nil {
t.Fatalf("did not expect err: %v", err)
}
r, err := ExecuteStatement(s,
"select * from data:`1doc` as b"+
` WHERE SEARCH(b.a, "hello")`, nil, nil)
if err != nil {
t.Errorf("did not expect err: %v", err)
}
fmt.Printf("r: %+v\n", r)
}
func TestNotSargable(t *testing.T) {
if checkSkipTest(t) {
return
}
initIndexer := func(indexer *Indexer) (*Indexer, errors.Error) {
if indexer.IndexesById == nil {
indexer.IndexesById = initIndexesById(t, map[string]*Index{
"ftsIdx": {
SourceName: "1doc",
Parent: indexer,
IdStr: "ftsIdx",
NameStr: "ftsIdx",
IndexMapping: &mapping.IndexMappingImpl{
DefaultAnalyzer: "keyword",
DefaultDateTimeParser: "disabled",
DefaultMapping: &mapping.DocumentMapping{
Enabled: true,
},
},
},
})
}
return indexer, nil
}
c := MakeWrapCallbacksForIndexType(datastore.IndexType("FTS"), initIndexer)
s, err := NewServer("./", c)
if err != nil {
t.Fatalf("did not expect err: %v", err)
}
r, err := ExecuteStatement(s,
"select * from data:`1doc` as b"+
` WHERE SEARCH(b.a, "hello", {"index": "ftsIdx"})`, nil, nil)
if err != nil {
t.Errorf("did not expect err: %v", err)
}
fmt.Printf("r: %+v\n", r)
r, err = ExecuteStatement(s,
"select *, META() from data:`1doc` as b", nil, nil)
if err != nil {
t.Errorf("did not expect err: %v", err)
}
fmt.Printf("r: %+v\n", r)
r, err = ExecuteStatement(s,
"select * from data:`1doc` as b"+
` WHERE SEARCH(b.a, {"match": "hello"}, {"index": "ftsIdx"})`, nil, nil)
if err != nil {
t.Errorf("did not expect err: %v", err)
}
fmt.Printf("r: %+v\n", r)
r, err = ExecuteStatement(s,
"select * from data:`1doc` as b UNNEST children as c UNNEST c.pets as cpets"+
" LET x = c.pets"+
` WHERE SEARCH(b.a, {"match": "hello"}, {"index": "ftsIdx"})`+
` AND x = "fluffy"`+
` AND cpets = "spot"`,
nil, nil)
if err != nil {
t.Errorf("did not expect err: %v", err)
}
fmt.Printf("r: %+v\n", r)
}
func TestOrdersData(t *testing.T) {
if checkSkipTest(t) {
return
}
indexesById := map[string]*Index{}
initIndexer := func(indexer *Indexer) (*Indexer, errors.Error) {
if indexer.IndexesById == nil {
indexer.IndexesById = initIndexesById(t, map[string]*Index{
"ftsIdx": {
SourceName: "orders",
Parent: indexer,
IdStr: "ftsIdx",
NameStr: "ftsIdx",
IndexMapping: &mapping.IndexMappingImpl{
DefaultAnalyzer: "keyword",
DefaultDateTimeParser: "disabled",
DefaultMapping: &mapping.DocumentMapping{
Enabled: true,
Properties: map[string]*mapping.DocumentMapping{
"custId": {
Enabled: true,
Fields: []*mapping.FieldMapping{
{
Name: "custId",
Type: "text",
Index: true,
},
},
},
"orderlines": {
Enabled: true,
Properties: map[string]*mapping.DocumentMapping{
"productId": {
Enabled: true,
Fields: []*mapping.FieldMapping{
{
Name: "productId",
Type: "text",
Index: true,
},
},
},
},
},
},
},
},
},
})
for id, v := range indexer.IndexesById {
indexesById[id] = v
}
}
return indexer, nil
}
c := MakeWrapCallbacksForIndexType(datastore.IndexType("FTS"), initIndexer)
s, err := NewServer("./", c)
if err != nil {
t.Fatalf("did not expect err: %v", err)
}
testOrdersData(t, s, indexesById, []testOrdersDataCase{
{
`SELECT *
FROM data:orders as o UNNEST o.orderlines as orderline
WHERE orderline.productId = "sugar22"`,
3,
flex.FieldTracks{
flex.FieldTrack("orderlines.productId"): 1,
},
true,
`{"field":"orderlines.productId","term":"sugar22"}`,
},
{
`SELECT *
FROM data:orders as o UNNEST o.orderlines as orderline
WHERE orderline.productId = "sugar22"
AND (o.custId = "ccc" OR o.custId = "abc")`,
3,
flex.FieldTracks{
flex.FieldTrack("orderlines.productId"): 1,
flex.FieldTrack("custId"): 2,
},
true,
`{"conjuncts":[{"field":"orderlines.productId","term":"sugar22"},{"disjuncts":[{"field":"custId","term":"ccc"},{"field":"custId","term":"abc"}]}]}`,
},
{
`SELECT *
FROM data:orders as o UNNEST orderlines as orderline
LEFT OUTER JOIN [] as o2 ON o.id = o2.id
WHERE o.custId = "ccc" OR o.custId = "abc"`,
6,
flex.FieldTracks{
flex.FieldTrack("custId"): 2,
},
true,
`{"disjuncts":[{"field":"custId","term":"ccc"},{"field":"custId","term":"abc"}]}`,
},
{
`SELECT *
FROM data:orders as o
LEFT OUTER JOIN [] as o2 ON o.id = o2.id
UNNEST o.orderlines as orderline
LET c = o.custId
WHERE c = "ccc" OR c = "abc"`,
6,
flex.FieldTracks{
flex.FieldTrack("custId"): 2,
},
true,
`{"disjuncts":[{"field":"custId","term":"ccc"},{"field":"custId","term":"abc"}]}`,
},
})
}
func TestOrdersDataDynamicIndex(t *testing.T) {
if checkSkipTest(t) {
return
}
indexesById := map[string]*Index{}
initIndexer := func(indexer *Indexer) (*Indexer, errors.Error) {
if indexer.IndexesById == nil {
indexer.IndexesById = initIndexesById(t, map[string]*Index{
"ftsIdx": {
Parent: indexer,
IdStr: "ftsIdx",
NameStr: "ftsIdx",
IndexMapping: &mapping.IndexMappingImpl{
DefaultAnalyzer: "keyword",
DefaultDateTimeParser: "disabled",
DefaultMapping: &mapping.DocumentMapping{
Enabled: true,
Dynamic: true,
},
IndexDynamic: true,
},
},
})
for id, v := range indexer.IndexesById {
indexesById[id] = v
}
}
return indexer, nil
}
c := MakeWrapCallbacksForIndexType(datastore.IndexType("FTS"), initIndexer)
s, err := NewServer("./", c)
if err != nil {
t.Fatalf("did not expect err: %v", err)
}
testOrdersData(t, s, indexesById, []testOrdersDataCase{
{
`SELECT *
FROM data:orders as o UNNEST o.orderlines as orderline
WHERE orderline.productId = "sugar22"`,
3,
flex.FieldTracks{
flex.FieldTrack("orderlines.productId"): 1,
},
false,
`{"field":"orderlines.productId","term":"sugar22"}`,
},
{
`SELECT *
FROM data:orders as o UNNEST o.orderlines as orderline
WHERE orderline.productId = "sugar22"
AND (o.custId = "ccc" OR o.custId = "abc")`,
3,
flex.FieldTracks{
flex.FieldTrack("orderlines.productId"): 1,
flex.FieldTrack("custId"): 2,
},
false,
`{"conjuncts":[{"field":"orderlines.productId","term":"sugar22"},{"disjuncts":[{"field":"custId","term":"ccc"},{"field":"custId","term":"abc"}]}]}`,
},
{
`SELECT *
FROM data:orders as o UNNEST orderlines as orderline
LEFT OUTER JOIN [] as o2 ON o.id = o2.id
WHERE o.custId = "ccc" OR o.custId = "abc"`,
6,
flex.FieldTracks{
flex.FieldTrack("custId"): 2,
},
false,
`{"disjuncts":[{"field":"custId","term":"ccc"},{"field":"custId","term":"abc"}]}`,
},
{
`SELECT *
FROM data:orders as o
LEFT OUTER JOIN [] as o2 ON o.id = o2.id
UNNEST o.orderlines as orderline
LET c = o.custId
WHERE c = "ccc" OR c = "abc"`,
6,
flex.FieldTracks{
flex.FieldTrack("custId"): 2,
},
false,
`{"disjuncts":[{"field":"custId","term":"ccc"},{"field":"custId","term":"abc"}]}`,
},
// ---------------------------------------------------------------
{
`SELECT *
FROM data:orders as o
WHERE ANY ol IN o.orderlines
SATISFIES ol.instructions = "expedite" END`,
0,
flex.FieldTracks{
flex.FieldTrack("orderlines.instructions"): 1,
},
false,
`{"field":"orderlines.instructions","term":"expedite"}`,
},
{
`SELECT *
FROM data:orders as o
WHERE ANY ol IN o.orderlines
SATISFIES ol.qty = 100 END`,
0,
flex.FieldTracks{},
false,
``,
},
{
`SELECT *
FROM data:orders as o
WHERE ANY ol IN o.orderlines
SATISFIES ol.instructions = "expedite" AND ol.qty = 100 END`,
0,
flex.FieldTracks{},
false,
``,
},
{
`SELECT *
FROM data:orders as o UNNEST o.orderlines as ol
WHERE ol.qty = 100`,
0,
flex.FieldTracks{},
false,
``,
},
})
}
type testOrdersDataCase struct {
stmt string
expectNumResults int
expectFieldTracks flex.FieldTracks
expectNeedsFiltering bool
expectBleveQuery string
}
func testOrdersData(t *testing.T, s *server.Server, indexesById map[string]*Index,
moreTests []testOrdersDataCase) {
if len(indexesById) > 0 {
t.Fatalf("expected empty indexesById")
}
tests := append([]testOrdersDataCase{
{
`SELECT *, META() as META from data:orders as o WHERE custId = "ccc"`,
2,
flex.FieldTracks{
flex.FieldTrack("custId"): 1,
},
false,
`{"field":"custId","term":"ccc"}`,
},
{
`SELECT *, META() as META FROM data:orders as o
WHERE custId = "ccc" OR custId = "ddd"`,
2,
flex.FieldTracks{
flex.FieldTrack("custId"): 2,
},
false,
`{"disjuncts":[{"field":"custId","term":"ccc"},{"field":"custId","term":"ddd"}]}`,
},
{
`SELECT *, META() as META FROM data:orders as o
WHERE custId = "ccc" OR custId = "abc"`,
3,
flex.FieldTracks{
flex.FieldTrack("custId"): 2,
},
false,
`{"disjuncts":[{"field":"custId","term":"ccc"},{"field":"custId","term":"abc"}]}`,
},
{
`SELECT *, META() as META FROM data:orders as o
WHERE ANY orderline IN o.orderlines
SATISFIES orderline.productId = "sugar22" END`,
3,
flex.FieldTracks{
flex.FieldTrack("orderlines.productId"): 1,
},
false,
`{"field":"orderlines.productId","term":"sugar22"}`,
},
{
`SELECT *, META() as META FROM data:orders as o
WHERE ANY orderline IN o.orderlines
SATISFIES orderline.productId = "sugar22" END
AND (o.custId = "ccc" OR o.custId = "abc")`,
3,
flex.FieldTracks{
flex.FieldTrack("orderlines.productId"): 1,
flex.FieldTrack("custId"): 2,
},
false,
`{"conjuncts":[{"field":"orderlines.productId","term":"sugar22"},{"disjuncts":[{"field":"custId","term":"ccc"},{"field":"custId","term":"abc"}]}]}`,
},
{
`SELECT *
FROM data:orders as o LEFT OUTER JOIN [] as o2 ON o.id = o2.id
WHERE o.custId = "ccc" OR o.custId = "abc"`,
3,
flex.FieldTracks{
flex.FieldTrack("custId"): 2,
},
false,
`{"disjuncts":[{"field":"custId","term":"ccc"},{"field":"custId","term":"abc"}]}`,
},
{
`SELECT *
FROM data:orders as o
WHERE o.custId >= "a" AND o.custId <= "b"`,
1,
flex.FieldTracks{
flex.FieldTrack("custId"): 2,
},
false,
`{"field":"custId","inclusive_max":true,"inclusive_min":true,"max":"b","min":"a"}`,
},
{
`SELECT *
FROM data:orders as o
WHERE ISSTRING(o.custId) AND o.custId < "b"`,
1,
flex.FieldTracks{
flex.FieldTrack("custId"): 1,
},
false,
`{"field":"custId","inclusive_max":false,"max":"b"}`,
},
}, moreTests...)
for testi, test := range tests {
r, err := ExecuteStatement(s, test.stmt, nil, nil)
if err != nil {
t.Fatalf("did not expect err: %v", err)
}
if len(r) != test.expectNumResults {
t.Fatalf("test: %+v\n got len(r): %d, r: %+v", test, len(r), r)
}
if len(indexesById) != 1 || indexesById["ftsIdx"] == nil {
t.Fatalf("expected ftsIdx, got: %+v", indexesById)
}
idx := indexesById["ftsIdx"]
last := idx.lastSargableFlexOk
if last == nil {
if len(test.expectFieldTracks) <= 0 {
idx.lastSargableFlexErr = nil
continue // On to next test if we were expecting not-sargable flex.
}
t.Fatalf("testi: %d, test: %+v, expected lastSargableFlexOk",
testi, test)
}
if !reflect.DeepEqual(last.fieldTracks, test.expectFieldTracks) {
t.Fatalf("test: %+v\n last.fieldTracks (%+v) != test.expectFieldTracks: %+v",
test, last.fieldTracks, test.expectFieldTracks)
}
if last.needsFiltering != test.expectNeedsFiltering {
t.Fatalf("test: %+v\n last.needsFiltering mismatch: %+v",
test, last.needsFiltering)
}
bleveQueryJson, _ := json.Marshal(last.bleveQuery)
if string(bleveQueryJson) != test.expectBleveQuery {
t.Fatalf("test: %+v\n last.bleveQuery mismatch: %s",
test, bleveQueryJson)
}
idx.lastSargableFlexOk = nil
idx.lastSargableFlexErr = nil
}
}
|
[
"\"WHITEBOX\""
] |
[] |
[
"WHITEBOX"
] |
[]
|
["WHITEBOX"]
|
go
| 1 | 0 | |
integrationtest/vm/virtualrouter/scheduler/test_delete_vm_check_simple_scheduler_state.py
|
'''
New Integration Test for Simple VM stop/start scheduler.
@author: MengLai
'''
import os
import time
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.scheduler_operations as schd_ops
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.resource_operations as res_ops
_config_ = {
'timeout' : 2000,
'noparallel' : True
}
test_stub = test_lib.lib_get_test_stub()
vm = None
schd1 = None
schd2 = None
delete_policy = None
def check_scheduler_state(schd, target_state):
conditions = res_ops.gen_query_conditions('uuid', '=', schd.uuid)
schd_state = res_ops.query_resource(res_ops.SCHEDULER, conditions)[0].state
if schd_state != target_state:
        test_util.test_fail('scheduler state is expected to be %s, but it is %s' % (target_state, schd_state))
return True
def check_scheduler_msg(msg, timestamp):
    # Scan a 20-second window starting at `timestamp` for the scheduler job message.
    for i in range(0, 20):
        if test_lib.lib_find_in_local_management_server_log(timestamp + i, msg, vm.get_vm().uuid):
            return True
    return False
def test():
    global vm
    global schd1
    global schd2
    global delete_policy
    delete_policy = test_lib.lib_get_delete_policy('vm')
vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
vm.set_delete_policy('Delay')
start_date = int(time.time())
schd1 = vm_ops.stop_vm_scheduler(vm.get_vm().uuid, 'simple', 'simple_stop_vm_scheduler', start_date+10, 20)
schd2 = vm_ops.start_vm_scheduler(vm.get_vm().uuid, 'simple', 'simple_start_vm_scheduler', start_date+20, 20)
test_stub.sleep_util(start_date+45)
test_util.test_dsc('check scheduler state after create scheduler')
check_scheduler_state(schd1, 'Enabled')
check_scheduler_state(schd2, 'Enabled')
if not check_scheduler_msg('run scheduler for job: StopVmInstanceJob', start_date+10):
test_util.test_fail('StopVmInstanceJob not executed at expected timestamp range')
if not check_scheduler_msg('run scheduler for job: StartVmInstanceJob', start_date+20):
test_util.test_fail('StartVmInstanceJob not executed at expected timestamp range')
vm.destroy()
current_time = int(time.time())
except_start_time = start_date + 20 * (((current_time - start_date) % 20) + 1)
test_stub.sleep_util(except_start_time + 45)
test_util.test_dsc('check scheduler state after destroy vm')
check_scheduler_state(schd1, 'Disabled')
check_scheduler_state(schd2, 'Disabled')
if check_scheduler_msg('run scheduler for job: StopVmInstanceJob', except_start_time+10):
test_util.test_fail('StopVmInstanceJob executed at unexpected timestamp range')
if check_scheduler_msg('run scheduler for job: StartVmInstanceJob', except_start_time+20):
test_util.test_fail('StartVmInstanceJob executed at unexpected timestamp range')
vm.recover()
current_time = int(time.time())
except_start_time = start_date + 20 * (((current_time - start_date) % 20) + 1)
test_stub.sleep_util(except_start_time + 45)
test_util.test_dsc('check scheduler state after recover vm')
check_scheduler_state(schd1, 'Disabled')
check_scheduler_state(schd2, 'Disabled')
# if not check_scheduler_msg('run scheduler for job: StopVmInstanceJob', except_start_time+10):
# test_util.test_fail('StopVmInstanceJob not executed at expected timestamp range')
# if not check_scheduler_msg('run scheduler for job: StartVmInstanceJob', except_start_time+20):
# test_util.test_fail('StartVmInstanceJob not executed at expected timestamp range' )
schd_ops.delete_scheduler(schd1.uuid)
schd_ops.delete_scheduler(schd2.uuid)
vm.set_delete_policy(delete_policy)
vm.destroy()
conditions = res_ops.gen_query_conditions('uuid', '=', schd1.uuid)
if len(res_ops.query_resource(res_ops.SCHEDULER, conditions)) > 0:
        test_util.test_fail('the stop vm scheduler is expected to be destroyed, but it still exists')
conditions = res_ops.gen_query_conditions('uuid', '=', schd2.uuid)
if len(res_ops.query_resource(res_ops.SCHEDULER, conditions)) > 0:
        test_util.test_fail('the start vm scheduler is expected to be destroyed, but it still exists')
test_util.test_pass('Check Scheduler State after Destroy and Recover VM Success')
#Will be called only if exception happens in test().
def error_cleanup():
global vm
global schd1
global schd2
    if vm:
        if delete_policy is not None:
            vm.set_delete_policy(delete_policy)
        vm.destroy()
if schd1:
schd_ops.delete_scheduler(schd1.uuid)
if schd2:
schd_ops.delete_scheduler(schd2.uuid)
|
[] |
[] |
[
"l3VlanNetworkName1"
] |
[]
|
["l3VlanNetworkName1"]
|
python
| 1 | 0 | |
my_mrp/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "my_mrp.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
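
# Typical invocations once dependencies are installed (illustrative; any Django
# management command can be used here):
#
#   python manage.py migrate
#   python manage.py runserver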
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
examples/sending_files/main.go
|
package main
import (
"fmt"
"os"
"github.com/mymmrac/telego"
tu "github.com/mymmrac/telego/telegoutil"
)
func main() {
botToken := os.Getenv("TOKEN")
bot, err := telego.NewBot(botToken, telego.WithDefaultLogger(true, true))
if err != nil {
fmt.Println(err)
os.Exit(1)
}
// Document parameters
document := tu.Document(
// Chat ID as Integer
tu.ID(1234567),
// Send using file from disk
tu.File(mustOpen("my_file.txt")),
// Send using external URL
// tu.FileByURL("https://example.com/my_file.txt"),
// Send using file ID
// tu.FileByID("<file ID of your file>"),
).WithCaption("My cool file from disk")
// Sending document
msg, err := bot.SendDocument(document)
if err != nil {
fmt.Println(err)
return
}
fmt.Println(msg.Document)
// =========================================== //
// Photo parameters
photo := tu.Photo(
// Chat ID as String (target username)
tu.Username("@my_cool_channel"),
// Send using file from disk
tu.File(mustOpen("my_photo.png")),
).WithCaption("My cool photo")
// Sending photo
_, _ = bot.SendPhoto(photo)
// =========================================== //
// Media group parameters
mediaGroup := tu.MediaGroup(
tu.ID(1234567),
// Specify slice of telego.InputMedia with media you want to send as a group
tu.MediaPhoto(tu.File(mustOpen("my_photo.png"))),
tu.MediaPhoto(tu.FileByID("<file ID of your photo>")),
tu.MediaPhoto(tu.FileByURL("https://example.com/my_photo.png")),
)
// Sending media group
_, _ = bot.SendMediaGroup(mediaGroup)
}
// Helper function to open file or panic
func mustOpen(filename string) *os.File {
file, err := os.Open(filename)
if err != nil {
panic(err)
}
return file
}
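
// Note: for brevity this example never closes the files returned by mustOpen.
// A real application should close each *os.File (for example with a deferred
// Close call) once the corresponding request has been sent.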
|
[
"\"TOKEN\""
] |
[] |
[
"TOKEN"
] |
[]
|
["TOKEN"]
|
go
| 1 | 0 | |
hail/python/hailtop/aiocloud/aiogoogle/credentials.py
|
from typing import Dict, Optional
import os
import json
import time
import logging
from urllib.parse import urlencode
import jwt
from hailtop.utils import request_retry_transient_errors
import hailtop.httpx
from ..common.credentials import CloudCredentials
log = logging.getLogger(__name__)
class GoogleExpiringAccessToken:
@staticmethod
def from_dict(data: dict) -> 'GoogleExpiringAccessToken':
        now = time.time()
        token = data['access_token']
        # Treat the token as expired at half its advertised lifetime so a refresh
        # happens well before the token can expire mid-request.
        expiry_time = now + data['expires_in'] // 2
return GoogleExpiringAccessToken(token, expiry_time)
def __init__(self, token, expiry_time: int):
self.token = token
self._expiry_time = expiry_time
def expired(self) -> bool:
now = time.time()
return self._expiry_time <= now
class GoogleCredentials(CloudCredentials):
_http_session: hailtop.httpx.ClientSession
def __init__(self,
http_session: Optional[hailtop.httpx.ClientSession] = None,
**kwargs):
self._access_token: Optional[GoogleExpiringAccessToken] = None
if http_session is not None:
assert len(kwargs) == 0
self._http_session = http_session
else:
self._http_session = hailtop.httpx.ClientSession(**kwargs)
@staticmethod
def from_file(credentials_file: str) -> 'GoogleCredentials':
with open(credentials_file) as f:
credentials = json.load(f)
return GoogleCredentials.from_credentials_data(credentials)
@staticmethod
def from_credentials_data(credentials: dict, **kwargs) -> 'GoogleCredentials':
credentials_type = credentials['type']
if credentials_type == 'service_account':
return GoogleServiceAccountCredentials(credentials, **kwargs)
if credentials_type == 'authorized_user':
return GoogleApplicationDefaultCredentials(credentials, **kwargs)
raise ValueError(f'unknown Google Cloud credentials type {credentials_type}')
@staticmethod
def default_credentials() -> 'GoogleCredentials':
credentials_file = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS')
if credentials_file is None:
application_default_credentials_file = f'{os.environ["HOME"]}/.config/gcloud/application_default_credentials.json'
if os.path.exists(application_default_credentials_file):
credentials_file = application_default_credentials_file
if credentials_file:
creds = GoogleCredentials.from_file(credentials_file)
log.info(f'using credentials file {credentials_file}: {creds}')
return creds
log.warning('unable to locate Google Cloud credentials file, will attempt to '
'use instance metadata server instead')
return GoogleInstanceMetadataCredentials()
async def auth_headers(self) -> Dict[str, str]:
if self._access_token is None or self._access_token.expired():
self._access_token = await self._get_access_token()
return {'Authorization': f'Bearer {self._access_token.token}'}
async def _get_access_token(self) -> GoogleExpiringAccessToken:
raise NotImplementedError
async def close(self):
await self._http_session.close()
# protocol documented here:
# https://developers.google.com/identity/protocols/oauth2/web-server#offline
# studying `gcloud --log-http print-access-token` was also useful
class GoogleApplicationDefaultCredentials(GoogleCredentials):
def __init__(self, credentials, **kwargs):
super().__init__(**kwargs)
self.credentials = credentials
def __str__(self):
return 'ApplicationDefaultCredentials'
async def _get_access_token(self) -> GoogleExpiringAccessToken:
async with await request_retry_transient_errors(
self._http_session, 'POST',
'https://www.googleapis.com/oauth2/v4/token',
headers={
'content-type': 'application/x-www-form-urlencoded'
},
data=urlencode({
'grant_type': 'refresh_token',
'client_id': self.credentials['client_id'],
'client_secret': self.credentials['client_secret'],
'refresh_token': self.credentials['refresh_token']
})) as resp:
return GoogleExpiringAccessToken.from_dict(await resp.json())
# protocol documented here:
# https://developers.google.com/identity/protocols/oauth2/service-account
# studying `gcloud --log-http print-access-token` was also useful
class GoogleServiceAccountCredentials(GoogleCredentials):
def __init__(self, key, **kwargs):
super().__init__(**kwargs)
self.key = key
def __str__(self):
return f'GoogleServiceAccountCredentials for {self.key["client_email"]}'
async def _get_access_token(self) -> GoogleExpiringAccessToken:
now = int(time.time())
scope = 'openid https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/compute'
assertion = {
"aud": "https://www.googleapis.com/oauth2/v4/token",
"iat": now,
"scope": scope,
"exp": now + 300, # 5m
"iss": self.key['client_email']
}
encoded_assertion = jwt.encode(assertion, self.key['private_key'], algorithm='RS256')
async with await request_retry_transient_errors(
self._http_session, 'POST',
'https://www.googleapis.com/oauth2/v4/token',
headers={
'content-type': 'application/x-www-form-urlencoded'
},
data=urlencode({
'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
'assertion': encoded_assertion
})) as resp:
return GoogleExpiringAccessToken.from_dict(await resp.json())
# https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances#applications
class GoogleInstanceMetadataCredentials(GoogleCredentials):
async def _get_access_token(self) -> GoogleExpiringAccessToken:
async with await request_retry_transient_errors(
self._http_session, 'GET',
'http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token',
headers={'Metadata-Flavor': 'Google'}) as resp:
return GoogleExpiringAccessToken.from_dict(await resp.json())
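
# A minimal usage sketch of the classes above (illustrative only; the request
# that the headers would be attached to is hypothetical):
#
#   import asyncio
#
#   async def main():
#       creds = GoogleCredentials.default_credentials()
#       try:
#           headers = await creds.auth_headers()
#           # attach `headers` to an authenticated request, e.g. a GCS API call
#       finally:
#           await creds.close()
#
#   asyncio.run(main())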
|
[] |
[] |
[
"HOME",
"GOOGLE_APPLICATION_CREDENTIALS"
] |
[]
|
["HOME", "GOOGLE_APPLICATION_CREDENTIALS"]
|
python
| 2 | 0 | |
src/yggdrasill/yggdrasill.go
|
package main
import (
"fmt"
"os"
"log"
"github.com/dmnlk/gomadare"
"github.com/dmnlk/stringUtils"
"github.com/rem7/goprowl"
"strings"
)
var (
CONSUMER_KEY string
CONSUMER_KEY_SECRET string
ACCESS_TOKEN string
ACCESS_TOKEN_SECRET string
SCREEN_NAME string
PROWL_API_KEY string
PROWL goprowl.Goprowl
)
func main() {
err := configureToken()
if err != nil {
fmt.Println(err)
return
}
err = PROWL.RegisterKey(PROWL_API_KEY)
if err != nil {
fmt.Println(err)
return
}
SCREEN_NAME = os.Getenv("SCREEN_NAME")
	if len(SCREEN_NAME) == 0 {
		return
	}
client := gomadare.NewClient(CONSUMER_KEY, CONSUMER_KEY_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
client.GetUserStream(nil, func(s gomadare.Status, e gomadare.Event) {
		// The address of a parameter is never nil, so checking &s / &e always
		// succeeded; inspect the payloads instead to decide what was delivered.
		if s.Text != "" {
			go sendReplyAndRetweetToProwl(s)
		}
		if e.Event != "" {
			go sendEventToProwl(e)
		}
})
}
func configureToken() error {
CONSUMER_KEY = os.Getenv("CONSUMER_KEY")
CONSUMER_KEY_SECRET = os.Getenv("CONSUMER_KEY_SECRET")
ACCESS_TOKEN = os.Getenv("ACCESS_TOKEN")
ACCESS_TOKEN_SECRET = os.Getenv("ACCESS_TOKEN_SECRET")
PROWL_API_KEY = os.Getenv("PROWL_API_KEY")
if ng := stringUtils.IsAnyEmpty(CONSUMER_KEY, CONSUMER_KEY_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET, PROWL_API_KEY); ng {
return fmt.Errorf("some key invalid")
}
return nil
}
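
// The program reads its configuration from the environment; a sketch of the
// variables it expects (values are placeholders):
//
//	export CONSUMER_KEY=...
//	export CONSUMER_KEY_SECRET=...
//	export ACCESS_TOKEN=...
//	export ACCESS_TOKEN_SECRET=...
//	export PROWL_API_KEY=...
//	export SCREEN_NAME=your_twitter_screen_name
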
func sendEventToProwl(e gomadare.Event) {
if stringUtils.IsEmpty(e.Event) {
return
}
if (e.Event == "favorite" || e.Event == "unfavorite" || e.Event == "retweeted_retweet" || e.Event == "follow") && e.Source.ScreenName == SCREEN_NAME {
return
}
emoji := getEventEmoji(e)
n := &goprowl.Notification{
Application: "yggdrasill",
Description: emoji + " " + e.TargetObject.Text,
Event: e.Event + " by " + e.Source.ScreenName,
Priority: "1",
}
PROWL.Push(n)
}
func getEventEmoji(event gomadare.Event) string {
if event.Event == "favorite" {
return "\u2b50"
}
if event.Event == "unfavorite" {
return "\U0001f44e"
}
if event.Event == "list_member_removed" {
return "\u274c"
}
if event.Event == "list_member_added" {
return "\u2755"
}
if event.Event == "follow" {
return "\u2661"
}
log.Println("unknown event:" + event.Event)
return event.Event
}
func getProwlNotification(event gomadare.Event) goprowl.Notification {
n := &goprowl.Notification{
Application: "Yggdrsill",
Priority: "1",
}
return *n
}
func sendReplyAndRetweetToProwl(s gomadare.Status) {
// reply Event
if len(s.Entities.UserMentions) > 0 {
for _, mention := range s.Entities.UserMentions {
if mention.ScreenName == "dmnlk" {
var n *goprowl.Notification
if strings.Contains(s.Text, "RT") {
n = &goprowl.Notification{
Application: "yggdrasill",
Description: "\U0001f4a1" + " " + s.Text,
Event: "RT by " + s.User.ScreenName,
Priority: "1",
}
} else {
n = &goprowl.Notification{
Application: "yggdrasill",
Description: "\U0001f4a1" + " " + s.Text,
Event: "Mentioned by " + s.User.ScreenName,
Priority: "1",
}
}
PROWL.Push(n)
}
}
}
	// RT (retweet) event
if s.RetweetedStatus.User.ScreenName == SCREEN_NAME {
n := &goprowl.Notification{
Application: "yggdrasill",
Description: "\U0001f4a1" + " " + s.Text,
Event: "RT by " + s.User.ScreenName,
Priority: "1",
}
PROWL.Push(n)
}
}
|
[
"\"SCREEN_NAME\"",
"\"CONSUMER_KEY\"",
"\"CONSUMER_KEY_SECRET\"",
"\"ACCESS_TOKEN\"",
"\"ACCESS_TOKEN_SECRET\"",
"\"PROWL_API_KEY\""
] |
[] |
[
"PROWL_API_KEY",
"CONSUMER_KEY_SECRET",
"CONSUMER_KEY",
"SCREEN_NAME",
"ACCESS_TOKEN",
"ACCESS_TOKEN_SECRET"
] |
[]
|
["PROWL_API_KEY", "CONSUMER_KEY_SECRET", "CONSUMER_KEY", "SCREEN_NAME", "ACCESS_TOKEN", "ACCESS_TOKEN_SECRET"]
|
go
| 6 | 0 | |
test/test_0_basic.py
|
import platform
import textwrap
import pytest
from cibuildwheel.logger import Logger
from . import test_projects, utils
basic_project = test_projects.new_c_project(
setup_py_add=textwrap.dedent('''
import os
if os.environ.get("CIBUILDWHEEL", "0") != "1":
raise Exception("CIBUILDWHEEL environment variable is not set to 1")
''')
)
def test(tmp_path):
project_dir = tmp_path / 'project'
basic_project.generate(project_dir)
# build the wheels
actual_wheels = utils.cibuildwheel_run(project_dir)
# check that the expected wheels are produced
expected_wheels = utils.expected_wheels('spam', '0.1.0')
assert set(actual_wheels) == set(expected_wheels)
@pytest.mark.skip(reason='to keep test output clean')
def test_sample_build(tmp_path, capfd):
project_dir = tmp_path / 'project'
basic_project.generate(project_dir)
    # build the wheels, and let the output pass through to the caller, so
    # we can see how it looks
with capfd.disabled():
logger = Logger()
logger.step('test_sample_build')
try:
utils.cibuildwheel_run(project_dir)
finally:
logger.step_end()
def test_build_identifiers(tmp_path):
project_dir = tmp_path / 'project'
basic_project.generate(project_dir)
# check that the number of expected wheels matches the number of build
# identifiers
    # after adding CIBW_MANYLINUX_IMAGE to support manylinux2010, a single
    # build identifier can produce more than one wheel, so limit the
    # expected wheels accordingly
if platform.machine() in ['x86_64', 'i686']:
expected_wheels = [w for w in utils.expected_wheels('spam', '0.1.0')
if '-manylinux' not in w or '-manylinux1' in w]
else:
expected_wheels = utils.expected_wheels('spam', '0.1.0')
build_identifiers = utils.cibuildwheel_get_build_identifiers(project_dir)
assert len(expected_wheels) == len(build_identifiers)
|
[] |
[] |
[
"CIBUILDWHEEL"
] |
[]
|
["CIBUILDWHEEL"]
|
python
| 1 | 0 | |
util/util.go
|
package util
import (
"bytes"
"io"
)
// ReaderToByte reads an io.Reader into a byte slice and returns it.
func ReaderToByte(stream io.Reader) []byte {
buf := new(bytes.Buffer)
_, err := buf.ReadFrom(stream)
if err != nil {
panic(err)
}
return buf.Bytes()
}
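
// Example usage (illustrative):
//
//	data := ReaderToByte(strings.NewReader("hello"))
//	fmt.Println(string(data)) // prints "hello"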
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
tests/functional/rest/test_client.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Functional tests for `copra.rest.Client` class.
Without any additional user input, this module will test all of the
unauthenticated methods of the copra.rest.Client.
An API key for the Coinbase Pro sandbox is required to test the authenticated
methods. The key information as well as the ids of a few test accounts are
read into this module as environment variables by the dotenv module from a
file named .env. The .env file must reside in the same directory as this test
module.
An example .env file named .env.sample is provided. To test the authenticated
methods, fill out the .env.sample file accordingly and rename it to .env.
"""
import os.path
if os.path.isfile(os.path.join(os.path.dirname(__file__), '.env')):
from dotenv import load_dotenv
load_dotenv()
else:
print("\n** .env file not found. Authenticated methods will be skipped. **\n")
import asyncio
from datetime import datetime, timedelta
import os
import json
import random
import time
from uuid import uuid4
from asynctest import TestCase, skipUnless, expectedFailure
from dateutil import parser
from copra.rest import APIRequestError, Client, SANDBOX_URL
from copra.rest.client import USER_AGENT
KEY = os.getenv('KEY')
SECRET = os.getenv('SECRET')
PASSPHRASE = os.getenv('PASSPHRASE')
TEST_AUTH = True if (KEY and SECRET and PASSPHRASE) else False
TEST_BTC_ACCOUNT = os.getenv('TEST_BTC_ACCOUNT')
TEST_USD_ACCOUNT = os.getenv('TEST_USD_ACCOUNT')
TEST_USD_PAYMENT_METHOD = os.getenv('TEST_USD_PAYMENT_METHOD')
TEST_USD_COINBASE_ACCOUNT = os.getenv('TEST_USD_COINBASE_ACCOUNT')
HTTPBIN = 'http://httpbin.org'
class TestRest(TestCase):
"""Tests for copra.rest.Client"""
def setUp(self):
self.client = Client(self.loop)
if TEST_AUTH:
self.auth_client = Client(self.loop, SANDBOX_URL, auth=True,
key=KEY, secret=SECRET,
passphrase=PASSPHRASE)
def tearDown(self):
self.loop.create_task(self.client.close())
if TEST_AUTH:
self.loop.run_until_complete(self.auth_client.cancel_all(stop=True))
self.loop.create_task(self.auth_client.close())
# try to avoid public rate limit, allow for aiohttp cleanup and
# all outstanding Coinbase actions to complete
self.loop.run_until_complete(asyncio.sleep(1))
async def test_user_agent(self):
async with Client(self.loop, HTTPBIN) as client:
headers, body = await client.get('/user-agent')
self.assertEqual(body['user-agent'], USER_AGENT)
async def test__handle_error(self):
async with Client(self.loop, HTTPBIN) as client:
with self.assertRaises(APIRequestError) as cm:
headers, body = await client.get('/status/404')
async def test_delete(self):
async with Client(self.loop, HTTPBIN) as client:
headers, body = await client.delete('/delete')
self.assertEqual(body['args'], {})
self.assertEqual(body['headers']['User-Agent'], USER_AGENT)
self.assertIsInstance(headers, dict)
self.assertIn('Content-Type', headers)
self.assertIn('Content-Length', headers)
params = {'key1': 'item1', 'key2': 'item2'}
headers, body = await client.delete('/delete', params=params)
self.assertEqual(body['args'], params)
async def test_get(self):
async with Client(self.loop, HTTPBIN) as client:
headers, body = await client.get('/get')
body['args'].pop('no-cache', None)
self.assertEqual(body['args'], {})
self.assertEqual(body['headers']['User-Agent'], USER_AGENT)
self.assertIsInstance(headers, dict)
self.assertIn('Content-Type', headers)
self.assertIn('Content-Length', headers)
params = {'key1': 'item1', 'key2': 'item2'}
headers, body = await client.get('/get', params=params)
self.assertEqual(body['args'], params)
async def test_post(self):
async with Client(self.loop, HTTPBIN) as client:
headers, body = await client.post('/post')
self.assertEqual(body['form'], {})
self.assertEqual(body['headers']['User-Agent'], USER_AGENT)
self.assertIsInstance(headers, dict)
self.assertIn('Content-Type', headers)
self.assertIn('Content-Length', headers)
data = {"key1": "item1", "key2": "item2"}
headers, body = await client.post('/post', data=data)
self.assertEqual(json.loads(body['data']), data)
async def test_products(self):
keys = {'id', 'base_currency', 'quote_currency', 'base_min_size',
'base_max_size', 'quote_increment', 'display_name', 'status',
'margin_enabled', 'status_message', 'min_market_funds',
'max_market_funds', 'post_only', 'limit_only', 'cancel_only'}
        # The response sometimes includes an unexpected extra 'accesible' (sic) key.
products = await self.client.products()
self.assertIsInstance(products, list)
self.assertGreater(len(products), 1)
self.assertIsInstance(products[0], dict)
self.assertGreaterEqual(len(products[0]), len(keys))
self.assertGreaterEqual(products[0].keys(), keys)
async def test_order_book(self):
keys = {'sequence', 'bids', 'asks'}
ob1 = await self.client.order_book('BTC-USD', level=1)
self.assertIsInstance(ob1, dict)
self.assertEqual(ob1.keys(), keys)
self.assertIsInstance(ob1['bids'], list)
self.assertEqual(len(ob1['bids']), 1)
self.assertEqual(len(ob1['bids'][0]), 3)
self.assertIsInstance(ob1['asks'], list)
self.assertEqual(len(ob1['asks']), 1)
self.assertEqual(len(ob1['asks'][0]), 3)
ob2 = await self.client.order_book('BTC-USD', level=2)
self.assertIsInstance(ob2, dict)
self.assertEqual(ob2.keys(), keys)
self.assertIsInstance(ob2['bids'], list)
self.assertEqual(len(ob2['bids']), 50)
self.assertEqual(len(ob2['bids'][0]), 3)
self.assertIsInstance(ob2['asks'], list)
self.assertEqual(len(ob2['asks']), 50)
self.assertEqual(len(ob2['asks'][0]), 3)
ob3 = await self.client.order_book('BTC-USD', level=3)
self.assertIsInstance(ob3, dict)
self.assertEqual(ob3.keys(), keys)
self.assertIsInstance(ob3['bids'], list)
self.assertGreater(len(ob3['bids']), 50)
self.assertEqual(len(ob3['bids'][0]), 3)
self.assertIsInstance(ob3['asks'], list)
self.assertGreater(len(ob3['asks']), 50)
self.assertEqual(len(ob3['asks'][0]), 3)
async def test_ticker(self):
keys = {'trade_id', 'price', 'size', 'bid', 'ask', 'volume', 'time'}
tick = await self.client.ticker('BTC-USD')
self.assertIsInstance(tick, dict)
self.assertEqual(tick.keys(), keys)
async def test_trades(self):
keys = {'time', 'trade_id', 'price', 'size', 'side'}
trades, before, after = await self.client.trades('BTC-USD')
self.assertIsInstance(trades, list)
self.assertIsInstance(trades[0], dict)
self.assertIsInstance(before, str)
self.assertIsInstance(after, str)
self.assertEqual(len(trades), 100)
self.assertEqual(trades[0].keys(), keys)
trades, before, after = await self.client.trades('BTC-USD', 5)
self.assertEqual(len(trades), 5)
trades_after, after_after, before_after = await self.client.trades('BTC-USD', 5, after=after)
self.assertLess(trades_after[0]['trade_id'], trades[-1]['trade_id'])
trades_before, after_before, before_before = await self.client.trades('BTC-USD', 5, before=before)
if trades_before:
self.assertGreater(trades_before[-1]['trade_id'], trades[0]['trade_id'])
else:
self.assertIsNone(after_before)
self.assertIsInstance(after_after, str)
await asyncio.sleep(20)
trades_before, after_before, before_before = await self.client.trades('BTC-USD', 5, before=before)
if (trades_before):
self.assertGreater(trades_before[-1]['trade_id'], trades[0]['trade_id'])
else:
self.assertIsNone(after_before)
self.assertIsInstance(after_after, str)
async def test_historic_rates(self):
rates = await self.client.historic_rates('BTC-USD', 900)
self.assertIsInstance(rates, list)
self.assertEqual(len(rates[0]), 6)
self.assertEqual(rates[0][0] - rates[1][0], 900)
end = datetime.utcnow()
start = end - timedelta(days=1)
rates = await self.client.historic_rates('LTC-USD', 3600, start.isoformat(), end.isoformat())
self.assertIsInstance(rates, list)
self.assertEqual(len(rates), 24)
self.assertEqual(len(rates[0]), 6)
self.assertEqual(rates[0][0] - rates[1][0], 3600)
async def test_get_24hour_stats(self):
keys = {'open', 'high', 'low', 'volume', 'last', 'volume_30day'}
stats = await self.client.get_24hour_stats('BTC-USD')
self.assertIsInstance(stats, dict)
self.assertEqual(stats.keys(), keys)
async def test_currencies(self):
keys = {'id', 'name', 'min_size', 'status', 'message', 'details'}
currencies = await self.client.currencies()
self.assertIsInstance(currencies, list)
self.assertGreater(len(currencies), 1)
self.assertIsInstance(currencies[0], dict)
self.assertEqual(currencies[0].keys(), keys)
async def test_server_time(self):
time = await self.client.server_time()
self.assertIsInstance(time, dict)
self.assertIn('iso', time)
self.assertIn('epoch', time)
self.assertIsInstance(time['iso'], str)
self.assertIsInstance(time['epoch'], float)
@skipUnless(TEST_AUTH, "Authentication credentials not provided.")
async def test_accounts(self):
keys = {'id', 'currency', 'balance', 'available', 'hold', 'profile_id'}
accounts = await self.auth_client.accounts()
self.assertIsInstance(accounts, list)
self.assertIsInstance(accounts[0], dict)
self.assertGreaterEqual(accounts[0].keys(), keys)
@skipUnless(TEST_AUTH and TEST_BTC_ACCOUNT, "Auth credentials and test BTC account ID required")
async def test_account(self):
keys = {'id', 'currency', 'balance', 'available', 'hold', 'profile_id'}
account = await self.auth_client.account(TEST_BTC_ACCOUNT)
self.assertIsInstance(account, dict)
self.assertEqual(account.keys(), keys)
self.assertEqual(account['id'], TEST_BTC_ACCOUNT)
self.assertEqual(account['currency'], 'BTC')
@skipUnless(TEST_AUTH and TEST_BTC_ACCOUNT, "Auth credentials and test BTC account ID required")
async def test_account_history(self):
# Assumes market_order works.
orders = []
for i in range(1,6):
size = 0.001 * i
order = await self.auth_client.market_order('buy', 'BTC-USD', size)
orders.append(order)
await asyncio.sleep(0.25)
history, before, after = await self.auth_client.account_history(
TEST_BTC_ACCOUNT, limit=3)
keys = {'amount', 'balance', 'created_at', 'details', 'id', 'type'}
self.assertIsInstance(history, list)
self.assertEqual(len(history), 3)
self.assertEqual(history[0].keys(), keys)
self.assertEqual(history[0]['type'], 'match')
self.assertEqual(history[0]['details']['order_id'], orders[4]['id'])
self.assertEqual(history[0]['details']['product_id'], 'BTC-USD')
after_history, after_before, after_after = await self.auth_client.account_history(TEST_BTC_ACCOUNT, after=after)
self.assertGreater(history[-1]['id'], after_history[0]['id'])
original_history, _, _ = await self.auth_client.account_history(TEST_BTC_ACCOUNT, before=after_before)
self.assertEqual(original_history, history)
@skipUnless(TEST_AUTH and TEST_BTC_ACCOUNT, "Auth credentials and test BTC account ID required")
async def test_holds(self):
# Assumes cancel, cancel_all and limit_order work
await self.auth_client.cancel_all(stop=True)
holds, _, _ = await self.auth_client.holds(TEST_BTC_ACCOUNT)
offset = len(holds)
orders = []
for i in range(1, 8):
size = .001 * i
price = 10000 + i * 1000
order = await self.auth_client.limit_order('sell', 'BTC-USD', price, size)
orders.append(order)
await asyncio.sleep(.25)
holds, _, _ = await self.auth_client.holds(TEST_BTC_ACCOUNT)
keys = {'amount', 'created_at', 'id', 'ref', 'type'}
self.assertEqual(len(holds), 7 + offset)
self.assertEqual(holds[0].keys(), keys)
self.assertEqual(float(holds[0]['amount']), .007)
self.assertEqual(orders[6]['id'], holds[0]['ref'])
holds, before, after = await self.auth_client.holds(TEST_BTC_ACCOUNT,
limit=5)
self.assertEqual(len(holds), 5)
after_holds, after_before, after_after = await self.auth_client.holds(
TEST_BTC_ACCOUNT, after=after)
self.assertEqual(len(after_holds), 2 + offset)
original_holds, _, _ = await self.auth_client.holds(TEST_BTC_ACCOUNT,
before=after_before, limit=5)
self.assertEqual(original_holds, holds)
for order in orders[4:]:
resp = await self.auth_client.cancel(order['id'])
self.assertEqual(resp[0], order['id'])
holds, _, _ = await self.auth_client.holds(TEST_BTC_ACCOUNT)
total = 0
for hold in holds:
if hold['type'] == 'order':
total += float(hold['amount'])
self.assertAlmostEqual(total, 0.01)
@skipUnless(TEST_AUTH, "Auth credentials required")
async def test_limit_order(self):
# Assumes cancel works
for side, base_price in (('buy', 1), ('sell', 50000)):
# default time_in_force
price = base_price + (random.randint(1, 9) / 10)
size = random.randint(1, 10) / 1000
order = await self.auth_client.limit_order(side, 'BTC-USD',
price=price, size=size)
await self.auth_client.cancel(order['id'])
keys = {'created_at', 'executed_value', 'fill_fees', 'filled_size',
'id', 'post_only', 'price', 'product_id', 'settled', 'side',
'size', 'status', 'stp', 'time_in_force', 'type'}
self.assertEqual(order.keys(), keys)
self.assertEqual(float(order['price']), price)
self.assertEqual(float(order['size']), size)
self.assertEqual(order['product_id'], 'BTC-USD')
self.assertEqual(order['side'], side)
self.assertEqual(order['stp'], 'dc')
self.assertEqual(order['type'], 'limit')
self.assertEqual(order['time_in_force'], 'GTC')
# client_oid, explicit time_in_force
price = base_price + (random.randint(1, 9) / 10)
size = random.randint(1, 10) / 1000
client_oid = str(uuid4())
order = await self.auth_client.limit_order(side, 'BTC-USD',
price=price, size=size,
time_in_force='GTC',
client_oid=client_oid)
await self.auth_client.cancel(order['id'])
self.assertEqual(order.keys(), keys)
self.assertEqual(float(order['price']), price)
self.assertEqual(float(order['size']), size)
self.assertEqual(order['product_id'], 'BTC-USD')
self.assertEqual(order['side'], side)
self.assertEqual(order['stp'], 'dc')
self.assertEqual(order['type'], 'limit')
self.assertEqual(order['time_in_force'], 'GTC')
# IOC time_in_force
price = base_price + (random.randint(1, 9) / 10)
size = random.randint(1, 10) / 1000
order = await self.auth_client.limit_order(side, 'BTC-USD',
price=price, size=size,
time_in_force='IOC')
try:
await self.auth_client.cancel(order['id'])
except APIRequestError:
pass
self.assertEqual(order.keys(), keys)
self.assertEqual(float(order['price']), price)
self.assertEqual(float(order['size']), size)
self.assertEqual(order['product_id'], 'BTC-USD')
self.assertEqual(order['side'], side)
self.assertEqual(order['stp'], 'dc')
self.assertEqual(order['type'], 'limit')
self.assertEqual(order['time_in_force'], 'IOC')
# FOK time_in_force
price = base_price + (random.randint(1, 9) / 10)
size = random.randint(1, 10) / 1000
order = await self.auth_client.limit_order(side, 'BTC-USD',
price=price, size=size,
time_in_force='FOK')
if 'reject_reason' in order:
keys = {'created_at', 'executed_value', 'fill_fees', 'filled_size',
'id', 'post_only', 'price', 'product_id', 'reject_reason',
'settled', 'side', 'size', 'status', 'time_in_force',
'type'}
try:
await self.auth_client.cancel(order['id'])
except APIRequestError:
pass
self.assertEqual(order.keys(), keys)
self.assertEqual(float(order['price']), price)
self.assertEqual(float(order['size']), size)
self.assertEqual(order['product_id'], 'BTC-USD')
self.assertEqual(order['side'], side)
self.assertEqual(order['type'], 'limit')
self.assertEqual(order['time_in_force'], 'FOK')
# GTT time_in_force, iterate cancel_after
for ca_str, ca_int in [('min', 60), ('hour', 3600), ('day', 86400)]:
o_time = await self.client.server_time()
o_time = float(o_time['epoch'])
price = base_price + (random.randint(1, 9) / 10)
size = random.randint(1, 10) / 1000
order = await self.auth_client.limit_order(side, 'BTC-USD',
price=price, size=size,
time_in_force='GTT',
cancel_after=ca_str)
await self.auth_client.cancel(order['id'])
keys = {'created_at', 'executed_value', 'expire_time', 'fill_fees',
'filled_size', 'id', 'post_only', 'price', 'product_id', 'settled',
'side', 'size', 'status', 'stp', 'time_in_force', 'type'}
self.assertEqual(order.keys(), keys)
self.assertEqual(float(order['price']), price)
self.assertEqual(float(order['size']), size)
self.assertEqual(order['product_id'], 'BTC-USD')
self.assertEqual(order['side'], side)
self.assertEqual(order['stp'], 'dc')
self.assertEqual(order['type'], 'limit')
self.assertEqual(order['time_in_force'], 'GTT')
e_time = parser.parse(order['expire_time']).timestamp()
self.assertLessEqual(e_time - o_time - ca_int, 1.0)
@skipUnless(TEST_AUTH, "Auth credentials required")
async def test_limit_order_stop(self):
# Assumes cancel works
#stop loss
order = await self.auth_client.limit_order('sell', 'BTC-USD', 2.1, .001,
stop='loss', stop_price=2.5)
try:
await self.auth_client.cancel(order['id'])
except APIRequestError:
pass
keys = {'created_at', 'executed_value', 'fill_fees', 'filled_size',
'id', 'post_only', 'price', 'product_id', 'settled', 'side',
'size', 'status', 'stp', 'time_in_force', 'type', 'stop',
'stop_price'}
self.assertEqual(order.keys(), keys)
self.assertEqual(float(order['price']), 2.1)
self.assertEqual(float(order['size']), .001)
self.assertEqual(order['product_id'], 'BTC-USD')
self.assertEqual(order['side'], 'sell')
self.assertEqual(order['stp'], 'dc')
self.assertEqual(order['type'], 'limit')
self.assertEqual(order['time_in_force'], 'GTC')
self.assertEqual(order['stop'], 'loss')
self.assertEqual(float(order['stop_price']), 2.5)
#stop entry
order = await self.auth_client.limit_order('buy', 'BTC-USD', 9000, .001,
stop='entry', stop_price=9550)
try:
await self.auth_client.cancel(order['id'])
except APIRequestError:
pass
keys = {'created_at', 'executed_value', 'fill_fees', 'filled_size',
'id', 'post_only', 'price', 'product_id', 'settled', 'side',
'size', 'status', 'stp', 'time_in_force', 'type', 'stop',
'stop_price'}
self.assertEqual(order.keys(), keys)
self.assertEqual(float(order['price']), 9000)
self.assertEqual(float(order['size']), .001)
self.assertEqual(order['product_id'], 'BTC-USD')
self.assertEqual(order['side'], 'buy')
self.assertEqual(order['stp'], 'dc')
self.assertEqual(order['type'], 'limit')
self.assertEqual(order['time_in_force'], 'GTC')
self.assertEqual(order['stop'], 'entry')
self.assertEqual(float(order['stop_price']), 9550)
@skipUnless(TEST_AUTH, "Auth credentials required")
async def test_market_order(self):
# Assumes cancel works
for side in ('buy', 'sell'):
# Size
size = random.randint(1, 10) / 1000
order = await self.auth_client.market_order(side, 'BTC-USD', size=size)
keys = {'created_at', 'executed_value', 'fill_fees', 'filled_size',
'funds', 'id', 'post_only', 'product_id', 'settled', 'side',
'size', 'status', 'stp', 'type'}
if side == 'sell':
keys.remove('funds')
self.assertEqual(order.keys(), keys)
self.assertEqual(float(order['size']), size)
self.assertEqual(order['product_id'], 'BTC-USD')
self.assertEqual(order['side'], side)
self.assertEqual(order['stp'], 'dc')
self.assertEqual(order['type'], 'market')
self.assertEqual(order['post_only'], False)
await asyncio.sleep(.5)
# Funds
funds = 100 + random.randint(1, 10)
order = await self.auth_client.market_order(side, 'BTC-USD', funds=funds)
keys = {'created_at', 'executed_value', 'fill_fees', 'filled_size',
'funds', 'id', 'post_only', 'product_id', 'settled', 'side',
'specified_funds', 'status', 'stp', 'type'}
if side == 'sell':
keys.add('size')
self.assertEqual(order.keys(), keys)
self.assertEqual(order['product_id'], 'BTC-USD')
self.assertEqual(order['side'], side)
self.assertEqual(order['stp'], 'dc')
self.assertEqual(float(order['specified_funds']), funds)
self.assertEqual(order['type'], 'market')
self.assertEqual(order['post_only'], False)
await asyncio.sleep(.5)
#client_oid
client_oid = str(uuid4())
order = await self.auth_client.market_order('sell', 'BTC-USD', funds=100,
client_oid=client_oid, stp='dc')
        self.assertEqual(order.keys(), keys)
self.assertEqual(order['product_id'], 'BTC-USD')
self.assertEqual(order['side'], side)
self.assertEqual(order['stp'], 'dc')
self.assertEqual(float(order['funds']), 100)
self.assertEqual(order['type'], 'market')
self.assertEqual(order['post_only'], False)
await asyncio.sleep(.5)
        # This really shouldn't raise an error, but as of 11/18, the Coinbase
        # sandbox won't accept an stp other than dc even though the Coinbase API
        # documentation claims otherwise.
with self.assertRaises(APIRequestError):
order = await self.auth_client.market_order('sell', 'BTC-USD',
funds=100, client_oid=client_oid, stp='cb')
@skipUnless(TEST_AUTH, "Auth credentials required")
async def test_market_order_stop(self):
# Assumes cancel works
# stop loss
order = await self.auth_client.market_order('sell', 'BTC-USD', .001,
stop='loss', stop_price=2.5)
try:
await self.auth_client.cancel(order['id'])
except APIRequestError:
pass
keys = {'created_at', 'executed_value', 'fill_fees', 'filled_size',
'id', 'post_only', 'product_id', 'settled', 'side', 'size',
'status', 'stop', 'stop_price', 'stp', 'type'}
self.assertEqual(order.keys(), keys)
self.assertEqual(float(order['size']), .001)
self.assertEqual(order['product_id'], 'BTC-USD')
self.assertEqual(order['side'], 'sell')
self.assertEqual(order['stp'], 'dc')
self.assertEqual(order['type'], 'market')
self.assertEqual(order['post_only'], False)
self.assertEqual(order['stop'], 'loss')
self.assertEqual(float(order['stop_price']), 2.5)
await asyncio.sleep(0.5)
# stop entry
order = await self.auth_client.market_order('buy', 'BTC-USD', .001,
stop='entry', stop_price=10000)
try:
await self.auth_client.cancel(order['id'])
except APIRequestError:
pass
keys = {'created_at', 'executed_value', 'fill_fees', 'filled_size',
'funds', 'id', 'post_only', 'product_id', 'settled', 'side',
'size', 'status', 'stop', 'stop_price', 'stp', 'type'}
self.assertEqual(order.keys(), keys)
self.assertEqual(float(order['size']), .001)
self.assertEqual(order['product_id'], 'BTC-USD')
self.assertEqual(order['side'], 'buy')
self.assertEqual(order['stp'], 'dc')
self.assertEqual(order['type'], 'market')
self.assertEqual(order['post_only'], False)
self.assertEqual(order['stop'], 'entry')
self.assertEqual(float(order['stop_price']), 10000)
@skipUnless(TEST_AUTH, "Auth credentials required")
async def test_cancel(self):
# Assumes limit_order and market_order work.
l_order = await self.auth_client.limit_order('buy', 'BTC-USD',
price=1, size=1)
m_order = await self.auth_client.market_order('sell', 'BTC-USD', .001)
s_order = await self.auth_client.limit_order('sell', 'BTC-USD', 2, 5,
stop='loss', stop_price=10)
resp = await self.auth_client.cancel(l_order['id'])
self.assertEqual(len(resp), 1)
self.assertEqual(resp[0], l_order['id'])
with self.assertRaises(APIRequestError):
await self.auth_client.cancel(m_order['id'])
resp = await self.auth_client.cancel(s_order['id'])
self.assertEqual(len(resp), 1)
self.assertEqual(resp[0], s_order['id'])
@skipUnless(TEST_AUTH, "Auth credentials required")
async def test_cancel_all(self):
# Assumes market_order, limit_order, and orders work
await self.auth_client.cancel_all(stop=True)
orders, _, _ = await self.auth_client.orders(['open', 'active'])
self.assertEqual(len(orders), 0)
await asyncio.sleep(0.5)
for price in (1, 2, 3):
order = await self.auth_client.limit_order('buy', 'BTC-USD',
price=price, size=1)
await asyncio.sleep(0.5)
for price in (20000, 30000, 40000):
order = await self.auth_client.limit_order('sell', 'LTC-USD',
price=price, size=0.01)
await asyncio.sleep(0.5)
order = await self.auth_client.limit_order('buy', 'ETH-USD', 1, .01)
order = await self.auth_client.market_order('sell', 'LTC-USD', .02,
stop='loss', stop_price=1)
order = await self.auth_client.limit_order('buy', 'LTC-USD', 8000, .01,
stop='entry', stop_price=6500)
order = await self.auth_client.market_order('buy', 'ETH-USD', .03,
stop='entry', stop_price=2000)
orders, _, _ = await self.auth_client.orders(['open', 'active'])
self.assertEqual(len(orders), 10)
resp = await self.auth_client.cancel_all('BTC-USD')
self.assertEqual(len(resp), 3)
await asyncio.sleep(.5)
orders, _, _ = await self.auth_client.orders(['open', 'active'])
self.assertEqual(len(orders), 7)
resp = await self.auth_client.cancel_all()
self.assertEqual(len(resp), 4)
await asyncio.sleep(.5)
orders, _, _ = await self.auth_client.orders(['open', 'active'])
self.assertEqual(len(orders), 3)
resp = await self.auth_client.cancel_all(product_id='LTC-USD', stop=True)
self.assertEqual(len(resp), 2)
await asyncio.sleep(.5)
orders, _, _ = await self.auth_client.orders(['open', 'active'])
self.assertEqual(len(orders), 1)
resp = await self.auth_client.cancel_all(stop=True)
self.assertEqual(len(resp), 1)
await asyncio.sleep(.5)
orders, _, _ = await self.auth_client.orders(['open', 'active'])
self.assertEqual(orders, [])
@skipUnless(TEST_AUTH, "Auth credentials required")
async def test_orders(self):
# Assumes limit_order, market_order, and cancel_all work
await self.auth_client.cancel_all(stop=True)
orders, _, _, = await self.auth_client.orders(['open', 'active'])
self.assertEqual(len(orders), 0)
open_ids = []
for i in range(1, 4):
price = 1 + i /10
size = .001 * i
order = await self.auth_client.limit_order('buy', 'BTC-USD',
price=price, size=size)
open_ids.append(order['id'])
open_orders, _, _ = await self.auth_client.orders('open')
self.assertEqual(len(open_orders), 3)
self.assertEqual(open_orders[0]['id'], open_ids[2])
self.assertEqual(open_orders[1]['id'], open_ids[1])
self.assertEqual(open_orders[2]['id'], open_ids[0])
active_ids = []
for i in range(1,4):
price = i + 1
stop_price = i
size = .01 * i
order = await self.auth_client.limit_order('sell', 'LTC-USD',
price=price, size=size,
stop='loss', stop_price=stop_price)
active_ids.append(order['id'])
active_orders, _, _ = await self.auth_client.orders('active')
self.assertEqual(len(active_orders), 3)
self.assertEqual(active_orders[0]['id'], active_ids[2])
self.assertEqual(active_orders[1]['id'], active_ids[1])
self.assertEqual(active_orders[2]['id'], active_ids[0])
market_ids = []
for i in range(1,4):
size = 0.001 * i
order = await self.auth_client.market_order('buy', 'BTC-USD',
size=0.01)
market_ids.append(order['id'])
await asyncio.sleep(0.25)
all_orders, _, _, = await self.auth_client.orders('all')
self.assertGreaterEqual(len(all_orders), 9)
self.assertEqual(all_orders[0]['id'], market_ids[2])
self.assertEqual(all_orders[1]['id'], market_ids[1])
self.assertEqual(all_orders[2]['id'], market_ids[0])
self.assertEqual(all_orders[3]['id'], active_ids[2])
oa_orders, _, _, = await self.auth_client.orders(['open', 'active'])
self.assertGreaterEqual(len(all_orders), 9)
self.assertEqual(oa_orders[0]['id'], active_ids[2])
self.assertEqual(oa_orders[1]['id'], active_ids[1])
self.assertEqual(oa_orders[2]['id'], active_ids[0])
self.assertEqual(oa_orders[3]['id'], open_ids[2])
self.assertEqual(oa_orders[4]['id'], open_ids[1])
self.assertEqual(oa_orders[5]['id'], open_ids[0])
oa_btc_orders, _, _ = await self.auth_client.orders(['open', 'active'],
'BTC-USD')
self.assertEqual(oa_btc_orders[0]['id'], open_ids[2])
self.assertEqual(oa_btc_orders[1]['id'], open_ids[1])
self.assertEqual(oa_btc_orders[2]['id'], open_ids[0])
orders, before, after = await self.auth_client.orders('all', limit=5)
self.assertEqual(len(orders), 5)
self.assertEqual(orders[0]['id'], market_ids[2])
self.assertEqual(orders[4]['id'], active_ids[1])
after_orders, after_before, after_after = await self.auth_client.orders(
'all', after=after)
self.assertEqual(after_orders[0]['id'], active_ids[0])
original_orders, _, _ = await self.auth_client.orders('all', before=after_before)
self.assertEqual(original_orders, orders)
await self.auth_client.cancel_all(stop=True)
await asyncio.sleep(.5)
oa_orders, _, _, = await self.auth_client.orders(['open', 'active'])
self.assertEqual(len(oa_orders), 0)
@skipUnless(TEST_AUTH, "Auth credentials required")
async def test_get_order(self):
# Assumes limit_order and market_order work
ids = []
for i in range(1, 4):
price = 1 + i/10
size = .001 * i
order = await self.auth_client.limit_order('buy', 'BTC-USD',
price=price, size=size)
ids.append(order['id'])
for i in range(1, 4):
size = .001 * i
order = await self.auth_client.market_order('sell', 'BTC-USD',
size=size)
ids.append(order['id'])
oid = random.choice(ids)
order = await self.auth_client.get_order(oid)
self.assertEqual(order['id'], oid)
oid = random.choice(ids)
order = await self.auth_client.get_order(oid)
self.assertEqual(order['id'], oid)
@skipUnless(TEST_AUTH, "Auth credentials required")
async def test_fills(self):
# Assumes market_order works
orders = []
for i in range(1, 5):
btc_size = .001 * i
ltc_size = .01 * i
side = random.choice(['buy', 'sell'])
order = await self.auth_client.market_order(side, 'BTC-USD', size=btc_size)
orders.append(order)
await asyncio.sleep(.25)
order = await self.auth_client.market_order(side, 'LTC-USD', size=ltc_size)
orders.append(order)
await asyncio.sleep(.25)
fills, _, _ = await self.auth_client.fills(product_id='BTC-USD')
keys = {'created_at', 'fee', 'liquidity', 'order_id', 'price',
'product_id', 'profile_id', 'settled', 'side', 'size',
'trade_id', 'usd_volume', 'user_id'}
self.assertGreaterEqual(len(fills), 4)
self.assertEqual(fills[0]['order_id'], orders[6]['id'])
fills, before, after = await self.auth_client.fills(product_id='LTC-USD', limit=3)
self.assertEqual(len(fills), 3)
self.assertEqual(fills[0]['order_id'], orders[7]['id'])
after_fills, after_before, after_after = await self.auth_client.fills(
product_id='LTC-USD', after=after)
self.assertLess(after_fills[0]['trade_id'], fills[-1]['trade_id'])
original_fills, _, _ = await self.auth_client.fills(product_id='LTC-USD',
before=after_before)
self.assertEqual(original_fills, fills)
order = random.choice(orders)
fills, _, _ = await self.auth_client.fills(order_id=order['id'])
self.assertGreaterEqual(len(fills), 1)
total = 0
for fill in fills:
total += float(fill['size'])
self.assertAlmostEqual(total, float(order['size']))
@skipUnless(TEST_AUTH, "Auth credentials required")
async def test_payment_methods(self):
keys = {'id', 'type', 'name', 'currency', 'primary_buy', 'primary_sell',
'allow_buy', 'allow_sell', 'allow_deposit', 'allow_withdraw',
'limits'}
methods = await self.auth_client.payment_methods()
self.assertIsInstance(methods, list)
self.assertIsInstance(methods[0], dict)
self.assertGreaterEqual(methods[0].keys(), keys)
@skipUnless(TEST_AUTH, "Auth credentials required")
async def test_coinbase_accounts(self):
keys = {'id', 'name', 'balance', 'currency', 'type', 'primary', 'active'}
accounts = await self.auth_client.coinbase_accounts()
self.assertIsInstance(accounts, list)
self.assertIsInstance(accounts[0], dict)
self.assertGreaterEqual(accounts[0].keys(), keys)
@expectedFailure
@skipUnless(TEST_AUTH and TEST_USD_ACCOUNT and TEST_USD_PAYMENT_METHOD,
"Auth credentials, test USD account, and test USD payment method required.")
async def test_deposit_payment_method(self):
# As of 11/25/18 this call returns a 401 error:
# "refresh of oauth token failed"
resp = await self.auth_client.deposit_payment_method(1500, 'USD',
TEST_USD_PAYMENT_METHOD)
keys = {'amount', 'currency', 'id', 'payout_at'}
self.assertIsInstance(resp, dict)
self.assertEqual(resp.keys(), keys)
self.assertEqual(float(resp['amount']), 1500.0)
self.assertEqual(resp['currency'], 'USD')
@skipUnless(TEST_AUTH and TEST_USD_ACCOUNT and TEST_USD_COINBASE_ACCOUNT,
"Auth credentials, test USD account, and test usd Coinbase account required")
    async def test_deposit_coinbase(self):
resp = await self.auth_client.deposit_coinbase(150, 'USD',
TEST_USD_COINBASE_ACCOUNT)
keys = {'amount', 'currency', 'id'}
self.assertIsInstance(resp, dict)
self.assertEqual(resp.keys(), keys)
self.assertEqual(resp['currency'], 'USD')
self.assertEqual(float(resp['amount']), 150.0)
@expectedFailure
@skipUnless(TEST_AUTH and TEST_USD_ACCOUNT and TEST_USD_PAYMENT_METHOD,
"Auth credentials, test USD account, and test USD payment method required.")
async def test_withdraw_payment_method(self):
# As of 11/25/18 this call returns a 401 error:
# "refresh of oauth token failed"
resp = await self.auth_client.withdraw_payment_method(1500, 'USD',
TEST_USD_PAYMENT_METHOD)
keys = {'amount', 'currency', 'id', 'payout_at'}
self.assertIsInstance(resp, dict)
self.assertEqual(resp.keys(), keys)
self.assertEqual(float(resp['amount']), 1500.0)
self.assertEqual(resp['currency'], 'USD')
@skipUnless(TEST_AUTH and TEST_USD_ACCOUNT and TEST_USD_COINBASE_ACCOUNT,
"Auth credentials, test USD account, and test usd Coinbase account required")
async def test_withdraw_cointbase(self):
resp = await self.auth_client.withdraw_coinbase(75, 'USD',
TEST_USD_COINBASE_ACCOUNT)
keys = {'amount', 'currency', 'id'}
self.assertIsInstance(resp, dict)
self.assertEqual(resp.keys(), keys)
self.assertEqual(resp['currency'], 'USD')
self.assertEqual(float(resp['amount']), 75.0)
@expectedFailure
@skipUnless(TEST_AUTH, "Auth credentials required")
async def test_withdraw_crypto(self):
# As of 11/25/18 this call returns a 401 error:
# "refresh of oauth token failed - The funds were transferred to
# Coinbase for processing, but failed to withdraw to
# 0x5ad5769cd04681FeD900BCE3DDc877B50E83d469. Please manually withdraw
# from Coinbase."
address = "0x5ad5769cd04681FeD900BCE3DDc877B50E83d469"
resp = await self.auth_client.withdraw_crypto(.001, 'LTC', address)
@expectedFailure
@skipUnless(TEST_AUTH, "Auth credentials required")
async def test_stablecoin_conversion(self):
# As of 11/25/18 this call returns a 400 error:
# "USDC is not enabled for your account"
resp = await self.auth_client.stablecoin_conversion('USD', 'USDC', 100)
keys = {'amount', 'id', 'from', 'from_account_id', 'to', 'to_account_id'}
self.assertIsInstance(resp, dict)
self.assertEqual(resp.keys(), keys)
self.assertEqual(float(resp['amount']), 100.0)
self.assertEqual(resp['from'], 'USD')
self.assertEqual(resp['to'], 'USDC')
@skipUnless(TEST_AUTH and TEST_BTC_ACCOUNT, "Auth credentials and test BTC account ID required")
async def test_reports(self):
# Combines tests for create_report and report_status
orders = []
for i in range(1, 4):
size = .001 * i
side = random.choice(['buy', 'sell'])
order = await self.auth_client.market_order(side, 'BTC-USD', size=size)
orders.append(order)
await asyncio.sleep(.25)
keys = {'id', 'type', 'status'}
end = datetime.utcnow()
start = end - timedelta(days=1)
end = end.isoformat()
start = start.isoformat()
resp1 = await self.auth_client.create_report('account', start, end,
account_id=TEST_BTC_ACCOUNT)
self.assertIsInstance(resp1, dict)
self.assertEqual(resp1.keys(), keys)
self.assertEqual(resp1['type'], 'account')
resp2 = await self.auth_client.create_report('fills', start, end,
product_id='BTC-USD')
self.assertIsInstance(resp2, dict)
self.assertEqual(resp2.keys(), keys)
self.assertEqual(resp2['type'], 'fills')
resp3 = await self.auth_client.create_report('fills', start, end,
product_id='BTC-USD', report_format='csv',
email='[email protected]')
self.assertIsInstance(resp3, dict)
self.assertEqual(resp3.keys(), keys)
self.assertEqual(resp3['type'], 'fills')
await asyncio.sleep(10)
status1 = await self.auth_client.report_status(resp1['id'])
keys = {'completed_at', 'created_at', 'expires_at', 'file_url', 'id',
'params', 'status', 'type', 'user_id'}
statuses = {'pending', 'creating', 'ready'}
self.assertIsInstance(status1, dict)
self.assertEqual(status1.keys(), keys)
self.assertEqual(status1['id'], resp1['id'])
self.assertEqual(status1['type'], 'account')
self.assertIn(status1['status'], statuses)
self.assertEqual(status1['params']['start_date'], start)
self.assertEqual(status1['params']['end_date'], end)
self.assertEqual(status1['params']['format'], 'pdf')
self.assertEqual(status1['params']['account_id'], TEST_BTC_ACCOUNT)
status2 = await self.auth_client.report_status(resp2['id'])
self.assertIsInstance(status2, dict)
self.assertEqual(status2.keys(), keys)
self.assertEqual(status2['id'], resp2['id'])
self.assertEqual(status2['type'], 'fills')
self.assertIn(status2['status'], statuses)
self.assertEqual(status2['params']['start_date'], start)
self.assertEqual(status2['params']['end_date'], end)
self.assertEqual(status2['params']['format'], 'pdf')
self.assertEqual(status2['params']['product_id'], 'BTC-USD')
status3 = await self.auth_client.report_status(resp3['id'])
self.assertIsInstance(status3, dict)
self.assertEqual(status3.keys(), keys)
self.assertEqual(status3['id'], resp3['id'])
self.assertEqual(status3['type'], 'fills')
self.assertIn(status3['status'], statuses)
self.assertEqual(status3['params']['start_date'], start)
self.assertEqual(status3['params']['end_date'], end)
self.assertEqual(status3['params']['email'], '[email protected]')
self.assertEqual(status3['params']['format'], 'csv')
@skipUnless(TEST_AUTH, "Auth credentials required")
async def test_trailing_volume(self):
tv = await self.auth_client.trailing_volume()
keys = {'product_id', 'volume', 'exchange_volume', 'recorded_at'}
self.assertIsInstance(tv, list)
self.assertIsInstance(tv[0], dict)
self.assertEqual(tv[0].keys(), keys)
|
[] |
[] |
[
"SECRET",
"TEST_BTC_ACCOUNT",
"TEST_USD_ACCOUNT",
"KEY",
"PASSPHRASE",
"TEST_USD_COINBASE_ACCOUNT",
"TEST_USD_PAYMENT_METHOD"
] |
[]
|
["SECRET", "TEST_BTC_ACCOUNT", "TEST_USD_ACCOUNT", "KEY", "PASSPHRASE", "TEST_USD_COINBASE_ACCOUNT", "TEST_USD_PAYMENT_METHOD"]
|
python
| 7 | 0 | |
bin/basenji_sad_ref.py
|
#!/usr/bin/env python
# Copyright 2017 Calico LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import print_function
from optparse import OptionParser
import gc
import pdb
import pickle
import os
from queue import Queue
import sys
from threading import Thread
import time
import h5py
import numpy as np
import pandas as pd
import pysam
import tensorflow as tf
import basenji.dna_io as dna_io
import basenji.params as params
import basenji.seqnn as seqnn
import basenji.vcf as bvcf
from basenji.stream import PredStream
from basenji_sad import initialize_output_h5
'''
basenji_sad_ref.py
Compute SNP Activity Difference (SAD) scores for SNPs in a VCF file.
This version saves computation by clustering nearby SNPs in order to
make a single reference prediction for several SNPs.
'''
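# A minimal usage sketch of the clustering idea described above (illustrative
# only; the VCF path and seq_len value are hypothetical assumptions):
#
#   snps = bvcf.vcf_snps('example.vcf', require_sorted=True)
#   clusters = cluster_snps(snps, seq_len=131072, center_pct=0.25)
#   # each cluster then needs a single reference prediction plus one alternate
#   # prediction per SNP, rather than one reference prediction per SNP.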
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <params_file> <model_file> <vcf_file>'
parser = OptionParser(usage)
parser.add_option('-c', dest='center_pct',
default=0.25, type='float',
help='Require clustered SNPs lie in center region [Default: %default]')
parser.add_option('-f', dest='genome_fasta',
default='%s/data/hg19.fa' % os.environ['BASENJIDIR'],
help='Genome FASTA for sequences [Default: %default]')
parser.add_option('--flip', dest='flip_ref',
default=False, action='store_true',
help='Flip reference/alternate alleles when simple [Default: %default]')
parser.add_option('--local', dest='local',
default=1024, type='int',
help='Local SAD score [Default: %default]')
parser.add_option('-n', dest='norm_file',
default=None,
help='Normalize SAD scores')
parser.add_option('-o', dest='out_dir',
default='sad',
help='Output directory for tables and plots [Default: %default]')
parser.add_option('-p', dest='processes',
default=None, type='int',
help='Number of processes, passed by multi script')
parser.add_option('--pseudo', dest='log_pseudo',
default=1, type='float',
help='Log2 pseudocount [Default: %default]')
parser.add_option('--rc', dest='rc',
default=False, action='store_true',
help='Average forward and reverse complement predictions [Default: %default]')
parser.add_option('--shifts', dest='shifts',
default='0', type='str',
help='Ensemble prediction shifts [Default: %default]')
parser.add_option('--stats', dest='sad_stats',
default='SAD',
help='Comma-separated list of stats to save. [Default: %default]')
parser.add_option('-t', dest='targets_file',
default=None, type='str',
help='File specifying target indexes and labels in table format')
parser.add_option('--ti', dest='track_indexes',
default=None, type='str',
help='Comma-separated list of target indexes to output BigWig tracks')
parser.add_option('-u', dest='penultimate',
default=False, action='store_true',
help='Compute SED in the penultimate layer [Default: %default]')
(options, args) = parser.parse_args()
if len(args) == 3:
# single worker
params_file = args[0]
model_file = args[1]
vcf_file = args[2]
elif len(args) == 5:
# multi worker
options_pkl_file = args[0]
params_file = args[1]
model_file = args[2]
vcf_file = args[3]
worker_index = int(args[4])
# load options
options_pkl = open(options_pkl_file, 'rb')
options = pickle.load(options_pkl)
options_pkl.close()
# update output directory
options.out_dir = '%s/job%d' % (options.out_dir, worker_index)
else:
parser.error('Must provide parameters file, model file, and QTL VCF file')
if not os.path.isdir(options.out_dir):
os.mkdir(options.out_dir)
if options.track_indexes is None:
options.track_indexes = []
else:
options.track_indexes = [int(ti) for ti in options.track_indexes.split(',')]
if not os.path.isdir('%s/tracks' % options.out_dir):
os.mkdir('%s/tracks' % options.out_dir)
options.shifts = [int(shift) for shift in options.shifts.split(',')]
options.sad_stats = options.sad_stats.split(',')
#################################################################
# read parameters and collect target information
job = params.read_job_params(params_file, require=['seq_length','num_targets'])
if options.targets_file is None:
target_ids = ['t%d' % ti for ti in range(job['num_targets'])]
target_labels = ['']*len(target_ids)
target_subset = None
else:
targets_df = pd.read_table(options.targets_file, index_col=0)
target_ids = targets_df.identifier
target_labels = targets_df.description
target_subset = targets_df.index
if len(target_subset) == job['num_targets']:
target_subset = None
#################################################################
# load SNPs
# read sorted SNPs from VCF
snps = bvcf.vcf_snps(vcf_file, require_sorted=True, flip_ref=options.flip_ref,
validate_ref_fasta=options.genome_fasta)
# filter for worker SNPs
if options.processes is not None:
worker_bounds = np.linspace(0, len(snps), options.processes+1, dtype='int')
snps = snps[worker_bounds[worker_index]:worker_bounds[worker_index+1]]
num_snps = len(snps)
# cluster SNPs by position
snp_clusters = cluster_snps(snps, job['seq_length'], options.center_pct)
# delimit sequence boundaries
[sc.delimit(job['seq_length']) for sc in snp_clusters]
# open genome FASTA
genome_open = pysam.Fastafile(options.genome_fasta)
# make SNP sequence generator
def snp_gen():
for sc in snp_clusters:
snp_1hot_list = sc.get_1hots(genome_open)
for snp_1hot in snp_1hot_list:
yield {'sequence':snp_1hot}
snp_types = {'sequence': tf.float32}
snp_shapes = {'sequence': tf.TensorShape([tf.Dimension(job['seq_length']),
tf.Dimension(4)])}
dataset = tf.data.Dataset.from_generator(snp_gen,
output_types=snp_types,
output_shapes=snp_shapes)
dataset = dataset.batch(job['batch_size'])
dataset = dataset.prefetch(2*job['batch_size'])
# dataset = dataset.apply(tf.contrib.data.prefetch_to_device('/device:GPU:0'))
iterator = dataset.make_one_shot_iterator()
data_ops = iterator.get_next()
#################################################################
# setup model
# build model
t0 = time.time()
model = seqnn.SeqNN()
model.build_sad(job, data_ops,
ensemble_rc=options.rc, ensemble_shifts=options.shifts,
embed_penultimate=options.penultimate, target_subset=target_subset)
print('Model building time %f' % (time.time() - t0), flush=True)
if options.penultimate:
# labels become inappropriate
target_ids = ['']*model.hp.cnn_filters[-1]
target_labels = target_ids
# read target normalization factors
target_norms = np.ones(len(target_labels))
if options.norm_file is not None:
ti = 0
for line in open(options.norm_file):
target_norms[ti] = float(line.strip())
ti += 1
num_targets = len(target_ids)
#################################################################
# setup output
snp_flips = np.array([snp.flipped for snp in snps], dtype='bool')
sad_out = initialize_output_h5(options.out_dir, options.sad_stats,
snps, target_ids, target_labels)
snp_threads = []
snp_queue = Queue()
for i in range(1):
sw = SNPWorker(snp_queue, sad_out, options.sad_stats, options.log_pseudo)
sw.start()
snp_threads.append(sw)
#################################################################
# predict SNP scores, write output
# initialize saver
saver = tf.train.Saver()
with tf.Session() as sess:
# load variables into session
saver.restore(sess, model_file)
# initialize predictions stream
preds_stream = PredStream(sess, model, 32)
# predictions index
pi = 0
# SNP index
si = 0
for snp_cluster in snp_clusters:
ref_preds = preds_stream[pi]
pi += 1
for snp in snp_cluster.snps:
# print(snp, flush=True)
alt_preds = preds_stream[pi]
pi += 1
# queue SNP
if snp_flips[si]:
snp_queue.put((alt_preds, ref_preds, si))
else:
snp_queue.put((ref_preds, alt_preds, si))
# update SNP index
si += 1
# finish queue
print('Waiting for threads to finish.', flush=True)
snp_queue.join()
# close genome
genome_open.close()
###################################################
# compute SAD distributions across variants
# define percentiles
d_fine = 0.001
d_coarse = 0.01
percentiles_neg = np.arange(d_fine, 0.1, d_fine)
percentiles_base = np.arange(0.1, 0.9, d_coarse)
percentiles_pos = np.arange(0.9, 1, d_fine)
percentiles = np.concatenate([percentiles_neg, percentiles_base, percentiles_pos])
sad_out.create_dataset('percentiles', data=percentiles)
pct_len = len(percentiles)
for sad_stat in options.sad_stats:
sad_stat_pct = '%s_pct' % sad_stat
# compute
sad_pct = np.percentile(sad_out[sad_stat], 100*percentiles, axis=0).T
sad_pct = sad_pct.astype('float16')
# save
sad_out.create_dataset(sad_stat_pct, data=sad_pct, dtype='float16')
sad_out.close()
def cluster_snps(snps, seq_len, center_pct):
"""Cluster a sorted list of SNPs into regions that will satisfy
the required center_pct."""
valid_snp_distance = int(seq_len*center_pct)
snp_clusters = []
cluster_chr = None
for snp in snps:
if snp.chr == cluster_chr and snp.pos < cluster_pos0 + valid_snp_distance:
# append to latest cluster
snp_clusters[-1].add_snp(snp)
else:
# initialize new cluster
snp_clusters.append(SNPCluster())
snp_clusters[-1].add_snp(snp)
cluster_chr = snp.chr
cluster_pos0 = snp.pos
return snp_clusters
class SNPCluster:
def __init__(self):
self.snps = []
self.chr = None
self.start = None
self.end = None
def add_snp(self, snp):
self.snps.append(snp)
def delimit(self, seq_len):
positions = [snp.pos for snp in self.snps]
pos_min = np.min(positions)
pos_max = np.max(positions)
pos_mid = (pos_min + pos_max) // 2
self.chr = self.snps[0].chr
self.start = pos_mid - seq_len//2
self.end = self.start + seq_len
for snp in self.snps:
snp.seq_pos = snp.pos - 1 - self.start
def get_1hots(self, genome_open):
seqs1_list = []
# extract reference
if self.start < 0:
ref_seq = 'N'*(-self.start) + genome_open.fetch(self.chr, 0, self.end).upper()
else:
ref_seq = genome_open.fetch(self.chr, self.start, self.end).upper()
# extend to full length
if len(ref_seq) < self.end - self.start:
ref_seq += 'N'*(self.end-self.start-len(ref_seq))
# verify reference alleles
for snp in self.snps:
ref_n = len(snp.ref_allele)
ref_snp = ref_seq[snp.seq_pos:snp.seq_pos+ref_n]
if snp.ref_allele != ref_snp:
print('ERROR: %s does not match reference %s' % (snp, ref_snp), file=sys.stderr)
exit(1)
# 1 hot code reference sequence
ref_1hot = dna_io.dna_1hot(ref_seq)
seqs1_list = [ref_1hot]
# make alternative 1 hot coded sequences
# (assuming SNP is 1-based indexed)
for snp in self.snps:
alt_1hot = make_alt_1hot(ref_1hot, snp.seq_pos, snp.ref_allele, snp.alt_alleles[0])
seqs1_list.append(alt_1hot)
return seqs1_list
class SNPWorker(Thread):
"""Compute summary statistics and write to HDF."""
def __init__(self, snp_queue, sad_out, stats, log_pseudo=1):
Thread.__init__(self)
self.queue = snp_queue
self.daemon = True
self.sad_out = sad_out
self.stats = stats
self.log_pseudo = log_pseudo
def run(self):
while True:
# unload predictions
ref_preds, alt_preds, szi = self.queue.get()
# sum across length
ref_preds_sum = ref_preds.sum(axis=0, dtype='float64')
alt_preds_sum = alt_preds.sum(axis=0, dtype='float64')
# compare reference to alternative via mean subtraction
if 'SAD' in self.stats:
sad = alt_preds_sum - ref_preds_sum
self.sad_out['SAD'][szi,:] = sad.astype('float16')
# compare reference to alternative via mean log division
if 'SAR' in self.stats:
sar = np.log2(alt_preds_sum + self.log_pseudo) \
- np.log2(ref_preds_sum + self.log_pseudo)
self.sad_out['SAR'][szi,:] = sar.astype('float16')
# compare geometric means
if 'geoSAD' in self.stats:
sar_vec = np.log2(alt_preds.astype('float64') + self.log_pseudo) \
- np.log2(ref_preds.astype('float64') + self.log_pseudo)
geo_sad = sar_vec.sum(axis=0)
self.sad_out['geoSAD'][szi,:] = geo_sad.astype('float16')
if szi % 32 == 0:
gc.collect()
# communicate finished task
self.queue.task_done()
def make_alt_1hot(ref_1hot, snp_seq_pos, ref_allele, alt_allele):
"""Return alternative allele one hot coding."""
ref_n = len(ref_allele)
alt_n = len(alt_allele)
# copy reference
alt_1hot = np.copy(ref_1hot)
if alt_n == ref_n:
# SNP
dna_io.hot1_set(alt_1hot, snp_seq_pos, alt_allele)
elif ref_n > alt_n:
# deletion
delete_len = ref_n - alt_n
assert (ref_allele[0] == alt_allele[0])
dna_io.hot1_delete(alt_1hot, snp_seq_pos+1, delete_len)
else:
# insertion
assert (ref_allele[0] == alt_allele[0])
dna_io.hot1_insert(alt_1hot, snp_seq_pos+1, alt_allele[1:])
return alt_1hot
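# Illustrative calls for the three cases handled above (the allele strings and
# position are assumptions, not taken from any real VCF):
#
#   make_alt_1hot(ref_1hot, 10, 'A', 'C')    # substitution -> dna_io.hot1_set
#   make_alt_1hot(ref_1hot, 10, 'AT', 'A')   # deletion     -> dna_io.hot1_delete
#   make_alt_1hot(ref_1hot, 10, 'A', 'AT')   # insertion    -> dna_io.hot1_insert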
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
|
[] |
[] |
[
"BASENJIDIR"
] |
[]
|
["BASENJIDIR"]
|
python
| 1 | 0 | |
cmd/serv.go
|
// Copyright 2014 The Gogs Authors. All rights reserved.
// Copyright 2016 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package cmd
import (
"fmt"
"net/http"
"net/url"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
"time"
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/pprof"
"code.gitea.io/gitea/modules/private"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/services/lfs"
"github.com/golang-jwt/jwt"
"github.com/kballard/go-shellquote"
"github.com/urfave/cli"
)
const (
lfsAuthenticateVerb = "git-lfs-authenticate"
)
// CmdServ represents the available serv sub-command.
var CmdServ = cli.Command{
Name: "serv",
Usage: "This command should only be called by SSH shell",
Description: `Serv provide access auth for repositories`,
Action: runServ,
Flags: []cli.Flag{
cli.BoolFlag{
Name: "enable-pprof",
},
cli.BoolFlag{
Name: "debug",
},
},
}
func setup(logPath string, debug bool) {
_ = log.DelLogger("console")
if debug {
_ = log.NewLogger(1000, "console", "console", `{"level":"trace","stacktracelevel":"NONE","stderr":true}`)
} else {
_ = log.NewLogger(1000, "console", "console", `{"level":"fatal","stacktracelevel":"NONE","stderr":true}`)
}
setting.NewContext()
if debug {
setting.RunMode = "dev"
}
}
var (
allowedCommands = map[string]models.AccessMode{
"git-upload-pack": models.AccessModeRead,
"git-upload-archive": models.AccessModeRead,
"git-receive-pack": models.AccessModeWrite,
lfsAuthenticateVerb: models.AccessModeNone,
}
alphaDashDotPattern = regexp.MustCompile(`[^\w-\.]`)
)
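// Illustrative only (the command string below is a hypothetical example of what
// the SSH server sets, not something produced by this file): for
//
//	SSH_ORIGINAL_COMMAND="git-upload-pack 'owner/repo.git'"
//
// the verb "git-upload-pack" maps to models.AccessModeRead via allowedCommands,
// and "owner/repo.git" is lower-cased and split into username "owner" and
// reponame "repo" before the access check.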
func fail(userMessage, logMessage string, args ...interface{}) error {
// There appears to be a chance to cause a zombie process and failure to read the Exit status
// if nothing is outputted on stdout.
fmt.Fprintln(os.Stdout, "")
fmt.Fprintln(os.Stderr, "Gitea:", userMessage)
if len(logMessage) > 0 {
if !setting.IsProd() {
fmt.Fprintf(os.Stderr, logMessage+"\n", args...)
}
}
ctx, cancel := installSignals()
defer cancel()
if len(logMessage) > 0 {
_ = private.SSHLog(ctx, true, fmt.Sprintf(logMessage+": ", args...))
}
return cli.NewExitError(fmt.Sprintf("Gitea: %s", userMessage), 1)
}
func runServ(c *cli.Context) error {
ctx, cancel := installSignals()
defer cancel()
// FIXME: This needs to be internationalised
setup("serv.log", c.Bool("debug"))
if setting.SSH.Disabled {
println("Gitea: SSH has been disabled")
return nil
}
if len(c.Args()) < 1 {
if err := cli.ShowSubcommandHelp(c); err != nil {
fmt.Printf("error showing subcommand help: %v\n", err)
}
return nil
}
keys := strings.Split(c.Args()[0], "-")
if len(keys) != 2 || keys[0] != "key" {
return fail("Key ID format error", "Invalid key argument: %s", c.Args()[0])
}
keyID, err := strconv.ParseInt(keys[1], 10, 64)
if err != nil {
return fail("Key ID format error", "Invalid key argument: %s", c.Args()[1])
}
cmd := os.Getenv("SSH_ORIGINAL_COMMAND")
if len(cmd) == 0 {
key, user, err := private.ServNoCommand(ctx, keyID)
if err != nil {
return fail("Internal error", "Failed to check provided key: %v", err)
}
switch key.Type {
case models.KeyTypeDeploy:
println("Hi there! You've successfully authenticated with the deploy key named " + key.Name + ", but Gitea does not provide shell access.")
case models.KeyTypePrincipal:
println("Hi there! You've successfully authenticated with the principal " + key.Content + ", but Gitea does not provide shell access.")
default:
println("Hi there, " + user.Name + "! You've successfully authenticated with the key named " + key.Name + ", but Gitea does not provide shell access.")
}
println("If this is unexpected, please log in with password and setup Gitea under another user.")
return nil
} else if c.Bool("debug") {
log.Debug("SSH_ORIGINAL_COMMAND: %s", os.Getenv("SSH_ORIGINAL_COMMAND"))
}
words, err := shellquote.Split(cmd)
if err != nil {
return fail("Error parsing arguments", "Failed to parse arguments: %v", err)
}
if len(words) < 2 {
if git.CheckGitVersionAtLeast("2.29") == nil {
// for AGit Flow
if cmd == "ssh_info" {
fmt.Print(`{"type":"gitea","version":1}`)
return nil
}
}
return fail("Too few arguments", "Too few arguments in cmd: %s", cmd)
}
verb := words[0]
repoPath := words[1]
if repoPath[0] == '/' {
repoPath = repoPath[1:]
}
var lfsVerb string
if verb == lfsAuthenticateVerb {
if !setting.LFS.StartServer {
return fail("Unknown git command", "LFS authentication request over SSH denied, LFS support is disabled")
}
if len(words) > 2 {
lfsVerb = words[2]
}
}
// LowerCase and trim the repoPath as that's how they are stored.
repoPath = strings.ToLower(strings.TrimSpace(repoPath))
rr := strings.SplitN(repoPath, "/", 2)
if len(rr) != 2 {
return fail("Invalid repository path", "Invalid repository path: %v", repoPath)
}
username := strings.ToLower(rr[0])
reponame := strings.ToLower(strings.TrimSuffix(rr[1], ".git"))
if alphaDashDotPattern.MatchString(reponame) {
return fail("Invalid repo name", "Invalid repo name: %s", reponame)
}
if setting.EnablePprof || c.Bool("enable-pprof") {
if err := os.MkdirAll(setting.PprofDataPath, os.ModePerm); err != nil {
return fail("Error while trying to create PPROF_DATA_PATH", "Error while trying to create PPROF_DATA_PATH: %v", err)
}
stopCPUProfiler, err := pprof.DumpCPUProfileForUsername(setting.PprofDataPath, username)
if err != nil {
return fail("Internal Server Error", "Unable to start CPU profile: %v", err)
}
defer func() {
stopCPUProfiler()
err := pprof.DumpMemProfileForUsername(setting.PprofDataPath, username)
if err != nil {
_ = fail("Internal Server Error", "Unable to dump Mem Profile: %v", err)
}
}()
}
requestedMode, has := allowedCommands[verb]
if !has {
return fail("Unknown git command", "Unknown git command %s", verb)
}
if verb == lfsAuthenticateVerb {
if lfsVerb == "upload" {
requestedMode = models.AccessModeWrite
} else if lfsVerb == "download" {
requestedMode = models.AccessModeRead
} else {
return fail("Unknown LFS verb", "Unknown lfs verb %s", lfsVerb)
}
}
results, err := private.ServCommand(ctx, keyID, username, reponame, requestedMode, verb, lfsVerb)
if err != nil {
if private.IsErrServCommand(err) {
errServCommand := err.(private.ErrServCommand)
if errServCommand.StatusCode != http.StatusInternalServerError {
return fail("Unauthorized", "%s", errServCommand.Error())
}
return fail("Internal Server Error", "%s", errServCommand.Error())
}
return fail("Internal Server Error", "%s", err.Error())
}
os.Setenv(models.EnvRepoIsWiki, strconv.FormatBool(results.IsWiki))
os.Setenv(models.EnvRepoName, results.RepoName)
os.Setenv(models.EnvRepoUsername, results.OwnerName)
os.Setenv(models.EnvPusherName, results.UserName)
os.Setenv(models.EnvPusherEmail, results.UserEmail)
os.Setenv(models.EnvPusherID, strconv.FormatInt(results.UserID, 10))
os.Setenv(models.EnvRepoID, strconv.FormatInt(results.RepoID, 10))
os.Setenv(models.EnvPRID, fmt.Sprintf("%d", 0))
os.Setenv(models.EnvIsDeployKey, fmt.Sprintf("%t", results.IsDeployKey))
os.Setenv(models.EnvKeyID, fmt.Sprintf("%d", results.KeyID))
os.Setenv(models.EnvAppURL, setting.AppURL)
// LFS token authentication
if verb == lfsAuthenticateVerb {
url := fmt.Sprintf("%s%s/%s.git/info/lfs", setting.AppURL, url.PathEscape(results.OwnerName), url.PathEscape(results.RepoName))
now := time.Now()
claims := lfs.Claims{
StandardClaims: jwt.StandardClaims{
ExpiresAt: now.Add(setting.LFS.HTTPAuthExpiry).Unix(),
NotBefore: now.Unix(),
},
RepoID: results.RepoID,
Op: lfsVerb,
UserID: results.UserID,
}
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
// Sign and get the complete encoded token as a string using the secret
tokenString, err := token.SignedString(setting.LFS.JWTSecretBytes)
if err != nil {
return fail("Internal error", "Failed to sign JWT token: %v", err)
}
tokenAuthentication := &models.LFSTokenResponse{
Header: make(map[string]string),
Href: url,
}
tokenAuthentication.Header["Authorization"] = fmt.Sprintf("Bearer %s", tokenString)
enc := json.NewEncoder(os.Stdout)
err = enc.Encode(tokenAuthentication)
if err != nil {
return fail("Internal error", "Failed to encode LFS json response: %v", err)
}
return nil
}
// Special handle for Windows.
if setting.IsWindows {
verb = strings.Replace(verb, "-", " ", 1)
}
var gitcmd *exec.Cmd
verbs := strings.Split(verb, " ")
if len(verbs) == 2 {
gitcmd = exec.CommandContext(ctx, verbs[0], verbs[1], repoPath)
} else {
gitcmd = exec.CommandContext(ctx, verb, repoPath)
}
gitcmd.Dir = setting.RepoRootPath
gitcmd.Stdout = os.Stdout
gitcmd.Stdin = os.Stdin
gitcmd.Stderr = os.Stderr
if err = gitcmd.Run(); err != nil {
return fail("Internal error", "Failed to execute git command: %v", err)
}
// Update user key activity.
if results.KeyID > 0 {
if err = private.UpdatePublicKeyInRepo(ctx, results.KeyID, results.RepoID); err != nil {
return fail("Internal error", "UpdatePublicKeyInRepo: %v", err)
}
}
return nil
}
|
[
"\"SSH_ORIGINAL_COMMAND\"",
"\"SSH_ORIGINAL_COMMAND\""
] |
[] |
[
"SSH_ORIGINAL_COMMAND"
] |
[]
|
["SSH_ORIGINAL_COMMAND"]
|
go
| 1 | 0 | |
common/elasticsearch/aws.go
|
// The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package elasticsearch
import (
"fmt"
"net/http"
"os"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
elasticaws "github.com/olivere/elastic/aws/v4"
)
// NewAwsHttpClient builds an *http.Client that signs Elasticsearch requests with
// AWS V4 credentials resolved from the given config. It returns (nil, nil) when
// request signing is disabled.
func NewAwsHttpClient(config AWSRequestSigningConfig) (*http.Client, error) {
if !config.Enabled {
return nil, nil
}
if config.Region == "" {
config.Region = os.Getenv("AWS_REGION")
if config.Region == "" {
return nil, fmt.Errorf("unable to resolve AWS region for obtaining AWS Elastic signing credentials")
}
}
var awsCredentials *credentials.Credentials
switch strings.ToLower(config.CredentialProvider) {
case "static":
awsCredentials = credentials.NewStaticCredentials(
config.Static.AccessKeyID,
config.Static.SecretAccessKey,
config.Static.Token,
)
case "environment":
awsCredentials = credentials.NewEnvCredentials()
case "aws-sdk-default":
awsSession, err := session.NewSession(&aws.Config{
Region: &config.Region,
})
if err != nil {
return nil, err
}
awsCredentials = awsSession.Config.Credentials
default:
return nil, fmt.Errorf("unknown AWS credential provider specified: %+v. Accepted options are 'static', 'environment' or 'session'", config.CredentialProvider)
}
return elasticaws.NewV4SigningClient(awsCredentials, config.Region), nil
}
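// Minimal usage sketch (the field values are placeholders; field names follow
// AWSRequestSigningConfig as referenced above):
//
//	httpClient, err := NewAwsHttpClient(AWSRequestSigningConfig{
//		Enabled:            true,
//		Region:             "us-east-1",
//		CredentialProvider: "environment",
//	})
//	// httpClient can then be handed to the Elasticsearch client as its HTTP client.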
|
[
"\"AWS_REGION\""
] |
[] |
[
"AWS_REGION"
] |
[]
|
["AWS_REGION"]
|
go
| 1 | 0 | |
lang/java/reef-tests/src/test/java/org/apache/reef/tests/TestEnvironmentFactory.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.reef.tests;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Factory for the TestEnvironment.
*/
public final class TestEnvironmentFactory {
private static final Logger LOG = Logger.getLogger(TestEnvironmentFactory.class.getName());
/**
* If exactly one of the $REEF_TEST_YARN, $REEF_TEST_MESOS or $REEF_TEST_AZBATCH
* environment variables is set to true, return the test environment for that
* runtime; if none of them is set (or they are set to false), return the local
* test environment.
*
* @return a new TestEnvironment instance.
*/
public static TestEnvironment getNewTestEnvironment() {
final boolean isYarn = Boolean.parseBoolean(System.getenv("REEF_TEST_YARN"));
final boolean isMesos = Boolean.parseBoolean(System.getenv("REEF_TEST_MESOS"));
final boolean isAzBatch = Boolean.parseBoolean(System.getenv("REEF_TEST_AZBATCH"));
if (isYarn ? (isMesos || isAzBatch) : (isMesos && isAzBatch)) {
throw new RuntimeException("Cannot test on two runtimes at once");
} else if (isYarn) {
LOG.log(Level.INFO, "Running tests on YARN");
return new YarnTestEnvironment();
} else if (isMesos) {
LOG.log(Level.INFO, "Running tests on Mesos");
return new MesosTestEnvironment();
} else if (isAzBatch) {
LOG.log(Level.INFO, "Running tests on Azure Batch");
return new AzureBatchTestEnvironment();
} else {
LOG.log(Level.INFO, "Running tests on Local");
return new LocalTestEnvironment();
}
}
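  // Illustrative only (the invocation below is a hypothetical way to set the
  // variable): running the tests with REEF_TEST_YARN=true selects
  // YarnTestEnvironment, while leaving REEF_TEST_YARN, REEF_TEST_MESOS and
  // REEF_TEST_AZBATCH unset (or set to false) selects LocalTestEnvironment.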
/**
* Empty private constructor to prohibit instantiation of utility class.
*/
private TestEnvironmentFactory() {
}
}
|
[
"\"REEF_TEST_YARN\"",
"\"REEF_TEST_MESOS\"",
"\"REEF_TEST_AZBATCH\""
] |
[] |
[
"REEF_TEST_MESOS",
"REEF_TEST_AZBATCH",
"REEF_TEST_YARN"
] |
[]
|
["REEF_TEST_MESOS", "REEF_TEST_AZBATCH", "REEF_TEST_YARN"]
|
java
| 3 | 0 | |
tests/unit/gapic/aiplatform_v1/test_job_service.py
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.aiplatform_v1.services.job_service import JobServiceAsyncClient
from google.cloud.aiplatform_v1.services.job_service import JobServiceClient
from google.cloud.aiplatform_v1.services.job_service import pagers
from google.cloud.aiplatform_v1.services.job_service import transports
from google.cloud.aiplatform_v1.types import accelerator_type
from google.cloud.aiplatform_v1.types import batch_prediction_job
from google.cloud.aiplatform_v1.types import (
batch_prediction_job as gca_batch_prediction_job,
)
from google.cloud.aiplatform_v1.types import completion_stats
from google.cloud.aiplatform_v1.types import custom_job
from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job
from google.cloud.aiplatform_v1.types import data_labeling_job
from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job
from google.cloud.aiplatform_v1.types import encryption_spec
from google.cloud.aiplatform_v1.types import env_var
from google.cloud.aiplatform_v1.types import explanation
from google.cloud.aiplatform_v1.types import explanation_metadata
from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job
from google.cloud.aiplatform_v1.types import (
hyperparameter_tuning_job as gca_hyperparameter_tuning_job,
)
from google.cloud.aiplatform_v1.types import io
from google.cloud.aiplatform_v1.types import job_service
from google.cloud.aiplatform_v1.types import job_state
from google.cloud.aiplatform_v1.types import machine_resources
from google.cloud.aiplatform_v1.types import manual_batch_tuning_parameters
from google.cloud.aiplatform_v1.types import model
from google.cloud.aiplatform_v1.types import model_deployment_monitoring_job
from google.cloud.aiplatform_v1.types import (
model_deployment_monitoring_job as gca_model_deployment_monitoring_job,
)
from google.cloud.aiplatform_v1.types import model_monitoring
from google.cloud.aiplatform_v1.types import operation as gca_operation
from google.cloud.aiplatform_v1.types import study
from google.cloud.aiplatform_v1.types import unmanaged_container_model
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import any_pb2 # type: ignore
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
from google.type import money_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
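# Example (hypothetical endpoint values): if client.DEFAULT_ENDPOINT were
# "localhost:7469", modify_default_endpoint returns "foo.googleapis.com"; a
# production endpoint such as "aiplatform.googleapis.com" is returned unchanged.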
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert JobServiceClient._get_default_mtls_endpoint(None) is None
assert (
JobServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
)
assert (
JobServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
JobServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
JobServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert JobServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize(
"client_class",
[
JobServiceClient,
JobServiceAsyncClient,
],
)
def test_job_service_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "aiplatform.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.JobServiceGrpcTransport, "grpc"),
(transports.JobServiceGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_job_service_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class",
[
JobServiceClient,
JobServiceAsyncClient,
],
)
def test_job_service_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "aiplatform.googleapis.com:443"
def test_job_service_client_get_transport_class():
transport = JobServiceClient.get_transport_class()
available_transports = [
transports.JobServiceGrpcTransport,
]
assert transport in available_transports
transport = JobServiceClient.get_transport_class("grpc")
assert transport == transports.JobServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(JobServiceClient, transports.JobServiceGrpcTransport, "grpc"),
(
JobServiceAsyncClient,
transports.JobServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient)
)
@mock.patch.object(
JobServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(JobServiceAsyncClient),
)
def test_job_service_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(JobServiceClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(JobServiceClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "true"),
(
JobServiceAsyncClient,
transports.JobServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(JobServiceClient, transports.JobServiceGrpcTransport, "grpc", "false"),
(
JobServiceAsyncClient,
transports.JobServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient)
)
@mock.patch.object(
JobServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(JobServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_job_service_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [JobServiceClient, JobServiceAsyncClient])
@mock.patch.object(
JobServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(JobServiceClient)
)
@mock.patch.object(
JobServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(JobServiceAsyncClient),
)
def test_job_service_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(JobServiceClient, transports.JobServiceGrpcTransport, "grpc"),
(
JobServiceAsyncClient,
transports.JobServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_job_service_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(
scopes=["1", "2"],
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(JobServiceClient, transports.JobServiceGrpcTransport, "grpc", grpc_helpers),
(
JobServiceAsyncClient,
transports.JobServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_job_service_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_job_service_client_client_options_from_dict():
with mock.patch(
"google.cloud.aiplatform_v1.services.job_service.transports.JobServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = JobServiceClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(JobServiceClient, transports.JobServiceGrpcTransport, "grpc", grpc_helpers),
(
JobServiceAsyncClient,
transports.JobServiceGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_job_service_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"aiplatform.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=None,
default_host="aiplatform.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"request_type",
[
job_service.CreateCustomJobRequest,
dict,
],
)
def test_create_custom_job(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_custom_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_custom_job.CustomJob(
name="name_value",
display_name="display_name_value",
state=job_state.JobState.JOB_STATE_QUEUED,
)
response = client.create_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateCustomJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_custom_job.CustomJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.state == job_state.JobState.JOB_STATE_QUEUED
def test_create_custom_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_custom_job), "__call__"
) as call:
client.create_custom_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateCustomJobRequest()
@pytest.mark.asyncio
async def test_create_custom_job_async(
transport: str = "grpc_asyncio", request_type=job_service.CreateCustomJobRequest
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_custom_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_custom_job.CustomJob(
name="name_value",
display_name="display_name_value",
state=job_state.JobState.JOB_STATE_QUEUED,
)
)
response = await client.create_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateCustomJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_custom_job.CustomJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.state == job_state.JobState.JOB_STATE_QUEUED
@pytest.mark.asyncio
async def test_create_custom_job_async_from_dict():
await test_create_custom_job_async(request_type=dict)
def test_create_custom_job_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CreateCustomJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_custom_job), "__call__"
) as call:
call.return_value = gca_custom_job.CustomJob()
client.create_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_custom_job_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CreateCustomJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_custom_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_custom_job.CustomJob()
)
await client.create_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent/value",
) in kw["metadata"]
def test_create_custom_job_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_custom_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_custom_job.CustomJob()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_custom_job(
parent="parent_value",
custom_job=gca_custom_job.CustomJob(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].custom_job
mock_val = gca_custom_job.CustomJob(name="name_value")
assert arg == mock_val
def test_create_custom_job_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_custom_job(
job_service.CreateCustomJobRequest(),
parent="parent_value",
custom_job=gca_custom_job.CustomJob(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_custom_job_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_custom_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_custom_job.CustomJob()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_custom_job(
parent="parent_value",
custom_job=gca_custom_job.CustomJob(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].custom_job
mock_val = gca_custom_job.CustomJob(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_custom_job_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_custom_job(
job_service.CreateCustomJobRequest(),
parent="parent_value",
custom_job=gca_custom_job.CustomJob(name="name_value"),
)
@pytest.mark.parametrize(
"request_type",
[
job_service.GetCustomJobRequest,
dict,
],
)
def test_get_custom_job(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_custom_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = custom_job.CustomJob(
name="name_value",
display_name="display_name_value",
state=job_state.JobState.JOB_STATE_QUEUED,
)
response = client.get_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetCustomJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, custom_job.CustomJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.state == job_state.JobState.JOB_STATE_QUEUED
def test_get_custom_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_custom_job), "__call__") as call:
client.get_custom_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetCustomJobRequest()
@pytest.mark.asyncio
async def test_get_custom_job_async(
transport: str = "grpc_asyncio", request_type=job_service.GetCustomJobRequest
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_custom_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
custom_job.CustomJob(
name="name_value",
display_name="display_name_value",
state=job_state.JobState.JOB_STATE_QUEUED,
)
)
response = await client.get_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetCustomJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, custom_job.CustomJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.state == job_state.JobState.JOB_STATE_QUEUED
@pytest.mark.asyncio
async def test_get_custom_job_async_from_dict():
await test_get_custom_job_async(request_type=dict)
def test_get_custom_job_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.GetCustomJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_custom_job), "__call__") as call:
call.return_value = custom_job.CustomJob()
client.get_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_custom_job_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.GetCustomJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_custom_job), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
custom_job.CustomJob()
)
await client.get_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
def test_get_custom_job_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_custom_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = custom_job.CustomJob()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_custom_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_custom_job_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_custom_job(
job_service.GetCustomJobRequest(),
name="name_value",
)
@pytest.mark.asyncio
async def test_get_custom_job_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_custom_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
custom_job.CustomJob()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_custom_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_custom_job_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_custom_job(
job_service.GetCustomJobRequest(),
name="name_value",
)
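
# List RPCs are checked against the pager wrappers: the sync client is expected
# to return pagers.ListCustomJobsPager and the async client the AsyncPager variant.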
@pytest.mark.parametrize(
"request_type",
[
job_service.ListCustomJobsRequest,
dict,
],
)
def test_list_custom_jobs(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = job_service.ListCustomJobsResponse(
next_page_token="next_page_token_value",
)
response = client.list_custom_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListCustomJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListCustomJobsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_custom_jobs_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
client.list_custom_jobs()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListCustomJobsRequest()
@pytest.mark.asyncio
async def test_list_custom_jobs_async(
transport: str = "grpc_asyncio", request_type=job_service.ListCustomJobsRequest
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListCustomJobsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_custom_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListCustomJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListCustomJobsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_custom_jobs_async_from_dict():
await test_list_custom_jobs_async(request_type=dict)
def test_list_custom_jobs_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ListCustomJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
call.return_value = job_service.ListCustomJobsResponse()
client.list_custom_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_custom_jobs_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ListCustomJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListCustomJobsResponse()
)
await client.list_custom_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent/value",
) in kw["metadata"]
def test_list_custom_jobs_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = job_service.ListCustomJobsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_custom_jobs(
parent="parent_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_custom_jobs_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_custom_jobs(
job_service.ListCustomJobsRequest(),
parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_custom_jobs_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListCustomJobsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_custom_jobs(
parent="parent_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_custom_jobs_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_custom_jobs(
job_service.ListCustomJobsRequest(),
parent="parent_value",
)
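
# Pager tests feed the transport a side_effect sequence of page responses ending
# in a RuntimeError sentinel, then verify that iteration yields every CustomJob
# across pages and that the page tokens line up.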
def test_list_custom_jobs_pager(transport_name: str = "grpc"):
client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListCustomJobsResponse(
custom_jobs=[
custom_job.CustomJob(),
custom_job.CustomJob(),
custom_job.CustomJob(),
],
next_page_token="abc",
),
job_service.ListCustomJobsResponse(
custom_jobs=[],
next_page_token="def",
),
job_service.ListCustomJobsResponse(
custom_jobs=[
custom_job.CustomJob(),
],
next_page_token="ghi",
),
job_service.ListCustomJobsResponse(
custom_jobs=[
custom_job.CustomJob(),
custom_job.CustomJob(),
],
),
RuntimeError,
)
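        # The pager should capture the routing-header metadata so it can be
        # re-sent with each subsequent page request.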
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_custom_jobs(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, custom_job.CustomJob) for i in results)
def test_list_custom_jobs_pages(transport_name: str = "grpc"):
client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_custom_jobs), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListCustomJobsResponse(
custom_jobs=[
custom_job.CustomJob(),
custom_job.CustomJob(),
custom_job.CustomJob(),
],
next_page_token="abc",
),
job_service.ListCustomJobsResponse(
custom_jobs=[],
next_page_token="def",
),
job_service.ListCustomJobsResponse(
custom_jobs=[
custom_job.CustomJob(),
],
next_page_token="ghi",
),
job_service.ListCustomJobsResponse(
custom_jobs=[
custom_job.CustomJob(),
custom_job.CustomJob(),
],
),
RuntimeError,
)
pages = list(client.list_custom_jobs(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_custom_jobs_async_pager():
client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_custom_jobs), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListCustomJobsResponse(
custom_jobs=[
custom_job.CustomJob(),
custom_job.CustomJob(),
custom_job.CustomJob(),
],
next_page_token="abc",
),
job_service.ListCustomJobsResponse(
custom_jobs=[],
next_page_token="def",
),
job_service.ListCustomJobsResponse(
custom_jobs=[
custom_job.CustomJob(),
],
next_page_token="ghi",
),
job_service.ListCustomJobsResponse(
custom_jobs=[
custom_job.CustomJob(),
custom_job.CustomJob(),
],
),
RuntimeError,
)
async_pager = await client.list_custom_jobs(
request={},
)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, custom_job.CustomJob) for i in responses)
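
# Async pager tests patch the transport call with mock.AsyncMock so that the
# awaited call and `async for` iteration work against the faked pages.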
@pytest.mark.asyncio
async def test_list_custom_jobs_async_pages():
client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_custom_jobs), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListCustomJobsResponse(
custom_jobs=[
custom_job.CustomJob(),
custom_job.CustomJob(),
custom_job.CustomJob(),
],
next_page_token="abc",
),
job_service.ListCustomJobsResponse(
custom_jobs=[],
next_page_token="def",
),
job_service.ListCustomJobsResponse(
custom_jobs=[
custom_job.CustomJob(),
],
next_page_token="ghi",
),
job_service.ListCustomJobsResponse(
custom_jobs=[
custom_job.CustomJob(),
custom_job.CustomJob(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_custom_jobs(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
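
# DeleteCustomJob returns a long-running operation (asserted as a future.Future),
# while CancelCustomJob returns Empty, so its tests assert the response is None.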
@pytest.mark.parametrize(
"request_type",
[
job_service.DeleteCustomJobRequest,
dict,
],
)
def test_delete_custom_job(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_custom_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteCustomJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_custom_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_custom_job), "__call__"
) as call:
client.delete_custom_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteCustomJobRequest()
@pytest.mark.asyncio
async def test_delete_custom_job_async(
transport: str = "grpc_asyncio", request_type=job_service.DeleteCustomJobRequest
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_custom_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteCustomJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_custom_job_async_from_dict():
await test_delete_custom_job_async(request_type=dict)
def test_delete_custom_job_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.DeleteCustomJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_custom_job), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_custom_job_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.DeleteCustomJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_custom_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
def test_delete_custom_job_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_custom_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_custom_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_custom_job_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_custom_job(
job_service.DeleteCustomJobRequest(),
name="name_value",
)
@pytest.mark.asyncio
async def test_delete_custom_job_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_custom_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_custom_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_custom_job_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_custom_job(
job_service.DeleteCustomJobRequest(),
name="name_value",
)
@pytest.mark.parametrize(
"request_type",
[
job_service.CancelCustomJobRequest,
dict,
],
)
def test_cancel_custom_job(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_custom_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.cancel_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CancelCustomJobRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_cancel_custom_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_custom_job), "__call__"
) as call:
client.cancel_custom_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CancelCustomJobRequest()
@pytest.mark.asyncio
async def test_cancel_custom_job_async(
transport: str = "grpc_asyncio", request_type=job_service.CancelCustomJobRequest
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_custom_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.cancel_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CancelCustomJobRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_cancel_custom_job_async_from_dict():
await test_cancel_custom_job_async(request_type=dict)
def test_cancel_custom_job_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CancelCustomJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_custom_job), "__call__"
) as call:
call.return_value = None
client.cancel_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_cancel_custom_job_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CancelCustomJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_custom_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.cancel_custom_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
def test_cancel_custom_job_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_custom_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.cancel_custom_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_cancel_custom_job_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.cancel_custom_job(
job_service.CancelCustomJobRequest(),
name="name_value",
)
@pytest.mark.asyncio
async def test_cancel_custom_job_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_custom_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.cancel_custom_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_cancel_custom_job_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.cancel_custom_job(
job_service.CancelCustomJobRequest(),
name="name_value",
)
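
# The same test matrix repeats below for the DataLabelingJob RPCs, with every
# field of the returned DataLabelingJob asserted explicitly.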
@pytest.mark.parametrize(
"request_type",
[
job_service.CreateDataLabelingJobRequest,
dict,
],
)
def test_create_data_labeling_job(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_data_labeling_job.DataLabelingJob(
name="name_value",
display_name="display_name_value",
datasets=["datasets_value"],
labeler_count=1375,
instruction_uri="instruction_uri_value",
inputs_schema_uri="inputs_schema_uri_value",
state=job_state.JobState.JOB_STATE_QUEUED,
labeling_progress=1810,
specialist_pools=["specialist_pools_value"],
)
response = client.create_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateDataLabelingJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_data_labeling_job.DataLabelingJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.datasets == ["datasets_value"]
assert response.labeler_count == 1375
assert response.instruction_uri == "instruction_uri_value"
assert response.inputs_schema_uri == "inputs_schema_uri_value"
assert response.state == job_state.JobState.JOB_STATE_QUEUED
assert response.labeling_progress == 1810
assert response.specialist_pools == ["specialist_pools_value"]
def test_create_data_labeling_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_data_labeling_job), "__call__"
) as call:
client.create_data_labeling_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateDataLabelingJobRequest()
@pytest.mark.asyncio
async def test_create_data_labeling_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.CreateDataLabelingJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_data_labeling_job.DataLabelingJob(
name="name_value",
display_name="display_name_value",
datasets=["datasets_value"],
labeler_count=1375,
instruction_uri="instruction_uri_value",
inputs_schema_uri="inputs_schema_uri_value",
state=job_state.JobState.JOB_STATE_QUEUED,
labeling_progress=1810,
specialist_pools=["specialist_pools_value"],
)
)
response = await client.create_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateDataLabelingJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_data_labeling_job.DataLabelingJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.datasets == ["datasets_value"]
assert response.labeler_count == 1375
assert response.instruction_uri == "instruction_uri_value"
assert response.inputs_schema_uri == "inputs_schema_uri_value"
assert response.state == job_state.JobState.JOB_STATE_QUEUED
assert response.labeling_progress == 1810
assert response.specialist_pools == ["specialist_pools_value"]
@pytest.mark.asyncio
async def test_create_data_labeling_job_async_from_dict():
await test_create_data_labeling_job_async(request_type=dict)
def test_create_data_labeling_job_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CreateDataLabelingJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_data_labeling_job), "__call__"
) as call:
call.return_value = gca_data_labeling_job.DataLabelingJob()
client.create_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_data_labeling_job_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CreateDataLabelingJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_data_labeling_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_data_labeling_job.DataLabelingJob()
)
await client.create_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent/value",
) in kw["metadata"]
def test_create_data_labeling_job_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_data_labeling_job.DataLabelingJob()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_data_labeling_job(
parent="parent_value",
data_labeling_job=gca_data_labeling_job.DataLabelingJob(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].data_labeling_job
mock_val = gca_data_labeling_job.DataLabelingJob(name="name_value")
assert arg == mock_val
def test_create_data_labeling_job_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_data_labeling_job(
job_service.CreateDataLabelingJobRequest(),
parent="parent_value",
data_labeling_job=gca_data_labeling_job.DataLabelingJob(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_data_labeling_job_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_data_labeling_job.DataLabelingJob()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_data_labeling_job(
parent="parent_value",
data_labeling_job=gca_data_labeling_job.DataLabelingJob(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].data_labeling_job
mock_val = gca_data_labeling_job.DataLabelingJob(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_data_labeling_job_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_data_labeling_job(
job_service.CreateDataLabelingJobRequest(),
parent="parent_value",
data_labeling_job=gca_data_labeling_job.DataLabelingJob(name="name_value"),
)
@pytest.mark.parametrize(
"request_type",
[
job_service.GetDataLabelingJobRequest,
dict,
],
)
def test_get_data_labeling_job(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = data_labeling_job.DataLabelingJob(
name="name_value",
display_name="display_name_value",
datasets=["datasets_value"],
labeler_count=1375,
instruction_uri="instruction_uri_value",
inputs_schema_uri="inputs_schema_uri_value",
state=job_state.JobState.JOB_STATE_QUEUED,
labeling_progress=1810,
specialist_pools=["specialist_pools_value"],
)
response = client.get_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetDataLabelingJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, data_labeling_job.DataLabelingJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.datasets == ["datasets_value"]
assert response.labeler_count == 1375
assert response.instruction_uri == "instruction_uri_value"
assert response.inputs_schema_uri == "inputs_schema_uri_value"
assert response.state == job_state.JobState.JOB_STATE_QUEUED
assert response.labeling_progress == 1810
assert response.specialist_pools == ["specialist_pools_value"]
def test_get_data_labeling_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_data_labeling_job), "__call__"
) as call:
client.get_data_labeling_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetDataLabelingJobRequest()
@pytest.mark.asyncio
async def test_get_data_labeling_job_async(
transport: str = "grpc_asyncio", request_type=job_service.GetDataLabelingJobRequest
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_job.DataLabelingJob(
name="name_value",
display_name="display_name_value",
datasets=["datasets_value"],
labeler_count=1375,
instruction_uri="instruction_uri_value",
inputs_schema_uri="inputs_schema_uri_value",
state=job_state.JobState.JOB_STATE_QUEUED,
labeling_progress=1810,
specialist_pools=["specialist_pools_value"],
)
)
response = await client.get_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetDataLabelingJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, data_labeling_job.DataLabelingJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.datasets == ["datasets_value"]
assert response.labeler_count == 1375
assert response.instruction_uri == "instruction_uri_value"
assert response.inputs_schema_uri == "inputs_schema_uri_value"
assert response.state == job_state.JobState.JOB_STATE_QUEUED
assert response.labeling_progress == 1810
assert response.specialist_pools == ["specialist_pools_value"]
@pytest.mark.asyncio
async def test_get_data_labeling_job_async_from_dict():
await test_get_data_labeling_job_async(request_type=dict)
def test_get_data_labeling_job_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.GetDataLabelingJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_data_labeling_job), "__call__"
) as call:
call.return_value = data_labeling_job.DataLabelingJob()
client.get_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_data_labeling_job_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.GetDataLabelingJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_data_labeling_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_job.DataLabelingJob()
)
await client.get_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
def test_get_data_labeling_job_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = data_labeling_job.DataLabelingJob()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_data_labeling_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_data_labeling_job_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_data_labeling_job(
job_service.GetDataLabelingJobRequest(),
name="name_value",
)
@pytest.mark.asyncio
async def test_get_data_labeling_job_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
data_labeling_job.DataLabelingJob()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_data_labeling_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_data_labeling_job_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_data_labeling_job(
job_service.GetDataLabelingJobRequest(),
name="name_value",
)
@pytest.mark.parametrize(
"request_type",
[
job_service.ListDataLabelingJobsRequest,
dict,
],
)
def test_list_data_labeling_jobs(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_labeling_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = job_service.ListDataLabelingJobsResponse(
next_page_token="next_page_token_value",
)
response = client.list_data_labeling_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListDataLabelingJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListDataLabelingJobsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_data_labeling_jobs_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_labeling_jobs), "__call__"
) as call:
client.list_data_labeling_jobs()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListDataLabelingJobsRequest()
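# Sketch (not collected by pytest): because every proto3 field has a zero default,
# the empty call above ends up sending a default-constructed request, and the
# assertion relies on value equality of proto-plus messages.
def _sketch_default_list_request():
    request = job_service.ListDataLabelingJobsRequest()
    assert request.parent == ""
    assert request == job_service.ListDataLabelingJobsRequest()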
@pytest.mark.asyncio
async def test_list_data_labeling_jobs_async(
transport: str = "grpc_asyncio",
request_type=job_service.ListDataLabelingJobsRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_labeling_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListDataLabelingJobsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_data_labeling_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListDataLabelingJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListDataLabelingJobsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_data_labeling_jobs_async_from_dict():
await test_list_data_labeling_jobs_async(request_type=dict)
def test_list_data_labeling_jobs_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ListDataLabelingJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_labeling_jobs), "__call__"
) as call:
call.return_value = job_service.ListDataLabelingJobsResponse()
client.list_data_labeling_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_data_labeling_jobs_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ListDataLabelingJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_labeling_jobs), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListDataLabelingJobsResponse()
)
await client.list_data_labeling_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent/value",
) in kw["metadata"]
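# Sketch (not collected by pytest): the "x-goog-request-params" entry asserted in
# the field-header tests is produced by gapic_v1.routing_header.to_grpc_metadata,
# the same helper the pager tests below use to build expected metadata. The
# expected tuple here simply mirrors the assertions above.
def _sketch_routing_header_metadata():
    key, value = gapic_v1.routing_header.to_grpc_metadata((("parent", "parent/value"),))
    assert key == "x-goog-request-params"
    assert value == "parent=parent/value"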
def test_list_data_labeling_jobs_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_labeling_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = job_service.ListDataLabelingJobsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_data_labeling_jobs(
parent="parent_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_data_labeling_jobs_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_data_labeling_jobs(
job_service.ListDataLabelingJobsRequest(),
parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_data_labeling_jobs_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_labeling_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListDataLabelingJobsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_data_labeling_jobs(
parent="parent_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_data_labeling_jobs_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_data_labeling_jobs(
job_service.ListDataLabelingJobsRequest(),
parent="parent_value",
)
def test_list_data_labeling_jobs_pager(transport_name: str = "grpc"):
client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_labeling_jobs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[
data_labeling_job.DataLabelingJob(),
data_labeling_job.DataLabelingJob(),
data_labeling_job.DataLabelingJob(),
],
next_page_token="abc",
),
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[],
next_page_token="def",
),
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[
data_labeling_job.DataLabelingJob(),
],
next_page_token="ghi",
),
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[
data_labeling_job.DataLabelingJob(),
data_labeling_job.DataLabelingJob(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_data_labeling_jobs(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, data_labeling_job.DataLabelingJob) for i in results)
def test_list_data_labeling_jobs_pages(transport_name: str = "grpc"):
client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_labeling_jobs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[
data_labeling_job.DataLabelingJob(),
data_labeling_job.DataLabelingJob(),
data_labeling_job.DataLabelingJob(),
],
next_page_token="abc",
),
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[],
next_page_token="def",
),
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[
data_labeling_job.DataLabelingJob(),
],
next_page_token="ghi",
),
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[
data_labeling_job.DataLabelingJob(),
data_labeling_job.DataLabelingJob(),
],
),
RuntimeError,
)
pages = list(client.list_data_labeling_jobs(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
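# Sketch (not collected by pytest): the pager drives one list RPC per page and the
# .pages iterator yields the raw responses, so flattening every job by hand looks
# like this. The client argument is assumed to be a JobServiceClient whose
# transport is mocked as in the pager tests above.
def _sketch_iterate_list_pages(client):
    jobs = []
    for page in client.list_data_labeling_jobs(request={}).pages:
        jobs.extend(page.data_labeling_jobs)
    return jobs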
@pytest.mark.asyncio
async def test_list_data_labeling_jobs_async_pager():
client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_labeling_jobs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[
data_labeling_job.DataLabelingJob(),
data_labeling_job.DataLabelingJob(),
data_labeling_job.DataLabelingJob(),
],
next_page_token="abc",
),
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[],
next_page_token="def",
),
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[
data_labeling_job.DataLabelingJob(),
],
next_page_token="ghi",
),
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[
data_labeling_job.DataLabelingJob(),
data_labeling_job.DataLabelingJob(),
],
),
RuntimeError,
)
async_pager = await client.list_data_labeling_jobs(
request={},
)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, data_labeling_job.DataLabelingJob) for i in responses)
@pytest.mark.asyncio
async def test_list_data_labeling_jobs_async_pages():
client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_data_labeling_jobs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[
data_labeling_job.DataLabelingJob(),
data_labeling_job.DataLabelingJob(),
data_labeling_job.DataLabelingJob(),
],
next_page_token="abc",
),
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[],
next_page_token="def",
),
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[
data_labeling_job.DataLabelingJob(),
],
next_page_token="ghi",
),
job_service.ListDataLabelingJobsResponse(
data_labeling_jobs=[
data_labeling_job.DataLabelingJob(),
data_labeling_job.DataLabelingJob(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_data_labeling_jobs(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
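# Sketch (not collected by pytest): the async pager supports "async for" directly,
# so flattening all items is an async comprehension. The async_client argument is
# assumed to be a JobServiceAsyncClient with the transport mocked as above.
async def _sketch_collect_async_list_results(async_client):
    pager = await async_client.list_data_labeling_jobs(request={})
    return [job async for job in pager]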
@pytest.mark.parametrize(
"request_type",
[
job_service.DeleteDataLabelingJobRequest,
dict,
],
)
def test_delete_data_labeling_job(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteDataLabelingJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
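# Sketch (not collected by pytest): delete_data_labeling_job returns a long-running
# operation future (asserted above via future.Future). Real callers typically block
# on it roughly like this; the helper and the timeout value are illustrative only.
def _sketch_wait_for_delete(client):
    lro = client.delete_data_labeling_job(name="name_value")
    return lro.result(timeout=300)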
def test_delete_data_labeling_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_data_labeling_job), "__call__"
) as call:
client.delete_data_labeling_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteDataLabelingJobRequest()
@pytest.mark.asyncio
async def test_delete_data_labeling_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.DeleteDataLabelingJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteDataLabelingJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_data_labeling_job_async_from_dict():
await test_delete_data_labeling_job_async(request_type=dict)
def test_delete_data_labeling_job_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.DeleteDataLabelingJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_data_labeling_job), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_data_labeling_job_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.DeleteDataLabelingJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_data_labeling_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
def test_delete_data_labeling_job_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_data_labeling_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_data_labeling_job_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_data_labeling_job(
job_service.DeleteDataLabelingJobRequest(),
name="name_value",
)
@pytest.mark.asyncio
async def test_delete_data_labeling_job_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_data_labeling_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_data_labeling_job_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_data_labeling_job(
job_service.DeleteDataLabelingJobRequest(),
name="name_value",
)
@pytest.mark.parametrize(
"request_type",
[
job_service.CancelDataLabelingJobRequest,
dict,
],
)
def test_cancel_data_labeling_job(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.cancel_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CancelDataLabelingJobRequest()
# Establish that the response is the type that we expect.
assert response is None
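# Sketch (not collected by pytest): cancel_data_labeling_job surfaces the Empty
# response as None (asserted above), so failures are reported via exceptions. A
# caller that needs to observe the final job state would re-read the job
# afterwards; the helper and resource name below are illustrative only.
def _sketch_cancel_then_get(client):
    client.cancel_data_labeling_job(name="name_value")
    return client.get_data_labeling_job(name="name_value")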
def test_cancel_data_labeling_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_data_labeling_job), "__call__"
) as call:
client.cancel_data_labeling_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CancelDataLabelingJobRequest()
@pytest.mark.asyncio
async def test_cancel_data_labeling_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.CancelDataLabelingJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.cancel_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CancelDataLabelingJobRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_cancel_data_labeling_job_async_from_dict():
await test_cancel_data_labeling_job_async(request_type=dict)
def test_cancel_data_labeling_job_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CancelDataLabelingJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_data_labeling_job), "__call__"
) as call:
call.return_value = None
client.cancel_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_cancel_data_labeling_job_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CancelDataLabelingJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_data_labeling_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.cancel_data_labeling_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
def test_cancel_data_labeling_job_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.cancel_data_labeling_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_cancel_data_labeling_job_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.cancel_data_labeling_job(
job_service.CancelDataLabelingJobRequest(),
name="name_value",
)
@pytest.mark.asyncio
async def test_cancel_data_labeling_job_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_data_labeling_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.cancel_data_labeling_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_cancel_data_labeling_job_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.cancel_data_labeling_job(
job_service.CancelDataLabelingJobRequest(),
name="name_value",
)
@pytest.mark.parametrize(
"request_type",
[
job_service.CreateHyperparameterTuningJobRequest,
dict,
],
)
def test_create_hyperparameter_tuning_job(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob(
name="name_value",
display_name="display_name_value",
max_trial_count=1609,
parallel_trial_count=2128,
max_failed_trial_count=2317,
state=job_state.JobState.JOB_STATE_QUEUED,
)
response = client.create_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateHyperparameterTuningJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_hyperparameter_tuning_job.HyperparameterTuningJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.max_trial_count == 1609
assert response.parallel_trial_count == 2128
assert response.max_failed_trial_count == 2317
assert response.state == job_state.JobState.JOB_STATE_QUEUED
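# Sketch (not collected by pytest): the faked response above exercises the scalar
# fields of HyperparameterTuningJob. A message for the request side can be built
# the same way; the values are placeholders, and the spec fields a real job also
# needs are deliberately omitted because the mocked tests never touch them.
def _sketch_build_hyperparameter_tuning_job():
    return gca_hyperparameter_tuning_job.HyperparameterTuningJob(
        display_name="display_name_value",
        max_trial_count=1609,
        parallel_trial_count=2128,
        max_failed_trial_count=2317,
    )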
def test_create_hyperparameter_tuning_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_hyperparameter_tuning_job), "__call__"
) as call:
client.create_hyperparameter_tuning_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateHyperparameterTuningJobRequest()
@pytest.mark.asyncio
async def test_create_hyperparameter_tuning_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.CreateHyperparameterTuningJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_hyperparameter_tuning_job.HyperparameterTuningJob(
name="name_value",
display_name="display_name_value",
max_trial_count=1609,
parallel_trial_count=2128,
max_failed_trial_count=2317,
state=job_state.JobState.JOB_STATE_QUEUED,
)
)
response = await client.create_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateHyperparameterTuningJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_hyperparameter_tuning_job.HyperparameterTuningJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.max_trial_count == 1609
assert response.parallel_trial_count == 2128
assert response.max_failed_trial_count == 2317
assert response.state == job_state.JobState.JOB_STATE_QUEUED
@pytest.mark.asyncio
async def test_create_hyperparameter_tuning_job_async_from_dict():
await test_create_hyperparameter_tuning_job_async(request_type=dict)
def test_create_hyperparameter_tuning_job_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CreateHyperparameterTuningJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_hyperparameter_tuning_job), "__call__"
) as call:
call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob()
client.create_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_hyperparameter_tuning_job_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CreateHyperparameterTuningJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_hyperparameter_tuning_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_hyperparameter_tuning_job.HyperparameterTuningJob()
)
await client.create_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent/value",
) in kw["metadata"]
def test_create_hyperparameter_tuning_job_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_hyperparameter_tuning_job.HyperparameterTuningJob()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_hyperparameter_tuning_job(
parent="parent_value",
hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].hyperparameter_tuning_job
mock_val = gca_hyperparameter_tuning_job.HyperparameterTuningJob(
name="name_value"
)
assert arg == mock_val
def test_create_hyperparameter_tuning_job_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_hyperparameter_tuning_job(
job_service.CreateHyperparameterTuningJobRequest(),
parent="parent_value",
hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(
name="name_value"
),
)
@pytest.mark.asyncio
async def test_create_hyperparameter_tuning_job_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_hyperparameter_tuning_job.HyperparameterTuningJob()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_hyperparameter_tuning_job(
parent="parent_value",
hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].hyperparameter_tuning_job
mock_val = gca_hyperparameter_tuning_job.HyperparameterTuningJob(
name="name_value"
)
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_hyperparameter_tuning_job_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_hyperparameter_tuning_job(
job_service.CreateHyperparameterTuningJobRequest(),
parent="parent_value",
hyperparameter_tuning_job=gca_hyperparameter_tuning_job.HyperparameterTuningJob(
name="name_value"
),
)
@pytest.mark.parametrize(
"request_type",
[
job_service.GetHyperparameterTuningJobRequest,
dict,
],
)
def test_get_hyperparameter_tuning_job(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob(
name="name_value",
display_name="display_name_value",
max_trial_count=1609,
parallel_trial_count=2128,
max_failed_trial_count=2317,
state=job_state.JobState.JOB_STATE_QUEUED,
)
response = client.get_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetHyperparameterTuningJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, hyperparameter_tuning_job.HyperparameterTuningJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.max_trial_count == 1609
assert response.parallel_trial_count == 2128
assert response.max_failed_trial_count == 2317
assert response.state == job_state.JobState.JOB_STATE_QUEUED
def test_get_hyperparameter_tuning_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_hyperparameter_tuning_job), "__call__"
) as call:
client.get_hyperparameter_tuning_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetHyperparameterTuningJobRequest()
@pytest.mark.asyncio
async def test_get_hyperparameter_tuning_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.GetHyperparameterTuningJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
hyperparameter_tuning_job.HyperparameterTuningJob(
name="name_value",
display_name="display_name_value",
max_trial_count=1609,
parallel_trial_count=2128,
max_failed_trial_count=2317,
state=job_state.JobState.JOB_STATE_QUEUED,
)
)
response = await client.get_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetHyperparameterTuningJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, hyperparameter_tuning_job.HyperparameterTuningJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.max_trial_count == 1609
assert response.parallel_trial_count == 2128
assert response.max_failed_trial_count == 2317
assert response.state == job_state.JobState.JOB_STATE_QUEUED
@pytest.mark.asyncio
async def test_get_hyperparameter_tuning_job_async_from_dict():
await test_get_hyperparameter_tuning_job_async(request_type=dict)
def test_get_hyperparameter_tuning_job_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.GetHyperparameterTuningJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_hyperparameter_tuning_job), "__call__"
) as call:
call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob()
client.get_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_hyperparameter_tuning_job_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.GetHyperparameterTuningJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_hyperparameter_tuning_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
hyperparameter_tuning_job.HyperparameterTuningJob()
)
await client.get_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
def test_get_hyperparameter_tuning_job_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = hyperparameter_tuning_job.HyperparameterTuningJob()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_hyperparameter_tuning_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_hyperparameter_tuning_job_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_hyperparameter_tuning_job(
job_service.GetHyperparameterTuningJobRequest(),
name="name_value",
)
@pytest.mark.asyncio
async def test_get_hyperparameter_tuning_job_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
hyperparameter_tuning_job.HyperparameterTuningJob()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_hyperparameter_tuning_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_hyperparameter_tuning_job_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_hyperparameter_tuning_job(
job_service.GetHyperparameterTuningJobRequest(),
name="name_value",
)
@pytest.mark.parametrize(
"request_type",
[
job_service.ListHyperparameterTuningJobsRequest,
dict,
],
)
def test_list_hyperparameter_tuning_jobs(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = job_service.ListHyperparameterTuningJobsResponse(
next_page_token="next_page_token_value",
)
response = client.list_hyperparameter_tuning_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListHyperparameterTuningJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListHyperparameterTuningJobsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_hyperparameter_tuning_jobs_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
) as call:
client.list_hyperparameter_tuning_jobs()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListHyperparameterTuningJobsRequest()
@pytest.mark.asyncio
async def test_list_hyperparameter_tuning_jobs_async(
transport: str = "grpc_asyncio",
request_type=job_service.ListHyperparameterTuningJobsRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListHyperparameterTuningJobsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_hyperparameter_tuning_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListHyperparameterTuningJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListHyperparameterTuningJobsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_hyperparameter_tuning_jobs_async_from_dict():
await test_list_hyperparameter_tuning_jobs_async(request_type=dict)
def test_list_hyperparameter_tuning_jobs_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ListHyperparameterTuningJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
) as call:
call.return_value = job_service.ListHyperparameterTuningJobsResponse()
client.list_hyperparameter_tuning_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_hyperparameter_tuning_jobs_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ListHyperparameterTuningJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListHyperparameterTuningJobsResponse()
)
await client.list_hyperparameter_tuning_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent/value",
) in kw["metadata"]
def test_list_hyperparameter_tuning_jobs_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = job_service.ListHyperparameterTuningJobsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_hyperparameter_tuning_jobs(
parent="parent_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_hyperparameter_tuning_jobs_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_hyperparameter_tuning_jobs(
job_service.ListHyperparameterTuningJobsRequest(),
parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_hyperparameter_tuning_jobs_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListHyperparameterTuningJobsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_hyperparameter_tuning_jobs(
parent="parent_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_hyperparameter_tuning_jobs_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_hyperparameter_tuning_jobs(
job_service.ListHyperparameterTuningJobsRequest(),
parent="parent_value",
)
def test_list_hyperparameter_tuning_jobs_pager(transport_name: str = "grpc"):
client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[
hyperparameter_tuning_job.HyperparameterTuningJob(),
hyperparameter_tuning_job.HyperparameterTuningJob(),
hyperparameter_tuning_job.HyperparameterTuningJob(),
],
next_page_token="abc",
),
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[],
next_page_token="def",
),
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[
hyperparameter_tuning_job.HyperparameterTuningJob(),
],
next_page_token="ghi",
),
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[
hyperparameter_tuning_job.HyperparameterTuningJob(),
hyperparameter_tuning_job.HyperparameterTuningJob(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_hyperparameter_tuning_jobs(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(
isinstance(i, hyperparameter_tuning_job.HyperparameterTuningJob)
for i in results
)
def test_list_hyperparameter_tuning_jobs_pages(transport_name: str = "grpc"):
client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_hyperparameter_tuning_jobs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[
hyperparameter_tuning_job.HyperparameterTuningJob(),
hyperparameter_tuning_job.HyperparameterTuningJob(),
hyperparameter_tuning_job.HyperparameterTuningJob(),
],
next_page_token="abc",
),
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[],
next_page_token="def",
),
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[
hyperparameter_tuning_job.HyperparameterTuningJob(),
],
next_page_token="ghi",
),
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[
hyperparameter_tuning_job.HyperparameterTuningJob(),
hyperparameter_tuning_job.HyperparameterTuningJob(),
],
),
RuntimeError,
)
pages = list(client.list_hyperparameter_tuning_jobs(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_hyperparameter_tuning_jobs_async_pager():
client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_hyperparameter_tuning_jobs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[
hyperparameter_tuning_job.HyperparameterTuningJob(),
hyperparameter_tuning_job.HyperparameterTuningJob(),
hyperparameter_tuning_job.HyperparameterTuningJob(),
],
next_page_token="abc",
),
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[],
next_page_token="def",
),
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[
hyperparameter_tuning_job.HyperparameterTuningJob(),
],
next_page_token="ghi",
),
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[
hyperparameter_tuning_job.HyperparameterTuningJob(),
hyperparameter_tuning_job.HyperparameterTuningJob(),
],
),
RuntimeError,
)
async_pager = await client.list_hyperparameter_tuning_jobs(
request={},
)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(
isinstance(i, hyperparameter_tuning_job.HyperparameterTuningJob)
for i in responses
)
@pytest.mark.asyncio
async def test_list_hyperparameter_tuning_jobs_async_pages():
client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_hyperparameter_tuning_jobs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[
hyperparameter_tuning_job.HyperparameterTuningJob(),
hyperparameter_tuning_job.HyperparameterTuningJob(),
hyperparameter_tuning_job.HyperparameterTuningJob(),
],
next_page_token="abc",
),
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[],
next_page_token="def",
),
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[
hyperparameter_tuning_job.HyperparameterTuningJob(),
],
next_page_token="ghi",
),
job_service.ListHyperparameterTuningJobsResponse(
hyperparameter_tuning_jobs=[
hyperparameter_tuning_job.HyperparameterTuningJob(),
hyperparameter_tuning_job.HyperparameterTuningJob(),
],
),
RuntimeError,
)
pages = []
async for page_ in (
await client.list_hyperparameter_tuning_jobs(request={})
).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type",
[
job_service.DeleteHyperparameterTuningJobRequest,
dict,
],
)
def test_delete_hyperparameter_tuning_job(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteHyperparameterTuningJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_hyperparameter_tuning_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_hyperparameter_tuning_job), "__call__"
) as call:
client.delete_hyperparameter_tuning_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteHyperparameterTuningJobRequest()
@pytest.mark.asyncio
async def test_delete_hyperparameter_tuning_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.DeleteHyperparameterTuningJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteHyperparameterTuningJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_hyperparameter_tuning_job_async_from_dict():
await test_delete_hyperparameter_tuning_job_async(request_type=dict)
def test_delete_hyperparameter_tuning_job_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.DeleteHyperparameterTuningJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_hyperparameter_tuning_job), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_hyperparameter_tuning_job_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.DeleteHyperparameterTuningJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_hyperparameter_tuning_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
def test_delete_hyperparameter_tuning_job_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_hyperparameter_tuning_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_hyperparameter_tuning_job_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_hyperparameter_tuning_job(
job_service.DeleteHyperparameterTuningJobRequest(),
name="name_value",
)
@pytest.mark.asyncio
async def test_delete_hyperparameter_tuning_job_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_hyperparameter_tuning_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_hyperparameter_tuning_job_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_hyperparameter_tuning_job(
job_service.DeleteHyperparameterTuningJobRequest(),
name="name_value",
)
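# CancelHyperparameterTuningJob has no meaningful response payload: the stub
# is mocked to return None and the tests assert that the client call also
# returns None. Otherwise the coverage mirrors the delete tests above.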
@pytest.mark.parametrize(
"request_type",
[
job_service.CancelHyperparameterTuningJobRequest,
dict,
],
)
def test_cancel_hyperparameter_tuning_job(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.cancel_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CancelHyperparameterTuningJobRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_cancel_hyperparameter_tuning_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_hyperparameter_tuning_job), "__call__"
) as call:
client.cancel_hyperparameter_tuning_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CancelHyperparameterTuningJobRequest()
@pytest.mark.asyncio
async def test_cancel_hyperparameter_tuning_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.CancelHyperparameterTuningJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.cancel_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CancelHyperparameterTuningJobRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_cancel_hyperparameter_tuning_job_async_from_dict():
await test_cancel_hyperparameter_tuning_job_async(request_type=dict)
def test_cancel_hyperparameter_tuning_job_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CancelHyperparameterTuningJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_hyperparameter_tuning_job), "__call__"
) as call:
call.return_value = None
client.cancel_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_cancel_hyperparameter_tuning_job_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CancelHyperparameterTuningJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_hyperparameter_tuning_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.cancel_hyperparameter_tuning_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
def test_cancel_hyperparameter_tuning_job_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.cancel_hyperparameter_tuning_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_cancel_hyperparameter_tuning_job_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.cancel_hyperparameter_tuning_job(
job_service.CancelHyperparameterTuningJobRequest(),
name="name_value",
)
@pytest.mark.asyncio
async def test_cancel_hyperparameter_tuning_job_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_hyperparameter_tuning_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.cancel_hyperparameter_tuning_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_cancel_hyperparameter_tuning_job_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.cancel_hyperparameter_tuning_job(
job_service.CancelHyperparameterTuningJobRequest(),
name="name_value",
)
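# CreateBatchPredictionJob tests: the stub is mocked to return a
# gca_batch_prediction_job.BatchPredictionJob with a handful of scalar fields
# set, and the tests check both the request type sent to the stub and the
# fields echoed back on the response.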
@pytest.mark.parametrize(
"request_type",
[
job_service.CreateBatchPredictionJobRequest,
dict,
],
)
def test_create_batch_prediction_job(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_batch_prediction_job.BatchPredictionJob(
name="name_value",
display_name="display_name_value",
model="model_value",
generate_explanation=True,
state=job_state.JobState.JOB_STATE_QUEUED,
)
response = client.create_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateBatchPredictionJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_batch_prediction_job.BatchPredictionJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.model == "model_value"
assert response.generate_explanation is True
assert response.state == job_state.JobState.JOB_STATE_QUEUED
def test_create_batch_prediction_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_batch_prediction_job), "__call__"
) as call:
client.create_batch_prediction_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateBatchPredictionJobRequest()
@pytest.mark.asyncio
async def test_create_batch_prediction_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.CreateBatchPredictionJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_batch_prediction_job.BatchPredictionJob(
name="name_value",
display_name="display_name_value",
model="model_value",
generate_explanation=True,
state=job_state.JobState.JOB_STATE_QUEUED,
)
)
response = await client.create_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateBatchPredictionJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_batch_prediction_job.BatchPredictionJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.model == "model_value"
assert response.generate_explanation is True
assert response.state == job_state.JobState.JOB_STATE_QUEUED
@pytest.mark.asyncio
async def test_create_batch_prediction_job_async_from_dict():
await test_create_batch_prediction_job_async(request_type=dict)
def test_create_batch_prediction_job_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CreateBatchPredictionJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_batch_prediction_job), "__call__"
) as call:
call.return_value = gca_batch_prediction_job.BatchPredictionJob()
client.create_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_batch_prediction_job_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CreateBatchPredictionJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_batch_prediction_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_batch_prediction_job.BatchPredictionJob()
)
await client.create_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent/value",
) in kw["metadata"]
def test_create_batch_prediction_job_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_batch_prediction_job.BatchPredictionJob()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_batch_prediction_job(
parent="parent_value",
batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].batch_prediction_job
mock_val = gca_batch_prediction_job.BatchPredictionJob(name="name_value")
assert arg == mock_val
def test_create_batch_prediction_job_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_batch_prediction_job(
job_service.CreateBatchPredictionJobRequest(),
parent="parent_value",
batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(
name="name_value"
),
)
@pytest.mark.asyncio
async def test_create_batch_prediction_job_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_batch_prediction_job.BatchPredictionJob()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_batch_prediction_job(
parent="parent_value",
batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].batch_prediction_job
mock_val = gca_batch_prediction_job.BatchPredictionJob(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_batch_prediction_job_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_batch_prediction_job(
job_service.CreateBatchPredictionJobRequest(),
parent="parent_value",
batch_prediction_job=gca_batch_prediction_job.BatchPredictionJob(
name="name_value"
),
)
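# GetBatchPredictionJob tests: same shape as the create tests above, but the
# mocked response is batch_prediction_job.BatchPredictionJob (the read-side
# message) and the flattened call takes only `name`.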
@pytest.mark.parametrize(
"request_type",
[
job_service.GetBatchPredictionJobRequest,
dict,
],
)
def test_get_batch_prediction_job(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = batch_prediction_job.BatchPredictionJob(
name="name_value",
display_name="display_name_value",
model="model_value",
generate_explanation=True,
state=job_state.JobState.JOB_STATE_QUEUED,
)
response = client.get_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetBatchPredictionJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, batch_prediction_job.BatchPredictionJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.model == "model_value"
assert response.generate_explanation is True
assert response.state == job_state.JobState.JOB_STATE_QUEUED
def test_get_batch_prediction_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_batch_prediction_job), "__call__"
) as call:
client.get_batch_prediction_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetBatchPredictionJobRequest()
@pytest.mark.asyncio
async def test_get_batch_prediction_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.GetBatchPredictionJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
batch_prediction_job.BatchPredictionJob(
name="name_value",
display_name="display_name_value",
model="model_value",
generate_explanation=True,
state=job_state.JobState.JOB_STATE_QUEUED,
)
)
response = await client.get_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetBatchPredictionJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, batch_prediction_job.BatchPredictionJob)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.model == "model_value"
assert response.generate_explanation is True
assert response.state == job_state.JobState.JOB_STATE_QUEUED
@pytest.mark.asyncio
async def test_get_batch_prediction_job_async_from_dict():
await test_get_batch_prediction_job_async(request_type=dict)
def test_get_batch_prediction_job_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.GetBatchPredictionJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_batch_prediction_job), "__call__"
) as call:
call.return_value = batch_prediction_job.BatchPredictionJob()
client.get_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_batch_prediction_job_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.GetBatchPredictionJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_batch_prediction_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
batch_prediction_job.BatchPredictionJob()
)
await client.get_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
def test_get_batch_prediction_job_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = batch_prediction_job.BatchPredictionJob()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_batch_prediction_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_batch_prediction_job_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_batch_prediction_job(
job_service.GetBatchPredictionJobRequest(),
name="name_value",
)
@pytest.mark.asyncio
async def test_get_batch_prediction_job_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
batch_prediction_job.BatchPredictionJob()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_batch_prediction_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_batch_prediction_job_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_batch_prediction_job(
job_service.GetBatchPredictionJobRequest(),
name="name_value",
)
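# ListBatchPredictionJobs tests: the unary response is wrapped in
# pagers.ListBatchPredictionJobsPager / ListBatchPredictionJobsAsyncPager, and
# the pager/pages tests further below feed a four-page fixture through
# side_effect to exercise iteration across page boundaries.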
@pytest.mark.parametrize(
"request_type",
[
job_service.ListBatchPredictionJobsRequest,
dict,
],
)
def test_list_batch_prediction_jobs(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_batch_prediction_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = job_service.ListBatchPredictionJobsResponse(
next_page_token="next_page_token_value",
)
response = client.list_batch_prediction_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListBatchPredictionJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListBatchPredictionJobsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_batch_prediction_jobs_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_batch_prediction_jobs), "__call__"
) as call:
client.list_batch_prediction_jobs()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListBatchPredictionJobsRequest()
@pytest.mark.asyncio
async def test_list_batch_prediction_jobs_async(
transport: str = "grpc_asyncio",
request_type=job_service.ListBatchPredictionJobsRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_batch_prediction_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListBatchPredictionJobsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_batch_prediction_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListBatchPredictionJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListBatchPredictionJobsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_batch_prediction_jobs_async_from_dict():
await test_list_batch_prediction_jobs_async(request_type=dict)
def test_list_batch_prediction_jobs_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ListBatchPredictionJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_batch_prediction_jobs), "__call__"
) as call:
call.return_value = job_service.ListBatchPredictionJobsResponse()
client.list_batch_prediction_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_batch_prediction_jobs_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ListBatchPredictionJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_batch_prediction_jobs), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListBatchPredictionJobsResponse()
)
await client.list_batch_prediction_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent/value",
) in kw["metadata"]
def test_list_batch_prediction_jobs_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_batch_prediction_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = job_service.ListBatchPredictionJobsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_batch_prediction_jobs(
parent="parent_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_batch_prediction_jobs_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_batch_prediction_jobs(
job_service.ListBatchPredictionJobsRequest(),
parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_batch_prediction_jobs_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_batch_prediction_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListBatchPredictionJobsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_batch_prediction_jobs(
parent="parent_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_batch_prediction_jobs_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_batch_prediction_jobs(
job_service.ListBatchPredictionJobsRequest(),
parent="parent_value",
)
def test_list_batch_prediction_jobs_pager(transport_name: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials,
transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_batch_prediction_jobs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[
batch_prediction_job.BatchPredictionJob(),
batch_prediction_job.BatchPredictionJob(),
batch_prediction_job.BatchPredictionJob(),
],
next_page_token="abc",
),
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[],
next_page_token="def",
),
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[
batch_prediction_job.BatchPredictionJob(),
],
next_page_token="ghi",
),
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[
batch_prediction_job.BatchPredictionJob(),
batch_prediction_job.BatchPredictionJob(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_batch_prediction_jobs(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(
isinstance(i, batch_prediction_job.BatchPredictionJob) for i in results
)
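# Note on the `pager._metadata` assertion above: the expected metadata is built
# with gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), which
# should produce roughly an ("x-goog-request-params", "parent=") pair. The
# test checks that the pager carries this routing header so that subsequent
# page requests can reuse it; `_metadata` is a private attribute of the pager.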
def test_list_batch_prediction_jobs_pages(transport_name: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials,
transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_batch_prediction_jobs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[
batch_prediction_job.BatchPredictionJob(),
batch_prediction_job.BatchPredictionJob(),
batch_prediction_job.BatchPredictionJob(),
],
next_page_token="abc",
),
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[],
next_page_token="def",
),
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[
batch_prediction_job.BatchPredictionJob(),
],
next_page_token="ghi",
),
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[
batch_prediction_job.BatchPredictionJob(),
batch_prediction_job.BatchPredictionJob(),
],
),
RuntimeError,
)
pages = list(client.list_batch_prediction_jobs(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_batch_prediction_jobs_async_pager():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_batch_prediction_jobs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[
batch_prediction_job.BatchPredictionJob(),
batch_prediction_job.BatchPredictionJob(),
batch_prediction_job.BatchPredictionJob(),
],
next_page_token="abc",
),
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[],
next_page_token="def",
),
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[
batch_prediction_job.BatchPredictionJob(),
],
next_page_token="ghi",
),
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[
batch_prediction_job.BatchPredictionJob(),
batch_prediction_job.BatchPredictionJob(),
],
),
RuntimeError,
)
async_pager = await client.list_batch_prediction_jobs(
request={},
)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(
isinstance(i, batch_prediction_job.BatchPredictionJob) for i in responses
)
@pytest.mark.asyncio
async def test_list_batch_prediction_jobs_async_pages():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_batch_prediction_jobs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[
batch_prediction_job.BatchPredictionJob(),
batch_prediction_job.BatchPredictionJob(),
batch_prediction_job.BatchPredictionJob(),
],
next_page_token="abc",
),
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[],
next_page_token="def",
),
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[
batch_prediction_job.BatchPredictionJob(),
],
next_page_token="ghi",
),
job_service.ListBatchPredictionJobsResponse(
batch_prediction_jobs=[
batch_prediction_job.BatchPredictionJob(),
batch_prediction_job.BatchPredictionJob(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_batch_prediction_jobs(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
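# DeleteBatchPredictionJob tests: like the other delete RPCs in this module,
# this is a long-running operation mocked as an operations_pb2.Operation and
# asserted to surface as a future.Future.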
@pytest.mark.parametrize(
"request_type",
[
job_service.DeleteBatchPredictionJobRequest,
dict,
],
)
def test_delete_batch_prediction_job(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteBatchPredictionJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_batch_prediction_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_batch_prediction_job), "__call__"
) as call:
client.delete_batch_prediction_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteBatchPredictionJobRequest()
@pytest.mark.asyncio
async def test_delete_batch_prediction_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.DeleteBatchPredictionJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteBatchPredictionJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_batch_prediction_job_async_from_dict():
await test_delete_batch_prediction_job_async(request_type=dict)
def test_delete_batch_prediction_job_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.DeleteBatchPredictionJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_batch_prediction_job), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_batch_prediction_job_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.DeleteBatchPredictionJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_batch_prediction_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
def test_delete_batch_prediction_job_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_batch_prediction_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_batch_prediction_job_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_batch_prediction_job(
job_service.DeleteBatchPredictionJobRequest(),
name="name_value",
)
@pytest.mark.asyncio
async def test_delete_batch_prediction_job_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_batch_prediction_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_batch_prediction_job_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_batch_prediction_job(
job_service.DeleteBatchPredictionJobRequest(),
name="name_value",
)
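# CancelBatchPredictionJob tests: the stub is mocked to return None and the
# tests assert that the client call returns None, mirroring the cancel tests
# for the other job types.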
@pytest.mark.parametrize(
"request_type",
[
job_service.CancelBatchPredictionJobRequest,
dict,
],
)
def test_cancel_batch_prediction_job(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.cancel_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CancelBatchPredictionJobRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_cancel_batch_prediction_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_batch_prediction_job), "__call__"
) as call:
client.cancel_batch_prediction_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CancelBatchPredictionJobRequest()
@pytest.mark.asyncio
async def test_cancel_batch_prediction_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.CancelBatchPredictionJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.cancel_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CancelBatchPredictionJobRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_cancel_batch_prediction_job_async_from_dict():
await test_cancel_batch_prediction_job_async(request_type=dict)
def test_cancel_batch_prediction_job_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CancelBatchPredictionJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_batch_prediction_job), "__call__"
) as call:
call.return_value = None
client.cancel_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_cancel_batch_prediction_job_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CancelBatchPredictionJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_batch_prediction_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.cancel_batch_prediction_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
def test_cancel_batch_prediction_job_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.cancel_batch_prediction_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_cancel_batch_prediction_job_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.cancel_batch_prediction_job(
job_service.CancelBatchPredictionJobRequest(),
name="name_value",
)
@pytest.mark.asyncio
async def test_cancel_batch_prediction_job_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_batch_prediction_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.cancel_batch_prediction_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_cancel_batch_prediction_job_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.cancel_batch_prediction_job(
job_service.CancelBatchPredictionJobRequest(),
name="name_value",
)
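# Illustrative sketch (not part of the generated suite; it reuses the names
# exercised above): the flattened keyword form is shorthand for building the
# request object explicitly, e.g.
#
#     request = job_service.CancelBatchPredictionJobRequest(name="name_value")
#     client.cancel_batch_prediction_job(request=request)
#
# Supplying both a request object and flattened fields is ambiguous, which is
# why the *_flattened_error tests expect a ValueError. The parametrized tests
# that follow also pass ``dict`` as a request_type to confirm that plain dicts
# are accepted and coerced into the proto request.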
@pytest.mark.parametrize(
"request_type",
[
job_service.CreateModelDeploymentMonitoringJobRequest,
dict,
],
)
def test_create_model_deployment_monitoring_job(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value",
display_name="display_name_value",
endpoint="endpoint_value",
state=job_state.JobState.JOB_STATE_QUEUED,
schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING,
predict_instance_schema_uri="predict_instance_schema_uri_value",
analysis_instance_schema_uri="analysis_instance_schema_uri_value",
enable_monitoring_pipeline_logs=True,
)
response = client.create_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob
)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.endpoint == "endpoint_value"
assert response.state == job_state.JobState.JOB_STATE_QUEUED
assert (
response.schedule_state
== gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING
)
assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value"
assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value"
assert response.enable_monitoring_pipeline_logs is True
def test_create_model_deployment_monitoring_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_model_deployment_monitoring_job), "__call__"
) as call:
client.create_model_deployment_monitoring_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest()
@pytest.mark.asyncio
async def test_create_model_deployment_monitoring_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.CreateModelDeploymentMonitoringJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value",
display_name="display_name_value",
endpoint="endpoint_value",
state=job_state.JobState.JOB_STATE_QUEUED,
schedule_state=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING,
predict_instance_schema_uri="predict_instance_schema_uri_value",
analysis_instance_schema_uri="analysis_instance_schema_uri_value",
enable_monitoring_pipeline_logs=True,
)
)
response = await client.create_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.CreateModelDeploymentMonitoringJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob
)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.endpoint == "endpoint_value"
assert response.state == job_state.JobState.JOB_STATE_QUEUED
assert (
response.schedule_state
== gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING
)
assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value"
assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value"
assert response.enable_monitoring_pipeline_logs is True
@pytest.mark.asyncio
async def test_create_model_deployment_monitoring_job_async_from_dict():
await test_create_model_deployment_monitoring_job_async(request_type=dict)
def test_create_model_deployment_monitoring_job_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CreateModelDeploymentMonitoringJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_model_deployment_monitoring_job), "__call__"
) as call:
call.return_value = (
gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
)
client.create_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_model_deployment_monitoring_job_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.CreateModelDeploymentMonitoringJobRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_model_deployment_monitoring_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
)
await client.create_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent/value",
) in kw["metadata"]
def test_create_model_deployment_monitoring_job_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = (
gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_model_deployment_monitoring_job(
parent="parent_value",
model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].model_deployment_monitoring_job
mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value"
)
assert arg == mock_val
def test_create_model_deployment_monitoring_job_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_model_deployment_monitoring_job(
job_service.CreateModelDeploymentMonitoringJobRequest(),
parent="parent_value",
model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value"
),
)
@pytest.mark.asyncio
async def test_create_model_deployment_monitoring_job_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_model_deployment_monitoring_job(
parent="parent_value",
model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value"
),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].model_deployment_monitoring_job
mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value"
)
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_model_deployment_monitoring_job_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_model_deployment_monitoring_job(
job_service.CreateModelDeploymentMonitoringJobRequest(),
parent="parent_value",
model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value"
),
)
@pytest.mark.parametrize(
"request_type",
[
job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest,
dict,
],
)
def test_search_model_deployment_monitoring_stats_anomalies(
request_type, transport: str = "grpc"
):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_model_deployment_monitoring_stats_anomalies),
"__call__",
) as call:
# Designate an appropriate return value for the call.
call.return_value = (
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
next_page_token="next_page_token_value",
)
)
response = client.search_model_deployment_monitoring_stats_anomalies(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert (
args[0]
== job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
)
# Establish that the response is the type that we expect.
assert isinstance(
response, pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager
)
assert response.next_page_token == "next_page_token_value"
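# The synchronous search call returns a
# SearchModelDeploymentMonitoringStatsAnomaliesPager that wraps the first
# response; pagination across multiple responses is exercised in the *_pager
# and *_pages tests further down.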
def test_search_model_deployment_monitoring_stats_anomalies_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_model_deployment_monitoring_stats_anomalies),
"__call__",
) as call:
client.search_model_deployment_monitoring_stats_anomalies()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert (
args[0]
== job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
)
@pytest.mark.asyncio
async def test_search_model_deployment_monitoring_stats_anomalies_async(
transport: str = "grpc_asyncio",
request_type=job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_model_deployment_monitoring_stats_anomalies),
"__call__",
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
next_page_token="next_page_token_value",
)
)
response = await client.search_model_deployment_monitoring_stats_anomalies(
request
)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert (
args[0]
== job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
)
# Establish that the response is the type that we expect.
assert isinstance(
response, pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager
)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_search_model_deployment_monitoring_stats_anomalies_async_from_dict():
await test_search_model_deployment_monitoring_stats_anomalies_async(
request_type=dict
)
def test_search_model_deployment_monitoring_stats_anomalies_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
request.model_deployment_monitoring_job = "model_deployment_monitoring_job/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_model_deployment_monitoring_stats_anomalies),
"__call__",
) as call:
call.return_value = (
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()
)
client.search_model_deployment_monitoring_stats_anomalies(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"model_deployment_monitoring_job=model_deployment_monitoring_job/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_search_model_deployment_monitoring_stats_anomalies_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest()
request.model_deployment_monitoring_job = "model_deployment_monitoring_job/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_model_deployment_monitoring_stats_anomalies),
"__call__",
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()
)
await client.search_model_deployment_monitoring_stats_anomalies(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"model_deployment_monitoring_job=model_deployment_monitoring_job/value",
) in kw["metadata"]
def test_search_model_deployment_monitoring_stats_anomalies_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_model_deployment_monitoring_stats_anomalies),
"__call__",
) as call:
# Designate an appropriate return value for the call.
call.return_value = (
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.search_model_deployment_monitoring_stats_anomalies(
model_deployment_monitoring_job="model_deployment_monitoring_job_value",
deployed_model_id="deployed_model_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].model_deployment_monitoring_job
mock_val = "model_deployment_monitoring_job_value"
assert arg == mock_val
arg = args[0].deployed_model_id
mock_val = "deployed_model_id_value"
assert arg == mock_val
def test_search_model_deployment_monitoring_stats_anomalies_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.search_model_deployment_monitoring_stats_anomalies(
job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(),
model_deployment_monitoring_job="model_deployment_monitoring_job_value",
deployed_model_id="deployed_model_id_value",
)
@pytest.mark.asyncio
async def test_search_model_deployment_monitoring_stats_anomalies_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_model_deployment_monitoring_stats_anomalies),
"__call__",
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.search_model_deployment_monitoring_stats_anomalies(
model_deployment_monitoring_job="model_deployment_monitoring_job_value",
deployed_model_id="deployed_model_id_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].model_deployment_monitoring_job
mock_val = "model_deployment_monitoring_job_value"
assert arg == mock_val
arg = args[0].deployed_model_id
mock_val = "deployed_model_id_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_search_model_deployment_monitoring_stats_anomalies_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.search_model_deployment_monitoring_stats_anomalies(
job_service.SearchModelDeploymentMonitoringStatsAnomaliesRequest(),
model_deployment_monitoring_job="model_deployment_monitoring_job_value",
deployed_model_id="deployed_model_id_value",
)
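# Hedged usage sketch (assumes real credentials and an existing monitoring job
# outside this mocked environment; the resource name below is illustrative):
# iterating the returned pager transparently issues follow-up RPCs until
# next_page_token is empty.
#
#     pager = client.search_model_deployment_monitoring_stats_anomalies(
#         model_deployment_monitoring_job="projects/p/locations/l/modelDeploymentMonitoringJobs/123",
#         deployed_model_id="deployed_model_id_value",
#     )
#     for anomalies in pager:  # yields ModelMonitoringStatsAnomalies items
#         ...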
def test_search_model_deployment_monitoring_stats_anomalies_pager(
transport_name: str = "grpc",
):
client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_model_deployment_monitoring_stats_anomalies),
"__call__",
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
],
next_page_token="abc",
),
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[],
next_page_token="def",
),
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
],
next_page_token="ghi",
),
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("model_deployment_monitoring_job", ""),)
),
)
pager = client.search_model_deployment_monitoring_stats_anomalies(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(
isinstance(
i, gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies
)
for i in results
)
def test_search_model_deployment_monitoring_stats_anomalies_pages(
transport_name: str = "grpc",
):
client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_model_deployment_monitoring_stats_anomalies),
"__call__",
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
],
next_page_token="abc",
),
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[],
next_page_token="def",
),
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
],
next_page_token="ghi",
),
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
],
),
RuntimeError,
)
pages = list(
client.search_model_deployment_monitoring_stats_anomalies(request={}).pages
)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
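# The async surface returns an AsyncPager that is consumed with ``async for``;
# the trailing RuntimeError in each side_effect sequence guards against the
# pager requesting more pages than the test provides.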
@pytest.mark.asyncio
async def test_search_model_deployment_monitoring_stats_anomalies_async_pager():
client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_model_deployment_monitoring_stats_anomalies),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
],
next_page_token="abc",
),
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[],
next_page_token="def",
),
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
],
next_page_token="ghi",
),
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
],
),
RuntimeError,
)
async_pager = await client.search_model_deployment_monitoring_stats_anomalies(
request={},
)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(
isinstance(
i, gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies
)
for i in responses
)
@pytest.mark.asyncio
async def test_search_model_deployment_monitoring_stats_anomalies_async_pages():
client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_model_deployment_monitoring_stats_anomalies),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
],
next_page_token="abc",
),
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[],
next_page_token="def",
),
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
],
next_page_token="ghi",
),
job_service.SearchModelDeploymentMonitoringStatsAnomaliesResponse(
monitoring_stats=[
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
gca_model_deployment_monitoring_job.ModelMonitoringStatsAnomalies(),
],
),
RuntimeError,
)
pages = []
async for page_ in (
await client.search_model_deployment_monitoring_stats_anomalies(request={})
).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type",
[
job_service.GetModelDeploymentMonitoringJobRequest,
dict,
],
)
def test_get_model_deployment_monitoring_job(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value",
display_name="display_name_value",
endpoint="endpoint_value",
state=job_state.JobState.JOB_STATE_QUEUED,
schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING,
predict_instance_schema_uri="predict_instance_schema_uri_value",
analysis_instance_schema_uri="analysis_instance_schema_uri_value",
enable_monitoring_pipeline_logs=True,
)
response = client.get_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, model_deployment_monitoring_job.ModelDeploymentMonitoringJob
)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.endpoint == "endpoint_value"
assert response.state == job_state.JobState.JOB_STATE_QUEUED
assert (
response.schedule_state
== model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING
)
assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value"
assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value"
assert response.enable_monitoring_pipeline_logs is True
def test_get_model_deployment_monitoring_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_model_deployment_monitoring_job), "__call__"
) as call:
client.get_model_deployment_monitoring_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest()
@pytest.mark.asyncio
async def test_get_model_deployment_monitoring_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.GetModelDeploymentMonitoringJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value",
display_name="display_name_value",
endpoint="endpoint_value",
state=job_state.JobState.JOB_STATE_QUEUED,
schedule_state=model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING,
predict_instance_schema_uri="predict_instance_schema_uri_value",
analysis_instance_schema_uri="analysis_instance_schema_uri_value",
enable_monitoring_pipeline_logs=True,
)
)
response = await client.get_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.GetModelDeploymentMonitoringJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(
response, model_deployment_monitoring_job.ModelDeploymentMonitoringJob
)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.endpoint == "endpoint_value"
assert response.state == job_state.JobState.JOB_STATE_QUEUED
assert (
response.schedule_state
== model_deployment_monitoring_job.ModelDeploymentMonitoringJob.MonitoringScheduleState.PENDING
)
assert response.predict_instance_schema_uri == "predict_instance_schema_uri_value"
assert response.analysis_instance_schema_uri == "analysis_instance_schema_uri_value"
assert response.enable_monitoring_pipeline_logs is True
@pytest.mark.asyncio
async def test_get_model_deployment_monitoring_job_async_from_dict():
await test_get_model_deployment_monitoring_job_async(request_type=dict)
def test_get_model_deployment_monitoring_job_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.GetModelDeploymentMonitoringJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_model_deployment_monitoring_job), "__call__"
) as call:
call.return_value = (
model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
)
client.get_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_model_deployment_monitoring_job_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.GetModelDeploymentMonitoringJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_model_deployment_monitoring_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
)
await client.get_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
def test_get_model_deployment_monitoring_job_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = (
model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_model_deployment_monitoring_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_model_deployment_monitoring_job_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_model_deployment_monitoring_job(
job_service.GetModelDeploymentMonitoringJobRequest(),
name="name_value",
)
@pytest.mark.asyncio
async def test_get_model_deployment_monitoring_job_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
model_deployment_monitoring_job.ModelDeploymentMonitoringJob()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_model_deployment_monitoring_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_model_deployment_monitoring_job_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_model_deployment_monitoring_job(
job_service.GetModelDeploymentMonitoringJobRequest(),
name="name_value",
)
@pytest.mark.parametrize(
"request_type",
[
job_service.ListModelDeploymentMonitoringJobsRequest,
dict,
],
)
def test_list_model_deployment_monitoring_jobs(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse(
next_page_token="next_page_token_value",
)
response = client.list_model_deployment_monitoring_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListModelDeploymentMonitoringJobsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_model_deployment_monitoring_jobs_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
) as call:
client.list_model_deployment_monitoring_jobs()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest()
@pytest.mark.asyncio
async def test_list_model_deployment_monitoring_jobs_async(
transport: str = "grpc_asyncio",
request_type=job_service.ListModelDeploymentMonitoringJobsRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListModelDeploymentMonitoringJobsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_model_deployment_monitoring_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ListModelDeploymentMonitoringJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListModelDeploymentMonitoringJobsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_model_deployment_monitoring_jobs_async_from_dict():
await test_list_model_deployment_monitoring_jobs_async(request_type=dict)
def test_list_model_deployment_monitoring_jobs_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ListModelDeploymentMonitoringJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
) as call:
call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse()
client.list_model_deployment_monitoring_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_model_deployment_monitoring_jobs_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ListModelDeploymentMonitoringJobsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListModelDeploymentMonitoringJobsResponse()
)
await client.list_model_deployment_monitoring_jobs(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent/value",
) in kw["metadata"]
def test_list_model_deployment_monitoring_jobs_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = job_service.ListModelDeploymentMonitoringJobsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_model_deployment_monitoring_jobs(
parent="parent_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_model_deployment_monitoring_jobs_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_model_deployment_monitoring_jobs(
job_service.ListModelDeploymentMonitoringJobsRequest(),
parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_model_deployment_monitoring_jobs_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
job_service.ListModelDeploymentMonitoringJobsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_model_deployment_monitoring_jobs(
parent="parent_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_model_deployment_monitoring_jobs_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_model_deployment_monitoring_jobs(
job_service.ListModelDeploymentMonitoringJobsRequest(),
parent="parent_value",
)
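# In the pager tests below, ``pager._metadata`` is compared against the
# routing-header tuple because the pager retains the metadata from the initial
# call and reuses it when fetching subsequent pages.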
def test_list_model_deployment_monitoring_jobs_pager(transport_name: str = "grpc"):
client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
],
next_page_token="abc",
),
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[],
next_page_token="def",
),
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
],
next_page_token="ghi",
),
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_model_deployment_monitoring_jobs(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(
isinstance(i, model_deployment_monitoring_job.ModelDeploymentMonitoringJob)
for i in results
)
def test_list_model_deployment_monitoring_jobs_pages(transport_name: str = "grpc"):
client = JobServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_model_deployment_monitoring_jobs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
],
next_page_token="abc",
),
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[],
next_page_token="def",
),
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
],
next_page_token="ghi",
),
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
],
),
RuntimeError,
)
pages = list(client.list_model_deployment_monitoring_jobs(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_model_deployment_monitoring_jobs_async_pager():
client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_model_deployment_monitoring_jobs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
],
next_page_token="abc",
),
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[],
next_page_token="def",
),
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
],
next_page_token="ghi",
),
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
],
),
RuntimeError,
)
async_pager = await client.list_model_deployment_monitoring_jobs(
request={},
)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(
isinstance(i, model_deployment_monitoring_job.ModelDeploymentMonitoringJob)
for i in responses
)
@pytest.mark.asyncio
async def test_list_model_deployment_monitoring_jobs_async_pages():
client = JobServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_model_deployment_monitoring_jobs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
# Set the response to a series of pages.
call.side_effect = (
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
],
next_page_token="abc",
),
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[],
next_page_token="def",
),
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
],
next_page_token="ghi",
),
job_service.ListModelDeploymentMonitoringJobsResponse(
model_deployment_monitoring_jobs=[
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
model_deployment_monitoring_job.ModelDeploymentMonitoringJob(),
],
),
RuntimeError,
)
pages = []
async for page_ in (
await client.list_model_deployment_monitoring_jobs(request={})
).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type",
[
job_service.UpdateModelDeploymentMonitoringJobRequest,
dict,
],
)
def test_update_model_deployment_monitoring_job(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.update_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
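# UpdateModelDeploymentMonitoringJob is a long-running operation: the client
# wraps the returned operations_pb2.Operation in a google.api_core operation
# future, so these tests only assert the future type rather than a resolved
# resource.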
def test_update_model_deployment_monitoring_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_model_deployment_monitoring_job), "__call__"
) as call:
client.update_model_deployment_monitoring_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest()
@pytest.mark.asyncio
async def test_update_model_deployment_monitoring_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.UpdateModelDeploymentMonitoringJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.update_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.UpdateModelDeploymentMonitoringJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_model_deployment_monitoring_job_async_from_dict():
await test_update_model_deployment_monitoring_job_async(request_type=dict)
def test_update_model_deployment_monitoring_job_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.UpdateModelDeploymentMonitoringJobRequest()
request.model_deployment_monitoring_job.name = (
"model_deployment_monitoring_job.name/value"
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_model_deployment_monitoring_job), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.update_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"model_deployment_monitoring_job.name=model_deployment_monitoring_job.name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_model_deployment_monitoring_job_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.UpdateModelDeploymentMonitoringJobRequest()
request.model_deployment_monitoring_job.name = (
"model_deployment_monitoring_job.name/value"
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_model_deployment_monitoring_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.update_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"model_deployment_monitoring_job.name=model_deployment_monitoring_job.name/value",
) in kw["metadata"]
def test_update_model_deployment_monitoring_job_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_model_deployment_monitoring_job(
model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].model_deployment_monitoring_job
mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value"
)
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_model_deployment_monitoring_job_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_model_deployment_monitoring_job(
job_service.UpdateModelDeploymentMonitoringJobRequest(),
model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_model_deployment_monitoring_job_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_model_deployment_monitoring_job(
model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].model_deployment_monitoring_job
mock_val = gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value"
)
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_model_deployment_monitoring_job_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_model_deployment_monitoring_job(
job_service.UpdateModelDeploymentMonitoringJobRequest(),
model_deployment_monitoring_job=gca_model_deployment_monitoring_job.ModelDeploymentMonitoringJob(
name="name_value"
),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize(
"request_type",
[
job_service.DeleteModelDeploymentMonitoringJobRequest,
dict,
],
)
def test_delete_model_deployment_monitoring_job(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_model_deployment_monitoring_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_model_deployment_monitoring_job), "__call__"
) as call:
client.delete_model_deployment_monitoring_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest()
@pytest.mark.asyncio
async def test_delete_model_deployment_monitoring_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.DeleteModelDeploymentMonitoringJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.DeleteModelDeploymentMonitoringJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_model_deployment_monitoring_job_async_from_dict():
await test_delete_model_deployment_monitoring_job_async(request_type=dict)
def test_delete_model_deployment_monitoring_job_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.DeleteModelDeploymentMonitoringJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_model_deployment_monitoring_job), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_model_deployment_monitoring_job_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.DeleteModelDeploymentMonitoringJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_model_deployment_monitoring_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
def test_delete_model_deployment_monitoring_job_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_model_deployment_monitoring_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_model_deployment_monitoring_job_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_model_deployment_monitoring_job(
job_service.DeleteModelDeploymentMonitoringJobRequest(),
name="name_value",
)
@pytest.mark.asyncio
async def test_delete_model_deployment_monitoring_job_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_model_deployment_monitoring_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_model_deployment_monitoring_job_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_model_deployment_monitoring_job(
job_service.DeleteModelDeploymentMonitoringJobRequest(),
name="name_value",
)
@pytest.mark.parametrize(
"request_type",
[
job_service.PauseModelDeploymentMonitoringJobRequest,
dict,
],
)
def test_pause_model_deployment_monitoring_job(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pause_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.pause_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_pause_model_deployment_monitoring_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pause_model_deployment_monitoring_job), "__call__"
) as call:
client.pause_model_deployment_monitoring_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest()
@pytest.mark.asyncio
async def test_pause_model_deployment_monitoring_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.PauseModelDeploymentMonitoringJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pause_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.pause_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.PauseModelDeploymentMonitoringJobRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_pause_model_deployment_monitoring_job_async_from_dict():
await test_pause_model_deployment_monitoring_job_async(request_type=dict)
def test_pause_model_deployment_monitoring_job_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.PauseModelDeploymentMonitoringJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pause_model_deployment_monitoring_job), "__call__"
) as call:
call.return_value = None
client.pause_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_pause_model_deployment_monitoring_job_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.PauseModelDeploymentMonitoringJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pause_model_deployment_monitoring_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.pause_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
def test_pause_model_deployment_monitoring_job_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pause_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.pause_model_deployment_monitoring_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_pause_model_deployment_monitoring_job_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.pause_model_deployment_monitoring_job(
job_service.PauseModelDeploymentMonitoringJobRequest(),
name="name_value",
)
@pytest.mark.asyncio
async def test_pause_model_deployment_monitoring_job_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.pause_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.pause_model_deployment_monitoring_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_pause_model_deployment_monitoring_job_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.pause_model_deployment_monitoring_job(
job_service.PauseModelDeploymentMonitoringJobRequest(),
name="name_value",
)
@pytest.mark.parametrize(
"request_type",
[
job_service.ResumeModelDeploymentMonitoringJobRequest,
dict,
],
)
def test_resume_model_deployment_monitoring_job(request_type, transport: str = "grpc"):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.resume_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.resume_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest()
# Establish that the response is the type that we expect.
assert response is None
def test_resume_model_deployment_monitoring_job_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.resume_model_deployment_monitoring_job), "__call__"
) as call:
client.resume_model_deployment_monitoring_job()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest()
@pytest.mark.asyncio
async def test_resume_model_deployment_monitoring_job_async(
transport: str = "grpc_asyncio",
request_type=job_service.ResumeModelDeploymentMonitoringJobRequest,
):
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.resume_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.resume_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == job_service.ResumeModelDeploymentMonitoringJobRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_resume_model_deployment_monitoring_job_async_from_dict():
await test_resume_model_deployment_monitoring_job_async(request_type=dict)
def test_resume_model_deployment_monitoring_job_field_headers():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ResumeModelDeploymentMonitoringJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.resume_model_deployment_monitoring_job), "__call__"
) as call:
call.return_value = None
client.resume_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_resume_model_deployment_monitoring_job_field_headers_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = job_service.ResumeModelDeploymentMonitoringJobRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.resume_model_deployment_monitoring_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.resume_model_deployment_monitoring_job(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"name=name/value",
) in kw["metadata"]
def test_resume_model_deployment_monitoring_job_flattened():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.resume_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.resume_model_deployment_monitoring_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_resume_model_deployment_monitoring_job_flattened_error():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.resume_model_deployment_monitoring_job(
job_service.ResumeModelDeploymentMonitoringJobRequest(),
name="name_value",
)
@pytest.mark.asyncio
async def test_resume_model_deployment_monitoring_job_flattened_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.resume_model_deployment_monitoring_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.resume_model_deployment_monitoring_job(
name="name_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_resume_model_deployment_monitoring_job_flattened_error_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.resume_model_deployment_monitoring_job(
job_service.ResumeModelDeploymentMonitoringJobRequest(),
name="name_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.JobServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.JobServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = JobServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.JobServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = JobServiceClient(
client_options=options,
transport=transport,
)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = JobServiceClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.JobServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = JobServiceClient(
client_options={"scopes": ["1", "2"]},
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.JobServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = JobServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.JobServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.JobServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.JobServiceGrpcTransport,
transports.JobServiceGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.JobServiceGrpcTransport,
)
def test_job_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.JobServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_job_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.aiplatform_v1.services.job_service.transports.JobServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.JobServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"create_custom_job",
"get_custom_job",
"list_custom_jobs",
"delete_custom_job",
"cancel_custom_job",
"create_data_labeling_job",
"get_data_labeling_job",
"list_data_labeling_jobs",
"delete_data_labeling_job",
"cancel_data_labeling_job",
"create_hyperparameter_tuning_job",
"get_hyperparameter_tuning_job",
"list_hyperparameter_tuning_jobs",
"delete_hyperparameter_tuning_job",
"cancel_hyperparameter_tuning_job",
"create_batch_prediction_job",
"get_batch_prediction_job",
"list_batch_prediction_jobs",
"delete_batch_prediction_job",
"cancel_batch_prediction_job",
"create_model_deployment_monitoring_job",
"search_model_deployment_monitoring_stats_anomalies",
"get_model_deployment_monitoring_job",
"list_model_deployment_monitoring_jobs",
"update_model_deployment_monitoring_job",
"delete_model_deployment_monitoring_job",
"pause_model_deployment_monitoring_job",
"resume_model_deployment_monitoring_job",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
def test_job_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.aiplatform_v1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.JobServiceTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_job_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.aiplatform_v1.services.job_service.transports.JobServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.JobServiceTransport()
adc.assert_called_once()
def test_job_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
JobServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.JobServiceGrpcTransport,
transports.JobServiceGrpcAsyncIOTransport,
],
)
def test_job_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.JobServiceGrpcTransport, grpc_helpers),
(transports.JobServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_job_service_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"aiplatform.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="aiplatform.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport],
)
def test_job_service_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_job_service_host_no_port():
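    # An endpoint configured without an explicit port should default to :443.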
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="aiplatform.googleapis.com"
),
)
assert client.transport._host == "aiplatform.googleapis.com:443"
def test_job_service_host_with_port():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="aiplatform.googleapis.com:8000"
),
)
assert client.transport._host == "aiplatform.googleapis.com:8000"
def test_job_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.JobServiceGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_job_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.JobServiceGrpcAsyncIOTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport],
)
def test_job_service_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.JobServiceGrpcTransport, transports.JobServiceGrpcAsyncIOTransport],
)
def test_job_service_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_job_service_grpc_lro_client():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(
transport.operations_client,
operations_v1.OperationsClient,
)
    # Ensure that subsequent calls to the property return the exact same object.
assert transport.operations_client is transport.operations_client
def test_job_service_grpc_lro_async_client():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc_asyncio",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(
transport.operations_client,
operations_v1.OperationsAsyncClient,
)
    # Ensure that subsequent calls to the property return the exact same object.
assert transport.operations_client is transport.operations_client
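# The *_path helpers exercised below build fully qualified resource names;
# each one has a parse_* counterpart that should round-trip the same components.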
def test_batch_prediction_job_path():
project = "squid"
location = "clam"
batch_prediction_job = "whelk"
expected = "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format(
project=project,
location=location,
batch_prediction_job=batch_prediction_job,
)
actual = JobServiceClient.batch_prediction_job_path(
project, location, batch_prediction_job
)
assert expected == actual
def test_parse_batch_prediction_job_path():
expected = {
"project": "octopus",
"location": "oyster",
"batch_prediction_job": "nudibranch",
}
path = JobServiceClient.batch_prediction_job_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_batch_prediction_job_path(path)
assert expected == actual
def test_custom_job_path():
project = "cuttlefish"
location = "mussel"
custom_job = "winkle"
expected = "projects/{project}/locations/{location}/customJobs/{custom_job}".format(
project=project,
location=location,
custom_job=custom_job,
)
actual = JobServiceClient.custom_job_path(project, location, custom_job)
assert expected == actual
def test_parse_custom_job_path():
expected = {
"project": "nautilus",
"location": "scallop",
"custom_job": "abalone",
}
path = JobServiceClient.custom_job_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_custom_job_path(path)
assert expected == actual
def test_data_labeling_job_path():
project = "squid"
location = "clam"
data_labeling_job = "whelk"
expected = "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format(
project=project,
location=location,
data_labeling_job=data_labeling_job,
)
actual = JobServiceClient.data_labeling_job_path(
project, location, data_labeling_job
)
assert expected == actual
def test_parse_data_labeling_job_path():
expected = {
"project": "octopus",
"location": "oyster",
"data_labeling_job": "nudibranch",
}
path = JobServiceClient.data_labeling_job_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_data_labeling_job_path(path)
assert expected == actual
def test_dataset_path():
project = "cuttlefish"
location = "mussel"
dataset = "winkle"
expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(
project=project,
location=location,
dataset=dataset,
)
actual = JobServiceClient.dataset_path(project, location, dataset)
assert expected == actual
def test_parse_dataset_path():
expected = {
"project": "nautilus",
"location": "scallop",
"dataset": "abalone",
}
path = JobServiceClient.dataset_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_dataset_path(path)
assert expected == actual
def test_endpoint_path():
project = "squid"
location = "clam"
endpoint = "whelk"
expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(
project=project,
location=location,
endpoint=endpoint,
)
actual = JobServiceClient.endpoint_path(project, location, endpoint)
assert expected == actual
def test_parse_endpoint_path():
expected = {
"project": "octopus",
"location": "oyster",
"endpoint": "nudibranch",
}
path = JobServiceClient.endpoint_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_endpoint_path(path)
assert expected == actual
def test_hyperparameter_tuning_job_path():
project = "cuttlefish"
location = "mussel"
hyperparameter_tuning_job = "winkle"
expected = "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}".format(
project=project,
location=location,
hyperparameter_tuning_job=hyperparameter_tuning_job,
)
actual = JobServiceClient.hyperparameter_tuning_job_path(
project, location, hyperparameter_tuning_job
)
assert expected == actual
def test_parse_hyperparameter_tuning_job_path():
expected = {
"project": "nautilus",
"location": "scallop",
"hyperparameter_tuning_job": "abalone",
}
path = JobServiceClient.hyperparameter_tuning_job_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_hyperparameter_tuning_job_path(path)
assert expected == actual
def test_model_path():
project = "squid"
location = "clam"
model = "whelk"
expected = "projects/{project}/locations/{location}/models/{model}".format(
project=project,
location=location,
model=model,
)
actual = JobServiceClient.model_path(project, location, model)
assert expected == actual
def test_parse_model_path():
expected = {
"project": "octopus",
"location": "oyster",
"model": "nudibranch",
}
path = JobServiceClient.model_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_model_path(path)
assert expected == actual
def test_model_deployment_monitoring_job_path():
project = "cuttlefish"
location = "mussel"
model_deployment_monitoring_job = "winkle"
expected = "projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}".format(
project=project,
location=location,
model_deployment_monitoring_job=model_deployment_monitoring_job,
)
actual = JobServiceClient.model_deployment_monitoring_job_path(
project, location, model_deployment_monitoring_job
)
assert expected == actual
def test_parse_model_deployment_monitoring_job_path():
expected = {
"project": "nautilus",
"location": "scallop",
"model_deployment_monitoring_job": "abalone",
}
path = JobServiceClient.model_deployment_monitoring_job_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_model_deployment_monitoring_job_path(path)
assert expected == actual
def test_network_path():
project = "squid"
network = "clam"
expected = "projects/{project}/global/networks/{network}".format(
project=project,
network=network,
)
actual = JobServiceClient.network_path(project, network)
assert expected == actual
def test_parse_network_path():
expected = {
"project": "whelk",
"network": "octopus",
}
path = JobServiceClient.network_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_network_path(path)
assert expected == actual
def test_tensorboard_path():
project = "oyster"
location = "nudibranch"
tensorboard = "cuttlefish"
expected = (
"projects/{project}/locations/{location}/tensorboards/{tensorboard}".format(
project=project,
location=location,
tensorboard=tensorboard,
)
)
actual = JobServiceClient.tensorboard_path(project, location, tensorboard)
assert expected == actual
def test_parse_tensorboard_path():
expected = {
"project": "mussel",
"location": "winkle",
"tensorboard": "nautilus",
}
path = JobServiceClient.tensorboard_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_tensorboard_path(path)
assert expected == actual
def test_trial_path():
project = "scallop"
location = "abalone"
study = "squid"
trial = "clam"
expected = (
"projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(
project=project,
location=location,
study=study,
trial=trial,
)
)
actual = JobServiceClient.trial_path(project, location, study, trial)
assert expected == actual
def test_parse_trial_path():
expected = {
"project": "whelk",
"location": "octopus",
"study": "oyster",
"trial": "nudibranch",
}
path = JobServiceClient.trial_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_trial_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "cuttlefish"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = JobServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "mussel",
}
path = JobServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "winkle"
expected = "folders/{folder}".format(
folder=folder,
)
actual = JobServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nautilus",
}
path = JobServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "scallop"
expected = "organizations/{organization}".format(
organization=organization,
)
actual = JobServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "abalone",
}
path = JobServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "squid"
expected = "projects/{project}".format(
project=project,
)
actual = JobServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "clam",
}
path = JobServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "whelk"
location = "octopus"
expected = "projects/{project}/locations/{location}".format(
project=project,
location=location,
)
actual = JobServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "oyster",
"location": "nudibranch",
}
path = JobServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = JobServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.JobServiceTransport, "_prep_wrapped_messages"
) as prep:
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.JobServiceTransport, "_prep_wrapped_messages"
) as prep:
transport_class = JobServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = JobServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
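    # Map each transport name to the private channel attribute whose close() is expected to be called.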
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = JobServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(JobServiceClient, transports.JobServiceGrpcTransport),
(JobServiceAsyncClient, transports.JobServiceGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
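    # A client created with an api_key option should resolve credentials through
    # get_api_key_credentials and hand them to the transport constructor.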
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
alert.go
|
package main
import (
"bytes"
"context"
"fmt"
"github.com/antonholmquist/jason"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"net/http"
"os"
)
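// Slack holds the incoming webhook URL, target channel, bot display name,
// and message text for one notification.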
type Slack struct {
WebhookURL string
Channel string
Name string
Message string
}
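// send posts the message to the Slack incoming webhook as a JSON payload.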
func send(s Slack) {
	jsonStr := `{"channel":"` + s.Channel +
		`","username":"` + s.Name +
		`","text":"` + s.Message + `"}`
	req, err := http.NewRequest(
		"POST",
		s.WebhookURL,
		bytes.NewBuffer([]byte(jsonStr)),
	)
	if err != nil {
		fmt.Print(err)
		return
	}
	req.Header.Set("Content-Type", "application/json")
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		fmt.Print(err)
		return
	}
	defer resp.Body.Close()
	fmt.Print(resp)
}
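// createMessage flattens the SNS message JSON into a Slack code block
// addressed to the whole channel.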
func createMessage(message string) string {
json, err := jason.NewObjectFromBytes([]byte(message))
if err != nil {
panic(err)
}
text := "<!channel>\n```\n"
for k, v := range json.Map() {
s, sErr := v.String()
if sErr == nil {
text += fmt.Sprintf("%s\t:%s\n", k, s)
} else {
text += fmt.Sprintf("%s\t:%s\n", k, sErr)
}
}
text += "```"
return text
}
// slackNotice sends a Slack notification for each record in the SNS event.
func slackNotice(ctx context.Context, snsEvent events.SNSEvent) {
fmt.Printf("events %s \n", snsEvent.Records)
for _, record := range snsEvent.Records {
snsRecord := record.SNS
fmt.Printf("[%s %s] Message = %s \n", record.EventSource, snsRecord.Timestamp, snsRecord.Message)
var s = Slack{os.Getenv("SLACK_WEBHOOK_URL"),
os.Getenv("SLACK_CHANNEL"),
os.Getenv("SLACK_NAME"),
createMessage(snsRecord.Message)}
send(s)
}
}
func main() {
lambda.Start(slackNotice)
}
|
[
"\"SLACK_WEBHOOK_URL\"",
"\"SLACK_CHANNEL\"",
"\"SLACK_NAME\""
] |
[] |
[
"SLACK_WEBHOOK_URL",
"SLACK_NAME",
"SLACK_CHANNEL"
] |
[]
|
["SLACK_WEBHOOK_URL", "SLACK_NAME", "SLACK_CHANNEL"]
|
go
| 3 | 0 | |
cmd/minikube/cmd/root.go
|
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
goflag "flag"
"fmt"
"os"
"runtime"
"strings"
"github.com/docker/machine/libmachine"
"github.com/golang/glog"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
"k8s.io/kubectl/pkg/util/templates"
configCmd "k8s.io/minikube/cmd/minikube/cmd/config"
"k8s.io/minikube/pkg/minikube/bootstrapper"
"k8s.io/minikube/pkg/minikube/bootstrapper/kubeadm"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/translate"
)
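// dirs lists the minikube home subdirectories that are created before any command runs.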
var dirs = [...]string{
localpath.MiniPath(),
localpath.MakeMiniPath("certs"),
localpath.MakeMiniPath("machines"),
localpath.MakeMiniPath("cache"),
localpath.MakeMiniPath("cache", "iso"),
localpath.MakeMiniPath("config"),
localpath.MakeMiniPath("addons"),
localpath.MakeMiniPath("files"),
localpath.MakeMiniPath("logs"),
}
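// viperWhiteList holds glog flags whose values may be supplied through viper (config file or environment).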
var viperWhiteList = []string{
"v",
"alsologtostderr",
"log_dir",
}
// RootCmd represents the base command when called without any subcommands
var RootCmd = &cobra.Command{
Use: "minikube",
Short: "Minikube is a tool for managing local Kubernetes clusters.",
Long: `Minikube is a CLI tool that provisions and manages single-node Kubernetes clusters optimized for development workflows.`,
PersistentPreRun: func(cmd *cobra.Command, args []string) {
for _, path := range dirs {
if err := os.MkdirAll(path, 0777); err != nil {
exit.WithError("Error creating minikube directory", err)
}
}
logDir := pflag.Lookup("log_dir")
if !logDir.Changed {
if err := logDir.Value.Set(localpath.MakeMiniPath("logs")); err != nil {
exit.WithError("logdir set failed", err)
}
}
},
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
for _, c := range RootCmd.Commands() {
c.Short = translate.T(c.Short)
c.Long = translate.T(c.Long)
c.Flags().VisitAll(func(flag *pflag.Flag) {
flag.Usage = translate.T(flag.Usage)
})
c.SetUsageTemplate(usageTemplate())
}
RootCmd.Short = translate.T(RootCmd.Short)
RootCmd.Long = translate.T(RootCmd.Long)
RootCmd.Flags().VisitAll(func(flag *pflag.Flag) {
flag.Usage = translate.T(flag.Usage)
})
if runtime.GOOS != "windows" {
// add minikube binaries to the path
targetDir := localpath.MakeMiniPath("bin")
addToPath(targetDir)
}
if err := RootCmd.Execute(); err != nil {
// Cobra already outputs the error, typically because the user provided an unknown command.
os.Exit(exit.BadUsage)
}
}
// usageTemplate just calls translate.T on the default usage template
// explicitly using the raw string instead of calling c.UsageTemplate()
// so the extractor can find this monstrosity of a string
func usageTemplate() string {
return fmt.Sprintf(`%s:{{if .Runnable}}
{{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
{{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
%s:
{{.NameAndAliases}}{{end}}{{if .HasExample}}
%s:
{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
%s:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
{{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
%s:
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
%s:
{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}}
%s:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
{{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
%s{{end}}
`, translate.T("Usage"), translate.T("Aliases"), translate.T("Examples"), translate.T("Available Commands"), translate.T("Flags"), translate.T("Global Flags"), translate.T("Additional help topics"), translate.T(`Use "{{.CommandPath}} [command] --help" for more information about a command.`))
}
// Handle config values for flags used in external packages (e.g. glog)
// by setting them directly, using values from viper when not passed in as args
func setFlagsUsingViper() {
for _, config := range viperWhiteList {
var a = pflag.Lookup(config)
viper.SetDefault(a.Name, a.DefValue)
// If the flag is set, override viper value
if a.Changed {
viper.Set(a.Name, a.Value.String())
}
// Viper will give precedence first to calls to the Set command,
// then to values from the config file
if err := a.Value.Set(viper.GetString(a.Name)); err != nil {
exit.WithError(fmt.Sprintf("failed to set value for %q", a.Name), err)
}
a.Changed = true
}
}
func init() {
translate.DetermineLocale()
RootCmd.PersistentFlags().StringP(config.MachineProfile, "p", constants.DefaultMachineName, `The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently.`)
RootCmd.PersistentFlags().StringP(configCmd.Bootstrapper, "b", "kubeadm", "The name of the cluster bootstrapper that will set up the kubernetes cluster.")
groups := templates.CommandGroups{
{
Message: translate.T("Basic Commands:"),
Commands: []*cobra.Command{
startCmd,
statusCmd,
stopCmd,
deleteCmd,
dashboardCmd,
},
},
{
Message: translate.T("Images Commands:"),
Commands: []*cobra.Command{
dockerEnvCmd,
cacheCmd,
},
},
{
Message: translate.T("Configuration and Management Commands:"),
Commands: []*cobra.Command{
configCmd.AddonsCmd,
configCmd.ConfigCmd,
configCmd.ProfileCmd,
updateContextCmd,
},
},
{
Message: translate.T("Networking and Connectivity Commands:"),
Commands: []*cobra.Command{
serviceCmd,
tunnelCmd,
},
},
{
Message: translate.T("Advanced Commands:"),
Commands: []*cobra.Command{
mountCmd,
sshCmd,
kubectlCmd,
},
},
{
Message: translate.T("Troubleshooting Commands:"),
Commands: []*cobra.Command{
sshKeyCmd,
ipCmd,
logsCmd,
updateCheckCmd,
versionCmd,
},
},
}
groups.Add(RootCmd)
// Ungrouped commands will show up in the "Other Commands" section
RootCmd.AddCommand(completionCmd)
templates.ActsAsRootCommand(RootCmd, []string{"options"}, groups...)
pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
if err := viper.BindPFlags(RootCmd.PersistentFlags()); err != nil {
exit.WithError("Unable to bind flags", err)
}
cobra.OnInitialize(initConfig)
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
configPath := localpath.ConfigFile
viper.SetConfigFile(configPath)
viper.SetConfigType("json")
if err := viper.ReadInConfig(); err != nil {
// This config file is optional, so don't emit errors if missing
if _, ok := err.(viper.ConfigFileNotFoundError); !ok {
glog.Warningf("Error reading config file at %s: %v", configPath, err)
}
}
setupViper()
}
func setupViper() {
viper.SetEnvPrefix(minikubeEnvPrefix)
// Replaces '-' in flags with '_' in env variables
// e.g. iso-url => $ENVPREFIX_ISO_URL
viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
viper.AutomaticEnv()
viper.SetDefault(config.WantUpdateNotification, true)
viper.SetDefault(config.ReminderWaitPeriodInHours, 24)
viper.SetDefault(config.WantReportError, false)
viper.SetDefault(config.WantReportErrorPrompt, true)
viper.SetDefault(config.WantKubectlDownloadMsg, true)
viper.SetDefault(config.WantNoneDriverWarning, true)
viper.SetDefault(config.ShowDriverDeprecationNotification, true)
viper.SetDefault(config.ShowBootstrapperDeprecationNotification, true)
setFlagsUsingViper()
}
// getClusterBootstrapper returns a new bootstrapper for the cluster
func getClusterBootstrapper(api libmachine.API, bootstrapperName string) (bootstrapper.Bootstrapper, error) {
var b bootstrapper.Bootstrapper
var err error
switch bootstrapperName {
case bootstrapper.BootstrapperTypeKubeadm:
b, err = kubeadm.NewKubeadmBootstrapper(api)
if err != nil {
return nil, errors.Wrap(err, "getting kubeadm bootstrapper")
}
default:
return nil, fmt.Errorf("unknown bootstrapper: %s", bootstrapperName)
}
return b, nil
}
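// addToPath prepends dir to the PATH environment variable of the current process.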
func addToPath(dir string) {
newPath := fmt.Sprintf("%s:%s", dir, os.Getenv("PATH"))
glog.Infof("Updating PATH: %s", dir)
os.Setenv("PATH", newPath)
}
|
[
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
go
| 1 | 0 | |
example-rethinkdb/example-rethinkdb.go
|
package main
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
rethink "gopkg.in/gorethink/gorethink.v3"
)
// This is a type to hold our word definitions in
type item struct {
ID string `gorethink:"id,omitempty" json:"_id,omitempty"`
Word string `gorethink:"word" json:"word"`
Definition string `gorethink:"definition" json:"definition"`
}
var session *rethink.Session
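// wordHandler serves the /words endpoint: GET returns all stored words as JSON, PUT inserts a new word/definition pair.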
func wordHandler(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case "GET":
results, err := rethink.DB("examples").Table("words").Run(session)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
var items []*item
err = results.All(&items)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
jsonstr, err := json.Marshal(items)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
w.Write(jsonstr)
return
case "PUT":
r.ParseForm()
newitem := item{Word: r.Form.Get("word"), Definition: r.Form.Get("definition")}
err := rethink.DB("examples").Table("words").Insert(newitem).Exec(session)
if err != nil {
log.Println(err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusAccepted)
return
}
return
}
func main() {
// Connect to database:
// Connection string in $COMPOSE_RETHINKDB_URL
// Compose database certificate in $PATH_TO_RETHINKDB_CERT
roots := x509.NewCertPool()
cert, err := ioutil.ReadFile(os.Getenv("PATH_TO_RETHINKDB_CERT"))
if err != nil {
log.Fatal(err)
}
roots.AppendCertsFromPEM(cert)
rethinkurl, err := url.Parse(os.Getenv("COMPOSE_RETHINKDB_URL"))
if err != nil {
log.Fatal(err)
}
password, setpass := rethinkurl.User.Password()
if !setpass {
log.Fatal("Password needs to be set in $COMPOSE_RETHINKDB_URL")
}
session, err = rethink.Connect(rethink.ConnectOpts{
Address: rethinkurl.Host,
Username: rethinkurl.User.Username(),
Password: password,
TLSConfig: &tls.Config{
RootCAs: roots,
},
})
if err != nil {
log.Fatalln(err)
}
rethink.DBCreate("examples").Exec(session)
rethink.DB("examples").TableCreate("words", rethink.TableCreateOpts{Replicas: 3}).Exec(session)
defer session.Close()
fs := http.FileServer(http.Dir("public"))
http.Handle("/", fs)
http.HandleFunc("/words", wordHandler)
http.ListenAndServe(":8080", nil)
}
|
[
"\"PATH_TO_RETHINKDB_CERT\"",
"\"COMPOSE_RETHINKDB_URL\""
] |
[] |
[
"COMPOSE_RETHINKDB_URL",
"PATH_TO_RETHINKDB_CERT"
] |
[]
|
["COMPOSE_RETHINKDB_URL", "PATH_TO_RETHINKDB_CERT"]
|
go
| 2 | 0 | |
registry/docker.go
|
package registry
import (
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
c "github.com/docker/docker-credential-helpers/client"
"github.com/michaelperel/docker-lock/registry/internal/docker"
)
// DockerWrapper is a registry wrapper for Docker Hub. It supports public
// and private repositories.
type DockerWrapper struct {
ConfigFile string
Client *HTTPClient
authCreds *dockerAuthCredentials
}
type dockerAuthCredentials struct {
username string
password string
}
// NewDockerWrapper creates a DockerWrapper from docker's config.json.
func NewDockerWrapper(
configPath string,
client *HTTPClient,
) (*DockerWrapper, error) {
if client == nil {
client = &HTTPClient{
Client: &http.Client{},
BaseDigestURL: "https://registry-1.docker.io/v2",
BaseTokenURL: "https://auth.docker.io/token",
}
}
w := &DockerWrapper{ConfigFile: configPath, Client: client}
authCreds, err := w.getAuthCredentials()
if err != nil {
return nil, err
}
w.authCreds = authCreds
return w, nil
}
// GetDigest gets the digest from a name and tag. The workflow for
// authenticating with private repositories:
// (1) if "DOCKER_USERNAME" and "DOCKER_PASSWORD" are set, use them.
// (2) Otherwise, try to get credentials from docker's config file. This method
// requires the user to have logged in with the 'docker login' command
// beforehand.
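//
// A minimal usage sketch (hypothetical image name and tag):
//
//	w, _ := NewDockerWrapper(configPath, nil)
//	digest, err := w.GetDigest("library/alpine", "latest")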
func (w *DockerWrapper) GetDigest(name string, tag string) (string, error) {
// Docker-Content-Digest is the root of the hash chain
// https://github.com/docker/distribution/issues/1662
var names []string
if strings.Contains(name, "/") {
names = []string{name, "library/" + name}
} else {
names = []string{"library/" + name, name}
}
for _, name := range names {
token, err := w.getToken(name)
if err != nil {
return "", err
}
url := fmt.Sprintf(
"%s/%s/manifests/%s", w.Client.BaseDigestURL, name, tag,
)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return "", err
}
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token))
req.Header.Add(
"Accept", "application/vnd.docker.distribution.manifest.v2+json",
)
resp, err := w.Client.Client.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
digest := resp.Header.Get("Docker-Content-Digest")
if digest != "" {
return strings.TrimPrefix(digest, "sha256:"), nil
}
}
return "", errors.New("no digest found")
}
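// getToken requests a pull-scoped bearer token for the given repository from the
// registry's token service, using basic auth when credentials are available.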
func (w *DockerWrapper) getToken(name string) (string, error) {
url := fmt.Sprintf(
"%s?scope=repository:%s:pull&service=registry.docker.io",
w.Client.BaseTokenURL,
name,
)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return "", err
}
if w.authCreds.username != "" && w.authCreds.password != "" {
req.SetBasicAuth(w.authCreds.username, w.authCreds.password)
}
resp, err := w.Client.Client.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
decoder := json.NewDecoder(resp.Body)
var t docker.TokenResponse
if err = decoder.Decode(&t); err != nil {
return "", err
}
return t.Token, nil
}
func (w *DockerWrapper) getAuthCredentials() (*dockerAuthCredentials, error) {
var (
username = os.Getenv("DOCKER_USERNAME")
password = os.Getenv("DOCKER_PASSWORD")
)
if username != "" && password != "" {
return &dockerAuthCredentials{
username: username,
password: password,
}, nil
}
if w.ConfigFile == "" {
return &dockerAuthCredentials{}, nil
}
confByt, err := ioutil.ReadFile(w.ConfigFile)
if err != nil {
return nil, err
}
var conf docker.Config
if err = json.Unmarshal(confByt, &conf); err != nil {
return nil, err
}
authByt, err := base64.StdEncoding.DecodeString(conf.Auths.Index.Auth)
if err != nil {
return nil, err
}
authString := string(authByt)
if authString != "" {
// Split on the first colon only so passwords containing ':' are preserved.
auth := strings.SplitN(authString, ":", 2)
return &dockerAuthCredentials{username: auth[0], password: auth[1]}, nil
} else if conf.CredsStore != "" {
authCreds, err := w.getAuthCredentialsFromCredsStore(conf.CredsStore)
if err != nil {
return &dockerAuthCredentials{}, nil
}
return authCreds, nil
}
return &dockerAuthCredentials{}, nil
}
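// getAuthCredentialsFromCredsStore asks the configured docker credential helper
// (docker-credential-<store>) for Docker Hub credentials, returning empty
// credentials if the helper cannot be invoked.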
func (w *DockerWrapper) getAuthCredentialsFromCredsStore(
credsStore string,
) (authCreds *dockerAuthCredentials, err error) {
credsStore = fmt.Sprintf("%s-%s", "docker-credential", credsStore)
defer func() {
if err := recover(); err != nil {
authCreds = &dockerAuthCredentials{}
return
}
}()
p := c.NewShellProgramFunc(credsStore)
credResponse, err := c.Get(p, "https://index.docker.io/v1/")
if err != nil {
return authCreds, err
}
return &dockerAuthCredentials{
username: credResponse.Username,
password: credResponse.Secret,
}, nil
}
// Prefix returns an empty string since images on Docker Hub do not use a
// prefix, unlike third party registries.
func (w *DockerWrapper) Prefix() string {
return ""
}
|
[
"\"DOCKER_USERNAME\"",
"\"DOCKER_PASSWORD\""
] |
[] |
[
"DOCKER_PASSWORD",
"DOCKER_USERNAME"
] |
[]
|
["DOCKER_PASSWORD", "DOCKER_USERNAME"]
|
go
| 2 | 0 | |
test/helper_test.go
|
package test
import (
"context"
"encoding/json"
"fmt"
"math/rand"
"os"
"reflect"
"strconv"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/chyroc/lark"
)
func init() {
rand.Seed(time.Now().UnixNano())
}
func randInt64() int64 {
return rand.Int63()
}
func randInt64String() string {
return strconv.FormatInt(randInt64(), 10)
}
func mockGetTenantAccessTokenFailed(ctx context.Context) (*lark.TokenExpire, *lark.Response, error) {
return nil, nil, fmt.Errorf("failed")
}
func printData(datas ...interface{}) {
for _, v := range datas {
printDataSingle(v)
}
}
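// printDataSingle prints a value prefixed with its concrete type name; basic types
// are printed directly and everything else is JSON-encoded.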
func printDataSingle(v interface{}) {
vt := reflect.TypeOf(v)
if vt != nil {
if vt.Kind() == reflect.Ptr {
vt = vt.Elem()
}
fmt.Printf("%s#", vt.Name())
}
if v == nil {
fmt.Println("nil")
return
}
switch v := v.(type) {
case int:
fmt.Println(v)
case int8:
fmt.Println(v)
case int16:
fmt.Println(v)
case int32:
fmt.Println(v)
case int64:
fmt.Println(v)
case uint:
fmt.Println(v)
case uint8:
fmt.Println(v)
case uint16:
fmt.Println(v)
case uint32:
fmt.Println(v)
case uint64:
fmt.Println(v)
case bool:
fmt.Println(v)
case error:
fmt.Println(v)
default:
vv, _ := json.Marshal(v)
fmt.Println(string(vv))
}
}
func Test_Helper(t *testing.T) {
as := assert.New(t)
t.Run("GetErrorCode", func(t *testing.T) {
as.Equal(int64(-1), lark.GetErrorCode(fmt.Errorf("x")))
})
t.Run("UnwrapMessageContent", func(t *testing.T) {
t.Run("image", func(t *testing.T) {
_, err := lark.UnwrapMessageContent(lark.MsgTypeInteractive, `{"image_key":"image-x"}`)
as.NotNil(err)
as.Contains(err.Error(), "unknown message type")
})
t.Run("image", func(t *testing.T) {
_, err := lark.UnwrapMessageContent(lark.MsgTypeText, "")
as.NotNil(err)
as.Contains(err.Error(), "invalid content")
})
t.Run("text", func(t *testing.T) {
res, err := lark.UnwrapMessageContent(lark.MsgTypeText, `{"text":"hi"}`)
as.Nil(err)
as.Equal("hi", res.Text.Text)
})
t.Run("image", func(t *testing.T) {
res, err := lark.UnwrapMessageContent(lark.MsgTypeImage, `{"image_key":"image-x"}`)
as.Nil(err)
as.Equal("image-x", res.Image.ImageKey)
})
})
t.Run("", func(t *testing.T) {
printData(nil)
printData("hi")
printData(1)
printData(false)
printData(lark.MsgTypeText)
printData(lark.SendRawMessageReq{Content: "x"})
var x *lark.SendRawMessageReq = nil
printData(x)
printData(lark.SendRawMessageReq{Content: "x"}, x)
})
}
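// IsNotInCI reports whether the IN_CI environment variable is unset, logging a skip notice when it is.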
func IsNotInCI() bool {
isNotInCI := os.Getenv("IN_CI") == ""
if isNotInCI {
fmt.Println("NOT IN CI, SKIP")
}
return isNotInCI
}
func IsInCI() bool {
return os.Getenv("IN_CI") != ""
}
|
[
"\"IN_CI\"",
"\"IN_CI\""
] |
[] |
[
"IN_CI"
] |
[]
|
["IN_CI"]
|
go
| 1 | 0 | |
ida_plugin/uefi_analyser/guids/edk_guids.py
|
# Trusted GUIDs list from https://github.com/snare/ida-efiutils/blob/master/efiguids.py
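# Each value follows the EFI_GUID layout: Data1 (32-bit), Data2 (16-bit), Data3 (16-bit), then the eight Data4 bytes.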
edk_guids = {
'ACPI_TABLE_GUID' : [0xeb9d2d30, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'APPLE_REMOVABLE_MEDIA_PROTOCOL_GUID' : [0x2ea9743a, 0x23d9, 0x425e, 0x87, 0x2c, 0xf6, 0x15, 0xaa, 0x19, 0x57, 0x88],
'ARM_GLOBAL_VARIABLE_PPI_GUID' : [0xab1c1816, 0xd542, 0x4e6f, 0x9b, 0x1e, 0x8e, 0xcd, 0x92, 0x53, 0xe2, 0xe7],
'ARM_HOB_GLOBAL_VARIABLE_GUID' : [0xc3253c90, 0xa24f, 0x4599, 0xa6, 0x64, 0x1f, 0x88, 0x13, 0x77, 0x8f, 0xc9],
'ARM_MP_CORE_INFO_GUID' : [0xa4ee0728, 0xe5d7, 0x4ac5, 0xb2, 0x1e, 0x65, 0x8e, 0xd8, 0x57, 0xe8, 0x34],
'ARM_MP_CORE_INFO_PPI_GUID' : [0x6847cc74, 0xe9ec, 0x4f8f, 0xa2, 0x9d, 0xab, 0x44, 0xe7, 0x54, 0xa8, 0xfc],
'BDS_LIB_STRING_PACKAGE_GUID' : [0x3b4d9b23, 0x95ac, 0x44f6, 0x9f, 0xcd, 0xe, 0x95, 0x94, 0x58, 0x6c, 0x72],
'BLOCKIO_VENDOR_GUID' : [0xcf31fac5, 0xc24e, 0x11d2, 0x85, 0xf3, 0x0, 0xa0, 0xc9, 0x3e, 0xc9, 0x3b],
'BLOCK_MMIO_PROTOCOL_GUID' : [0x6b558ce3, 0x69e5, 0x4c67, 0xa6, 0x34, 0xf7, 0xfe, 0x72, 0xad, 0xbe, 0x84],
'BOOT_MAINT_FORMSET_GUID' : [0x642237c7, 0x35d4, 0x472d, 0x83, 0x65, 0x12, 0xe0, 0xcc, 0xf2, 0x7a, 0x22],
'BOOT_MANAGER_FORMSET_GUID' : [0x847bc3fe, 0xb974, 0x446d, 0x94, 0x49, 0x5a, 0xd5, 0x41, 0x2e, 0x99, 0x3b],
'CONNECT_CONIN_EVENT_GUID' : [0xdb4e8151, 0x57ed, 0x4bed, 0x88, 0x33, 0x67, 0x51, 0xb5, 0xd1, 0xa8, 0xd7],
'DEVICE_MANAGER_FORMSET_GUID' : [0x3ebfa8e6, 0x511d, 0x4b5b, 0xa9, 0x5f, 0xfb, 0x38, 0x26, 0xf, 0x1c, 0x27],
'DP_HII_GUID' : [0xeb832fd9, 0x9089, 0x4898, 0x83, 0xc9, 0x41, 0x61, 0x8f, 0x5c, 0x48, 0xb9],
'DRIVER_HEALTH_FORMSET_GUID' : [0xf76e0a70, 0xb5ed, 0x4c38, 0xac, 0x9a, 0xe5, 0xf5, 0x4b, 0xf1, 0x6e, 0x34],
'DRIVER_SAMPLE_FORMSET_GUID' : [0xA04A27f4, 0xDF00, 0x4D42, 0xB5, 0x52, 0x39, 0x51, 0x13, 0x02, 0x11, 0x3D],
'DRIVER_SAMPLE_INVENTORY_GUID' : [0xb3f56470, 0x6141, 0x4621, 0x8f, 0x19, 0x70, 0x4e, 0x57, 0x7a, 0xa9, 0xe8],
'DUET_CONSOLEOUT_CONFIG_GUID' : [0xED150714, 0xDF30, 0x407D, 0xB2, 0x4A, 0x4B, 0x74, 0x2F, 0xD5, 0xCE, 0xA2],
'DXE_CORE_FILE_NAME_GUID' : [0xD6A2CB7F, 0x6A18, 0x4e2f, 0xB4, 0x3B, 0x99, 0x20, 0xA7, 0x33, 0x70, 0x0A],
'DXE_SERVICES_TABLE_GUID' : [0x5ad34ba, 0x6f02, 0x4214, 0x95, 0x2e, 0x4d, 0xa0, 0x39, 0x8e, 0x2b, 0xb9],
'EBL_ADD_COMMAND_PROTOCOL_GUID' : [0xaeda2428, 0x9a22, 0x4637, 0x9b, 0x21, 0x54, 0x5e, 0x28, 0xfb, 0xb8, 0x29],
'ECP_PEI_PCI_CFG_PPI_GUID' : [0xb0ee53d4, 0xa049, 0x4a79, 0xb2, 0xff, 0x19, 0xd9, 0xfa, 0xef, 0xaa, 0x94],
'EFI_ABSOLUTE_POINTER_PROTOCOL_GUID' : [0x8D59D32B, 0xC655, 0x4AE9, 0x9B, 0x15, 0xF2, 0x59, 0x04, 0x99, 0x2A, 0x43],
'EFI_ACPI_20_TABLE_GUID' : [0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_ACPI_S3_CONTEXT_GUID' : [0xef98d3a, 0x3e33, 0x497a, 0xa4, 0x1, 0x77, 0xbe, 0x3e, 0xb7, 0x4f, 0x38],
'EFI_ACPI_S3_SAVE_GUID' : [0x125f2de1, 0xfb85, 0x440c, 0xa5, 0x4c, 0x4d, 0x99, 0x35, 0x8a, 0x8d, 0x38],
'EFI_ACPI_SDT_PROTOCOL_GUID' : [0xeb97088e, 0xcfdf, 0x49c6, 0xbe, 0x4b, 0xd9, 0x6, 0xa5, 0xb2, 0xe, 0x86],
'EFI_ACPI_SUPPORT_GUID' : [0xdbff9d55, 0x89b7, 0x46da, 0xbd, 0xdf, 0x67, 0x7d, 0x3d, 0xc0, 0x24, 0x1d],
'EFI_ACPI_TABLE_GUID' : [0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_ACPI_TABLE_PROTOCOL_GUID' : [0xffe06bdd, 0x6107, 0x46a6, 0x7b, 0xb2, 0x5a, 0x9c, 0x7e, 0xc5, 0x27, 0x5c],
'EFI_ACPI_TABLE_STORAGE_GUID' : [0x7e374e25, 0x8e01, 0x4fee, 0x87, 0xf2, 0x39, 0xc, 0x23, 0xc6, 0x6, 0xcd],
'EFI_ACPI_VARIABLE_COMPATIBILITY_GUID' : [0xc020489e, 0x6db2, 0x4ef2, 0x9a, 0xa5, 0xca, 0x6, 0xfc, 0x11, 0xd3, 0x6a],
'EFI_ALTERNATE_FV_BLOCK_GUID' : [0xf496922d, 0x172f, 0x4bbc, 0xa1, 0xeb, 0xe, 0xeb, 0x94, 0x9c, 0x34, 0x86],
'EFI_APRIORI_GUID' : [0xfc510ee7, 0xffdc, 0x11d4, 0xbd, 0x41, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_ARP_PROTOCOL_GUID' : [0xf4b427bb, 0xba21, 0x4f16, 0xbc, 0x4e, 0x43, 0xe4, 0x16, 0xab, 0x61, 0x9c],
'EFI_ARP_SERVICE_BINDING_PROTOCOL_GUID' : [0xf44c00ee, 0x1f2c, 0x4a00, 0xaa, 0x9, 0x1c, 0x9f, 0x3e, 0x8, 0x0, 0xa3],
'EFI_ATA_PASS_THRU_PROTOCOL_GUID' : [0x1d3de7f0, 0x807, 0x424f, 0xaa, 0x69, 0x11, 0xa5, 0x4e, 0x19, 0xa4, 0x6f],
'EFI_AUTHENTICATED_VARIABLE_GUID' : [0xaaf32c78, 0x947b, 0x439a, 0xa1, 0x80, 0x2e, 0x14, 0x4e, 0xc3, 0x77, 0x92],
'EFI_AUTHENTICATION_CHAP_LOCAL_GUID' : [0xc280c73e, 0x15ca, 0x11da, 0xb0, 0xca, 0x00, 0x10, 0x83, 0xff, 0xca, 0x4d],
'EFI_AUTHENTICATION_CHAP_RADIUS_GUID' : [0xd6062b50, 0x15ca, 0x11da, 0x92, 0x19, 0x00, 0x10, 0x83, 0xff, 0xca, 0x4d],
'EFI_AUTHENTICATION_INFO_PROTOCOL_GUID' : [0x7671d9d0, 0x53db, 0x4173, 0xaa, 0x69, 0x23, 0x27, 0xf2, 0x1f, 0x0b, 0xc7],
'EFI_BDS_ARCH_PROTOCOL_GUID' : [0x665E3FF6, 0x46CC, 0x11d4, 0x9A, 0x38, 0x00, 0x90, 0x27, 0x3F, 0xC1, 0x4D],
'EFI_BIS_PROTOCOL_GUID' : [0x0b64aab0, 0x5429, 0x11d4, 0x98, 0x16, 0x00, 0xa0, 0xc9, 0x1f, 0xad, 0xcf],
'EFI_BLOCK_IO2_PROTOCOL_GUID' : [0xa77b2472, 0xe282, 0x4e9f, 0xa2, 0x45, 0xc2, 0xc0, 0xe2, 0x7b, 0xbc, 0xc1],
'EFI_BLOCK_IO_PROTOCOL_GUID' : [0x964e5b21, 0x6459, 0x11d2, 0x8e, 0x39, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'EFI_BOOT_LOGO_PROTOCOL_GUID' : [0xcdea2bd3, 0xfc25, 0x4c1c, 0xb9, 0x7c, 0xb3, 0x11, 0x86, 0x6, 0x49, 0x90],
'EFI_BOOT_SCRIPT_EXECUTOR_CONTEXT_GUID' : [0x79cb58c4, 0xac51, 0x442f, 0xaf, 0xd7, 0x98, 0xe4, 0x7d, 0x2e, 0x99, 0x8],
'EFI_BOOT_SCRIPT_EXECUTOR_VARIABLE_GUID' : [0x3079818c, 0x46d4, 0x4a73, 0xae, 0xf3, 0xe3, 0xe4, 0x6c, 0xf1, 0xee, 0xdb],
'EFI_BOOT_SCRIPT_SAVE_PROTOCOL_GUID' : [0x470e1529, 0xb79e, 0x4e32, 0xa0, 0xfe, 0x6a, 0x15, 0x6d, 0x29, 0xf9, 0xb2],
'EFI_BUS_SPECIFIC_DRIVER_OVERRIDE_PROTOCOL_GUID' : [0x3bc1b285, 0x8a15, 0x4a82, 0xaa, 0xbf, 0x4d, 0x7d, 0x13, 0xfb, 0x32, 0x65],
'EFI_CACHE_SUBCLASS_GUID' : [0x7f0013a7, 0xdc79, 0x4b22, 0x80, 0x99, 0x11, 0xf7, 0x5f, 0xdc, 0x82, 0x9d],
'EFI_CAPSULE_ARCH_PROTOCOL_GUID' : [0x5053697e, 0x2cbc, 0x4819, 0x90, 0xd9, 0x05, 0x80, 0xde, 0xee, 0x57, 0x54],
'EFI_CAPSULE_ARCH_PROTOCOL_GUID' : [0x5053697e, 0x2cbc, 0x4819, 0x90, 0xd9, 0x5, 0x80, 0xde, 0xee, 0x57, 0x54],
'EFI_CAPSULE_GUID' : [0x3B6686BD, 0x0D76, 0x4030, 0xB7, 0x0E, 0xB5, 0x51, 0x9E, 0x2F, 0xC5, 0xA0],
'EFI_CAPSULE_INFO_GUID' : [0x8B34EAC7, 0x2690, 0x460B, 0x8B, 0xA5, 0xD5, 0xCF, 0x32, 0x83, 0x17, 0x35],
'EFI_CAPSULE_VENDOR_GUID' : [0x711C703F, 0xC285, 0x4B10, 0xA3, 0xB0, 0x36, 0xEC, 0xBD, 0x3C, 0x8B, 0xE2],
'EFI_CERT_RSA2048_GUID' : [0x3c5766e8, 0x269c, 0x4e34, 0xaa, 0x14, 0xed, 0x77, 0x6e, 0x85, 0xb3, 0xb6],
'EFI_CERT_RSA2048_SHA1_GUID' : [0x67f8444f, 0x8743, 0x48f1, 0xa3, 0x28, 0x1e, 0xaa, 0xb8, 0x73, 0x60, 0x80],
'EFI_CERT_RSA2048_SHA256_GUID' : [0xe2b36190, 0x879b, 0x4a3d, 0xad, 0x8d, 0xf2, 0xe7, 0xbb, 0xa3, 0x27, 0x84],
'EFI_CERT_SHA1_GUID' : [0x826ca512, 0xcf10, 0x4ac9, 0xb1, 0x87, 0xbe, 0x1, 0x49, 0x66, 0x31, 0xbd],
'EFI_CERT_SHA224_GUID' : [0xb6e5233, 0xa65c, 0x44c9, 0x94, 0x7, 0xd9, 0xab, 0x83, 0xbf, 0xc8, 0xbd],
'EFI_CERT_SHA256_GUID' : [0xc1c41626, 0x504c, 0x4092, 0xac, 0xa9, 0x41, 0xf9, 0x36, 0x93, 0x43, 0x28],
'EFI_CERT_SHA384_GUID' : [0xff3e5307, 0x9fd0, 0x48c9, 0x85, 0xf1, 0x8a, 0xd5, 0x6c, 0x70, 0x1e, 0x1],
'EFI_CERT_SHA512_GUID' : [0x93e0fae, 0xa6c4, 0x4f50, 0x9f, 0x1b, 0xd4, 0x1e, 0x2b, 0x89, 0xc1, 0x9a],
'EFI_CERT_TYPE_PKCS7_GUID' : [0x4aafd29d, 0x68df, 0x49ee, 0x8a, 0xa9, 0x34, 0x7d, 0x37, 0x56, 0x65, 0xa7],
'EFI_CERT_TYPE_RSA2048_SHA256_GUID' : [0xa7717414, 0xc616, 0x4977, 0x94, 0x20, 0x84, 0x47, 0x12, 0xa7, 0x35, 0xbf],
'EFI_CERT_X509_GUID' : [0xa5c059a1, 0x94e4, 0x4aa7, 0x87, 0xb5, 0xab, 0x15, 0x5c, 0x2b, 0xf0, 0x72],
'EFI_COMPATIBLE_MEMORY_TESTED_PROTOCOL_GUID' : [0x64c475ef, 0x344b, 0x492c, 0x93, 0xad, 0xab, 0x9e, 0xb4, 0x39, 0x50, 0x4],
'EFI_COMPONENT_NAME2_PROTOCOL_GUID' : [0x6a7a5cff, 0xe8d9, 0x4f70, 0xba, 0xda, 0x75, 0xab, 0x30, 0x25, 0xce, 0x14],
'EFI_COMPONENT_NAME_PROTOCOL_GUID' : [0x107a772c, 0xd5e1, 0x11d4, 0x9a, 0x46, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_CONFIG_FILE_NAME_GUID' : [0x98B8D59B, 0xE8BA, 0x48EE, 0x98, 0xDD, 0xC2, 0x95, 0x39, 0x2F, 0x1E, 0xDB],
'EFI_CONSOLE_CONTROL_PROTOCOL_GUID' : [0xf42f7782, 0x12e, 0x4c12, 0x99, 0x56, 0x49, 0xf9, 0x43, 0x4, 0xf7, 0x21],
'EFI_CONSOLE_IN_DEVICE_GUID' : [0xd3b36f2b, 0xd551, 0x11d4, 0x9a, 0x46, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_CONSOLE_OUT_DEVICE_GUID' : [0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_CPU_ARCH_PROTOCOL_GUID' : [0x26baccb1, 0x6f42, 0x11d4, 0xbc, 0xe7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_CPU_IO2_PROTOCOL_GUID' : [0xad61f191, 0xae5f, 0x4c0e, 0xb9, 0xfa, 0xe8, 0x69, 0xd2, 0x88, 0xc6, 0x4f],
'EFI_CPU_IO_PROTOCOL_GUID' : [0xB0732526, 0x38C8, 0x4b40, 0x88, 0x77, 0x61, 0xC7, 0xB0, 0x6A, 0xAC, 0x45],
'EFI_CRC32_GUIDED_SECTION_EXTRACTION_GUID' : [0xFC1BCDB0, 0x7D31, 0x49aa, 0x93, 0x6A, 0xA4, 0x60, 0x0D, 0x9D, 0xD0, 0x83],
'EFI_CRC32_GUIDED_SECTION_EXTRACTION_PROTOCOL_GUID' : [0xFC1BCDB0, 0x7D31, 0x49aa, 0x93, 0x6A, 0xA4, 0x60, 0x0D, 0x9D, 0xD0, 0x83],
'EFI_CUSTOMIZED_DECOMPRESS_PROTOCOL_GUID' : [0x9a44198e, 0xa4a2, 0x44e6, 0x8a, 0x1f, 0x39, 0xbe, 0xfd, 0xac, 0x89, 0x6f],
'EFI_DATA_HUB_PROTOCOL_GUID' : [0xae80d021, 0x618e, 0x11d4, 0xbc, 0xd7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_DATA_HUB_STATUS_CODE_RECORD_GUID' : [0xd083e94c, 0x6560, 0x42e4, 0xb6, 0xd4, 0x2d, 0xf7, 0x5a, 0xdf, 0x6a, 0x2a],
'EFI_DEBUGPORT_PROTOCOL_GUID' : [0xEBA4E8D2, 0x3858, 0x41EC, 0xA2, 0x81, 0x26, 0x47, 0xBA, 0x96, 0x60, 0xD0],
'EFI_DEBUG_AGENT_GUID' : [0x865a5a9b, 0xb85d, 0x474c, 0x84, 0x55, 0x65, 0xd1, 0xbe, 0x84, 0x4b, 0xe2],
'EFI_DEBUG_ASSERT_PROTOCOL_GUID' : [0xbe499c92, 0x7d4b, 0x11d4, 0xbc, 0xee, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_DEBUG_IMAGE_INFO_TABLE_GUID' : [0x49152e77, 0x1ada, 0x4764, 0xb7, 0xa2, 0x7a, 0xfe, 0xfe, 0xd9, 0x5e, 0x8b],
'EFI_DEBUG_MASK_PROTOCOL_GUID' : [0x4c8a2451, 0xc207, 0x405b, 0x96, 0x94, 0x99, 0xea, 0x13, 0x25, 0x13, 0x41],
'EFI_DEBUG_SERIAL_IO_PROTOCOL_GUID' : [0xe683dc4f, 0x9ed, 0x4f22, 0x86, 0x6b, 0x8e, 0x40, 0x46, 0x94, 0x7c, 0x6c],
'EFI_DEBUG_SUPPORT_PERIODIC_CALLBACK_PROTOCOL_GUID' : [0x9546e07c, 0x2cbb, 0x4c88, 0x98, 0x6c, 0xcd, 0x34, 0x10, 0x86, 0xf0, 0x44],
'EFI_DEBUG_SUPPORT_PROTOCOL_GUID' : [0x2755590C, 0x6F3C, 0x42FA, 0x9E, 0xA4, 0xA3, 0xBA, 0x54, 0x3C, 0xDA, 0x25],
'EFI_DECOMPRESS_PROTOCOL_GUID' : [0xd8117cfe, 0x94a6, 0x11d4, 0x9a, 0x3a, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_DEFAULT_BMP_LOGO_GUID' : [0x7BB28B99,0x61BB,0x11d5,0x9A,0x5D,0x00,0x90,0x27,0x3F,0xC1,0x4D],
'EFI_DEFERRED_IMAGE_LOAD_PROTOCOL_GUID' : [0x15853d7c, 0x3ddf, 0x43e0, 0xa1, 0xcb, 0xeb, 0xf8, 0x5b, 0x8f, 0x87, 0x2c],
'EFI_DEVICE_IO_PROTOCOL_GUID' : [0xaf6ac311, 0x84c3, 0x11d2, 0x8e, 0x3c, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'EFI_DEVICE_PATH_FROM_TEXT_PROTOCOL_GUID' : [0x5c99a21, 0xc70f, 0x4ad2, 0x8a, 0x5f, 0x35, 0xdf, 0x33, 0x43, 0xf5, 0x1e],
'EFI_DEVICE_PATH_PROTOCOL_GUID' : [0x9576e91, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'EFI_DEVICE_PATH_TO_TEXT_PROTOCOL_GUID' : [0x8b843e20, 0x8132, 0x4852, 0x90, 0xcc, 0x55, 0x1a, 0x4e, 0x4a, 0x7f, 0x1c],
'EFI_DEVICE_PATH_UTILITIES_PROTOCOL_GUID' : [0x379be4e, 0xd706, 0x437d, 0xb0, 0x37, 0xed, 0xb8, 0x2f, 0xb7, 0x72, 0xa4],
'EFI_DHCP4_PROTOCOL_GUID' : [0x8a219718, 0x4ef5, 0x4761, 0x91, 0xc8, 0xc0, 0xf0, 0x4b, 0xda, 0x9e, 0x56],
'EFI_DHCP4_SERVICE_BINDING_PROTOCOL_GUID' : [0x9d9a39d8, 0xbd42, 0x4a73, 0xa4, 0xd5, 0x8e, 0xe9, 0x4b, 0xe1, 0x13, 0x80],
'EFI_DHCP6_PROTOCOL_GUID' : [0x87c8bad7, 0x595, 0x4053, 0x82, 0x97, 0xde, 0xde, 0x39, 0x5f, 0x5d, 0x5b],
'EFI_DHCP6_SERVICE_BINDING_PROTOCOL_GUID' : [0x9fb9a8a1, 0x2f4a, 0x43a6, 0x88, 0x9c, 0xd0, 0xf7, 0xb6, 0xc4, 0x7a, 0xd5],
'EFI_DISK_INFO_AHCI_INTERFACE_GUID' : [0x9e498932, 0x4abc, 0x45af, 0xa3, 0x4d, 0x2, 0x47, 0x78, 0x7b, 0xe7, 0xc6],
'EFI_DISK_INFO_IDE_INTERFACE_GUID' : [0x5e948fe3, 0x26d3, 0x42b5, 0xaf, 0x17, 0x61, 0x2, 0x87, 0x18, 0x8d, 0xec],
'EFI_DISK_INFO_PROTOCOL_GUID' : [0xd432a67f, 0x14dc, 0x484b, 0xb3, 0xbb, 0x3f, 0x2, 0x91, 0x84, 0x93, 0x27],
'EFI_DISK_INFO_SCSI_INTERFACE_GUID' : [0x8f74baa, 0xea36, 0x41d9, 0x95, 0x21, 0x21, 0xa7, 0xf, 0x87, 0x80, 0xbc],
'EFI_DISK_INFO_USB_INTERFACE_GUID' : [0xcb871572, 0xc11a, 0x47b5, 0xb4, 0x92, 0x67, 0x5e, 0xaf, 0xa7, 0x77, 0x27],
'EFI_DISK_IO_PROTOCOL_GUID' : [0xce345171, 0xba0b, 0x11d2, 0x8e, 0x4f, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'EFI_DPC_PROTOCOL_GUID' : [0x480f8ae9, 0xc46, 0x4aa9, 0xbc, 0x89, 0xdb, 0x9f, 0xba, 0x61, 0x98, 0x6],
'EFI_DRIVER_BINDING_PROTOCOL_GUID' : [0x18a031ab, 0xb443, 0x4d1a, 0xa5, 0xc0, 0xc, 0x9, 0x26, 0x1e, 0x9f, 0x71],
'EFI_DRIVER_CONFIGURATION2_PROTOCOL_GUID' : [0xbfd7dc1d, 0x24f1, 0x40d9, 0x82, 0xe7, 0x2e, 0x09, 0xbb, 0x6b, 0x4e, 0xbe],
'EFI_DRIVER_CONFIGURATION_PROTOCOL_GUID' : [0x107a772b, 0xd5e1, 0x11d4, 0x9a, 0x46, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_DRIVER_DIAGNOSTICS2_PROTOCOL_GUID' : [0x4d330321, 0x025f, 0x4aac, 0x90, 0xd8, 0x5e, 0xd9, 0x0, 0x17, 0x3b, 0x63],
'EFI_DRIVER_DIAGNOSTICS2_PROTOCOL_GUID' : [0x4d330321, 0x025f, 0x4aac, 0x90, 0xd8, 0x5e, 0xd9, 0x00, 0x17, 0x3b, 0x63],
'EFI_DRIVER_DIAGNOSTICS_PROTOCOL_GUID' : [0x0784924f, 0xe296, 0x11d4, 0x9a, 0x49, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_DRIVER_FAMILY_OVERRIDE_PROTOCOL_GUID' : [0xb1ee129e, 0xda36, 0x4181, 0x91, 0xf8, 0x4, 0xa4, 0x92, 0x37, 0x66, 0xa7],
'EFI_DRIVER_HEALTH_PROTOCOL_GUID' : [0x2a534210, 0x9280, 0x41d8, 0xae, 0x79, 0xca, 0xda, 0x1, 0xa2, 0xb1, 0x27],
'EFI_DRIVER_SUPPORTED_EFI_VERSION_PROTOCOL_GUID' : [0x5c198761, 0x16a8, 0x4e69, 0x97, 0x2c, 0x89, 0xd6, 0x79, 0x54, 0xf8, 0x1d],
'EFI_DXE_IPL_PPI_GUID' : [0xae8ce5d, 0xe448, 0x4437, 0xa8, 0xd7, 0xeb, 0xf5, 0xf1, 0x94, 0xf7, 0x31],
'EFI_DXE_SERVICES_TABLE_GUID' : [0x5ad34ba, 0x6f02, 0x4214, 0x95, 0x2e, 0x4d, 0xa0, 0x39, 0x8e, 0x2b, 0xb9],
'EFI_DXE_SMM_READY_TO_LOCK_PROTOCOL_GUID' : [0x60ff8964, 0xe906, 0x41d0, 0xaf, 0xed, 0xf2, 0x41, 0xe9, 0x74, 0xe0, 0x8e],
'EFI_EAP_MANAGEMENT_PROTOCOL_GUID' : [0xbb62e663, 0x625d, 0x40b2, 0xa0, 0x88, 0xbb, 0xe8, 0x36, 0x23, 0xa2, 0x45],
'EFI_EAP_PROTOCOL_GUID' : [0x5d9f96db, 0xe731, 0x4caa, 0xa0, 0xd, 0x72, 0xe1, 0x87, 0xcd, 0x77, 0x62],
'EFI_EBC_INTERPRETER_PROTOCOL_GUID' : [0x13AC6DD1, 0x73D0, 0x11D4, 0xB0, 0x6B, 0x00, 0xAA, 0x00, 0xBD, 0x6D, 0xE7],
'EFI_EBC_SIMPLE_DEBUGGER_PROTOCOL_GUID' : [0x2a72d11e, 0x7376, 0x40f6, 0x9c, 0x68, 0x23, 0xfa, 0x2f, 0xe3, 0x63, 0xf1],
'EFI_EBC_VM_TEST_PROTOCOL_GUID' : [0xAAEACCFD, 0xF27B, 0x4C17, 0xB6, 0x10, 0x75, 0xCA, 0x1F, 0x2D, 0xFB, 0x52],
'EFI_EDID_ACTIVE_PROTOCOL_GUID' : [0xbd8c1056, 0x9f36, 0x44ec, 0x92, 0xa8, 0xa6, 0x33, 0x7f, 0x81, 0x79, 0x86],
'EFI_EDID_DISCOVERED_PROTOCOL_GUID' : [0x1c0c34f6, 0xd380, 0x41fa, 0xa0, 0x49, 0x8a, 0xd0, 0x6c, 0x1a, 0x66, 0xaa],
'EFI_EDID_DISCOVERED_PROTOCOL_GUID' : [0x1c0c34f6, 0xd380, 0x41fa, 0xa0, 0x49, 0x8a, 0xd0, 0x6c,0x1a, 0x66, 0xaa],
'EFI_EDID_OVERRIDE_PROTOCOL_GUID' : [0x48ecb431, 0xfb72, 0x45c0, 0xa9, 0x22, 0xf4, 0x58, 0xfe, 0x4, 0xb, 0xd5],
'EFI_EMU_PHYSICAL_DISK_GUID' : [0xf2ba331a, 0x8985, 0x11db, 0xa4, 0x06, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_EMU_SYSTEM_CONFIG_GUID' : [0x9C4FB516, 0x3A1E, 0xD847, 0xA1, 0xA1, 0x70, 0x58, 0xB6, 0x98, 0x67, 0x32],
'EFI_EMU_VIRTUAL_DISK_GUID' : [0xf2ba331a, 0x8985, 0x11db, 0xa4, 0x06, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_END_OF_DXE_EVENT_GROUP_GUID' : [0x2ce967a, 0xdd7e, 0x4ffc, 0x9e, 0xe7, 0x81, 0x0c, 0xf0, 0x47, 0x8, 0x80],
'EFI_END_OF_DXE_EVENT_GROUP_GUID' : [0x2ce967a, 0xdd7e, 0x4ffc, 0x9e, 0xe7, 0x81, 0xc, 0xf0, 0x47, 0x8, 0x80],
'EFI_ERROR_SECTION_DIRECTED_IO_DMAR_GUID' : [0x71761d37, 0x32b2, 0x45cd, 0xa7, 0xd0, 0xb0, 0xfe, 0xdd, 0x93, 0xe8, 0xcf],
'EFI_ERROR_SECTION_DMAR_GENERIC_GUID' : [0x5b51fef7, 0xc79d, 0x4434, 0x8f, 0x1b, 0xaa, 0x62, 0xde, 0x3e, 0x2c, 0x64],
'EFI_ERROR_SECTION_FW_ERROR_RECORD_GUID' : [0x81212a96, 0x09ed, 0x4996, 0x94, 0x71, 0x8d, 0x72, 0x9c, 0x8e, 0x69, 0xed],
'EFI_ERROR_SECTION_IOMMU_DMAR_GUID' : [0x036f84e1, 0x7f37, 0x428c, 0xa7, 0x9e, 0x57, 0x5f, 0xdf, 0xaa, 0x84, 0xec],
'EFI_ERROR_SECTION_PCIE_GUID' : [0xd995e954, 0xbbc1, 0x430f, 0xad, 0x91, 0xb4, 0x4d, 0xcb, 0x3c, 0x6f, 0x35],
'EFI_ERROR_SECTION_PCI_DEVICE_GUID' : [0xeb5e4685, 0xca66, 0x4769, 0xb6, 0xa2, 0x26, 0x06, 0x8b, 0x00, 0x13, 0x26],
'EFI_ERROR_SECTION_PCI_PCIX_BUS_GUID' : [0xc5753963, 0x3b84, 0x4095, 0xbf, 0x78, 0xed, 0xda, 0xd3, 0xf9, 0xc9, 0xdd],
'EFI_ERROR_SECTION_PLATFORM_MEMORY_GUID' : [0xa5bc1114, 0x6f64, 0x4ede, 0xb8, 0x63, 0x3e, 0x83, 0xed, 0x7c, 0x83, 0xb1],
'EFI_ERROR_SECTION_PROCESSOR_GENERIC_GUID' : [0x9876ccad, 0x47b4, 0x4bdb, 0xb6, 0x5e, 0x16, 0xf1, 0x93, 0xc4, 0xf3, 0xdb],
'EFI_ERROR_SECTION_PROCESSOR_SPECIFIC_GUID' : [0xdc3ea0b0, 0xa144, 0x4797, 0xb9, 0x5b, 0x53, 0xfa, 0x24, 0x2b, 0x6e, 0x1d],
'EFI_EVENT_GROUP_DXE_DISPATCH_GUID' : [0x7081e22f, 0xcac6, 0x4053, 0x94, 0x68, 0x67, 0x57, 0x82, 0xcf, 0x88, 0xe5],
'EFI_EVENT_LEGACY_BOOT_GUID' : [0x2a571201, 0x4966, 0x47f6, 0x8b, 0x86, 0xf3, 0x1e, 0x41, 0xf3, 0x2f, 0x10],
'EFI_EVENT_NOTIFICATION_TYEP_BOOT_GUID' : [0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, 0xD4, 0x64, 0xB3, 0x8F],
'EFI_EVENT_NOTIFICATION_TYEP_CMC_GUID' : [0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, 0xEB, 0xD4, 0xF8, 0x90],
'EFI_EVENT_NOTIFICATION_TYEP_CPE_GUID' : [0x4E292F96, 0xD843, 0x4a55, 0xA8, 0xC2, 0xD4, 0x81, 0xF2, 0x7E, 0xBE, 0xEE],
'EFI_EVENT_NOTIFICATION_TYEP_DMAR_GUID' : [0x667DD791, 0xC6B3, 0x4c27, 0x8A, 0x6B, 0x0F, 0x8E, 0x72, 0x2D, 0xEB, 0x41],
'EFI_EVENT_NOTIFICATION_TYEP_INIT_GUID' : [0xCC5263E8, 0x9308, 0x454a, 0x89, 0xD0, 0x34, 0x0B, 0xD3, 0x9B, 0xC9, 0x8E],
'EFI_EVENT_NOTIFICATION_TYEP_MCE_GUID' : [0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, 0xE1, 0x49, 0x13, 0xBB],
'EFI_EVENT_NOTIFICATION_TYEP_NMI_GUID' : [0x5BAD89FF, 0xB7E6, 0x42c9, 0x81, 0x4A, 0xCF, 0x24, 0x85, 0xD6, 0xE9, 0x8A],
'EFI_EVENT_NOTIFICATION_TYEP_PCIE_GUID' : [0xCF93C01F, 0x1A16, 0x4dfc, 0xB8, 0xBC, 0x9C, 0x4D, 0xAF, 0x67, 0xC1, 0x04],
'EFI_EXTENDED_SAL_BASE_IO_SERVICES_PROTOCOL_GUID' : [0x5aea42b5, 0x31e1, 0x4515, 0xbc, 0x31, 0xb8, 0xd5, 0x25, 0x75, 0x65, 0xa6],
'EFI_EXTENDED_SAL_BASE_SERVICES_PROTOCOL_GUID' : [0xd9e9fa06, 0x0fe0, 0x41c3, 0x96, 0xfb, 0x83, 0x42, 0x5a, 0x33, 0x94, 0xf8],
'EFI_EXTENDED_SAL_CACHE_SERVICES_PROTOCOL_GUID' : [0xedc9494, 0x2743, 0x4ba5, 0x88, 0x18, 0x0a, 0xef, 0x52, 0x13, 0xf1, 0x88],
'EFI_EXTENDED_SAL_ELOG_SERVICES_PROTOCOL_GUID' : [0xd5e4ee5f, 0x3e0a, 0x453c, 0xa7, 0x25, 0xb6, 0x92, 0xbb, 0x6, 0x36, 0x5a],
'EFI_EXTENDED_SAL_FV_BLOCK_SERVICES_PROTOCOL_GUID' : [0xa2271df1, 0xbcbb, 0x4f1d, 0x98, 0xa9, 0x06, 0xbc, 0x17, 0x2f, 0x07, 0x1a],
'EFI_EXTENDED_SAL_LOCK_SERVICES_PROTOCOL_GUID' : [0x76b75c23, 0xfe4f, 0x4e17, 0xa2, 0xad, 0x1a, 0x65, 0x3d, 0xbb, 0x49, 0x4a],
'EFI_EXTENDED_SAL_MCA_LOG_SERVICES_PROTOCOL_GUID' : [0xcb3fd86e, 0x38a3, 0x4c03, 0x9a, 0x5c, 0x90, 0xcf, 0xa3, 0xa2, 0xab, 0x7a],
'EFI_EXTENDED_SAL_MCA_SERVICES_PROTOCOL_GUID' : [0x2a591128, 0x6cc7, 0x42b1, 0x8a, 0xf0, 0x58, 0x93, 0x3b, 0x68, 0x2d, 0xbb],
'EFI_EXTENDED_SAL_MP_SERVICES_PROTOCOL_GUID' : [0x697d81a2, 0xcf18, 0x4dc0, 0x9e, 0x0d, 0x06, 0x11, 0x3b, 0x61, 0x8a, 0x3f],
'EFI_EXTENDED_SAL_MTC_SERVICES_PROTOCOL_GUID' : [0x899afd18, 0x75e8, 0x408b, 0xa4, 0x1a, 0x6e, 0x2e, 0x7e, 0xcd, 0xf4, 0x54],
'EFI_EXTENDED_SAL_PAL_SERVICES_PROTOCOL_GUID' : [0xe1cd9d21, 0x0fc2, 0x438d, 0x97, 0x03, 0x04, 0xe6, 0x6d, 0x96, 0x1e, 0x57],
'EFI_EXTENDED_SAL_PCI_SERVICES_PROTOCOL_GUID' : [0xa46b1a31, 0xad66, 0x4905, 0x92, 0xf6, 0x2b, 0x46, 0x59, 0xdc, 0x30, 0x63],
'EFI_EXTENDED_SAL_RESET_SERVICES_PROTOCOL_GUID' : [0x7d019990, 0x8ce1, 0x46f5, 0xa7, 0x76, 0x3c, 0x51, 0x98, 0x67, 0x6a, 0xa0],
'EFI_EXTENDED_SAL_RTC_SERVICES_PROTOCOL_GUID' : [0x7e97a470, 0xefdb, 0x4d02, 0x8f, 0xce, 0x61, 0x90, 0xd2, 0x7b, 0xa2, 0x96],
'EFI_EXTENDED_SAL_SENSOR_SERVICES_PROTOCOL_GUID' : [0x4a153b6e, 0x85a1, 0x4982, 0x98, 0xf4, 0x6a, 0x8c, 0xfc, 0xa4, 0xab, 0xa1],
'EFI_EXTENDED_SAL_SM_COM_LAYER_SERVICES_PROTOCOL_GUID' : [0x4356799, 0x81b7, 0x4e08, 0xa3, 0x8d, 0xd9, 0x78, 0xfa, 0x47, 0xba, 0x42],
'EFI_EXTENDED_SAL_SST_GUID' : [0x38802700, 0x868a, 0x4b4e, 0x81, 0xd4, 0x4f, 0x1b, 0xdc, 0xcf, 0xb4, 0x6f],
'EFI_EXTENDED_SAL_STALL_SERVICES_PROTOCOL_GUID' : [0x53a58d06, 0xac27, 0x4d8c, 0xb5, 0xe9, 0xf0, 0x8a, 0x80, 0x65, 0x41, 0x70],
'EFI_EXTENDED_SAL_STATUS_CODE_SERVICES_PROTOCOL_GUID' : [0xdbd91d, 0x55e9, 0x420f, 0x96, 0x39, 0x5e, 0x9f, 0x84, 0x37, 0xb4, 0x4f],
'EFI_EXTENDED_SAL_VARIABLE_SERVICES_PROTOCOL_GUID' : [0x4ecb6c53, 0xc641, 0x4370, 0x8c, 0xb2, 0x3b, 0x0e, 0x49, 0x6e, 0x83, 0x78],
'EFI_EXTENDED_SAL_VIRTUAL_SERVICES_PROTOCOL_GUID' : [0xc1a74056, 0x260e, 0x4871, 0xa0, 0x31, 0xe6, 0x45, 0xa6, 0x5b, 0x6e, 0x11],
'EFI_EXT_SCSI_PASS_THRU_PROTOCOL_GUID' : [0x143b7632, 0xb81b, 0x4cb7, 0xab, 0xd3, 0xb6, 0x25, 0xa5, 0xb9, 0xbf, 0xfe],
'EFI_FAULT_TOLERANT_WRITE_PROTOCOL_GUID' : [0x3ebd9e82, 0x2c78, 0x4de6, 0x97, 0x86, 0x8d, 0x4b, 0xfc, 0xb7, 0xc8, 0x81],
'EFI_FFS_VOLUME_TOP_FILE_GUID' : [0x1BA0062E, 0xC779, 0x4582, 0x85, 0x66, 0x33, 0x6A, 0xE8, 0xF7, 0x8F, 0x09],
'EFI_FILE_SYSTEM_INFO_ID_GUID' : [0x9576e93, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'EFI_FILE_SYSTEM_VOLUME_LABEL_INFO_ID_GUID' : [0xDB47D7D3, 0xFE81, 0x11d3, 0x9A, 0x35, 0x00, 0x90, 0x27, 0x3F, 0xC1, 0x4D],
'EFI_FIND_FV_PPI_GUID' : [0x36164812, 0xa023, 0x44e5, 0xbd, 0x85, 0x5, 0xbf, 0x3c, 0x77, 0x0, 0xaa],
'EFI_FIRMWARE_CONTENTS_SIGNED_GUID' : [0xf9d89e8, 0x9259, 0x4f76, 0xa5, 0xaf, 0xc, 0x89, 0xe3, 0x40, 0x23, 0xdf],
'EFI_FIRMWARE_FILE_SYSTEM2_GUID' : [0x8c8ce578, 0x8a3d, 0x4f1c, 0x99, 0x35, 0x89, 0x61, 0x85, 0xc3, 0x2d, 0xd3],
'EFI_FIRMWARE_FILE_SYSTEM3_GUID' : [0x5473c07a, 0x3dcb, 0x4dca, 0xbd, 0x6f, 0x1e, 0x96, 0x89, 0xe7, 0x34, 0x9a],
'EFI_FIRMWARE_FILE_SYSTEM_GUID' : [0x7A9354D9, 0x0468, 0x444a, 0x81, 0xCE, 0x0B, 0xF6, 0x17, 0xD8, 0x90, 0xDF],
'EFI_FIRMWARE_MANAGEMENT_PROTOCOL_GUID' : [0x86c77a67, 0xb97, 0x4633, 0xa1, 0x87, 0x49, 0x10, 0x4d, 0x6, 0x85, 0xc7],
'EFI_FIRMWARE_PERFORMANCE_GUID' : [0xc095791a, 0x3001, 0x47b2, 0x80, 0xc9, 0xea, 0xc7, 0x31, 0x9f, 0x2f, 0xa4],
'EFI_FIRMWARE_VOLUME2_PROTOCOL_GUID' : [0x220e73b6, 0x6bdb, 0x4413, 0x84, 0x5, 0xb9, 0x74, 0xb1, 0x8, 0x61, 0x9a],
'EFI_FIRMWARE_VOLUME_BLOCK2_PROTOCOL_GUID' : [0x8f644fa9, 0xe850, 0x4db1, 0x9c, 0xe2, 0xb, 0x44, 0x69, 0x8e, 0x8d, 0xa4],
'EFI_FIRMWARE_VOLUME_BLOCK_PROTOCOL_GUID' : [0x8f644fa9, 0xe850, 0x4db1, 0x9c, 0xe2, 0xb, 0x44, 0x69, 0x8e, 0x8d, 0xa4],
'EFI_FIRMWARE_VOLUME_DISPATCH_PROTOCOL_GUID' : [0x7aa35a69, 0x506c, 0x444f, 0xa7, 0xaf, 0x69, 0x4b, 0xf5, 0x6f, 0x71, 0xc8],
'EFI_FIRMWARE_VOLUME_PROTOCOL_GUID' : [0x389F751F, 0x1838, 0x4388, 0x83, 0x90, 0xCD, 0x81, 0x54, 0xBD, 0x27, 0xF8],
'EFI_FORM_BROWSER2_PROTOCOL_GUID' : [0xb9d4c360, 0xbcfb, 0x4f9b, 0x92, 0x98, 0x53, 0xc1, 0x36, 0x98, 0x22, 0x58],
'EFI_FORM_BROWSER_COMPATIBILITY_PROTOCOL_GUID' : [0xfb7c852, 0xadca, 0x4853, 0x8d, 0xf, 0xfb, 0xa7, 0x1b, 0x1c, 0xe1, 0x1a],
'EFI_FORM_BROWSER_PROTOCOL_GUID' : [0xe5a1333e, 0xe1b4, 0x4d55, 0xce, 0xeb, 0x35, 0xc3, 0xef, 0x13, 0x34, 0x43],
'EFI_FORM_BROWSER_PROTOCOL_GUID' : [0xfb7c852, 0xadca, 0x4853, 0x8d, 0xf, 0xfb, 0xa7, 0x1b, 0x1c, 0xe1, 0x1a],
'EFI_FORM_CALLBACK_PROTOCOL_GUID' : [0xf3e4543d, 0xcf35, 0x6cef, 0x35, 0xc4, 0x4f, 0xe6, 0x34, 0x4d, 0xfc, 0x54],
'EFI_FRAMEWORK_DEVICE_PATH_GUID' : [0xb7084e63, 0x46b7, 0x4d1a, 0x86, 0x77, 0xe3, 0x0b, 0x53, 0xdb, 0xf0, 0x50],
'EFI_FTP4_PROTOCOL_GUID' : [0xeb338826, 0x681b, 0x4295, 0xb3, 0x56, 0x2b, 0x36, 0x4c, 0x75, 0x7b, 0x9],
'EFI_FTP4_SERVICE_BINDING_PROTOCOL_GUID' : [0xfaaecb1, 0x226e, 0x4782, 0xaa, 0xce, 0x7d, 0xb9, 0xbc, 0xbf, 0x4d, 0xaf],
'EFI_FTW_LITE_PROTOCOL_GUID' : [0x3f557189, 0x8dae, 0x45ae, 0xa0, 0xb3, 0x2b, 0x99, 0xca, 0x7a, 0xa7, 0xa0],
'EFI_FVB_EXTENSION_PROTOCOL_GUID' : [0x53a4c71b, 0xb581, 0x4170, 0x91, 0xb3, 0x8d, 0xb8, 0x7a, 0x4b, 0x5c, 0x46],
'EFI_GENERIC_MEMORY_TEST_PROTOCOL_GUID' : [0x309de7f1, 0x7f5e, 0x4ace, 0xb4, 0x9c, 0x53, 0x1b, 0xe5, 0xaa, 0x95, 0xef],
'EFI_GENERIC_VARIABLE_GUID' : [0x59d1c24f, 0x50f1, 0x401a, 0xb1, 0x01, 0xf3, 0x3e, 0x0d, 0xae, 0xd4, 0x43],
'EFI_GLOBAL_VARIABLE_GUID' : [0x8BE4DF61, 0x93CA, 0x11d2, 0xAA, 0x0D, 0x00, 0xE0, 0x98, 0x03, 0x2B, 0x8C],
'EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID' : [0x9042a9de, 0x23dc, 0x4a38, 0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a],
'EFI_HARDWARE_ERROR_VARIABLE_GUID' : [0x414E6BDD, 0xE47B, 0x47cc, 0xB2, 0x44, 0xBB, 0x61, 0x02, 0x0C, 0xF5, 0x16],
'EFI_HASH_ALGORITHM_SHA1_GUID' : [0x2ae9d80f, 0x3fb2, 0x4095, 0xb7, 0xb1, 0xe9, 0x31, 0x57, 0xb9, 0x46, 0xb6],
'EFI_HASH_ALGORITHM_SHA1_NOPAD_GUID' : [0x24c5dc2f, 0x53e2, 0x40ca, 0x9e, 0xd6, 0xa5, 0xd9, 0xa4, 0x9f, 0x46, 0x3b],
'EFI_HASH_ALGORITHM_SHA224_GUID' : [0x8df01a06, 0x9bd5, 0x4bf7, 0xb0, 0x21, 0xdb, 0x4f, 0xd9, 0xcc, 0xf4, 0x5b],
'EFI_HASH_ALGORITHM_SHA256_GUID' : [0x51aa59de, 0xfdf2, 0x4ea3, 0xbc, 0x63, 0x87, 0x5f, 0xb7, 0x84, 0x2e, 0xe9],
'EFI_HASH_ALGORITHM_SHA256_NOPAD_GUID' : [0x8628752a, 0x6cb7, 0x4814, 0x96, 0xfc, 0x24, 0xa8, 0x15, 0xac, 0x22, 0x26],
'EFI_HASH_ALGORITHM_SHA384_GUID' : [0xefa96432, 0xde33, 0x4dd2, 0xae, 0xe6, 0x32, 0x8c, 0x33, 0xdf, 0x77, 0x7a],
'EFI_HASH_ALGORITHM_SHA512_GUID' : [0xcaa4381e, 0x750c, 0x4770, 0xb8, 0x70, 0x7a, 0x23, 0xb4, 0xe4, 0x21, 0x30],
'EFI_HASH_ALGORTIHM_MD5_GUID' : [0xaf7c79c, 0x65b5, 0x4319, 0xb0, 0xae, 0x44, 0xec, 0x48, 0x4e, 0x4a, 0xd7],
'EFI_HASH_PROTOCOL_GUID' : [0xc5184932, 0xdba5, 0x46db, 0xa5, 0xba, 0xcc, 0x0b, 0xda, 0x9c, 0x14, 0x35],
'EFI_HASH_SERVICE_BINDING_PROTOCOL_GUID' : [0x42881c98, 0xa4f3, 0x44b0, 0xa3, 0x9d, 0xdf, 0xa1, 0x86, 0x67, 0xd8, 0xcd],
'EFI_HII_COMPATIBILITY_PROTOCOL_GUID' : [0x5542cce1, 0xdf5c, 0x4d1b, 0xab, 0xca, 0x36, 0x4f, 0x77, 0xd3, 0x99, 0xfb],
'EFI_HII_CONFIG_ACCESS_PROTOCOL_GUID' : [0x330d4706, 0xf2a0, 0x4e4f, 0xa3, 0x69, 0xb6, 0x6f, 0xa8, 0xd5, 0x43, 0x85],
'EFI_HII_CONFIG_ROUTING_PROTOCOL_GUID' : [0x587e72d7, 0xcc50, 0x4f79, 0x82, 0x09, 0xca, 0x29, 0x1f, 0xc1, 0xa1, 0x0f],
'EFI_HII_DATABASE_PROTOCOL_GUID' : [0xef9fc172, 0xa1b2, 0x4693, 0xb3, 0x27, 0x6d, 0x32, 0xfc, 0x41, 0x60, 0x42],
'EFI_HII_DRIVER_HEALTH_FORMSET_GUID' : [0xf22fc20c, 0x8cf4, 0x45eb, 0x8e, 0x6, 0xad, 0x4e, 0x50, 0xb9, 0x5d, 0xd3],
'EFI_HII_FONT_PROTOCOL_GUID' : [0xe9ca4775, 0x8657, 0x47fc, 0x97, 0xe7, 0x7e, 0xd6, 0x5a, 0x8, 0x43, 0x24],
'EFI_HII_FRONT_PAGE_CLASS_GUID' : [0x94d411b7, 0x7669, 0x45c3, 0xba, 0x3b, 0xf3, 0xa5, 0x8a, 0x71, 0x56, 0x81],
'EFI_HII_IMAGE_PROTOCOL_GUID' : [0x31a6406a, 0x6bdf, 0x4e46, 0xb2, 0xa2, 0xeb, 0xaa, 0x89, 0xc4, 0x9, 0x20],
'EFI_HII_PACKAGE_LIST_PROTOCOL_GUID' : [0x6a1ee763, 0xd47a, 0x43b4, 0xaa, 0xbe, 0xef, 0x1d, 0xe2, 0xab, 0x56, 0xfc],
'EFI_HII_PLATFORM_SETUP_FORMSET_GUID' : [0x93039971, 0x8545, 0x4b04, 0xb4, 0x5e, 0x32, 0xeb, 0x83, 0x26, 0x4, 0xe],
'EFI_HII_PROTOCOL_GUID' : [0x5542cce1, 0xdf5c, 0x4d1b, 0xab, 0xca, 0x36, 0x4f, 0x77, 0xd3, 0x99, 0xfb],
'EFI_HII_PROTOCOL_GUID' : [0xd7ad636e, 0xb997, 0x459b, 0xbf, 0x3f, 0x88, 0x46, 0x89, 0x79, 0x80, 0xe1],
'EFI_HII_SET_KEYBOARD_LAYOUT_EVENT_GUID' : [0x14982a4f, 0xb0ed, 0x45b8, 0xa8, 0x11, 0x5a, 0x7a, 0x9b, 0xc2, 0x32, 0xdf],
'EFI_HII_STANDARD_FORM_GUID' : [0x3bd2f4ec, 0xe524, 0x46e4, 0xa9, 0xd8, 0x51, 0x1, 0x17, 0x42, 0x55, 0x62],
'EFI_HII_STRING_PROTOCOL_GUID' : [0xfd96974, 0x23aa, 0x4cdc, 0xb9, 0xcb, 0x98, 0xd1, 0x77, 0x50, 0x32, 0x2a],
'EFI_HII_USER_CREDENTIAL_FORMSET_GUID' : [0x337f4407, 0x5aee, 0x4b83, 0xb2, 0xa7, 0x4e, 0xad, 0xca, 0x30, 0x88, 0xcd],
'EFI_HOB_LIST_GUID' : [0x7739f24c, 0x93d7, 0x11d4, 0x9a, 0x3a, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_HOB_MEMORY_ALLOC_BSP_STORE_GUID' : [0x564b33cd, 0xc92a, 0x4593, 0x90, 0xbf, 0x24, 0x73, 0xe4, 0x3c, 0x63, 0x22],
'EFI_HOB_MEMORY_ALLOC_STACK_GUID' : [0x4ed4bf27, 0x4092, 0x42e9, 0x80, 0x7d, 0x52, 0x7b, 0x1d, 0x0, 0xc9, 0xbd],
'EFI_IA32_X64_ERROR_TYPE_BUS_CHECK_GUID' : [0x1CF3F8B3, 0xC5B1, 0x49a2, 0xAA, 0x59, 0x5E, 0xEF, 0x92, 0xFF, 0xA6, 0x3C],
'EFI_IA32_X64_ERROR_TYPE_CACHE_CHECK_GUID' : [0xA55701F5, 0xE3EF, 0x43de, 0xAC, 0x72, 0x24, 0x9B, 0x57, 0x3F, 0xAD, 0x2C],
'EFI_IA32_X64_ERROR_TYPE_MS_CHECK_GUID' : [0x48AB7F57, 0xDC34, 0x4f6c, 0xA7, 0xD3, 0xB0, 0xB5, 0xB0, 0xA7, 0x43, 0x14],
'EFI_IA32_X64_ERROR_TYPE_TLB_CHECK_GUID' : [0xFC06B535, 0x5E1F, 0x4562, 0x9F, 0x25, 0x0A, 0x3B, 0x9A, 0xDB, 0x63, 0xC3],
'EFI_IDE_CONTROLLER_INIT_PROTOCOL_GUID' : [0xa1e37052, 0x80d9, 0x4e65, 0xa3, 0x17, 0x3e, 0x9a, 0x55, 0xc4, 0x3e, 0xc9],
'EFI_IFR_FRAMEWORK_GUID' : [0x31ca5d1a, 0xd511, 0x4931, 0xb7, 0x82, 0xae, 0x6b, 0x2b, 0x17, 0x8c, 0xd7],
'EFI_IFR_REFRESH_ID_OP_GUID' : [0xF5E655D9, 0x02A6, 0x46f2, 0x9E, 0x76, 0xB8, 0xBE, 0x8E, 0x60, 0xAB, 0x22],
'EFI_IFR_TIANO_GUID' : [0xf0b1735, 0x87a0, 0x4193, 0xb2, 0x66, 0x53, 0x8c, 0x38, 0xaf, 0x48, 0xce],
'EFI_IMAGE_SECURITY_DATABASE_GUID' : [0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0xe, 0x67, 0x65, 0x6f],
'EFI_INCOMPATIBLE_PCI_DEVICE_SUPPORT_PROTOCOL_GUID' : [0xeb23f55a, 0x7863, 0x4ac2, 0x8d, 0x3d, 0x95, 0x65, 0x35, 0xde, 0x03, 0x75],
'EFI_IOBASE_HOB_GUID' : [0xd4a28a3e, 0xdcf2, 0x43cf, 0xa2, 0xb7, 0xf3, 0x57, 0x2a, 0x7c, 0xab, 0x9],
'EFI_IP4_CONFIG_PROTOCOL_GUID' : [0x3b95aa31, 0x3793, 0x434b, 0x86, 0x67, 0xc8, 0x07, 0x08, 0x92, 0xe0, 0x5e],
'EFI_IP4_PROTOCOL_GUID' : [0x41d94cd2, 0x35b6, 0x455a, 0x82, 0x58, 0xd4, 0xe5, 0x13, 0x34, 0xaa, 0xdd],
'EFI_IP4_SERVICE_BINDING_PROTOCOL_GUID' : [0xc51711e7, 0xb4bf, 0x404a, 0xbf, 0xb8, 0x0a, 0x04, 0x8e, 0xf1, 0xff, 0xe4],
'EFI_IP6_CONFIG_PROTOCOL_GUID' : [0x937fe521, 0x95ae, 0x4d1a, 0x89, 0x29, 0x48, 0xbc, 0xd9, 0x0a, 0xd3, 0x1a],
'EFI_IP6_PROTOCOL_GUID' : [0x2c8759d5, 0x5c2d, 0x66ef, 0x92, 0x5f, 0xb6, 0x6c, 0x10, 0x19, 0x57, 0xe2],
'EFI_IP6_SERVICE_BINDING_PROTOCOL_GUID' : [0xec835dd3, 0xfe0f, 0x617b, 0xa6, 0x21, 0xb3, 0x50, 0xc3, 0xe1, 0x33, 0x88],
'EFI_IPSEC2_PROTOCOL_GUID' : [0xa3979e64, 0xace8, 0x4ddc, 0xbc, 0x7, 0x4d, 0x66, 0xb8, 0xfd, 0x9, 0x77],
'EFI_IPSEC_CONFIG_PROTOCOL_GUID' : [0xce5e5929, 0xc7a3, 0x4602, 0xad, 0x9e, 0xc9, 0xda, 0xf9, 0x4e, 0xbf, 0xcf],
'EFI_IPSEC_PROTOCOL_GUID' : [0xdfb386f7, 0xe100, 0x43ad, 0x9c, 0x9a, 0xed, 0x90, 0xd0, 0x8a, 0x5e, 0x12],
'EFI_ISA_ACPI_PROTOCOL_GUID' : [0x64a892dc, 0x5561, 0x4536, 0x92, 0xc7, 0x79, 0x9b, 0xfc, 0x18, 0x33, 0x55],
'EFI_ISA_IO_PROTOCOL_GUID' : [0x7ee2bd44, 0x3da0, 0x11d4, 0x9a, 0x38, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_ISCSI_INITIATOR_NAME_PROTOCOL_GUID' : [0x59324945, 0xec44, 0x4c0d, 0xb1, 0xcd, 0x9d, 0xb1, 0x39, 0xdf, 0x7, 0xc],
'EFI_KMS_FORMAT_AESCBC_128_GUID' : [0xa0e8ee6a, 0x0e92, 0x44d4, 0x86, 0x1b, 0x0e, 0xaa, 0x4a, 0xca, 0x44, 0xa2],
'EFI_KMS_FORMAT_AESCBC_256_GUID' : [0xd7e69789, 0x1f68, 0x45e8, 0x96, 0xef, 0x3b, 0x64, 0x07, 0xa5, 0xb2, 0xdc],
'EFI_KMS_FORMAT_AESXTS_128_GUID' : [0x4776e33f, 0xdb47, 0x479a, 0xa2, 0x5f, 0xa1, 0xcd, 0x0a, 0xfa, 0xb3, 0x8b],
'EFI_KMS_FORMAT_AESXTS_256_GUID' : [0xdc7e8613, 0xc4bb, 0x4db0, 0x84, 0x62, 0x13, 0x51, 0x13, 0x57, 0xab, 0xe2],
'EFI_KMS_FORMAT_GENERIC_1024_GUID' : [0x43be0b44, 0x874b, 0x4ead, 0xb0, 0x9c, 0x24, 0x1a, 0x4f, 0xbd, 0x7e, 0xb3],
'EFI_KMS_FORMAT_GENERIC_128_GUID' : [0xec8a3d69, 0x6ddf, 0x4108, 0x94, 0x76, 0x73, 0x37, 0xfc, 0x52, 0x21, 0x36],
'EFI_KMS_FORMAT_GENERIC_160_GUID' : [0xa3b3e6f8, 0xefca, 0x4bc1, 0x88, 0xfb, 0xcb, 0x87, 0x33, 0x9b, 0x25, 0x79],
'EFI_KMS_FORMAT_GENERIC_2048_GUID' : [0x40093f23, 0x630c, 0x4626, 0x9c, 0x48, 0x40, 0x37, 0x3b, 0x19, 0xcb, 0xbe],
'EFI_KMS_FORMAT_GENERIC_256_GUID' : [0x70f64793, 0xc323, 0x4261, 0xac, 0x2c, 0xd8, 0x76, 0xf2, 0x7c, 0x53, 0x45],
'EFI_KMS_FORMAT_GENERIC_3072_GUID' : [0xb9237513, 0x6c44, 0x4411, 0xa9, 0x90, 0x21, 0xe5, 0x56, 0xe0, 0x5a, 0xde],
'EFI_KMS_FORMAT_GENERIC_512_GUID' : [0x978fe043, 0xd7af, 0x422e, 0x8a, 0x92, 0x2b, 0x48, 0xe4, 0x63, 0xbd, 0xe6],
'EFI_KMS_FORMAT_MD2_128_GUID' : [0x78be11c4, 0xee44, 0x4a22, 0x9f, 0x05, 0x03, 0x85, 0x2e, 0xc5, 0xc9, 0x78],
'EFI_KMS_FORMAT_MD4_128_GUID' : [0xd1c17aa1, 0xcac5, 0x400f, 0xbe, 0x17, 0xe2, 0xa2, 0xae, 0x06, 0x67, 0x7c],
'EFI_KMS_FORMAT_MD5SHA_128_GUID' : [0x1c178237, 0x6897, 0x459e, 0x9d, 0x36, 0x67, 0xce, 0x8e, 0xf9, 0x4f, 0x76],
'EFI_KMS_FORMAT_MD5_128_GUID' : [0xdcbc3662, 0x9cda, 0x4b52, 0xa0, 0x4c, 0x82, 0xeb, 0x1d, 0x23, 0x48, 0xc7],
'EFI_KMS_FORMAT_MDC2_128_GUID' : [0xf7ad60f8, 0xefa8, 0x44a3, 0x91, 0x13, 0x23, 0x1f, 0x39, 0x9e, 0xb4, 0xc7],
'EFI_KMS_FORMAT_MDC4_128_GUID' : [0x3fa4f847, 0xd8eb, 0x4df4, 0xbd, 0x49, 0x10, 0x3a, 0x0a, 0x84, 0x7b, 0xbc],
'EFI_KMS_FORMAT_RSASHA1_1024_GUID' : [0x56417bed, 0x6bbe, 0x4882, 0x86, 0xa0, 0x3a, 0xe8, 0xbb, 0x17, 0xf8, 0xf9],
'EFI_KMS_FORMAT_RSASHA1_2048_GUID' : [0xf66447d4, 0x75a6, 0x463e, 0xa8, 0x19, 0x07, 0x7f, 0x2d, 0xda, 0x05, 0xe9],
'EFI_KMS_FORMAT_RSASHA256_2048_GUID' : [0xa477af13, 0x877d, 0x4060, 0xba, 0xa1, 0x25, 0xd1, 0xbe, 0xa0, 0x8a, 0xd3],
'EFI_KMS_FORMAT_SHA1_160_GUID' : [0x453c5e5a, 0x482d, 0x43f0, 0x87, 0xc9, 0x59, 0x41, 0xf3, 0xa3, 0x8a, 0xc2],
'EFI_KMS_FORMAT_SHA256_256_GUID' : [0x6bb4f5cd, 0x8022, 0x448d, 0xbc, 0x6d, 0x77, 0x1b, 0xae, 0x93, 0x5f, 0xc6],
'EFI_KMS_FORMAT_SHA512_512_GUID' : [0x2f240e12, 0xe14d, 0x475c, 0x83, 0xb0, 0xef, 0xff, 0x22, 0xd7, 0x7b, 0xe7],
'EFI_KMS_PROTOCOL_GUID' : [0xEC3A978D, 0x7C4E, 0x48FA, 0x9A, 0xBE, 0x6A, 0xD9, 0x1C, 0xC8, 0xF8, 0x11],
'EFI_LEGACY_8259_PROTOCOL_GUID' : [0x38321dba, 0x4fe0, 0x4e17, 0x8a, 0xec, 0x41, 0x30, 0x55, 0xea, 0xed, 0xc1],
'EFI_LEGACY_BIOS_GUID' : [0x2e3044ac, 0x879f, 0x490f, 0x97, 0x60, 0xbb, 0xdf, 0xaf, 0x69, 0x5f, 0x50],
'EFI_LEGACY_BIOS_PLATFORM_PROTOCOL_GUID' : [0x783658a3, 0x4172, 0x4421, 0xa2, 0x99, 0xe0, 0x9, 0x7, 0x9c, 0xc, 0xb4],
'EFI_LEGACY_BIOS_PROTOCOL_GUID' : [0xdb9a1e3d, 0x45cb, 0x4abb, 0x85, 0x3b, 0xe5, 0x38, 0x7f, 0xdb, 0x2e, 0x2d],
'EFI_LEGACY_BIOS_THUNK_PROTOCOL_GUID' : [0x4c51a7ba, 0x7195, 0x442d, 0x87, 0x92, 0xbe, 0xea, 0x6e, 0x2f, 0xf6, 0xec],
'EFI_LEGACY_DEV_ORDER_VARIABLE_GUID' : [0xa56074db, 0x65fe, 0x45f7, 0xbd, 0x21, 0x2d, 0x2b, 0xdd, 0x8e, 0x96, 0x52],
'EFI_LEGACY_INTERRUPT_PROTOCOL_GUID' : [0x31ce593d, 0x108a, 0x485d, 0xad, 0xb2, 0x78, 0xf2, 0x1f, 0x29, 0x66, 0xbe],
'EFI_LEGACY_REGION2_PROTOCOL_GUID' : [0x70101eaf, 0x85, 0x440c, 0xb3, 0x56, 0x8e, 0xe3, 0x6f, 0xef, 0x24, 0xf0],
'EFI_LEGACY_REGION_PROTOCOL_GUID' : [0xfc9013a, 0x568, 0x4ba9, 0x9b, 0x7e, 0xc9, 0xc3, 0x90, 0xa6, 0x60, 0x9b],
'EFI_LOADED_IMAGE_DEVICE_PATH_PROTOCOL_GUID' : [0xbc62157e, 0x3e33, 0x4fec, 0x99, 0x20, 0x2d, 0x3b, 0x36, 0xd7, 0x50, 0xdf],
'EFI_LOADED_IMAGE_PROTOCOL_GUID' : [0x5B1B31A1, 0x9562, 0x11d2, 0x8E, 0x3F, 0x00, 0xA0, 0xC9, 0x69, 0x72, 0x3B],
'EFI_LOAD_FILE2_PROTOCOL_GUID' : [0x4006c0c1, 0xfcb3, 0x403e, 0x99, 0x6d, 0x4a, 0x6c, 0x87, 0x24, 0xe0, 0x6d],
'EFI_LOAD_FILE_PROTOCOL_GUID' : [0x56EC3091, 0x954C, 0x11d2, 0x8E, 0x3F, 0x00, 0xA0, 0xC9, 0x69, 0x72, 0x3B],
'EFI_LOAD_FIXED_ADDRESS_CONFIGURATION_TABLE_GUID' : [0x2CA88B53,0xD296,0x4080, 0xA4,0xA5,0xCA,0xD9,0xBA,0xE2,0x4B,0x9],
'EFI_LOCK_BOX_PROTOCOL_GUID' : [0xbd445d79, 0xb7ad, 0x4f04, 0x9a, 0xd8, 0x29, 0xbd, 0x20, 0x40, 0xeb, 0x3c],
'EFI_MANAGED_NETWORK_PROTOCOL_GUID' : [0x7ab33a91, 0xace5, 0x4326, 0xb5, 0x72, 0xe7, 0xee, 0x33, 0xd3, 0x9f, 0x16],
'EFI_MANAGED_NETWORK_SERVICE_BINDING_PROTOCOL_GUID' : [0xf36ff770, 0xa7e1, 0x42cf, 0x9e, 0xd2, 0x56, 0xf0, 0xf2, 0x71, 0xf4, 0x4c],
'EFI_MEASURED_FV_HOB_GUID' : [0xb2360b42, 0x7173, 0x420a, 0x86, 0x96, 0x46, 0xca, 0x6b, 0xab, 0x10, 0x60],
'EFI_MEMORY_PRODUCER_GUID' : [0x1d7add6e, 0xb2da, 0x4b0b, 0xb2, 0x9f, 0x49, 0xcb, 0x42, 0xf4, 0x63, 0x56],
'EFI_MEMORY_SUBCLASS_GUID' : [0x4E8F4EBB, 0x64B9, 0x4e05, 0x9B, 0x18, 0x4C, 0xFE, 0x49, 0x23, 0x50, 0x97],
'EFI_MEMORY_TYPE_INFORMATION_GUID' : [0x4c19049f,0x4137,0x4dd3, 0x9c,0x10,0x8b,0x97,0xa8,0x3f,0xfd,0xfa],
'EFI_METRONOME_ARCH_PROTOCOL_GUID' : [0x26baccb2, 0x6f42, 0x11d4, 0xbc, 0xe7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_MINI_SHELL_FILE_GUID' : [0x86ad232b, 0xd33a, 0x465c, 0xbf, 0x5f, 0x41, 0x37, 0xb, 0xa9, 0x2f, 0xe2],
'EFI_MISC_PRODUCER_GUID' : [0x62512c92, 0x63c4, 0x4d80, 0x82, 0xb1, 0xc1, 0xa4, 0xdc, 0x44, 0x80, 0xe5],
'EFI_MISC_SUBCLASS_GUID' : [0x772484B2, 0x7482, 0x4b91, 0x9F, 0x9A, 0xAD, 0x43, 0xF8, 0x1C, 0x58, 0x81],
'EFI_MMC_HOST_PROTOCOL_GUID' : [0x3e591c00, 0x9e4a, 0x11df, 0x92, 0x44, 0x00, 0x02, 0xA5, 0xD5, 0xC5, 0x1B],
'EFI_MONOTONIC_COUNTER_ARCH_PROTOCOL_GUID' : [0x1da97072, 0xbddc, 0x4b30, 0x99, 0xf1, 0x72, 0xa0, 0xb5, 0x6f, 0xff, 0x2a],
'EFI_MONTONIC_COUNTER_ARCH_PROTOCOL_GUID' : [0x1da97072, 0xbddc, 0x4b30, 0x99, 0xf1, 0x72, 0xa0, 0xb5, 0x6f, 0xff, 0x2a],
'EFI_MPS_TABLE_GUID' : [0xeb9d2d2f, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_MP_SERVICES_PROTOCOL_GUID' : [0x3fdda605, 0xa76e, 0x4f46, 0xad, 0x29, 0x12, 0xf4, 0x53, 0x1b, 0x3d, 0x08],
'EFI_MTFTP4_PROTOCOL_GUID' : [0x78247c57, 0x63db, 0x4708, 0x99, 0xc2, 0xa8, 0xb4, 0xa9, 0xa6, 0x1f, 0x6b],
'EFI_MTFTP4_SERVICE_BINDING_PROTOCOL_GUID' : [0x2FE800BE, 0x8F01, 0x4aa6, 0x94, 0x6B, 0xD7, 0x13, 0x88, 0xE1, 0x83, 0x3F],
'EFI_MTFTP6_PROTOCOL_GUID' : [0xbf0a78ba, 0xec29, 0x49cf, 0xa1, 0xc9, 0x7a, 0xe5, 0x4e, 0xab, 0x6a, 0x51],
'EFI_MTFTP6_SERVICE_BINDING_PROTOCOL_GUID' : [0xd9760ff3, 0x3cca, 0x4267, 0x80, 0xf9, 0x75, 0x27, 0xfa, 0xfa, 0x42, 0x23],
'EFI_NETWORK_INTERFACE_IDENTIFIER_PROTOCOL_GUID' : [0x1ACED566, 0x76ED, 0x4218, 0xBC, 0x81, 0x76, 0x7F, 0x1F, 0x97, 0x7A, 0x89],
'EFI_NETWORK_INTERFACE_IDENTIFIER_PROTOCOL_GUID' : [0xE18541CD, 0xF755, 0x4f73, 0x92, 0x8D, 0x64, 0x3C, 0x8A, 0x79, 0xB2, 0x29],
'EFI_NIC_IP4_CONFIG_NVDATA_GUID' : [0x9d5b53f, 0xf4b0, 0x4f59, 0xa0, 0xb1, 0x7b, 0x57, 0xd3, 0x5c, 0xe, 0x5],
'EFI_NIC_IP4_CONFIG_PROTOCOL_GUID' : [0xdca3d4d, 0x12da, 0x4728, 0xbf, 0x7e, 0x86, 0xce, 0xb9, 0x28, 0xd0, 0x67],
'EFI_NIC_IP4_CONFIG_VARIABLE_GUID' : [0xd8944553, 0xc4dd, 0x41f4, 0x9b, 0x30, 0xe1, 0x39, 0x7c, 0xfb, 0x26, 0x7b],
'EFI_NT_LOAD_AS_DLL_PPI_GUID' : [0xccc53f6b, 0xa03a, 0x4ed8, 0x83, 0x9a, 0x3, 0xd9, 0x9c, 0x2, 0xb4, 0xe3],
'EFI_OEM_BADGING_PROTOCOL_GUID' : [0x170e13c0, 0xbf1b, 0x4218, 0x87, 0x1d, 0x2a, 0xbd, 0xc6, 0xf8, 0x87, 0xbc],
'EFI_PART_TYPE_EFI_SYSTEM_PART_GUID' : [0xc12a7328, 0xf81f, 0x11d2, 0xba, 0x4b, 0x00, 0xa0, 0xc9, 0x3e, 0xc9, 0x3b],
'EFI_PART_TYPE_LEGACY_MBR_GUID' : [0x024dee41, 0x33e7, 0x11d3, 0x9d, 0x69, 0x00, 0x08, 0xc7, 0x81, 0xf3, 0x9f],
'EFI_PATH_FILE_NAME_GUID' : [0x7644C181, 0xFA6E, 0x46DA, 0x80, 0xCB, 0x04, 0xB9, 0x90, 0x40, 0x62, 0xE8],
'EFI_PCD_PROTOCOL_GUID' : [0x13a3f0f6, 0x264a, 0x3ef0, 0xf2, 0xe0, 0xde, 0xc5, 0x12, 0x34, 0x2f, 0x34],
'EFI_PCI_ENUMERATION_COMPLETE_GUID' : [0x30cfe3e7, 0x3de1, 0x4586, 0xbe, 0x20, 0xde, 0xab, 0xa1, 0xb3, 0xb7, 0x93],
'EFI_PCI_EXPRESS_BASE_ADDRESS_GUID' : [0x3677d529, 0x326f, 0x4603, 0xa9, 0x26, 0xea, 0xac, 0xe0, 0x1d, 0xcb, 0xb0],
'EFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL_GUID' : [0xCF8034BE, 0x6768, 0x4d8b, 0xB7,0x39,0x7C,0xCE,0x68,0x3A,0x9F,0xBE],
'EFI_PCI_HOTPLUG_DEVICE_GUID' : [0x0b280816, 0x52e7, 0x4e51, 0xaa, 0x57, 0x11, 0xbd, 0x41, 0xcb, 0xef, 0xc3],
'EFI_PCI_HOTPLUG_REQUEST_PROTOCOL_GUID' : [0x19cb87ab, 0x2cb9, 0x4665, 0x83, 0x60, 0xdd, 0xcf, 0x60, 0x54, 0xf7, 0x9d],
'EFI_PCI_HOT_PLUG_INIT_PROTOCOL_GUID' : [0xaa0e8bc1, 0xdabc, 0x46b0, 0xa8, 0x44, 0x37, 0xb8, 0x16, 0x9b, 0x2b, 0xea],
'EFI_PCI_IO_PROTOCOL_GUID' : [0x4cf5b200, 0x68b8, 0x4ca5, 0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x2, 0x9a],
'EFI_PCI_OPTION_ROM_TABLE_GUID' : [0x7462660f, 0x1cbd, 0x48da, 0xad, 0x11, 0x91, 0x71, 0x79, 0x13, 0x83, 0x1c],
'EFI_PCI_OVERRIDE_GUID' : [0xb5b35764, 0x460c, 0x4a06, 0x99, 0xfc, 0x77, 0xa1, 0x7c, 0x1b, 0x5c, 0xeb],
'EFI_PCI_PLATFORM_PROTOCOL_GUID' : [0x7d75280, 0x27d4, 0x4d69, 0x90, 0xd0, 0x56, 0x43, 0xe2, 0x38, 0xb3, 0x41],
'EFI_PCI_ROOT_BRIDGE_IO_PROTOCOL_GUID' : [0x2f707ebb, 0x4a1a, 0x11d4, 0x9a, 0x38, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_PC_ANSI_GUID' : [0xe0c14753, 0xf9be, 0x11d2, 0x9a, 0x0c, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_PEI_APRIORI_FILE_NAME_GUID' : [0x1b45cc0a, 0x156a, 0x428a, 0xaf, 0x62, 0x49, 0x86, 0x4d, 0xa0, 0xe6, 0xe6],
'EFI_PEI_BOOT_SCRIPT_EXECUTER_PPI_GUID' : [0xabd42895, 0x78cf, 0x4872, 0x84, 0x44, 0x1b, 0x5c, 0x18, 0x0b, 0xfb, 0xff],
'EFI_PEI_CPU_IO_PPI_INSTALLED_GUID' : [0xe6af1f7b, 0xfc3f, 0x46da, 0xa8, 0x28, 0xa3, 0xb4, 0x57, 0xa4, 0x42, 0x82],
'EFI_PEI_DECOMPRESS_PPI_GUID' : [0x1a36e4e7, 0xfab6, 0x476a, 0x8e, 0x75, 0x69, 0x5a, 0x5, 0x76, 0xfd, 0xd7],
'EFI_PEI_DEVICE_RECOVERY_MODULE_PPI_GUID' : [0x0DE2CE25, 0x446A, 0x45a7, 0xBF, 0xC9, 0x37, 0xDA, 0x26, 0x34, 0x4B, 0x37],
'EFI_PEI_END_OF_PEI_PHASE_PPI_GUID' : [0x605EA650, 0xC65C, 0x42e1, 0xBA, 0x80, 0x91, 0xA5, 0x2A, 0xB6, 0x18, 0xC6],
'EFI_PEI_FIND_FV_PPI_GUID' : [0x36164812, 0xa023, 0x44e5, 0xbd, 0x85, 0x5, 0xbf, 0x3c, 0x77, 0x0, 0xaa],
'EFI_PEI_FIRMWARE_VOLUME_INFO_PPI_GUID' : [0x49edb1c1, 0xbf21, 0x4761, 0xbb, 0x12, 0xeb, 0x0, 0x31, 0xaa, 0xbb, 0x39],
'EFI_PEI_FLUSH_INSTRUCTION_CACHE_GUID' : [0xd8117cfc, 0x94a6, 0x11d4, 0x9a, 0x3a, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_PEI_LOADED_IMAGE_PPI_GUID' : [0xc1fcd448, 0x6300, 0x4458, 0xb8, 0x64, 0x28, 0xdf, 0x01, 0x53, 0x64, 0xbc],
'EFI_PEI_LOAD_FILE_GUID' : [0xb9e0abfe, 0x5979, 0x4914, 0x97, 0x7f, 0x6d, 0xee, 0x78, 0xc2, 0x78, 0xa6],
'EFI_PEI_LOAD_FILE_PPI_GUID' : [0xb9e0abfe, 0x5979, 0x4914, 0x97, 0x7f, 0x6d, 0xee, 0x78, 0xc2, 0x78, 0xa6],
'EFI_PEI_PCD_PPI_GUID' : [0x1f34d25, 0x4de2, 0x23ad, 0x3f, 0xf3, 0x36, 0x35, 0x3f, 0xf3, 0x23, 0xf1],
'EFI_PEI_PCI_CFG2_PPI_GUID' : [0x57a449a, 0x1fdc, 0x4c06, 0xbf, 0xc9, 0xf5, 0x3f, 0x6a, 0x99, 0xbb, 0x92],
'EFI_PEI_PCI_CFG_PPI_INSTALLED_GUID' : [0xe1f2eba0, 0xf7b9, 0x4a26, 0x86, 0x20, 0x13, 0x12, 0x21, 0x64, 0x2a, 0x90],
'EFI_PEI_PERFORMANCE_HOB_GUID' : [0x10f432de, 0xdeec, 0x4631, 0x80, 0xcd, 0x47, 0xf6, 0x5d, 0x8f, 0x80, 0xbb],
'EFI_PEI_PERMANENT_MEMORY_INSTALLED_PPI_GUID' : [0xf894643d, 0xc449, 0x42d1, 0x8e, 0xa8, 0x85, 0xbd, 0xd8, 0xc6, 0x5b, 0xde],
'EFI_PEI_PE_COFF_LOADER_GUID' : [0xd8117cff, 0x94a6, 0x11d4, 0x9a, 0x3a, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_PEI_READ_ONLY_VARIABLE2_PPI_GUID' : [0x2ab86ef5, 0xecb5, 0x4134, 0xb5, 0x56, 0x38, 0x54, 0xca, 0x1f, 0xe1, 0xb4],
'EFI_PEI_READ_ONLY_VARIABLE_ACCESS_PPI_GUID' : [0x3cdc90c6, 0x13fb, 0x4a75, 0x9e, 0x79, 0x59, 0xe9, 0xdd, 0x78, 0xb9, 0xfa],
'EFI_PEI_RECOVERY_BLOCK_IO_PPI_GUID' : [0x695d8aa1, 0x42ee, 0x4c46, 0x80, 0x5c, 0x6e, 0xa6, 0xbc, 0xe7, 0x99, 0xe3],
'EFI_PEI_RECOVERY_MODULE_PPI_GUID' : [0xFB6D9542, 0x612D, 0x4f45, 0x87, 0x2F, 0x5C, 0xFF, 0x52, 0xE9, 0x3D, 0xCF],
'EFI_PEI_REPORT_PROGRESS_CODE_PPI_GUID' : [0x229832d3, 0x7a30, 0x4b36, 0xb8, 0x27, 0xf4, 0xc, 0xb7, 0xd4, 0x54, 0x36],
'EFI_PEI_RESET_PPI_GUID' : [0xef398d58, 0x9dfd, 0x4103, 0xbf, 0x94, 0x78, 0xc6, 0xf4, 0xfe, 0x71, 0x2f],
'EFI_PEI_RSC_HANDLER_PPI_GUID' : [0x65d394, 0x9951, 0x4144, 0x82, 0xa3, 0xa, 0xfc, 0x85, 0x79, 0xc2, 0x51],
'EFI_PEI_S3_RESUME2_PPI_GUID' : [0x6D582DBC, 0xDB85, 0x4514, 0x8F, 0xCC, 0x5A, 0xDF, 0x62, 0x27, 0xB1, 0x47],
'EFI_PEI_S3_RESUME_PPI_GUID' : [0x4426CCB2, 0xE684, 0x4a8a, 0xAE, 0x40, 0x20, 0xD4, 0xB0, 0x25, 0xB7, 0x10],
'EFI_PEI_SECTION_EXTRACTION_PPI_GUID' : [0x4F89E208, 0xE144, 0x4804, 0x9E, 0xC8, 0x0F, 0x89, 0x4F, 0x7E, 0x36, 0xD7],
'EFI_PEI_SECURITY2_PPI_GUID' : [0xdcd0be23, 0x9586, 0x40f4, 0xb6, 0x43, 0x06, 0x52, 0x2c, 0xed, 0x4e, 0xde],
'EFI_PEI_SECURITY_PPI_GUID' : [0x1388066e, 0x3a57, 0x4efa, 0x98, 0xf3, 0xc1, 0x2f, 0x3a, 0x95, 0x8a, 0x29],
'EFI_PEI_SMBUS2_PPI_GUID' : [0x9ca93627, 0xb65b, 0x4324, 0xa2, 0x2, 0xc0, 0xb4, 0x61, 0x76, 0x45, 0x43],
'EFI_PEI_SMBUS_PPI_GUID' : [0xabd42895, 0x78cf, 0x4872, 0x84, 0x44, 0x1b, 0x5c, 0x18, 0xb, 0xfb, 0xda],
'EFI_PEI_SMM_COMMUNICATION_PPI_GUID' : [0xae933e1c, 0xcc47, 0x4e38, 0x8f, 0xe, 0xe2, 0xf6, 0x1d, 0x26, 0x5, 0xdf],
'EFI_PEI_STALL_PPI_GUID' : [0x1f4c6f90, 0xb06b, 0x48d8, 0xa2, 0x01, 0xba, 0xe5, 0xf1, 0xcd, 0x7d, 0x56],
'EFI_PEI_TEMPORARY_RAM_DONE_PPI_GUID' : [0xceab683c, 0xec56, 0x4a2d, 0xa9, 0x06, 0x40, 0x53, 0xfa, 0x4e, 0x9c, 0x16],
'EFI_PEI_TEMPORARY_RAM_SUPPORT_PPI_GUID' : [0xdbe23aa9, 0xa345, 0x4b97, 0x85, 0xb6, 0xb2, 0x26, 0xf1, 0x61, 0x73, 0x89],
'EFI_PEI_TRANSFER_CONTROL_GUID' : [0xd8117d02, 0x94a6, 0x11d4, 0x9a, 0x3a, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_PEI_VECTOR_HANDOFF_INFO_PPI_GUID' : [0x3cd652b4, 0x6d33, 0x4dce, 0x89, 0xdb, 0x83, 0xdf, 0x97, 0x66, 0xfc, 0xca],
'EFI_PERFORMANCE_PROTOCOL_GUID' : [0xFFECFFFF, 0x923C, 0x14d2, 0x9E, 0x3F, 0x22, 0xA0, 0xC9, 0x69, 0x56, 0x3B],
'EFI_PHYSICAL_PRESENCE_DATA_GUID' : [0xf6499b1, 0xe9ad, 0x493d, 0xb9, 0xc2, 0x2f, 0x90, 0x81, 0x5c, 0x6c, 0xbc],
'EFI_PLATFORM_DRIVER_OVERRIDE_PROTOCOL_GUID' : [0x6b30c738, 0xa391, 0x11d4, 0x9a, 0x3b, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_PLATFORM_MEMTEST_PROTOCOL_GUID' : [0x859ba18, 0x7dd7, 0x4ed7, 0xa8, 0x8e, 0x10, 0x9c, 0x63, 0x91, 0x7b, 0xdd],
'EFI_PLATFORM_TO_DRIVER_CONFIGURATION_CLP_GUID' : [0x345ecc0e, 0x0cb6, 0x4b75, 0xbb, 0x57, 0x1b, 0x12, 0x9c, 0x47, 0x33, 0x3e],
'EFI_PLATFORM_TO_DRIVER_CONFIGURATION_PROTOCOL_GUID' : [0x642cd590, 0x8059, 0x4c0a, 0xa9, 0x58, 0xc5, 0xec, 0x07, 0xd2, 0x3c, 0x4b],
'EFI_PRIMARY_CONSOLE_IN_DEVICE_GUID' : [0xe451dcbe, 0x96a1, 0x4729, 0xa5, 0xcf, 0x6b, 0x9c, 0x2c, 0xff, 0x47, 0xfd],
'EFI_PRIMARY_CONSOLE_OUT_DEVICE_GUID' : [0x62bdf38a, 0xe3d5, 0x492c, 0x95, 0xc, 0x23, 0xa7, 0xf6, 0x6e, 0x67, 0x2e],
'EFI_PRIMARY_STANDARD_ERROR_DEVICE_GUID' : [0x5a68191b, 0x9b97, 0x4752, 0x99, 0x46, 0xe3, 0x6a, 0x5d, 0xa9, 0x42, 0xb1],
'EFI_PRINT2_PROTOCOL_GUID' : [0xf05976ef, 0x83f1, 0x4f3d, 0x86, 0x19, 0xf7, 0x59, 0x5d, 0x41, 0xe5, 0x38],
'EFI_PRINT_PROTOCOL_GUID' : [0xdf2d868e, 0x32fc, 0x4cf0, 0x8e, 0x6b, 0xff, 0xd9, 0x5d, 0x13, 0x43, 0xd0],
'EFI_PROCESSOR_PRODUCER_GUID' : [0x1bf06aea, 0x5bec, 0x4a8d, 0x95, 0x76, 0x74, 0x9b, 0x09, 0x56, 0x2d, 0x30],
'EFI_PROCESSOR_SUBCLASS_GUID' : [0x26fdeb7e, 0xb8af, 0x4ccf, 0xaa, 0x97, 0x02, 0x63, 0x3c, 0xe4, 0x8c, 0xa7],
'EFI_PS2_POLICY_PROTOCOL_GUID' : [0x4df19259, 0xdc71, 0x4d46, 0xbe, 0xf1, 0x35, 0x7b, 0xb5, 0x78, 0xc4, 0x18],
'EFI_PXE_BASE_CODE_CALLBACK_PROTOCOL_GUID' : [0x245dca21, 0xfb7b, 0x11d3, 0x8f, 0x01, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'EFI_PXE_BASE_CODE_PROTOCOL_GUID' : [0x03c4e603, 0xac28, 0x11d3, 0x9a, 0x2d, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_PXE_DHCP4_CALLBACK_PROTOCOL_GUID' : [0xc1544c01, 0x92a4, 0x4198, 0x8a, 0x84, 0x77, 0x85, 0x83, 0xc2, 0x36, 0x21],
'EFI_PXE_DHCP4_PROTOCOL_GUID' : [0x03c4e624, 0xac28, 0x11d3, 0x9a, 0x2d, 0x00, 0x90, 0x29, 0x3f, 0xc1, 0x4d],
'EFI_REAL_TIME_CLOCK_ARCH_PROTOCOL_GUID' : [0x27CFAC87, 0x46CC, 0x11d4, 0x9A, 0x38, 0x00, 0x90, 0x27, 0x3F, 0xC1, 0x4D],
'EFI_RESET_ARCH_PROTOCOL_GUID' : [0x27CFAC88, 0x46CC, 0x11d4, 0x9A, 0x38, 0x00, 0x90, 0x27, 0x3F, 0xC1, 0x4D],
'EFI_RSC_HANDLER_PROTOCOL_GUID' : [0x86212936, 0xe76, 0x41c8, 0xa0, 0x3a, 0x2a, 0xf2, 0xfc, 0x1c, 0x39, 0xe2],
'EFI_RUNTIME_ARCH_PROTOCOL_GUID' : [0xb7dfb4e1, 0x52f, 0x449f, 0x87, 0xbe, 0x98, 0x18, 0xfc, 0x91, 0xb7, 0x33],
'EFI_RUNTIME_CRYPT_PROTOCOL_GUID' : [0xe1475e0c, 0x1746, 0x4802, 0x86, 0x2e, 0x1, 0x1c, 0x2c, 0x2d, 0x9d, 0x86],
'EFI_S3_SAVE_STATE_PROTOCOL_GUID' : [0xe857caf6, 0xc046, 0x45dc, 0xbe, 0x3f, 0xee, 0x7, 0x65, 0xfb, 0xa8, 0x87],
'EFI_S3_SMM_SAVE_STATE_PROTOCOL_GUID' : [0x320afe62, 0xe593, 0x49cb, 0xa9, 0xf1, 0xd4, 0xc2, 0xf4, 0xaf, 0x1, 0x4c],
'EFI_SAL_MCA_INIT_PMI_PROTOCOL_GUID' : [0xb60dc6e8, 0x3b6f, 0x11d5, 0xaf, 0x9, 0x0, 0xa0, 0xc9, 0x44, 0xa0, 0x5b],
'EFI_SAL_SYSTEM_TABLE_GUID' : [0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_SAS_DEVICE_PATH_GUID' : [0xd487ddb4, 0x008b, 0x11d9, 0xaf, 0xdc, 0x00, 0x10, 0x83, 0xff, 0xca, 0x4d],
'EFI_SCSI_BUS_PROTOCOL_GUID' : [0x5261213D, 0x3A3D, 0x441E, 0xB3, 0xAF, 0x21, 0xD3, 0xF7, 0xA4, 0xCA, 0x17],
'EFI_SCSI_IO_PROTOCOL_GUID' : [0x932f47e6, 0x2362, 0x4002, 0x80, 0x3e, 0x3c, 0xd5, 0x4b, 0x13, 0x8f, 0x85],
'EFI_SCSI_PASS_THRU_PROTOCOL_GUID' : [0xa59e8fcf, 0xbda0, 0x43bb, 0x90, 0xb1, 0xd3, 0x73, 0x2e, 0xca, 0xa8, 0x77],
'EFI_SECTION_EXTRACTION_PROTOCOL_GUID' : [0x448F5DA4, 0x6DD7, 0x4FE1, 0x93, 0x07, 0x69, 0x22, 0x41, 0x92, 0x21, 0x5D],
'EFI_SECURITY2_ARCH_PROTOCOL_GUID' : [0x94ab2f58, 0x1438, 0x4ef1, 0x91, 0x52, 0x18, 0x94, 0x1a, 0x3a, 0x0e, 0x68],
'EFI_SECURITY_ARCH_PROTOCOL_GUID' : [0xA46423E3, 0x4617, 0x49f1, 0xB9, 0xFF, 0xD1, 0xBF, 0xA9, 0x11, 0x58, 0x39],
'EFI_SECURITY_POLICY_PROTOCOL_GUID' : [0x78E4D245, 0xCD4D, 0x4a05, 0xA2, 0xBA, 0x47, 0x43, 0xE8, 0x6C, 0xFC, 0xAB],
'EFI_SEC_PLATFORM_INFORMATION_GUID' : [0x6f8c2b35, 0xfef4, 0x448d, 0x82, 0x56, 0xe1, 0x1b, 0x19, 0xd6, 0x10, 0x77],
'EFI_SERIAL_IO_PROTOCOL_GUID' : [0xBB25CF6F, 0xF1D4, 0x11D2, 0x9A, 0x0C, 0x00, 0x90, 0x27, 0x3F, 0xC1, 0xFD],
'EFI_SE_EXT_SIGNATURE_GUID' : [0xd2c18636, 0x40e5, 0x4eb5, 0xa3, 0x1b, 0x36, 0x69, 0x5f, 0xd4, 0x2c, 0x87],
'EFI_SHELLPKG_TOKEN_SPACE_GUID' : [0x171e9188, 0x31d3, 0x40f5, 0xb1, 0xc, 0x53, 0x9b, 0x2d, 0xb9, 0x40, 0xcd],
'EFI_SHELL_FILE_GUID' : [0xc57ad6b7, 0x0515, 0x40a8, 0x9d, 0x21, 0x55, 0x16, 0x52, 0x85, 0x4e, 0x37],
'EFI_SHELL_PARAMETERS_PROTOCOL_GUID' : [0x752f3136, 0x4e16, 0x4fdc, 0xa2, 0x2a, 0xe5, 0xf4, 0x68, 0x12, 0xf4, 0xca],
'EFI_SHELL_PROTOCOL_GUID' : [0x6302d008, 0x7f9b, 0x4f30, 0x87, 0xac, 0x60, 0xc9, 0xfe, 0xf5, 0xda, 0x4e],
'EFI_SIMPLE_FILE_SYSTEM_PROTOCOL_GUID' : [0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'EFI_SIMPLE_NETWORK_PROTOCOL_GUID' : [0xA19832B9, 0xAC25, 0x11D3, 0x9A, 0x2D, 0x00, 0x90, 0x27, 0x3F, 0xC1, 0x4D],
'EFI_SIMPLE_POINTER_PROTOCOL_GUID' : [0x31878c87, 0xb75, 0x11d5, 0x9a, 0x4f, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_SIMPLE_TEXT_INPUT_EX_PROTOCOL_GUID' : [0xdd9e7534, 0x7762, 0x4698, 0x8c, 0x14, 0xf5, 0x85, 0x17, 0xa6, 0x25, 0xaa],
'EFI_SIMPLE_TEXT_INPUT_PROTOCOL_GUID' : [0x387477c1, 0x69c7, 0x11d2, 0x8e, 0x39, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'EFI_SIMPLE_TEXT_IN_PROTOCOL_GUID' : [0x387477c1, 0x69c7, 0x11d2, 0x8e, 0x39, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'EFI_SIMPLE_TEXT_OUTPUT_PROTOCOL_GUID' : [0x387477c2, 0x69c7, 0x11d2, 0x8e, 0x39, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'EFI_SIMPLE_TEXT_OUT_PROTOCOL_GUID' : [0x387477c2, 0x69c7, 0x11d2, 0x8e, 0x39, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'EFI_SIO_PROTOCOL_GUID' : [0x215fdd18, 0xbd50, 0x4feb, 0x89, 0xb, 0x58, 0xca, 0xb, 0x47, 0x39, 0xe9],
'EFI_SMBIOS_PROTOCOL_GUID' : [0x3583ff6, 0xcb36, 0x4940, 0x94, 0x7e, 0xb9, 0xb3, 0x9f, 0x4a, 0xfa, 0xf7],
'EFI_SMBIOS_TABLE_GUID' : [0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_SMBUS_HC_PROTOCOL_GUID' : [0xe49d33ed, 0x513d, 0x4634, 0xb6, 0x98, 0x6f, 0x55, 0xaa, 0x75, 0x1c, 0x1b],
'EFI_SMM_ACCESS2_PROTOCOL_GUID' : [0xc2702b74, 0x800c, 0x4131, 0x87, 0x46, 0x8f, 0xb5, 0xb8, 0x9c, 0xe4, 0xac],
'EFI_SMM_ACCESS_PROTOCOL_GUID' : [0x3792095a, 0xe309, 0x4c1e, 0xaa, 0x01, 0x85, 0xf5, 0x65, 0x5a, 0x17, 0xf1],
'EFI_SMM_BASE2_PROTOCOL_GUID' : [0xf4ccbfb7, 0xf6e0, 0x47fd, 0x9d, 0xd4, 0x10, 0xa8, 0xf1, 0x50, 0xc1, 0x91],
'EFI_SMM_BASE_HELPER_READY_PROTOCOL_GUID' : [0x910dca07, 0x1f94, 0x4ee7, 0xaf, 0x2f, 0xff, 0x72, 0xf3, 0x15, 0x43, 0x53],
'EFI_SMM_BASE_PROTOCOL_GUID' : [0x1390954D, 0xda95, 0x4227, 0x93, 0x28, 0x72, 0x82, 0xc2, 0x17, 0xda, 0xa8],
'EFI_SMM_COMMUNICATION_PROTOCOL_GUID' : [0xc68ed8e2, 0x9dc6, 0x4cbd, 0x9d, 0x94, 0xdb, 0x65, 0xac, 0xc5, 0xc3, 0x32],
'EFI_SMM_CONFIGURATION_PROTOCOL_GUID' : [0x26eeb3de, 0xb689, 0x492e, 0x80, 0xf0, 0xbe, 0x8b, 0xd7, 0xda, 0x4b, 0xa7],
'EFI_SMM_CONTROL2_PROTOCOL_GUID' : [0x843dc720, 0xab1e, 0x42cb, 0x93, 0x57, 0x8a, 0x0, 0x78, 0xf3, 0x56, 0x1b],
'EFI_SMM_CONTROL_PROTOCOL_GUID' : [0x8d12e231, 0xc667, 0x4fd1, 0x98, 0xf2, 0x24, 0x49, 0xa7, 0xe7, 0xb2, 0xe5],
'EFI_SMM_CPU_IO2_PROTOCOL_GUID' : [0x3242A9D8, 0xCE70, 0x4AA0, 0x95, 0x5D, 0x5E, 0x7B, 0x14, 0x0D, 0xE4, 0xD2],
'EFI_SMM_CPU_IO_GUID' : [0x5f439a0b, 0x45d8, 0x4682, 0xa4, 0xf4, 0xf0, 0x57, 0x6b, 0x51, 0x34, 0x41],
'EFI_SMM_CPU_PROTOCOL_GUID' : [0xeb346b97, 0x975f, 0x4a9f, 0x8b, 0x22, 0xf8, 0xe9, 0x2b, 0xb3, 0xd5, 0x69],
'EFI_SMM_CPU_SAVE_STATE_PROTOCOL_GUID' : [0x21f302ad, 0x6e94, 0x471b, 0x84, 0xbc, 0xb1, 0x48, 0x0, 0x40, 0x3a, 0x1d],
'EFI_SMM_END_OF_DXE_PROTOCOL_GUID' : [0x24e70042, 0xd5c5, 0x4260, 0x8c, 0x39, 0xa, 0xd3, 0xaa, 0x32, 0xe9, 0x3d],
'EFI_SMM_FAULT_TOLERANT_WRITE_PROTOCOL_GUID' : [0x3868fc3b, 0x7e45, 0x43a7, 0x90, 0x6c, 0x4b, 0xa4, 0x7d, 0xe1, 0x75, 0x4d],
'EFI_SMM_FIRMWARE_VOLUME_BLOCK_PROTOCOL_GUID' : [0xd326d041, 0xbd31, 0x4c01, 0xb5, 0xa8, 0x62, 0x8b, 0xe8, 0x7f, 0x6, 0x53],
'EFI_SMM_GPI_DISPATCH2_PROTOCOL_GUID' : [0x25566b03, 0xb577, 0x4cbf, 0x95, 0x8c, 0xed, 0x66, 0x3e, 0xa2, 0x43, 0x80],
'EFI_SMM_GPI_DISPATCH_PROTOCOL_GUID' : [0xe0744b81, 0x9513, 0x49cd, 0x8c, 0xea, 0xe9, 0x24, 0x5e, 0x70, 0x39, 0xda],
'EFI_SMM_ICHN_DISPATCH_PROTOCOL_GUID' : [0xc50b323e, 0x9075, 0x4f2a, 0xac, 0x8e, 0xd2, 0x59, 0x6a, 0x10, 0x85, 0xcc],
'EFI_SMM_IO_TRAP_DISPATCH2_PROTOCOL_GUID' : [0x58dc368d, 0x7bfa, 0x4e77, 0xab, 0xbc, 0xe, 0x29, 0x41, 0x8d, 0xf9, 0x30],
'EFI_SMM_LOCK_BOX_COMMUNICATION_GUID' : [0x2a3cfebd, 0x27e8, 0x4d0a, 0x8b, 0x79, 0xd6, 0x88, 0xc2, 0xa3, 0xe1, 0xc0],
'EFI_SMM_PCI_ROOT_BRIDGE_IO_PROTOCOL_GUID' : [0x8bc1714d, 0xffcb, 0x41c3, 0x89, 0xdc, 0x6c, 0x74, 0xd0, 0x6d, 0x98, 0xea],
'EFI_SMM_PERIODIC_TIMER_DISPATCH2_PROTOCOL_GUID' : [0x4cec368e, 0x8e8e, 0x4d71, 0x8b, 0xe1, 0x95, 0x8c, 0x45, 0xfc, 0x8a, 0x53],
'EFI_SMM_PERIODIC_TIMER_DISPATCH_PROTOCOL_GUID' : [0x9cca03fc, 0x4c9e, 0x4a19, 0x9b, 0x6, 0xed, 0x7b, 0x47, 0x9b, 0xde, 0x55],
'EFI_SMM_POWER_BUTTON_DISPATCH2_PROTOCOL_GUID' : [0x1b1183fa, 0x1823, 0x46a7, 0x88, 0x72, 0x9c, 0x57, 0x87, 0x55, 0x40, 0x9d],
'EFI_SMM_POWER_BUTTON_DISPATCH_PROTOCOL_GUID' : [0xb709efa0, 0x47a6, 0x4b41, 0xb9, 0x31, 0x12, 0xec, 0xe7, 0xa8, 0xee, 0x56],
'EFI_SMM_READY_TO_LOCK_PROTOCOL_GUID' : [0x47b7fa8c, 0xf4bd, 0x4af6, 0x82, 0x00, 0x33, 0x30, 0x86, 0xf0, 0xd2, 0xc8],
'EFI_SMM_RSC_HANDLER_PROTOCOL_GUID' : [0x2ff29fa7, 0x5e80, 0x4ed9, 0xb3, 0x80, 0x1, 0x7d, 0x3c, 0x55, 0x4f, 0xf4],
'EFI_SMM_STANDBY_BUTTON_DISPATCH2_PROTOCOL_GUID' : [0x7300c4a1, 0x43f2, 0x4017, 0xa5, 0x1b, 0xc8, 0x1a, 0x7f, 0x40, 0x58, 0x5b],
'EFI_SMM_STANDBY_BUTTON_DISPATCH_PROTOCOL_GUID' : [0x78965b98, 0xb0bf, 0x449e, 0x8b, 0x22, 0xd2, 0x91, 0x4e, 0x49, 0x8a, 0x98],
'EFI_SMM_STATUS_CODE_PROTOCOL_GUID' : [0x6afd2b77, 0x98c1, 0x4acd, 0xa6, 0xf9, 0x8a, 0x94, 0x39, 0xde, 0xf, 0xb1],
'EFI_SMM_SWAP_ADDRESS_RANGE_PROTOCOL_GUID' : [0x67c4f112, 0x3385, 0x4e55, 0x9c, 0x5b, 0xc0, 0x5b, 0x71, 0x7c, 0x42, 0x28],
'EFI_SMM_SW_DISPATCH2_PROTOCOL_GUID' : [0x18a3c6dc, 0x5eea, 0x48c8, 0xa1, 0xc1, 0xb5, 0x33, 0x89, 0xf9, 0x89, 0x99],
'EFI_SMM_SW_DISPATCH_PROTOCOL_GUID' : [0xe541b773, 0xdd11, 0x420c, 0xb0, 0x26, 0xdf, 0x99, 0x36, 0x53, 0xf8, 0xbf],
'EFI_SMM_SX_DISPATCH2_PROTOCOL_GUID' : [0x456d2859, 0xa84b, 0x4e47, 0xa2, 0xee, 0x32, 0x76, 0xd8, 0x86, 0x99, 0x7d],
'EFI_SMM_SX_DISPATCH_PROTOCOL_GUID' : [0x14fc52be, 0x1dc, 0x426c, 0x91, 0xae, 0xa2, 0x3c, 0x3e, 0x22, 0xa, 0xe8],
'EFI_SMM_USB_DISPATCH2_PROTOCOL_GUID' : [0xee9b8d90, 0xc5a6, 0x40a2, 0xbd, 0xe2, 0x52, 0x55, 0x8d, 0x33, 0xcc, 0xa1],
'EFI_SMM_USB_DISPATCH_PROTOCOL_GUID' : [0xa05b6ffd, 0x87af, 0x4e42, 0x95, 0xc9, 0x62, 0x28, 0xb6, 0x3c, 0xf3, 0xf3],
'EFI_SMM_VARIABLE_PROTOCOL_GUID' : [0xed32d533, 0x99e6, 0x4209, 0x9c, 0xc0, 0x2d, 0x72, 0xcd, 0xd9, 0x98, 0xa7],
'EFI_SMM_VARIABLE_WRITE_GUID' : [0x93ba1826, 0xdffb, 0x45dd, 0x82, 0xa7, 0xe7, 0xdc, 0xaa, 0x3b, 0xbd, 0xf3],
'EFI_STANDARD_CALLER_ID_GUID' : [0xC9DCF469, 0xA7C4, 0x11D5, 0x87, 0xDA, 0x00, 0x06, 0x29, 0x45, 0xC3, 0xB9],
'EFI_STANDARD_ERROR_DEVICE_GUID' : [0xd3b36f2d, 0xd551, 0x11d4, 0x9a, 0x46, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_STATUS_CODE_DATA_TYPE_ASSERT_GUID' : [0xDA571595, 0x4D99, 0x487C, 0x82, 0x7C, 0x26, 0x22, 0x67, 0x7D, 0x33, 0x07],
'EFI_STATUS_CODE_DATA_TYPE_DEBUG_GUID' : [0x9A4E9246, 0xD553, 0x11D5, 0x87, 0xE2, 0x00, 0x06, 0x29, 0x45, 0xC3, 0xb9],
'EFI_STATUS_CODE_DATA_TYPE_ERROR_GUID' : [0xAB359CE3, 0x99B3, 0xAE18, 0xC8, 0x9D, 0x95, 0xD3, 0xB0, 0x72, 0xE1, 0x9B],
'EFI_STATUS_CODE_DATA_TYPE_EXCEPTION_HANDLER_GUID' : [0x3BC2BD12, 0xAD2E, 0x11D5, 0x87, 0xDD, 0x00, 0x06, 0x29, 0x45, 0xC3, 0xB9],
'EFI_STATUS_CODE_DATA_TYPE_PROGRESS_CODE_GUID' : [0xA356AB39, 0x35C4, 0x35DA, 0xB3, 0x7A, 0xF8, 0xEA, 0x9E, 0x8B, 0x36, 0xA3],
'EFI_STATUS_CODE_DATA_TYPE_STRING_GUID' : [0x92D11080, 0x496F, 0x4D95, 0xBE, 0x7E, 0x03, 0x74, 0x88, 0x38, 0x2B, 0x0A],
'EFI_STATUS_CODE_GUID' : [0xd083e94c, 0x6560, 0x42e4, 0xb6, 0xd4, 0x2d, 0xf7, 0x5a, 0xdf, 0x6a, 0x2a],
'EFI_STATUS_CODE_RUNTIME_PROTOCOL_GUID' : [0xd2b2b828, 0x826, 0x48a7, 0xb3, 0xdf, 0x98, 0x3c, 0x0, 0x60, 0x24, 0xf0],
'EFI_STATUS_CODE_SPECIFIC_DATA_GUID' : [0x335984bd, 0xe805, 0x409a, 0xb8, 0xf8, 0xd2, 0x7e, 0xce, 0x5f, 0xf7, 0xa6],
'EFI_STORAGE_SECURITY_COMMAND_PROTOCOL_GUID' : [0xC88B0B6D, 0x0DFC, 0x49A7, 0x9C, 0xB4, 0x49, 0x07, 0x4B, 0x4C, 0x3A, 0x78],
'EFI_SWAP_ADDRESS_RANGE_PROTOCOL_GUID' : [0x1259f60d, 0xb754, 0x468e, 0xa7, 0x89, 0x4d, 0xb8, 0x5d, 0x55, 0xe8, 0x7e],
'EFI_SYSTEM_NV_DATA_FV_GUID' : [0xfff12b8d, 0x7696, 0x4c8b, 0xa9, 0x85, 0x27, 0x47, 0x7, 0x5b, 0x4f, 0x50],
'EFI_SYSTEM_NV_DATA_HOB_GUID' : [0xd6e5092d, 0xc7b2, 0x4872, 0xaf, 0x66, 0xfd, 0xc0, 0xe6, 0xf9, 0x5e, 0x78],
'EFI_TAPE_IO_PROTOCOL_GUID' : [0x1e93e633, 0xd65a, 0x459e, 0xab, 0x84, 0x93, 0xd9, 0xec, 0x26, 0x6d, 0x18],
'EFI_TCG_EVENT_HOB_GUID' : [0x2e3044ac, 0x879f, 0x490f, 0x97, 0x60, 0xbb, 0xdf, 0xaf, 0x69, 0x5f, 0x50],
'EFI_TCG_PLATFORM_PROTOCOL_GUID' : [0x8c4c9a41, 0xbf56, 0x4627, 0x9e, 0xa, 0xc8, 0x38, 0x6d, 0x66, 0x11, 0x5c],
'EFI_TCG_PROTOCOL_GUID' : [0xf541796d, 0xa62e, 0x4954, 0xa7, 0x75, 0x95, 0x84, 0xf6, 0x1b, 0x9c, 0xdd],
'EFI_TCP4_PROTOCOL_GUID' : [0x65530BC7, 0xA359, 0x410f, 0xB0, 0x10, 0x5A, 0xAD, 0xC7, 0xEC, 0x2B, 0x62],
'EFI_TCP4_SERVICE_BINDING_PROTOCOL_GUID' : [0x00720665, 0x67EB, 0x4a99, 0xBA, 0xF7, 0xD3, 0xC3, 0x3A, 0x1C, 0x7C, 0xC9],
'EFI_TCP6_PROTOCOL_GUID' : [0x46e44855, 0xbd60, 0x4ab7, 0xab, 0x0d, 0xa6, 0x79, 0xb9, 0x44, 0x7d, 0x77],
'EFI_TCP6_SERVICE_BINDING_PROTOCOL_GUID' : [0xec20eb79, 0x6c1a, 0x4664, 0x9a, 0x0d, 0xd2, 0xe4, 0xcc, 0x16, 0xd6, 0x64],
'EFI_TCP_PROTOCOL_GUID' : [0x02b3d5f2, 0xac28, 0x11d3, 0x9a, 0x2d, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_TIANO_DECOMPRESS_PROTOCOL_GUID' : [0xe84cf29c, 0x191f, 0x4eae, 0x96, 0xe1, 0xf4, 0x6a, 0xec, 0xea, 0xea, 0x0b],
'EFI_TIMER_ARCH_PROTOCOL_GUID' : [0x26baccb3, 0x6f42, 0x11d4, 0xbc, 0xe7, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_TSC_FREQUENCY_GUID' : [0xdba6a7e3, 0xbb57, 0x4be7, 0x8a, 0xf8, 0xd5, 0x78, 0xdb, 0x7e, 0x56, 0x87],
'EFI_UART_DEVICE_PATH_GUID' : [0x37499a9d, 0x542f, 0x4c89, 0xa0, 0x26, 0x35, 0xda, 0x14, 0x20, 0x94, 0xe4],
'EFI_UDP4_PROTOCOL_GUID' : [0x3ad9df29, 0x4501, 0x478d, 0xb1, 0xf8, 0x7f, 0x7f, 0xe7, 0x0e, 0x50, 0xf3],
'EFI_UDP4_SERVICE_BINDING_PROTOCOL_GUID' : [0x83f01464, 0x99bd, 0x45e5, 0xb3, 0x83, 0xaf, 0x63, 0x05, 0xd8, 0xe9, 0xe6],
'EFI_UDP6_PROTOCOL_GUID' : [0x4f948815, 0xb4b9, 0x43cb, 0x8a, 0x33, 0x90, 0xe0, 0x60, 0xb3, 0x49, 0x55],
'EFI_UDP6_SERVICE_BINDING_PROTOCOL_GUID' : [0x66ed4721, 0x3c98, 0x4d3e, 0x81, 0xe3, 0xd0, 0x3d, 0xd3, 0x9a, 0x72, 0x54],
'EFI_UGA_DRAW_PROTOCOL_GUID' : [0x982c298b, 0xf4fa, 0x41cb, 0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39],
'EFI_UGA_IO_PROTOCOL_GUID' : [0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0xb, 0x7, 0xa2],
'EFI_UGA_SPLASH_PROTOCOL_GUID' : [0xa45b3a0d, 0x2e55, 0x4c03, 0xad, 0x9c, 0x27, 0xd4, 0x82, 0xb, 0x50, 0x7e],
'EFI_UNICODE_COLLATION2_PROTOCOL_GUID' : [0xa4c751fc, 0x23ae, 0x4c3e, 0x92, 0xe9, 0x49, 0x64, 0xcf, 0x63, 0xf3, 0x49],
'EFI_UNICODE_COLLATION_PROTOCOL2_GUID' : [0xa4c751fc, 0x23ae, 0x4c3e, 0x92, 0xe9, 0x49, 0x64, 0xcf, 0x63, 0xf3, 0x49],
'EFI_UNICODE_COLLATION_PROTOCOL_GUID' : [0x1d85cd7f, 0xf43d, 0x11d2, 0x9a, 0xc, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_UNIX_CONSOLE_GUID' : [0xf2cc5d06, 0x8985, 0x11db, 0xbb, 0x19, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_UNIX_CPU_MODEL_GUID' : [0xf2d3b330, 0x8985, 0x11db, 0x8a, 0xa3, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_UNIX_CPU_SPEED_GUID' : [0xf2d74e5a, 0x8985, 0x11db, 0x97, 0x05, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_UNIX_FILE_SYSTEM_GUID' : [0xf2c16b9e, 0x8985, 0x11db, 0x92, 0xc8, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_UNIX_GOP_GUID' : [0xbace07c2, 0x8987, 0x11db, 0xa5, 0x9a, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_UNIX_IO_PROTOCOL_GUID' : [0xf2e23f54, 0x8985, 0x11db, 0xac, 0x79, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_UNIX_MEMORY_GUID' : [0xf2d006cc, 0x8985, 0x11db, 0xa4, 0x72, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_UNIX_NETWORK_GUID' : [0x081603B4, 0x0F1D, 0x4022, 0xB6, 0xFD, 0x4C, 0xE3, 0x5E, 0x09, 0xA1, 0xA6],
'EFI_UNIX_PHYSICAL_DISKS_GUID' : [0xf2bdcc96, 0x8985, 0x11db, 0x87, 0x19, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_UNIX_SERIAL_PORT_GUID' : [0x6d3a727d, 0x66c8, 0x4d19, 0x87, 0xe6, 0x2, 0x15, 0x86, 0x14, 0x90, 0xf3],
'EFI_UNIX_THUNK_PROTOCOL_GUID' : [0xf2e98868, 0x8985, 0x11db, 0x9a, 0x59, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_UNIX_UGA_GUID' : [0xf2c8b80e, 0x8985, 0x11db, 0x93, 0xf1, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_UNIX_UGA_IO_PROTOCOL_GUID' : [0xf2e5e2c6, 0x8985, 0x11db, 0xa1, 0x91, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_UNIX_VIRTUAL_DISKS_GUID' : [0xf2ba331a, 0x8985, 0x11db, 0xa4, 0x06, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'EFI_UPDATE_DATA_FILE_GUID' : [0x283fa2ee, 0x532c, 0x484d, 0x93, 0x83, 0x9f, 0x93, 0xb3, 0x6f, 0xb, 0x7e],
'EFI_USB2_HC_PROTOCOL_GUID' : [0x3e745226, 0x9818, 0x45b6, 0xa2, 0xac, 0xd7, 0xcd, 0xe, 0x8b, 0xa2, 0xbc],
'EFI_USB_ATAPI_PROTOCOL_GUID' : [0x2B2F68DA, 0x0CD2, 0x44cf, 0x8E, 0x8B, 0xBB, 0xA2, 0x0B, 0x1B, 0x5B, 0x75],
'EFI_USB_BUS_PROTOCOL_GUID' : [0x2B2F68CC, 0x0CD2, 0x44cf, 0x8E, 0x8B, 0xBB, 0xA2, 0x0B, 0x1B, 0x5B, 0x75],
'EFI_USB_HC_PROTOCOL_GUID' : [0xf5089266, 0x1aa0, 0x4953, 0x97, 0xd8, 0x56, 0x2f, 0x8a, 0x73, 0xb5, 0x19],
'EFI_USB_IO_PROTOCOL_GUID' : [0x2B2F68D6, 0x0CD2, 0x44cf, 0x8E, 0x8B, 0xBB, 0xA2, 0x0B, 0x1B, 0x5B, 0x75],
'EFI_USER_CREDENTIAL2_PROTOCOL_GUID' : [0xe98adb03, 0xb8b9, 0x4af8, 0xba, 0x20, 0x26, 0xe9, 0x11, 0x4c, 0xbc, 0xe5],
'EFI_USER_CREDENTIAL_PROTOCOL_GUID' : [0x71ee5e94, 0x65b9, 0x45d5, 0x82, 0x1a, 0x3a, 0x4d, 0x86, 0xcf, 0xe6, 0xbe],
'EFI_USER_INFO_ACCESS_SETUP_ADMIN_GUID' : [0x85b75607, 0xf7ce, 0x471e, 0xb7, 0xe4, 0x2a, 0xea, 0x5f, 0x72, 0x32, 0xee],
'EFI_USER_INFO_ACCESS_SETUP_NORMAL_GUID' : [0x1db29ae0, 0x9dcb, 0x43bc, 0x8d, 0x87, 0x5d, 0xa1, 0x49, 0x64, 0xdd, 0xe2],
'EFI_USER_INFO_ACCESS_SETUP_RESTRICTED_GUID' : [0xbdb38125, 0x4d63, 0x49f4, 0x82, 0x12, 0x61, 0xcf, 0x5a, 0x19, 0xa, 0xf8],
'EFI_USER_MANAGER_PROTOCOL_GUID' : [0x6fd5b00c, 0xd426, 0x4283, 0x98, 0x87, 0x6c, 0xf5, 0xcf, 0x1c, 0xb1, 0xfe],
'EFI_UXIX_SYSTEM_CONFIG_GUID' : [0x375ea976, 0x3ccd, 0x4e74, 0xa8, 0x45, 0x26, 0xb9, 0xb3, 0x24, 0xb1, 0x3c],
'EFI_VARIABLE_ARCH_PROTOCOL_GUID' : [0x1e5668e2, 0x8481, 0x11d4, 0xbc, 0xf1, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_VARIABLE_GUID' : [0xddcf3616, 0x3275, 0x4164, 0x98, 0xb6, 0xfe, 0x85, 0x70, 0x7f, 0xfe, 0x7d],
'EFI_VARIABLE_INDEX_TABLE_GUID' : [0x8cfdb8c8, 0xd6b2, 0x40f3, 0x8e, 0x97, 0x02, 0x30, 0x7c, 0xc9, 0x8b, 0x7c],
'EFI_VARIABLE_STORE_PROTOCOL_GUID' : [0xf088cd91, 0xa046, 0x11d2, 0x8e, 0x42, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'EFI_VARIABLE_WRITE_ARCH_PROTOCOL_GUID' : [0x6441f818, 0x6362, 0x4e44, 0xb5, 0x70, 0x7d, 0xba, 0x31, 0xdd, 0x24, 0x53],
'EFI_VGA_MINI_PORT_PROTOCOL_GUID' : [0xc7735a2f, 0x88f5, 0x4882, 0xae, 0x63, 0xfa, 0xac, 0x8c, 0x8b, 0x86, 0xb3],
'EFI_VIRTUAL_MEMORY_ACCESS_PROTOCOL_GUID' : [0x745d377a, 0xb988, 0x47b2, 0xb1, 0x8f, 0xbb, 0xc8, 0xd, 0xc5, 0x66, 0x98],
'EFI_VLAN_CONFIG_PROTOCOL_GUID' : [0x9e23d768, 0xd2f3, 0x4366, 0x9f, 0xc3, 0x3a, 0x7a, 0xba, 0x86, 0x43, 0x74],
'EFI_VT_100_GUID' : [0xdfa66065, 0xb419, 0x11d3, 0x9a, 0x2d, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'EFI_VT_100_PLUS_GUID' : [0x7baec70b, 0x57e0, 0x4c76, 0x8e, 0x87, 0x2f, 0x9e, 0x28, 0x08, 0x83, 0x43],
'EFI_VT_UTF8_GUID' : [0xad15a0d6, 0x8bec, 0x4acf, 0xa0, 0x73, 0xd0, 0x1d, 0xe7, 0x7e, 0x2d, 0x88],
'EFI_WATCHDOG_TIMER_ARCH_PROTOCOL_GUID' : [0x665E3FF5, 0x46CC, 0x11d4, 0x9A, 0x38, 0x00, 0x90, 0x27, 0x3F, 0xC1, 0x4D],
'EFI_WIN_NT_CONSOLE_GUID' : [0xba73672c, 0xa5d3, 0x11d4, 0xbd, 0x0, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_WIN_NT_CPU_MODEL_GUID' : [0xbee9b6ce, 0x2f8a, 0x11d4, 0xbd, 0xd, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_WIN_NT_CPU_SPEED_GUID' : [0xd4f29055, 0xe1fb, 0x11d4, 0xbd, 0xd, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_WIN_NT_FILE_SYSTEM_GUID' : [0xc95a935, 0xa006, 0x11d4, 0xbc, 0xfa, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_WIN_NT_GOP_GUID' : [0x4e11e955, 0xccca, 0x11d4, 0xbd, 0xd, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_WIN_NT_IO_PROTOCOL_GUID' : [0x96eb4ad6, 0xa32a, 0x11d4, 0xbc, 0xfd, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_WIN_NT_MEMORY_GUID' : [0x99042912, 0x122a, 0x11d4, 0xbd, 0xd, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_WIN_NT_PASS_THROUGH_GUID' : [0xcc664eb8, 0x3c24, 0x4086, 0xb6, 0xf6, 0x34, 0xe8, 0x56, 0xbc, 0xe3, 0x6e],
'EFI_WIN_NT_PHYSICAL_DISKS_GUID' : [0xc95a92f, 0xa006, 0x11d4, 0xbc, 0xfa, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_WIN_NT_SERIAL_PORT_GUID' : [0xc95a93d, 0xa006, 0x11d4, 0xbc, 0xfa, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_WIN_NT_SYSTEM_CONFIG_GUID' : [0xb347f047, 0xaf8c, 0x490e, 0xac, 0x07, 0x0a, 0xa9, 0xb7, 0xe5, 0x38, 0x58],
'EFI_WIN_NT_THUNK_PROTOCOL_GUID' : [0x58c518b1, 0x76f3, 0x11d4, 0xbc, 0xea, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_WIN_NT_UGA_GUID' : [0xab248e99, 0xabe1, 0x11d4, 0xbd, 0xd, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_WIN_NT_VIRTUAL_DISKS_GUID' : [0xc95a928, 0xa006, 0x11d4, 0xbc, 0xfa, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'EFI_XEN_INFO_GUID' : [0xd3b46f3b, 0xd441, 0x1244, 0x9a, 0x12, 0x0, 0x12, 0x27, 0x3f, 0xc1, 0x4d],
'EMBEDDED_DEVICE_PROTOCOL_GUID' : [0xbf4b9d10, 0x13ec, 0x43dd, 0x88, 0x80, 0xe9, 0xb, 0x71, 0x8f, 0x27, 0xde],
'EMBEDDED_EXTERNAL_DEVICE_PROTOCOL_GUID' : [0x735F8C64, 0xD696, 0x44D0, 0xBD, 0xF2, 0x44, 0x7F, 0xD0, 0x5A, 0x54, 0x06],
'EMU_BLOCK_IO_PROTOCOL_GUID' : [0x6888A4AE, 0xAFCE, 0xE84B, 0x91, 0x02, 0xF7, 0xB9, 0xDA, 0xE6, 0xA0, 0x30],
'EMU_GRAPHICS_WINDOW_PROTOCOL_GUID' : [0x30FD316A, 0x6728, 0x2E41, 0xA6, 0x90, 0x0D, 0x13, 0x33, 0xD8, 0xCA, 0xC1],
'EMU_IO_THUNK_PROTOCO_GUID' : [0x453368F6, 0x7C85, 0x434A, 0xA9, 0x8A, 0x72, 0xD1, 0xB7, 0xFF, 0xA9, 0x26],
'EMU_SNP_PROTOCOL_GUID' : [0xFD5FBE54, 0x8C35, 0xB345, 0x8A, 0x0F, 0x7A, 0xC8, 0xA5, 0xFD, 0x05, 0x21],
'EMU_THUNK_PPI_GUID' : [0xB958B78C, 0x1D3E, 0xEE40, 0x8B, 0xF4, 0xF0, 0x63, 0x2D, 0x06, 0x39, 0x16],
'EMU_THUNK_PROTOCOL_GUID' : [0x5CF32E0B, 0x8EDF, 0x2E44, 0x9C, 0xDA, 0x93, 0x20, 0x5E, 0x99, 0xEC, 0x1C],
'EXTENDED_SAL_BOOT_SERVICE_PROTOCOL_GUID' : [0xde0ee9a4, 0x3c7a, 0x44f2, 0xb7, 0x8b, 0xe3, 0xcc, 0xd6, 0x9c, 0x3a, 0xf7],
'EXTENDED_SAL_BOOT_SERVICE_PROTOCOL_GUID' : [0xde0ee9a4,0x3c7a,0x44f2,0xb7,0x8b,0xe3,0xcc,0xd6,0x9c,0x3a,0xf7],
'FFS_GUID' : [0xac05bf33, 0x995a, 0x4ed4, 0xaa, 0xb8, 0xef, 0x7a, 0xe8, 0xf, 0x5c, 0xb0],
'FILE_EXPLORE_FORMSET_GUID' : [0x1f2d63e1, 0xfebd, 0x4dc7, 0x9c, 0xc5, 0xba, 0x2b, 0x1c, 0xef, 0x9c, 0x5b],
'FILE_GUID' : [0xcbd2e4d5, 0x7068, 0x4ff5, 0xb4, 0x62, 0x98, 0x22, 0xb4, 0xad, 0x8d, 0x60],
'FORM_BROWSER_EXTENSION_PROTOCOL_GUID' : [0x1f73b18d, 0x4630, 0x43c1, 0xa1, 0xde, 0x6f, 0x80, 0x85, 0x5d, 0x7d, 0xa4],
'FRAMEWORK_BDS_FRONTPAGE_FORMSET_GUID' : [0x9e0c30bc, 0x3f06, 0x4ba6, 0x82, 0x88, 0x9, 0x17, 0x9b, 0x85, 0x5d, 0xbe],
'FRAMEWORK_EFI_FIRMWARE_VOLUME_BLOCK_PROTOCOL_GUID' : [0xDE28BC59, 0x6228, 0x41BD, 0xBD, 0xF6, 0xA3, 0xB9, 0xAD,0xB5, 0x8D, 0xA1],
'FRAMEWORK_EFI_MP_SERVICES_PROTOCOL_GUID' : [0xf33261e7, 0x23cb, 0x11d5, 0xbd, 0x5c, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'FRONT_PAGE_FORMSET_GUID' : [0x9e0c30bc, 0x3f06, 0x4ba6, 0x82, 0x88, 0x9, 0x17, 0x9b, 0x85, 0x5d, 0xbe],
'HANDLE_PARSING_HII_GUID' : [0xb8969637, 0x81de, 0x43af, 0xbc, 0x9a, 0x24, 0xd9, 0x89, 0x13, 0xf2, 0xf6],
'HD_BOOT_DEVICE_PATH_VARIABLE_GUID' : [0xfab7e9e1, 0x39dd, 0x4f2b, 0x84, 0x8, 0xe2, 0xe, 0x90, 0x6c, 0xb6, 0xde],
'HII_RESOURCE_SAMPLE_FORM_SET_GUID' : [0x4f4ef7f0, 0xaa29, 0x4ce9, 0xba, 0x41, 0x64, 0x3e, 0x1, 0x23, 0xa9, 0x9f],
'HOB_LIST_GUID' : [0x7739f24c, 0x93d7, 0x11d4, 0x9a, 0x3a, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'HOT_PLUG_DEVICE_GUID' : [0x220ac432, 0x1d43, 0x49e5, 0xa7, 0x4f, 0x4c, 0x9d, 0xa6, 0x7a, 0xd2, 0x3b],
'IDLE_LOOP_EVENT_GUID' : [0x3c8d294c, 0x5fc3, 0x4451, 0xbb, 0x31, 0xc4, 0xc0, 0x32, 0x29, 0x5e, 0x6c],
'INTEL_FRAMEWORK_MODULEPKG_TOKEN_SPACE_GUID' : [0xD3705011, 0xBC19, 0x4af7, 0xBE, 0x16, 0xF6, 0x80, 0x30, 0x37, 0x8C, 0x15],
'IP4_ISCSI_CONFIG_GUID' : [0x6456ed61, 0x3579, 0x41c9, 0x8a, 0x26, 0x0a, 0x0b, 0xd6, 0x2b, 0x78, 0xfc],
'IP6_CONFIG_NVDATA_GUID' : [0x2eea107, 0x98db, 0x400e, 0x98, 0x30, 0x46, 0xa, 0x15, 0x42, 0xd7, 0x99],
'ISCSI_CHAP_AUTH_INFO_GUID' : [0x786ec0ac, 0x65ae, 0x4d1b, 0xb1, 0x37, 0xd, 0x11, 0xa, 0x48, 0x37, 0x97],
'ISCSI_CONFIG_GUID' : [0x4b47d616, 0xa8d6, 0x4552, 0x9d, 0x44, 0xcc, 0xad, 0x2e, 0xf, 0x4c, 0xf9],
'ISCSI_V4_PRIVATE_GUID' : [0xfa3cde4c, 0x87c2, 0x427d, 0xae, 0xde, 0x7d, 0xd0, 0x96, 0xc8, 0x8c, 0x58],
'ISCSI_V6_PRIVATE_GUID' : [0x28be27e5, 0x66cc, 0x4a31, 0xa3, 0x15, 0xdb, 0x14, 0xc3, 0x74, 0x4d, 0x85],
'LAST_ENUM_LANGUAGE_GUID' : [0xe8c545b, 0xa2ee, 0x470d, 0x8e, 0x26, 0xbd, 0xa1, 0xa1, 0x3c, 0xa, 0xa3],
'LDR_MEMORY_DESCRIPTOR_GUID' : [0x7701d7e5, 0x7d1d, 0x4432, 0xa4, 0x68, 0x67, 0x3d, 0xab, 0x8a, 0xde, 0x60],
'LOAD_FILE_PROTOCOL_GUID' : [0x56EC3091, 0x954C, 0x11d2, 0x8E, 0x3F, 0x00, 0xA0, 0xC9, 0x69, 0x72, 0x3B],
'LOCAL_EFI_WIN_NT_BUS_DRIVER_IO_PROTOCOL_GUID' : [0x96eb4ad6, 0xa32a, 0x11d4, 0xbc, 0xfd, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'LOCAL_EFI_WIN_NT_SERIAL_PORT_GUID' : [0xc95a93d, 0xa006, 0x11d4, 0xbc, 0xfa, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'LOCAL_EFI_WIN_NT_THUNK_PROTOCOL_GUID' : [0x58c518b1, 0x76f3, 0x11d4, 0xbc, 0xea, 0x0, 0x80, 0xc7, 0x3c, 0x88, 0x81],
'LZMAF86_CUSTOM_DECOMPRESS_GUID' : [0xD42AE6BD, 0x1352, 0x4bfb, 0x90, 0x9A, 0xCA, 0x72, 0xA6, 0xEA, 0xE8, 0x89],
'LZMA_CUSTOM_DECOMPRESS_GUID' : [0xEE4E5898, 0x3914, 0x4259, 0x9D, 0x6E, 0xDC, 0x7B, 0xD7, 0x94, 0x03, 0xCF],
'MDEMODULEPKG_TOKEN_SPACE_GUID' : [0xA1AFF049, 0xFDEB, 0x442a, 0xB3, 0x20, 0x13, 0xAB, 0x4C, 0xB7, 0x2B, 0xBC],
'MDEPKG_TOKEN_SPACE_GUID' : [0x914AEBE7, 0x4635, 0x459b, 0xAA, 0x1C, 0x11, 0xE2, 0x19, 0xB0, 0x3A, 0x10],
'MEMORY_ONLY_RESET_CONTROL_GUID' : [0xe20939be, 0x32d4, 0x41be, 0xa1, 0x50, 0x89, 0x7f, 0x85, 0xd4, 0x98, 0x29],
'MEMORY_STATUS_CODE_RECORD_GUID' : [0x60cc026, 0x4c0d, 0x4dda, 0x8f, 0x41, 0x59, 0x5f, 0xef, 0x0, 0xa5, 0x2],
'MTC_VENDOR_GUID' : [0xeb704011, 0x1402, 0x11d3, 0x8e, 0x77, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'MY_GUID' : [0x12345678, 0xAABB, 0xCCDD, 0xEE, 0xFF, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66],
'NT_FWH_PPI_GUID' : [0x4e76928f, 0x50ad, 0x4334, 0xb0, 0x6b, 0xa8, 0x42, 0x13, 0x10, 0x8a, 0x57],
'PCATCHIPSET_TOKEN_SPACE_GUID' : [0x326ae723, 0xae32, 0x4589, 0x98, 0xb8, 0xca, 0xc2, 0x3c, 0xdc, 0xc1, 0xb1],
'PCD_DATABASE_HOB_GUID' : [0xEA296D92, 0x0B69, 0x423C, 0x8C, 0x28, 0x33, 0xB4, 0xE0, 0xA9, 0x12, 0x68],
'PCD_PPI_GUID' : [0x6e81c58, 0x4ad7, 0x44bc, 0x83, 0x90, 0xf1, 0x2, 0x65, 0xf7, 0x24, 0x80],
'PCD_PROTOCOL_GUID' : [0x11b34006, 0xd85b, 0x4d0a, 0xa2, 0x90, 0xd5, 0xa5, 0x71, 0x31, 0xe, 0xf7],
'PE32_IMAGE_PROTOCOL_GUID' : [0x5cb5c776,0x60d5,0x45ee,0x88,0x3c,0x45,0x27,0x8,0xcd,0x74,0x3f],
'PEI_ATA_CONTROLLER_PPI_GUID' : [0xa45e60d1, 0xc719, 0x44aa, 0xb0, 0x7a, 0xaa, 0x77, 0x7f, 0x85, 0x90, 0x6d],
'PEI_BASE_MEMORY_TEST_GUID' : [0xb6ec423c, 0x21d2, 0x490d, 0x85, 0xc6, 0xdd, 0x58, 0x64, 0xea, 0xa6, 0x74],
'PEI_BLOCK_IO_PPI_GUID' : [0x695d8aa1, 0x42ee, 0x4c46, 0x80, 0x5c, 0x6e, 0xa6, 0xbc, 0xe7, 0x99, 0xe3],
'PEI_BOOT_SCRIPT_EXECUTER_PPI_GUID' : [0xabd42895, 0x78cf, 0x4872, 0x84, 0x44, 0x1b, 0x5c, 0x18, 0x0b, 0xfb, 0xff],
'PEI_CAPSULE_PPI_GUID' : [0x3acf33ee, 0xd892, 0x40f4, 0xa2, 0xfc, 0x38, 0x54, 0xd2, 0xe1, 0x32, 0x3d],
'PEI_CPU_IO_PPI_GUID' : [0xe6af1f7b, 0xfc3f, 0x46da, 0xa8, 0x28, 0xa3, 0xb4, 0x57, 0xa4, 0x42, 0x82],
'PEI_END_OF_PEI_PHASE_PPI_GUID' : [0x605EA650, 0xC65C, 0x42e1, 0xBA, 0x80, 0x91, 0xA5, 0x2A, 0xB6, 0x18, 0xC6],
'PEI_FLASH_MAP_PPI_GUID' : [0xf34c2fa0, 0xde88, 0x4270, 0x84, 0x14, 0x96, 0x12, 0x22, 0xf4, 0x52, 0x1c],
'PEI_IN_MEMORY_GUID' : [0x643b8786, 0xb417, 0x48d2, 0x8f, 0x5e, 0x78, 0x19, 0x93, 0x1c, 0xae, 0xd8],
'PEI_LOCK_PHYSICAL_PRESENCE_PPI_GUID' : [0xef9aefe5, 0x2bd3, 0x4031, 0xaf, 0x7d, 0x5e, 0xfe, 0x5a, 0xbb, 0x9a, 0xd],
'PEI_NT_THUNK_GUID' : [0x98c281e5, 0xf906, 0x43dd, 0xa9, 0x2b, 0xb0, 0x3, 0xbf, 0x27, 0x65, 0xda],
'PEI_NT_THUNK_PPI_GUID' : [0x98c281e5, 0xf906, 0x43dd, 0xa9, 0x2b, 0xb0, 0x3, 0xbf, 0x27, 0x65, 0xda],
'PEI_OPERATOR_PRESENCE_PPI_GUID' : [0x20a7378c, 0xaa83, 0x4ce1, 0x82, 0x1f, 0x47, 0x40, 0xee, 0x1b, 0x3f, 0x9f],
'PEI_PCI_CFG_PPI_GUID' : [0xe1f2eba0, 0xf7b9, 0x4a26, 0x86, 0x20, 0x13, 0x12, 0x21, 0x64, 0x2a, 0x90],
'PEI_PERMANENT_MEMORY_INSTALLED_PPI_GUID' : [0xf894643d, 0xc449, 0x42d1, 0x8e, 0xa8, 0x85, 0xbd, 0xd8, 0xc6, 0x5b, 0xde],
'PEI_READ_ONLY_VARIABLE_ACCESS_PPI_GUID' : [0x3cdc90c6, 0x13fb, 0x4a75, 0x9e, 0x79, 0x59, 0xe9, 0xdd, 0x78, 0xb9, 0xfa],
'PEI_RESET_PPI_GUID' : [0xef398d58, 0x9dfd, 0x4103, 0xbf, 0x94, 0x78, 0xc6, 0xf4, 0xfe, 0x71, 0x2f],
'PEI_S3_RESUME_PPI_GUID' : [0x4426CCB2, 0xE684, 0x4a8a, 0xAE, 0x40, 0x20, 0xD4, 0xB0, 0x25, 0xB7, 0x10],
'PEI_SECURITY_PPI_GUID' : [0x1388066e, 0x3a57, 0x4efa, 0x98, 0xf3, 0xc1, 0x2f, 0x3a, 0x95, 0x8a, 0x29],
'PEI_SEC_PERFORMANCE_PPI_GUID' : [0x0ecc666b, 0x4662, 0x47f9, 0x9d, 0xd5, 0xd0, 0x96, 0xff, 0x7d, 0xa4, 0x9e],
'PEI_SMBUS2_PPI_GUID' : [0x9ca93627, 0xb65b, 0x4324, 0xa2, 0x2, 0xc0, 0xb4, 0x61, 0x76, 0x45, 0x43],
'PEI_SMBUS_PPI_GUID' : [0xabd42895, 0x78cf, 0x4872, 0x84, 0x44, 0x1b, 0x5c, 0x18, 0xb, 0xfb, 0xda],
'PEI_SMM_ACCESS_PPI_GUID' : [0x268f33a9, 0xcccd, 0x48be, 0x88, 0x17, 0x86, 0x5, 0x3a, 0xc3, 0x2e, 0xd6],
'PEI_SMM_CONTROL_PPI_GUID' : [0x61c68702, 0x4d7e, 0x4f43, 0x8d, 0xef, 0xa7, 0x43, 0x5, 0xce, 0x74, 0xc5],
'PEI_STALL_PPI_GUID' : [0x1f4c6f90, 0xb06b, 0x48d8, 0xa2, 0x01, 0xba, 0xe5, 0xf1, 0xcd, 0x7d, 0x56],
'PEI_STATUS_CODE_MEMORY_PPI_GUID' : [0x26f8ab01, 0xd3cd, 0x489c, 0x98, 0x4f, 0xdf, 0xde, 0xf7, 0x68, 0x39, 0x5b],
'PEI_STATUS_CODE_PPI_GUID' : [0x229832d3, 0x7a30, 0x4b36, 0xb8, 0x27, 0xf4, 0xc, 0xb7, 0xd4, 0x54, 0x36],
'PEI_TPM_INITIALIZED_PPI_GUID' : [0xe9db0d58, 0xd48d, 0x47f6, 0x9c, 0x6e, 0x6f, 0x40, 0xe8, 0x6c, 0x7b, 0x41],
'PEI_UNIX_AUTOSCAN_PPI_GUID' : [0xf2ed3d14, 0x8985, 0x11db, 0xb0, 0x57, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'PEI_UNIX_THUNK_PPI_GUID' : [0xf2f830f2, 0x8985, 0x11db, 0x80, 0x6b, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'PEI_USB2_HOST_CONTROLLER_PPI_GUID' : [0xa7d09fe1, 0x74d4, 0x4ba5, 0x84, 0x7c, 0x12, 0xed, 0x5b, 0x19, 0xad, 0xe4],
'PEI_USB_CONTROLLER_PPI_GUID' : [0x3bc1f6de, 0x693e, 0x4547,0xa3, 0x0, 0x21, 0x82, 0x3c, 0xa4, 0x20, 0xb2],
'PEI_USB_HOST_CONTROLLER_PPI_GUID' : [0x652b38a9, 0x77f4, 0x453f, 0x89, 0xd5, 0xe7, 0xbd, 0xc3, 0x52, 0xfc, 0x53],
'PEI_USB_IO_PPI_GUID' : [0x7c29785c, 0x66b9, 0x49fc, 0xb7, 0x97, 0x1c, 0xa5, 0x55, 0xe, 0xf2, 0x83],
'PERFORMANCEPKG_TOKEN_SPACE_GUID' : [0x669346ef, 0xFDad, 0x4aeb, 0x08, 0xa6, 0x21, 0x46, 0x2d, 0x3f, 0xef, 0x7d],
'PERFORMANCE_EX_PROTOCOL_GUID' : [0x1ea81bec, 0xf01a, 0x4d98, 0xa2, 0x1, 0x4a, 0x61, 0xce, 0x2f, 0xc0, 0x22],
'PERFORMANCE_PROTOCOL_GUID' : [0x76b6bdfa, 0x2acd, 0x4462, 0x9E, 0x3F, 0xcb, 0x58, 0xC9, 0x69, 0xd9, 0x37],
'PE_COFF_LOADER_PROTOCOL_GUID' : [0xB323179B, 0x97FB, 0x477E, 0xB0, 0xFE, 0xD8, 0x85, 0x91, 0xFA, 0x11, 0xAB],
'PLAT_OVER_MNGR_GUID' : [0x8614567d, 0x35be, 0x4415, 0x8d, 0x88, 0xbd, 0x7d, 0xc, 0x9c, 0x70, 0xc0],
'PRE_PI_EXTRACT_GUIDED_SECTION_DATA_GUID' : [0x385A982C, 0x2F49, 0x4043, 0xA5, 0x1E, 0x49, 0x01, 0x02, 0x5C, 0x8B, 0x6B],
'PWD_CREDENTIAL_PROVIDER_GUID' : [0x78b9ec8b, 0xc000, 0x46c5, 0xac, 0x93, 0x24, 0xa0, 0xc1, 0xbb, 0x0, 0xce],
'RECOVERY_ON_DATA_CD_GUID' : [0x5cac0099, 0x0dc9, 0x48e5, 0x80, 0x68, 0xbb, 0x95, 0xf5, 0x40, 0x0a, 0x9f],
'RECOVERY_ON_FAT_FLOPPY_DISK_GUID' : [0x2e3d2e75, 0x9b2e, 0x412d, 0xb4, 0xb1, 0x70, 0x41, 0x6b, 0x87, 0x0, 0xff],
'RECOVERY_ON_FAT_IDE_DISK_GUID' : [0xb38573b6, 0x6200, 0x4ac5, 0xb5, 0x1d, 0x82, 0xe6, 0x59, 0x38, 0xd7, 0x83],
'RECOVERY_ON_FAT_USB_DISK_GUID' : [0x0ffbce19, 0x324c, 0x4690, 0xa0, 0x09, 0x98, 0xc6, 0xae, 0x2e, 0xb1, 0x86],
'SAL_SYSTEM_TABLE_GUID' : [0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'SECUREBOOT_CONFIG_FORM_SET_GUID' : [0x5daf50a5, 0xea81, 0x4de2, 0x8f, 0x9b, 0xca, 0xbd, 0xa9, 0xcf, 0x5c, 0x14],
'SECURITYPKG_TOKEN_SPACE_GUID' : [0xd3fb176, 0x9569, 0x4d51, 0xa3, 0xef, 0x7d, 0x61, 0xc6, 0x4f, 0xea, 0xba],
'SHELLPKG_SHELL_ENV2_EXT_GUID' : [0xd2c18636, 0x40e5, 0x4eb5, 0xa3, 0x1b, 0x36, 0x69, 0x5f, 0xd4, 0x2c, 0x87],
'SHELL_ALIAS_VARIABLE_GUID' : [0x0053d9d6, 0x2659, 0x4599, 0xa2, 0x6b, 0xef, 0x45, 0x36, 0xe6, 0x31, 0xa9],
'SHELL_DEBUG1_HII_GUID' : [0x25f200aa, 0xd3cb, 0x470a, 0xbf, 0x51, 0xe7, 0xd1, 0x62, 0xd2, 0x2e, 0x6f],
'SHELL_DRIVER1_HII_GUID' : [0xaf0b742, 0x63ec, 0x45bd, 0x8d, 0xb6, 0x71, 0xad, 0x7f, 0x2f, 0xe8, 0xe8],
'SHELL_ENVIRONMENT_PROTOCOL_GUID' : [0x47c7b221, 0xc42a, 0x11d2, 0x8e, 0x57, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'SHELL_INSTALL1_HII_GUID' : [0x7d574d54, 0xd364, 0x4d4a, 0x95, 0xe3, 0x49, 0x45, 0xdb, 0x7a, 0xd3, 0xee],
'SHELL_INTERFACE_PROTOCOL_GUID' : [0x47c7b223, 0xc42a, 0x11d2, 0x8e, 0x57, 0x0, 0xa0, 0xc9, 0x69, 0x72, 0x3b],
'SHELL_LEVEL1_HII_GUID' : [0xdec5daa4, 0x6781, 0x4820, 0x9c, 0x63, 0xa7, 0xb0, 0xe4, 0xf1, 0xdb, 0x31],
'SHELL_LEVEL2_HII_GUID' : [0xf95a7ccc, 0x4c55, 0x4426, 0xa7, 0xb4, 0xdc, 0x89, 0x61, 0x95, 0xb, 0xae],
'SHELL_LEVEL3_HII_GUID' : [0x4344558d, 0x4ef9, 0x4725, 0xb1, 0xe4, 0x33, 0x76, 0xe8, 0xd6, 0x97, 0x4f],
'SHELL_MAP_GUID' : [0x51271e13, 0x7de3, 0x43af, 0x8b, 0xc2, 0x71, 0xad, 0x3b, 0x82, 0x43, 0x25],
'SHELL_NETWORK1_HII_GUID' : [0xf3d301bb, 0xf4a5, 0x45a8, 0xb0, 0xb7, 0xfa, 0x99, 0x9c, 0x62, 0x37, 0xae],
'SHELL_VARIABLE_GUID' : [0x158def5a, 0xf656, 0x419c, 0xb0, 0x27, 0x7a, 0x31, 0x92, 0xc0, 0x79, 0xd2],
'SMBIOS_TABLE_GUID' : [0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d],
'SMM_COMMUNICATE_HEADER_GUID' : [0xf328e36c, 0x23b6, 0x4a95, 0x85, 0x4b, 0x32, 0xe1, 0x95, 0x34, 0xcd, 0x75],
'SMM_PERFORMANCE_EX_PROTOCOL_GUID' : [0x931fc048, 0xc71d, 0x4455, 0x89, 0x30, 0x47, 0x6, 0x30, 0xe3, 0xe, 0xe5],
'SMM_PERFORMANCE_PROTOCOL_GUID' : [0xf866226a, 0xeaa5, 0x4f5a, 0xa9, 0xa, 0x6c, 0xfb, 0xa5, 0x7c, 0x58, 0x8e],
'STATUS_CODE_CALLBACK_GUID' : [0xe701458c, 0x4900, 0x4ca5, 0xb7, 0x72, 0x3d, 0x37, 0x94, 0x9f, 0x79, 0x27],
'SYSTEM_ROM_FILE_GUID' : [0x1547B4F3, 0x3E8A, 0x4FEF, 0x81, 0xC8, 0x32, 0x8E, 0xD6, 0x47, 0xAB, 0x1A],
'TCG_CONFIG_FORM_SET_GUID' : [0xb0f901e4, 0xc424, 0x45de, 0x90, 0x81, 0x95, 0xe2, 0xb, 0xde, 0x6f, 0xb5],
'TEMPORARY_RAM_SUPPORT_PPI_GUID' : [0xdbe23aa9, 0xa345, 0x4b97, 0x85, 0xb6, 0xb2, 0x26, 0xf1, 0x61, 0x73, 0x89],
'TIANO_CUSTOM_DECOMPRESS_GUID' : [0xA31280AD, 0x481E, 0x41B6, 0x95, 0xE8, 0x12, 0x7F, 0x4C, 0x98, 0x47, 0x79],
'UNIX_FWH_PPI_GUID' : [0xf2f0dc30, 0x8985, 0x11db, 0xa1, 0x5b, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'UNIX_PEI_LOAD_FILE_GUID' : [0xf2f48768, 0x8985, 0x11db, 0xb8, 0xda, 0x00, 0x40, 0xd0, 0x2b, 0x18, 0x35],
'UNKNOWN_DEVICE_GUID' : [0xcf31fac5, 0xc24e, 0x11d2, 0x85, 0xf3, 0x0, 0xa0, 0xc9, 0x3e, 0xc9, 0x3b],
'USB_CREDENTIAL_PROVIDER_GUID' : [0xd0849ed1, 0xa88c, 0x4ba6, 0xb1, 0xd6, 0xab, 0x50, 0xe2, 0x80, 0xb7, 0xa9],
'USB_KEYBOARD_LAYOUT_PACKAGE_GUID' : [0xc0f3b43, 0x44de, 0x4907, 0xb4, 0x78, 0x22, 0x5f, 0x6f, 0x62, 0x89, 0xdc],
'USER_IDENTIFY_MANAGER_GUID' : [0x3ccd3dd8, 0x8d45, 0x4fed, 0x96, 0x2d, 0x2b, 0x38, 0xcd, 0x82, 0xb3, 0xc4],
'USER_PROFILE_MANAGER_GUID' : [0xc35f272c, 0x97c2, 0x465a, 0xa2, 0x16, 0x69, 0x6b, 0x66, 0x8a, 0x8c, 0xfe],
'VIRTUAL_UNCACHED_PAGES_PROTOCOL_GUID' : [0xAD651C7D, 0x3C22, 0x4DBF, 0x92, 0xe8, 0x38, 0xa7, 0xcd, 0xae, 0x87, 0xb2],
'VLAN_CONFIG_FORM_SET_GUID' : [0xd79df6b0, 0xef44, 0x43bd, 0x97, 0x97, 0x43, 0xe9, 0x3b, 0xcf, 0x5f, 0xa8]
}
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
base_databasewebsite.go
|
/*
// Copyright 2018 Sendhil Vel. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
base_website.go
Date : 19/07/2018
Comment : This is a seed file for creating a Go website which connects to a Postgres database.
Version : 1.0.9
by Sendhil Vel
*/
package main
/*
imports necessary packages
*/
import (
"database/sql"
"errors"
"fmt"
"html/template"
"io/ioutil"
"net/http"
"os"
"strings"
"github.com/gin-gonic/gin"
_ "github.com/lib/pq"
"github.com/subosito/gotenv"
)
/*
Defining necessary variables
*/
var tpl *template.Template
var err error
var r *gin.Engine
var rpcurl string
var db *sql.DB
/*
Users is the core object
*/
type Users struct {
DBId string `json:"userid,omitempty"`
Name string `json:"name,omitempty"`
Email string `json:"email,omitempty"`
Token string `json:"token,omitempty"`
IsActive bool `json:"isactive,omitempty"`
Password string `json:"password,omitempty"`
}
/*
render - renders the response as JSON, XML or HTML depending on the request's Accept header
*/
func render(c *gin.Context, data gin.H, templateName string) {
switch c.Request.Header.Get("Accept") {
case "application/json":
c.JSON(http.StatusOK, data["payload"])
case "application/xml":
c.XML(http.StatusOK, data["payload"])
default:
c.HTML(http.StatusOK, templateName, data)
}
}
/*
jsonresponse - returns the response in JSON format
*/
func jsonresponse(c *gin.Context) {
c.JSON(200, gin.H{
"status": "success",
"message": "process executed successfully",
})
}
/*
performLogin - gets the posted username and password values and checks whether the username/password combination is valid
*/
func performLogin(c *gin.Context) {
/*
Get the values from POST objects
*/
username := c.PostForm("username")
password := c.PostForm("password")
/*
Check that the username and password values are not empty.
If either is missing, render the login page again with an error message.
*/
if len(username) == 0 || len(password) == 0 {
render(c, gin.H{
"title": "Login",
"ErrorMessage": "missing password and/or email",
}, "login.html")
return
}
/*
Call the function which validates the credentials and returns either an error or the user information.
Based on the result we render the relevant page:
if there is an error, the login page is rendered again with an error message;
if the credentials are valid, the userprofile page is rendered with the user information.
*/
UserInfo, err := getUser(username, password)
if err != nil {
render(c, gin.H{
"title": "Login",
"ErrorMessage": "Login Failed",
}, "login.html")
} else {
render(c, gin.H{
"title": "User Profile",
"UserInfo": UserInfo,
}, "userprofile.html")
}
}
/*
getUser - compares the supplied credentials with the information stored in the database.
If the credentials are valid, the user information is returned.
If they are invalid, an error is returned.
*/
func getUser(vUserName string, vPassword string) (userobj Users, err error) {
/*
Defining the variables
*/
var vDBId, vName, vEmail, vToken, vsqlPassword sql.NullString
var vIsActive sql.NullBool
/*
Creating a parameterized SQL query.
Placeholders are used instead of string formatting so that user input cannot inject SQL.
*/
sqlStmt := `SELECT id,Name,Email,Token,Is_Active,Password FROM shard_1.users WHERE lower(Email) = lower($1) and lower(password) = md5($2)`
/*
Executing the SQL query.
In case of error, error information is returned.
The user object is returned when the credentials are valid.
*/
err = db.QueryRow(sqlStmt, strings.ToLower(vUserName), vPassword).Scan(&vDBId, &vName, &vEmail, &vToken, &vIsActive, &vsqlPassword)
if err != nil {
fmt.Println(err)
err = fmt.Errorf("unknown email : %s", err.Error())
return
}
userobj.DBId = vDBId.String
userobj.Name = vName.String
userobj.Email = vEmail.String
userobj.Token = vToken.String
userobj.IsActive = vIsActive.Bool
userobj.Password = ""
return
}
/*
initializeRoutes - defines the various routes and the relevant handlers
*/
func initializeRoutes(port string) {
/*
All the urls will be mentioned and configured.
*/
/*
url : /test
*/
r.GET("/test", showHomePage)
/*
url : /
*/
r.GET("/", showHomePage)
/*
Defining group route for users
*/
userRoutes := r.Group("/user")
{
/*
url : /user/
*/
userRoutes.GET("/", showHomePage)
/*
url : /user/login (method is get)
*/
userRoutes.GET("/login", showLoginPage)
/*
url : /user/login (method is post)
*/
userRoutes.POST("/login", performLogin)
/*
url : /user/jsonresponse
*/
userRoutes.GET("/jsonresponse", jsonresponse)
}
fmt.Println("-------Starting server-------------")
}
/*
main - main function of the file
*/
func main() {
/*
Loads the env variables
*/
gotenv.Load()
/*
Get the port number and the database credentials from the .env file.
If any required value is missing, execution stops further below.
*/
port := os.Getenv("WEBSITE_PORT")
dbUser := os.Getenv("USER_DB_USER")
dbPass := os.Getenv("USER_DB_PASS")
dbName := os.Getenv("USER_DB_NAME")
dbURL := os.Getenv("USER_DB_URL")
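/*
Check that the port is set before continuing.
This assumes WEBSITE_PORT holds an address of the form ":8080", which is what gin's Run expects.
*/
if len(port) == 0 {
fmt.Println("WEBSITE_PORT is not set, please check the .env file")
return
}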
/*
Setting Gin parameter and folders
*/
gin.SetMode(gin.ReleaseMode)
gin.DefaultWriter = ioutil.Discard
r = gin.Default()
r.LoadHTMLGlob("templetes/html/*")
r.Static("/css", "templetes/css")
r.Static("/js", "templetes/js")
r.Static("/img", "templetes/img")
r.Static("/fonts", "templetes/fonts")
/*
Calling the function which connects to the database.
In case of error, we stop the execution.
*/
err := initDBConnection(dbUser, dbPass, dbURL, dbName)
if err != nil {
fmt.Printf("Error: %s\n", err.Error())
return
}
fmt.Println("DB connected")
/*
Calling the function which sets up the routes.
The routes must be registered before r.Run starts serving, so this is a plain call rather than a goroutine.
*/
initializeRoutes(port)
/*
Starting the server in the specified port
*/
fmt.Println("Web Portal is running on " + port)
r.Run(port)
fmt.Println("-------Started server-------------")
}
/*
showHomePage - displays the status of the website
*/
func showHomePage(c *gin.Context) {
c.JSON(200, gin.H{
"Server": "Cool you are ready to start website in goLang",
})
}
/*
showLoginPage - loads and shows the login page with the necessary parameters
*/
func showLoginPage(c *gin.Context) {
render(c, gin.H{
"title": "Login",
"ErrorMessage": "",
}, "login.html")
}
/*
initDBConnection - connects to the Postgres database
*/
func initDBConnection(dbUser, dbPass, dbURL, dbNAME string) (err error) {
/*
Variables defined here
*/
var user, pass, url, name string
/*
verify that all variables exist
*/
if len(dbUser) == 0 || len(dbURL) == 0 || len(dbPass) == 0 || len(dbNAME) == 0 {
err = errors.New("Missing DB Credentails. Please Check")
return
}
/*
verify the variables and set the values after removing spaces
*/
if len(dbUser) > 0 && len(dbPass) > 0 && len(dbURL) > 0 && len(dbNAME) > 0 {
user = strings.TrimSpace(dbUser)
pass = strings.TrimSpace(dbPass)
url = strings.TrimSpace(dbURL)
name = strings.TrimSpace(dbNAME)
}
/*
Prepares the connection string
*/
connString := fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=require", user, pass, url, name)
fmt.Printf("connecting to database: %s\n", url)
/*
connects to the database with the provided values; in case of any issue an error will be raised
*/
db, err = sql.Open("postgres", connString)
if err != nil {
err = fmt.Errorf("Database refused connection: %s", err.Error())
return
}
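/*
sql.Open only validates its arguments and does not actually open a connection.
Ping the database so that an unreachable server is reported here rather than on the first query.
*/
if err = db.Ping(); err != nil {
err = fmt.Errorf("Database is not reachable: %s", err.Error())
return
}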
return
}
|
[
"\"WEBSITE_PORT\"",
"\"USER_DB_USER\"",
"\"USER_DB_PASS\"",
"\"USER_DB_NAME\"",
"\"USER_DB_URL\""
] |
[] |
[
"USER_DB_NAME",
"USER_DB_URL",
"USER_DB_USER",
"USER_DB_PASS",
"WEBSITE_PORT"
] |
[]
|
["USER_DB_NAME", "USER_DB_URL", "USER_DB_USER", "USER_DB_PASS", "WEBSITE_PORT"]
|
go
| 5 | 0 | |
test/data_test.py
|
# -*- coding: utf-8 -*-
'''
Test code for the data module
'''
import unittest
from lixinger_openapi.token import load_token
from lixinger_openapi.data import (
indice_fundamental,
)
class DataTest(unittest.TestCase):
def setUp(self):
self.token = ''
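# token.cfg is expected to contain the lixinger API token on a single line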
with open('token.cfg', 'r') as token_cfg:
self.token = token_cfg.read().strip()
def test_indice_fundamental(self):
load_token(self.token)
rlt = indice_fundamental(startDate='2018-01-01', endDate='2018-01-31', metrics=['pe_ttm'], stockCodes=['000300','000905'])
self.assertEqual(1, rlt['code'])
self.assertEqual('If get range date data, we only support single stockCode. Please change your stockCodes.', rlt['msg'])
self.assertEqual(None, rlt['data'])
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
contrib/gitian-build.py
|
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The BitSilver Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import argparse
import os
import subprocess
import sys
def setup():
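"""Install the required tools, clone the gitian repositories and build the base VM/LXC/Docker image."""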
global args, workdir
programs = ['ruby', 'git', 'make', 'wget', 'curl']
if args.kvm:
programs += ['apt-cacher-ng', 'python-vm-builder', 'qemu-kvm', 'qemu-utils']
elif args.docker and not os.path.isfile('/lib/systemd/system/docker.service'):
dockers = ['docker.io', 'docker-ce']
for i in dockers:
return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i])
if return_code == 0:
break
if return_code != 0:
print('Cannot find any way to install Docker.', file=sys.stderr)
sys.exit(1)
else:
programs += ['apt-cacher-ng', 'lxc', 'debootstrap']
subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs)
if not os.path.isdir('gitian.sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/bitsilver-core/gitian.sigs.git'])
if not os.path.isdir('bitsilver-detached-sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/bitsilver-core/bitsilver-detached-sigs.git'])
if not os.path.isdir('gitian-builder'):
subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git'])
if not os.path.isdir('bitsilver'):
subprocess.check_call(['git', 'clone', 'https://github.com/bitsilver/bitsilver.git'])
os.chdir('gitian-builder')
make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64']
if args.docker:
make_image_prog += ['--docker']
elif not args.kvm:
make_image_prog += ['--lxc']
subprocess.check_call(make_image_prog)
os.chdir(workdir)
if args.is_bionic and not args.kvm and not args.docker:
subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net'])
print('Reboot is required')
sys.exit(0)
def build():
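"""Run the gitian builds for the selected operating systems, sign the build assert files and collect the binaries into bitsilver-binaries/<version>."""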
global args, workdir
os.makedirs('bitsilver-binaries/' + args.version, exist_ok=True)
print('\nBuilding Dependencies\n')
os.chdir('gitian-builder')
os.makedirs('inputs', exist_ok=True)
subprocess.check_call(['wget', '-O', 'inputs/osslsigncode-2.0.tar.gz', 'https://github.com/mtrojnar/osslsigncode/archive/2.0.tar.gz'])
subprocess.check_call(["echo '5a60e0a4b3e0b4d655317b2f12a810211c50242138322b16e7e01c6fbb89d92f inputs/osslsigncode-2.0.tar.gz' | sha256sum -c"], shell=True)
subprocess.check_call(['make', '-C', '../bitsilver/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common'])
if args.linux:
print('\nCompiling ' + args.version + ' Linux')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'bitsilver='+args.commit, '--url', 'bitsilver='+args.url, '../bitsilver/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../bitsilver/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call('mv build/out/bitsilver-*.tar.gz build/out/src/bitsilver-*.tar.gz ../bitsilver-binaries/'+args.version, shell=True)
if args.windows:
print('\nCompiling ' + args.version + ' Windows')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'bitsilver='+args.commit, '--url', 'bitsilver='+args.url, '../bitsilver/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs/', '../bitsilver/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call('mv build/out/bitsilver-*-win-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/bitsilver-*.zip build/out/bitsilver-*.exe build/out/src/bitsilver-*.tar.gz ../bitsilver-binaries/'+args.version, shell=True)
if args.macos:
print('\nCompiling ' + args.version + ' MacOS')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'bitsilver='+args.commit, '--url', 'bitsilver='+args.url, '../bitsilver/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../bitsilver/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call('mv build/out/bitsilver-*-osx-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/bitsilver-*.tar.gz build/out/bitsilver-*.dmg build/out/src/bitsilver-*.tar.gz ../bitsilver-binaries/'+args.version, shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Unsigned Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer])
subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer])
os.chdir(workdir)
def sign():
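"""Run the detached-signature descriptors to produce signed Windows and MacOS binaries and optionally commit the signed sigs."""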
global args, workdir
os.chdir('gitian-builder')
if args.windows:
print('\nSigning ' + args.version + ' Windows')
subprocess.check_call('cp inputs/bitsilver-' + args.version + '-win-unsigned.tar.gz inputs/bitsilver-win-unsigned.tar.gz', shell=True)
subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../bitsilver/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs/', '../bitsilver/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call('mv build/out/bitsilver-*win64-setup.exe ../bitsilver-binaries/'+args.version, shell=True)
if args.macos:
print('\nSigning ' + args.version + ' MacOS')
subprocess.check_call('cp inputs/bitsilver-' + args.version + '-osx-unsigned.tar.gz inputs/bitsilver-osx-unsigned.tar.gz', shell=True)
subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../bitsilver/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs/', '../bitsilver/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call('mv build/out/bitsilver-osx-signed.dmg ../bitsilver-binaries/'+args.version+'/bitsilver-'+args.version+'-osx.dmg', shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Signed Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer])
subprocess.check_call(['git', 'commit', '-a', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer])
os.chdir(workdir)
def verify():
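"""Verify the gitian signatures for every build descriptor; return 0 on success and 1 if any verification fails."""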
global args, workdir
rc = 0
os.chdir('gitian-builder')
print('\nVerifying v'+args.version+' Linux\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-linux', '../bitsilver/contrib/gitian-descriptors/gitian-linux.yml']):
print('Verifying v'+args.version+' Linux FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' Windows\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-unsigned', '../bitsilver/contrib/gitian-descriptors/gitian-win.yml']):
print('Verifying v'+args.version+' Windows FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' MacOS\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-unsigned', '../bitsilver/contrib/gitian-descriptors/gitian-osx.yml']):
print('Verifying v'+args.version+' MacOS FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' Signed Windows\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-signed', '../bitsilver/contrib/gitian-descriptors/gitian-win-signer.yml']):
print('Verifying v'+args.version+' Signed Windows FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' Signed MacOS\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-signed', '../bitsilver/contrib/gitian-descriptors/gitian-osx-signer.yml']):
print('Verifying v'+args.version+' Signed MacOS FAILED\n')
rc = 1
os.chdir(workdir)
return rc
def main():
global args, workdir
parser = argparse.ArgumentParser(description='Script for running full Gitian builds.')
parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch')
parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request')
parser.add_argument('-u', '--url', dest='url', default='https://github.com/bitsilver/bitsilver', help='Specify the URL of the repository. Default is %(default)s')
parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build')
parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build')
parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS')
parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries')
parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS')
parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s')
parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s')
parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC')
parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC')
parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Only works on Debian-based systems (Ubuntu, Debian)')
parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.')
parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git')
parser.add_argument('signer', nargs='?', help='GPG signer to sign each build assert file')
parser.add_argument('version', nargs='?', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified')
args = parser.parse_args()
workdir = os.getcwd()
args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs'])
if args.kvm and args.docker:
raise Exception('Error: cannot have both kvm and docker')
# Ensure no more than one environment variable for gitian-builder (USE_LXC, USE_VBOX, USE_DOCKER) is set as they
# can interfere (e.g., USE_LXC being set shadows USE_DOCKER; for details see gitian-builder/libexec/make-clean-vm).
os.environ['USE_LXC'] = ''
os.environ['USE_VBOX'] = ''
os.environ['USE_DOCKER'] = ''
if args.docker:
os.environ['USE_DOCKER'] = '1'
elif not args.kvm:
os.environ['USE_LXC'] = '1'
if 'GITIAN_HOST_IP' not in os.environ.keys():
os.environ['GITIAN_HOST_IP'] = '10.0.3.1'
if 'LXC_GUEST_IP' not in os.environ.keys():
os.environ['LXC_GUEST_IP'] = '10.0.3.5'
if args.setup:
setup()
if args.buildsign:
args.build = True
args.sign = True
if not args.build and not args.sign and not args.verify:
sys.exit(0)
args.linux = 'l' in args.os
args.windows = 'w' in args.os
args.macos = 'm' in args.os
# Disable for MacOS if no SDK found
if args.macos and not os.path.isfile('gitian-builder/inputs/MacOSX10.14.sdk.tar.gz'):
print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
args.macos = False
args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'
script_name = os.path.basename(sys.argv[0])
if not args.signer:
print(script_name+': Missing signer')
print('Try '+script_name+' --help for more information')
sys.exit(1)
if not args.version:
print(script_name+': Missing version')
print('Try '+script_name+' --help for more information')
sys.exit(1)
# Add leading 'v' for tags
if args.commit and args.pull:
raise Exception('Cannot have both commit and pull')
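# Note: -c means the version argument already names a commit or branch; otherwise it is treated as a tag and 'v' is prepended below.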
args.commit = ('' if args.commit else 'v') + args.version
os.chdir('bitsilver')
if args.pull:
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
os.chdir('../gitian-builder/inputs/bitsilver')
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip()
args.version = 'pull-' + args.version
print(args.commit)
subprocess.check_call(['git', 'fetch'])
subprocess.check_call(['git', 'checkout', args.commit])
os.chdir(workdir)
os.chdir('gitian-builder')
subprocess.check_call(['git', 'pull'])
os.chdir(workdir)
if args.build:
build()
if args.sign:
sign()
if args.verify:
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'pull'])
os.chdir(workdir)
sys.exit(verify())
if __name__ == '__main__':
main()
|
[] |
[] |
[
"USE_DOCKER",
"USE_LXC",
"USE_VBOX",
"GITIAN_HOST_IP",
"LXC_GUEST_IP"
] |
[]
|
["USE_DOCKER", "USE_LXC", "USE_VBOX", "GITIAN_HOST_IP", "LXC_GUEST_IP"]
|
python
| 5 | 0 | |
src/jvmgo/ch05/classpath/classpath.go
|
package classpath
import (
"os"
"path/filepath"
)
type Classpath struct {
bootClasspath Entry
extClasspath Entry
userClasspath Entry
}
func Parse(jreOption, cpOption string) *Classpath {
cp := &Classpath{}
cp.parseBootAndExtClasspath(jreOption)
cp.parseUserClasspath(cpOption)
return cp
}
func (self *Classpath) parseBootAndExtClasspath(jreOption string) {
jreDir := getJreDir(jreOption)
// jre/lib/*
jreLibPath := filepath.Join(jreDir, "lib", "*")
self.bootClasspath = newWildcardEntry(jreLibPath)
// jre/lib/ext/*
jreExtPath := filepath.Join(jreDir, "lib", "ext", "*")
self.extClasspath = newWildcardEntry(jreExtPath)
}
func getJreDir(jreOption string) string {
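// Resolution order: the jreOption argument, then ./jre, then $JAVA_HOME/jre.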
if jreOption != "" && exists(jreOption) {
return jreOption
}
if exists("./jre") {
return "./jre"
}
if jh := os.Getenv("JAVA_HOME"); jh != "" {
return filepath.Join(jh, "jre")
}
panic("Can not find jre folder!")
}
func exists(path string) bool {
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
func (self *Classpath) parseUserClasspath(cpOption string) {
if cpOption == "" {
cpOption = "."
}
self.userClasspath = newEntry(cpOption)
}
func (self *Classpath) ReadClass(className string) ([]byte, Entry, error) {
className = className + ".class"
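// Search order: bootstrap classpath first, then the extension classpath, then the user classpath.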
if data, entry, err := self.bootClasspath.readClass(className); err == nil {
return data, entry, err
}
if data, entry, err := self.extClasspath.readClass(className); err == nil {
return data, entry, err
}
return self.userClasspath.readClass(className)
}
func (self *Classpath) String() string {
return self.userClasspath.String()
}
|
[
"\"JAVA_HOME\""
] |
[] |
[
"JAVA_HOME"
] |
[]
|
["JAVA_HOME"]
|
go
| 1 | 0 | |
Voice/Implement two-factor with Python/app.py
|
import os
import requests
import random
import pprint
import json
from signalwire.rest import Client as signalwire_client
from signalwire.voice_response import VoiceResponse, Say
from flask import Flask,request
app = Flask(__name__)
# In-memory store for challenge sessions
data = {}
data['requests'] = []
# Lookup auth session, and verify if it matches
def lookup_code(number,code):
# Loop through all sessions
for i in range(len(data['requests'])):
# Check whether the number matches a session's number; stored numbers are prefixed with '+'
if '+' + number == data['requests'][i]['number']:
# We found a session matching that number; now check the challenge code
if code == data['requests'][i]['code']:
# We have a match, let's remove the validated session and return true
data['requests'].pop(i)
return True
# catch all for failed challenges
return False
# Listen for '/validate-auth' route
@app.route('/validate-auth')
def validate_auth():
# Grab the authorization code from the GET/POST request
check_code = request.values.get('auth_code')
# Grab the phone number from the GET/POST request
number = request.values.get('number')
# Verify the number and challenge code
if lookup_code(number, check_code):
# Return 200 on success
return "200"
# Return 403 (Forbidden) on failure
return "403"
# Listen on '/request-auth' for creation of a challenge session from GET/POST requests
@app.route('/request-auth', methods=['GET', 'POST'])
def request_auth():
# Initialize SignalWire Client
client = signalwire_client(os.environ['SIGNALWIRE_PROJECT'], os.environ['SIGNALWIRE_TOKEN'], signalwire_space_url = os.environ['SIGNALWIRE_SPACE'])
# Generate a random 6-digit code between 123456 and 987654
auth_code = str(random.randint(123456,987654))
# Get the phone number to challenge from the request
number = "+" + request.values.get('number')
# Add the session to the in-memory global request object
data['requests'].append({
'number': number,
'code': auth_code
})
# Place a voice call to the provided phone number, with the challenge code in the message.
call = client.calls.create(
from_ = os.environ['SIGNALWIRE_NUMBER'],
url = "http://" + os.environ['HOSTNAME'] + "/call_2fa?auth_code=" + auth_code,
to = number
)
return "200"
# Listen on route '/call_2fa' for GET/POST requests; returns the LaML response for the phone call
@app.route('/call_2fa', methods=['GET', 'POST'])
def call_2fa():
# Read the passed auth code from the request
auth_code = request.values.get('auth_code')
# Prepare a voice response
response = VoiceResponse()
# Voice response: TTS messages
response.say('Your authorization code is: ' + auth_code )
response.say('Repeating...')
response.say('Your authorization code is: ' + auth_code )
response.say('Thank you for calling, GoodBye!')
# return voice response LaML
return str(response)
# Default route
@app.route('/')
def hello():
return "Hello World!"
if __name__ == '__main__':
app.run(host="0.0.0.0")
|
[] |
[] |
[
"HOSTNAME",
"SIGNALWIRE_NUMBER",
"SIGNALWIRE_SPACE",
"SIGNALWIRE_PROJECT",
"SIGNALWIRE_TOKEN"
] |
[]
|
["HOSTNAME", "SIGNALWIRE_NUMBER", "SIGNALWIRE_SPACE", "SIGNALWIRE_PROJECT", "SIGNALWIRE_TOKEN"]
|
python
| 5 | 0 | |
integration-cli/docker_cli_pull_local_test.go
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest"
"github.com/docker/distribution/manifest/manifestlist"
"github.com/docker/distribution/manifest/schema2"
"github.com/docker/docker/integration-cli/checker"
icmd "github.com/docker/docker/pkg/testutil/cmd"
"github.com/go-check/check"
"github.com/opencontainers/go-digest"
)
// testPullImageWithAliases pulls a specific image tag and verifies that any aliases (i.e., other
// tags for the same image) are not also pulled down.
//
// Ref: docker/docker#8141
func testPullImageWithAliases(c *check.C) {
repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
repos := []string{}
for _, tag := range []string{"recent", "fresh"} {
repos = append(repos, fmt.Sprintf("%v:%v", repoName, tag))
}
// Tag and push the same image multiple times.
for _, repo := range repos {
dockerCmd(c, "tag", "busybox", repo)
dockerCmd(c, "push", repo)
}
// Clear local images store.
args := append([]string{"rmi"}, repos...)
dockerCmd(c, args...)
// Pull a single tag and verify it doesn't bring down all aliases.
dockerCmd(c, "pull", repos[0])
dockerCmd(c, "inspect", repos[0])
for _, repo := range repos[1:] {
_, _, err := dockerCmdWithError("inspect", repo)
c.Assert(err, checker.NotNil, check.Commentf("Image %v shouldn't have been pulled down", repo))
}
}
func (s *DockerRegistrySuite) TestPullImageWithAliases(c *check.C) {
testPullImageWithAliases(c)
}
func (s *DockerSchema1RegistrySuite) TestPullImageWithAliases(c *check.C) {
testPullImageWithAliases(c)
}
// testConcurrentPullWholeRepo pulls the same repo concurrently.
func testConcurrentPullWholeRepo(c *check.C) {
repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
repos := []string{}
for _, tag := range []string{"recent", "fresh", "todays"} {
repo := fmt.Sprintf("%v:%v", repoName, tag)
buildImageSuccessfully(c, repo, withDockerfile(fmt.Sprintf(`
FROM busybox
ENTRYPOINT ["/bin/echo"]
ENV FOO foo
ENV BAR bar
CMD echo %s
`, repo)))
dockerCmd(c, "push", repo)
repos = append(repos, repo)
}
// Clear local images store.
args := append([]string{"rmi"}, repos...)
dockerCmd(c, args...)
// Run multiple re-pulls concurrently
results := make(chan error)
numPulls := 3
for i := 0; i != numPulls; i++ {
go func() {
result := icmd.RunCommand(dockerBinary, "pull", "-a", repoName)
results <- result.Error
}()
}
// These checks are separate from the loop above because the check
// package is not goroutine-safe.
for i := 0; i != numPulls; i++ {
err := <-results
c.Assert(err, checker.IsNil, check.Commentf("concurrent pull failed with error: %v", err))
}
// Ensure all tags were pulled successfully
for _, repo := range repos {
dockerCmd(c, "inspect", repo)
out, _ := dockerCmd(c, "run", "--rm", repo)
c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo)
}
}
func (s *DockerRegistrySuite) testConcurrentPullWholeRepo(c *check.C) {
testConcurrentPullWholeRepo(c)
}
func (s *DockerSchema1RegistrySuite) testConcurrentPullWholeRepo(c *check.C) {
testConcurrentPullWholeRepo(c)
}
// testConcurrentFailingPull tries a concurrent pull that doesn't succeed.
func testConcurrentFailingPull(c *check.C) {
repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
// Run multiple pulls concurrently
results := make(chan error)
numPulls := 3
for i := 0; i != numPulls; i++ {
go func() {
result := icmd.RunCommand(dockerBinary, "pull", repoName+":asdfasdf")
results <- result.Error
}()
}
// These checks are separate from the loop above because the check
// package is not goroutine-safe.
for i := 0; i != numPulls; i++ {
err := <-results
c.Assert(err, checker.NotNil, check.Commentf("expected pull to fail"))
}
}
func (s *DockerRegistrySuite) testConcurrentFailingPull(c *check.C) {
testConcurrentFailingPull(c)
}
func (s *DockerSchema1RegistrySuite) testConcurrentFailingPull(c *check.C) {
testConcurrentFailingPull(c)
}
// testConcurrentPullMultipleTags pulls multiple tags from the same repo
// concurrently.
func testConcurrentPullMultipleTags(c *check.C) {
repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
repos := []string{}
for _, tag := range []string{"recent", "fresh", "todays"} {
repo := fmt.Sprintf("%v:%v", repoName, tag)
buildImageSuccessfully(c, repo, withDockerfile(fmt.Sprintf(`
FROM busybox
ENTRYPOINT ["/bin/echo"]
ENV FOO foo
ENV BAR bar
CMD echo %s
`, repo)))
dockerCmd(c, "push", repo)
repos = append(repos, repo)
}
// Clear local images store.
args := append([]string{"rmi"}, repos...)
dockerCmd(c, args...)
// Re-pull individual tags, in parallel
results := make(chan error)
for _, repo := range repos {
go func(repo string) {
result := icmd.RunCommand(dockerBinary, "pull", repo)
results <- result.Error
}(repo)
}
// These checks are separate from the loop above because the check
// package is not goroutine-safe.
for range repos {
err := <-results
c.Assert(err, checker.IsNil, check.Commentf("concurrent pull failed with error: %v", err))
}
// Ensure all tags were pulled successfully
for _, repo := range repos {
dockerCmd(c, "inspect", repo)
out, _ := dockerCmd(c, "run", "--rm", repo)
c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo)
}
}
func (s *DockerRegistrySuite) TestConcurrentPullMultipleTags(c *check.C) {
testConcurrentPullMultipleTags(c)
}
func (s *DockerSchema1RegistrySuite) TestConcurrentPullMultipleTags(c *check.C) {
testConcurrentPullMultipleTags(c)
}
// testPullIDStability verifies that pushing an image and pulling it back
// preserves the image ID.
func testPullIDStability(c *check.C) {
derivedImage := privateRegistryURL + "/dockercli/id-stability"
baseImage := "busybox"
buildImageSuccessfully(c, derivedImage, withDockerfile(fmt.Sprintf(`
FROM %s
ENV derived true
ENV asdf true
RUN dd if=/dev/zero of=/file bs=1024 count=1024
CMD echo %s
`, baseImage, derivedImage)))
originalID := getIDByName(c, derivedImage)
dockerCmd(c, "push", derivedImage)
// Pull
out, _ := dockerCmd(c, "pull", derivedImage)
if strings.Contains(out, "Pull complete") {
c.Fatalf("repull redownloaded a layer: %s", out)
}
derivedIDAfterPull := getIDByName(c, derivedImage)
if derivedIDAfterPull != originalID {
c.Fatal("image's ID unexpectedly changed after a repush/repull")
}
// Make sure the image runs correctly
out, _ = dockerCmd(c, "run", "--rm", derivedImage)
if strings.TrimSpace(out) != derivedImage {
c.Fatalf("expected %s; got %s", derivedImage, out)
}
// Confirm that repushing and repulling does not change the computed ID
dockerCmd(c, "push", derivedImage)
dockerCmd(c, "rmi", derivedImage)
dockerCmd(c, "pull", derivedImage)
derivedIDAfterPull = getIDByName(c, derivedImage)
if derivedIDAfterPull != originalID {
c.Fatal("image's ID unexpectedly changed after a repush/repull")
}
// Make sure the image still runs
out, _ = dockerCmd(c, "run", "--rm", derivedImage)
if strings.TrimSpace(out) != derivedImage {
c.Fatalf("expected %s; got %s", derivedImage, out)
}
}
func (s *DockerRegistrySuite) TestPullIDStability(c *check.C) {
testPullIDStability(c)
}
func (s *DockerSchema1RegistrySuite) TestPullIDStability(c *check.C) {
testPullIDStability(c)
}
// #21213
func testPullNoLayers(c *check.C) {
repoName := fmt.Sprintf("%v/dockercli/scratch", privateRegistryURL)
buildImageSuccessfully(c, repoName, withDockerfile(`
FROM scratch
ENV foo bar`))
dockerCmd(c, "push", repoName)
dockerCmd(c, "rmi", repoName)
dockerCmd(c, "pull", repoName)
}
func (s *DockerRegistrySuite) TestPullNoLayers(c *check.C) {
testPullNoLayers(c)
}
func (s *DockerSchema1RegistrySuite) TestPullNoLayers(c *check.C) {
testPullNoLayers(c)
}
func (s *DockerRegistrySuite) TestPullManifestList(c *check.C) {
testRequires(c, NotArm)
pushDigest, err := setupImage(c)
c.Assert(err, checker.IsNil, check.Commentf("error setting up image"))
// Inject a manifest list into the registry
manifestList := &manifestlist.ManifestList{
Versioned: manifest.Versioned{
SchemaVersion: 2,
MediaType: manifestlist.MediaTypeManifestList,
},
Manifests: []manifestlist.ManifestDescriptor{
{
Descriptor: distribution.Descriptor{
Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b",
Size: 3253,
MediaType: schema2.MediaTypeManifest,
},
Platform: manifestlist.PlatformSpec{
Architecture: "bogus_arch",
OS: "bogus_os",
},
},
{
Descriptor: distribution.Descriptor{
Digest: pushDigest,
Size: 3253,
MediaType: schema2.MediaTypeManifest,
},
Platform: manifestlist.PlatformSpec{
Architecture: runtime.GOARCH,
OS: runtime.GOOS,
},
},
},
}
manifestListJSON, err := json.MarshalIndent(manifestList, "", " ")
c.Assert(err, checker.IsNil, check.Commentf("error marshalling manifest list"))
manifestListDigest := digest.FromBytes(manifestListJSON)
hexDigest := manifestListDigest.Hex()
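// The registry filesystem layout stores blobs under blobs/sha256/<first two hex chars of digest>/<digest>/data.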
registryV2Path := s.reg.Path()
// Write manifest list to blob store
blobDir := filepath.Join(registryV2Path, "blobs", "sha256", hexDigest[:2], hexDigest)
err = os.MkdirAll(blobDir, 0755)
c.Assert(err, checker.IsNil, check.Commentf("error creating blob dir"))
blobPath := filepath.Join(blobDir, "data")
err = ioutil.WriteFile(blobPath, []byte(manifestListJSON), 0644)
c.Assert(err, checker.IsNil, check.Commentf("error writing manifest list"))
// Add to revision store
revisionDir := filepath.Join(registryV2Path, "repositories", remoteRepoName, "_manifests", "revisions", "sha256", hexDigest)
err = os.Mkdir(revisionDir, 0755)
c.Assert(err, checker.IsNil, check.Commentf("error creating revision dir"))
revisionPath := filepath.Join(revisionDir, "link")
err = ioutil.WriteFile(revisionPath, []byte(manifestListDigest.String()), 0644)
c.Assert(err, checker.IsNil, check.Commentf("error writing revision link"))
// Update tag
tagPath := filepath.Join(registryV2Path, "repositories", remoteRepoName, "_manifests", "tags", "latest", "current", "link")
err = ioutil.WriteFile(tagPath, []byte(manifestListDigest.String()), 0644)
c.Assert(err, checker.IsNil, check.Commentf("error writing tag link"))
// Verify that the image can be pulled through the manifest list.
out, _ := dockerCmd(c, "pull", repoName)
// The pull output includes "Digest: <digest>", so find that
matches := digestRegex.FindStringSubmatch(out)
c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out))
pullDigest := matches[1]
// Make sure the pushed and pull digests match
c.Assert(manifestListDigest.String(), checker.Equals, pullDigest)
// Was the image actually created?
dockerCmd(c, "inspect", repoName)
dockerCmd(c, "rmi", repoName)
}
// #23100
func (s *DockerRegistryAuthHtpasswdSuite) TestPullWithExternalAuthLoginWithScheme(c *check.C) {
osPath := os.Getenv("PATH")
defer os.Setenv("PATH", osPath)
workingDir, err := os.Getwd()
c.Assert(err, checker.IsNil)
absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth"))
c.Assert(err, checker.IsNil)
testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute)
os.Setenv("PATH", testPath)
repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL)
tmp, err := ioutil.TempDir("", "integration-cli-")
c.Assert(err, checker.IsNil)
externalAuthConfig := `{ "credsStore": "shell-test" }`
configPath := filepath.Join(tmp, "config.json")
err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644)
c.Assert(err, checker.IsNil)
dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL)
b, err := ioutil.ReadFile(configPath)
c.Assert(err, checker.IsNil)
c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":")
dockerCmd(c, "--config", tmp, "tag", "busybox", repoName)
dockerCmd(c, "--config", tmp, "push", repoName)
dockerCmd(c, "--config", tmp, "logout", privateRegistryURL)
dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), "https://"+privateRegistryURL)
dockerCmd(c, "--config", tmp, "pull", repoName)
// likewise push should work
repoName2 := fmt.Sprintf("%v/dockercli/busybox:nocreds", privateRegistryURL)
dockerCmd(c, "tag", repoName, repoName2)
dockerCmd(c, "--config", tmp, "push", repoName2)
// logout should also work with the scheme because it will be stripped
dockerCmd(c, "--config", tmp, "logout", "https://"+privateRegistryURL)
}
func (s *DockerRegistryAuthHtpasswdSuite) TestPullWithExternalAuth(c *check.C) {
osPath := os.Getenv("PATH")
defer os.Setenv("PATH", osPath)
workingDir, err := os.Getwd()
c.Assert(err, checker.IsNil)
absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth"))
c.Assert(err, checker.IsNil)
testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute)
os.Setenv("PATH", testPath)
repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL)
tmp, err := ioutil.TempDir("", "integration-cli-")
c.Assert(err, checker.IsNil)
externalAuthConfig := `{ "credsStore": "shell-test" }`
configPath := filepath.Join(tmp, "config.json")
err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644)
c.Assert(err, checker.IsNil)
dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL)
b, err := ioutil.ReadFile(configPath)
c.Assert(err, checker.IsNil)
c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":")
dockerCmd(c, "--config", tmp, "tag", "busybox", repoName)
dockerCmd(c, "--config", tmp, "push", repoName)
dockerCmd(c, "--config", tmp, "pull", repoName)
}
// TestRunImplicitPullWithNoTag should pull implicitly only the default tag (latest)
func (s *DockerRegistrySuite) TestRunImplicitPullWithNoTag(c *check.C) {
testRequires(c, DaemonIsLinux)
repo := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL)
repoTag1 := fmt.Sprintf("%v:latest", repo)
repoTag2 := fmt.Sprintf("%v:t1", repo)
// tag the image and upload it to the private registry
dockerCmd(c, "tag", "busybox", repoTag1)
dockerCmd(c, "tag", "busybox", repoTag2)
dockerCmd(c, "push", repo)
dockerCmd(c, "rmi", repoTag1)
dockerCmd(c, "rmi", repoTag2)
out, _, err := dockerCmdWithError("run", repo)
c.Assert(err, check.IsNil)
c.Assert(out, checker.Contains, fmt.Sprintf("Unable to find image '%s:latest' locally", repo))
// There should be only one line for repo, the one with repo:latest
outImageCmd, _, err := dockerCmdWithError("images", repo)
splitOutImageCmd := strings.Split(strings.TrimSpace(outImageCmd), "\n")
c.Assert(splitOutImageCmd, checker.HasLen, 2)
}
|
[
"\"PATH\"",
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
go
| 1 | 0 | |
server/plugin/plugin.go
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package plugin
import (
"github.com/apache/servicecomb-service-center/pkg/log"
"github.com/apache/servicecomb-service-center/pkg/plugin"
"github.com/apache/servicecomb-service-center/pkg/util"
"github.com/astaxie/beego"
"sync"
)
var pluginMgr = &PluginManager{}
func init() {
pluginMgr.Initialize()
}
type wrapInstance struct {
dynamic bool
instance PluginInstance
lock sync.RWMutex
}
type PluginManager struct {
plugins map[PluginName]map[string]*Plugin
instances map[PluginName]*wrapInstance
}
func (pm *PluginManager) Initialize() {
pm.plugins = make(map[PluginName]map[string]*Plugin, int(typeEnd))
pm.instances = make(map[PluginName]*wrapInstance, int(typeEnd))
for t := PluginName(0); t != typeEnd; t++ {
pm.instances[t] = &wrapInstance{}
}
}
func (pm *PluginManager) ReloadAll() {
for pn := range pm.instances {
pm.Reload(pn)
}
}
// unsafe: Register is not goroutine-safe; callers must provide their own synchronization.
func (pm *PluginManager) Register(p Plugin) {
m, ok := pm.plugins[p.PName]
if !ok {
m = make(map[string]*Plugin, 5)
}
m[p.Name] = &p
pm.plugins[p.PName] = m
log.Infof("load '%s' plugin named '%s'", p.PName, p.Name)
}
func (pm *PluginManager) Get(pn PluginName, name string) *Plugin {
m, ok := pm.plugins[pn]
if !ok {
return nil
}
return m[name]
}
func (pm *PluginManager) Instance(pn PluginName) PluginInstance {
wi := pm.instances[pn]
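// Double-checked locking: check under the read lock first, then re-check under the write lock before creating the instance.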
wi.lock.RLock()
if wi.instance != nil {
wi.lock.RUnlock()
return wi.instance
}
wi.lock.RUnlock()
wi.lock.Lock()
if wi.instance != nil {
wi.lock.Unlock()
return wi.instance
}
pm.New(pn)
wi.lock.Unlock()
return wi.instance
}
func (pm *PluginManager) New(pn PluginName) {
var (
title = STATIC
f func() PluginInstance
)
wi := pm.instances[pn]
p := pm.existDynamicPlugin(pn)
if p != nil {
wi.dynamic = true
title = DYNAMIC
f = p.New
} else {
wi.dynamic = false
m, ok := pm.plugins[pn]
if !ok {
return
}
name := beego.AppConfig.DefaultString(pn.String()+"_plugin", BUILDIN)
p, ok = m[name]
if !ok {
return
}
f = p.New
pn.ActiveConfigs().Set(keyPluginName, name)
}
log.Infof("call %s '%s' plugin %s(), new a '%s' instance",
title, p.PName, util.FuncName(f), p.Name)
wi.instance = f()
}
func (pm *PluginManager) Reload(pn PluginName) {
wi := pm.instances[pn]
wi.lock.Lock()
wi.instance = nil
pn.ClearConfigs()
wi.lock.Unlock()
}
func (pm *PluginManager) existDynamicPlugin(pn PluginName) *Plugin {
m, ok := pm.plugins[pn]
if !ok {
return nil
}
// the 'buildin' implementation of every plugin should call DynamicPluginFunc()
if plugin.PluginLoader().Exist(pn.String()) {
return m[BUILDIN]
}
return nil
}
func Plugins() *PluginManager {
return pluginMgr
}
func RegisterPlugin(p Plugin) {
Plugins().Register(p)
}
func LoadPlugins() {
for t := PluginName(0); t != typeEnd; t++ {
Plugins().Instance(t)
}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
internal/state/dynamostate_integration_test.go
|
// +build integration
package state_test
import (
"context"
"fmt"
"os"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/cresta/hostname-for-target-group/internal/state"
"github.com/cresta/zapctx/testhelp/testhelp"
"github.com/stretchr/testify/require"
)
func TestDynamoDBStorage(t *testing.T) {
if os.Getenv("DYNAMODB_TABLE") == "" {
t.Skip("skipping test: expect env DYNAMODB_TABLE=<dynamo_table>")
}
ses, err := session.NewSession()
require.NoError(t, err)
st := &state.DynamoDBStorage{
TableName: os.Getenv("DYNAMODB_TABLE"),
Log: testhelp.ZapTestingLogger(t),
Client: dynamodb.New(ses),
}
testAnyStateStorage(t, st)
testAnyStateCache(t, st)
}
func testAnyStateCache(t *testing.T, store state.SyncCache) {
ctx := context.Background()
prev, err := store.GetSync(ctx, time.Now())
require.NoError(t, err)
require.Nil(t, prev)
now := time.Now()
toCache := map[state.TargetGroupARN]string{
"arn:test": "1.2.3.4",
}
err = store.StoreSync(ctx, toCache, now.Add(time.Minute))
require.NoError(t, err)
prev, err = store.GetSync(ctx, now.Add(time.Second))
require.NoError(t, err)
require.Equal(t, toCache, prev)
prev, err = store.GetSync(ctx, now.Add(time.Second*61))
require.NoError(t, err)
require.Nil(t, prev)
err = store.StoreSync(ctx, nil, now.Add(time.Minute))
require.NoError(t, err)
}
func testAnyStateStorage(t *testing.T, store state.Storage) {
ctx := context.Background()
testName := fmt.Sprintf("TestDynamoDBStorage:%s", time.Now())
sk := state.Keys{
TargetGroupARN: state.TargetGroupARN(testName),
Hostname: "www.google.com",
}
// States should start missing
out, err := store.GetStates(ctx, []state.Keys{sk})
require.NoError(t, err)
require.Empty(t, out[sk])
storedStates := []state.Target{
{
IP: "1.2.3.4",
TimesMissing: 0,
}, {
IP: "1.2.3.5",
TimesMissing: 3,
},
}
// Should be able to add a state
err = store.Store(ctx, map[state.Keys]state.State{
sk: {
Targets: storedStates,
Version: 1,
},
})
require.NoError(t, err)
// Should see the state when you fetch it out
out, err = store.GetStates(ctx, []state.Keys{sk})
require.NoError(t, err)
require.Len(t, out, 1)
require.NotEmpty(t, out[sk])
require.Equal(t, 1, out[sk].Version)
require.Len(t, out[sk].Targets, 2)
require.Equal(t, storedStates, out[sk].Targets)
// Now remove the item
err = store.Store(ctx, map[state.Keys]state.State{
sk: {},
})
require.NoError(t, err)
// And expect it gone
out, err = store.GetStates(ctx, []state.Keys{sk})
require.NoError(t, err)
require.Empty(t, out[sk])
}
|
[
"\"DYNAMODB_TABLE\"",
"\"DYNAMODB_TABLE\""
] |
[] |
[
"DYNAMODB_TABLE"
] |
[]
|
["DYNAMODB_TABLE"]
|
go
| 1 | 0 | |
util.go
|
package main
import (
"encoding/binary"
"crypto/rand"
"io/ioutil"
"math"
"os"
"path/filepath"
"strings"
)
// Uses crypto/rand (the OS CSPRNG, e.g. /dev/urandom) to generate
// random numbers. We don't need reproducible sequences, so no RNG
// state is saved.
func randUint() uint64 {
b := make([]byte, 8)
_, err := rand.Read(b)
for err != nil {
_, err = rand.Read(b)
}
return binary.BigEndian.Uint64(b)
}
func randFloat() float64 {
return float64(randUint()) / float64(math.MaxUint64)
}
// The range returned is inclusive.
func randRange(low, high uint) uint {
f := randFloat() * float64(high - low + 1)
return uint(math.Floor(f)) + low
}
// Returns true when under the specified chance.
func chance(f float64) bool {
return randFloat() < f
}
// Returns the directory listing as full path names. The passed path
// must be absolute.
func listDirs(path string) []string {
if !filepath.IsAbs(path) {
panic("cannot list dirs on non-absolute path")
}
dirs := []string{}
files, _ := ioutil.ReadDir(path)
for _, file := range files {
isPrivate := strings.HasPrefix(file.Name(), ".")
if file.IsDir() && !isPrivate {
dirs = append(dirs, filepath.Join(path, file.Name()))
}
}
return dirs
}
// Returns true if you can descend from this path. Descending means going
// down into a directory, as opposed to up (`cd ..` is up). The passed path
// must be absolute.
func canDescend(path string) bool {
dirs := listDirs(path)
return len(dirs) > 0
}
// Returns a random path to descend. The passed path must be absolute.
func randDescension(path string) string {
dirs := listDirs(path)
if len(dirs) == 0 {
panic("Tried to descend when unable")
}
return dirs[randRange(0, uint(len(dirs) - 1))]
}
// Returns true if you can ascend from this path. No ascending
// above the home directory. The passed path must be absolute.
func canAscend(path string) bool {
home := os.Getenv("HOME")
return strings.HasPrefix(filepath.Dir(path), home)
}
// No need to be random. You can only ascend in one direction.
func ascend(path string) string {
return filepath.Dir(path)
}
// This is the furthest we can ascend.
func baseLocation() string {
home := os.Getenv("HOME")
return home
}
// Returns true if moving from `from` to `to` is an ascension.
func isAscension(to string, from string) bool {
return strings.HasPrefix(filepath.Dir(from), to)
}
// Returns true if moving from `from` to `to` is a descension.
func isDescension(to string, from string) bool {
return strings.HasPrefix(filepath.Dir(to), from)
}
|
[
"\"HOME\"",
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
commands/server_test.go
|
// Copyright 2015 The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package commands
import (
"fmt"
"net/http"
"os"
"runtime"
"strings"
"testing"
"time"
"github.com/gohugoio/hugo/helpers"
"github.com/spf13/viper"
"github.com/stretchr/testify/require"
)
func TestServer(t *testing.T) {
if isWindowsCI() {
// TODO(bep) not sure why server tests have started to fail on the Windows CI server.
t.Skip("Skip server test on appveyor")
}
assert := require.New(t)
dir, err := createSimpleTestSite(t, testSiteConfig{})
assert.NoError(err)
// Let us hope that this port is available on all systems ...
port := 1331
defer func() {
os.RemoveAll(dir)
}()
stop := make(chan bool)
b := newCommandsBuilder()
scmd := b.newServerCmdSignaled(stop)
cmd := scmd.getCommand()
cmd.SetArgs([]string{"-s=" + dir, fmt.Sprintf("-p=%d", port)})
go func() {
_, err = cmd.ExecuteC()
assert.NoError(err)
}()
// There is no way to know exactly when the server is ready for connections.
// We could improve by something like https://golang.org/pkg/net/http/httptest/#Server
// But for now, let us sleep and pray!
time.Sleep(2 * time.Second)
resp, err := http.Get("http://localhost:1331/")
assert.NoError(err)
defer resp.Body.Close()
homeContent := helpers.ReaderToString(resp.Body)
assert.Contains(homeContent, "List: Hugo Commands")
assert.Contains(homeContent, "Environment: development")
// Stop the server.
stop <- true
}
func TestFixURL(t *testing.T) {
type data struct {
TestName string
CLIBaseURL string
CfgBaseURL string
AppendPort bool
Port int
Result string
}
tests := []data{
{"Basic http localhost", "", "http://foo.com", true, 1313, "http://localhost:1313/"},
{"Basic https production, http localhost", "", "https://foo.com", true, 1313, "http://localhost:1313/"},
{"Basic subdir", "", "http://foo.com/bar", true, 1313, "http://localhost:1313/bar/"},
{"Basic production", "http://foo.com", "http://foo.com", false, 80, "http://foo.com/"},
{"Production subdir", "http://foo.com/bar", "http://foo.com/bar", false, 80, "http://foo.com/bar/"},
{"No http", "", "foo.com", true, 1313, "//localhost:1313/"},
{"Override configured port", "", "foo.com:2020", true, 1313, "//localhost:1313/"},
{"No http production", "foo.com", "foo.com", false, 80, "//foo.com/"},
{"No http production with port", "foo.com", "foo.com", true, 2020, "//foo.com:2020/"},
{"No config", "", "", true, 1313, "//localhost:1313/"},
}
for _, test := range tests {
t.Run(test.TestName, func(t *testing.T) {
b := newCommandsBuilder()
s := b.newServerCmd()
v := viper.New()
baseURL := test.CLIBaseURL
v.Set("baseURL", test.CfgBaseURL)
s.serverAppend = test.AppendPort
s.serverPort = test.Port
result, err := s.fixURL(v, baseURL, s.serverPort)
if err != nil {
t.Errorf("Unexpected error %s", err)
}
if result != test.Result {
t.Errorf("Expected %q, got %q", test.Result, result)
}
})
}
}
func TestRemoveErrorPrefixFromLog(t *testing.T) {
assert := require.New(t)
content := `ERROR 2018/10/07 13:11:12 Error while rendering "home": template: _default/baseof.html:4:3: executing "main" at <partial "logo" .>: error calling partial: template: partials/logo.html:5:84: executing "partials/logo.html" at <$resized.AHeight>: can't evaluate field AHeight in type *resource.Image
ERROR 2018/10/07 13:11:12 Rebuild failed: logged 1 error(s)
`
withoutError := removeErrorPrefixFromLog(content)
assert.False(strings.Contains(withoutError, "ERROR"), withoutError)
}
func isWindowsCI() bool {
return runtime.GOOS == "windows" && os.Getenv("CI") != ""
}
|
[
"\"CI\""
] |
[] |
[
"CI"
] |
[]
|
["CI"]
|
go
| 1 | 0 | |
tests/automation_framework/src/utilities/submit_request_utility.py
|
import logging
import json
import time
import os
import config.config as pconfig
import env
from avalon_sdk.connector.direct.jrpc.jrpc_worker_registry import \
JRPCWorkerRegistryImpl
from avalon_sdk.connector.direct.jrpc.jrpc_work_order import \
JRPCWorkOrderImpl
from avalon_sdk.worker.worker_details import \
WorkerType, WorkerStatus
from avalon_sdk.connector.direct.jrpc.jrpc_work_order_receipt \
import JRPCWorkOrderReceiptImpl
from avalon_sdk.connector.blockchains.fabric.fabric_worker_registry \
import FabricWorkerRegistryImpl
from avalon_sdk.connector.blockchains.fabric.fabric_work_order \
import FabricWorkOrderImpl
from avalon_sdk.connector.blockchains.ethereum.ethereum_worker_registry \
import EthereumWorkerRegistryImpl
from avalon_sdk.connector.blockchains.ethereum.ethereum_work_order \
import EthereumWorkOrderProxyImpl
import avalon_sdk.worker.worker_details as worker_details
logger = logging.getLogger(__name__)
TCFHOME = os.environ.get("TCF_HOME", "../../")
def config_file_read():
config = pconfig.parse_configuration_files(
env.conffiles, env.confpaths)
logger.info(" URI client %s \n", config["tcf"]["json_rpc_uri"])
config["tcf"]["json_rpc_uri"] = env.uri_client_sdk
return config
def _create_worker_registry_instance(blockchain_type, config):
# create worker registry instance for direct/proxy model
if env.proxy_mode and blockchain_type == 'fabric':
return FabricWorkerRegistryImpl(config)
elif env.proxy_mode and blockchain_type == 'ethereum':
return EthereumWorkerRegistryImpl(config)
else:
logger.info("Direct SDK code path\n")
return JRPCWorkerRegistryImpl(config)
def _create_work_order_instance(blockchain_type, config):
# create work order instance for direct/proxy model
if env.proxy_mode and blockchain_type == 'fabric':
return FabricWorkOrderImpl(config)
elif env.proxy_mode and blockchain_type == 'ethereum':
return EthereumWorkOrderProxyImpl(config)
else:
logger.info("Direct SDK code path\n")
return JRPCWorkOrderImpl(config)
def _create_work_order_receipt_instance(blockchain_type, config):
# create work order receipt instance for direct/proxy model
if env.proxy_mode and blockchain_type == 'fabric':
return None
elif env.proxy_mode and blockchain_type == 'ethereum':
# TODO need to implement
return None
else:
logger.info("Direct SDK code path\n")
return JRPCWorkOrderReceiptImpl(config)
def submit_request_listener(
uri_client, input_json_str, output_json_file_name):
logger.info("Listener code path\n")
req_time = time.strftime("%Y%m%d_%H%M%S")
request_method = input_json_str["method"]
input_json_str = json.dumps(input_json_str)
# write request to file
signed_input_file = ('./results/' + output_json_file_name + '_' + req_time
+ '_request.json')
with open(signed_input_file, 'w') as req_file:
req_file.write(json.dumps(input_json_str, ensure_ascii=False))
logger.info("in submit listener %s", input_json_str)
if request_method == "WorkOrderGetResult":
logger.info("- Validating WorkOrderGetResult Response-")
response = {}
response_timeout_start = time.time()
response_timeout_multiplier = ((6000 / 3600) + 6) * 3
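# response_timeout_multiplier works out to (6000 / 3600 + 6) * 3 = 23 seconds of polling budget
# Keep polling until a result appears; only an error code other than 5 stops the loop early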
while "result" not in response:
if "error" in response:
if response["error"]["code"] != 5:
logger.info('WorkOrderGetResult - '
'Response received with error code. ')
err_cd = 1
break
response_timeout_end = time.time()
if ((response_timeout_end - response_timeout_start) >
response_timeout_multiplier):
logger.info('ERROR: WorkOrderGetResult response is not \
received within expected time.')
break
response = uri_client._postmsg(input_json_str)
else:
logger.info('**********Received Request*********\n%s\n', input_json_str)
response = uri_client._postmsg(input_json_str)
logger.info('**********Received Response*********\n%s\n', response)
# write response to file
response_output_file = ('./results/' + output_json_file_name + '_'
+ req_time + '_response.json')
with open(response_output_file, 'w') as resp_file:
resp_file.write(json.dumps(response, ensure_ascii=False))
return response
def workorder_submit_sdk(wo_params, input_json_obj=None):
logger.info("WorkOrderSubmit SDK code path\n")
if input_json_obj is None:
req_id = 3
else:
req_id = input_json_obj["id"]
config = config_file_read()
work_order = _create_work_order_instance(env.blockchain_type, config)
logger.info(" work order id %s \n", wo_params.get_work_order_id())
logger.info(" worker id %s \n", wo_params.get_worker_id())
logger.info(" Requester ID %s \n", wo_params.get_requester_id())
logger.info(" To string %s \n", wo_params.to_string())
logger.info(" worker id %s \n", wo_params.get_worker_id())
logger.info("Work order submit request : %s, \n \n ",
wo_params.to_jrpc_string(req_id))
response = work_order.work_order_submit(
wo_params.get_work_order_id(),
wo_params.get_worker_id(),
wo_params.get_requester_id(),
wo_params.to_string(),
id=req_id
)
if env.proxy_mode and (type(response) != dict):
if response.value == 0:
response = {"error": {"code": 5}}
else:
response = {"error": {"code": response.value}}
response["workOrderId"] = wo_params.get_work_order_id()
logger.info('**********Received Response*********\n%s\n', response)
return response
def worker_lookup_sdk(worker_type, input_json=None):
logger.info("WorkerLookUp SDK code path\n")
if input_json is None:
jrpc_req_id = 3
else:
jrpc_req_id = input_json["id"]
config = config_file_read()
worker_dict = {'SGX': WorkerType.TEE_SGX,
'MPC': WorkerType.MPC, 'ZK': WorkerType.ZK}
worker_registry = _create_worker_registry_instance(env.blockchain_type, config)
if env.blockchain_type == "ethereum":
if worker_type in worker_dict.keys():
worker = WorkerType.TEE_SGX
else:
worker = worker_type
worker_lookup_response = worker_registry.worker_lookup(
worker,
config["WorkerConfig"]["OrganizationId"],
config["WorkerConfig"]["ApplicationTypeId"],
jrpc_req_id
)
else:
worker_lookup_response = worker_registry.worker_lookup(
worker_type=worker_dict.get(worker_type, worker_type), id=jrpc_req_id)
logger.info("\n Worker lookup response: {}\n".format(
json.dumps(worker_lookup_response, indent=4)
))
return worker_lookup_response
def worker_register_sdk(register_params, input_json):
logger.info("WorkerRegister SDK code path\n")
jrpc_req_id = input_json["id"]
if input_json is None:
jrpc_req_id = 3
else:
jrpc_req_id = input_json["id"]
worker_dict = {'SGX': WorkerType.TEE_SGX,
'MPC': WorkerType.MPC, 'ZK': WorkerType.ZK}
config = config_file_read()
worker_registry = _create_worker_registry_instance(env.blockchain_type, config)
if env.proxy_mode and (env.blockchain_type == "ethereum"):
worker_register_result = worker_registry.worker_register(
register_params["worker_id"],
worker_dict[register_params["workerType"]],
register_params["organization_id"],
register_params["application_type_id"],
json.dumps(register_params["details"]))
else:
worker_register_result = worker_registry.worker_register(
register_params["worker_id"],
worker_dict[register_params["workerType"]],
register_params["organization_id"],
register_params["application_type_id"],
json.dumps(register_params["details"]), jrpc_req_id)
logger.info("\n Worker register response: {}\n".format(
json.dumps(worker_register_result, indent=4)))
return worker_register_result
def worker_setstatus_sdk(set_status_params, input_json):
logger.info("WorkerSetStatus SDK code path\n")
logger.info("Worker status params %s \n", set_status_params)
if input_json is None:
jrpc_req_id = 3
else:
jrpc_req_id = input_json["id"]
status_dict = {1: WorkerStatus.ACTIVE, 2: WorkerStatus.OFF_LINE,
3: WorkerStatus.DECOMMISSIONED,
4: WorkerStatus.COMPROMISED}
config = config_file_read()
worker_registry = _create_worker_registry_instance(env.blockchain_type, config)
if env.proxy_mode and (env.blockchain_type == "ethereum"):
worker_setstatus_result = worker_registry.worker_set_status(
set_status_params["worker_id"],
status_dict[set_status_params["status"]])
else:
worker_setstatus_result = worker_registry.worker_set_status(
set_status_params["worker_id"],
status_dict[set_status_params["status"]], jrpc_req_id)
if env.proxy_mode:
result = worker_setstatus_result
worker_setstatus_result = {}
worker_setstatus_result["error"] = {"code" : result.value, "message" : ""}
logger.info("\n Worker setstatus response: {}\n".format(worker_setstatus_result))
return worker_setstatus_result
def worker_retrieve_sdk(worker_id, input_json=None):
logger.info("WorkerRetrieve SDK code path\n")
worker_obj = worker_details.SGXWorkerDetails()
if input_json is None:
jrpc_req_id = 11
else:
jrpc_req_id = input_json["id"]
config = config_file_read()
worker_registry = _create_worker_registry_instance(env.blockchain_type, config)
worker_retrieve_result = worker_registry.worker_retrieve(worker_id, jrpc_req_id)
if env.proxy_mode:
if worker_retrieve_result is None:
worker_retrieve_result = {"error": {"code": '', "message": "Worker Id not found"}}
else:
response = worker_retrieve_result
worker_obj.load_worker(json.loads(response[4]))
worker_retrieve_result = {}
result = {"workerType": response[1],
"organizationId": response[2],
"applicationTypeId": response[3],
"details": json.loads(response[4])}
worker_retrieve_result["result"] = result
if "error" in worker_retrieve_result:
logger.error("Unable to retrieve worker details\n")
return worker_retrieve_result
logger.info("\n Worker retrieve response: {}\n".format(worker_retrieve_result))
worker_obj.worker_id = worker_id
worker_retrieve_result["workerId"] = worker_id
logger.info("\n Worker ID\n%s\n", worker_id)
return worker_retrieve_result
def worker_update_sdk(update_params, input_json=None):
logger.info("WorkerUpdate SDK code path\n")
logger.info("Worker update params %s \n", update_params)
worker_obj = worker_details.SGXWorkerDetails()
# update_params = json.loads(update_params)
if input_json is None:
jrpc_req_id = 11
else:
jrpc_req_id = input_json["id"]
config = config_file_read()
worker_registry = _create_worker_registry_instance(env.blockchain_type, config)
if env.proxy_mode and (env.blockchain_type == "ethereum"):
worker_update_result = worker_registry.worker_update(
update_params["worker_id"],
json.dumps(update_params["details"]))
else:
worker_update_result = worker_registry.worker_update(
update_params["worker_id"],
json.dumps(update_params["details"]), jrpc_req_id)
if env.proxy_mode and (type(worker_update_result) != dict):
response = worker_update_result.value
worker_update_result = {"error": {"code": response, "message" : ""}}
logger.info("\n Worker update response: {}\n".format(worker_update_result))
return worker_update_result
def workorder_receiptcreate_sdk(wo_create_receipt, input_json):
logger.info("WorkerReceiptCreate SDK code path\n")
jrpc_req_id = input_json["id"]
config = config_file_read()
# Create receipt
wo_receipt = _create_work_order_receipt_instance(env.blockchain_type, config)
# Submit work order create receipt jrpc request
wo_receipt_resp = wo_receipt.work_order_receipt_create(
wo_create_receipt["workOrderId"],
wo_create_receipt["workerServiceId"],
wo_create_receipt["workerId"],
wo_create_receipt["requesterId"],
wo_create_receipt["receiptCreateStatus"],
wo_create_receipt["workOrderRequestHash"],
wo_create_receipt["requesterGeneratedNonce"],
wo_create_receipt["requesterSignature"],
wo_create_receipt["signatureRules"],
wo_create_receipt["receiptVerificationKey"],
jrpc_req_id
)
logger.info("Work order create receipt response : {} \n \n ".format(
wo_receipt_resp
))
return wo_receipt_resp
def workorder_receiptretrieve_sdk(workorderId, input_json):
logger.info("ReceiptRetrieve SDK code path\n")
jrpc_req_id = input_json["id"]
config = config_file_read()
# Create receipt
wo_receipt = _create_work_order_receipt_instance(env.blockchain_type, config)
wo_receipt_resp = wo_receipt.work_order_receipt_retrieve(
workorderId, jrpc_req_id)
logger.info("Work order retrieve receipt response : {} \n \n ".format(
wo_receipt_resp
))
# Retrieve the last update to the receipt by passing a large update index (1 << 32)
jrpc_req_id += 1
receipt_update_retrieve = \
wo_receipt.work_order_receipt_update_retrieve(
workorderId,
None,
1 << 32,
id=jrpc_req_id)
logger.info("\n Last update to receipt receipt is:\n {}".format(
json.dumps(receipt_update_retrieve, indent=4)
))
return receipt_update_retrieve
def workorder_getresult_sdk(workorderId, input_json):
logger.info("WorkOderGetResult SDK code path\n")
jrpc_req_id = input_json["id"]
config = config_file_read()
work_order = _create_work_order_instance(env.blockchain_type, config)
logger.info("----- Validating WorkOrderGetResult Response ------")
get_result_res = work_order.work_order_get_result(
workorderId, jrpc_req_id)
logger.info("****** WorkOrderGetResult Received Response*****\n%s\n", get_result_res)
if env.proxy_mode and (get_result_res is None):
get_result_res = {"error": {"code": -1}}
return get_result_res
def workorder_receiptlookup_sdk(requesterId, input_json):
logger.info("ReceiptRetrieve SDK code path\n")
jrpc_req_id = input_json["id"]
config = config_file_read()
wo_receipt = _create_work_order_receipt_instance(env.blockchain_type, config)
wo_receipt_resp = wo_receipt.work_order_receipt_lookup(
requester_id=requesterId, id=jrpc_req_id)
logger.info("Work order receipt lookup response : {} \n \n ".format(
wo_receipt_resp
))
return wo_receipt_resp
|
[] |
[] |
[
"TCF_HOME"
] |
[]
|
["TCF_HOME"]
|
python
| 1 | 0 | |
CasStereoNet/main.py
|
from __future__ import print_function, division
import argparse
import os, sys
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
from tensorboardX import SummaryWriter
from datasets.messytable_dataset import MessytableDataset
from models import __models__, __loss__
from utils import *
from utils.metrics import compute_err_metric
from utils.warp_ops import apply_disparity_cu
from utils.messytable_dataset_config import cfg
import gc
cudnn.benchmark = True
assert torch.backends.cudnn.enabled, "Amp requires cudnn backend to be enabled."
parser = argparse.ArgumentParser(description='Cascade Stereo Network (CasStereoNet)')
# Model parameters
parser.add_argument('--model', default='gwcnet-c', help='select a model structure', choices=__models__.keys())
parser.add_argument('--grad_method', type=str, default="detach", choices=["detach", "undetach"],
help='predicted disp detach, undetach')
parser.add_argument('--save_freq', type=int, default=1, help='the frequency of saving checkpoint')
parser.add_argument('--logdir', required=True, help='the directory to save logs and checkpoints')
parser.add_argument('--loadckpt', help='load the weights from a specific checkpoint')
parser.add_argument('--resume', action='store_true', help='continue training the model')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
# Apex and distributed training configuration
parser.add_argument("--local_rank", type=int, default=0, help='rank of device in distributed training')
parser.add_argument('--using_apex', action='store_true', help='using apex, need to install apex')
parser.add_argument('--sync_bn', action='store_true',help='enabling apex sync BN.')
parser.add_argument('--opt-level', type=str, default="O0")
parser.add_argument('--keep-batchnorm-fp32', type=str, default=None)
parser.add_argument('--loss-scale', type=str, default=None)
# Messytable dataset configuration
parser.add_argument('--config-file', type=str, default='./CasStereoNet/configs/local_train_config.yaml',
metavar='FILE', help='Config files')
parser.add_argument('--color-jitter', action='store_true', help='whether apply color jitter in data augmentation')
parser.add_argument('--gaussian-blur', action='store_true', help='whether apply gaussian blur in data augmentation')
parser.add_argument('--debug', action='store_true', help='whether run in debug mode')
parser.add_argument('--warp-op', action='store_true', help='whether use warp_op function to get disparity')
args = parser.parse_args()
cfg.merge_from_file(args.config_file)
os.makedirs(args.logdir, exist_ok=True)
# Use sync_bn by using nvidia-apex, need to install apex.
if args.sync_bn:
assert args.using_apex, "must set --using_apex and install nvidia-apex"
if args.using_apex:
try:
from apex.parallel import DistributedDataParallel as DDP
from apex.fp16_utils import *
from apex import amp, optimizers
from apex.multi_tensor_apply import multi_tensor_applier
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
# Distributed training
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
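# WORLD_SIZE is set by torch.distributed.launch; fall back to a single process when it is absent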
is_distributed = num_gpus > 1
args.is_distributed = is_distributed
if is_distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(
backend="nccl", init_method="env://"
)
synchronize()
# Set seed
set_random_seed(args.seed)
# Create summary logger and print args
if (not is_distributed) or (dist.get_rank() == 0):
print("argv:", sys.argv[1:])
print_args(args)
print(f'Running with configs:\n {cfg}')
print("creating new summary file")
logger = SummaryWriter(args.logdir)
# Create model and model_loss
model = __models__[args.model](
maxdisp=cfg.ARGS.MAX_DISP,
ndisps=[int(nd) for nd in cfg.ARGS.NDISP],
disp_interval_pixel=[float(d_i) for d_i in cfg.ARGS.DISP_INTER_R],
cr_base_chs=[int(ch) for ch in cfg.ARGS.CR_BASE_CHS],
grad_method=args.grad_method,
using_ns=cfg.ARGS.USING_NS,
ns_size=cfg.ARGS.NS_SIZE
)
if args.sync_bn:
import apex
print("using apex synced BN")
model = apex.parallel.convert_syncbn_model(model)
model_loss = __loss__[args.model]
model.cuda()
if dist.get_rank() == 0:
print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))
# Optimizer
optimizer = optim.Adam(model.parameters(), lr=cfg.SOLVER.LR, betas=(0.9, 0.999))
# Load parameters if ckpt is provided
start_epoch = 0
if args.resume:
# find all checkpoint files and sort them by epoch id
all_saved_ckpts = [fn for fn in os.listdir(args.logdir) if (fn.endswith(".ckpt") and not fn.endswith("best.ckpt"))]
all_saved_ckpts = sorted(all_saved_ckpts, key=lambda x: int(x.split('_')[-1].split('.')[0]))
# use the latest checkpoint file
loadckpt = os.path.join(args.logdir, all_saved_ckpts[-1])
print("loading the lastest model in logdir: {}".format(loadckpt))
state_dict = torch.load(loadckpt, map_location=torch.device("cpu"))
model.load_state_dict(state_dict['model'])
optimizer.load_state_dict(state_dict['optimizer'])
start_epoch = state_dict['epoch'] + 1
elif args.loadckpt:
# load the checkpoint file specified by args.loadckpt
print("loading model {}".format(args.loadckpt))
state_dict = torch.load(args.loadckpt, map_location=torch.device("cpu"))
model.load_state_dict(state_dict['model'])
if dist.get_rank() == 0:
print("start at epoch {}".format(start_epoch))
# Initialize Amp
if args.using_apex:
model, optimizer = amp.initialize(model, optimizer,
opt_level=args.opt_level,
keep_batchnorm_fp32=args.keep_batchnorm_fp32,
loss_scale=args.loss_scale
)
# Enable Multiprocess training
if is_distributed:
print("Dist Train, Let's use", torch.cuda.device_count(), "GPUs!")
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank,
# find_unused_parameters=False,
# this should be removed if we update BatchNorm stats
# broadcast_buffers=False,
)
else:
if torch.cuda.is_available():
print("Let's use", torch.cuda.device_count(), "GPUs!")
model = nn.DataParallel(model)
# Dataset, dataloader
train_dataset = MessytableDataset(cfg.SPLIT.TRAIN, args.gaussian_blur, args.color_jitter, args.debug, sub=100)
val_dataset = MessytableDataset(cfg.SPLIT.VAL, args.gaussian_blur, args.color_jitter, args.debug, sub=100)
if is_distributed:
train_sampler = torch.utils.data.DistributedSampler(train_dataset, num_replicas=dist.get_world_size(),
rank=dist.get_rank())
val_sampler = torch.utils.data.DistributedSampler(val_dataset, num_replicas=dist.get_world_size(),
rank=dist.get_rank())
TrainImgLoader = torch.utils.data.DataLoader(train_dataset, cfg.SOLVER.BATCH_SIZE, sampler=train_sampler,
num_workers=cfg.SOLVER.NUM_WORKER, drop_last=True, pin_memory=True)
ValImgLoader = torch.utils.data.DataLoader(val_dataset, cfg.SOLVER.BATCH_SIZE, sampler=val_sampler,
num_workers=cfg.SOLVER.NUM_WORKER, drop_last=False, pin_memory=True)
else:
TrainImgLoader = torch.utils.data.DataLoader(train_dataset, batch_size=cfg.SOLVER.BATCH_SIZE,
shuffle=True, num_workers=cfg.SOLVER.NUM_WORKER, drop_last=True)
ValImgLoader = torch.utils.data.DataLoader(val_dataset, batch_size=cfg.SOLVER.BATCH_SIZE,
shuffle=False, num_workers=cfg.SOLVER.NUM_WORKER, drop_last=False)
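# Number of cascade stages (one per NDISP entry); used later to pick the final stage's prediction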
num_stage = len([int(nd) for nd in cfg.ARGS.NDISP])
def train():
Cur_err = np.inf
for epoch_idx in range(start_epoch, cfg.SOLVER.EPOCHS):
adjust_learning_rate(optimizer, epoch_idx, cfg.SOLVER.LR, cfg.SOLVER.LR_EPOCHS)
# Training
avg_train_scalars = AverageMeterDict()
for batch_idx, sample in enumerate(TrainImgLoader):
loss, scalar_outputs = train_sample(sample)
if (not is_distributed) or (dist.get_rank() == 0):
avg_train_scalars.update(scalar_outputs)
# Calculate average error in the main process
if (not is_distributed) or (dist.get_rank() == 0):
# Get average results among all batches
total_err_metrics = avg_train_scalars.mean()
print(f'Epoch {epoch_idx} train total_err_metrics: {total_err_metrics}')
# Add lr to dict and save results to tensorboard
total_err_metrics.update({'lr': optimizer.param_groups[0]['lr']})
save_scalars(logger, 'train', total_err_metrics, epoch_idx)
# Save checkpoints
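# Note: model.module assumes the model was wrapped by DistributedDataParallel or DataParallel above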
if (epoch_idx + 1) % args.save_freq == 0:
if (not is_distributed) or (dist.get_rank() == 0):
checkpoint_data = {'epoch': epoch_idx, 'model': model.module.state_dict(), 'optimizer': optimizer.state_dict()}
save_filename = "{}/checkpoint_{:0>6}.ckpt".format(args.logdir, epoch_idx)
torch.save(checkpoint_data, save_filename)
gc.collect()
# Validation
avg_test_scalars = AverageMeterDict()
for batch_idx, sample in enumerate(ValImgLoader):
loss, scalar_outputs = test_sample(sample)
if (not is_distributed) or (dist.get_rank() == 0):
avg_test_scalars.update(scalar_outputs)
# Calculate average error and save checkpoint in the main process
if (not is_distributed) or (dist.get_rank() == 0):
# Get average results among all batches
total_err_metrics = avg_test_scalars.mean()
print(f'Epoch {epoch_idx} val total_err_metrics: {total_err_metrics}')
save_scalars(logger, 'val', total_err_metrics, epoch_idx)
# Save best checkpoints
if (not is_distributed) or (dist.get_rank() == 0):
New_err = total_err_metrics["depth_abs_err"][0]
if New_err < Cur_err:
Cur_err = New_err
checkpoint_data = {'epoch': epoch_idx, 'model': model.module.state_dict(),
'optimizer': optimizer.state_dict()}
save_filename = "{}/checkpoint_best.ckpt".format(args.logdir)
torch.save(checkpoint_data, save_filename)
print("Best Checkpoint epoch_idx:{}".format(epoch_idx))
gc.collect()
# train one sample
def train_sample(sample):
model.train()
# Load data
imgL = sample['img_L'].cuda()
imgR = sample['img_R'].cuda()
disp_gt = sample['img_disp_l'].cuda()
depth_gt = sample['img_depth_l'].cuda() # [bs, 1, H, W]
img_focal_length = sample['focal_length'].cuda()
img_baseline = sample['baseline'].cuda()
if args.warp_op:
img_disp_r = sample['img_disp_r'].cuda()
disp_gt = apply_disparity_cu(img_disp_r, img_disp_r.type(torch.int)) # [bs, 1, H, W]
del img_disp_r
# Resize the 2x resolution disp and depth back to 256 * 512
# Note: This step should go after the apply_disparity_cu
disp_gt = F.interpolate(disp_gt, (256, 512)).squeeze(1) # [bs, H, W]
depth_gt = F.interpolate(depth_gt, (256, 512)) # [bs, 1, H, W]
optimizer.zero_grad()
outputs = model(imgL, imgR)
mask = (disp_gt < cfg.ARGS.MAX_DISP) * (disp_gt > 0) # Note in training we do not exclude bg
loss = model_loss(outputs, disp_gt, mask, dlossw=[float(e) for e in cfg.ARGS.DLOSSW])
outputs_stage = outputs["stage{}".format(num_stage)]
disp_pred = outputs_stage['pred'] # [bs, H, W]
del outputs
# Compute error metrics
scalar_outputs = {"loss": loss}
err_metrics = compute_err_metric(disp_gt.unsqueeze(1),
depth_gt,
disp_pred.unsqueeze(1),
img_focal_length,
img_baseline,
mask.unsqueeze(1))
scalar_outputs.update(err_metrics)
if is_distributed and args.using_apex:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
if is_distributed:
scalar_outputs = reduce_scalar_outputs(scalar_outputs)
return tensor2float(scalar_outputs["loss"]), tensor2float(scalar_outputs)
# test one sample
@make_nograd_func
def test_sample(sample):
if is_distributed:
model_eval = model.module
else:
model_eval = model
model_eval.eval()
imgL = sample['img_L'].cuda()
imgR = sample['img_R'].cuda()
disp_gt = sample['img_disp_l'].cuda()
depth_gt = sample['img_depth_l'].cuda() # [bs, 1, H, W]
img_focal_length = sample['focal_length'].cuda()
img_baseline = sample['baseline'].cuda()
if args.warp_op:
img_disp_r = sample['img_disp_r'].cuda()
disp_gt = apply_disparity_cu(img_disp_r, img_disp_r.type(torch.int)) # [bs, 1, H, W]
del img_disp_r
disp_gt = F.interpolate(disp_gt, (256, 512)).squeeze(1) # [bs, H, W]
depth_gt = F.interpolate(depth_gt, (256, 512))
outputs = model_eval(imgL, imgR)
mask = (disp_gt < cfg.ARGS.MAX_DISP) * (disp_gt > 0)
loss = torch.tensor(0, dtype=imgL.dtype, device=imgL.device, requires_grad=False)
# loss = model_loss(outputs, disp_gt, mask, dlossw=[float(e) for e in cfg.ARGS.DLOSSW])
outputs_stage = outputs["stage{}".format(num_stage)]
disp_pred = outputs_stage["pred"]
# Compute error metrics
scalar_outputs = {"loss": loss}
err_metrics = compute_err_metric(disp_gt.unsqueeze(1),
depth_gt,
disp_pred.unsqueeze(1),
img_focal_length,
img_baseline,
mask.unsqueeze(1))
scalar_outputs.update(err_metrics)
if is_distributed:
scalar_outputs = reduce_scalar_outputs(scalar_outputs)
return tensor2float(scalar_outputs["loss"]), tensor2float(scalar_outputs)
if __name__ == '__main__':
train()
|
[] |
[] |
[
"WORLD_SIZE"
] |
[]
|
["WORLD_SIZE"]
|
python
| 1 | 0 | |
plugins/commands.py
|
import os
import math
import json
import time
import shutil
import heroku3
import requests
from pyrogram import filters
from pyrogram import Client as trojanz
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
if bool(os.environ.get("WEBHOOK", False)):
from sample_config import Config
else:
from config import Config
from script import Script
from plugins.helpers import humanbytes
from database.filters_mdb import filter_stats
from database.users_mdb import add_user, find_user, all_users
@trojanz.on_message(filters.command('id') & (filters.private | filters.group))
async def showid(client, message):
chat_type = message.chat.type
if chat_type == "private":
user_id = message.chat.id
await message.reply_text(
f"Your ID : `{user_id}`",
parse_mode="md",
quote=True
)
elif (chat_type == "group") or (chat_type == "supergroup"):
user_id = message.from_user.id
chat_id = message.chat.id
if message.reply_to_message:
reply_id = f"Replied User ID : `{message.reply_to_message.from_user.id}`"
else:
reply_id = ""
await message.reply_text(
f"Your ID : `{user_id}`\nThis Group ID : `{chat_id}`\n\n{reply_id}",
parse_mode="md",
quote=True
)
@trojanz.on_message(filters.command('info') & (filters.private | filters.group))
async def showinfo(client, message):
try:
cmd, id = message.text.split(" ", 1)
except:
id = False
pass
if id:
if (len(id) == 10 or len(id) == 9):
try:
checkid = int(id)
except:
await message.reply_text("__Enter a valid USER ID__", quote=True, parse_mode="md")
return
else:
await message.reply_text("__Enter a valid USER ID__", quote=True, parse_mode="md")
return
if Config.SAVE_USER == "yes":
name, username, dcid = await find_user(str(id))
else:
try:
user = await client.get_users(int(id))
name = str(user.first_name + (user.last_name or ""))
username = user.username
dcid = user.dc_id
except:
name = False
pass
if not name:
await message.reply_text("__USER Details not found!!__", quote=True, parse_mode="md")
return
else:
if message.reply_to_message:
name = str(message.reply_to_message.from_user.first_name\
+ (message.reply_to_message.from_user.last_name or ""))
id = message.reply_to_message.from_user.id
username = message.reply_to_message.from_user.username
dcid = message.reply_to_message.from_user.dc_id
else:
name = str(message.from_user.first_name\
+ (message.from_user.last_name or ""))
id = message.from_user.id
username = message.from_user.username
dcid = message.from_user.dc_id
if not str(username) == "None":
user_name = f"@{username}"
else:
user_name = "none"
await message.reply_text(
f"<b>Name</b> : {name}\n\n"
f"<b>User ID</b> : <code>{id}</code>\n\n"
f"<b>Username</b> : {user_name}\n\n"
f"<b>Permanant USER link</b> : <a href='tg://user?id={id}'>Click here!</a>\n\n"
f"<b>DC ID</b> : {dcid}\n\n",
quote=True,
parse_mode="html"
)
@trojanz.on_message((filters.private | filters.group) & filters.command('status'))
async def bot_status(client,message):
if str(message.from_user.id) not in Config.AUTH_USERS:
return
chats, filters = await filter_stats()
if Config.SAVE_USER == "yes":
users = await all_users()
userstats = f"> __**{users} users have interacted with your bot!**__\n\n"
else:
userstats = ""
if Config.HEROKU_API_KEY:
try:
server = heroku3.from_key(Config.HEROKU_API_KEY)
user_agent = (
'Mozilla/5.0 (Linux; Android 10; SM-G975F) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/80.0.3987.149 Mobile Safari/537.36'
)
accountid = server.account().id
headers = {
'User-Agent': user_agent,
'Authorization': f'Bearer {Config.HEROKU_API_KEY}',
'Accept': 'application/vnd.heroku+json; version=3.account-quotas',
}
path = "/accounts/" + accountid + "/actions/get-quota"
request = requests.get("https://api.heroku.com" + path, headers=headers)
if request.status_code == 200:
result = request.json()
total_quota = result['account_quota']
quota_used = result['quota_used']
quota_left = total_quota - quota_used
total = math.floor(total_quota/3600)
used = math.floor(quota_used/3600)
hours = math.floor(quota_left/3600)
minutes = math.floor(quota_left/60 % 60)
days = math.floor(hours/24)
usedperc = math.floor(quota_used / total_quota * 100)
leftperc = math.floor(quota_left / total_quota * 100)
quota_details = f"""
**Heroku Account Status**
> __You have **{total} hours** of free dyno quota available each month.__
> __Dyno hours used this month__ :
- **{used} hours** ( {usedperc}% )
> __Dyno hours remaining this month__ :
- **{hours} hours** ( {leftperc}% )
- **Approximately {days} days!**
"""
else:
quota_details = ""
except:
print("Check your Heroku API key")
quota_details = ""
else:
quota_details = ""
uptime = time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - Config.BOT_START_TIME))
try:
t, u, f = shutil.disk_usage(".")
total = humanbytes(t)
used = humanbytes(u)
free = humanbytes(f)
disk = "\n**Disk Details**\n\n" \
f"> USED : {used} / {total}\n" \
f"> FREE : {free}\n\n"
except:
disk = ""
await message.reply_text(
"**Current status of your bot!**\n\n"
f"> __**{filters}** filters across **{chats}** chats__\n\n"
f"{userstats}"
f"> __BOT Uptime__ : **{uptime}**\n\n"
f"{quota_details}"
f"{disk}",
quote=True,
parse_mode="md"
)
@trojanz.on_message(filters.command('start') & filters.private)
async def start(client, message):
await message.reply_text(
text=Script.START_MSG.format(message.from_user.mention),
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("Command Help", callback_data="help_data")
]
]
),
reply_to_message_id=message.message_id
)
if Config.SAVE_USER == "yes":
try:
await add_user(
str(message.from_user.id),
str(message.from_user.username),
str(message.from_user.first_name + " " + (message.from_user.last_name or "")),
str(message.from_user.dc_id)
)
except:
pass
@trojanz.on_message(filters.command('help') & filters.private)
async def help(client, message):
await message.reply_text(
text=Script.HELP_MSG,
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton("How to Deploy?", url="https://t.me/TAMIL_HD_NEW_MOVIES"),
InlineKeyboardButton("About Me", callback_data="about_data")
],
[
InlineKeyboardButton("BOT Channel", url="https://t.me/TAMIL_HD_NEW_MOVIES"),
InlineKeyboardButton("Support Group", url="https://t.me/TAMIL_HD_NEW_MOVIES")
]
]
),
reply_to_message_id=message.message_id
)
@trojanz.on_message(filters.command('about') & filters.private)
async def about(client, message):
await message.reply_text(
text=Script.ABOUT_MSG,
disable_web_page_preview=True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
"SOURCE CODE", url="https://github.com/skdevi/Unlimited-Filter-Bot")
],
[
InlineKeyboardButton("BACK", callback_data="help_data"),
InlineKeyboardButton("CLOSE", callback_data="close_data"),
]
]
),
reply_to_message_id=message.message_id
)
|
[] |
[] |
[
"WEBHOOK"
] |
[]
|
["WEBHOOK"]
|
python
| 1 | 0 | |
src/setup.py
|
import os
from codecs import open
from distutils.command.build import build
from os import path
from setuptools import find_packages, setup
from pretalx import __version__ as pretalx_version
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
try:
with open(path.join(here, '../README.rst'), encoding='utf-8') as f:
long_description = f.read()
except: # noqa
long_description = ''
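# CustomBuild compiles translations and collects/compresses static assets at build time,
# so the built package ships with them; it requires an importable Django settings module.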
class CustomBuild(build):
def run(self):
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pretalx.settings")
import django
django.setup()
from django.conf import settings
from django.core import management
settings.COMPRESS_ENABLED = True
settings.COMPRESS_OFFLINE = True
management.call_command('compilemessages', verbosity=1)
management.call_command('collectstatic', verbosity=1, interactive=False)
management.call_command('compress', verbosity=1)
build.run(self)
cmdclass = {
'build': CustomBuild
}
setup(
name='pretalx',
version=pretalx_version,
description='Conference organization: CfPs, scheduling, much more',
long_description=long_description,
url='https://pretalx.org',
author='Tobias Kunze',
author_email='[email protected]',
license='Apache License 2.0',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Other Audience',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Environment :: Web Environment',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Framework :: Django :: 2.0'
],
keywords='conference cfp event barcamp',
install_requires=[
'bleach>=2.1.2,==2.1.*',
'celery==4.1.*',
'csscompressor==0.9.*',
'cssutils==1.0.*',
'Django==2.0.*',
'django-bakery==0.11.*',
'django-bootstrap4==0.0.4',
'django-compressor==2.2.*',
'django-csp==3.3.*',
'django-filter==1.1.*',
'django-formset-js-improved==0.5.0.1',
'django-formtools==2.1.*',
'django-hierarkey==1.0.*',
'django-i18nfield==1.3.*',
'django-libsass==0.7',
'djangorestframework==3.7.*',
'inlinestyler==0.2.*',
'libsass<=0.13.1', # May be removed once https://github.com/dahlia/libsass-python/pull/227 is merged
'Markdown==2.6.*',
'pytz',
'reportlab==3.4.*',
'requests',
'rules==1.3.*',
'urlman==1.2.*',
'vobject==0.9.*',
'whitenoise==3.3.*',
'zxcvbn-python==4.4.*',
],
extras_require={
'dev': [
'beautifulsoup4',
'isort',
'lxml',
'pylama',
'pytest',
'pytest-cov',
'pytest-django',
'pytest-mock',
],
'mysql': ['mysqlclient'],
'postgres': ['psycopg2'],
},
packages=find_packages(exclude=['tests', 'tests.*']),
include_package_data=True,
cmdclass=cmdclass,
)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
example.py
|
#!/usr/bin/env python3
import cbthelper as cbt
from selenium import webdriver
import sys, os
def main():
# set username and auth key for api requests
username = os.environ.get('CBT_USERNAME', '')
authkey = os.environ.get('CBT_AUTHKEY', '')
if username == '' or authkey == '':
print("Don't forget to set the CBT_USERNAME and CBT_AUTHKEY environment variables")
sys.exit(1)
cbt.login(username, authkey)
# build caps using best match of what customer wants
# does not require exact platform or browser name
caps = cbt.getCapsBuilder() \
.withPlatform('windows 10') \
.withBrowser('Google Chrome 65') \
.withResolution(1024, 768) \
.withName('cbthelper test') \
.withBuild('0.0.1') \
.withRecordNetwork(False) \
.build()
print(caps)
driver = webdriver.Remote(desired_capabilities = caps, command_executor = cbt.hub)
driver.implicitly_wait(20)
# initialize an AutomatedTest object with our selenium session id
myTest = cbt.getTestFromId(driver.session_id)
video = myTest.startRecordingVideo()
driver.get('http://google.com')
driver.implicitly_wait(2)
# easily take snapshot
googleSnap = myTest.takeSnapshot()
# easily set snapshot description
googleSnap.setDescription('google.com')
# save the snapshot locally
googleSnap.saveLocally('test/google.png')
driver.get('http://crossbrowsertesting.com')
driver.implicitly_wait(2)
# take snapshot and set description with one call (that's all)
myTest.takeSnapshot('cbt.com')
# downloads every snapshot for a given test and saves them in a directory
# can set useDescription to name the images what we set as the description
# alternatively can set a prefix (default 'image') and images will be indexed
myTest.saveAllSnapshots('test/images', useDescription=True)
video.stopRecording()
# set score using enum (SCORE_PASS, _FAIL, or _UNSET)
myTest.setScore(cbt.SCORE_PASS)
# set description
myTest.setDescription('blah blah blah')
# send request to our api to stop the test
# can also pass in score to set score and stop in one call
# myTest.stop(cbt.SCORE_PASS)
myTest.stop()
video.saveLocally('test/video.mp4')
#driver.quit()
# our test history api call takes a lot of optional parameters
# the builder makes it easier to get what you want
options = cbt.getTestHistoryBuilder() \
.withLimit(5) \
.withName('cbthelper test') \
.build()
print(options)
# grab our history using the options we created above
history = cbt.getTestHistory(options)
print(history['selenium'])
if __name__ == '__main__':
main()
|
[] |
[] |
[
"CBT_AUTHKEY",
"CBT_USERNAME"
] |
[]
|
["CBT_AUTHKEY", "CBT_USERNAME"]
|
python
| 2 | 0 | |
main.go
|
package main
import (
"crypto/md5"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"sync"
"github.com/Sirupsen/logrus"
"github.com/docker/go-plugins-helpers/volume"
)
const socketAddress = "/run/docker/plugins/ceph-rbd.sock"
type cephRbdVolume struct {
Pool string
Rbd string
Hosts string
Username string
Secret string
RbdNum int
Mountpoint string
connections int
}
type cephRbdDriver struct {
sync.RWMutex
root string
statePath string
volumes map[string]*cephRbdVolume
}
func newCephRbdDriver(root string) (*cephRbdDriver, error) {
logrus.WithField("method", "new driver").Debug(root)
d := &cephRbdDriver{
root: filepath.Join(root, "volumes"),
statePath: filepath.Join(root, "state", "ceph-rbd-state.json"),
volumes: map[string]*cephRbdVolume{},
}
data, err := ioutil.ReadFile(d.statePath)
if err != nil {
if os.IsNotExist(err) {
logrus.WithField("statePath", d.statePath).Debug("no state found")
} else {
return nil, err
}
} else {
if err := json.Unmarshal(data, &d.volumes); err != nil {
return nil, err
}
}
return d, nil
}
func (d *cephRbdDriver) saveState() {
data, err := json.Marshal(d.volumes)
if err != nil {
logrus.WithField("statePath", d.statePath).Error(err)
return
}
if err := ioutil.WriteFile(d.statePath, data, 0644); err != nil {
logrus.WithField("savestate", d.statePath).Error(err)
}
}
func (d *cephRbdDriver) Create(r volume.Request) volume.Response {
logrus.WithField("method", "create").Debugf("%#v", r)
d.Lock()
defer d.Unlock()
v := &cephRbdVolume{}
for key, val := range r.Options {
switch key {
case "pool":
v.Pool = val
case "rbd":
v.Rbd = val
case "hosts":
v.Hosts = val
case "username":
v.Username = val
case "secret":
v.Secret = val
default:
return responseError(fmt.Sprintf("unknown option %q", val))
}
}
if v.Pool == "" {
return responseError("'pool' option required")
}
if v.Rbd == "" {
return responseError("'rbd' option required")
}
if v.Hosts == "" {
return responseError("'hosts' option required")
}
if v.Username == "" {
return responseError("'username' option required")
}
if v.Secret == "" {
return responseError("'secret' option required")
}
v.Mountpoint = filepath.Join(d.root, fmt.Sprintf("%x", md5.Sum([]byte(v.Rbd)))) // TODO Include pool, hosts
d.volumes[r.Name] = v
d.saveState()
logrus.WithField("method", "create").Debugf("Saved mountpoint %s", v.Mountpoint)
return volume.Response{}
}
func (d *cephRbdDriver) Remove(r volume.Request) volume.Response {
logrus.WithField("method", "remove").Debugf("%#v", r)
d.Lock()
defer d.Unlock()
v, ok := d.volumes[r.Name]
if !ok {
return responseError(fmt.Sprintf("volume %s not found", r.Name))
}
if v.connections != 0 {
return responseError(fmt.Sprintf("volume %s is currently used by a container", r.Name))
}
if err := os.RemoveAll(v.Mountpoint); err != nil {
return responseError(err.Error())
}
delete(d.volumes, r.Name)
d.saveState()
return volume.Response{}
}
func (d *cephRbdDriver) Path(r volume.Request) volume.Response {
logrus.WithField("method", "path").Debugf("%#v", r)
d.RLock()
defer d.RUnlock()
v, ok := d.volumes[r.Name]
if !ok {
return responseError(fmt.Sprintf("volume %s not found", r.Name))
}
return volume.Response{Mountpoint: v.Mountpoint}
}
func (d *cephRbdDriver) Mount(r volume.MountRequest) volume.Response {
logrus.WithField("method", "mount").Debugf("%#v", r)
d.Lock()
defer d.Unlock()
v, ok := d.volumes[r.Name]
if !ok {
return responseError(fmt.Sprintf("volume %s not found", r.Name))
}
if v.connections == 0 {
fi, err := os.Lstat(v.Mountpoint)
if os.IsNotExist(err) {
if err := os.MkdirAll(v.Mountpoint, 0755); err != nil {
return responseError(err.Error())
}
} else if err != nil {
return responseError(err.Error())
}
if fi != nil && !fi.IsDir() {
return responseError(fmt.Sprintf("%v already exist and it's not a directory", v.Mountpoint))
}
if err := d.mountVolume(v); err != nil {
return responseError(err.Error())
}
}
v.connections++
return volume.Response{Mountpoint: v.Mountpoint}
}
func (d *cephRbdDriver) Unmount(r volume.UnmountRequest) volume.Response {
logrus.WithField("method", "unmount").Debugf("%#v", r)
d.Lock()
defer d.Unlock()
v, ok := d.volumes[r.Name]
if !ok {
return responseError(fmt.Sprintf("volume %s not found", r.Name))
}
v.connections--
if v.connections <= 0 {
if err := d.unmountVolume(v); err != nil {
return responseError(err.Error())
}
v.connections = 0
}
return volume.Response{}
}
func (d *cephRbdDriver) Get(r volume.Request) volume.Response {
logrus.WithField("method", "get").Debugf("%#v", r)
d.Lock()
defer d.Unlock()
v, ok := d.volumes[r.Name]
if !ok {
return responseError(fmt.Sprintf("volume %s not found", r.Name))
}
return volume.Response{Volume: &volume.Volume{Name: r.Name, Mountpoint: v.Mountpoint}}
}
func (d *cephRbdDriver) List(r volume.Request) volume.Response {
logrus.WithField("method", "list").Debugf("%#v", r)
d.Lock()
defer d.Unlock()
var vols []*volume.Volume
for name, v := range d.volumes {
vols = append(vols, &volume.Volume{Name: name, Mountpoint: v.Mountpoint})
}
return volume.Response{Volumes: vols}
}
func (d *cephRbdDriver) Capabilities(r volume.Request) volume.Response {
logrus.WithField("method", "capabilities").Debugf("%#v", r)
return volume.Response{Capabilities: volume.Capability{Scope: "local"}}
}
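// mountVolume maps the RBD image through the kernel rbd driver's sysfs interface
// (/host/sys/bus/rbd/add), then mounts the resulting /dev/rbd<N> block device at the volume's mountpoint.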
func (d *cephRbdDriver) mountVolume(v *cephRbdVolume) error {
fileName := "/host/sys/bus/rbd/add"
mountString := fmt.Sprintf("%s name=%s,secret=%s %s %s", v.Hosts, v.Username, v.Secret, v.Pool, v.Rbd)
if err := ioutil.WriteFile(fileName, []byte(mountString), 0600); err != nil {
logrus.WithField("mountvolume", v.Rbd).Error(err)
return err
}
num, err := findRbdNum(v)
if err != nil {
return err
}
v.RbdNum = num
cmd := exec.Command("mount", fmt.Sprintf("/dev/rbd%d", v.RbdNum), v.Mountpoint)
logrus.Debug(cmd.Args)
return cmd.Run()
}
func (d *cephRbdDriver) unmountVolume(v *cephRbdVolume) error {
cmd := fmt.Sprintf("umount %s", v.Mountpoint)
logrus.Debug(cmd)
if err := exec.Command("sh", "-c", cmd).Run(); err != nil {
return err
}
return ioutil.WriteFile("/host/sys/bus/rbd/remove", []byte(strconv.Itoa(v.RbdNum)), 0600)
}
func findRbdNum(v *cephRbdVolume) (int, error) {
// FIXME Use pool for searching as well
cmd := fmt.Sprintf("grep -l %s /host/sys/devices/rbd/*/name | egrep -o '[0-9]+' | head -n1", v.Rbd)
logrus.Debug(cmd)
output, err := exec.Command("sh", "-c", cmd).Output()
logrus.Debug(output)
if err != nil {
return -1, err
}
num, err := strconv.Atoi(strings.TrimSpace(string(output)))
if err != nil {
return -1, err
}
return num, nil
}
func responseError(err string) volume.Response {
logrus.Error(err)
return volume.Response{Err: err}
}
func main() {
debug := os.Getenv("DEBUG")
if ok, _ := strconv.ParseBool(debug); ok {
logrus.SetLevel(logrus.DebugLevel)
}
d, err := newCephRbdDriver("/mnt")
if err != nil {
log.Fatal(err)
}
h := volume.NewHandler(d)
logrus.Infof("listening on %s", socketAddress)
logrus.Error(h.ServeUnix(socketAddress, 0))
}
|
[
"\"DEBUG\""
] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
go
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sciblog.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
generator/debug.go
|
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package generator
import (
"fmt"
"log"
"os"
"path/filepath"
"runtime"
)
// Debug when the env var DEBUG or SWAGGER_DEBUG is not empty
// the generators will be very noisy about what they are doing
var Debug = os.Getenv("DEBUG") != "" || os.Getenv("SWAGGER_DEBUG") != ""
func logDebug(frmt string, args ...interface{}) {
if Debug {
_, file, pos, _ := runtime.Caller(2)
log.Printf("%s:%d: %s", filepath.Base(file), pos, fmt.Sprintf(frmt, args...))
}
}
|
[
"\"DEBUG\"",
"\"SWAGGER_DEBUG\""
] |
[] |
[
"SWAGGER_DEBUG",
"DEBUG"
] |
[]
|
["SWAGGER_DEBUG", "DEBUG"]
|
go
| 2 | 0 | |
src/python/pants/bin/daemon_pants_runner.py
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
import os
import sys
import time
from contextlib import contextmanager
from threading import Lock
from typing import Dict, Tuple
from pants.base.exiter import PANTS_FAILED_EXIT_CODE, ExitCode
from pants.bin.local_pants_runner import LocalPantsRunner
from pants.engine.internals.native import Native, RawFdRunner
from pants.init.logging import (
clear_logging_handlers,
get_logging_handlers,
set_logging_handlers,
setup_logging,
)
from pants.init.util import clean_global_runtime_state
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.pantsd.pants_daemon_core import PantsDaemonCore
from pants.util.contextutil import argv_as, hermetic_environment_as, stdio_as
logger = logging.getLogger(__name__)
class ExclusiveRequestTimeout(Exception):
"""Represents a timeout while waiting for another request to complete."""
class DaemonPantsRunner(RawFdRunner):
"""A RawFdRunner (callable) that will be called for each client request to Pantsd."""
def __init__(self, core: PantsDaemonCore) -> None:
super().__init__()
self._core = core
self._run_lock = Lock()
@staticmethod
def _send_stderr(stderr_fd: int, msg: str) -> None:
"""Used to send stderr on a raw filehandle _before_ stdio replacement.
After stdio replacement has happened via `stdio_as` (which mutates sys.std*, and thus cannot
happen until the request lock has been acquired), sys.std* should be used directly.
"""
with os.fdopen(stderr_fd, mode="w", closefd=False) as stderr:
print(msg, file=stderr, flush=True)
@contextmanager
def _one_run_at_a_time(self, stderr_fd: int, timeout: float):
"""Acquires exclusive access within the daemon.
Periodically prints a message on the given stderr_fd while exclusive access cannot be
acquired.
"""
should_poll_forever = timeout <= 0
start = time.time()
deadline = None if should_poll_forever else start + timeout
def should_keep_polling(now):
return not deadline or deadline > now
acquired = self._run_lock.acquire(blocking=False)
if not acquired:
# If we don't acquire immediately, send an explanation.
length = "forever" if should_poll_forever else "up to {} seconds".format(timeout)
self._send_stderr(
stderr_fd,
f"Another pants invocation is running. Will wait {length} for it to finish before giving up.\n"
"If you don't want to wait for the first run to finish, please press Ctrl-C and run "
"this command with PANTS_CONCURRENT=True in the environment.\n",
)
while True:
now = time.time()
if acquired:
try:
yield
break
finally:
self._run_lock.release()
elif should_keep_polling(now):
self._send_stderr(
stderr_fd,
f"Waiting for invocation to finish (waited for {int(now - start)}s so far)...\n",
)
acquired = self._run_lock.acquire(blocking=True, timeout=5)
else:
raise ExclusiveRequestTimeout(
"Timed out while waiting for another pants invocation to finish."
)
@contextmanager
def _stderr_logging(self, global_bootstrap_options):
"""Temporarily replaces existing handlers (ie, the pantsd handler) with a stderr handler.
In the context of pantsd, there will be an existing handler for the pantsd log, which we
temporarily replace. Making them additive would cause per-run logs to go to pantsd, which
we don't want.
TODO: It would be good to handle logging destinations entirely via the threadlocal state
rather than via handler mutations.
"""
handlers = get_logging_handlers()
try:
clear_logging_handlers()
Native().override_thread_logging_destination_to_just_stderr()
setup_logging(global_bootstrap_options, stderr_logging=True)
yield
finally:
Native().override_thread_logging_destination_to_just_pantsd()
set_logging_handlers(handlers)
def single_daemonized_run(self, working_dir: str) -> ExitCode:
"""Run a single daemonized run of Pants.
All aspects of the `sys` global should already have been replaced in `__call__`, so this
method should not need any special handling for the fact that it's running in a proxied
environment.
"""
# Capture the client's start time, which we propagate here in order to get an accurate
# view of total time.
env_start_time = os.environ.get("PANTSD_RUNTRACKER_CLIENT_START_TIME", None)
start_time = float(env_start_time) if env_start_time else time.time()
# Clear global mutable state before entering `LocalPantsRunner`. Note that we use
# `sys.argv` and `os.environ`, since they have been mutated to maintain the illusion
# of a local run: once we allow for concurrent runs, this information should be
# propagated down from the caller.
# see https://github.com/pantsbuild/pants/issues/7654
clean_global_runtime_state(reset_subsystem=True)
options_bootstrapper = OptionsBootstrapper.create(
env=os.environ, args=sys.argv, allow_pantsrc=True
)
bootstrap_options = options_bootstrapper.bootstrap_options
global_bootstrap_options = bootstrap_options.for_global_scope()
# Run using the pre-warmed Session.
with self._stderr_logging(global_bootstrap_options):
try:
scheduler = self._core.prepare_scheduler(options_bootstrapper)
runner = LocalPantsRunner.create(
os.environ, options_bootstrapper, scheduler=scheduler
)
return runner.run(start_time)
except Exception as e:
logger.exception(e)
return PANTS_FAILED_EXIT_CODE
except KeyboardInterrupt:
print("Interrupted by user.\n", file=sys.stderr)
return PANTS_FAILED_EXIT_CODE
def __call__(
self,
command: str,
args: Tuple[str, ...],
env: Dict[str, str],
working_directory: bytes,
stdin_fd: int,
stdout_fd: int,
stderr_fd: int,
) -> ExitCode:
request_timeout = float(env.get("PANTSD_REQUEST_TIMEOUT_LIMIT", -1))
# NB: Order matters: we acquire a lock before mutating either `sys.std*`, `os.environ`, etc.
with self._one_run_at_a_time(stderr_fd, timeout=request_timeout), stdio_as(
stdin_fd=stdin_fd, stdout_fd=stdout_fd, stderr_fd=stderr_fd
), hermetic_environment_as(**env), argv_as((command,) + args):
# NB: Run implements exception handling, so only the most primitive errors will escape
# this function, where they will be logged to the pantsd.log by the server.
logger.info(f"handling request: `{' '.join(args)}`")
try:
return self.single_daemonized_run(working_directory.decode())
finally:
logger.info(f"request completed: `{' '.join(args)}`")
|
[] |
[] |
[
"PANTSD_RUNTRACKER_CLIENT_START_TIME"
] |
[]
|
["PANTSD_RUNTRACKER_CLIENT_START_TIME"]
|
python
| 1 | 0 | |
main.go
|
// Copyright 2017, 2021 Tamás Gulácsi
//
// SPDX-License-Identifier: Apache-2.0
package main
import (
"bytes"
"context"
"database/sql"
"encoding/csv"
"errors"
"flag"
"fmt"
"io"
"os"
"os/exec"
"path"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"time"
"unicode"
"golang.org/x/sync/errgroup"
"github.com/go-kit/kit/log"
"github.com/tgulacsi/go/loghlp/kitloghlp"
custom "github.com/tgulacsi/oracall/custom"
oracall "github.com/tgulacsi/oracall/lib"
// for Oracle-specific drivers
"github.com/godror/godror"
)
//go:generate go generate ./lib
// Should install protobuf-compiler to use it, like
// curl -L https://github.com/google/protobuf/releases/download/v3.0.0-beta-2/protoc-3.0.0-beta-2-linux-x86_64.zip -o /tmp/protoc-3.0.0-beta-2-linux-x86_64.zip && unzip -p /tmp/protoc-3.0.0-beta-2-linux-x86_64.zip protoc >$HOME/bin/protoc
var logger = kitloghlp.New(os.Stderr)
var flagConnect = flag.String("connect", "", "connect to DB for retrieving function arguments")
func main() {
oracall.Log = log.With(logger, "lib", "oracall").Log
if err := Main(os.Args); err != nil {
logger.Log("error", fmt.Sprintf("%+v", err))
os.Exit(1)
}
}
func Main(args []string) error {
os.Args = args
gopSrc := filepath.Join(os.Getenv("GOPATH"), "src")
flag.BoolVar(&oracall.SkipMissingTableOf, "skip-missing-table-of", true, "skip functions with missing TableOf info")
flagDump := flag.String("dump", "", "dump to this csv")
flagBaseDir := flag.String("base-dir", gopSrc, "base dir for the -pb-out, -db-out flags")
flagPbOut := flag.String("pb-out", "", "package import path for the Protocol Buffers files, optionally with the package name, like \"my/pb-pkg:main\"")
flagDbOut := flag.String("db-out", "-:main", "package name of the generated functions, optionally with the package name, like \"my/db-pkg:main\"")
flagGenerator := flag.String("protoc-gen", "gogofast", "use protoc-gen-<generator>")
flag.BoolVar(&oracall.NumberAsString, "number-as-string", false, "add ,string to json tags")
flag.BoolVar(&custom.ZeroIsAlmostZero, "zero-is-almost-zero", false, "zero should be just almost zero, to distinguish 0 and non-set field")
flagVerbose := flag.Bool("v", false, "verbose logging")
flagExcept := flag.String("except", "", "except these functions")
flagReplace := flag.String("replace", "", "funcA=>funcB")
flag.IntVar(&oracall.MaxTableSize, "max-table-size", oracall.MaxTableSize, "maximum table size for PL/SQL associative arrays")
flag.Parse()
if *flagPbOut == "" {
if *flagDbOut == "" {
return errors.New("-pb-out or -db-out is required")
}
*flagPbOut = *flagDbOut
} else if *flagDbOut == "" {
*flagDbOut = *flagPbOut
}
pbPath, pbPkg := parsePkgFlag(*flagPbOut)
dbPath, dbPkg := parsePkgFlag(*flagDbOut)
Log := logger.Log
pattern := flag.Arg(0)
if pattern == "" {
pattern = "%"
}
oracall.Gogo = *flagGenerator != "go"
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
defer cancel()
var functions []oracall.Function
var err error
filters := [](func(string) bool){func(string) bool { return true }}
filter := func(s string) bool {
for _, f := range filters {
if !f(s) {
return false
}
}
return true
}
if *flagExcept != "" {
except := strings.FieldsFunc(*flagExcept, func(r rune) bool { return r == ',' || unicode.IsSpace(r) })
Log("except", except)
filters = append(filters, func(s string) bool {
for _, e := range except {
if strings.EqualFold(e, s) {
return false
}
}
return true
})
}
var annotations []oracall.Annotation
if *flagConnect == "" {
if pattern != "%" {
rPattern := regexp.MustCompile("(?i)" + strings.Replace(strings.Replace(pattern, ".", "[.]", -1), "%", ".*", -1))
filters = append(filters, func(s string) bool {
return rPattern.MatchString(s)
})
}
functions, err = oracall.ParseCsvFile("", filter)
} else {
var cx *sql.DB
P, parseErr := godror.ParseConnString(*flagConnect)
if parseErr != nil {
return fmt.Errorf("%s: %w", *flagConnect, parseErr)
}
P.StandaloneConnection = false
cx = sql.OpenDB(godror.NewConnector(P))
defer cx.Close()
cx.SetMaxIdleConns(0)
if *flagVerbose {
godror.SetLogger(log.With(logger, "lib", "godror"))
}
if err = cx.Ping(); err != nil {
return fmt.Errorf("ping %s: %w", *flagConnect, err)
}
functions, annotations, err = parseDB(ctx, cx, pattern, *flagDump, filter)
}
if err != nil {
return fmt.Errorf("read %s: %w", flag.Arg(0), err)
}
defer os.Stdout.Sync()
out := os.Stdout
var testOut *os.File
if dbPath != "" && dbPath != "-" {
fn := "oracall.go"
if dbPkg != "main" {
fn = dbPkg + ".go"
}
fn = filepath.Join(*flagBaseDir, dbPath, fn)
Log("msg", "Writing generated functions", "file", fn)
os.MkdirAll(filepath.Dir(fn), 0775)
if out, err = os.Create(fn); err != nil {
return fmt.Errorf("create %s: %w", fn, err)
}
testFn := fn[:len(fn)-3] + "_test.go"
if testOut, err = os.Create(testFn); err != nil {
return fmt.Errorf("create %s: %w", testFn, err)
}
defer func() {
if err := out.Close(); err != nil {
Log("msg", "close", "file", out.Name(), "error", err)
}
if err := testOut.Close(); err != nil {
Log("msg", "close", "file", testOut.Name(), "error", err)
}
}()
}
*flagReplace = strings.TrimSpace(*flagReplace)
for _, elt := range strings.FieldsFunc(
rReplace.ReplaceAllLiteralString(*flagReplace, "=>"),
func(r rune) bool { return r == ',' || unicode.IsSpace(r) }) {
i := strings.Index(elt, "=>")
if i < 0 {
continue
}
a := oracall.Annotation{Type: "replace", Name: elt[:i], Other: elt[i+2:]}
if i = strings.IndexByte(a.Name, '.'); i >= 0 {
a.Package, a.Name = a.Name[:i], a.Name[i+1:]
a.Other = strings.TrimPrefix(a.Other, a.Package)
}
annotations = append(annotations, a)
}
Log("annotations", annotations)
functions = oracall.ApplyAnnotations(functions, annotations)
sort.Slice(functions, func(i, j int) bool { return functions[i].Name() < functions[j].Name() })
var grp errgroup.Group
grp.Go(func() error {
pbPath := pbPath
if pbPath == dbPath {
pbPath = ""
}
if err := oracall.SaveFunctions(
out, functions,
dbPkg, pbPath, false,
); err != nil {
return fmt.Errorf("save functions: %w", err)
}
return nil
})
if testOut != nil {
grp.Go(func() error {
pbPath := pbPath
if pbPath == dbPath {
pbPath = ""
}
if err := oracall.SaveFunctionTests(
testOut, functions,
dbPkg, pbPath, false,
); err != nil {
return fmt.Errorf("save function tests: %w", err)
}
return nil
})
}
grp.Go(func() error {
pbFn := "oracall.proto"
if pbPkg != "main" {
pbFn = pbPkg + ".proto"
}
pbFn = filepath.Join(*flagBaseDir, pbPath, pbFn)
os.MkdirAll(filepath.Dir(pbFn), 0775)
Log("msg", "Writing Protocol Buffers", "file", pbFn)
fh, err := os.Create(pbFn)
if err != nil {
return fmt.Errorf("create proto: %w", err)
}
err = oracall.SaveProtobuf(fh, functions, pbPkg, pbPath)
if closeErr := fh.Close(); closeErr != nil && err == nil {
err = closeErr
}
if err != nil {
return fmt.Errorf("SaveProtobuf: %w", err)
}
args := make([]string, 0, 4)
if *flagGenerator == "go" {
args = append(args,
"--"+*flagGenerator+"_out=:"+*flagBaseDir,
"--go-grpc_out=:"+*flagBaseDir)
} else {
args = append(args,
"--"+*flagGenerator+"_out=Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,plugins=grpc:"+*flagBaseDir)
}
cmd := exec.CommandContext(ctx,
"protoc",
append(args, "--proto_path="+*flagBaseDir+":.",
pbFn)...,
)
cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
err = cmd.Run()
Log("msg", "protoc", "args", cmd.Args, "error", err)
if err != nil {
return fmt.Errorf("%q: %w", cmd.Args, err)
}
if *flagGenerator == "go" {
fn := strings.TrimSuffix(pbFn, ".proto") + ".pb.go"
cmd = exec.CommandContext(ctx, "sed", "-i", "-e",
`/timestamp "github.com\/golang\/protobuf\/ptypes\/timestamp"/ s,timestamp.*$,custom "github.com/tgulacsi/oracall/custom",; /timestamp\.Timestamp/ s/timestamp\.Timestamp/custom.Timestamp/g`,
fn,
)
cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
err = cmd.Run()
Log("msg", "replace timestamppb", "file", fn, "args", cmd.Args, "error", err)
if err != nil {
return fmt.Errorf("%q: %w", cmd.Args, err)
}
}
return nil
})
if err := grp.Wait(); err != nil {
return err
}
return nil
}
type dbRow struct {
Package, Object, InOut sql.NullString
dbType
SubID sql.NullInt64
OID, Seq int
}
func (r dbRow) String() string {
return fmt.Sprintf("%s.%s %s", r.Package.String, r.Object.String, r.dbType)
}
type dbType struct {
Argument string
Data, PLS, Owner, Name, Subname, Link, Charset string
Level int
Prec, Scale, Length sql.NullInt64
}
func (t dbType) String() string {
return fmt.Sprintf("%s{%s}[%d](%s/%s.%s.%s@%s)", t.Argument, t.Data, t.Level, t.PLS, t.Owner, t.Name, t.Subname, t.Link)
}
func parseDB(ctx context.Context, cx *sql.DB, pattern, dumpFn string, filter func(string) bool) (functions []oracall.Function, annotations []oracall.Annotation, err error) {
tbl, objTbl := "user_arguments", "user_objects"
if strings.HasPrefix(pattern, "DBMS_") || strings.HasPrefix(pattern, "UTL_") {
tbl, objTbl = "all_arguments", "all_objects"
}
argumentsQry := `` + //nolint:gas
`SELECT A.*
FROM
(SELECT DISTINCT object_id object_id, subprogram_id, sequence*100 seq,
package_name, object_name,
data_level, argument_name, in_out,
data_type, data_precision, data_scale, character_set_name,
pls_type, char_length, type_owner, type_name, type_subname, type_link
FROM ` + tbl + `
WHERE data_type <> 'OBJECT' AND package_name||'.'||object_name LIKE UPPER(:1)
UNION ALL
SELECT DISTINCT object_id object_id, subprogram_id, A.sequence*100 + B.attr_no,
package_name, object_name,
A.data_level, B.attr_name, A.in_out,
B.ATTR_TYPE_NAME, B.PRECISION, B.scale, B.character_set_name,
NVL2(B.ATTR_TYPE_OWNER, B.attr_type_owner||'.', '')||B.attr_type_name, B.length,
NULL, NULL, NULL, NULL
FROM all_type_attrs B, ` + tbl + ` A
WHERE B.owner = A.type_owner AND B.type_name = A.type_name AND
A.data_type = 'OBJECT' AND
A.package_name||'.'||A.object_name LIKE UPPER(:2)
) A
ORDER BY 1, 2, 3`
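// The query unions plain PL/SQL arguments with the attributes of OBJECT-typed arguments,
// so both arrive as uniform rows ordered by (object_id, subprogram_id, seq).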
ctx, cancel := context.WithTimeout(ctx, 10*time.Minute)
defer cancel()
objTimeQry := `SELECT last_ddl_time FROM ` + objTbl + ` WHERE object_name = :1 AND object_type <> 'PACKAGE BODY'`
objTimeStmt, err := cx.PrepareContext(ctx, objTimeQry)
if err != nil {
return nil, nil, fmt.Errorf("%s: %w", objTimeQry, err)
}
defer objTimeStmt.Close()
getObjTime := func(name string) (time.Time, error) {
var t time.Time
if err := objTimeStmt.QueryRowContext(ctx, name).Scan(&t); err != nil {
return t, fmt.Errorf("%s [%q]: %w", objTimeQry, name, err)
}
return t, nil
}
dbCh := make(chan dbRow)
grp, grpCtx := errgroup.WithContext(ctx)
grp.Go(func() error {
defer close(dbCh)
var collStmt, attrStmt *sql.Stmt
qry := `SELECT coll_type, elem_type_owner, elem_type_name, elem_type_package,
length, precision, scale, character_set_name, index_by,
(SELECT MIN(typecode) FROM all_plsql_types B
WHERE B.owner = A.elem_type_owner AND
B.type_name = A.elem_type_name AND
B.package_name = A.elem_type_package) typecode
FROM all_plsql_coll_types A
WHERE owner = :owner AND package_name = :pkg AND type_name = :sub
UNION
SELECT coll_type, elem_type_owner, elem_type_name, NULL elem_type_package,
length, precision, scale, character_set_name, NULL index_by,
(SELECT MIN(typecode) FROM all_types B
WHERE B.owner = A.elem_type_owner AND
B.type_name = A.elem_type_name) typecode
FROM all_coll_types A
WHERE (owner, type_name) IN (
SELECT :owner, :pkg FROM DUAL
UNION
SELECT table_owner, table_name||NVL2(db_link, '@'||db_link, NULL)
FROM user_synonyms
WHERE synonym_name = :pkg)`
var resolveTypeShort func(ctx context.Context, typ, owner, name, sub string) ([]dbType, error)
var err error
if collStmt, err = cx.PrepareContext(grpCtx, qry); err != nil {
logger.Log("WARN", fmt.Errorf("%s: %w", qry, err))
} else {
defer collStmt.Close()
if rows, err := collStmt.QueryContext(grpCtx,
sql.Named("owner", ""), sql.Named("pkg", ""), sql.Named("sub", ""),
); err != nil {
collStmt = nil
} else {
rows.Close()
qry = `SELECT attr_name, attr_type_owner, attr_type_name, attr_type_package,
length, precision, scale, character_set_name, attr_no,
(SELECT MIN(typecode) FROM all_plsql_types B
WHERE B.owner = A.attr_type_owner AND B.type_name = A.attr_type_name AND B.package_name = A.attr_type_package) typecode
FROM all_plsql_type_attrs A
WHERE owner = :owner AND package_name = :pkg AND type_name = :sub
UNION ALL
SELECT column_name, data_type_owner, data_type, NULL AS attr_type_package,
data_length, data_precision, data_scale, character_set_name, column_id AS attr_no,
'PL/SQL RECORD' AS typecode
FROM all_tab_cols A
WHERE NOT EXISTS (SELECT 1 FROM all_plsql_type_attrs B
WHERE B.owner = :owner AND package_name = :pkg AND type_name = :sub) AND
hidden_column = 'NO' AND INSTR(column_name, '$') = 0 AND
owner = :owner AND table_name = :pkg
ORDER BY attr_no`
if attrStmt, err = cx.PrepareContext(grpCtx, qry); err != nil {
logger.Log("WARN", fmt.Errorf("%s: %w", qry, err))
} else {
defer attrStmt.Close()
if rows, err := attrStmt.QueryContext(grpCtx,
sql.Named("owner", ""), sql.Named("pkg", ""), sql.Named("sub", ""),
); err != nil {
attrStmt = nil
} else {
rows.Close()
resolveTypeShort = func(ctx context.Context, typ, owner, name, sub string) ([]dbType, error) {
return resolveType(ctx, collStmt, attrStmt, typ, owner, name, sub)
}
}
}
}
}
qry = argumentsQry
rows, err := cx.QueryContext(grpCtx,
qry, pattern, pattern, godror.FetchArraySize(1024), godror.PrefetchCount(1025),
)
if err != nil {
logger.Log("qry", qry, "error", err)
return fmt.Errorf("%s: %w", qry, err)
}
defer rows.Close()
var seq int
for rows.Next() {
var row dbRow
if err = rows.Scan(&row.OID, &row.SubID, &row.Seq, &row.Package, &row.Object,
&row.Level, &row.Argument, &row.InOut,
&row.Data, &row.Prec, &row.Scale, &row.Charset,
&row.PLS, &row.Length, &row.Owner, &row.Name, &row.Subname, &row.Link,
); err != nil {
return fmt.Errorf("reading row=%v: %w", rows, err)
}
row.Seq = seq
seq++
select {
case <-grpCtx.Done():
return grpCtx.Err()
case dbCh <- row:
}
if resolveTypeShort == nil {
continue
}
if row.Data == "PL/SQL TABLE" || row.Data == "PL/SQL RECORD" || row.Data == "REF CURSOR" || row.Data == "TABLE" {
plus, err := resolveTypeShort(grpCtx, row.Data, row.Owner, row.Name, row.Subname)
if err != nil {
return err
}
if plus, err = expandArgs(grpCtx, plus, resolveTypeShort); err != nil {
return err
}
for _, p := range plus {
row.Seq = seq
seq++
row.Argument, row.Data, row.Length, row.Prec, row.Scale, row.Charset = p.Argument, p.Data, p.Length, p.Prec, p.Scale, p.Charset
row.Owner, row.Name, row.Subname, row.Link = p.Owner, p.Name, p.Subname, p.Link
row.Level = p.Level
//logger.Log("arg", row.Argument, "row", row.Length, "p", p.Length)
select {
case <-grpCtx.Done():
return grpCtx.Err()
case dbCh <- row:
}
}
}
}
if err != nil {
return fmt.Errorf("walking rows: %w", err)
}
return nil
})
var cwMu sync.Mutex
var cw *csv.Writer
if dumpFn != "" {
var lastOk bool
qry := argumentsQry
qry = qry[:strings.Index(qry, "FROM "+tbl)] //nolint:gas
qry = strings.TrimPrefix(qry[strings.LastIndex(qry, "SELECT ")+7:], "DISTINCT ")
colNames := strings.Split(
strings.Map(
func(r rune) rune {
if 'A' <= r && r <= 'Z' || '0' <= r && r <= '9' || r == '_' {
lastOk = true
return r
}
if 'a' <= r && r <= 'z' {
lastOk = true
return unicode.ToUpper(r)
}
if r == ',' {
return r
}
if lastOk {
lastOk = false
return ' '
}
return -1
},
qry,
),
",",
)
for i, nm := range colNames {
nm = strings.TrimSpace(nm)
colNames[i] = nm
if j := strings.LastIndexByte(nm, ' '); j >= 0 {
colNames[i] = nm[j+1:]
}
}
var fh *os.File
if fh, err = os.Create(dumpFn); err != nil {
logger.Log("msg", "create", "dump", dumpFn, "error", err)
return functions, annotations, fmt.Errorf("%s: %w", dumpFn, err)
}
defer func() {
cwMu.Lock()
cw.Flush()
flushErr := cw.Error()
cwMu.Unlock()
if flushErr != nil {
err = fmt.Errorf("csv flush: %w", flushErr)
logger.Log("msg", "flush", "csv", fh.Name(), "error", err)
}
if err = fh.Close(); err != nil {
logger.Log("msg", "close", "dump", fh.Name(), "error", err)
}
}()
cwMu.Lock()
cw = csv.NewWriter(fh)
err = cw.Write(colNames)
cwMu.Unlock()
if err != nil {
logger.Log("msg", "write header to csv", "error", err)
return functions, annotations, fmt.Errorf("write header: %w", err)
}
}
var prevPackage string
var docsMu sync.Mutex
var replMu sync.Mutex
docs := make(map[string]string)
userArgs := make(chan oracall.UserArgument, 16)
grp.Go(func() error {
defer close(userArgs)
var pkgTime time.Time
ctx := grpCtx
Loop:
for {
var row dbRow
var ok bool
select {
case <-ctx.Done():
return ctx.Err()
case row, ok = <-dbCh:
if !ok {
break Loop
}
if row.Name == "" {
row.PLS = row.Data
} else {
row.PLS = row.Owner + "." + row.Name + "." + row.Subname
if row.Link != "" {
row.PLS += "@" + row.Link
}
}
//logger.Log("arg", row.Argument, "name", row.Name, "sub", row.Subname, "data", row.Data, "pls", row.PLS)
}
//logger.Log("row", row)
var ua oracall.UserArgument
ua.DataType = row.Data
ua.InOut = row.InOut.String
if cw != nil {
N := i64ToString
cwMu.Lock()
err := cw.Write([]string{
strconv.Itoa(row.OID), N(row.SubID), strconv.Itoa(row.Seq), row.Package.String, row.Object.String,
strconv.Itoa(row.Level), row.Argument, ua.InOut,
ua.DataType, N(row.Prec), N(row.Scale), row.Charset,
row.PLS, N(row.Length),
row.Owner, row.Name, row.Subname, row.Link,
})
cwMu.Unlock()
if err != nil {
return fmt.Errorf("write csv: %w", err)
}
}
if !row.Package.Valid {
continue
}
ua.PackageName = row.Package.String
if ua.PackageName != prevPackage {
if pkgTime, err = getObjTime(ua.PackageName); err != nil {
return err
}
prevPackage = ua.PackageName
grp.Go(func() error {
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
Log := log.With(logger, "package", ua.PackageName).Log
if srcErr := getSource(ctx, buf, cx, ua.PackageName); srcErr != nil {
Log("WARN", "getSource", "error", srcErr)
return nil
}
replMu.Lock()
for _, b := range rAnnotation.FindAll(buf.Bytes(), -1) {
b = bytes.TrimSpace(bytes.TrimPrefix(b, []byte("--oracall:")))
a := oracall.Annotation{Package: ua.PackageName}
if i := bytes.IndexByte(b, ' '); i < 0 {
continue
} else {
a.Type, b = string(b[:i]), b[i+1:]
}
if i := bytes.Index(b, []byte("=>")); i < 0 {
if i = bytes.IndexByte(b, '='); i < 0 {
a.Name = string(bytes.TrimSpace(b))
} else {
a.Name = string(bytes.TrimSpace(b[:i]))
if a.Size, err = strconv.Atoi(string(bytes.TrimSpace(b[i+1:]))); err != nil {
return err
}
}
} else {
a.Name, a.Other = string(bytes.TrimSpace(b[:i])), string(bytes.TrimSpace(b[i+2:]))
}
annotations = append(annotations, a)
}
bb := buf.Bytes()
if len(annotations) != 0 {
Log("annotations", annotations)
bb = rAnnotation.ReplaceAll(bb, nil)
}
replMu.Unlock()
subCtx, subCancel := context.WithTimeout(ctx, 1*time.Minute)
funDocs, docsErr := parseDocs(subCtx, string(bb))
subCancel()
Log("msg", "parseDocs", "docs", len(funDocs), "error", docsErr)
docsMu.Lock()
pn := oracall.UnoCap(ua.PackageName) + "."
for nm, doc := range funDocs {
docs[pn+strings.ToLower(nm)] = doc
}
docsMu.Unlock()
if docsErr == context.DeadlineExceeded {
docsErr = nil
}
return docsErr
})
}
ua.LastDDL = pkgTime
if row.Object.Valid {
ua.ObjectName = row.Object.String
}
if row.Argument != "" {
ua.ArgumentName = row.Argument
}
if row.Charset != "" {
ua.CharacterSetName = row.Charset
}
if row.PLS != "" {
ua.PlsType = row.PLS
}
if row.Owner != "" {
ua.TypeOwner = row.Owner
}
if row.Name != "" {
ua.TypeName = row.Name
}
if row.Subname != "" {
ua.TypeSubname = row.Subname
}
if row.Link != "" {
ua.TypeLink = row.Link
}
ua.ObjectID = uint(row.OID)
if row.SubID.Valid {
ua.SubprogramID = uint(row.SubID.Int64)
}
ua.DataLevel = uint8(row.Level)
ua.Position = uint(row.Seq)
if row.Prec.Valid {
ua.DataPrecision = uint8(row.Prec.Int64)
}
if row.Scale.Valid {
ua.DataScale = uint8(row.Scale.Int64)
}
if row.Length.Valid {
ua.CharLength = uint(row.Length.Int64)
}
userArgs <- ua
}
return nil
})
filteredArgs := make(chan []oracall.UserArgument, 16)
grp.Go(func() error { oracall.FilterAndGroup(filteredArgs, userArgs, filter); return nil })
functions = oracall.ParseArguments(filteredArgs, filter)
if grpErr := grp.Wait(); grpErr != nil {
logger.Log("msg", "ParseArguments", "error", fmt.Sprintf("%+v", grpErr))
}
docNames := make([]string, 0, len(docs))
for k := range docs {
docNames = append(docNames, k)
}
sort.Strings(docNames)
var any bool
for i, f := range functions {
if f.Documentation == "" {
if f.Documentation = docs[f.Name()]; f.Documentation == "" {
//logger.Log("msg", "No documentation", "function", f.Name())
any = true
} else {
functions[i] = f
}
}
}
if any {
logger.Log("has", docNames)
}
return functions, annotations, nil
}
var bufPool = sync.Pool{New: func() interface{} { return bytes.NewBuffer(make([]byte, 0, 1024)) }}
func getSource(ctx context.Context, w io.Writer, cx *sql.DB, packageName string) error {
qry := "SELECT text FROM user_source WHERE name = UPPER(:1) AND type = 'PACKAGE' ORDER BY line"
rows, err := cx.QueryContext(ctx, qry, packageName, godror.PrefetchCount(129))
if err != nil {
return fmt.Errorf("%s [%q]: %w", qry, packageName, err)
}
defer rows.Close()
for rows.Next() {
var line sql.NullString
if err := rows.Scan(&line); err != nil {
return fmt.Errorf("%s: %w", qry, err)
}
if _, err := io.WriteString(w, line.String); err != nil {
return err
}
}
if err := rows.Err(); err != nil {
return fmt.Errorf("%s: %w", qry, err)
}
return nil
}
func i64ToString(n sql.NullInt64) string {
if n.Valid {
return strconv.FormatInt(n.Int64, 10)
}
return ""
}
func parsePkgFlag(s string) (string, string) {
if i := strings.LastIndexByte(s, ':'); i >= 0 {
return s[:i], s[i+1:]
}
pkg := path.Base(s)
if pkg == "" {
pkg = "main"
}
return s, pkg
}
var rReplace = regexp.MustCompile(`\s*=>\s*`)
var rAnnotation = regexp.MustCompile(`--oracall:(?:(replace(_json)?|rename)\s+[a-zA-Z0-9_#]+\s*=>\s*[a-zA-Z0-9_#]+|(handle|private)\s+[a-zA-Z0-9_#]+|max-table-size\s+[a-zA-Z0-9_$]+\s*=\s*[0-9]+)`)
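// resolveType expands a PL/SQL TABLE / RECORD / REF CURSOR argument into its element or attribute
// types by querying the all_plsql_coll_types and all_plsql_type_attrs data dictionary views.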
func resolveType(ctx context.Context, collStmt, attrStmt *sql.Stmt, typ, owner, pkg, sub string) ([]dbType, error) {
plus := make([]dbType, 0, 4)
var rows *sql.Rows
var err error
switch typ {
case "PL/SQL TABLE", "PL/SQL INDEX TABLE", "TABLE":
/*SELECT coll_type, elem_type_owner, elem_type_name, elem_type_package,
length, precision, scale, character_set_name, index_by
FROM all_plsql_coll_types
WHERE owner = :1 AND package_name = :2 AND type_name = :3*/
if rows, err = collStmt.QueryContext(ctx,
sql.Named("owner", owner), sql.Named("pkg", pkg), sql.Named("sub", sub),
); err != nil {
return plus, err
}
defer rows.Close()
for rows.Next() {
var t dbType
var indexBy, typeCode string
if err = rows.Scan(&t.Data, &t.Owner, &t.Subname, &t.Name,
&t.Length, &t.Prec, &t.Scale, &t.Charset, &indexBy, &typeCode,
); err != nil {
return plus, err
}
if typeCode != "COLLECTION" {
t.Data = typeCode
}
if t.Data == "" {
t.Data = t.Subname
}
if t.Data == "PL/SQL INDEX TABLE" {
t.Data = "PL/SQL TABLE"
}
t.Level = 1
plus = append(plus, t)
}
case "REF CURSOR":
/*
ARGUMENT_NAME SEQUENCE DATA_LEVEL DATA_TYPE
1 0 REF CURSOR
2 1 PL/SQL RECORD
SZERZ_AZON 3 2 NUMBER
UZENET_TIP 4 2 CHAR
HIBAKOD 5 2 VARCHAR2
DATUM 6 2 DATE
UTOLSO_TIP 7 2 CHAR
JAVITVA 8 2 VARCHAR2
P_IDO_TOL 9 0 DATE
P_IDO_IG 10 0 DATE
*/
plus = append(plus, dbType{Owner: owner, Name: pkg, Subname: sub, Data: "PL/SQL RECORD", Level: 1})
case "PL/SQL RECORD":
/*SELECT attr_name, attr_type_owner, attr_type_name, attr_type_package,
length, precision, scale, character_set_name, attr_no
FROM all_plsql_type_attrs
WHERE owner = :1 AND package_name = :2 AND type_name = :3
ORDER BY attr_no*/
if rows, err = attrStmt.QueryContext(ctx,
sql.Named("owner", owner), sql.Named("pkg", pkg), sql.Named("sub", sub),
); err != nil {
return plus, err
}
//logger.Log("owner", owner, "pkg", pkg, "sub", sub)
defer rows.Close()
for rows.Next() {
var t dbType
var attrNo sql.NullInt64
var typeCode string
if err = rows.Scan(&t.Argument, &t.Owner, &t.Subname, &t.Name,
&t.Length, &t.Prec, &t.Scale, &t.Charset, &attrNo, &typeCode,
); err != nil {
return plus, err
}
t.Data = typeCode
if typeCode == "COLLECTION" {
t.Data = "PL/SQL TABLE"
}
if t.Owner == "" && t.Subname != "" {
t.Data = t.Subname
}
if t.Data == "PL/SQL INDEX TABLE" {
t.Data = "PL/SQL TABLE"
}
t.Level = 1
plus = append(plus, t)
}
default:
return nil, fmt.Errorf("%s: %w", typ, errors.New("unknown type"))
}
if rows != nil {
err = rows.Err()
}
if len(plus) == 0 && err == nil {
err = fmt.Errorf("%s/%s.%s.%s: %w", typ, owner, pkg, sub, errors.New("not found"))
}
return plus, err
}
// SUBPROGRAM_ID ARGUMENT_NAME SEQUENCE DATA_LEVEL DATA_TYPE IN_OUT
// P_KARSZAM 1 0 NUMBER IN
// P_TSZAM 2 0 NUMBER IN
// P_OUTPUT 3 0 PL/SQL TABLE OUT
// 4 1 PL/SQL RECORD OUT
// F_SZERZ_AZON 5 2 NUMBER OUT
/*
ARGUMENT_NAME SEQUENCE DATA_LEVEL DATA_TYPE TYPE_OWNER TYPE_NAME TYPE_SUBNAME
P_SZERZ_AZON 1 0 NUMBER
P_OUTPUT 2 0 PL/SQL TABLE ABLAK DB_SPOOLSYS3 TYPE_OUTLIST_078
3 1 PL/SQL RECORD ABLAK DB_SPOOLSYS3 TYPE_OUTPUT_078
TRANZ_KEZDETE 4 2 DATE
TRANZ_VEGE 5 2 DATE
KOLTSEG 6 2 NUMBER
ERTE..TT_ALAPOK 7 2 PL/SQL TABLE ABLAK DB_SPOOLSYS3 ATYPE_OUTLIST_UNIT
8 3 PL/SQL RECORD ABLAK DB_SPOOLSYS3 ATYPE_OUTPUT_UNIT
F_UNIT_RNEV 9 4 VARCHAR2
F_UNIT_NEV 10 4 VARCHAR2
F_ISIN 11 4 VARCHAR2
UNIT_DB 12 4 NUMBER
UNIT_ARF 13 4 NUMBER
VASAROLT_ALAPOK 14 2 PL/SQL TABLE ABLAK DB_SPOOLSYS3 ATYPE_OUTLIST_UNIT
15 3 PL/SQL RECORD ABLAK DB_SPOOLSYS3 ATYPE_OUTPUT_UNIT
F_UNIT_RNEV 16 4 VARCHAR2
F_UNIT_NEV 17 4 VARCHAR2
F_ISIN 18 4 VARCHAR2
UNIT_DB 19 4 NUMBER
UNIT_ARF 20 4 NUMBER
*/
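// expandArgs resolves every composite argument (TABLE, PL/SQL TABLE, PL/SQL
// RECORD, REF CURSOR) in place, splicing the resolved element/attribute rows
// in directly after their parent and offsetting their Level by the parent's
// Level, so the flat slice mirrors the DATA_LEVEL nesting shown above.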
func expandArgs(ctx context.Context, plus []dbType, resolveTypeShort func(ctx context.Context, typ, owner, name, sub string) ([]dbType, error)) ([]dbType, error) {
//logger.Log("expand", plus)
for i := 0; i < len(plus); i++ {
p := plus[i]
if p.Data == "PL/SQL INDEX TABLE" {
p.Data = "PL/SQL TABLE"
}
//logger.Log("i", i, "arg", p.Argument, "data", p.Data, "owner", p.Owner, "name", p.Name, "sub", p.Subname)
if p.Data == "TABLE" || p.Data == "PL/SQL TABLE" || p.Data == "PL/SQL RECORD" || p.Data == "REF CURSOR" {
q, err := resolveTypeShort(ctx, p.Data, p.Owner, p.Name, p.Subname)
if err != nil {
return plus, fmt.Errorf("%+v: %w", p, err)
}
//logger.Log("q", q)
for j, x := range q {
if x.Data == "PL/SQL INDEX TABLE" {
x.Data = "PL/SQL TABLE"
}
x.Level += p.Level
q[j] = x
}
plus = append(plus[:i+1], append(q, plus[i+1:]...)...)
}
}
return plus, nil
}
// vim: set fileencoding=utf-8 noet:
| ["\"GOPATH\""] | [] | ["GOPATH"] | [] | ["GOPATH"] | go | 1 | 0 | |
pkg/kubectl/cmd/util/factory_test.go
|
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/user"
"path"
"sort"
"strings"
"testing"
"time"
"k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/util/flag"
manualfake "k8s.io/client-go/rest/fake"
testcore "k8s.io/client-go/testing"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/kubectl/resource"
"k8s.io/kubernetes/pkg/kubectl/validation"
)
func TestNewFactoryDefaultFlagBindings(t *testing.T) {
factory := NewFactory(nil)
if !factory.FlagSet().HasFlags() {
t.Errorf("Expected flags, but didn't get any")
}
}
func TestNewFactoryNoFlagBindings(t *testing.T) {
clientConfig := clientcmd.NewDefaultClientConfig(*clientcmdapi.NewConfig(), &clientcmd.ConfigOverrides{})
factory := NewFactory(clientConfig)
if factory.FlagSet().HasFlags() {
t.Errorf("Expected zero flags, but got %v", factory.FlagSet())
}
}
func TestPortsForObject(t *testing.T) {
f := NewFactory(nil)
pod := &api.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "baz", Namespace: "test", ResourceVersion: "12"},
Spec: api.PodSpec{
Containers: []api.Container{
{
Ports: []api.ContainerPort{
{
ContainerPort: 101,
},
},
},
},
},
}
expected := sets.NewString("101")
ports, err := f.PortsForObject(pod)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
got := sets.NewString(ports...)
if !expected.Equal(got) {
t.Fatalf("Ports mismatch! Expected %v, got %v", expected, got)
}
}
func TestProtocolsForObject(t *testing.T) {
f := NewFactory(nil)
pod := &api.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "baz", Namespace: "test", ResourceVersion: "12"},
Spec: api.PodSpec{
Containers: []api.Container{
{
Ports: []api.ContainerPort{
{
ContainerPort: 101,
Protocol: api.ProtocolTCP,
},
{
ContainerPort: 102,
Protocol: api.ProtocolUDP,
},
},
},
},
},
}
expected := sets.NewString("101/TCP", "102/UDP")
protocolsMap, err := f.ProtocolsForObject(pod)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
protocolsString := kubectl.MakeProtocols(protocolsMap)
protocolsStrings := strings.Split(protocolsString, ",")
got := sets.NewString(protocolsStrings...)
if !expected.Equal(got) {
t.Fatalf("Protocols mismatch! Expected %v, got %v", expected, got)
}
}
func TestLabelsForObject(t *testing.T) {
f := NewFactory(nil)
tests := []struct {
name string
object runtime.Object
expected string
err error
}{
{
name: "successful re-use of labels",
object: &api.Service{
ObjectMeta: metav1.ObjectMeta{Name: "baz", Namespace: "test", Labels: map[string]string{"svc": "test"}},
TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"},
},
expected: "svc=test",
err: nil,
},
{
name: "empty labels",
object: &api.Service{
ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "test", Labels: map[string]string{}},
TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"},
},
expected: "",
err: nil,
},
{
name: "nil labels",
object: &api.Service{
ObjectMeta: metav1.ObjectMeta{Name: "zen", Namespace: "test", Labels: nil},
TypeMeta: metav1.TypeMeta{Kind: "Service", APIVersion: "v1"},
},
expected: "",
err: nil,
},
}
for _, test := range tests {
gotLabels, err := f.LabelsForObject(test.object)
if err != test.err {
t.Fatalf("%s: Error mismatch: Expected %v, got %v", test.name, test.err, err)
}
got := kubectl.MakeLabels(gotLabels)
if test.expected != got {
t.Fatalf("%s: Labels mismatch! Expected %s, got %s", test.name, test.expected, got)
}
}
}
func TestCanBeExposed(t *testing.T) {
factory := NewFactory(nil)
tests := []struct {
kind schema.GroupKind
expectErr bool
}{
{
kind: api.Kind("ReplicationController"),
expectErr: false,
},
{
kind: api.Kind("Node"),
expectErr: true,
},
}
for _, test := range tests {
err := factory.CanBeExposed(test.kind)
if test.expectErr && err == nil {
t.Error("unexpected non-error")
}
if !test.expectErr && err != nil {
t.Errorf("unexpected error: %v", err)
}
}
}
func TestFlagUnderscoreRenaming(t *testing.T) {
factory := NewFactory(nil)
factory.FlagSet().SetNormalizeFunc(flag.WordSepNormalizeFunc)
factory.FlagSet().Bool("valid_flag", false, "bool value")
// In case of failure of this test check this PR: spf13/pflag#23
if factory.FlagSet().Lookup("valid_flag").Name != "valid-flag" {
t.Fatalf("Expected flag name to be valid-flag, got %s", factory.FlagSet().Lookup("valid_flag").Name)
}
}
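// loadSchemaForTest reads the checked-in swagger spec for the default API
// group's registered version and builds a validation.Schema from it.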
func loadSchemaForTest() (validation.Schema, error) {
pathToSwaggerSpec := "../../../../api/swagger-spec/" + api.Registry.GroupOrDie(api.GroupName).GroupVersion.Version + ".json"
data, err := ioutil.ReadFile(pathToSwaggerSpec)
if err != nil {
return nil, err
}
return validation.NewSwaggerSchemaFromBytes(data, nil)
}
func header() http.Header {
header := http.Header{}
header.Set("Content-Type", runtime.ContentTypeJSON)
return header
}
func TestRefetchSchemaWhenValidationFails(t *testing.T) {
schema, err := loadSchemaForTest()
if err != nil {
t.Errorf("Error loading schema: %v", err)
t.FailNow()
}
output, err := json.Marshal(schema)
if err != nil {
t.Errorf("Error serializing schema: %v", err)
t.FailNow()
}
requests := map[string]int{}
c := &manualfake.RESTClient{
NegotiatedSerializer: testapi.Default.NegotiatedSerializer(),
Client: manualfake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
switch p, m := req.URL.Path, req.Method; {
case strings.HasPrefix(p, "/swaggerapi") && m == "GET":
requests[p] = requests[p] + 1
return &http.Response{StatusCode: 200, Header: header(), Body: ioutil.NopCloser(bytes.NewBuffer(output))}, nil
default:
t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
return nil, nil
}
}),
}
dir, err := ioutil.TempDir("", "schemaCache")
if err != nil {
t.Fatalf("Error getting tempDir: %v", err)
}
defer os.RemoveAll(dir)
fullDir, err := substituteUserHome(dir)
if err != nil {
t.Errorf("Error getting fullDir: %v", err)
t.FailNow()
}
cacheFile := path.Join(fullDir, "foo", "bar", schemaFileName)
err = writeSchemaFile(output, fullDir, cacheFile, "foo", "bar")
if err != nil {
t.Errorf("Error building old cache schema: %v", err)
t.FailNow()
}
obj := &extensions.Deployment{}
data, err := runtime.Encode(testapi.Extensions.Codec(), obj)
if err != nil {
t.Errorf("unexpected error: %v", err)
t.FailNow()
}
// Re-get request, should use HTTP and write
if getSchemaAndValidate(c, data, "foo", "bar", dir, nil); err != nil {
t.Errorf("unexpected error validating: %v", err)
}
if requests["/swaggerapi/foo/bar"] != 1 {
t.Errorf("expected 1 schema request, saw: %d", requests["/swaggerapi/foo/bar"])
}
}
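// TestValidateCachesSchema exercises the on-disk schema cache: the first
// validation for a group/version pair fetches the swagger spec over HTTP and
// writes a cache file, a new group or version triggers another fetch and
// write, and an empty cache dir goes straight to HTTP without writing.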
func TestValidateCachesSchema(t *testing.T) {
schema, err := loadSchemaForTest()
if err != nil {
t.Errorf("Error loading schema: %v", err)
t.FailNow()
}
output, err := json.Marshal(schema)
if err != nil {
t.Errorf("Error serializing schema: %v", err)
t.FailNow()
}
requests := map[string]int{}
c := &manualfake.RESTClient{
NegotiatedSerializer: testapi.Default.NegotiatedSerializer(),
Client: manualfake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
switch p, m := req.URL.Path, req.Method; {
case strings.HasPrefix(p, "/swaggerapi") && m == "GET":
requests[p] = requests[p] + 1
return &http.Response{StatusCode: 200, Header: header(), Body: ioutil.NopCloser(bytes.NewBuffer(output))}, nil
default:
t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
return nil, nil
}
}),
}
dir, err := ioutil.TempDir("", "schemaCache")
if err != nil {
t.Fatalf("Error getting tempDir: %v", err)
}
defer os.RemoveAll(dir)
obj := &api.Pod{}
data, err := runtime.Encode(testapi.Default.Codec(), obj)
if err != nil {
t.Errorf("unexpected error: %v", err)
t.FailNow()
}
// Initial request, should use HTTP and write
if getSchemaAndValidate(c, data, "foo", "bar", dir, nil); err != nil {
t.Errorf("unexpected error validating: %v", err)
}
if _, err := os.Stat(path.Join(dir, "foo", "bar", schemaFileName)); err != nil {
t.Errorf("unexpected missing cache file: %v", err)
}
if requests["/swaggerapi/foo/bar"] != 1 {
t.Errorf("expected 1 schema request, saw: %d", requests["/swaggerapi/foo/bar"])
}
// Same version and group, should skip HTTP
if getSchemaAndValidate(c, data, "foo", "bar", dir, nil); err != nil {
t.Errorf("unexpected error validating: %v", err)
}
if requests["/swaggerapi/foo/bar"] != 2 {
t.Errorf("expected 1 schema request, saw: %d", requests["/swaggerapi/foo/bar"])
}
// Different API group, should go to HTTP and write
if getSchemaAndValidate(c, data, "foo", "baz", dir, nil); err != nil {
t.Errorf("unexpected error validating: %v", err)
}
if _, err := os.Stat(path.Join(dir, "foo", "baz", schemaFileName)); err != nil {
t.Errorf("unexpected missing cache file: %v", err)
}
if requests["/swaggerapi/foo/baz"] != 1 {
t.Errorf("expected 1 schema request, saw: %d", requests["/swaggerapi/foo/baz"])
}
// Different version, should go to HTTP and write
if getSchemaAndValidate(c, data, "foo2", "bar", dir, nil); err != nil {
t.Errorf("unexpected error validating: %v", err)
}
if _, err := os.Stat(path.Join(dir, "foo2", "bar", schemaFileName)); err != nil {
t.Errorf("unexpected missing cache file: %v", err)
}
if requests["/swaggerapi/foo2/bar"] != 1 {
t.Errorf("expected 1 schema request, saw: %d", requests["/swaggerapi/foo2/bar"])
}
// No cache dir, should go straight to HTTP and not write
if getSchemaAndValidate(c, data, "foo", "blah", "", nil); err != nil {
t.Errorf("unexpected error validating: %v", err)
}
if requests["/swaggerapi/foo/blah"] != 1 {
t.Errorf("expected 1 schema request, saw: %d", requests["/swaggerapi/foo/blah"])
}
if _, err := os.Stat(path.Join(dir, "foo", "blah", schemaFileName)); err == nil || !os.IsNotExist(err) {
t.Errorf("unexpected cache file error: %v", err)
}
}
func TestSubstituteUser(t *testing.T) {
usr, err := user.Current()
if err != nil {
t.Logf("SKIPPING TEST: unexpected error: %v", err)
return
}
tests := []struct {
input string
expected string
expectErr bool
}{
{input: "~/foo", expected: path.Join(os.Getenv("HOME"), "foo")},
{input: "~" + usr.Username + "/bar", expected: usr.HomeDir + "/bar"},
{input: "/foo/bar", expected: "/foo/bar"},
{input: "~doesntexit/bar", expectErr: true},
}
for _, test := range tests {
output, err := substituteUserHome(test.input)
if test.expectErr {
if err == nil {
t.Error("unexpected non-error")
}
continue
}
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if output != test.expected {
t.Errorf("expected: %s, saw: %s", test.expected, output)
}
}
}
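// newPodList builds a PodList of count ready pods; when isUnready or
// isUnhealthy index into the list, that pod is marked NotReady or given a
// restart count of 5 respectively.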
func newPodList(count, isUnready, isUnhealthy int, labels map[string]string) *api.PodList {
pods := []api.Pod{}
for i := 0; i < count; i++ {
newPod := api.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("pod-%d", i+1),
Namespace: metav1.NamespaceDefault,
CreationTimestamp: metav1.Date(2016, time.April, 1, 1, 0, i, 0, time.UTC),
Labels: labels,
},
Status: api.PodStatus{
Conditions: []api.PodCondition{
{
Status: api.ConditionTrue,
Type: api.PodReady,
},
},
},
}
pods = append(pods, newPod)
}
if isUnready > -1 && isUnready < count {
pods[isUnready].Status.Conditions[0].Status = api.ConditionFalse
}
if isUnhealthy > -1 && isUnhealthy < count {
pods[isUnhealthy].Status.ContainerStatuses = []api.ContainerStatus{{RestartCount: 5}}
}
return &api.PodList{
Items: pods,
}
}
func TestGetFirstPod(t *testing.T) {
labelSet := map[string]string{"test": "selector"}
tests := []struct {
name string
podList *api.PodList
watching []watch.Event
sortBy func([]*v1.Pod) sort.Interface
expected *api.Pod
expectedNum int
expectedErr bool
}{
{
name: "kubectl logs - two ready pods",
podList: newPodList(2, -1, -1, labelSet),
sortBy: func(pods []*v1.Pod) sort.Interface { return controller.ByLogging(pods) },
expected: &api.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-1",
Namespace: metav1.NamespaceDefault,
CreationTimestamp: metav1.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
Labels: map[string]string{"test": "selector"},
},
Status: api.PodStatus{
Conditions: []api.PodCondition{
{
Status: api.ConditionTrue,
Type: api.PodReady,
},
},
},
},
expectedNum: 2,
},
{
name: "kubectl logs - one unhealthy, one healthy",
podList: newPodList(2, -1, 1, labelSet),
sortBy: func(pods []*v1.Pod) sort.Interface { return controller.ByLogging(pods) },
expected: &api.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-2",
Namespace: metav1.NamespaceDefault,
CreationTimestamp: metav1.Date(2016, time.April, 1, 1, 0, 1, 0, time.UTC),
Labels: map[string]string{"test": "selector"},
},
Status: api.PodStatus{
Conditions: []api.PodCondition{
{
Status: api.ConditionTrue,
Type: api.PodReady,
},
},
ContainerStatuses: []api.ContainerStatus{{RestartCount: 5}},
},
},
expectedNum: 2,
},
{
name: "kubectl attach - two ready pods",
podList: newPodList(2, -1, -1, labelSet),
sortBy: func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) },
expected: &api.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-1",
Namespace: metav1.NamespaceDefault,
CreationTimestamp: metav1.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
Labels: map[string]string{"test": "selector"},
},
Status: api.PodStatus{
Conditions: []api.PodCondition{
{
Status: api.ConditionTrue,
Type: api.PodReady,
},
},
},
},
expectedNum: 2,
},
{
name: "kubectl attach - wait for ready pod",
podList: newPodList(1, 1, -1, labelSet),
watching: []watch.Event{
{
Type: watch.Modified,
Object: &api.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-1",
Namespace: metav1.NamespaceDefault,
CreationTimestamp: metav1.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
Labels: map[string]string{"test": "selector"},
},
Status: api.PodStatus{
Conditions: []api.PodCondition{
{
Status: api.ConditionTrue,
Type: api.PodReady,
},
},
},
},
},
},
sortBy: func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) },
expected: &api.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-1",
Namespace: metav1.NamespaceDefault,
CreationTimestamp: metav1.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
Labels: map[string]string{"test": "selector"},
},
Status: api.PodStatus{
Conditions: []api.PodCondition{
{
Status: api.ConditionTrue,
Type: api.PodReady,
},
},
},
},
expectedNum: 1,
},
}
for i := range tests {
test := tests[i]
fake := fake.NewSimpleClientset(test.podList)
if len(test.watching) > 0 {
watcher := watch.NewFake()
for _, event := range test.watching {
switch event.Type {
case watch.Added:
go watcher.Add(event.Object)
case watch.Modified:
go watcher.Modify(event.Object)
}
}
fake.PrependWatchReactor("pods", testcore.DefaultWatchReactor(watcher, nil))
}
selector := labels.Set(labelSet).AsSelector()
pod, numPods, err := GetFirstPod(fake.Core(), metav1.NamespaceDefault, selector, 1*time.Minute, test.sortBy)
pod.Spec.SecurityContext = nil
if !test.expectedErr && err != nil {
t.Errorf("%s: unexpected error: %v", test.name, err)
continue
}
if test.expectedErr && err == nil {
t.Errorf("%s: expected an error", test.name)
continue
}
if test.expectedNum != numPods {
t.Errorf("%s: expected %d pods, got %d", test.name, test.expectedNum, numPods)
continue
}
if !apiequality.Semantic.DeepEqual(test.expected, pod) {
t.Errorf("%s:\nexpected pod:\n%#v\ngot:\n%#v\n\n", test.name, test.expected, pod)
}
}
}
func TestPrintObjectSpecificMessage(t *testing.T) {
f := NewFactory(nil)
tests := []struct {
obj runtime.Object
expectOutput bool
}{
{
obj: &api.Service{},
expectOutput: false,
},
{
obj: &api.Pod{},
expectOutput: false,
},
{
obj: &api.Service{Spec: api.ServiceSpec{Type: api.ServiceTypeLoadBalancer}},
expectOutput: false,
},
{
obj: &api.Service{Spec: api.ServiceSpec{Type: api.ServiceTypeNodePort}},
expectOutput: true,
},
}
for _, test := range tests {
buff := &bytes.Buffer{}
f.PrintObjectSpecificMessage(test.obj, buff)
if test.expectOutput && buff.Len() == 0 {
t.Errorf("Expected output, saw none for %v", test.obj)
}
if !test.expectOutput && buff.Len() > 0 {
t.Errorf("Expected no output, saw %s for %v", buff.String(), test.obj)
}
}
}
func TestMakePortsString(t *testing.T) {
tests := []struct {
ports []api.ServicePort
useNodePort bool
expectedOutput string
}{
{ports: nil, expectedOutput: ""},
{ports: []api.ServicePort{}, expectedOutput: ""},
{ports: []api.ServicePort{
{
Port: 80,
Protocol: "TCP",
},
},
expectedOutput: "tcp:80",
},
{ports: []api.ServicePort{
{
Port: 80,
Protocol: "TCP",
},
{
Port: 8080,
Protocol: "UDP",
},
{
Port: 9000,
Protocol: "TCP",
},
},
expectedOutput: "tcp:80,udp:8080,tcp:9000",
},
{ports: []api.ServicePort{
{
Port: 80,
NodePort: 9090,
Protocol: "TCP",
},
{
Port: 8080,
NodePort: 80,
Protocol: "UDP",
},
},
useNodePort: true,
expectedOutput: "tcp:9090,udp:80",
},
}
for _, test := range tests {
output := makePortsString(test.ports, test.useNodePort)
if output != test.expectedOutput {
t.Errorf("expected: %s, saw: %s.", test.expectedOutput, output)
}
}
}
func fakeClient() resource.ClientMapper {
return resource.ClientMapperFunc(func(*meta.RESTMapping) (resource.RESTClient, error) {
return &manualfake.RESTClient{}, nil
})
}
func TestDiscoveryReplaceAliases(t *testing.T) {
tests := []struct {
name string
arg string
expected string
}{
{
name: "no-replacement",
arg: "service",
expected: "service",
},
{
name: "all-replacement",
arg: "all",
expected: "pods,replicationcontrollers,services,statefulsets.apps,horizontalpodautoscalers.autoscaling,jobs.batch,cronjobs.batch,daemonsets.extensions,deployments.extensions,replicasets.extensions",
},
{
name: "alias-in-comma-separated-arg",
arg: "all,secrets",
expected: "pods,replicationcontrollers,services,statefulsets.apps,horizontalpodautoscalers.autoscaling,jobs.batch,cronjobs.batch,daemonsets.extensions,deployments.extensions,replicasets.extensions,secrets",
},
}
ds := &fakeDiscoveryClient{}
mapper, err := NewShortcutExpander(testapi.Default.RESTMapper(), ds)
if err != nil {
t.Fatalf("Unable to create shortcut expander, err = %s", err.Error())
}
b := resource.NewBuilder(mapper, resource.LegacyCategoryExpander, api.Scheme, fakeClient(), testapi.Default.Codec())
for _, test := range tests {
replaced := b.ReplaceAliases(test.arg)
if replaced != test.expected {
t.Errorf("%s: unexpected argument: expected %s, got %s", test.name, test.expected, replaced)
}
}
}
| ["\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 | |
src/main/java/io/videooftheday/ProductionPropertyService.java
|
package io.videooftheday;
import io.videooftheday.properties.EthereumProperty;
import io.videooftheday.properties.IPFSProperty;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
public class ProductionPropertyService implements Property {
private static final Logger logger = LoggerFactory.getLogger(ProductionPropertyService.class);
private final Properties props;
public ProductionPropertyService() {
props = new Properties();
}
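/**
 * Resolves the property file to load as a classpath resource, checking the
 * "staging", "production" and "local" environment variables in that order.
 */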
public InputStream getFileLocation() {
String stagingPropertyFile = System.getenv().getOrDefault("staging", "");
if (!stagingPropertyFile.isEmpty()) {
logger.info("Found staging property file in this environment --> {}", stagingPropertyFile);
return VideoOfTheDayLauncher.class.getClassLoader().getResourceAsStream(stagingPropertyFile);
} else {
String productionPropertyFile = System.getenv().getOrDefault("production", "");
if (!productionPropertyFile.isEmpty()) {
return VideoOfTheDayLauncher.class.getClassLoader().getResourceAsStream(productionPropertyFile);
}
String localPropertyFile = System.getenv().getOrDefault("local", "");
if (!localPropertyFile.isEmpty()) {
return VideoOfTheDayLauncher.class.getClassLoader().getResourceAsStream(localPropertyFile);
}
return ProductionPropertyService.class.getResourceAsStream("no property file found");
}
}
@Override
public EthereumProperty getEthereumProperties() {
try {
props.load(getFileLocation());
EthereumProperty ethereumProperty = new EthereumProperty();
ethereumProperty.blockChainName = props.getProperty("blockchain.Name");
ethereumProperty.node = props.getProperty("blockchain.Node");
ethereumProperty.port = props.getProperty("blockchain.Port");
ethereumProperty.privateKey = props.getProperty("blockchain.PrivateKey");
return ethereumProperty;
} catch (IOException ignored) {
throw new Property.PropertyFileNotFound();
}
}
@Override
public IPFSProperty getIPFSProperty() {
try {
props.load(getFileLocation());
IPFSProperty ipfsProperty = new IPFSProperty();
ipfsProperty.gateway = props.getProperty("ipfs.Gateway");
ipfsProperty.port = props.getProperty("ipfs.Port");
return ipfsProperty;
} catch (IOException ignored) {
throw new Property.PropertyFileNotFound();
}
}
}
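// A minimal usage sketch (assuming the resolved properties file defines the
// blockchain.* and ipfs.* keys referenced above):
//   Property properties = new ProductionPropertyService();
//   EthereumProperty eth = properties.getEthereumProperties();
//   IPFSProperty ipfs = properties.getIPFSProperty();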
| [] | [] | [] | [] | [] | java | 0 | 0 | |
jobs/migrations/0002_auto_20210119_1903.py
|
# Generated by Django 3.1.5 on 2021-01-19 19:03
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('jobs', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='job',
name='flagged',
field=models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL, verbose_name='Flagged'),
),
migrations.AlterField(
model_name='location',
name='flagged',
field=models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL, verbose_name='Flagged'),
),
]
| [] | [] | [] | [] | [] | python | null | null | null |
query_test.go
|
package flumewater
import (
"log"
"os"
"testing"
"github.com/joho/godotenv"
"github.com/stretchr/testify/assert"
)
func TestFlumeQueryDevice(t *testing.T) {
err := godotenv.Load(".env")
if err != nil {
log.Fatalf("Error loading .env file")
}
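// Client credentials come from the FLUME_CLIENT_ID, FLUME_CLIENT_SECRET,
// FLUME_USERNAME and FLUME_PASSWORD environment variables populated above from .env.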
client := NewClient(os.Getenv("FLUME_CLIENT_ID"), os.Getenv("FLUME_CLIENT_SECRET"), os.Getenv("FLUME_USERNAME"), os.Getenv("FLUME_PASSWORD"))
devices, err := client.FetchUserDevices(FlumeWaterFetchDeviceRequest{})
assert.Nil(t, err)
assert.NotEmpty(t, devices)
query := FlumeWaterQuery{
Bucket: FlumeWaterBucketDay,
SinceDatetime: "2021-03-12 00:00:00",
RequestID: "test",
}
results, err := client.QueryUserDevice(devices[0].ID, FlumeWaterQueryRequest{
Queries: []FlumeWaterQuery{query},
})
assert.Nil(t, err)
for key := range results[0] {
assert.Equal(t, "test", key)
}
}
| ["\"FLUME_CLIENT_ID\"", "\"FLUME_CLIENT_SECRET\"", "\"FLUME_USERNAME\"", "\"FLUME_PASSWORD\""] | [] | ["FLUME_CLIENT_ID", "FLUME_PASSWORD", "FLUME_USERNAME", "FLUME_CLIENT_SECRET"] | [] | ["FLUME_CLIENT_ID", "FLUME_PASSWORD", "FLUME_USERNAME", "FLUME_CLIENT_SECRET"] | go | 4 | 0 | |
codes/kmc/batchJobs/rateCaculation/mainStuff/concPostProc.py
|
import subprocess
import sys
import os
import math
# This script post-processes the results of many KMCLib runs: it reads each
# run's flow and occupancy-histogram data files and aggregates them into
# per-rate summary files (means, relative errors, densities, failed runs).
resultDir = os.environ.get('RESULTS')
if resultDir is None:
print("WARNING! $RESULTS not set! Attempt to write results will fail!\n")
numConcs = 24
numLambda = 6
numStepsEquilib = 160000000
numStepsAnal = 1600000
numStepsSnapshot = 1000
numStepsReq = 1600000
sysSize = 124
analInterval = 1
numPasses = 100
timeInterval = 100.0
dataLocation = "batchJobs/concRuns/attempt2/"
lambdaMin = 0.1
lambdaMax = 0.4
rateStepSize = (lambdaMax-lambdaMin)/float(numLambda-1)
concMax = 0.97
concMin = 0.03
concStepSize = (concMax-concMin)/float(numConcs-1)
jobIndex = 1
runningJobs = []
failedRuns = []
for rateIndex in range(0, numLambda):
currentRate = lambdaMin + rateStepSize*rateIndex
rateData = []
for botConcIndex in range(0, numConcs):
botConc = concMin + concStepSize*botConcIndex
for topConcIndex in range(0, numConcs):
topConc = concMin + concStepSize*topConcIndex
currentLoc = resultDir+"/"+dataLocation+str(rateIndex)+"/"+str(botConcIndex)+"/"+str(topConcIndex)
inTopVals = []
outTopVals = []
inBotVals = []
outBotVals = []
failed = False
try:
with open(currentLoc+"/inTop.dat", 'r') as f:
lines = f.readlines()
if len(lines) != numPasses:
failed = True
print("wrongLength")
for line in lines:
inTopVals.append(float(line))
except IOError:
failed = True
print("ioError")
try:
with open(currentLoc+"/outTop.dat", 'r') as f:
lines = f.readlines()
if len(lines) != numPasses:
failed = True
for line in lines:
outTopVals.append(float(line))
except IOError:
failed = True
try:
with open(currentLoc+"/inBot.dat", 'r') as f:
lines = f.readlines()
if len(lines) != numPasses:
failed = True
for line in lines:
inBotVals.append(float(line))
except IOError:
failed = True
try:
with open(currentLoc+"/outBot.dat", 'r') as f:
lines = f.readlines()
if len(lines) != numPasses:
failed = True
for line in lines:
outBotVals.append(float(line))
except IOError:
failed = True
totWeight = 0.0
meanNum = 0.0
sqrDev = 0.0
errNum = 0.0
try:
with open(currentLoc+"/ovNumHist.dat", 'r') as f:
lines = f.readlines()
if len(lines) != sysSize:
failed = True
weights = []
for line in lines:
words = line.split()
val = float(words[1])
weights.append(val)
totWeight += val
if totWeight != 0.0:
for index in range(0, len(weights)):
weights[index] = weights[index]/totWeight
meanNum += index*weights[index]
for index in range(0, len(weights)):
sqrDev += weights[index]*(index - meanNum)*(index - meanNum)
errNum = math.sqrt(sqrDev/float(numPasses))
except (IOError, LookupError):
failed = True
if failed == False:
total = 0.0
flows = []
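# Each pass's net flow is taken as the average of the net influx at the
# bottom boundary (in - out) and the net outflux at the top boundary (out - in).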
for index in range(0, numPasses):
flows.append(0.5*((inBotVals[index]-outBotVals[index]) + (outTopVals[index] - inTopVals[index])))
total += flows[-1]
flowMean = total/float(numPasses)
squaredDev = 0.0
for index in range(0, numPasses):
squaredDev += (flows[index]-flowMean)*(flows[index]-flowMean)
stdErr = math.sqrt(squaredDev)/float(numPasses)
rateData.append([botConc, topConc, flowMean, stdErr, meanNum, errNum])
else:
failedRuns.append("concFlow.py "+str(botConc)+" "+str(topConc)+" "+str(currentRate)+" "+str(sysSize)+" "+str(analInterval)+" "+str(numStepsEquilib)+" "+str(numStepsSnapshot)+" "+str(numStepsAnal)+" "+str(numStepsReq)+" "+str(numPasses)+" "+str(timeInterval)+" "+dataLocation+str(rateIndex)+"/"+str(botConcIndex)+"/"+str(topConcIndex)+"\n")
with open(resultDir+"/"+dataLocation+str(rateIndex)+"/rateMeans.dat", 'w') as f:
for index in rateData:
f.write(str(index[0])+" "+str(index[1])+" "+str(index[2])+"\n")
with open(resultDir+"/"+dataLocation+str(rateIndex)+"/rateErrs.dat", 'w') as f:
for index in rateData:
if index[2] != 0.0:
f.write(str(index[0])+" "+str(index[1])+" "+str(100.0*index[3]/abs(index[2]))+"\n")
else:
f.write(str(index[0])+" "+str(index[1])+" "+str(-1.0)+"\n")
with open(resultDir+"/"+dataLocation+str(rateIndex)+"/densMeans.dat", 'w') as f:
for index in rateData:
f.write(str(index[0])+" "+str(index[1])+" "+str(index[4]/float(sysSize))+"\n")
with open(resultDir+"/"+dataLocation+str(rateIndex)+"/densErrs.dat", 'w') as f:
for index in rateData:
if index[2] != 0.0:
f.write(str(index[0])+" "+str(index[1])+" "+str(100.0*index[5]/abs(index[4]))+"\n")
else:
f.write(str(index[0])+" "+str(index[1])+" "+str(-1.0)+"\n")
with open(resultDir+"/"+dataLocation+str(rateIndex)+"/altPlot.dat", 'w') as f:
for index in rateData:
f.write(str(index[0]-index[1])+" "+str(index[4]/float(sysSize))+" "+str(index[2])+"\n")
with open(resultDir+"/"+dataLocation+"failedRuns.proc", 'w') as f:
for index in failedRuns:
f.write(index)
with open("failedRuns/testInput."+str(jobIndex), 'w') as g:
g.write(index)
jobIndex += 1
| [] | [] | ["RESULTS"] | [] | ["RESULTS"] | python | 1 | 0 | |
integration/integration_test.go
|
/*
Copyright 2016-2018 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integration
import (
"bytes"
"context"
"crypto/x509"
"fmt"
"io"
"io/ioutil"
"net"
"net/http/httptest"
"net/url"
"os"
"os/exec"
"os/user"
"path/filepath"
"regexp"
"strconv"
"strings"
"testing"
"time"
"golang.org/x/crypto/ssh"
"github.com/gravitational/teleport"
"github.com/gravitational/teleport/lib"
"github.com/gravitational/teleport/lib/auth"
"github.com/gravitational/teleport/lib/auth/testauthority"
"github.com/gravitational/teleport/lib/backend"
"github.com/gravitational/teleport/lib/client"
"github.com/gravitational/teleport/lib/defaults"
"github.com/gravitational/teleport/lib/events"
"github.com/gravitational/teleport/lib/pam"
"github.com/gravitational/teleport/lib/service"
"github.com/gravitational/teleport/lib/services"
"github.com/gravitational/teleport/lib/session"
"github.com/gravitational/teleport/lib/utils"
"github.com/gravitational/trace"
log "github.com/sirupsen/logrus"
"gopkg.in/check.v1"
)
const (
Loopback = "127.0.0.1"
Host = "localhost"
HostID = "00000000-0000-0000-0000-000000000000"
Site = "local-site"
AllocatePortsNum = 300
)
type IntSuite struct {
ports utils.PortList
me *user.User
// priv/pub pair to avoid re-generating it
priv []byte
pub []byte
}
// bootstrap check
func TestIntegrations(t *testing.T) { check.TestingT(t) }
var _ = check.Suite(&IntSuite{})
func (s *IntSuite) TearDownSuite(c *check.C) {
var err error
// restore os.Stdin to its original condition: connected to /dev/null
os.Stdin.Close()
os.Stdin, err = os.Open("/dev/null")
c.Assert(err, check.IsNil)
}
func (s *IntSuite) SetUpTest(c *check.C) {
os.RemoveAll(client.FullProfilePath(""))
}
func (s *IntSuite) SetUpSuite(c *check.C) {
var err error
utils.InitLoggerForTests()
SetTestTimeouts(time.Millisecond * time.Duration(100))
s.priv, s.pub, err = testauthority.New().GenerateKeyPair("")
c.Assert(err, check.IsNil)
// find AllocatePortsNum free listening ports to use
s.ports, err = utils.GetFreeTCPPorts(AllocatePortsNum)
if err != nil {
c.Fatal(err)
}
s.me, _ = user.Current()
// close & re-open stdin because 'go test' runs with os.stdin connected to /dev/null
stdin, err := os.Open("/dev/tty")
if err == nil {
os.Stdin.Close()
os.Stdin = stdin
}
}
// newUnstartedTeleport helper returns a created but not started Teleport instance pre-configured
// with the current user os.user.Current().
func (s *IntSuite) newUnstartedTeleport(c *check.C, logins []string, enableSSH bool) *TeleInstance {
t := NewInstance(InstanceConfig{ClusterName: Site, HostID: HostID, NodeName: Host, Ports: s.getPorts(5), Priv: s.priv, Pub: s.pub})
// use passed logins, but use suite's default login if nothing was passed
if logins == nil || len(logins) == 0 {
logins = []string{s.me.Username}
}
for _, login := range logins {
t.AddUser(login, []string{login})
}
if err := t.Create(nil, enableSSH, nil); err != nil {
c.Fatalf("Unexpected response from Create: %v", err)
}
return t
}
// newTeleport helper returns a running Teleport instance pre-configured
// with the current user os.user.Current().
func (s *IntSuite) newTeleport(c *check.C, logins []string, enableSSH bool) *TeleInstance {
t := s.newUnstartedTeleport(c, logins, enableSSH)
if err := t.Start(); err != nil {
c.Fatalf("Unexpected response from Start: %v", err)
}
return t
}
// newTeleportWithConfig is a helper function that will create a running
// Teleport instance with the passed in user, instance secrets, and Teleport
// configuration.
func (s *IntSuite) newTeleportWithConfig(c *check.C, logins []string, instanceSecrets []*InstanceSecrets, teleportConfig *service.Config) *TeleInstance {
t := NewInstance(InstanceConfig{ClusterName: Site, HostID: HostID, NodeName: Host, Ports: s.getPorts(5), Priv: s.priv, Pub: s.pub})
// use passed logins, but use suite's default login if nothing was passed
if logins == nil || len(logins) == 0 {
logins = []string{s.me.Username}
}
for _, login := range logins {
t.AddUser(login, []string{login})
}
// create a new teleport instance with passed in configuration
if err := t.CreateEx(instanceSecrets, teleportConfig); err != nil {
c.Fatalf("Unexpected response from CreateEx: %v", trace.DebugReport(err))
}
if err := t.Start(); err != nil {
c.Fatalf("Unexpected response from Start: %v", trace.DebugReport(err))
}
return t
}
// TestAuditOn creates a live session, records a bunch of data through it
// and then reads it back and compares against simulated reality.
func (s *IntSuite) TestAuditOn(c *check.C) {
var tests = []struct {
inRecordLocation string
inForwardAgent bool
auditSessionsURI string
}{
// normal teleport
{
inRecordLocation: services.RecordAtNode,
inForwardAgent: false,
},
// recording proxy
{
inRecordLocation: services.RecordAtProxy,
inForwardAgent: true,
},
// normal teleport with upload to file server
{
inRecordLocation: services.RecordAtNode,
inForwardAgent: false,
auditSessionsURI: c.MkDir(),
},
{
inRecordLocation: services.RecordAtProxy,
inForwardAgent: false,
auditSessionsURI: c.MkDir(),
},
}
for _, tt := range tests {
makeConfig := func() (*check.C, []string, []*InstanceSecrets, *service.Config) {
clusterConfig, err := services.NewClusterConfig(services.ClusterConfigSpecV3{
SessionRecording: tt.inRecordLocation,
Audit: services.AuditConfig{AuditSessionsURI: tt.auditSessionsURI},
})
c.Assert(err, check.IsNil)
tconf := service.MakeDefaultConfig()
tconf.Auth.Enabled = true
tconf.Auth.ClusterConfig = clusterConfig
tconf.Proxy.Enabled = true
tconf.Proxy.DisableWebService = true
tconf.Proxy.DisableWebInterface = true
tconf.SSH.Enabled = true
return c, nil, nil, tconf
}
t := s.newTeleportWithConfig(makeConfig())
defer t.Stop(true)
// Start a node.
nodeSSHPort := s.getPorts(1)[0]
nodeConfig := func() *service.Config {
tconf := service.MakeDefaultConfig()
tconf.HostUUID = "node"
tconf.Hostname = "node"
tconf.SSH.Enabled = true
tconf.SSH.Addr.Addr = net.JoinHostPort(t.Hostname, fmt.Sprintf("%v", nodeSSHPort))
return tconf
}
nodeProcess, err := t.StartNode(nodeConfig())
c.Assert(err, check.IsNil)
// get access to a authClient for the cluster
site := t.GetSiteAPI(Site)
c.Assert(site, check.NotNil)
// wait 10 seconds for both nodes to show up, otherwise
// we'll have trouble connecting to the node below.
waitForNodes := func(site auth.ClientI, count int) error {
tickCh := time.Tick(500 * time.Millisecond)
stopCh := time.After(10 * time.Second)
for {
select {
case <-tickCh:
nodesInSite, err := site.GetNodes(defaults.Namespace, services.SkipValidation())
if err != nil && !trace.IsNotFound(err) {
return trace.Wrap(err)
}
if got, want := len(nodesInSite), count; got == want {
return nil
}
case <-stopCh:
return trace.BadParameter("waited 10s, did find %v nodes", count)
}
}
}
err = waitForNodes(site, 2)
c.Assert(err, check.IsNil)
// should have no sessions:
sessions, err := site.GetSessions(defaults.Namespace)
c.Assert(err, check.IsNil)
c.Assert(len(sessions), check.Equals, 0)
// create interactive session (this goroutine is this user's terminal time)
endC := make(chan error, 0)
myTerm := NewTerminal(250)
go func() {
cl, err := t.NewClient(ClientConfig{
Login: s.me.Username,
Cluster: Site,
Host: Host,
Port: nodeSSHPort,
ForwardAgent: tt.inForwardAgent,
})
c.Assert(err, check.IsNil)
cl.Stdout = &myTerm
cl.Stdin = &myTerm
err = cl.SSH(context.TODO(), []string{}, false)
endC <- err
}()
// wait until we've found the session in the audit log
getSession := func(site auth.ClientI) (*session.Session, error) {
tickCh := time.Tick(500 * time.Millisecond)
stopCh := time.After(10 * time.Second)
for {
select {
case <-tickCh:
sessions, err = site.GetSessions(defaults.Namespace)
if err != nil {
return nil, trace.Wrap(err)
}
if len(sessions) != 1 {
continue
}
return &sessions[0], nil
case <-stopCh:
return nil, trace.BadParameter("unable to find sessions after 10s (mode=%v)", tt.inRecordLocation)
}
}
}
session, err := getSession(site)
c.Assert(err, check.IsNil)
// wait for the user to join this session:
for len(session.Parties) == 0 {
time.Sleep(time.Millisecond * 5)
session, err = site.GetSession(defaults.Namespace, sessions[0].ID)
c.Assert(err, check.IsNil)
}
// make sure it's us who joined! :)
c.Assert(session.Parties[0].User, check.Equals, s.me.Username)
// lets type "echo hi" followed by "enter" and then "exit" + "enter":
myTerm.Type("\aecho hi\n\r\aexit\n\r\a")
// wait for session to end:
select {
case <-endC:
case <-time.After(10 * time.Second):
c.Fatalf("Timeout waiting for session to finish")
}
// wait for the upload of the right session to complete
timeoutC := time.After(10 * time.Second)
loop:
for {
select {
case event := <-t.UploadEventsC:
if event.SessionID != string(session.ID) {
log.Debugf("Skipping mismatching session %v, expecting upload of %v.", event.SessionID, session.ID)
continue
}
break loop
case <-timeoutC:
c.Fatalf("Timeout waiting for upload of session %v to complete to %v", session.ID, tt.auditSessionsURI)
}
}
// read back the entire session (we have to try several times until we get back
// everything because the session is closing)
var sessionStream []byte
for i := 0; i < 6; i++ {
sessionStream, err = site.GetSessionChunk(defaults.Namespace, session.ID, 0, events.MaxChunkBytes)
c.Assert(err, check.IsNil)
if strings.Contains(string(sessionStream), "exit") {
break
}
time.Sleep(time.Millisecond * 250)
if i >= 5 {
// session stream keeps coming back short
c.Fatalf("Stream is not getting data: %q.", string(sessionStream))
}
}
// see what we got. It looks different based on bash settings, but here it is
// on Ev's machine (hostname is 'edsger'):
//
// edsger ~: echo hi
// hi
// edsger ~: exit
// logout
//
comment := check.Commentf("%q", string(sessionStream))
c.Assert(strings.Contains(string(sessionStream), "echo hi"), check.Equals, true, comment)
c.Assert(strings.Contains(string(sessionStream), "exit"), check.Equals, true, comment)
// Wait until session.start, session.leave, and session.end events have arrived.
getSessions := func(site auth.ClientI) ([]events.EventFields, error) {
tickCh := time.Tick(500 * time.Millisecond)
stopCh := time.After(10 * time.Second)
for {
select {
case <-tickCh:
// Get all session events from the backend.
sessionEvents, err := site.GetSessionEvents(defaults.Namespace, session.ID, 0, false)
if err != nil {
return nil, trace.Wrap(err)
}
// Look through all session events for the three wanted.
var hasStart bool
var hasEnd bool
var hasLeave bool
for _, se := range sessionEvents {
if se.GetType() == events.SessionStartEvent {
hasStart = true
}
if se.GetType() == events.SessionEndEvent {
hasEnd = true
}
if se.GetType() == events.SessionLeaveEvent {
hasLeave = true
}
}
// Make sure all three events were found.
if hasStart && hasEnd && hasLeave {
return sessionEvents, nil
}
case <-stopCh:
return nil, trace.BadParameter("unable to find all session events after 10s (mode=%v)", tt.inRecordLocation)
}
}
}
history, err := getSessions(site)
c.Assert(err, check.IsNil)
getChunk := func(e events.EventFields, maxlen int) string {
offset := e.GetInt("offset")
length := e.GetInt("bytes")
if length == 0 {
return ""
}
if length > maxlen {
length = maxlen
}
return string(sessionStream[offset : offset+length])
}
findByType := func(et string) events.EventFields {
for _, e := range history {
if e.GetType() == et {
return e
}
}
return nil
}
// there should always be 'session.start' event (and it must be first)
first := history[0]
start := findByType(events.SessionStartEvent)
c.Assert(start, check.DeepEquals, first)
c.Assert(start.GetInt("bytes"), check.Equals, 0)
c.Assert(start.GetString(events.SessionEventID) != "", check.Equals, true)
c.Assert(start.GetString(events.TerminalSize) != "", check.Equals, true)
// if session are being recorded at nodes, then the event server_id field
// should contain the ID of the node. if sessions are being recorded at the
// proxy, then server_id is random so we can't check it, but it should not
// the server_id of any of the nodes we know about.
switch tt.inRecordLocation {
case services.RecordAtNode:
c.Assert(start.GetString(events.SessionServerID), check.Equals, nodeProcess.Config.HostUUID)
case services.RecordAtProxy:
c.Assert(start.GetString(events.SessionServerID), check.Not(check.Equals), nodeProcess.Config.HostUUID)
c.Assert(start.GetString(events.SessionServerID), check.Not(check.Equals), t.Process.Config.HostUUID)
}
// make sure data is recorded properly
out := &bytes.Buffer{}
for _, e := range history {
out.WriteString(getChunk(e, 1000))
}
recorded := replaceNewlines(out.String())
c.Assert(recorded, check.Matches, ".*exit.*")
c.Assert(recorded, check.Matches, ".*echo hi.*")
// there should always be 'session.end' event
end := findByType(events.SessionEndEvent)
c.Assert(end, check.NotNil)
c.Assert(end.GetInt("bytes"), check.Equals, 0)
c.Assert(end.GetString(events.SessionEventID) != "", check.Equals, true)
// there should always be 'session.leave' event
leave := findByType(events.SessionLeaveEvent)
c.Assert(leave, check.NotNil)
c.Assert(leave.GetInt("bytes"), check.Equals, 0)
c.Assert(leave.GetString(events.SessionEventID) != "", check.Equals, true)
// all of them should have a proper time:
for _, e := range history {
c.Assert(e.GetTime("time").IsZero(), check.Equals, false)
}
}
}
func replaceNewlines(in string) string {
return regexp.MustCompile(`\r?\n`).ReplaceAllString(in, `\n`)
}
// TestInteroperability checks if Teleport and OpenSSH behave in the same way
// when executing commands.
func (s *IntSuite) TestInteroperability(c *check.C) {
tempdir, err := ioutil.TempDir("", "teleport-")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tempdir)
tempfile := filepath.Join(tempdir, "file.txt")
// create new teleport server that will be used by all tests
t := s.newTeleport(c, nil, true)
defer t.Stop(true)
var tests = []struct {
inCommand string
inStdin string
outContains string
outFile bool
}{
// 0 - echo "1\n2\n" | ssh localhost "cat -"
// this command can be used to copy files by piping stdout to stdin over ssh.
{
inCommand: "cat -",
inStdin: "1\n2\n",
outContains: "1\n2\n",
outFile: false,
},
// 1 - ssh -tt localhost '/bin/sh -c "mkdir -p /tmp && echo a > /tmp/file.txt"'
// programs like ansible execute commands like this
{
inCommand: fmt.Sprintf(`/bin/sh -c "mkdir -p /tmp && echo a > %v"`, tempfile),
inStdin: "",
outContains: "a",
outFile: true,
},
// 2 - ssh localhost tty
// should print "not a tty"
{
inCommand: "tty",
inStdin: "",
outContains: "not a tty",
outFile: false,
},
}
for i, tt := range tests {
// create new teleport client
cl, err := t.NewClient(ClientConfig{Login: s.me.Username, Cluster: Site, Host: Host, Port: t.GetPortSSHInt()})
c.Assert(err, check.IsNil)
// hook up stdin and stdout to a buffer for reading and writing
inbuf := bytes.NewReader([]byte(tt.inStdin))
outbuf := utils.NewSyncBuffer()
cl.Stdin = inbuf
cl.Stdout = outbuf
cl.Stderr = outbuf
// run command and wait a maximum of 10 seconds for it to complete
sessionEndC := make(chan interface{}, 0)
go func() {
// don't check for err, because sometimes this process should fail
// with an error and that's what the test is checking for.
cl.SSH(context.TODO(), []string{tt.inCommand}, false)
sessionEndC <- true
}()
waitFor(sessionEndC, time.Second*10)
// if we are looking for the output in a file, look in the file
// otherwise check stdout and stderr for the expected output
if tt.outFile {
bytes, err := ioutil.ReadFile(tempfile)
c.Assert(err, check.IsNil)
comment := check.Commentf("Test %v: %q does not contain: %q", i, string(bytes), tt.outContains)
c.Assert(strings.Contains(string(bytes), tt.outContains), check.Equals, true, comment)
} else {
comment := check.Commentf("Test %v: %q does not contain: %q", i, outbuf.String(), tt.outContains)
c.Assert(strings.Contains(outbuf.String(), tt.outContains), check.Equals, true, comment)
}
}
}
// TestInteractive covers SSH into shell and joining the same session from another client
func (s *IntSuite) TestInteractive(c *check.C) {
t := s.newTeleport(c, nil, true)
defer t.Stop(true)
sessionEndC := make(chan interface{}, 0)
// get a reference to site obj:
site := t.GetSiteAPI(Site)
c.Assert(site, check.NotNil)
personA := NewTerminal(250)
personB := NewTerminal(250)
// PersonA: SSH into the server, wait one second, then type some commands on stdin:
openSession := func() {
cl, err := t.NewClient(ClientConfig{Login: s.me.Username, Cluster: Site, Host: Host, Port: t.GetPortSSHInt()})
c.Assert(err, check.IsNil)
cl.Stdout = &personA
cl.Stdin = &personA
// Person A types something into the terminal (including "exit")
personA.Type("\aecho hi\n\r\aexit\n\r\a")
err = cl.SSH(context.TODO(), []string{}, false)
c.Assert(err, check.IsNil)
sessionEndC <- true
}
// PersonB: wait for a session to become available, then join:
joinSession := func() {
var sessionID string
for {
time.Sleep(time.Millisecond)
sessions, _ := site.GetSessions(defaults.Namespace)
if len(sessions) == 0 {
continue
}
sessionID = string(sessions[0].ID)
break
}
cl, err := t.NewClient(ClientConfig{Login: s.me.Username, Cluster: Site, Host: Host, Port: t.GetPortSSHInt()})
c.Assert(err, check.IsNil)
cl.Stdout = &personB
for i := 0; i < 10; i++ {
err = cl.Join(context.TODO(), defaults.Namespace, session.ID(sessionID), &personB)
if err == nil {
break
}
}
c.Assert(err, check.IsNil)
}
go openSession()
go joinSession()
// wait for the session to end
waitFor(sessionEndC, time.Second*10)
// make sure the output of B is mirrored in A
outputOfA := string(personA.Output(100))
outputOfB := string(personB.Output(100))
c.Assert(strings.Contains(outputOfA, outputOfB), check.Equals, true)
}
// TestShutdown tests scenario with a graceful shutdown,
// that session will be working after
func (s *IntSuite) TestShutdown(c *check.C) {
t := s.newTeleport(c, nil, true)
// get a reference to site obj:
site := t.GetSiteAPI(Site)
c.Assert(site, check.NotNil)
person := NewTerminal(250)
// commandsC receive commands
commandsC := make(chan string, 0)
// PersonA: SSH into the server, wait one second, then type some commands on stdin:
openSession := func() {
cl, err := t.NewClient(ClientConfig{Login: s.me.Username, Cluster: Site, Host: Host, Port: t.GetPortSSHInt()})
c.Assert(err, check.IsNil)
cl.Stdout = &person
cl.Stdin = &person
go func() {
for command := range commandsC {
person.Type(command)
}
}()
err = cl.SSH(context.TODO(), []string{}, false)
c.Assert(err, check.IsNil)
}
go openSession()
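// retry types a command into the terminal and polls the captured output for
// up to 10 seconds until it matches the given pattern.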
retry := func(command, pattern string) {
person.Type(command)
// wait for both sites to see each other via their reverse tunnels (for up to 10 seconds)
abortTime := time.Now().Add(10 * time.Second)
var matched bool
var output string
for {
output = string(replaceNewlines(person.Output(1000)))
matched, _ = regexp.MatchString(pattern, output)
if matched {
break
}
time.Sleep(time.Millisecond * 200)
if time.Now().After(abortTime) {
c.Fatalf("failed to capture output: %v", pattern)
}
}
if !matched {
c.Fatalf("output %q does not match pattern %q", output, pattern)
}
}
retry("echo start \r\n", ".*start.*")
// initiate shutdown
ctx := context.TODO()
shutdownContext := t.Process.StartShutdown(ctx)
// make sure that terminal still works
retry("echo howdy \r\n", ".*howdy.*")
// now type exit and wait for shutdown to complete
person.Type("exit\n\r")
select {
case <-shutdownContext.Done():
case <-time.After(5 * time.Second):
c.Fatalf("failed to shut down the server")
}
}
type disconnectTestCase struct {
recordingMode string
options services.RoleOptions
disconnectTimeout time.Duration
}
// TestDisconnectScenarios tests multiple scenarios with client disconnects
func (s *IntSuite) TestDisconnectScenarios(c *check.C) {
testCases := []disconnectTestCase{
{
recordingMode: services.RecordAtNode,
options: services.RoleOptions{
ClientIdleTimeout: services.NewDuration(500 * time.Millisecond),
},
disconnectTimeout: time.Second,
},
{
recordingMode: services.RecordAtProxy,
options: services.RoleOptions{
ClientIdleTimeout: services.NewDuration(500 * time.Millisecond),
},
disconnectTimeout: time.Second,
},
{
recordingMode: services.RecordAtNode,
options: services.RoleOptions{
DisconnectExpiredCert: services.NewBool(true),
MaxSessionTTL: services.NewDuration(2 * time.Second),
},
disconnectTimeout: 4 * time.Second,
},
{
recordingMode: services.RecordAtProxy,
options: services.RoleOptions{
DisconnectExpiredCert: services.NewBool(true),
MaxSessionTTL: services.NewDuration(2 * time.Second),
},
disconnectTimeout: 4 * time.Second,
},
}
for _, tc := range testCases {
s.runDisconnectTest(c, tc)
}
}
func (s *IntSuite) runDisconnectTest(c *check.C, tc disconnectTestCase) {
t := NewInstance(InstanceConfig{
ClusterName: Site,
HostID: HostID,
NodeName: Host,
Ports: s.getPorts(5),
Priv: s.priv,
Pub: s.pub,
})
// devs role gets disconnected after 1 second idle time
username := s.me.Username
role, err := services.NewRole("devs", services.RoleSpecV3{
Options: tc.options,
Allow: services.RoleConditions{
Logins: []string{username},
},
})
c.Assert(err, check.IsNil)
t.AddUserWithRole(username, role)
clusterConfig, err := services.NewClusterConfig(services.ClusterConfigSpecV3{
SessionRecording: services.RecordAtNode,
})
c.Assert(err, check.IsNil)
cfg := service.MakeDefaultConfig()
cfg.Auth.Enabled = true
cfg.Auth.ClusterConfig = clusterConfig
cfg.Proxy.DisableWebService = true
cfg.Proxy.DisableWebInterface = true
cfg.Proxy.Enabled = true
cfg.SSH.Enabled = true
c.Assert(t.CreateEx(nil, cfg), check.IsNil)
c.Assert(t.Start(), check.IsNil)
defer t.Stop(true)
// get a reference to site obj:
site := t.GetSiteAPI(Site)
c.Assert(site, check.NotNil)
person := NewTerminal(250)
// commandsC receive commands
commandsC := make(chan string, 0)
// PersonA: SSH into the server, wait one second, then type some commands on stdin:
sessionCtx, sessionCancel := context.WithCancel(context.TODO())
openSession := func() {
defer sessionCancel()
cl, err := t.NewClient(ClientConfig{Login: username, Cluster: Site, Host: Host, Port: t.GetPortSSHInt()})
c.Assert(err, check.IsNil)
cl.Stdout = &person
cl.Stdin = &person
go func() {
for command := range commandsC {
person.Type(command)
}
}()
err = cl.SSH(context.TODO(), []string{}, false)
if err != nil && err != io.EOF {
c.Fatalf("expected EOF or nil, got %v instead", err)
}
}
go openSession()
retry := func(command, pattern string) {
person.Type(command)
abortTime := time.Now().Add(10 * time.Second)
var matched bool
var output string
for {
output = string(replaceNewlines(person.Output(1000)))
matched, _ = regexp.MatchString(pattern, output)
if matched {
break
}
time.Sleep(time.Millisecond * 200)
if time.Now().After(abortTime) {
c.Fatalf("failed to capture output: %v", pattern)
}
}
if !matched {
c.Fatalf("output %q does not match pattern %q", output, pattern)
}
}
retry("echo start \r\n", ".*start.*")
time.Sleep(tc.disconnectTimeout)
select {
case <-time.After(tc.disconnectTimeout):
c.Fatalf("timeout waiting for session to exit")
case <-sessionCtx.Done():
// session closed
}
}
// TestEnvironmentVariables validates that environment variables set on the
// client are passed through to commands executed in the remote session
func (s *IntSuite) TestEnvironmentVariables(c *check.C) {
t := s.newTeleport(c, nil, true)
defer t.Stop(true)
testKey, testVal := "TELEPORT_TEST_ENV", "howdy"
cmd := []string{"printenv", testKey}
// make sure sessions set run command
tc, err := t.NewClient(ClientConfig{Login: s.me.Username, Cluster: Site, Host: Host, Port: t.GetPortSSHInt()})
c.Assert(err, check.IsNil)
tc.Env = map[string]string{testKey: testVal}
out := &bytes.Buffer{}
tc.Stdout = out
tc.Stdin = nil
err = tc.SSH(context.TODO(), cmd, false)
c.Assert(err, check.IsNil)
c.Assert(strings.TrimSpace(out.String()), check.Equals, testVal)
}
// TestInvalidLogins validates that you can't login with invalid login or
// with invalid 'site' parameter
func (s *IntSuite) TestInvalidLogins(c *check.C) {
t := s.newTeleport(c, nil, true)
defer t.Stop(true)
cmd := []string{"echo", "success"}
// try the wrong site:
tc, err := t.NewClient(ClientConfig{Login: s.me.Username, Cluster: "wrong-site", Host: Host, Port: t.GetPortSSHInt()})
c.Assert(err, check.IsNil)
err = tc.SSH(context.TODO(), cmd, false)
c.Assert(err, check.ErrorMatches, "cluster wrong-site not found")
}
// TestTwoClusters creates two teleport clusters: "a" and "b" and creates a
// tunnel from A to B.
//
// Two tests are run, first is when both A and B record sessions at nodes. It
// executes an SSH command on A by connecting directly to A and by connecting
// to B via B<->A tunnel. All sessions should end up in A.
//
// In the second test, sessions are recorded at B. All sessions still show up on
// A (they are Teleport nodes) but in addition, two show up on B when connecting
// over the B<->A tunnel because sessions are recorded at the proxy.
func (s *IntSuite) TestTwoClusters(c *check.C) {
now := time.Now().In(time.UTC).Round(time.Second)
var tests = []struct {
inRecordLocation string
outExecCountSiteA int
outExecCountSiteB int
}{
// normal teleport. since all events are recorded at the node, all events
// end up on site-a and none on site-b.
{
services.RecordAtNode,
3,
0,
},
// recording proxy. since events are recorded at the proxy, 3 events end up
// on site-a (because it's a teleport node so it still records at the node)
// and 2 events end up on site-b because it's recording.
{
services.RecordAtProxy,
3,
2,
},
}
for _, tt := range tests {
// start the http proxy, we need to make sure this was not used
ps := &proxyServer{}
ts := httptest.NewServer(ps)
defer ts.Close()
// clear out any proxy environment variables
for _, v := range []string{"http_proxy", "https_proxy", "HTTP_PROXY", "HTTPS_PROXY"} {
os.Setenv(v, "")
}
username := s.me.Username
a := NewInstance(InstanceConfig{ClusterName: "site-A", HostID: HostID, NodeName: Host, Ports: s.getPorts(5), Priv: s.priv, Pub: s.pub})
b := NewInstance(InstanceConfig{ClusterName: "site-B", HostID: HostID, NodeName: Host, Ports: s.getPorts(5), Priv: s.priv, Pub: s.pub})
a.AddUser(username, []string{username})
b.AddUser(username, []string{username})
clusterConfig, err := services.NewClusterConfig(services.ClusterConfigSpecV3{
SessionRecording: tt.inRecordLocation,
})
c.Assert(err, check.IsNil)
acfg := service.MakeDefaultConfig()
acfg.Auth.Enabled = true
acfg.Proxy.Enabled = true
acfg.Proxy.DisableWebService = true
acfg.Proxy.DisableWebInterface = true
acfg.SSH.Enabled = true
bcfg := service.MakeDefaultConfig()
bcfg.Auth.Enabled = true
bcfg.Auth.ClusterConfig = clusterConfig
bcfg.Proxy.Enabled = true
bcfg.Proxy.DisableWebService = true
bcfg.Proxy.DisableWebInterface = true
bcfg.SSH.Enabled = false
c.Assert(b.CreateEx(a.Secrets.AsSlice(), bcfg), check.IsNil)
c.Assert(a.CreateEx(b.Secrets.AsSlice(), acfg), check.IsNil)
c.Assert(b.Start(), check.IsNil)
c.Assert(a.Start(), check.IsNil)
// wait for both sites to see each other via their reverse tunnels (for up to 10 seconds)
abortTime := time.Now().Add(time.Second * 10)
for len(a.Tunnel.GetSites()) < 2 && len(b.Tunnel.GetSites()) < 2 {
time.Sleep(time.Millisecond * 200)
if time.Now().After(abortTime) {
c.Fatalf("two sites do not see each other: tunnels are not working")
}
}
var (
outputA bytes.Buffer
outputB bytes.Buffer
)
// make sure the direct dialer was used and not the proxy dialer
c.Assert(ps.Count(), check.Equals, 0)
// if we got here, it means the two sites are cross-connected. let's execute SSH commands
sshPort := a.GetPortSSHInt()
cmd := []string{"echo", "hello world"}
// directly:
tc, err := a.NewClient(ClientConfig{
Login: username,
Cluster: "site-A",
Host: Host,
Port: sshPort,
ForwardAgent: true,
})
tc.Stdout = &outputA
c.Assert(err, check.IsNil)
err = tc.SSH(context.TODO(), cmd, false)
c.Assert(err, check.IsNil)
c.Assert(outputA.String(), check.Equals, "hello world\n")
// Update trusted CAs.
err = tc.UpdateTrustedCA(context.TODO())
c.Assert(err, check.IsNil)
// The known_hosts file should have two certificates; the way bytes.Split
// works, that means 3 parts (2 certs + 1 trailing empty entry).
buffer, err := ioutil.ReadFile(filepath.Join(tc.KeysDir, "known_hosts"))
c.Assert(err, check.IsNil)
parts := bytes.Split(buffer, []byte("\n"))
c.Assert(parts, check.HasLen, 3)
// The certs.pem file should have 2 certificates.
buffer, err = ioutil.ReadFile(filepath.Join(tc.KeysDir, "keys", Host, "certs.pem"))
c.Assert(err, check.IsNil)
roots := x509.NewCertPool()
ok := roots.AppendCertsFromPEM(buffer)
c.Assert(ok, check.Equals, true)
c.Assert(roots.Subjects(), check.HasLen, 2)
// via tunnel b->a:
tc, err = b.NewClient(ClientConfig{
Login: username,
Cluster: "site-A",
Host: Host,
Port: sshPort,
ForwardAgent: true,
})
tc.Stdout = &outputB
c.Assert(err, check.IsNil)
err = tc.SSH(context.TODO(), cmd, false)
c.Assert(err, check.IsNil)
c.Assert(outputA.String(), check.DeepEquals, outputB.String())
// Stop "site-A" and try to connect to it again via "site-A" (expect a connection error)
a.Stop(false)
err = tc.SSH(context.TODO(), cmd, false)
// debug mode will add more lines, so this check has to be flexible
c.Assert(strings.Replace(err.Error(), "\n", "", -1), check.Matches, `.*site-A is offline.*`)
// Reset and start "Site-A" again
a.Reset()
err = a.Start()
c.Assert(err, check.IsNil)
// try to execute an SSH command using the same old client through site-B:
// the "site-A" and "site-B" reverse tunnels are supposed to reconnect,
// and 'tc' (the client) is also supposed to reconnect
for i := 0; i < 10; i++ {
time.Sleep(250 * time.Millisecond)
err = tc.SSH(context.TODO(), cmd, false)
if err == nil {
break
}
}
c.Assert(err, check.IsNil)
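// searchAndAssert polls the audit log of the given site for exec events
// until the expected count shows up, or gives up after 5 seconds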
searchAndAssert := func(site auth.ClientI, count int) error {
tickCh := time.Tick(500 * time.Millisecond)
stopCh := time.After(5 * time.Second)
// only look for exec events
execQuery := fmt.Sprintf("%s=%s", events.EventType, events.ExecEvent)
for {
select {
case <-tickCh:
eventsInSite, err := site.SearchEvents(now, now.Add(1*time.Hour), execQuery, 0)
if err != nil {
return trace.Wrap(err)
}
// found the number of events we were looking for
if got, want := len(eventsInSite), count; got == want {
return nil
}
case <-stopCh:
return trace.BadParameter("unable to find %v events after 5s", count)
}
}
}
siteA := a.GetSiteAPI("site-A")
err = searchAndAssert(siteA, tt.outExecCountSiteA)
c.Assert(err, check.IsNil)
siteB := b.GetSiteAPI("site-B")
err = searchAndAssert(siteB, tt.outExecCountSiteB)
c.Assert(err, check.IsNil)
// stop both sites for real
c.Assert(b.Stop(true), check.IsNil)
c.Assert(a.Stop(true), check.IsNil)
}
}
// TestTwoClustersProxy checks that the reverse tunnel uses an HTTP proxy to
// establish a connection.
func (s *IntSuite) TestTwoClustersProxy(c *check.C) {
// start the http proxy
ps := &proxyServer{}
ts := httptest.NewServer(ps)
defer ts.Close()
// set the http_proxy environment variable
u, err := url.Parse(ts.URL)
c.Assert(err, check.IsNil)
os.Setenv("http_proxy", u.Host)
defer os.Setenv("http_proxy", "")
username := s.me.Username
a := NewInstance(InstanceConfig{ClusterName: "site-A", HostID: HostID, NodeName: Host, Ports: s.getPorts(5), Priv: s.priv, Pub: s.pub})
b := NewInstance(InstanceConfig{ClusterName: "site-B", HostID: HostID, NodeName: Host, Ports: s.getPorts(5), Priv: s.priv, Pub: s.pub})
a.AddUser(username, []string{username})
b.AddUser(username, []string{username})
c.Assert(b.Create(a.Secrets.AsSlice(), false, nil), check.IsNil)
c.Assert(a.Create(b.Secrets.AsSlice(), true, nil), check.IsNil)
c.Assert(b.Start(), check.IsNil)
c.Assert(a.Start(), check.IsNil)
// wait for both sites to see each other via their reverse tunnels (for up to 10 seconds)
abortTime := time.Now().Add(time.Second * 10)
for len(a.Tunnel.GetSites()) < 2 && len(b.Tunnel.GetSites()) < 2 {
time.Sleep(time.Millisecond * 200)
if time.Now().After(abortTime) {
c.Fatalf("two sites do not see each other: tunnels are not working")
}
}
// make sure the reverse tunnel went through the proxy
c.Assert(ps.Count() > 0, check.Equals, true, check.Commentf("proxy did not intercept any connection"))
// stop both sites for real
c.Assert(b.Stop(true), check.IsNil)
c.Assert(a.Stop(true), check.IsNil)
}
// TestHA tests the scenario when the auth server for the cluster goes down
// and we switch to local persistent caches
func (s *IntSuite) TestHA(c *check.C) {
username := s.me.Username
a := NewInstance(InstanceConfig{ClusterName: "cluster-a", HostID: HostID, NodeName: Host, Ports: s.getPorts(5), Priv: s.priv, Pub: s.pub})
b := NewInstance(InstanceConfig{ClusterName: "cluster-b", HostID: HostID, NodeName: Host, Ports: s.getPorts(5), Priv: s.priv, Pub: s.pub})
a.AddUser(username, []string{username})
b.AddUser(username, []string{username})
c.Assert(b.Create(a.Secrets.AsSlice(), false, nil), check.IsNil)
c.Assert(a.Create(b.Secrets.AsSlice(), true, nil), check.IsNil)
c.Assert(b.Start(), check.IsNil)
c.Assert(a.Start(), check.IsNil)
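// start an extra node and a second proxy on cluster-a; the node is what the
// client connects to after the auth server goes down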
nodePorts := s.getPorts(3)
sshPort, proxyWebPort, proxySSHPort := nodePorts[0], nodePorts[1], nodePorts[2]
c.Assert(a.StartNodeAndProxy("cluster-a-node", sshPort, proxyWebPort, proxySSHPort), check.IsNil)
// wait for both sites to see each other via their reverse tunnels (for up to 10 seconds)
abortTime := time.Now().Add(time.Second * 10)
for len(a.Tunnel.GetSites()) < 2 && len(b.Tunnel.GetSites()) < 2 {
time.Sleep(time.Millisecond * 2000)
if time.Now().After(abortTime) {
c.Fatalf("two sites do not see each other: tunnels are not working")
}
}
cmd := []string{"echo", "hello world"}
tc, err := b.NewClient(ClientConfig{Login: username, Cluster: "cluster-a", Host: Loopback, Port: sshPort})
c.Assert(err, check.IsNil)
output := &bytes.Buffer{}
tc.Stdout = output
c.Assert(err, check.IsNil)
// execute an SSH command through cluster-b to the node on cluster-a,
// retrying a few times while the reverse tunnels and 'tc' (the client)
// establish their connections
for i := 0; i < 10; i++ {
time.Sleep(time.Millisecond * 50)
err = tc.SSH(context.TODO(), cmd, false)
if err == nil {
break
}
}
c.Assert(err, check.IsNil)
c.Assert(output.String(), check.Equals, "hello world\n")
// stop auth server a now
c.Assert(a.Stop(true), check.IsNil)
// try the same SSH command now that cluster-a's auth server is down;
// the node should keep serving from its local cache, so retry until
// 'tc' (the client) reconnects
for i := 0; i < 10; i++ {
time.Sleep(time.Millisecond * 50)
err = tc.SSH(context.TODO(), cmd, false)
if err == nil {
break
}
}
c.Assert(err, check.IsNil)
// stop cluster and remaining nodes
c.Assert(b.Stop(true), check.IsNil)
c.Assert(b.StopNodes(), check.IsNil)
}
// TestMapRoles tests local to remote role mapping and access patterns
func (s *IntSuite) TestMapRoles(c *check.C) {
username := s.me.Username
clusterMain := "cluster-main"
clusterAux := "cluster-aux"
main := NewInstance(InstanceConfig{ClusterName: clusterMain, HostID: HostID, NodeName: Host, Ports: s.getPorts(5), Priv: s.priv, Pub: s.pub})
aux := NewInstance(InstanceConfig{ClusterName: clusterAux, HostID: HostID, NodeName: Host, Ports: s.getPorts(5), Priv: s.priv, Pub: s.pub})
// main cluster has a local user who belongs to the role "main-devs"
mainDevs := "main-devs"
role, err := services.NewRole(mainDevs, services.RoleSpecV3{
Allow: services.RoleConditions{
Logins: []string{username},
},
})
c.Assert(err, check.IsNil)
main.AddUserWithRole(username, role)
// for the role mapping test we turn on the Web API on the main cluster,
// as it is used when establishing the trusted cluster relationship
makeConfig := func(enableSSH bool) ([]*InstanceSecrets, *service.Config) {
tconf := service.MakeDefaultConfig()
tconf.SSH.Enabled = enableSSH
tconf.Console = nil
tconf.Proxy.DisableWebService = false
tconf.Proxy.DisableWebInterface = true
return nil, tconf
}
lib.SetInsecureDevMode(true)
defer lib.SetInsecureDevMode(false)
c.Assert(main.CreateEx(makeConfig(false)), check.IsNil)
c.Assert(aux.CreateEx(makeConfig(true)), check.IsNil)
// the auxiliary cluster has a role "aux-devs".
// connect the aux cluster to the main cluster using trusted clusters,
// so the remote user will be allowed to assume the role specified by
// mapping the remote role "main-devs" to the local role "aux-devs"
auxDevs := "aux-devs"
role, err = services.NewRole(auxDevs, services.RoleSpecV3{
Allow: services.RoleConditions{
Logins: []string{username},
},
})
c.Assert(err, check.IsNil)
err = aux.Process.GetAuthServer().UpsertRole(role)
c.Assert(err, check.IsNil)
trustedClusterToken := "trusted-clsuter-token"
err = main.Process.GetAuthServer().UpsertToken(trustedClusterToken, []teleport.Role{teleport.RoleTrustedCluster}, backend.Forever)
c.Assert(err, check.IsNil)
trustedCluster := main.Secrets.AsTrustedCluster(trustedClusterToken, services.RoleMap{
{Remote: mainDevs, Local: []string{auxDevs}},
})
// modify trusted cluster resource name so it would not
// match the cluster name to check that it does not matter
trustedCluster.SetName(main.Secrets.SiteName + "-cluster")
c.Assert(main.Start(), check.IsNil)
c.Assert(aux.Start(), check.IsNil)
err = trustedCluster.CheckAndSetDefaults()
c.Assert(err, check.IsNil)
// try and upsert a trusted cluster
var upsertSuccess bool
for i := 0; i < 10; i++ {
log.Debugf("Will create trusted cluster %v, attempt %v", trustedCluster, i)
_, err = aux.Process.GetAuthServer().UpsertTrustedCluster(trustedCluster)
if err != nil {
if trace.IsConnectionProblem(err) {
log.Debugf("retrying on connection problem: %v", err)
continue
}
c.Fatalf("got non connection problem %v", err)
}
upsertSuccess = true
break
}
// make sure we upsert a trusted cluster
c.Assert(upsertSuccess, check.Equals, true)
nodePorts := s.getPorts(3)
sshPort, proxyWebPort, proxySSHPort := nodePorts[0], nodePorts[1], nodePorts[2]
c.Assert(aux.StartNodeAndProxy("aux-node", sshPort, proxyWebPort, proxySSHPort), check.IsNil)
// wait for both sites to see each other via their reverse tunnels (for up to 10 seconds)
abortTime := time.Now().Add(time.Second * 10)
for len(main.Tunnel.GetSites()) < 2 {
time.Sleep(time.Millisecond * 2000)
if time.Now().After(abortTime) {
c.Fatalf("two clusters do not see each other: tunnels are not working")
}
}
// Make sure that GetNodes returns nodes in the remote site. This makes
// sure identity-aware GetNodes works for remote clusters. Testing that
// identity-aware GetNodes returns the correct nodes is done in TestList.
var nodes []services.Server
for i := 0; i < 10; i++ {
nodes, err = aux.Process.GetAuthServer().GetNodes(defaults.Namespace, services.SkipValidation())
c.Assert(err, check.IsNil)
if len(nodes) == 2 {
break
}
time.Sleep(100 * time.Millisecond)
}
c.Assert(nodes, check.HasLen, 2)
cmd := []string{"echo", "hello world"}
tc, err := main.NewClient(ClientConfig{Login: username, Cluster: clusterAux, Host: Loopback, Port: sshPort})
c.Assert(err, check.IsNil)
output := &bytes.Buffer{}
tc.Stdout = output
c.Assert(err, check.IsNil)
// execute an SSH command through the trusted cluster to the aux node,
// retrying a few times while the reverse tunnels and 'tc' (the client)
// establish their connections
for i := 0; i < 10; i++ {
time.Sleep(time.Millisecond * 50)
err = tc.SSH(context.TODO(), cmd, false)
if err == nil {
break
}
}
c.Assert(err, check.IsNil)
c.Assert(output.String(), check.Equals, "hello world\n")
// make sure both clusters have the right certificate authorities with the right signing keys.
var tests = []struct {
mainClusterName string
auxClusterName string
inCluster *TeleInstance
outChkMainUserCA check.Checker
outLenMainUserCA int
outChkMainHostCA check.Checker
outLenMainHostCA int
outChkAuxUserCA check.Checker
outLenAuxUserCA int
outChkAuxHostCA check.Checker
outLenAuxHostCA int
}{
// 0 - main
// * User CA for main has one signing key.
// * Host CA for main has one signing key.
// * User CA for aux does not exist.
// * Host CA for aux has no signing keys.
{
main.Secrets.SiteName,
aux.Secrets.SiteName,
main,
check.IsNil, 1,
check.IsNil, 1,
check.NotNil, 0,
check.IsNil, 0,
},
// 1 - aux
// * User CA for main has no signing keys.
// * Host CA for main has no signing keys.
// * User CA for aux has one signing key.
// * Host CA for aux has one signing key.
{
trustedCluster.GetName(),
aux.Secrets.SiteName,
aux,
check.IsNil, 0,
check.IsNil, 0,
check.IsNil, 1,
check.IsNil, 1,
},
}
for i, tt := range tests {
cid := services.CertAuthID{Type: services.UserCA, DomainName: tt.mainClusterName}
mainUserCAs, err := tt.inCluster.Process.GetAuthServer().GetCertAuthority(cid, true)
c.Assert(err, tt.outChkMainUserCA)
if tt.outChkMainUserCA == check.IsNil {
c.Assert(mainUserCAs.GetSigningKeys(), check.HasLen, tt.outLenMainUserCA, check.Commentf("Test %v, Main User CA", i))
}
cid = services.CertAuthID{Type: services.HostCA, DomainName: tt.mainClusterName}
mainHostCAs, err := tt.inCluster.Process.GetAuthServer().GetCertAuthority(cid, true)
c.Assert(err, tt.outChkMainHostCA)
if tt.outChkMainHostCA == check.IsNil {
c.Assert(mainHostCAs.GetSigningKeys(), check.HasLen, tt.outLenMainHostCA, check.Commentf("Test %v, Main Host CA", i))
}
cid = services.CertAuthID{Type: services.UserCA, DomainName: tt.auxClusterName}
auxUserCAs, err := tt.inCluster.Process.GetAuthServer().GetCertAuthority(cid, true)
c.Assert(err, tt.outChkAuxUserCA)
if tt.outChkAuxUserCA == check.IsNil {
c.Assert(auxUserCAs.GetSigningKeys(), check.HasLen, tt.outLenAuxUserCA, check.Commentf("Test %v, Aux User CA", i))
}
cid = services.CertAuthID{Type: services.HostCA, DomainName: tt.auxClusterName}
auxHostCAs, err := tt.inCluster.Process.GetAuthServer().GetCertAuthority(cid, true)
c.Assert(err, tt.outChkAuxHostCA)
if tt.outChkAuxHostCA == check.IsNil {
c.Assert(auxHostCAs.GetSigningKeys(), check.HasLen, tt.outLenAuxHostCA, check.Commentf("Test %v, Aux Host CA", i))
}
}
// stop clusters and remaining nodes
c.Assert(main.Stop(true), check.IsNil)
c.Assert(aux.Stop(true), check.IsNil)
}
// TestTrustedClusters tests remote cluster scenarios
// using the trusted clusters feature
func (s *IntSuite) TestTrustedClusters(c *check.C) {
s.trustedClusters(c, false)
}
// TestMultiplexingTrustedClusters tests the same remote cluster scenarios
// with proxy multiplexing enabled
func (s *IntSuite) TestMultiplexingTrustedClusters(c *check.C) {
s.trustedClusters(c, true)
}
func (s *IntSuite) trustedClusters(c *check.C, multiplex bool) {
username := s.me.Username
clusterMain := "cluster-main"
clusterAux := "cluster-aux"
main := NewInstance(InstanceConfig{ClusterName: clusterMain, HostID: HostID, NodeName: Host, Ports: s.getPorts(5), Priv: s.priv, Pub: s.pub, MultiplexProxy: multiplex})
aux := NewInstance(InstanceConfig{ClusterName: clusterAux, HostID: HostID, NodeName: Host, Ports: s.getPorts(5), Priv: s.priv, Pub: s.pub})
// main cluster has a local user who belongs to the role "main-devs"
mainDevs := "main-devs"
role, err := services.NewRole(mainDevs, services.RoleSpecV3{
Allow: services.RoleConditions{
Logins: []string{username},
},
})
c.Assert(err, check.IsNil)
main.AddUserWithRole(username, role)
// for the role mapping test we turn on the Web API on the main cluster,
// as it is used when establishing the trusted cluster relationship
makeConfig := func(enableSSH bool) ([]*InstanceSecrets, *service.Config) {
tconf := service.MakeDefaultConfig()
tconf.SSH.Enabled = enableSSH
tconf.Console = nil
tconf.Proxy.DisableWebService = false
tconf.Proxy.DisableWebInterface = true
return nil, tconf
}
lib.SetInsecureDevMode(true)
defer lib.SetInsecureDevMode(false)
c.Assert(main.CreateEx(makeConfig(false)), check.IsNil)
c.Assert(aux.CreateEx(makeConfig(true)), check.IsNil)
// the auxiliary cluster has a role "aux-devs".
// connect the aux cluster to the main cluster using trusted clusters,
// so the remote user will be allowed to assume the role specified by
// mapping the remote role "main-devs" to the local role "aux-devs"
auxDevs := "aux-devs"
role, err = services.NewRole(auxDevs, services.RoleSpecV3{
Allow: services.RoleConditions{
Logins: []string{username},
},
})
c.Assert(err, check.IsNil)
err = aux.Process.GetAuthServer().UpsertRole(role)
c.Assert(err, check.IsNil)
trustedClusterToken := "trusted-clsuter-token"
err = main.Process.GetAuthServer().UpsertToken(trustedClusterToken, []teleport.Role{teleport.RoleTrustedCluster}, backend.Forever)
c.Assert(err, check.IsNil)
trustedCluster := main.Secrets.AsTrustedCluster(trustedClusterToken, services.RoleMap{
{Remote: mainDevs, Local: []string{auxDevs}},
})
// modify trusted cluster resource name so it would not
// match the cluster name to check that it does not matter
trustedCluster.SetName(main.Secrets.SiteName + "-cluster")
c.Assert(main.Start(), check.IsNil)
c.Assert(aux.Start(), check.IsNil)
err = trustedCluster.CheckAndSetDefaults()
c.Assert(err, check.IsNil)
// try and upsert a trusted cluster
var upsertSuccess bool
for i := 0; i < 10; i++ {
log.Debugf("Will create trusted cluster %v, attempt %v", trustedCluster, i)
_, err = aux.Process.GetAuthServer().UpsertTrustedCluster(trustedCluster)
if err != nil {
if trace.IsConnectionProblem(err) {
log.Debugf("retrying on connection problem: %v", err)
continue
}
c.Fatalf("got non connection problem %v", err)
}
upsertSuccess = true
break
}
// make sure we upsert a trusted cluster
c.Assert(upsertSuccess, check.Equals, true)
nodePorts := s.getPorts(3)
sshPort, proxyWebPort, proxySSHPort := nodePorts[0], nodePorts[1], nodePorts[2]
c.Assert(aux.StartNodeAndProxy("aux-node", sshPort, proxyWebPort, proxySSHPort), check.IsNil)
// wait for both sites to see each other via their reverse tunnels (for up to 10 seconds)
abortTime := time.Now().Add(time.Second * 10)
for len(main.Tunnel.GetSites()) < 2 {
time.Sleep(time.Millisecond * 2000)
if time.Now().After(abortTime) {
c.Fatalf("two clusters do not see each other: tunnels are not working")
}
}
cmd := []string{"echo", "hello world"}
tc, err := main.NewClient(ClientConfig{Login: username, Cluster: clusterAux, Host: Loopback, Port: sshPort})
c.Assert(err, check.IsNil)
output := &bytes.Buffer{}
tc.Stdout = output
c.Assert(err, check.IsNil)
// execute an SSH command through the trusted cluster to the aux node,
// retrying a few times while the reverse tunnels and 'tc' (the client)
// establish their connections
for i := 0; i < 10; i++ {
time.Sleep(time.Millisecond * 50)
err = tc.SSH(context.TODO(), cmd, false)
if err == nil {
break
}
}
c.Assert(err, check.IsNil)
c.Assert(output.String(), check.Equals, "hello world\n")
// check that remote cluster has been provisioned
remoteClusters, err := main.Process.GetAuthServer().GetRemoteClusters()
c.Assert(err, check.IsNil)
c.Assert(remoteClusters, check.HasLen, 1)
c.Assert(remoteClusters[0].GetName(), check.Equals, clusterAux)
// after removing the remote cluster, the connection will start failing
err = main.Process.GetAuthServer().DeleteRemoteCluster(clusterAux)
c.Assert(err, check.IsNil)
for i := 0; i < 10; i++ {
time.Sleep(time.Millisecond * 50)
err = tc.SSH(context.TODO(), cmd, false)
if err != nil {
break
}
}
c.Assert(err, check.NotNil, check.Commentf("expected tunnel to close and SSH client to start failing"))
// remove the trusted cluster from the aux cluster side, and recreate it
// right after; this should re-establish the connection
err = aux.Process.GetAuthServer().DeleteTrustedCluster(trustedCluster.GetName())
c.Assert(err, check.IsNil)
_, err = aux.Process.GetAuthServer().UpsertTrustedCluster(trustedCluster)
c.Assert(err, check.IsNil)
// check that remote cluster has been re-provisioned
remoteClusters, err = main.Process.GetAuthServer().GetRemoteClusters()
c.Assert(err, check.IsNil)
c.Assert(remoteClusters, check.HasLen, 1)
c.Assert(remoteClusters[0].GetName(), check.Equals, clusterAux)
// wait for both sites to see each other via their reverse tunnels (for up to 10 seconds)
abortTime = time.Now().Add(time.Second * 10)
for len(main.Tunnel.GetSites()) < 2 {
time.Sleep(time.Millisecond * 2000)
if time.Now().After(abortTime) {
c.Fatalf("two clusters do not see each other: tunnels are not working")
}
}
// connection and client should recover and work again
output = &bytes.Buffer{}
tc.Stdout = output
for i := 0; i < 10; i++ {
time.Sleep(time.Millisecond * 50)
err = tc.SSH(context.TODO(), cmd, false)
if err == nil {
break
}
}
c.Assert(err, check.IsNil)
c.Assert(output.String(), check.Equals, "hello world\n")
// stop clusters and remaining nodes
c.Assert(main.Stop(true), check.IsNil)
c.Assert(aux.Stop(true), check.IsNil)
}
// TestDiscovery tests the case of multiple proxies and a reverse tunnel
// agent that eventually connects to the right proxy
func (s *IntSuite) TestDiscovery(c *check.C) {
username := s.me.Username
// create load balancer for main cluster proxies
frontend := *utils.MustParseAddr(net.JoinHostPort(Loopback, strconv.Itoa(s.getPorts(1)[0])))
lb, err := utils.NewLoadBalancer(context.TODO(), frontend)
c.Assert(err, check.IsNil)
c.Assert(lb.Listen(), check.IsNil)
go lb.Serve()
defer lb.Close()
remote := NewInstance(InstanceConfig{ClusterName: "cluster-remote", HostID: HostID, NodeName: Host, Ports: s.getPorts(5), Priv: s.priv, Pub: s.pub})
main := NewInstance(InstanceConfig{ClusterName: "cluster-main", HostID: HostID, NodeName: Host, Ports: s.getPorts(5), Priv: s.priv, Pub: s.pub})
remote.AddUser(username, []string{username})
main.AddUser(username, []string{username})
c.Assert(main.Create(remote.Secrets.AsSlice(), false, nil), check.IsNil)
mainSecrets := main.Secrets
// switch listen address of the main cluster to load balancer
mainProxyAddr := *utils.MustParseAddr(mainSecrets.ListenAddr)
lb.AddBackend(mainProxyAddr)
mainSecrets.ListenAddr = frontend.String()
c.Assert(remote.Create(mainSecrets.AsSlice(), true, nil), check.IsNil)
c.Assert(main.Start(), check.IsNil)
c.Assert(remote.Start(), check.IsNil)
// wait for both sites to see each other via their reverse tunnels (for up to 10 seconds)
abortTime := time.Now().Add(time.Second * 10)
for len(main.Tunnel.GetSites()) < 2 {
time.Sleep(time.Millisecond * 2000)
if time.Now().After(abortTime) {
c.Fatalf("two clusters do not see each other: tunnels are not working")
}
}
// start second proxy
nodePorts := s.getPorts(3)
proxyReverseTunnelPort, proxyWebPort, proxySSHPort := nodePorts[0], nodePorts[1], nodePorts[2]
proxyConfig := ProxyConfig{
Name: "cluster-main-proxy",
SSHPort: proxySSHPort,
WebPort: proxyWebPort,
ReverseTunnelPort: proxyReverseTunnelPort,
}
err = main.StartProxy(proxyConfig)
c.Assert(err, check.IsNil)
// add second proxy as a backend to the load balancer
lb.AddBackend(*utils.MustParseAddr(net.JoinHostPort(Loopback, strconv.Itoa(proxyReverseTunnelPort))))
// At this point the remote cluster should be connected to two proxies in
// the main cluster.
waitForProxyCount(remote, "cluster-main", 2)
// execute the connection via first proxy
cfg := ClientConfig{
Login: username,
Cluster: "cluster-remote",
Host: Loopback,
Port: remote.GetPortSSHInt(),
}
output, err := runCommand(main, []string{"echo", "hello world"}, cfg, 1)
c.Assert(err, check.IsNil)
c.Assert(output, check.Equals, "hello world\n")
// Execute the connection via second proxy, should work. This command is
// tried 10 times with 250 millisecond delay between each attempt to allow
// the discovery request to be received and the connection added to the agent
// pool.
cfgProxy := ClientConfig{
Login: username,
Cluster: "cluster-remote",
Host: Loopback,
Port: remote.GetPortSSHInt(),
Proxy: &proxyConfig,
}
output, err = runCommand(main, []string{"echo", "hello world"}, cfgProxy, 10)
c.Assert(err, check.IsNil)
c.Assert(output, check.Equals, "hello world\n")
// now disconnect the main proxy and make sure it will reconnect eventually
lb.RemoveBackend(mainProxyAddr)
// requests going via main proxy will fail
output, err = runCommand(main, []string{"echo", "hello world"}, cfg, 1)
c.Assert(err, check.NotNil)
// requests going via second proxy will succeed
output, err = runCommand(main, []string{"echo", "hello world"}, cfgProxy, 1)
c.Assert(err, check.IsNil)
c.Assert(output, check.Equals, "hello world\n")
// Connect the main proxy back and make sure agents have reconnected over time.
// This command is tried up to 40 times with a 250 millisecond delay between
// attempts to allow the discovery request to be received and the connection
// added to the agent pool.
lb.AddBackend(mainProxyAddr)
output, err = runCommand(main, []string{"echo", "hello world"}, cfg, 40)
c.Assert(err, check.IsNil)
c.Assert(output, check.Equals, "hello world\n")
// Stop one of proxies on the main cluster.
err = main.StopProxy()
c.Assert(err, check.IsNil)
// Wait for the remote cluster to detect the outbound connection is gone.
waitForProxyCount(remote, "cluster-main", 1)
// Stop both clusters and remaining nodes.
c.Assert(remote.Stop(true), check.IsNil)
c.Assert(main.Stop(true), check.IsNil)
}
// waitForProxyCount waits for the proxy count in clusterName to reach count,
// polling every 250ms for up to 20 attempts (roughly 5 seconds).
func waitForProxyCount(t *TeleInstance, clusterName string, count int) error {
var counts map[string]int
for i := 0; i < 20; i++ {
counts = t.Pool.Counts()
if counts[clusterName] == count {
return nil
}
time.Sleep(250 * time.Millisecond)
}
return trace.BadParameter("proxy count on %v: %v", clusterName, counts[clusterName])
}
// TestExternalClient tests if we can connect to a node in a Teleport
// cluster. Both normal and recording proxies are tested.
func (s *IntSuite) TestExternalClient(c *check.C) {
// Only run this test if we have access to the external SSH binary.
_, err := exec.LookPath("ssh")
if err != nil {
c.Skip("Skipping TestExternalClient, no external SSH binary found.")
return
}
var tests = []struct {
inRecordLocation string
inForwardAgent bool
inCommand string
outError bool
outExecOutput string
}{
// Record at the node, forward agent. Will still work even though the agent
// will be rejected by the proxy (agent forwarding request rejection is a
// soft failure).
{
inRecordLocation: services.RecordAtNode,
inForwardAgent: true,
inCommand: "echo hello",
outError: false,
outExecOutput: "hello",
},
// Record at the node, don't forward agent, will work. This is the normal
// Teleport mode of operation.
{
inRecordLocation: services.RecordAtNode,
inForwardAgent: false,
inCommand: "echo hello",
outError: false,
outExecOutput: "hello",
},
// Record at the proxy, forward agent. Will work.
{
inRecordLocation: services.RecordAtProxy,
inForwardAgent: true,
inCommand: "echo hello",
outError: false,
outExecOutput: "hello",
},
// Record at the proxy, don't forward agent, request will fail because
// recording proxy requires an agent.
{
inRecordLocation: services.RecordAtProxy,
inForwardAgent: false,
inCommand: "echo hello",
outError: true,
outExecOutput: "",
},
}
for _, tt := range tests {
// Create a Teleport instance with auth, proxy, and node.
makeConfig := func() (*check.C, []string, []*InstanceSecrets, *service.Config) {
clusterConfig, err := services.NewClusterConfig(services.ClusterConfigSpecV3{
SessionRecording: tt.inRecordLocation,
})
c.Assert(err, check.IsNil)
tconf := service.MakeDefaultConfig()
tconf.Console = nil
tconf.Auth.Enabled = true
tconf.Auth.ClusterConfig = clusterConfig
tconf.Proxy.Enabled = true
tconf.Proxy.DisableWebService = true
tconf.Proxy.DisableWebInterface = true
tconf.SSH.Enabled = true
return c, nil, nil, tconf
}
t := s.newTeleportWithConfig(makeConfig())
defer t.Stop(true)
// Start (and defer close) an agent that runs during this integration test.
teleAgent, socketDirPath, socketPath, err := createAgent(
s.me,
t.Secrets.Users[s.me.Username].Key.Priv,
t.Secrets.Users[s.me.Username].Key.Cert)
c.Assert(err, check.IsNil)
defer closeAgent(teleAgent, socketDirPath)
// Create a *exec.Cmd that will execute the external SSH command.
execCmd, err := externalSSHCommand(commandOptions{
forwardAgent: tt.inForwardAgent,
socketPath: socketPath,
proxyPort: t.GetPortProxy(),
nodePort: t.GetPortSSH(),
command: tt.inCommand,
})
c.Assert(err, check.IsNil)
// Execute SSH command and check the output is what we expect.
output, err := execCmd.Output()
if tt.outError {
c.Assert(err, check.NotNil)
} else {
if err != nil {
// If an *exec.ExitError is returned, parse it and return stderr. If this
// is not done then c.Assert will just print a byte array for the error.
er, ok := err.(*exec.ExitError)
if ok {
c.Fatalf("Unexpected error: %v", string(er.Stderr))
}
}
c.Assert(err, check.IsNil)
c.Assert(strings.TrimSpace(string(output)), check.Equals, tt.outExecOutput)
}
}
}
// TestControlMaster checks if multiple SSH channels can be created over the
// same connection. This is frequently used by tools like Ansible.
func (s *IntSuite) TestControlMaster(c *check.C) {
// Only run this test if we have access to the external SSH binary.
_, err := exec.LookPath("ssh")
if err != nil {
c.Skip("Skipping TestControlMaster, no external SSH binary found.")
return
}
var tests = []struct {
inRecordLocation string
}{
// Run tests when Teleport is recording sessions at the node.
{
inRecordLocation: services.RecordAtNode,
},
// Run tests when Teleport is recording sessions at the proxy.
{
inRecordLocation: services.RecordAtProxy,
},
}
for _, tt := range tests {
controlDir, err := ioutil.TempDir("", "teleport-")
c.Assert(err, check.IsNil)
defer os.RemoveAll(controlDir)
controlPath := filepath.Join(controlDir, "control-path")
// Create a Teleport instance with auth, proxy, and node.
makeConfig := func() (*check.C, []string, []*InstanceSecrets, *service.Config) {
clusterConfig, err := services.NewClusterConfig(services.ClusterConfigSpecV3{
SessionRecording: tt.inRecordLocation,
})
c.Assert(err, check.IsNil)
tconf := service.MakeDefaultConfig()
tconf.Console = nil
tconf.Auth.Enabled = true
tconf.Auth.ClusterConfig = clusterConfig
tconf.Proxy.Enabled = true
tconf.Proxy.DisableWebService = true
tconf.Proxy.DisableWebInterface = true
tconf.SSH.Enabled = true
return c, nil, nil, tconf
}
t := s.newTeleportWithConfig(makeConfig())
defer t.Stop(true)
// Start (and defer close) an agent that runs during this integration test.
teleAgent, socketDirPath, socketPath, err := createAgent(
s.me,
t.Secrets.Users[s.me.Username].Key.Priv,
t.Secrets.Users[s.me.Username].Key.Cert)
c.Assert(err, check.IsNil)
defer closeAgent(teleAgent, socketDirPath)
// Create and run an exec command twice with the passed in ControlPath. This
// will cause re-use of the connection and creation of two sessions within
// the connection.
for i := 0; i < 2; i++ {
execCmd, err := externalSSHCommand(commandOptions{
forcePTY: true,
forwardAgent: true,
controlPath: controlPath,
socketPath: socketPath,
proxyPort: t.GetPortProxy(),
nodePort: t.GetPortSSH(),
command: "echo hello",
})
c.Assert(err, check.IsNil)
// Execute SSH command and check the output is what we expect.
output, err := execCmd.Output()
if err != nil {
// If an *exec.ExitError is returned, parse it and return stderr. If this
// is not done then c.Assert will just print a byte array for the error.
er, ok := err.(*exec.ExitError)
if ok {
c.Fatalf("Unexpected error: %v", string(er.Stderr))
}
}
c.Assert(err, check.IsNil)
c.Assert(strings.TrimSpace(string(output)), check.Equals, "hello")
}
}
}
// TestProxyHostKeyCheck uses the forwarding proxy to connect to a server that
// presents a host key instead of a certificate in different configurations
// for the host key checking parameter in services.ClusterConfig.
func (s *IntSuite) TestProxyHostKeyCheck(c *check.C) {
var tests = []struct {
inHostKeyCheck string
outError bool
}{
// disable host key checking, should be able to connect
{
services.HostKeyCheckNo,
false,
},
// enable host key checking, should NOT be able to connect
{
services.HostKeyCheckYes,
true,
},
}
for _, tt := range tests {
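// use the suite's raw private key as the host signer so the server presents
// a plain host key rather than a certificate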
hostSigner, err := ssh.ParsePrivateKey(s.priv)
c.Assert(err, check.IsNil)
// start an SSH server that presents a host key instead of a certificate
nodePort := s.getPorts(1)[0]
sshNode, err := newDiscardServer(Host, nodePort, hostSigner)
c.Assert(err, check.IsNil)
err = sshNode.Start()
c.Assert(err, check.IsNil)
defer sshNode.Stop()
// create a teleport instance with auth, proxy, and node
makeConfig := func() (*check.C, []string, []*InstanceSecrets, *service.Config) {
clusterConfig, err := services.NewClusterConfig(services.ClusterConfigSpecV3{
SessionRecording: services.RecordAtProxy,
ProxyChecksHostKeys: tt.inHostKeyCheck,
})
c.Assert(err, check.IsNil)
tconf := service.MakeDefaultConfig()
tconf.Console = nil
tconf.Auth.Enabled = true
tconf.Auth.ClusterConfig = clusterConfig
tconf.Proxy.Enabled = true
tconf.Proxy.DisableWebService = true
tconf.Proxy.DisableWebInterface = true
return c, nil, nil, tconf
}
t := s.newTeleportWithConfig(makeConfig())
defer t.Stop(true)
// create a teleport client and exec a command
clientConfig := ClientConfig{
Login: s.me.Username,
Cluster: Site,
Host: Host,
Port: nodePort,
ForwardAgent: true,
}
_, err = runCommand(t, []string{"echo hello"}, clientConfig, 1)
// check if we were able to exec the command or not
if tt.outError {
c.Assert(err, check.NotNil)
} else {
c.Assert(err, check.IsNil)
}
}
}
// TestAuditOff checks that when session recording has been turned off,
// sessions are not recorded.
func (s *IntSuite) TestAuditOff(c *check.C) {
var err error
// create a teleport instance with auth, proxy, and node
makeConfig := func() (*check.C, []string, []*InstanceSecrets, *service.Config) {
clusterConfig, err := services.NewClusterConfig(services.ClusterConfigSpecV3{
SessionRecording: services.RecordOff,
})
c.Assert(err, check.IsNil)
tconf := service.MakeDefaultConfig()
tconf.Console = nil
tconf.Auth.Enabled = true
tconf.Auth.ClusterConfig = clusterConfig
tconf.Proxy.Enabled = true
tconf.Proxy.DisableWebService = true
tconf.Proxy.DisableWebInterface = true
tconf.SSH.Enabled = true
return c, nil, nil, tconf
}
t := s.newTeleportWithConfig(makeConfig())
defer t.Stop(true)
// get access to an auth client for the cluster
site := t.GetSiteAPI(Site)
c.Assert(site, check.NotNil)
// should have no sessions in it to start with
sessions, _ := site.GetSessions(defaults.Namespace)
c.Assert(len(sessions), check.Equals, 0)
// create an interactive session (this goroutine acts as this user's terminal)
endCh := make(chan error, 1)
myTerm := NewTerminal(250)
go func() {
cl, err := t.NewClient(ClientConfig{
Login: s.me.Username,
Cluster: Site,
Host: Host,
Port: t.GetPortSSHInt(),
})
c.Assert(err, check.IsNil)
cl.Stdout = &myTerm
cl.Stdin = &myTerm
err = cl.SSH(context.TODO(), []string{}, false)
endCh <- err
}()
// wait until there's a session in there:
for i := 0; len(sessions) == 0; i++ {
time.Sleep(time.Millisecond * 20)
sessions, _ = site.GetSessions(defaults.Namespace)
if i > 100 {
c.Fatalf("Waited %v, but no sessions found", 100*20*time.Millisecond)
return
}
}
session := &sessions[0]
// wait for the user to join this session
for len(session.Parties) == 0 {
time.Sleep(time.Millisecond * 5)
session, err = site.GetSession(defaults.Namespace, sessions[0].ID)
c.Assert(err, check.IsNil)
}
// make sure it's us who joined! :)
c.Assert(session.Parties[0].User, check.Equals, s.me.Username)
// let's type "echo hi" followed by "enter" and then "exit" + "enter":
myTerm.Type("\aecho hi\n\r\aexit\n\r\a")
// wait for session to end
select {
case <-time.After(1 * time.Minute):
c.Fatalf("Timed out waiting for session to end.")
case <-endCh:
}
// the audit log should still record the fact that the session occurred
sessions, err = site.GetSessions(defaults.Namespace)
c.Assert(err, check.IsNil)
c.Assert(len(sessions), check.Equals, 1)
// however, attempts to read the actual session recording should fail because
// it was not actually recorded
_, err = site.GetSessionChunk(defaults.Namespace, session.ID, 0, events.MaxChunkBytes)
c.Assert(err, check.NotNil)
}
// TestPAM checks that Teleport PAM integration works correctly. In this case
// that means if the account and session modules return success, the user
// should be allowed to log in. If either the account or session module does
// not return success, the user should not be able to log in.
func (s *IntSuite) TestPAM(c *check.C) {
// Check if TestPAM can run. For PAM tests to run, the binary must have been
// built with PAM support and the system running the tests must have libpam
// installed, and have the policy files installed. This test is always run
// in a container as part of the CI/CD pipeline. To run this test locally,
// install the pam_teleport.so module by running 'make && sudo make install'
// from the modules/pam_teleport directory. This will install the PAM module
// as well as the policy files.
if !pam.BuildHasPAM() || !pam.SystemHasPAM() || !hasPAMPolicy() {
skipMessage := "Skipping TestPAM: no policy found. To run PAM tests run " +
"'make && sudo make install' from the modules/pam_teleport directory."
c.Skip(skipMessage)
}
var tests = []struct {
inEnabled bool
inServiceName string
outContains []string
outError bool
}{
// 0 - No PAM support, session should work but no PAM related output.
{
inEnabled: false,
inServiceName: "",
outContains: []string{},
outError: false,
},
// 1 - PAM enabled, module account and session functions return success.
{
inEnabled: true,
inServiceName: "teleport-success",
outContains: []string{
"Account opened successfully.",
"Session open successfully.",
},
outError: false,
},
// 2 - PAM enabled, module account functions fail.
{
inEnabled: true,
inServiceName: "teleport-acct-failure",
outContains: []string{},
outError: true,
},
// 3 - PAM enabled, module session functions fail.
{
inEnabled: true,
inServiceName: "teleport-session-failure",
outContains: []string{},
outError: true,
},
}
for _, tt := range tests {
// Create a teleport instance with auth, proxy, and node.
makeConfig := func() (*check.C, []string, []*InstanceSecrets, *service.Config) {
tconf := service.MakeDefaultConfig()
tconf.Console = nil
tconf.Auth.Enabled = true
tconf.Proxy.Enabled = true
tconf.Proxy.DisableWebService = true
tconf.Proxy.DisableWebInterface = true
tconf.SSH.Enabled = true
tconf.SSH.PAM.Enabled = tt.inEnabled
tconf.SSH.PAM.ServiceName = tt.inServiceName
return c, nil, nil, tconf
}
t := s.newTeleportWithConfig(makeConfig())
defer t.Stop(true)
termSession := NewTerminal(250)
// Create an interactive session and write something to the terminal.
ctx, cancel := context.WithCancel(context.Background())
go func() {
cl, err := t.NewClient(ClientConfig{
Login: s.me.Username,
Cluster: Site,
Host: Host,
Port: t.GetPortSSHInt(),
})
c.Assert(err, check.IsNil)
cl.Stdout = &termSession
cl.Stdin = &termSession
termSession.Type("\aecho hi\n\r\aexit\n\r\a")
err = cl.SSH(context.TODO(), []string{}, false)
// If an error is expected (for example PAM does not allow a session to be
// created), this failure needs to be checked here.
if tt.outError {
c.Assert(err, check.NotNil)
} else {
c.Assert(err, check.IsNil)
}
cancel()
}()
// Wait for the session to end or timeout after 10 seconds.
select {
case <-time.After(10 * time.Second):
c.Fatalf("Timeout exceeded waiting for session to complete.")
case <-ctx.Done():
}
// If any output is expected, check to make sure it was output.
for _, expectedOutput := range tt.outContains {
output := string(termSession.Output(100))
c.Assert(strings.Contains(output, expectedOutput), check.Equals, true)
}
}
}
// TestRotateSuccess tests full cycle cert authority rotation
func (s *IntSuite) TestRotateSuccess(c *check.C) {
for i := 0; i < getIterations(); i++ {
s.rotateSuccess(c)
}
}
func (s *IntSuite) rotateSuccess(c *check.C) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tconf := rotationConfig(true)
t := NewInstance(InstanceConfig{ClusterName: Site, HostID: HostID, NodeName: Host, Ports: s.getPorts(5), Priv: s.priv, Pub: s.pub})
logins := []string{s.me.Username}
for _, login := range logins {
t.AddUser(login, []string{login})
}
config, err := t.GenerateConfig(nil, tconf)
c.Assert(err, check.IsNil)
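// serviceC receives the Teleport process every time it starts or reloads,
// letting the test pick up the new process after each rotation phase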
serviceC := make(chan *service.TeleportProcess, 20)
runCtx, runCancel := context.WithCancel(context.TODO())
go func() {
defer runCancel()
service.Run(ctx, *config, func(cfg *service.Config) (service.Process, error) {
svc, err := service.NewTeleport(cfg)
if err == nil {
serviceC <- svc
}
return svc, err
})
}()
l := log.WithFields(log.Fields{trace.Component: teleport.Component("test", "rotate")})
svc, err := waitForProcessStart(serviceC)
c.Assert(err, check.IsNil)
// Setup user in the cluster
err = SetupUser(svc, s.me.Username, nil)
c.Assert(err, check.IsNil)
// capture credentials before reload started to simulate old client
initialCreds, err := GenerateUserCreds(svc, s.me.Username)
c.Assert(err, check.IsNil)
l.Infof("Service started. Setting rotation state to %v", services.RotationPhaseUpdateClients)
// start rotation
err = svc.GetAuthServer().RotateCertAuthority(auth.RotateRequest{
TargetPhase: services.RotationPhaseInit,
Mode: services.RotationModeManual,
})
c.Assert(err, check.IsNil)
// wait for the service phase update to be broadcast (the init phase does not trigger a reload)
err = waitForProcessEvent(svc, service.TeleportPhaseChangeEvent, 10*time.Second)
c.Assert(err, check.IsNil)
// update clients
err = svc.GetAuthServer().RotateCertAuthority(auth.RotateRequest{
TargetPhase: services.RotationPhaseUpdateClients,
Mode: services.RotationModeManual,
})
c.Assert(err, check.IsNil)
// wait until service reload
svc, err = waitForReload(serviceC, svc)
c.Assert(err, check.IsNil)
cfg := ClientConfig{
Login: s.me.Username,
Host: Loopback,
Port: t.GetPortSSHInt(),
}
clt, err := t.NewClientWithCreds(cfg, *initialCreds)
c.Assert(err, check.IsNil)
// client works as is before servers have been rotated
err = runAndMatch(clt, 3, []string{"echo", "hello world"}, ".*hello world.*")
c.Assert(err, check.IsNil)
l.Infof("Service reloaded. Setting rotation state to %v", services.RotationPhaseUpdateServers)
// move to the next phase
err = svc.GetAuthServer().RotateCertAuthority(auth.RotateRequest{
TargetPhase: services.RotationPhaseUpdateServers,
Mode: services.RotationModeManual,
})
c.Assert(err, check.IsNil)
// wait until service reloaded
svc, err = waitForReload(serviceC, svc)
c.Assert(err, check.IsNil)
// new credentials will work from this phase to others
newCreds, err := GenerateUserCreds(svc, s.me.Username)
c.Assert(err, check.IsNil)
clt, err = t.NewClientWithCreds(cfg, *newCreds)
c.Assert(err, check.IsNil)
// new client works
err = runAndMatch(clt, 3, []string{"echo", "hello world"}, ".*hello world.*")
c.Assert(err, check.IsNil)
l.Infof("Service reloaded. Setting rotation state to %v.", services.RotationPhaseStandby)
// complete rotation
err = svc.GetAuthServer().RotateCertAuthority(auth.RotateRequest{
TargetPhase: services.RotationPhaseStandby,
Mode: services.RotationModeManual,
})
c.Assert(err, check.IsNil)
// wait until service reloaded
svc, err = waitForReload(serviceC, svc)
c.Assert(err, check.IsNil)
// new client still works
err = runAndMatch(clt, 3, []string{"echo", "hello world"}, ".*hello world.*")
c.Assert(err, check.IsNil)
l.Infof("Service reloaded. Rotation has completed. Shuttting down service.")
// shut down the service
cancel()
// close the service without waiting for the connections to drain
svc.Close()
select {
case <-runCtx.Done():
case <-time.After(20 * time.Second):
c.Fatalf("failed to shut down the server")
}
}
// TestRotateRollback tests cert authority rollback
func (s *IntSuite) TestRotateRollback(c *check.C) {
for i := 0; i < getIterations(); i++ {
s.rotateRollback(c)
}
}
func (s *IntSuite) rotateRollback(c *check.C) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tconf := rotationConfig(true)
t := NewInstance(InstanceConfig{ClusterName: Site, HostID: HostID, NodeName: Host, Ports: s.getPorts(5), Priv: s.priv, Pub: s.pub})
logins := []string{s.me.Username}
for _, login := range logins {
t.AddUser(login, []string{login})
}
config, err := t.GenerateConfig(nil, tconf)
c.Assert(err, check.IsNil)
serviceC := make(chan *service.TeleportProcess, 20)
runCtx, runCancel := context.WithCancel(context.TODO())
go func() {
defer runCancel()
service.Run(ctx, *config, func(cfg *service.Config) (service.Process, error) {
svc, err := service.NewTeleport(cfg)
if err == nil {
serviceC <- svc
}
return svc, err
})
}()
l := log.WithFields(log.Fields{trace.Component: teleport.Component("test", "rotate")})
svc, err := waitForProcessStart(serviceC)
c.Assert(err, check.IsNil)
// Setup user in the cluster
err = SetupUser(svc, s.me.Username, nil)
c.Assert(err, check.IsNil)
// capture credentials before reload started to simulate old client
initialCreds, err := GenerateUserCreds(svc, s.me.Username)
c.Assert(err, check.IsNil)
l.Infof("Service started. Setting rotation state to %v", services.RotationPhaseInit)
// start rotation
err = svc.GetAuthServer().RotateCertAuthority(auth.RotateRequest{
TargetPhase: services.RotationPhaseInit,
Mode: services.RotationModeManual,
})
c.Assert(err, check.IsNil)
err = waitForProcessEvent(svc, service.TeleportPhaseChangeEvent, 10*time.Second)
c.Assert(err, check.IsNil)
l.Infof("Setting rotation state to %v", services.RotationPhaseUpdateClients)
// start rotation
err = svc.GetAuthServer().RotateCertAuthority(auth.RotateRequest{
TargetPhase: services.RotationPhaseUpdateClients,
Mode: services.RotationModeManual,
})
c.Assert(err, check.IsNil)
// wait until service reload
svc, err = waitForReload(serviceC, svc)
c.Assert(err, check.IsNil)
cfg := ClientConfig{
Login: s.me.Username,
Host: Loopback,
Port: t.GetPortSSHInt(),
}
clt, err := t.NewClientWithCreds(cfg, *initialCreds)
c.Assert(err, check.IsNil)
// client works as is before servers have been rotated
err = runAndMatch(clt, 3, []string{"echo", "hello world"}, ".*hello world.*")
c.Assert(err, check.IsNil)
l.Infof("Service reloaded. Setting rotation state to %v", services.RotationPhaseUpdateServers)
// move to the next phase
err = svc.GetAuthServer().RotateCertAuthority(auth.RotateRequest{
TargetPhase: services.RotationPhaseUpdateServers,
Mode: services.RotationModeManual,
})
c.Assert(err, check.IsNil)
// wait until service reloaded
svc, err = waitForReload(serviceC, svc)
c.Assert(err, check.IsNil)
l.Infof("Service reloaded. Setting rotation state to %v.", services.RotationPhaseRollback)
// roll back the rotation
err = svc.GetAuthServer().RotateCertAuthority(auth.RotateRequest{
TargetPhase: services.RotationPhaseRollback,
Mode: services.RotationModeManual,
})
c.Assert(err, check.IsNil)
// wait until service reloaded
svc, err = waitForReload(serviceC, svc)
c.Assert(err, check.IsNil)
// old client works
err = runAndMatch(clt, 3, []string{"echo", "hello world"}, ".*hello world.*")
c.Assert(err, check.IsNil)
l.Infof("Service reloaded. Rotation has completed. Shuttting down service.")
// shut down the service
cancel()
// close the service without waiting for the connections to drain
svc.Close()
select {
case <-runCtx.Done():
case <-time.After(20 * time.Second):
c.Fatalf("failed to shut down the server")
}
}
// getIterations provides a simple way to add iterations to the tests by
// setting the environment variable "ITERATIONS"; it returns 1 by default
func getIterations() int {
out := os.Getenv("ITERATIONS")
if out == "" {
return 1
}
iter, err := strconv.Atoi(out)
if err != nil {
panic(err)
}
log.Debugf("Starting tests with %v iterations.", iter)
return iter
}
// TestRotateTrustedClusters tests CA rotation support for trusted clusters
func (s *IntSuite) TestRotateTrustedClusters(c *check.C) {
for i := 0; i < getIterations(); i++ {
s.rotateTrustedClusters(c)
}
}
// rotateTrustedClusters tests CA rotation support for trusted clusters
func (s *IntSuite) rotateTrustedClusters(c *check.C) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
clusterMain := "rotate-main"
clusterAux := "rotate-aux"
tconf := rotationConfig(false)
main := NewInstance(InstanceConfig{ClusterName: clusterMain, HostID: HostID, NodeName: Host, Ports: s.getPorts(5), Priv: s.priv, Pub: s.pub})
aux := NewInstance(InstanceConfig{ClusterName: clusterAux, HostID: HostID, NodeName: Host, Ports: s.getPorts(5), Priv: s.priv, Pub: s.pub})
logins := []string{s.me.Username}
for _, login := range logins {
main.AddUser(login, []string{login})
}
config, err := main.GenerateConfig(nil, tconf)
c.Assert(err, check.IsNil)
serviceC := make(chan *service.TeleportProcess, 20)
runCtx, runCancel := context.WithCancel(context.TODO())
go func() {
defer runCancel()
service.Run(ctx, *config, func(cfg *service.Config) (service.Process, error) {
svc, err := service.NewTeleport(cfg)
if err == nil {
serviceC <- svc
}
return svc, err
})
}()
l := log.WithFields(log.Fields{trace.Component: teleport.Component("test", "rotate")})
svc, err := waitForProcessStart(serviceC)
c.Assert(err, check.IsNil)
// main cluster has a local user who belongs to the role "main-devs"
mainDevs := "main-devs"
role, err := services.NewRole(mainDevs, services.RoleSpecV3{
Allow: services.RoleConditions{
Logins: []string{s.me.Username},
},
})
c.Assert(err, check.IsNil)
err = SetupUser(svc, s.me.Username, []services.Role{role})
c.Assert(err, check.IsNil)
// create auxiliary cluster and setup trust
c.Assert(aux.CreateEx(nil, rotationConfig(false)), check.IsNil)
// the auxiliary cluster has a role "aux-devs".
// connect the aux cluster to the main cluster using trusted clusters,
// so the remote user will be allowed to assume the role specified by
// mapping the remote role "main-devs" to the local role "aux-devs"
auxDevs := "aux-devs"
role, err = services.NewRole(auxDevs, services.RoleSpecV3{
Allow: services.RoleConditions{
Logins: []string{s.me.Username},
},
})
c.Assert(err, check.IsNil)
err = aux.Process.GetAuthServer().UpsertRole(role)
c.Assert(err, check.IsNil)
trustedClusterToken := "trusted-clsuter-token"
err = svc.GetAuthServer().UpsertToken(trustedClusterToken, []teleport.Role{teleport.RoleTrustedCluster}, backend.Forever)
c.Assert(err, check.IsNil)
trustedCluster := main.Secrets.AsTrustedCluster(trustedClusterToken, services.RoleMap{
{Remote: mainDevs, Local: []string{auxDevs}},
})
c.Assert(aux.Start(), check.IsNil)
// try and upsert a trusted cluster
lib.SetInsecureDevMode(true)
defer lib.SetInsecureDevMode(false)
var upsertSuccess bool
for i := 0; i < 10; i++ {
log.Debugf("Will create trusted cluster %v, attempt %v", trustedCluster, i)
_, err = aux.Process.GetAuthServer().UpsertTrustedCluster(trustedCluster)
if err != nil {
if trace.IsConnectionProblem(err) {
log.Debugf("retrying on connection problem: %v", err)
continue
}
c.Fatalf("got non connection problem %v", err)
}
upsertSuccess = true
break
}
// make sure we upsert a trusted cluster
c.Assert(upsertSuccess, check.Equals, true)
// capture credentials before the reload has started to simulate an old client
initialCreds, err := GenerateUserCreds(svc, s.me.Username)
c.Assert(err, check.IsNil)
// credentials should work
cfg := ClientConfig{
Login: s.me.Username,
Host: Loopback,
Cluster: clusterAux,
Port: aux.GetPortSSHInt(),
}
clt, err := main.NewClientWithCreds(cfg, *initialCreds)
c.Assert(err, check.IsNil)
err = runAndMatch(clt, 6, []string{"echo", "hello world"}, ".*hello world.*")
c.Assert(err, check.IsNil)
l.Infof("Setting rotation state to %v", services.RotationPhaseInit)
// start rotation
err = svc.GetAuthServer().RotateCertAuthority(auth.RotateRequest{
TargetPhase: services.RotationPhaseInit,
Mode: services.RotationModeManual,
})
c.Assert(err, check.IsNil)
// wait for the service phase update to be broadcast (the init phase does not trigger a reload)
err = waitForProcessEvent(svc, service.TeleportPhaseChangeEvent, 10*time.Second)
c.Assert(err, check.IsNil)
// waitForPhase waits until aux cluster detects the rotation
waitForPhase := func(phase string) error {
var lastPhase string
for i := 0; i < 10; i++ {
ca, err := aux.Process.GetAuthServer().GetCertAuthority(services.CertAuthID{
Type: services.HostCA,
DomainName: clusterMain,
}, false)
c.Assert(err, check.IsNil)
if ca.GetRotation().Phase == phase {
return nil
}
lastPhase = ca.GetRotation().Phase
time.Sleep(tconf.PollingPeriod / 2)
}
return trace.CompareFailed("failed to converge to phase %q, last phase %q", phase, lastPhase)
}
err = waitForPhase(services.RotationPhaseInit)
c.Assert(err, check.IsNil)
// update clients
err = svc.GetAuthServer().RotateCertAuthority(auth.RotateRequest{
TargetPhase: services.RotationPhaseUpdateClients,
Mode: services.RotationModeManual,
})
c.Assert(err, check.IsNil)
// wait until service reloaded
svc, err = waitForReload(serviceC, svc)
c.Assert(err, check.IsNil)
err = waitForPhase(services.RotationPhaseUpdateClients)
c.Assert(err, check.IsNil)
// old client should work as is
err = runAndMatch(clt, 6, []string{"echo", "hello world"}, ".*hello world.*")
c.Assert(err, check.IsNil)
l.Infof("Service reloaded. Setting rotation state to %v", services.RotationPhaseUpdateServers)
// move to the next phase
err = svc.GetAuthServer().RotateCertAuthority(auth.RotateRequest{
TargetPhase: services.RotationPhaseUpdateServers,
Mode: services.RotationModeManual,
})
c.Assert(err, check.IsNil)
// wait until service reloaded
svc, err = waitForReload(serviceC, svc)
c.Assert(err, check.IsNil)
err = waitForPhase(services.RotationPhaseUpdateServers)
c.Assert(err, check.IsNil)
// new credentials will work from this phase to others
newCreds, err := GenerateUserCreds(svc, s.me.Username)
c.Assert(err, check.IsNil)
clt, err = main.NewClientWithCreds(cfg, *newCreds)
c.Assert(err, check.IsNil)
// new client works
err = runAndMatch(clt, 3, []string{"echo", "hello world"}, ".*hello world.*")
c.Assert(err, check.IsNil)
l.Infof("Service reloaded. Setting rotation state to %v.", services.RotationPhaseStandby)
// complete rotation
err = svc.GetAuthServer().RotateCertAuthority(auth.RotateRequest{
TargetPhase: services.RotationPhaseStandby,
Mode: services.RotationModeManual,
})
c.Assert(err, check.IsNil)
// wait until service reloaded
svc, err = waitForReload(serviceC, svc)
c.Assert(err, check.IsNil)
err = waitForPhase(services.RotationPhaseStandby)
c.Assert(err, check.IsNil)
// new client still works
err = runAndMatch(clt, 3, []string{"echo", "hello world"}, ".*hello world.*")
c.Assert(err, check.IsNil)
l.Infof("Service reloaded. Rotation has completed. Shuttting down service.")
// shut down the service
cancel()
// close the service without waiting for the connections to drain
svc.Close()
select {
case <-runCtx.Done():
case <-time.After(20 * time.Second):
c.Fatalf("failed to shut down the server")
}
}
// rotationConfig sets up default config used for CA rotation tests
func rotationConfig(disableWebService bool) *service.Config {
tconf := service.MakeDefaultConfig()
tconf.SSH.Enabled = true
tconf.Proxy.DisableWebService = disableWebService
tconf.Proxy.DisableWebInterface = true
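// use short polling and shutdown timeouts so rotation tests complete quickly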
tconf.PollingPeriod = 500 * time.Millisecond
tconf.ClientTimeout = time.Second
tconf.ShutdownTimeout = 2 * tconf.ClientTimeout
return tconf
}
// waitForProcessEvent waits for process event to occur or timeout
func waitForProcessEvent(svc *service.TeleportProcess, event string, timeout time.Duration) error {
eventC := make(chan service.Event, 1)
svc.WaitForEvent(context.TODO(), event, eventC)
select {
case <-eventC:
return nil
case <-time.After(timeout):
return trace.BadParameter("timeout waiting for service to broadcast event %v", event)
}
}
// waitForProcessStart waits for the process to start
func waitForProcessStart(serviceC chan *service.TeleportProcess) (*service.TeleportProcess, error) {
var svc *service.TeleportProcess
select {
case svc = <-serviceC:
case <-time.After(60 * time.Second):
return nil, trace.BadParameter("timeout waiting for service to start")
}
return svc, nil
}
// waitForReload waits for multiple events to happen:
//
// 1. a new service to be created and started
// 2. the old service, if present, to shut down
//
// this helper function allows serializing tests for reloads.
func waitForReload(serviceC chan *service.TeleportProcess, old *service.TeleportProcess) (*service.TeleportProcess, error) {
var svc *service.TeleportProcess
select {
case svc = <-serviceC:
case <-time.After(60 * time.Second):
return nil, trace.BadParameter("timeout waiting for service to start")
}
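// wait for the new service to broadcast that it is ready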
eventC := make(chan service.Event, 1)
svc.WaitForEvent(context.TODO(), service.TeleportReadyEvent, eventC)
select {
case <-eventC:
case <-time.After(20 * time.Second):
return nil, trace.BadParameter("timeout waiting for service to broadcast ready status")
}
// if old service is present, wait for it to complete shut down procedure
if old != nil {
ctx, cancel := context.WithCancel(context.TODO())
go func() {
defer cancel()
old.Supervisor.Wait()
}()
select {
case <-ctx.Done():
case <-time.After(60 * time.Second):
return nil, trace.BadParameter("timeout waiting for old service to stop")
}
}
return svc, nil
}
// runAndMatch runs a command and makes sure its output matches the pattern
func runAndMatch(tc *client.TeleportClient, attempts int, command []string, pattern string) error {
output := &bytes.Buffer{}
tc.Stdout = output
var err error
for i := 0; i < attempts; i++ {
err = tc.SSH(context.TODO(), command, false)
if err != nil {
continue
}
out := output.String()
out = string(replaceNewlines(out))
matched, _ := regexp.MatchString(pattern, out)
if matched {
return nil
}
err = trace.CompareFailed("output %q did not match pattern %q", out, pattern)
time.Sleep(250 * time.Millisecond)
}
return err
}
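// A usage sketch (values are illustrative): retry the echo command a few
// times until the rotated credentials are picked up by the proxy.
//
//	if err := runAndMatch(clt, 3, []string{"echo", "hello world"}, ".*hello world.*"); err != nil {
//		c.Fatalf("command output never matched: %v", err)
//	}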
// TestWindowChange checks if custom Teleport window change requests are sent
// when the server side PTY changes its size.
func (s *IntSuite) TestWindowChange(c *check.C) {
t := s.newTeleport(c, nil, true)
defer t.Stop(true)
site := t.GetSiteAPI(Site)
c.Assert(site, check.NotNil)
personA := NewTerminal(250)
personB := NewTerminal(250)
// openSession will open a new session on a server.
openSession := func() {
cl, err := t.NewClient(ClientConfig{
Login: s.me.Username,
Cluster: Site,
Host: Host,
Port: t.GetPortSSHInt(),
})
c.Assert(err, check.IsNil)
cl.Stdout = &personA
cl.Stdin = &personA
err = cl.SSH(context.TODO(), []string{}, false)
c.Assert(err, check.IsNil)
}
// joinSession will join the existing session on a server.
joinSession := func() {
// Find the existing session in the backend.
var sessionID string
for {
time.Sleep(time.Millisecond)
sessions, _ := site.GetSessions(defaults.Namespace)
if len(sessions) == 0 {
continue
}
sessionID = string(sessions[0].ID)
break
}
cl, err := t.NewClient(ClientConfig{
Login: s.me.Username,
Cluster: Site,
Host: Host,
Port: t.GetPortSSHInt(),
})
c.Assert(err, check.IsNil)
cl.Stdout = &personB
cl.Stdin = &personB
// Change the size of the window immediately after it is created.
cl.OnShellCreated = func(s *ssh.Session, c *ssh.Client, terminal io.ReadWriteCloser) (exit bool, err error) {
err = s.WindowChange(48, 160)
if err != nil {
return true, trace.Wrap(err)
}
return false, nil
}
for i := 0; i < 10; i++ {
err = cl.Join(context.TODO(), defaults.Namespace, session.ID(sessionID), &personB)
if err == nil {
break
}
}
c.Assert(err, check.IsNil)
}
// waitForOutput polls the output of the passed-in terminal for a string
// until a timeout occurs.
waitForOutput := func(t Terminal, s string) error {
tickerCh := time.Tick(500 * time.Millisecond)
timeoutCh := time.After(30 * time.Second)
for {
select {
case <-tickerCh:
if strings.Contains(t.Output(500), s) {
return nil
}
case <-timeoutCh:
return trace.BadParameter("timed out waiting for output")
}
}
}
// Open session, the initial size will be 80x24.
go openSession()
// Use the "printf" command to print the terminal size on the screen and
// make sure it is 80x25.
personA.Type("\aprintf '%s %s\n' $(tput cols) $(tput lines)\n\r\a")
err := waitForOutput(personA, "80 25")
c.Assert(err, check.IsNil)
// Have another user join the session. As soon as the second shell is
// created, the window is resized to 160x48 (see joinSession implementation).
go joinSession()
// Use the "printf" command to print the window size again and make sure it's
// 160x48.
personA.Type("\aprintf '%s %s\n' $(tput cols) $(tput lines)\n\r\a")
err = waitForOutput(personA, "160 48")
c.Assert(err, check.IsNil)
// Close the session.
personA.Type("\aexit\r\n\a")
}
// TestList checks that the list of servers returned is identity aware.
func (s *IntSuite) TestList(c *check.C) {
// Create and start a Teleport cluster with auth, proxy, and node.
makeConfig := func() (*check.C, []string, []*InstanceSecrets, *service.Config) {
clusterConfig, err := services.NewClusterConfig(services.ClusterConfigSpecV3{
SessionRecording: services.RecordOff,
})
c.Assert(err, check.IsNil)
tconf := service.MakeDefaultConfig()
tconf.Hostname = "server-01"
tconf.Auth.Enabled = true
tconf.Auth.ClusterConfig = clusterConfig
tconf.Proxy.Enabled = true
tconf.Proxy.DisableWebService = true
tconf.Proxy.DisableWebInterface = true
tconf.SSH.Enabled = true
tconf.SSH.Labels = map[string]string{
"role": "worker",
}
return c, nil, nil, tconf
}
t := s.newTeleportWithConfig(makeConfig())
defer t.Stop(true)
// Create and start a Teleport node.
nodeSSHPort := s.getPorts(1)[0]
nodeConfig := func() *service.Config {
tconf := service.MakeDefaultConfig()
tconf.Hostname = "server-02"
tconf.SSH.Enabled = true
tconf.SSH.Addr.Addr = net.JoinHostPort(t.Hostname, fmt.Sprintf("%v", nodeSSHPort))
tconf.SSH.Labels = map[string]string{
"role": "database",
}
return tconf
}
_, err := t.StartNode(nodeConfig())
c.Assert(err, check.IsNil)
// Get an auth client to the cluster.
clt := t.GetSiteAPI(Site)
c.Assert(clt, check.NotNil)
// Wait 10 seconds for both nodes to show up to make sure they both have
// registered themselves.
waitForNodes := func(clt auth.ClientI, count int) error {
tickCh := time.Tick(500 * time.Millisecond)
stopCh := time.After(10 * time.Second)
for {
select {
case <-tickCh:
nodesInCluster, err := clt.GetNodes(defaults.Namespace, services.SkipValidation())
if err != nil && !trace.IsNotFound(err) {
return trace.Wrap(err)
}
if got, want := len(nodesInCluster), count; got == want {
return nil
}
case <-stopCh:
return trace.BadParameter("waited 10s, did find %v nodes", count)
}
}
}
err = waitForNodes(clt, 2)
c.Assert(err, check.IsNil)
var tests = []struct {
inRoleName string
inLabels services.Labels
inLogin string
outNodes []string
}{
// 0 - Role has label "role:worker", only server-01 is returned.
{
inRoleName: "worker-only",
inLogin: "foo",
inLabels: services.Labels{"role": []string{"worker"}},
outNodes: []string{"server-01"},
},
// 1 - Role has label "role:database", only server-02 is returned.
{
inRoleName: "database-only",
inLogin: "bar",
inLabels: services.Labels{"role": []string{"database"}},
outNodes: []string{"server-02"},
},
// 2 - Role has wildcard label, all nodes (server-01 and server-02) are returned.
{
inRoleName: "worker-and-database",
inLogin: "baz",
inLabels: services.Labels{services.Wildcard: []string{services.Wildcard}},
outNodes: []string{"server-01", "server-02"},
},
}
for _, tt := range tests {
// Create role with logins and labels for this test.
role, err := services.NewRole(tt.inRoleName, services.RoleSpecV3{
Allow: services.RoleConditions{
Logins: []string{tt.inLogin},
NodeLabels: tt.inLabels,
},
})
c.Assert(err, check.IsNil)
// Create user, role, and generate credentials.
err = SetupUser(t.Process, tt.inLogin, []services.Role{role})
c.Assert(err, check.IsNil)
initialCreds, err := GenerateUserCreds(t.Process, tt.inLogin)
c.Assert(err, check.IsNil)
// Create a Teleport client.
cfg := ClientConfig{
Login: tt.inLogin,
Port: t.GetPortSSHInt(),
}
userClt, err := t.NewClientWithCreds(cfg, *initialCreds)
c.Assert(err, check.IsNil)
// Get list of nodes and check that the returned nodes match the
// expected nodes.
nodes, err := userClt.ListNodes(context.Background())
c.Assert(err, check.IsNil)
for _, node := range nodes {
ok := utils.SliceContainsStr(tt.outNodes, node.GetHostname())
if !ok {
c.Fatalf("Got nodes: %v, want: %v.", nodes, tt.outNodes)
}
}
}
}
// TestMultipleSignup makes sure that multiple users can create Teleport accounts.
func (s *IntSuite) TestMultipleSignup(c *check.C) {
type createNewUserReq struct {
InviteToken string `json:"invite_token"`
Pass string `json:"pass"`
}
// Create and start a Teleport cluster.
makeConfig := func() (*check.C, []string, []*InstanceSecrets, *service.Config) {
clusterConfig, err := services.NewClusterConfig(services.ClusterConfigSpecV3{
SessionRecording: services.RecordAtNode,
})
c.Assert(err, check.IsNil)
tconf := service.MakeDefaultConfig()
tconf.Auth.Preference.SetSecondFactor("off")
tconf.Auth.Enabled = true
tconf.Auth.ClusterConfig = clusterConfig
tconf.Proxy.Enabled = true
tconf.Proxy.DisableWebService = false
tconf.Proxy.DisableWebInterface = true
tconf.SSH.Enabled = true
return c, nil, nil, tconf
}
main := s.newTeleportWithConfig(makeConfig())
defer main.Stop(true)
mainAuth := main.Process.GetAuthServer()
// Create a few users to make sure the proxy uses the correct identity
// when connecting to the auth server.
for i := 0; i < 5; i++ {
// Create a random username.
username, err := utils.CryptoRandomHex(16)
c.Assert(err, check.IsNil)
// Create signup token, this is like doing "tctl users add foo foo".
token, err := mainAuth.CreateSignupToken(services.UserV1{
Name: username,
AllowedLogins: []string{username},
}, backend.Forever)
c.Assert(err, check.IsNil)
// Create client that will simulate web browser.
clt, err := createWebClient(main)
c.Assert(err, check.IsNil)
// Render the signup page.
_, err = clt.Get(context.Background(), clt.Endpoint("webapi", "users", "invites", token), url.Values{})
c.Assert(err, check.IsNil)
// Make sure signup is successful.
_, err = clt.PostJSON(context.Background(), clt.Endpoint("webapi", "users"), createNewUserReq{
InviteToken: token,
Pass: "fake-password-123",
})
c.Assert(err, check.IsNil)
}
}
// runCommand is a shortcut for running an SSH command: it creates a client
// connected to the proxy of the passed-in instance, runs the command, and
// returns the result. If multiple attempts are requested, a 250 millisecond
// delay is added between them before giving up.
func runCommand(instance *TeleInstance, cmd []string, cfg ClientConfig, attempts int) (string, error) {
tc, err := instance.NewClient(cfg)
if err != nil {
return "", trace.Wrap(err)
}
output := &bytes.Buffer{}
tc.Stdout = output
for i := 0; i < attempts; i++ {
err = tc.SSH(context.TODO(), cmd, false)
if err == nil {
break
}
time.Sleep(250 * time.Millisecond)
}
return output.String(), trace.Wrap(err)
}
// getPorts helper returns a list of unallocated ports available for listening on
func (s *IntSuite) getPorts(num int) []int {
if len(s.ports) < num {
panic("do not have enough ports! increase AllocatePortsNum constant")
}
ports := make([]int, num)
for i := range ports {
p, _ := strconv.Atoi(s.ports.Pop())
ports[i] = p
}
return ports
}
// Terminal emulates stdin+stdout for integration testing
type Terminal struct {
io.Writer
io.Reader
written *bytes.Buffer
typed chan byte
}
func NewTerminal(capacity int) Terminal {
return Terminal{
typed: make(chan byte, capacity),
written: bytes.NewBuffer([]byte{}),
}
}
func (t *Terminal) Type(data string) {
for _, b := range []byte(data) {
t.typed <- b
}
}
// Output returns the first 'limit' bytes printed into this fake terminal
func (t *Terminal) Output(limit int) string {
buff := t.written.Bytes()
if len(buff) > limit {
buff = buff[:limit]
}
// clean up white space for easier comparison:
return strings.TrimSpace(string(buff))
}
func (t *Terminal) Write(data []byte) (n int, err error) {
return t.written.Write(data)
}
func (t *Terminal) Read(p []byte) (n int, err error) {
for n = 0; n < len(p); n++ {
p[n] = <-t.typed
if p[n] == '\r' {
break
}
if p[n] == '\a' { // 'alert' used for debugging, means 'pause for 1 second'
time.Sleep(time.Second)
n -= 1
}
time.Sleep(time.Millisecond * 10)
}
return n, nil
}
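// A usage sketch (hypothetical wiring): the fake terminal doubles as the
// client's stdin and stdout, keystrokes are queued with Type, and Output
// inspects what the remote session printed.
//
//	term := NewTerminal(250)
//	cl.Stdin = &term
//	cl.Stdout = &term
//	term.Type("echo hi\r\n")
//	_ = term.Output(100) // first 100 bytes written by the session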
// waitFor helper waits on a channel for up to the given timeout
func waitFor(c chan interface{}, timeout time.Duration) error {
tick := time.Tick(timeout)
select {
case <-c:
return nil
case <-tick:
return fmt.Errorf("timeout waiting for event")
}
}
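// A usage sketch (doWork is a hypothetical stand-in): close the channel when
// the background work finishes, then bound the wait.
//
//	done := make(chan interface{})
//	go func() { defer close(done); doWork() }()
//	if err := waitFor(done, 5*time.Second); err != nil {
//		c.Fatal(err)
//	}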
// hasPAMPolicy checks if the three policy files needed for tests exist. If
// they do, it returns true; otherwise it returns false.
func hasPAMPolicy() bool {
pamPolicyFiles := []string{
"/etc/pam.d/teleport-acct-failure",
"/etc/pam.d/teleport-session-failure",
"/etc/pam.d/teleport-success",
}
for _, fileName := range pamPolicyFiles {
_, err := os.Stat(fileName)
if os.IsNotExist(err) {
return false
}
}
return true
}
|
[
"\"ITERATIONS\""
] |
[] |
[
"ITERATIONS"
] |
[]
|
["ITERATIONS"]
|
go
| 1 | 0 | |
src/test/java/org/opensearch/security/ssl/OpenSSLTest.java
|
/*
* Copyright 2015-2017 floragunn GmbH
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.opensearch.security.ssl;
import java.util.HashSet;
import java.util.Random;
import java.util.Set;
import io.netty.handler.ssl.OpenSsl;
import io.netty.util.internal.PlatformDependent;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.opensearch.action.admin.cluster.health.ClusterHealthRequest;
import org.opensearch.action.admin.cluster.health.ClusterHealthResponse;
import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.node.Node;
import org.opensearch.node.PluginAwareNode;
import org.opensearch.security.OpenSearchSecurityPlugin;
import org.opensearch.security.ssl.util.SSLConfigConstants;
import org.opensearch.security.support.ConfigConstants;
import org.opensearch.security.test.helper.file.FileHelper;
import org.opensearch.security.test.helper.rest.RestHelper;
import org.opensearch.transport.Netty4Plugin;
public class OpenSSLTest extends SSLTest {
private static final String USE_NETTY_DEFAULT_ALLOCATOR_PROPERTY = "opensearch.unsafe.use_netty_default_allocator";
private static String USE_NETTY_DEFAULT_ALLOCATOR;
@BeforeClass
public static void enableNettyDefaultAllocator() {
USE_NETTY_DEFAULT_ALLOCATOR = System.getProperty(USE_NETTY_DEFAULT_ALLOCATOR_PROPERTY);
System.setProperty(USE_NETTY_DEFAULT_ALLOCATOR_PROPERTY, "true");
}
@AfterClass
public static void restoreNettyDefaultAllocator() {
if (USE_NETTY_DEFAULT_ALLOCATOR != null) {
System.setProperty(USE_NETTY_DEFAULT_ALLOCATOR_PROPERTY, USE_NETTY_DEFAULT_ALLOCATOR);
} else {
System.clearProperty(USE_NETTY_DEFAULT_ALLOCATOR_PROPERTY);
}
}
@Before
public void setup() {
allowOpenSSL = true;
}
@Test
public void testEnsureOpenSSLAvailability() {
//Assert.assertTrue("OpenSSL not available: "+String.valueOf(OpenSsl.unavailabilityCause()), OpenSsl.isAvailable());
final String openSSLOptional = System.getenv("OPENDISTRO_SECURITY_TEST_OPENSSL_OPT");
System.out.println("OPENDISTRO_SECURITY_TEST_OPENSSL_OPT "+openSSLOptional);
if(!Boolean.parseBoolean(openSSLOptional)) {
System.out.println("OpenSSL must be available");
Assert.assertTrue("OpenSSL not available: "+String.valueOf(OpenSsl.unavailabilityCause()), OpenSsl.isAvailable());
} else {
System.out.println("OpenSSL can be available");
}
}
@Override
@Test
public void testHttps() throws Exception {
Assume.assumeTrue(OpenSearchSecuritySSLPlugin.OPENSSL_SUPPORTED && OpenSsl.isAvailable());
super.testHttps();
}
@Override
@Test
public void testHttpsAndNodeSSL() throws Exception {
Assume.assumeTrue(OpenSearchSecuritySSLPlugin.OPENSSL_SUPPORTED && OpenSsl.isAvailable());
super.testHttpsAndNodeSSL();
}
@Override
@Test
public void testHttpPlainFail() throws Exception {
Assume.assumeTrue(OpenSearchSecuritySSLPlugin.OPENSSL_SUPPORTED && OpenSsl.isAvailable());
super.testHttpPlainFail();
}
@Override
@Test
public void testHttpsNoEnforce() throws Exception {
Assume.assumeTrue(OpenSearchSecuritySSLPlugin.OPENSSL_SUPPORTED && OpenSsl.isAvailable());
super.testHttpsNoEnforce();
}
@Override
@Test
public void testHttpsV3Fail() throws Exception {
Assume.assumeTrue(OpenSearchSecuritySSLPlugin.OPENSSL_SUPPORTED && OpenSsl.isAvailable());
super.testHttpsV3Fail();
}
@Override
@Test(timeout=40000)
public void testNodeClientSSL() throws Exception {
Assume.assumeTrue(OpenSearchSecuritySSLPlugin.OPENSSL_SUPPORTED && OpenSsl.isAvailable());
super.testNodeClientSSL();
}
@Override
@Test
public void testHttpsOptionalAuth() throws Exception {
Assume.assumeTrue(OpenSearchSecuritySSLPlugin.OPENSSL_SUPPORTED && OpenSsl.isAvailable());
super.testHttpsOptionalAuth();
}
@Test
public void testAvailCiphersOpenSSL() throws Exception {
Assume.assumeTrue(OpenSearchSecuritySSLPlugin.OPENSSL_SUPPORTED && OpenSsl.isAvailable());
// Set<String> openSSLAvailCiphers = new
// HashSet<>(OpenSsl.availableCipherSuites());
// System.out.println("OpenSSL available ciphers: "+openSSLAvailCiphers);
// ECDHE-RSA-AES256-SHA, ECDH-ECDSA-AES256-SHA, DH-DSS-DES-CBC-SHA,
// ADH-AES256-SHA256, ADH-CAMELLIA128-SHA
final Set<String> openSSLSecureCiphers = new HashSet<>();
for (final String secure : SSLConfigConstants.getSecureSSLCiphers(Settings.EMPTY, false)) {
if (OpenSsl.isCipherSuiteAvailable(secure)) {
openSSLSecureCiphers.add(secure);
}
}
System.out.println("OpenSSL secure ciphers: " + openSSLSecureCiphers);
Assert.assertTrue(openSSLSecureCiphers.size() > 0);
}
@Test
public void testHttpsEnforceFail() throws Exception {
Assume.assumeTrue(OpenSearchSecuritySSLPlugin.OPENSSL_SUPPORTED && OpenSsl.isAvailable());
super.testHttpsEnforceFail();
}
@Override
public void testCipherAndProtocols() throws Exception {
Assume.assumeTrue(OpenSearchSecuritySSLPlugin.OPENSSL_SUPPORTED && OpenSsl.isAvailable());
super.testCipherAndProtocols();
}
@Override
public void testHttpsAndNodeSSLFailedCipher() throws Exception {
Assume.assumeTrue(OpenSearchSecuritySSLPlugin.OPENSSL_SUPPORTED && OpenSsl.isAvailable());
super.testHttpsAndNodeSSLFailedCipher();
}
@Test
public void testHttpsAndNodeSSLPem() throws Exception {
Assume.assumeTrue(OpenSearchSecuritySSLPlugin.OPENSSL_SUPPORTED && OpenSsl.isAvailable());
super.testHttpsAndNodeSSLPem();
}
@Test
public void testHttpsAndNodeSSLPemEnc() throws Exception {
Assume.assumeTrue(OpenSearchSecuritySSLPlugin.OPENSSL_SUPPORTED && OpenSsl.isAvailable());
super.testHttpsAndNodeSSLPemEnc();
}
@Test
public void testNodeClientSSLwithOpenSslTLSv13() throws Exception {
Assume.assumeTrue(OpenSearchSecuritySSLPlugin.OPENSSL_SUPPORTED && OpenSsl.isAvailable() && OpenSsl.version() > 0x10101009L);
final Settings settings = Settings.builder().put("plugins.security.ssl.transport.enabled", true)
.put(ConfigConstants.SECURITY_SSL_ONLY, true)
.put(SSLConfigConstants.SECURITY_SSL_HTTP_ENABLE_OPENSSL_IF_AVAILABLE, allowOpenSSL)
.put(SSLConfigConstants.SECURITY_SSL_TRANSPORT_ENABLE_OPENSSL_IF_AVAILABLE, allowOpenSSL)
.put(SSLConfigConstants.SECURITY_SSL_TRANSPORT_KEYSTORE_ALIAS, "node-0")
.put("plugins.security.ssl.transport.keystore_filepath", FileHelper.getAbsoluteFilePathFromClassPath("ssl/node-0-keystore.jks"))
.put(SSLConfigConstants.SECURITY_SSL_TRANSPORT_TRUSTSTORE_FILEPATH, FileHelper.getAbsoluteFilePathFromClassPath("ssl/truststore.jks"))
.put("plugins.security.ssl.transport.enforce_hostname_verification", false)
.put("plugins.security.ssl.transport.resolve_hostname", false)
.putList(SSLConfigConstants.SECURITY_SSL_TRANSPORT_ENABLED_PROTOCOLS, "TLSv1.3")
.putList(SSLConfigConstants.SECURITY_SSL_TRANSPORT_ENABLED_CIPHERS, "TLS_CHACHA20_POLY1305_SHA256")
.put("node.max_local_storage_nodes",4)
.build();
setupSslOnlyMode(settings);
RestHelper rh = nonSslRestHelper();
final Settings tcSettings = Settings.builder().put("cluster.name", clusterInfo.clustername).put("path.home", "/tmp")
.put("node.name", "client_node_" + new Random().nextInt())
.put("node.data", false)
.put("node.master", false)
.put("node.ingest", false)
.put("path.data", "./target/data/" + clusterInfo.clustername + "/ssl/data")
.put("path.logs", "./target/data/" + clusterInfo.clustername + "/ssl/logs")
.put("path.home", "./target")
.put("discovery.initial_state_timeout","8s")
.putList("discovery.zen.ping.unicast.hosts", clusterInfo.nodeHost+":"+clusterInfo.nodePort)
.put(settings)// -----
.build();
try (Node node = new PluginAwareNode(false, tcSettings, Netty4Plugin.class, OpenSearchSecurityPlugin.class).start()) {
ClusterHealthResponse res = node.client().admin().cluster().health(new ClusterHealthRequest().waitForNodes("4").timeout(TimeValue.timeValueSeconds(5))).actionGet();
Assert.assertFalse(res.isTimedOut());
Assert.assertEquals(4, res.getNumberOfNodes());
Assert.assertEquals(4, node.client().admin().cluster().nodesInfo(new NodesInfoRequest()).actionGet().getNodes().size());
}
Assert.assertFalse(rh.executeSimpleRequest("_nodes/stats?pretty").contains("\"tx_size_in_bytes\" : 0"));
Assert.assertFalse(rh.executeSimpleRequest("_nodes/stats?pretty").contains("\"rx_count\" : 0"));
Assert.assertFalse(rh.executeSimpleRequest("_nodes/stats?pretty").contains("\"rx_size_in_bytes\" : 0"));
Assert.assertFalse(rh.executeSimpleRequest("_nodes/stats?pretty").contains("\"tx_count\" : 0"));
}
@Test
public void testTLSv12() throws Exception {
Assume.assumeTrue(OpenSearchSecuritySSLPlugin.OPENSSL_SUPPORTED && OpenSsl.isAvailable());
super.testTLSv12();
}
@Test
public void testJava12WithOpenSslEnabled() throws Exception {
// If the user has Java 12 running and OpenSSL enabled, we give
// a warning, ignore OpenSSL and use JDK SSL instead.
Assume.assumeTrue(PlatformDependent.javaVersion() >= 12);
super.testHttps();
}
}
|
[
"\"OPENDISTRO_SECURITY_TEST_OPENSSL_OPT\""
] |
[] |
[
"OPENDISTRO_SECURITY_TEST_OPENSSL_OPT"
] |
[]
|
["OPENDISTRO_SECURITY_TEST_OPENSSL_OPT"]
|
java
| 1 | 0 | |
tests/commands/install_uninstall_test.py
|
from __future__ import annotations
import os.path
import re
import re_assert
import before_commit.constants as C
from before_commit import git
from before_commit.commands.install_uninstall import _hook_types
from before_commit.commands.install_uninstall import CURRENT_HASH
from before_commit.commands.install_uninstall import install
from before_commit.commands.install_uninstall import install_hooks
from before_commit.commands.install_uninstall import is_our_script
from before_commit.commands.install_uninstall import PRIOR_HASHES
from before_commit.commands.install_uninstall import uninstall
from before_commit.parse_shebang import find_executable
from before_commit.util import cmd_output
from before_commit.util import make_executable
from before_commit.util import resource_text
from testing.fixtures import add_config_to_repo
from testing.fixtures import git_dir
from testing.fixtures import make_consuming_repo
from testing.fixtures import remove_config_from_repo
from testing.fixtures import write_config
from testing.util import cmd_output_mocked_pre_commit_home
from testing.util import cwd
from testing.util import git_commit
def test_hook_types_explicitly_listed():
assert _hook_types(os.devnull, ['pre-push']) == ['pre-push']
def test_hook_types_default_value_when_not_specified():
assert _hook_types(os.devnull, None) == ['pre-commit']
def test_hook_types_configured(tmpdir):
cfg = tmpdir.join('t.cfg')
cfg.write('default_install_hook_types: [pre-push]\nrepos: []\n')
assert _hook_types(str(cfg), None) == ['pre-push']
def test_hook_types_configured_nonsense(tmpdir):
cfg = tmpdir.join('t.cfg')
cfg.write('default_install_hook_types: []\nrepos: []\n')
# hopefully the user doesn't do this, but the code allows it!
assert _hook_types(str(cfg), None) == []
def test_hook_types_configuration_has_error(tmpdir):
cfg = tmpdir.join('t.cfg')
cfg.write('[')
assert _hook_types(str(cfg), None) == ['pre-commit']
def test_is_not_script():
assert is_our_script('setup.py') is False
def test_is_script():
assert is_our_script('before_commit/resources/hook-tmpl')
def test_is_previous_pre_commit(tmpdir):
f = tmpdir.join('foo')
f.write(f'{PRIOR_HASHES[0].decode()}\n')
assert is_our_script(f.strpath)
def test_install_pre_commit(in_git_dir, store):
assert not install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
assert os.access(in_git_dir.join('.git/hooks/pre-commit').strpath, os.X_OK)
assert not install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-push'])
assert os.access(in_git_dir.join('.git/hooks/pre-push').strpath, os.X_OK)
def test_install_hooks_directory_not_present(in_git_dir, store):
# Simulate some git clients which don't make .git/hooks #234
if in_git_dir.join('.git/hooks').exists(): # pragma: no cover (odd git)
in_git_dir.join('.git/hooks').remove()
install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
assert in_git_dir.join('.git/hooks/pre-commit').exists()
def test_install_multiple_hooks_at_once(in_git_dir, store):
install(
C.DEFAULT_CONFIG_FILE,
store,
hook_types=['pre-commit', 'pre-push'],
)
assert in_git_dir.join('.git/hooks/pre-commit').exists()
assert in_git_dir.join('.git/hooks/pre-push').exists()
uninstall(C.DEFAULT_CONFIG_FILE, hook_types=['pre-commit', 'pre-push'])
assert not in_git_dir.join('.git/hooks/pre-commit').exists()
assert not in_git_dir.join('.git/hooks/pre-push').exists()
def test_install_refuses_core_hookspath(in_git_dir, store):
cmd_output('git', 'config', '--local', 'core.hooksPath', 'hooks')
assert install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
def test_install_hooks_dead_symlink(in_git_dir, store):
hook = in_git_dir.join('.git/hooks').ensure_dir().join('pre-commit')
os.symlink('/fake/baz', hook.strpath)
install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
assert hook.exists()
def test_uninstall_does_not_blow_up_when_not_there(in_git_dir):
assert uninstall(C.DEFAULT_CONFIG_FILE, hook_types=['pre-commit']) == 0
def test_uninstall(in_git_dir, store):
assert not in_git_dir.join('.git/hooks/pre-commit').exists()
install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
assert in_git_dir.join('.git/hooks/pre-commit').exists()
uninstall(C.DEFAULT_CONFIG_FILE, hook_types=['pre-commit'])
assert not in_git_dir.join('.git/hooks/pre-commit').exists()
def _get_commit_output(tempdir_factory, touch_file='foo', **kwargs):
open(touch_file, 'a').close()
cmd_output('git', 'add', touch_file)
return git_commit(
fn=cmd_output_mocked_pre_commit_home,
retcode=None,
tempdir_factory=tempdir_factory,
**kwargs,
)
# macOS does this differently :(
FILES_CHANGED = (
r'('
r' 1 file changed, 0 insertions\(\+\), 0 deletions\(-\)\n'
r'|'
r' 0 files changed\n'
r')'
)
NORMAL_PRE_COMMIT_RUN = re_assert.Matches(
fr'^\[INFO\] Initializing environment for .+\.\n'
fr'Bash hook\.+Passed\n'
fr'\[master [a-f0-9]{{7}}\] commit!\n'
fr'{FILES_CHANGED}'
fr' create mode 100644 foo\n$',
)
def test_install_pre_commit_and_run(tempdir_factory, store):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
iret = install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
assert iret == 0
ret, output = _get_commit_output(tempdir_factory)
assert ret == 0
NORMAL_PRE_COMMIT_RUN.assert_matches(output)
def test_install_pre_commit_and_run_custom_path(tempdir_factory, store):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
cmd_output('git', 'mv', C.DEFAULT_CONFIG_FILE, 'custom.yaml')
git_commit(cwd=path)
assert install('custom.yaml', store, hook_types=['pre-commit']) == 0
ret, output = _get_commit_output(tempdir_factory)
assert ret == 0
NORMAL_PRE_COMMIT_RUN.assert_matches(output)
def test_install_in_submodule_and_run(tempdir_factory, store):
src_path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
parent_path = git_dir(tempdir_factory)
cmd_output('git', 'submodule', 'add', src_path, 'sub', cwd=parent_path)
git_commit(cwd=parent_path)
sub_pth = os.path.join(parent_path, 'sub')
with cwd(sub_pth):
iret = install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
assert iret == 0
ret, output = _get_commit_output(tempdir_factory)
assert ret == 0
NORMAL_PRE_COMMIT_RUN.assert_matches(output)
def test_install_in_worktree_and_run(tempdir_factory, store):
src_path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
path = tempdir_factory.get()
cmd_output('git', '-C', src_path, 'branch', '-m', 'notmaster')
cmd_output('git', '-C', src_path, 'worktree', 'add', path, '-b', 'master')
with cwd(path):
iret = install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
assert iret == 0
ret, output = _get_commit_output(tempdir_factory)
assert ret == 0
NORMAL_PRE_COMMIT_RUN.assert_matches(output)
def test_commit_am(tempdir_factory, store):
"""Regression test for #322."""
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
# Make an unstaged change
open('unstaged', 'w').close()
cmd_output('git', 'add', '.')
git_commit(cwd=path)
with open('unstaged', 'w') as foo_file:
foo_file.write('Oh hai')
iret = install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
assert iret == 0
ret, output = _get_commit_output(tempdir_factory)
assert ret == 0
def test_unicode_merge_commit_message(tempdir_factory, store):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
iret = install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
assert iret == 0
cmd_output('git', 'checkout', 'master', '-b', 'foo')
git_commit('-n', cwd=path)
cmd_output('git', 'checkout', 'master')
cmd_output('git', 'merge', 'foo', '--no-ff', '--no-commit', '-m', '☃')
# Used to crash
git_commit(
'--no-edit',
msg=None,
fn=cmd_output_mocked_pre_commit_home,
tempdir_factory=tempdir_factory,
)
def test_install_idempotent(tempdir_factory, store):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
iret = install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
assert iret == 0
iret = install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
assert iret == 0
ret, output = _get_commit_output(tempdir_factory)
assert ret == 0
NORMAL_PRE_COMMIT_RUN.assert_matches(output)
def _path_without_us():
# Choose a path which *probably* doesn't include us
env = dict(os.environ)
exe = find_executable('pre-commit', _environ=env)
while exe:
parts = env['PATH'].split(os.pathsep)
after = [
x for x in parts
if x.lower().rstrip(os.sep) != os.path.dirname(exe).lower()
]
if parts == after:
raise AssertionError(exe, parts)
env['PATH'] = os.pathsep.join(after)
exe = find_executable('pre-commit', _environ=env)
return env['PATH']
def test_environment_not_sourced(tempdir_factory, store):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
iret = install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
assert not iret
# simulate deleting the virtualenv by rewriting the exe
hook = os.path.join(path, '.git/hooks/pre-commit')
with open(hook) as f:
src = f.read()
src = re.sub('\nINSTALL_PYTHON=.*\n', '\nINSTALL_PYTHON="/dne"\n', src)
with open(hook, 'w') as f:
f.write(src)
# Use a specific homedir to ignore --user installs
homedir = tempdir_factory.get()
ret, out = git_commit(
env={
'HOME': homedir,
'PATH': _path_without_us(),
# Git needs this to make a commit
'GIT_AUTHOR_NAME': os.environ['GIT_AUTHOR_NAME'],
'GIT_COMMITTER_NAME': os.environ['GIT_COMMITTER_NAME'],
'GIT_AUTHOR_EMAIL': os.environ['GIT_AUTHOR_EMAIL'],
'GIT_COMMITTER_EMAIL': os.environ['GIT_COMMITTER_EMAIL'],
},
retcode=None,
)
assert ret == 1
assert out == (
'`pre-commit` not found. '
'Did you forget to activate your virtualenv?\n'
)
FAILING_PRE_COMMIT_RUN = re_assert.Matches(
r'^\[INFO\] Initializing environment for .+\.\n'
r'Failing hook\.+Failed\n'
r'- hook id: failing_hook\n'
r'- exit code: 1\n'
r'\n'
r'Fail\n'
r'foo\n'
r'\n$',
)
def test_failing_hooks_returns_nonzero(tempdir_factory, store):
path = make_consuming_repo(tempdir_factory, 'failing_hook_repo')
with cwd(path):
iret = install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
assert iret == 0
ret, output = _get_commit_output(tempdir_factory)
assert ret == 1
FAILING_PRE_COMMIT_RUN.assert_matches(output)
EXISTING_COMMIT_RUN = re_assert.Matches(
fr'^legacy hook\n'
fr'\[master [a-f0-9]{{7}}\] commit!\n'
fr'{FILES_CHANGED}'
fr' create mode 100644 baz\n$',
)
def _write_legacy_hook(path):
os.makedirs(os.path.join(path, '.git/hooks'), exist_ok=True)
with open(os.path.join(path, '.git/hooks/pre-commit'), 'w') as f:
f.write('#!/usr/bin/env bash\necho legacy hook\n')
make_executable(f.name)
def test_install_existing_hooks_no_overwrite(tempdir_factory, store):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
_write_legacy_hook(path)
# Make sure we installed the "old" hook correctly
ret, output = _get_commit_output(tempdir_factory, touch_file='baz')
assert ret == 0
EXISTING_COMMIT_RUN.assert_matches(output)
# Now install pre-commit (no-overwrite)
iret = install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
assert iret == 0
# We should run both the legacy and pre-commit hooks
ret, output = _get_commit_output(tempdir_factory)
assert ret == 0
assert output.startswith('legacy hook\n')
NORMAL_PRE_COMMIT_RUN.assert_matches(output[len('legacy hook\n'):])
def test_legacy_overwriting_legacy_hook(tempdir_factory, store):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
_write_legacy_hook(path)
iret = install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
assert iret == 0
_write_legacy_hook(path)
# this previously crashed on windows. See #1010
iret = install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
assert iret == 0
def test_install_existing_hook_no_overwrite_idempotent(tempdir_factory, store):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
_write_legacy_hook(path)
# Install twice
iret = install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
assert iret == 0
iret = install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
assert iret == 0
# We should run both the legacy and pre-commit hooks
ret, output = _get_commit_output(tempdir_factory)
assert ret == 0
assert output.startswith('legacy hook\n')
NORMAL_PRE_COMMIT_RUN.assert_matches(output[len('legacy hook\n'):])
def test_install_with_existing_non_utf8_script(tmpdir, store):
cmd_output('git', 'init', str(tmpdir))
tmpdir.join('.git/hooks').ensure_dir()
tmpdir.join('.git/hooks/pre-commit').write_binary(
b'#!/usr/bin/env bash\n'
b'# garbage: \xa0\xef\x12\xf2\n'
b'echo legacy hook\n',
)
with tmpdir.as_cwd():
iret = install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
assert iret == 0
FAIL_OLD_HOOK = re_assert.Matches(
r'fail!\n'
r'\[INFO\] Initializing environment for .+\.\n'
r'Bash hook\.+Passed\n',
)
def test_failing_existing_hook_returns_1(tempdir_factory, store):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
# Write out a failing "old" hook
os.makedirs(os.path.join(path, '.git/hooks'), exist_ok=True)
with open(os.path.join(path, '.git/hooks/pre-commit'), 'w') as f:
f.write('#!/usr/bin/env bash\necho "fail!"\nexit 1\n')
make_executable(f.name)
iret = install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
assert iret == 0
# We should get a failure from the legacy hook
ret, output = _get_commit_output(tempdir_factory)
assert ret == 1
FAIL_OLD_HOOK.assert_matches(output)
def test_install_overwrite_no_existing_hooks(tempdir_factory, store):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
assert not install(
C.DEFAULT_CONFIG_FILE,
store,
hook_types=['pre-commit'],
overwrite=True,
)
ret, output = _get_commit_output(tempdir_factory)
assert ret == 0
NORMAL_PRE_COMMIT_RUN.assert_matches(output)
def test_install_overwrite(tempdir_factory, store):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
_write_legacy_hook(path)
assert not install(
C.DEFAULT_CONFIG_FILE,
store,
hook_types=['pre-commit'],
overwrite=True,
)
ret, output = _get_commit_output(tempdir_factory)
assert ret == 0
NORMAL_PRE_COMMIT_RUN.assert_matches(output)
def test_uninstall_restores_legacy_hooks(tempdir_factory, store):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
_write_legacy_hook(path)
# Now install and uninstall pre-commit
iret = install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
assert iret == 0
assert uninstall(C.DEFAULT_CONFIG_FILE, hook_types=['pre-commit']) == 0
# Make sure we installed the "old" hook correctly
ret, output = _get_commit_output(tempdir_factory, touch_file='baz')
assert ret == 0
EXISTING_COMMIT_RUN.assert_matches(output)
def test_replace_old_commit_script(tempdir_factory, store):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
# Install a script that looks like our old script
pre_commit_contents = resource_text('hook-tmpl')
new_contents = pre_commit_contents.replace(
CURRENT_HASH.decode(), PRIOR_HASHES[-1].decode(),
)
os.makedirs(os.path.join(path, '.git/hooks'), exist_ok=True)
with open(os.path.join(path, '.git/hooks/pre-commit'), 'w') as f:
f.write(new_contents)
make_executable(f.name)
# Install normally
iret = install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
assert iret == 0
ret, output = _get_commit_output(tempdir_factory)
assert ret == 0
NORMAL_PRE_COMMIT_RUN.assert_matches(output)
def test_uninstall_doesnt_remove_not_our_hooks(in_git_dir):
before_commit = in_git_dir.join('.git/hooks').ensure_dir() \
.join('pre-commit')
before_commit.write('#!/usr/bin/env bash\necho 1\n')
make_executable(before_commit.strpath)
assert uninstall(C.DEFAULT_CONFIG_FILE, hook_types=['pre-commit']) == 0
assert before_commit.exists()
PRE_INSTALLED = re_assert.Matches(
fr'Bash hook\.+Passed\n'
fr'\[master [a-f0-9]{{7}}\] commit!\n'
fr'{FILES_CHANGED}'
fr' create mode 100644 foo\n$',
)
def test_installs_hooks_with_hooks_True(tempdir_factory, store):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
install(
C.DEFAULT_CONFIG_FILE,
store,
hook_types=['pre-commit'],
hooks=True,
)
ret, output = _get_commit_output(
tempdir_factory, pre_commit_home=store.directory,
)
assert ret == 0
PRE_INSTALLED.assert_matches(output)
def test_install_hooks_command(tempdir_factory, store):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
install_hooks(C.DEFAULT_CONFIG_FILE, store)
ret, output = _get_commit_output(
tempdir_factory, pre_commit_home=store.directory,
)
assert ret == 0
PRE_INSTALLED.assert_matches(output)
def test_installed_from_venv(tempdir_factory, store):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
# No environment so pre-commit is not on the path when running!
# Should still pick up the python from when we installed
ret, output = _get_commit_output(
tempdir_factory,
env={
'HOME': os.path.expanduser('~'),
'PATH': _path_without_us(),
'TERM': os.environ.get('TERM', ''),
# Windows needs this to import `random`
'SYSTEMROOT': os.environ.get('SYSTEMROOT', ''),
# Windows needs this to resolve executables
'PATHEXT': os.environ.get('PATHEXT', ''),
# Git needs this to make a commit
'GIT_AUTHOR_NAME': os.environ['GIT_AUTHOR_NAME'],
'GIT_COMMITTER_NAME': os.environ['GIT_COMMITTER_NAME'],
'GIT_AUTHOR_EMAIL': os.environ['GIT_AUTHOR_EMAIL'],
'GIT_COMMITTER_EMAIL': os.environ['GIT_COMMITTER_EMAIL'],
},
)
assert ret == 0
NORMAL_PRE_COMMIT_RUN.assert_matches(output)
def _get_push_output(tempdir_factory, remote='origin', opts=()):
return cmd_output_mocked_pre_commit_home(
'git', 'push', remote, 'HEAD:new_branch', *opts,
tempdir_factory=tempdir_factory,
retcode=None,
)[:2]
def test_pre_push_integration_failing(tempdir_factory, store):
upstream = make_consuming_repo(tempdir_factory, 'failing_hook_repo')
path = tempdir_factory.get()
cmd_output('git', 'clone', upstream, path)
with cwd(path):
install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-push'])
# commit succeeds because pre-commit is only installed for pre-push
assert _get_commit_output(tempdir_factory)[0] == 0
assert _get_commit_output(tempdir_factory, touch_file='zzz')[0] == 0
retc, output = _get_push_output(tempdir_factory)
assert retc == 1
assert 'Failing hook' in output
assert 'Failed' in output
assert 'foo zzz' in output # both filenames should be printed
assert 'hook id: failing_hook' in output
def test_pre_push_integration_accepted(tempdir_factory, store):
upstream = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
path = tempdir_factory.get()
cmd_output('git', 'clone', upstream, path)
with cwd(path):
install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-push'])
assert _get_commit_output(tempdir_factory)[0] == 0
retc, output = _get_push_output(tempdir_factory)
assert retc == 0
assert 'Bash hook' in output
assert 'Passed' in output
def test_pre_push_force_push_without_fetch(tempdir_factory, store):
upstream = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
path1 = tempdir_factory.get()
path2 = tempdir_factory.get()
cmd_output('git', 'clone', upstream, path1)
cmd_output('git', 'clone', upstream, path2)
with cwd(path1):
assert _get_commit_output(tempdir_factory)[0] == 0
assert _get_push_output(tempdir_factory)[0] == 0
with cwd(path2):
install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-push'])
assert _get_commit_output(tempdir_factory, msg='force!')[0] == 0
retc, output = _get_push_output(tempdir_factory, opts=('--force',))
assert retc == 0
assert 'Bash hook' in output
assert 'Passed' in output
def test_pre_push_new_upstream(tempdir_factory, store):
upstream = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
upstream2 = git_dir(tempdir_factory)
path = tempdir_factory.get()
cmd_output('git', 'clone', upstream, path)
with cwd(path):
install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-push'])
assert _get_commit_output(tempdir_factory)[0] == 0
cmd_output('git', 'remote', 'rename', 'origin', 'upstream')
cmd_output('git', 'remote', 'add', 'origin', upstream2)
retc, output = _get_push_output(tempdir_factory)
assert retc == 0
assert 'Bash hook' in output
assert 'Passed' in output
def test_pre_push_environment_variables(tempdir_factory, store):
config = {
'repo': 'local',
'hooks': [
{
'id': 'print-remote-info',
'name': 'print remote info',
'entry': 'bash -c "echo remote: $PRE_COMMIT_REMOTE_NAME"',
'language': 'system',
'verbose': True,
},
],
}
upstream = git_dir(tempdir_factory)
clone = tempdir_factory.get()
cmd_output('git', 'clone', upstream, clone)
add_config_to_repo(clone, config)
with cwd(clone):
install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-push'])
cmd_output('git', 'remote', 'rename', 'origin', 'origin2')
retc, output = _get_push_output(tempdir_factory, remote='origin2')
assert retc == 0
assert '\nremote: origin2\n' in output
def test_pre_push_integration_empty_push(tempdir_factory, store):
upstream = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
path = tempdir_factory.get()
cmd_output('git', 'clone', upstream, path)
with cwd(path):
install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-push'])
_get_push_output(tempdir_factory)
retc, output = _get_push_output(tempdir_factory)
assert output == 'Everything up-to-date\n'
assert retc == 0
def test_pre_push_legacy(tempdir_factory, store):
upstream = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
path = tempdir_factory.get()
cmd_output('git', 'clone', upstream, path)
with cwd(path):
os.makedirs(os.path.join(path, '.git/hooks'), exist_ok=True)
with open(os.path.join(path, '.git/hooks/pre-push'), 'w') as f:
f.write(
'#!/usr/bin/env bash\n'
'set -eu\n'
'read lr ls rr rs\n'
'test -n "$lr" -a -n "$ls" -a -n "$rr" -a -n "$rs"\n'
'echo legacy\n',
)
make_executable(f.name)
install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-push'])
assert _get_commit_output(tempdir_factory)[0] == 0
retc, output = _get_push_output(tempdir_factory)
assert retc == 0
first_line, _, third_line = output.splitlines()[:3]
assert first_line == 'legacy'
assert third_line.startswith('Bash hook')
assert third_line.endswith('Passed')
def test_commit_msg_integration_failing(
commit_msg_repo, tempdir_factory, store,
):
install(C.DEFAULT_CONFIG_FILE, store, hook_types=['commit-msg'])
retc, out = _get_commit_output(tempdir_factory)
assert retc == 1
assert out == '''\
Must have "Signed off by:"...............................................Failed
- hook id: must-have-signoff
- exit code: 1
'''
def test_commit_msg_integration_passing(
commit_msg_repo, tempdir_factory, store,
):
install(C.DEFAULT_CONFIG_FILE, store, hook_types=['commit-msg'])
msg = 'Hi\nSigned off by: me, lol'
retc, out = _get_commit_output(tempdir_factory, msg=msg)
assert retc == 0
first_line = out.splitlines()[0]
assert first_line.startswith('Must have "Signed off by:"...')
assert first_line.endswith('...Passed')
def test_commit_msg_legacy(commit_msg_repo, tempdir_factory, store):
hook_path = os.path.join(commit_msg_repo, '.git/hooks/commit-msg')
os.makedirs(os.path.dirname(hook_path), exist_ok=True)
with open(hook_path, 'w') as hook_file:
hook_file.write(
'#!/usr/bin/env bash\n'
'set -eu\n'
'test -e "$1"\n'
'echo legacy\n',
)
make_executable(hook_path)
install(C.DEFAULT_CONFIG_FILE, store, hook_types=['commit-msg'])
msg = 'Hi\nSigned off by: asottile'
retc, out = _get_commit_output(tempdir_factory, msg=msg)
assert retc == 0
first_line, second_line = out.splitlines()[:2]
assert first_line == 'legacy'
assert second_line.startswith('Must have "Signed off by:"...')
def test_post_commit_integration(tempdir_factory, store):
path = git_dir(tempdir_factory)
config = [
{
'repo': 'local',
'hooks': [{
'id': 'post-commit',
'name': 'Post commit',
'entry': 'touch post-commit.tmp',
'language': 'system',
'always_run': True,
'verbose': True,
'stages': ['post-commit'],
}],
},
]
write_config(path, config)
with cwd(path):
_get_commit_output(tempdir_factory)
assert not os.path.exists('post-commit.tmp')
install(C.DEFAULT_CONFIG_FILE, store, hook_types=['post-commit'])
_get_commit_output(tempdir_factory)
assert os.path.exists('post-commit.tmp')
def test_post_merge_integration(tempdir_factory, store):
path = git_dir(tempdir_factory)
config = [
{
'repo': 'local',
'hooks': [{
'id': 'post-merge',
'name': 'Post merge',
'entry': 'touch post-merge.tmp',
'language': 'system',
'always_run': True,
'verbose': True,
'stages': ['post-merge'],
}],
},
]
write_config(path, config)
with cwd(path):
# create a simple diamond of commits for a non-trivial merge
open('init', 'a').close()
cmd_output('git', 'add', '.')
git_commit()
open('master', 'a').close()
cmd_output('git', 'add', '.')
git_commit()
cmd_output('git', 'checkout', '-b', 'branch', 'HEAD^')
open('branch', 'a').close()
cmd_output('git', 'add', '.')
git_commit()
cmd_output('git', 'checkout', 'master')
install(C.DEFAULT_CONFIG_FILE, store, hook_types=['post-merge'])
retc, stdout, stderr = cmd_output_mocked_pre_commit_home(
'git', 'merge', 'branch',
tempdir_factory=tempdir_factory,
)
assert retc == 0
assert os.path.exists('post-merge.tmp')
def test_post_rewrite_integration(tempdir_factory, store):
path = git_dir(tempdir_factory)
config = [
{
'repo': 'local',
'hooks': [{
'id': 'post-rewrite',
'name': 'Post rewrite',
'entry': 'touch post-rewrite.tmp',
'language': 'system',
'always_run': True,
'verbose': True,
'stages': ['post-rewrite'],
}],
},
]
write_config(path, config)
with cwd(path):
open('init', 'a').close()
cmd_output('git', 'add', '.')
install(C.DEFAULT_CONFIG_FILE, store, hook_types=['post-rewrite'])
git_commit()
assert not os.path.exists('post-rewrite.tmp')
git_commit('--amend', '-m', 'amended message')
assert os.path.exists('post-rewrite.tmp')
def test_post_checkout_integration(tempdir_factory, store):
path = git_dir(tempdir_factory)
config = [
{
'repo': 'local',
'hooks': [{
'id': 'post-checkout',
'name': 'Post checkout',
'entry': 'bash -c "echo ${PRE_COMMIT_TO_REF}"',
'language': 'system',
'always_run': True,
'verbose': True,
'stages': ['post-checkout'],
}],
},
{'repo': 'meta', 'hooks': [{'id': 'identity'}]},
]
write_config(path, config)
with cwd(path):
cmd_output('git', 'add', '.')
git_commit()
# add a file only on `feature`, it should not be passed to hooks
cmd_output('git', 'checkout', '-b', 'feature')
open('some_file', 'a').close()
cmd_output('git', 'add', '.')
git_commit()
cmd_output('git', 'checkout', 'master')
install(C.DEFAULT_CONFIG_FILE, store, hook_types=['post-checkout'])
retc, _, stderr = cmd_output('git', 'checkout', 'feature')
assert stderr is not None
assert retc == 0
assert git.head_rev(path) in stderr
assert 'some_file' not in stderr
def test_skips_post_checkout_unstaged_changes(tempdir_factory, store):
path = git_dir(tempdir_factory)
config = {
'repo': 'local',
'hooks': [{
'id': 'fail',
'name': 'fail',
'entry': 'fail',
'language': 'fail',
'always_run': True,
'stages': ['post-checkout'],
}],
}
write_config(path, config)
with cwd(path):
cmd_output('git', 'add', '.')
_get_commit_output(tempdir_factory)
install(C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'])
install(C.DEFAULT_CONFIG_FILE, store, hook_types=['post-checkout'])
# make an unstaged change so staged_files_only fires
open('file', 'a').close()
cmd_output('git', 'add', 'file')
with open('file', 'w') as f:
f.write('unstaged changes')
retc, out = _get_commit_output(tempdir_factory, all_files=False)
assert retc == 0
def test_prepare_commit_msg_integration_failing(
failing_prepare_commit_msg_repo, tempdir_factory, store,
):
install(C.DEFAULT_CONFIG_FILE, store, hook_types=['prepare-commit-msg'])
retc, out = _get_commit_output(tempdir_factory)
assert retc == 1
assert out == '''\
Add "Signed off by:".....................................................Failed
- hook id: add-signoff
- exit code: 1
'''
def test_prepare_commit_msg_integration_passing(
prepare_commit_msg_repo, tempdir_factory, store,
):
install(C.DEFAULT_CONFIG_FILE, store, hook_types=['prepare-commit-msg'])
retc, out = _get_commit_output(tempdir_factory, msg='Hi')
assert retc == 0
first_line = out.splitlines()[0]
assert first_line.startswith('Add "Signed off by:"...')
assert first_line.endswith('...Passed')
commit_msg_path = os.path.join(
prepare_commit_msg_repo, '.git/COMMIT_EDITMSG',
)
with open(commit_msg_path) as f:
assert 'Signed off by: ' in f.read()
def test_prepare_commit_msg_legacy(
prepare_commit_msg_repo, tempdir_factory, store,
):
hook_path = os.path.join(
prepare_commit_msg_repo, '.git/hooks/prepare-commit-msg',
)
os.makedirs(os.path.dirname(hook_path), exist_ok=True)
with open(hook_path, 'w') as hook_file:
hook_file.write(
'#!/usr/bin/env bash\n'
'set -eu\n'
'test -e "$1"\n'
'echo legacy\n',
)
make_executable(hook_path)
install(C.DEFAULT_CONFIG_FILE, store, hook_types=['prepare-commit-msg'])
retc, out = _get_commit_output(tempdir_factory, msg='Hi')
assert retc == 0
first_line, second_line = out.splitlines()[:2]
assert first_line == 'legacy'
assert second_line.startswith('Add "Signed off by:"...')
commit_msg_path = os.path.join(
prepare_commit_msg_repo, '.git/COMMIT_EDITMSG',
)
with open(commit_msg_path) as f:
assert 'Signed off by: ' in f.read()
def test_pre_merge_commit_integration(tempdir_factory, store):
output_pattern = re_assert.Matches(
r'^('
r"Merge made by the '(ort|recursive)' strategy.\n"
r'\[INFO\] Initializing environment for .+\n'
r'Bash hook\.+Passed\n'
r'|'
r'\[INFO\] Initializing environment for .+\n'
r'Bash hook\.+Passed\n'
r"Merge made by the '(ort|recursive)' strategy.\n"
r')'
r' foo \| 0\n'
r' 1 file changed, 0 insertions\(\+\), 0 deletions\(-\)\n'
r' create mode 100644 foo\n$',
)
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
ret = install(
C.DEFAULT_CONFIG_FILE,
store,
hook_types=['pre-merge-commit'],
)
assert ret == 0
cmd_output('git', 'checkout', 'master', '-b', 'feature')
_get_commit_output(tempdir_factory)
cmd_output('git', 'checkout', 'master')
ret, output, _ = cmd_output_mocked_pre_commit_home(
'git', 'merge', '--no-ff', '--no-edit', 'feature',
tempdir_factory=tempdir_factory,
)
assert ret == 0
output_pattern.assert_matches(output)
def test_install_disallow_missing_config(tempdir_factory, store):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
remove_config_from_repo(path)
ret = install(
C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'],
overwrite=True, skip_on_missing_config=False,
)
assert ret == 0
ret, output = _get_commit_output(tempdir_factory)
assert ret == 1
def test_install_allow_missing_config(tempdir_factory, store):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
remove_config_from_repo(path)
ret = install(
C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'],
overwrite=True, skip_on_missing_config=True,
)
assert ret == 0
ret, output = _get_commit_output(tempdir_factory)
assert ret == 0
expected = (
'`.pre-commit-config.yaml` config file not found. '
'Skipping `pre-commit`.'
)
assert expected in output
def test_install_temporarily_allow_missing_config(tempdir_factory, store):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
remove_config_from_repo(path)
ret = install(
C.DEFAULT_CONFIG_FILE, store, hook_types=['pre-commit'],
overwrite=True, skip_on_missing_config=False,
)
assert ret == 0
env = dict(os.environ, PRE_COMMIT_ALLOW_NO_CONFIG='1')
ret, output = _get_commit_output(tempdir_factory, env=env)
assert ret == 0
expected = (
'`.pre-commit-config.yaml` config file not found. '
'Skipping `pre-commit`.'
)
assert expected in output
def test_install_uninstall_default_hook_types(in_git_dir, store):
cfg_src = 'default_install_hook_types: [pre-commit, pre-push]\nrepos: []\n'
in_git_dir.join(C.DEFAULT_CONFIG_FILE).write(cfg_src)
assert not install(C.DEFAULT_CONFIG_FILE, store, hook_types=None)
assert os.access(in_git_dir.join('.git/hooks/pre-commit').strpath, os.X_OK)
assert os.access(in_git_dir.join('.git/hooks/pre-push').strpath, os.X_OK)
assert not uninstall(C.DEFAULT_CONFIG_FILE, hook_types=None)
assert not in_git_dir.join('.git/hooks/pre-commit').exists()
assert not in_git_dir.join('.git/hooks/pre-push').exists()
|
[] |
[] |
[
"GIT_COMMITTER_NAME",
"GIT_COMMITTER_EMAIL",
"GIT_AUTHOR_NAME",
"PATHEXT",
"SYSTEMROOT",
"GIT_AUTHOR_EMAIL",
"TERM"
] |
[]
|
["GIT_COMMITTER_NAME", "GIT_COMMITTER_EMAIL", "GIT_AUTHOR_NAME", "PATHEXT", "SYSTEMROOT", "GIT_AUTHOR_EMAIL", "TERM"]
|
python
| 7 | 0 | |
OrderSystemManagement/wsgi.py
|
"""
WSGI config for OrderSystemManagement project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "OrderSystemManagement.settings")
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
pkg/handlers/msteam/msteam.go
|
/*
Copyright 2016 Skippbox, Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package msteam
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"github.com/bitnami-labs/kubewatch/config"
"github.com/bitnami-labs/kubewatch/pkg/event"
)
var msteamsErrMsg = `
%s
You need to set the MS teams webhook URL,
using --webhookURL, or using environment variables:
export KW_MSTEAMS_WEBHOOKURL=webhook_url
Command line flags will override environment variables
`
var msTeamsColors = map[string]string{
"Normal": "2DC72D",
"Warning": "DEFF22",
"Danger": "8C1A1A",
}
// Constants for Sending a Card
const (
messageType = "MessageCard"
context = "http://schema.org/extensions"
)
// TeamsMessageCard is for the Card Fields to send in Teams
// The Documentation is in https://docs.microsoft.com/en-us/outlook/actionable-messages/card-reference#card-fields
type TeamsMessageCard struct {
Type string `json:"@type"`
Context string `json:"@context"`
ThemeColor string `json:"themeColor"`
Summary string `json:"summary"`
Title string `json:"title"`
Text string `json:"text,omitempty"`
Sections []TeamsMessageCardSection `json:"sections"`
}
// TeamsMessageCardSection is placed under TeamsMessageCard.Sections
// Each element of AlertWebHook.Alerts creates one element of TeamsMessageCard.Sections
type TeamsMessageCardSection struct {
ActivityTitle string `json:"activityTitle"`
Facts []TeamsMessageCardSectionFacts `json:"facts"`
Markdown bool `json:"markdown"`
}
// TeamsMessageCardSectionFacts is placed under TeamsMessageCardSection.Facts
type TeamsMessageCardSectionFacts struct {
Name string `json:"name"`
Value string `json:"value"`
}
// MSTeams handler implements the Handler interface and sends
// notifications to a Microsoft Teams channel via an incoming webhook
type MSTeams struct {
// TeamsWebhookURL is the webhook url of the Teams connector
TeamsWebhookURL string
}
// sendCard sends the JSON Encoded TeamsMessageCard to the webhook URL
func sendCard(ms *MSTeams, card *TeamsMessageCard) (*http.Response, error) {
buffer := new(bytes.Buffer)
if err := json.NewEncoder(buffer).Encode(card); err != nil {
return nil, fmt.Errorf("Failed encoding message card: %v", err)
}
res, err := http.Post(ms.TeamsWebhookURL, "application/json", buffer)
if err != nil {
return nil, fmt.Errorf("Failed sending to webhook url %s. Got the error: %v",
ms.TeamsWebhookURL, err)
}
if res.StatusCode != http.StatusOK {
resMessage, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, fmt.Errorf("Failed reading Teams http response: %v", err)
}
return nil, fmt.Errorf("Failed sending to the Teams Channel. Teams http response: %s, %s",
res.Status, string(resMessage))
}
if err := res.Body.Close(); err != nil {
return nil, err
}
return res, nil
}
// Init initializes handler configuration
func (ms *MSTeams) Init(c *config.Config) error {
webhookURL := c.Handler.MSTeams.WebhookURL
if webhookURL == "" {
webhookURL = os.Getenv("KW_MSTEAMS_WEBHOOKURL")
}
if webhookURL == "" {
return fmt.Errorf(msteamsErrMsg, "Missing MS teams webhook URL")
}
ms.TeamsWebhookURL = webhookURL
return nil
}
// Handle handles notification.
func (ms *MSTeams) Handle(e event.Event) {
card := &TeamsMessageCard{
Type: messageType,
Context: context,
Title: "kubewatch",
// Set a default Summary, this is required for Microsoft Teams
Summary: "kubewatch notification received",
}
card.ThemeColor = msTeamsColors[e.Status]
var s TeamsMessageCardSection
s.ActivityTitle = e.Message()
s.Markdown = true
card.Sections = append(card.Sections, s)
if _, err := sendCard(ms, card); err != nil {
log.Printf("%s\n", err)
return
}
log.Printf("Message successfully sent to MS Teams")
}
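// Editorial sketch (not part of the original handler): one way a caller might
// wire this handler up, assuming a populated config.Config and an event.Event.
// The function below is hypothetical and only illustrates the Init/Handle flow.
//
//	func notifyTeams(conf *config.Config, e event.Event) {
//		var ms MSTeams
//		if err := ms.Init(conf); err != nil {
//			log.Printf("msteams init failed: %v", err)
//			return
//		}
//		ms.Handle(e) // posts a MessageCard to the configured webhook URL
//	}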
|
[
"\"KW_MSTEAMS_WEBHOOKURL\""
] |
[] |
[
"KW_MSTEAMS_WEBHOOKURL"
] |
[]
|
["KW_MSTEAMS_WEBHOOKURL"]
|
go
| 1 | 0 | |
locustfiles/collections.py
|
# -*- coding: utf-8 -*-
import itertools
import json
import os
import random
import uuid
import gevent.pool
import gevent.queue
import hca.util
import locust
from locustfiles.common import dss_task, fire_for_request, get_replica
import locustfiles.common.utils
class CollectionsTaskSet(locust.TaskSequence):
"""
Test create/read/delete collections operations.
See :mod:`scale_tests.collections` and
:func:`locustfiles.common.dss_task`.
"""
collections = gevent.queue.Queue()
replica = get_replica()
bundles = []
bundle_file = os.environ.get('USE_BUNDLE_FILE')
bundle_amount = int(os.environ.get('BUNDLE_AMOUNT', 1))
collection_size = int(os.environ.get('COLLECTION_SIZE', 10))
def setup(self):
print("Creating test bundles...")
if self.bundle_file:
with open(self.bundle_file, 'r') as f:
CollectionsTaskSet.bundles = json.load(f)
print(f"Loaded bundle cache with {len(CollectionsTaskSet.bundles)} bundles")
self.bundle_amount -= len(CollectionsTaskSet.bundles)
group = gevent.pool.Group()
CollectionsTaskSet.bundles.extend(group.map(lambda _: self._generate_bundle(), range(self.bundle_amount)))
group.join(raise_error=True)
print("Done creating test bundles!")
# This part is a little creative, so read carefully.
hca.util._ClientMethodFactory._consume_response = lambda _, y: y
"""
The scale test :class:`scale_tests.collections.CollectionsUser`
inherits from :class:`DSSLocust`, which implements a method
:meth:`DSSLocust.client` that we use to make requests. That
method is ultimately implemented in
:class:`hca.util._ClientMethodFactory`. The method
:meth:`hca.util._ClientMethodFactory._consume_response` does
what it says on the tin and consumes a :class:`requests.Response`
object, returning the parsed content from the server. By
replacing that method with `lambda x, y: y` (x being `self`), we
can return the original response object and pry the response code
and time elapsed in a nice clean way in
:func:`locustfiles.common.dss_task`.
"""
def teardown(self):
try:
for bundle in self.bundles:
self.client.delete_bundle(replica=self.replica, reason='test', **bundle)
except hca.util.exceptions.SwaggerAPIException as e:
if e.code == '403':
pass # not all keys can delete bundles
while not self.collections.empty():
uuid, version = self.collections.get()
self.client.delete_collection(uuid=uuid, replica=self.replica, version=version)
def _generate_bundle(self):
"""Generate an empty bundle for testing."""
new_uuid = str(uuid.uuid4())
new_version = '2019-07-26T201104.431632Z' # arbitrary
r = self.client.put_bundle(uuid=new_uuid, replica='aws',
version=new_version, files=[],
creator_uid=self.client.config.get("creator_uid", 0))
return {'uuid': new_uuid, 'version': r['version'], 'type': 'bundle'}
def _generate_collection(self, n: int):
"""
:param int n: number of bundles inside the collection
"""
col = locustfiles.common.utils.generate_collection()
col['contents'] = random.choices(self.bundles, k=n)
return col
@locust.seq_task(1)
@locust.task
def create_collection(self):
new_uuid = str(uuid.uuid4())
new_collection = self._generate_collection(self.collection_size)
# `PUT /collections` is inline and is bounded by API Gateway's
# max payload size so trying to PUT a really big collection will
# fail. So if the collection is really big (the per-request limit
# is 1000 items), we need to slowly add to the collection with
        # `PATCH /collections/{uuid}` (see the standalone chunking sketch at
        # the end of this file).
if self.collection_size < 1000:
r = self.client.put_collection(replica=self.replica, uuid=new_uuid, **new_collection)
fire_for_request(r, 'PUT /collections')
else:
chunked_contents = self.chunk(new_collection['contents'], 1000)
new_collection['contents'] = next(chunked_contents)
r = self.client.put_collection(replica=self.replica, uuid=new_uuid, **new_collection)
fire_for_request(r, 'PUT /collections')
print(f"New collection: {new_uuid} {new_collection['version']}")
for chunk in chunked_contents:
new_collection['contents'] = chunk
r = self.client.patch_collection(replica=self.replica, uuid=new_uuid,
**new_collection)
fire_for_request(r, 'PATCH /collections/{uuid}')
self.collections.put((new_uuid, r.json()['version']))
return r
@staticmethod
def chunk(it, size):
# https://stackoverflow.com/a/22045226
it = iter(it)
return iter(lambda: tuple(itertools.islice(it, size)), ())
@locust.seq_task(2)
@locust.task(5)
@dss_task('GET /collections')
def get_collections(self):
return self.client.get_collections(replica=self.replica)
@locust.seq_task(3)
@locust.task(10)
@dss_task('GET /collections/{uuid}')
def get_collection(self):
# Do get() and put() instead of peek() so that collection can't
# be deleted before we try and GET it
uuid, version = self.collections.get(block=True)
r = self.client.get_collection(uuid=uuid, replica=self.replica)
self.collections.put((uuid, version))
return r
# TODO: Test collections paging?
@locust.seq_task(4)
@locust.task
@dss_task('DELETE /collections/{uuid}')
def delete_collection(self):
uuid, version = self.collections.get(block=True)
return self.client.delete_collection(uuid=uuid, replica=self.replica, version=version)
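# Editorial sketch (not part of the original locustfile): the PUT-then-PATCH
# chunking pattern used in `create_collection`, shown standalone. The 1000-item
# bound mirrors the per-request limit noted above; the payload names below are
# hypothetical.
#
#     contents = [{"uuid": str(uuid.uuid4()), "version": "v1", "type": "bundle"}
#                 for _ in range(2500)]
#     batches = CollectionsTaskSet.chunk(contents, 1000)
#     print([len(b) for b in batches])  # [1000, 1000, 500]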
|
[] |
[] |
[
"COLLECTION_SIZE",
"BUNDLE_AMOUNT",
"USE_BUNDLE_FILE"
] |
[]
|
["COLLECTION_SIZE", "BUNDLE_AMOUNT", "USE_BUNDLE_FILE"]
|
python
| 3 | 0 | |
HackerRank Solutions/Interview Preparation Kit/Linked Lists/Find Merge Point of Two Lists.java
|
import java.io.*;
import java.math.*;
import java.security.*;
import java.text.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.regex.*;
public class Solution {
static class SinglyLinkedListNode {
public int data;
public SinglyLinkedListNode next;
public SinglyLinkedListNode(int nodeData) {
this.data = nodeData;
this.next = null;
}
}
static class SinglyLinkedList {
public SinglyLinkedListNode head;
public SinglyLinkedListNode tail;
public SinglyLinkedList() {
this.head = null;
this.tail = null;
}
public void insertNode(int nodeData) {
SinglyLinkedListNode node = new SinglyLinkedListNode(nodeData);
if (this.head == null) {
this.head = node;
} else {
this.tail.next = node;
}
this.tail = node;
}
}
public static void printSinglyLinkedList(SinglyLinkedListNode node, String sep, BufferedWriter bufferedWriter) throws IOException {
while (node != null) {
bufferedWriter.write(String.valueOf(node.data));
node = node.next;
if (node != null) {
bufferedWriter.write(sep);
}
}
}
// Complete the findMergeNode function below.
/*
* For your reference:
*
* SinglyLinkedListNode {
* int data;
* SinglyLinkedListNode next;
* }
*
*/
static int findMergeNode(SinglyLinkedListNode head1, SinglyLinkedListNode head2) {
SinglyLinkedListNode headB;
while(head1 != null){
headB = head2;
while(headB != null){
if(headB == head1){
return headB.data;
}
headB = headB.next;
}
head1 = head1.next;
}
        // No merge point was found; under the problem constraints this is
        // unreachable, but return a sentinel instead of dereferencing null.
        return -1;
}
private static final Scanner scanner = new Scanner(System.in);
public static void main(String[] args) throws IOException {
BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
int tests = scanner.nextInt();
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
for (int testsItr = 0; testsItr < tests; testsItr++) {
int index = scanner.nextInt();
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
SinglyLinkedList llist1 = new SinglyLinkedList();
int llist1Count = scanner.nextInt();
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
for (int i = 0; i < llist1Count; i++) {
int llist1Item = scanner.nextInt();
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
llist1.insertNode(llist1Item);
}
SinglyLinkedList llist2 = new SinglyLinkedList();
int llist2Count = scanner.nextInt();
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
for (int i = 0; i < llist2Count; i++) {
int llist2Item = scanner.nextInt();
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
llist2.insertNode(llist2Item);
}
SinglyLinkedListNode ptr1 = llist1.head;
SinglyLinkedListNode ptr2 = llist2.head;
for (int i = 0; i < llist1Count; i++) {
if (i < index) {
ptr1 = ptr1.next;
}
}
for (int i = 0; i < llist2Count; i++) {
if (i != llist2Count-1) {
ptr2 = ptr2.next;
}
}
ptr2.next = ptr1;
int result = findMergeNode(llist1.head, llist2.head);
bufferedWriter.write(String.valueOf(result));
bufferedWriter.newLine();
}
bufferedWriter.close();
scanner.close();
}
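    // Editorial addition (not part of the original submission): a sketch of the
    // classic O(n + m) two-pointer approach. Each pointer walks its own list and
    // then switches to the other list's head, so both traverse n + m nodes and
    // meet at the merge node (or both become null if the lists never intersect).
    static int findMergeNodeTwoPointer(SinglyLinkedListNode headA, SinglyLinkedListNode headB) {
        SinglyLinkedListNode a = headA;
        SinglyLinkedListNode b = headB;
        while (a != b) {
            a = (a == null) ? headB : a.next;
            b = (b == null) ? headA : b.next;
        }
        return (a == null) ? -1 : a.data; // -1 if the lists never merge
    }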
}
|
[
"\"OUTPUT_PATH\""
] |
[] |
[
"OUTPUT_PATH"
] |
[]
|
["OUTPUT_PATH"]
|
java
| 1 | 0 | |
tests/e2e/etcd_spawn_cov.go
|
// Copyright 2017 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build cov
package e2e
import (
"fmt"
"os"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/jinlongchen/etcd/pkg/expect"
"github.com/jinlongchen/etcd/pkg/fileutil"
"github.com/jinlongchen/etcd/pkg/flags"
)
const noOutputLineCount = 2 // cov-enabled binaries emit PASS and coverage count lines
func spawnCmd(args []string) (*expect.ExpectProcess, error) {
if args[0] == binPath {
return spawnEtcd(args)
}
if args[0] == ctlBinPath || args[0] == ctlBinPath+"3" {
// avoid test flag conflicts in coverage enabled etcdctl by putting flags in ETCDCTL_ARGS
env := []string{
// was \xff, but that's used for testing boundary conditions; 0xe7cd should be safe
"ETCDCTL_ARGS=" + strings.Join(args, "\xe7\xcd"),
}
if args[0] == ctlBinPath+"3" {
env = append(env, "ETCDCTL_API=3")
}
covArgs, err := getCovArgs()
if err != nil {
return nil, err
}
// when withFlagByEnv() is used in testCtl(), env variables for ctl is set to os.env.
// they must be included in ctl_cov_env.
env = append(env, os.Environ()...)
ep, err := expect.NewExpectWithEnv(binDir+"/etcdctl_test", covArgs, env)
if err != nil {
return nil, err
}
ep.StopSignal = syscall.SIGTERM
return ep, nil
}
return expect.NewExpect(args[0], args[1:]...)
}
func spawnEtcd(args []string) (*expect.ExpectProcess, error) {
covArgs, err := getCovArgs()
if err != nil {
return nil, err
}
var env []string
if args[1] == "grpc-proxy" {
// avoid test flag conflicts in coverage enabled etcd by putting flags in ETCDCOV_ARGS
env = append(os.Environ(), "ETCDCOV_ARGS="+strings.Join(args, "\xe7\xcd"))
} else {
env = args2env(args[1:])
}
ep, err := expect.NewExpectWithEnv(binDir+"/etcd_test", covArgs, env)
if err != nil {
return nil, err
}
// ep sends SIGTERM to etcd_test process on ep.close()
// allowing the process to exit gracefully in order to generate a coverage report.
// note: go runtime ignores SIGINT but not SIGTERM
// if e2e test is run as a background process.
ep.StopSignal = syscall.SIGTERM
return ep, nil
}
func getCovArgs() ([]string, error) {
coverPath := os.Getenv("COVERDIR")
if !filepath.IsAbs(coverPath) {
// COVERDIR is relative to etcd root but e2e test has its path set to be relative to the e2e folder.
// adding ".." in front of COVERDIR ensures that e2e saves coverage reports to the correct location.
coverPath = filepath.Join("../..", coverPath)
}
if !fileutil.Exist(coverPath) {
return nil, fmt.Errorf("could not find coverage folder")
}
covArgs := []string{
fmt.Sprintf("-test.coverprofile=e2e.%v.coverprofile", time.Now().UnixNano()),
"-test.outputdir=" + coverPath,
}
return covArgs, nil
}
func args2env(args []string) []string {
var covEnvs []string
for i := range args {
if !strings.HasPrefix(args[i], "--") {
continue
}
flag := strings.Split(args[i], "--")[1]
val := "true"
// split the flag that has "="
// e.g --auto-tls=true" => flag=auto-tls and val=true
if strings.Contains(args[i], "=") {
split := strings.Split(flag, "=")
flag = split[0]
val = split[1]
}
if i+1 < len(args) {
if !strings.HasPrefix(args[i+1], "--") {
val = args[i+1]
}
}
covEnvs = append(covEnvs, flags.FlagToEnv("ETCD", flag)+"="+val)
}
return covEnvs
}
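// Editorial sketch (not part of the original file): the kind of mapping
// args2env produces, assuming pkg/flags.FlagToEnv upper-cases the flag name,
// replaces dashes with underscores, and prefixes it with "ETCD_".
//
//	args2env([]string{"--name", "infra1", "--auto-tls=true"})
//	// => []string{"ETCD_NAME=infra1", "ETCD_AUTO_TLS=true"}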
|
[
"\"COVERDIR\""
] |
[] |
[
"COVERDIR"
] |
[]
|
["COVERDIR"]
|
go
| 1 | 0 | |
python/ray/data/dataset.py
|
import logging
import os
import time
from typing import (
List,
Any,
Callable,
Iterator,
Iterable,
Generic,
Dict,
Optional,
Union,
TYPE_CHECKING,
Tuple,
)
from uuid import uuid4
if TYPE_CHECKING:
import pyarrow
import pandas
import mars
import modin
import dask
import pyspark
import ray.util.sgd
import torch
import tensorflow as tf
from ray.data.dataset_pipeline import DatasetPipeline
from ray.data.grouped_dataset import GroupedDataset
import collections
import itertools
import numpy as np
import ray
from ray.types import ObjectRef
from ray.util.annotations import DeveloperAPI, PublicAPI
from ray.data.block import (
Block,
BlockAccessor,
BlockMetadata,
T,
U,
BlockPartition,
BlockPartitionMetadata,
BlockExecStats,
KeyFn,
_validate_key_fn,
)
from ray.data.context import DatasetContext
from ray.data.datasource import (
Datasource,
CSVDatasource,
JSONDatasource,
NumpyDatasource,
ParquetDatasource,
BlockWritePathProvider,
DefaultBlockWritePathProvider,
ReadTask,
WriteResult,
)
from ray.data.datasource.file_based_datasource import (
_wrap_arrow_serialization_workaround,
_unwrap_arrow_serialization_workaround,
)
from ray.data.row import TableRow
from ray.data.aggregate import AggregateFn, Sum, Max, Min, Mean, Std
from ray.data.random_access_dataset import RandomAccessDataset
from ray.data.impl.remote_fn import cached_remote_fn
from ray.data.impl.block_batching import batch_blocks, BatchType
from ray.data.impl.plan import ExecutionPlan, OneToOneStage, AllToAllStage
from ray.data.impl.stats import DatasetStats
from ray.data.impl.compute import cache_wrapper, CallableClass, ComputeStrategy
from ray.data.impl.output_buffer import BlockOutputBuffer
from ray.data.impl.progress_bar import ProgressBar
from ray.data.impl.shuffle import ShufflePartitionOp
from ray.data.impl.fast_repartition import fast_repartition
from ray.data.impl.sort import sort_impl
from ray.data.impl.block_list import BlockList
from ray.data.impl.lazy_block_list import LazyBlockList
from ray.data.impl.delegating_block_builder import DelegatingBlockBuilder
logger = logging.getLogger(__name__)
# Whether we have warned of Datasets containing multiple epochs of data.
_epoch_warned = False
# Whether we have warned about using slow Dataset transforms.
_slow_warned = False
TensorflowFeatureTypeSpec = Union[
"tf.TypeSpec", List["tf.TypeSpec"], Dict[str, "tf.TypeSpec"]
]
@PublicAPI
class Dataset(Generic[T]):
"""Implements a distributed Arrow dataset.
Datasets are implemented as a list of ``ObjectRef[Block]``. The block
also determines the unit of parallelism. The default block type is the
``pyarrow.Table``. Arrow-incompatible objects are held in ``list`` blocks.
Since Datasets are just lists of Ray object refs, they can be passed
between Ray tasks and actors just like any other object. Datasets support
conversion to/from several more featureful dataframe libraries
(e.g., Spark, Dask, Modin, MARS), and are also compatible with distributed
TensorFlow / PyTorch.
Dataset supports parallel transformations such as .map(), .map_batches(),
and simple repartition, but currently not aggregations and joins.
"""
def __init__(
self,
plan: ExecutionPlan,
epoch: int,
lazy: bool,
):
"""Construct a Dataset (internal API).
The constructor is not part of the Dataset API. Use the ``ray.data.*``
read methods to construct a dataset.
"""
assert isinstance(plan, ExecutionPlan)
self._plan = plan
self._uuid = uuid4().hex
self._epoch = epoch
self._lazy = lazy
if not lazy:
# TODO(ekl) we should clear inputs once we have full lineage recorded.
self._plan.execute(clear_input_blocks=False)
@staticmethod
def copy(dataset: "Dataset[T]") -> "Dataset[T]":
return Dataset(dataset._plan, dataset._epoch, dataset._lazy)
def map(
self,
fn: Union[CallableClass, Callable[[T], U]],
*,
compute: Optional[str] = None,
**ray_remote_args,
) -> "Dataset[U]":
"""Apply the given function to each record of this dataset.
This is a blocking operation. Note that mapping individual records
can be quite slow. Consider using `.map_batches()` for performance.
Examples:
>>> import ray
>>> # Transform python objects.
>>> ds = ray.data.range(1000) # doctest: +SKIP
>>> ds.map(lambda x: x * 2) # doctest: +SKIP
>>> # Transform Arrow records.
>>> ds = ray.data.from_items( # doctest: +SKIP
... [{"value": i} for i in range(1000)])
>>> ds.map(lambda record: {"v2": record["value"] * 2}) # doctest: +SKIP
>>> # Define a callable class that persists state across
>>> # function invocations for efficiency.
>>> init_model = ... # doctest: +SKIP
>>> class CachedModel:
... def __init__(self):
... self.model = init_model()
... def __call__(self, batch):
... return self.model(batch)
>>> # Apply the transform in parallel on GPUs. Since
>>> # compute=ActorPoolStrategy(2, 8) the transform will be applied on an
>>> # autoscaling pool of 2-8 Ray actors, each allocated 1 GPU by Ray.
>>> from ray.data.impl.compute import ActorPoolStrategy
>>> ds.map(CachedModel, # doctest: +SKIP
... compute=ActorPoolStrategy(2, 8),
... num_gpus=1)
Time complexity: O(dataset size / parallelism)
Args:
fn: The function to apply to each record, or a class type
that can be instantiated to create such a callable. Callable classes are
only supported for the actor compute strategy.
compute: The compute strategy, either "tasks" (default) to use Ray
tasks, or ActorPoolStrategy(min, max) to use an autoscaling actor pool.
ray_remote_args: Additional resource requirements to request from
ray (e.g., num_gpus=1 to request GPUs for the map tasks).
"""
self._warn_slow()
fn = cache_wrapper(fn, compute)
context = DatasetContext.get_current()
def transform(block: Block) -> Iterable[Block]:
DatasetContext._set_current(context)
block = BlockAccessor.for_block(block)
output_buffer = BlockOutputBuffer(None, context.target_max_block_size)
for row in block.iter_rows():
output_buffer.add(fn(row))
if output_buffer.has_next():
yield output_buffer.next()
output_buffer.finalize()
if output_buffer.has_next():
yield output_buffer.next()
plan = self._plan.with_stage(
OneToOneStage("map", transform, compute, ray_remote_args)
)
return Dataset(plan, self._epoch, self._lazy)
def map_batches(
self,
fn: Union[CallableClass, Callable[[BatchType], BatchType]],
*,
batch_size: Optional[int] = 4096,
compute: Union[str, ComputeStrategy] = None,
batch_format: str = "native",
**ray_remote_args,
) -> "Dataset[Any]":
"""Apply the given function to batches of records of this dataset.
This is a blocking operation.
Examples:
>>> import ray
>>> # Transform python objects.
>>> ds = ray.data.range(1000) # doctest: +SKIP
>>> # Transform batches in parallel.
>>> ds.map_batches(lambda batch: [v * 2 for v in batch]) # doctest: +SKIP
>>> # Define a callable class that persists state across
>>> # function invocations for efficiency.
>>> init_model = ... # doctest: +SKIP
>>> class CachedModel:
... def __init__(self):
... self.model = init_model()
... def __call__(self, item):
... return self.model(item)
>>> # Apply the transform in parallel on GPUs. Since
>>> # compute=ActorPoolStrategy(2, 8) the transform will be applied on an
>>> # autoscaling pool of 2-8 Ray actors, each allocated 1 GPU by Ray.
>>> from ray.data.impl.compute import ActorPoolStrategy
>>> ds.map_batches( # doctest: +SKIP
... CachedModel, # doctest: +SKIP
... batch_size=256, # doctest: +SKIP
... compute=ActorPoolStrategy(2, 8), # doctest: +SKIP
... num_gpus=1) # doctest: +SKIP
Time complexity: O(dataset size / parallelism)
Args:
fn: The function to apply to each record batch, or a class type
that can be instantiated to create such a callable. Callable classes are
only supported for the actor compute strategy.
batch_size: Request a specific batch size, or None to use entire
blocks as batches. Defaults to a system-chosen batch size.
compute: The compute strategy, either "tasks" (default) to use Ray
tasks, or ActorPoolStrategy(min, max) to use an autoscaling actor pool.
batch_format: Specify "native" to use the native block format
(promotes Arrow to pandas), "pandas" to select
``pandas.DataFrame`` as the batch format,
or "pyarrow" to select ``pyarrow.Table``.
ray_remote_args: Additional resource requirements to request from
ray (e.g., num_gpus=1 to request GPUs for the map tasks).
"""
import pyarrow as pa
import pandas as pd
if batch_size is not None and batch_size < 1:
raise ValueError("Batch size cannot be negative or 0")
fn = cache_wrapper(fn, compute)
context = DatasetContext.get_current()
def transform(block: Block) -> Iterable[Block]:
DatasetContext._set_current(context)
output_buffer = BlockOutputBuffer(None, context.target_max_block_size)
block = BlockAccessor.for_block(block)
total_rows = block.num_rows()
max_batch_size = batch_size
if max_batch_size is None:
max_batch_size = max(total_rows, 1)
for start in range(0, total_rows, max_batch_size):
# Build a block for each batch.
end = min(total_rows, start + max_batch_size)
# Make sure to copy if slicing to avoid the Arrow serialization
# bug where we include the entire base view on serialization.
view = block.slice(start, end, copy=batch_size is not None)
if batch_format == "native":
# Always promote Arrow blocks to pandas for consistency.
if isinstance(view, pa.Table) or isinstance(view, bytes):
view = BlockAccessor.for_block(view).to_pandas()
elif batch_format == "pandas":
view = BlockAccessor.for_block(view).to_pandas()
elif batch_format == "pyarrow":
view = BlockAccessor.for_block(view).to_arrow()
else:
raise ValueError(
"The batch format must be one of 'native', 'pandas', "
"or 'pyarrow', got: {}".format(batch_format)
)
applied = fn(view)
if not (
isinstance(applied, list)
or isinstance(applied, pa.Table)
or isinstance(applied, pd.core.frame.DataFrame)
):
raise ValueError(
"The map batches UDF returned the value "
f"{applied}, which is not allowed. "
"The return type must be either list, "
"pandas.DataFrame, or pyarrow.Table"
)
output_buffer.add_block(applied)
if output_buffer.has_next():
yield output_buffer.next()
output_buffer.finalize()
if output_buffer.has_next():
yield output_buffer.next()
plan = self._plan.with_stage(
OneToOneStage("map_batches", transform, compute, ray_remote_args)
)
return Dataset(plan, self._epoch, self._lazy)
def add_column(
self,
col: str,
fn: Callable[["pandas.DataFrame"], "pandas.Series"],
*,
compute: Optional[str] = None,
**ray_remote_args,
) -> "Dataset[T]":
"""Add the given column to the dataset.
This is only supported for datasets convertible to pandas format.
A function generating the new column values given the batch in pandas
format must be specified.
This is a convenience wrapper over ``.map_batches()``.
Examples:
>>> import ray
>>> ds = ray.data.range_arrow(100) # doctest: +SKIP
>>> # Add a new column equal to value * 2.
>>> ds = ds.add_column( # doctest: +SKIP
... "new_col", lambda df: df["value"] * 2)
>>> # Overwrite the existing "value" with zeros.
>>> ds = ds.add_column("value", lambda df: 0) # doctest: +SKIP
Time complexity: O(dataset size / parallelism)
Args:
col: Name of the column to add. If the name already exists, the
column will be overwritten.
fn: Map function generating the column values given a batch of
records in pandas format.
compute: The compute strategy, either "tasks" (default) to use Ray
tasks, or ActorPoolStrategy(min, max) to use an autoscaling actor pool.
ray_remote_args: Additional resource requirements to request from
ray (e.g., num_gpus=1 to request GPUs for the map tasks).
"""
def process_batch(batch):
batch[col] = fn(batch)
return batch
if not callable(fn):
raise ValueError("`fn` must be callable, got {}".format(fn))
return self.map_batches(
process_batch, batch_format="pandas", compute=compute, **ray_remote_args
)
def flat_map(
self,
fn: Union[CallableClass, Callable[[T], Iterable[U]]],
*,
compute: Optional[str] = None,
**ray_remote_args,
) -> "Dataset[U]":
"""Apply the given function to each record and then flatten results.
This is a blocking operation. Consider using ``.map_batches()`` for
better performance (the batch size can be altered in map_batches).
Examples:
>>> import ray
>>> ds = ray.data.range(1000) # doctest: +SKIP
>>> ds.flat_map(lambda x: [x, x ** 2, x ** 3]) # doctest: +SKIP
Time complexity: O(dataset size / parallelism)
Args:
fn: The function to apply to each record, or a class type
that can be instantiated to create such a callable. Callable classes are
only supported for the actor compute strategy.
compute: The compute strategy, either "tasks" (default) to use Ray
tasks, or ActorPoolStrategy(min, max) to use an autoscaling actor pool.
ray_remote_args: Additional resource requirements to request from
ray (e.g., num_gpus=1 to request GPUs for the map tasks).
"""
self._warn_slow()
fn = cache_wrapper(fn, compute)
context = DatasetContext.get_current()
def transform(block: Block) -> Iterable[Block]:
DatasetContext._set_current(context)
output_buffer = BlockOutputBuffer(None, context.target_max_block_size)
block = BlockAccessor.for_block(block)
for row in block.iter_rows():
for r2 in fn(row):
output_buffer.add(r2)
if output_buffer.has_next():
yield output_buffer.next()
output_buffer.finalize()
if output_buffer.has_next():
yield output_buffer.next()
plan = self._plan.with_stage(
OneToOneStage("flat_map", transform, compute, ray_remote_args)
)
return Dataset(plan, self._epoch, self._lazy)
def filter(
self,
fn: Union[CallableClass, Callable[[T], bool]],
*,
compute: Optional[str] = None,
**ray_remote_args,
) -> "Dataset[T]":
"""Filter out records that do not satisfy the given predicate.
This is a blocking operation. Consider using ``.map_batches()`` for
better performance (you can implement filter by dropping records).
Examples:
>>> import ray
>>> ds = ray.data.range(100) # doctest: +SKIP
>>> ds.filter(lambda x: x % 2 == 0) # doctest: +SKIP
Time complexity: O(dataset size / parallelism)
Args:
fn: The predicate to apply to each record, or a class type
that can be instantiated to create such a callable. Callable classes are
only supported for the actor compute strategy.
compute: The compute strategy, either "tasks" (default) to use Ray
tasks, or ActorPoolStrategy(min, max) to use an autoscaling actor pool.
ray_remote_args: Additional resource requirements to request from
ray (e.g., num_gpus=1 to request GPUs for the map tasks).
"""
self._warn_slow()
fn = cache_wrapper(fn, compute)
context = DatasetContext.get_current()
def transform(block: Block) -> Iterable[Block]:
DatasetContext._set_current(context)
block = BlockAccessor.for_block(block)
builder = block.builder()
for row in block.iter_rows():
if fn(row):
builder.add(row)
return [builder.build()]
plan = self._plan.with_stage(
OneToOneStage("filter", transform, compute, ray_remote_args)
)
return Dataset(plan, self._epoch, self._lazy)
def repartition(self, num_blocks: int, *, shuffle: bool = False) -> "Dataset[T]":
"""Repartition the dataset into exactly this number of blocks.
This is a blocking operation. After repartitioning, all blocks in the
returned dataset will have approximately the same number of rows.
Examples:
>>> import ray
>>> ds = ray.data.range(100) # doctest: +SKIP
>>> # Set the number of output partitions to write to disk.
>>> ds.repartition(10).write_parquet(...) # doctest: +SKIP
Time complexity: O(dataset size / parallelism)
Args:
num_blocks: The number of blocks.
shuffle: Whether to perform a distributed shuffle during the
repartition. When shuffle is enabled, each output block
contains a subset of data rows from each input block, which
requires all-to-all data movement. When shuffle is disabled,
output blocks are created from adjacent input blocks,
minimizing data movement.
Returns:
The repartitioned dataset.
"""
if shuffle:
def do_shuffle(
block_list, clear_input_blocks: bool, block_udf, remote_args
):
if clear_input_blocks:
blocks = block_list.copy()
block_list.clear()
else:
blocks = block_list
shuffle_op = ShufflePartitionOp(block_udf, random_shuffle=False)
return shuffle_op.execute(
blocks,
num_blocks,
clear_input_blocks,
map_ray_remote_args=remote_args,
reduce_ray_remote_args=remote_args,
)
plan = self._plan.with_stage(
AllToAllStage(
"repartition", num_blocks, do_shuffle, supports_block_udf=True
)
)
else:
def do_fast_repartition(block_list, clear_input_blocks: bool, *_):
if clear_input_blocks:
blocks = block_list.copy()
block_list.clear()
else:
blocks = block_list
return fast_repartition(blocks, num_blocks)
plan = self._plan.with_stage(
AllToAllStage("repartition", num_blocks, do_fast_repartition)
)
return Dataset(plan, self._epoch, self._lazy)
def random_shuffle(
self,
*,
seed: Optional[int] = None,
num_blocks: Optional[int] = None,
) -> "Dataset[T]":
"""Randomly shuffle the elements of this dataset.
This is a blocking operation similar to repartition().
Examples:
>>> import ray
>>> ds = ray.data.range(100) # doctest: +SKIP
>>> # Shuffle this dataset randomly.
>>> ds.random_shuffle() # doctest: +SKIP
>>> # Shuffle this dataset with a fixed random seed.
>>> ds.random_shuffle(seed=12345) # doctest: +SKIP
Time complexity: O(dataset size / parallelism)
Args:
seed: Fix the random seed to use, otherwise one will be chosen
based on system randomness.
num_blocks: The number of output blocks after the shuffle, or None
to retain the number of blocks.
Returns:
The shuffled dataset.
"""
def do_shuffle(block_list, clear_input_blocks: bool, block_udf, remote_args):
num_blocks = block_list.executed_num_blocks() # Blocking.
if num_blocks == 0:
return block_list, {}
if clear_input_blocks:
blocks = block_list.copy()
block_list.clear()
else:
blocks = block_list
random_shuffle_op = ShufflePartitionOp(
block_udf, random_shuffle=True, random_seed=seed
)
return random_shuffle_op.execute(
blocks,
num_blocks,
clear_input_blocks,
map_ray_remote_args=remote_args,
reduce_ray_remote_args=remote_args,
)
plan = self._plan.with_stage(
AllToAllStage(
"random_shuffle", num_blocks, do_shuffle, supports_block_udf=True
)
)
return Dataset(plan, self._epoch, self._lazy)
def split(
self, n: int, *, equal: bool = False, locality_hints: Optional[List[Any]] = None
) -> List["Dataset[T]"]:
"""Split the dataset into ``n`` disjoint pieces.
This returns a list of sub-datasets that can be passed to Ray tasks
and actors and used to read the dataset records in parallel.
Examples:
>>> import ray
>>> ds = ray.data.range(100) # doctest: +SKIP
>>> workers = ... # doctest: +SKIP
>>> # Split up a dataset to process over `n` worker actors.
>>> shards = ds.split(len(workers), locality_hints=workers) # doctest: +SKIP
>>> for shard, worker in zip(shards, workers): # doctest: +SKIP
... worker.consume.remote(shard) # doctest: +SKIP
Time complexity: O(1)
See also: ``Dataset.split_at_indices``
Args:
n: Number of child datasets to return.
equal: Whether to guarantee each split has an equal
number of records. This may drop records if they cannot be
divided equally among the splits.
locality_hints: A list of Ray actor handles of size ``n``. The
system will try to co-locate the blocks of the ith dataset
with the ith actor to maximize data locality.
Returns:
A list of ``n`` disjoint dataset splits.
"""
if n <= 0:
raise ValueError(f"The number of splits {n} is not positive.")
        if locality_hints and len(locality_hints) != n:
            raise ValueError(
                f"The length of locality_hints {len(locality_hints)} "
                f"doesn't equal the number of splits {n}."
            )
        if locality_hints and len(set(locality_hints)) != len(locality_hints):
            raise ValueError(
                "locality_hints must not contain duplicate actor handles"
            )
blocks = self._plan.execute()
stats = self._plan.stats()
def _partition_splits(
splits: List[Dataset[T]], part_size: int, counts_cache: Dict[str, int]
):
"""Partition splits into two sets: splits that are smaller than the
target size and splits that are larger than the target size.
"""
splits = sorted(splits, key=lambda s: counts_cache[s._get_uuid()])
idx = next(
i
for i, split in enumerate(splits)
if counts_cache[split._get_uuid()] >= part_size
)
return splits[:idx], splits[idx:]
def _equalize_larger_splits(
splits: List[Dataset[T]],
target_size: int,
counts_cache: Dict[str, int],
num_splits_required: int,
):
"""Split each split into one or more subsplits that are each the
target size, with at most one leftover split that's smaller
than the target size.
            This assumes that the given splits are sorted in ascending order.
"""
if target_size == 0:
return splits, []
new_splits = []
leftovers = []
for split in splits:
size = counts_cache[split._get_uuid()]
if size == target_size:
new_splits.append(split)
continue
split_indices = list(range(target_size, size, target_size))
split_splits = split.split_at_indices(split_indices)
last_split_size = split_splits[-1].count()
if last_split_size < target_size:
# Last split is smaller than the target size, save it for
# our unioning of small splits.
leftover = split_splits.pop()
leftovers.append(leftover)
counts_cache[leftover._get_uuid()] = leftover.count()
if len(new_splits) + len(split_splits) >= num_splits_required:
# Short-circuit if the new splits will make us reach the
# desired number of splits.
new_splits.extend(
split_splits[: num_splits_required - len(new_splits)]
)
break
new_splits.extend(split_splits)
return new_splits, leftovers
def _equalize_smaller_splits(
splits: List[Dataset[T]],
target_size: int,
counts_cache: Dict[str, int],
num_splits_required: int,
):
"""Union small splits up to the target split size.
            This assumes that the given splits are sorted in ascending order.
"""
new_splits = []
union_buffer = []
union_buffer_size = 0
low = 0
high = len(splits) - 1
while low <= high:
# Union small splits up to the target split size.
low_split = splits[low]
low_count = counts_cache[low_split._get_uuid()]
high_split = splits[high]
high_count = counts_cache[high_split._get_uuid()]
if union_buffer_size + high_count <= target_size:
# Try to add the larger split to the union buffer first.
union_buffer.append(high_split)
union_buffer_size += high_count
high -= 1
elif union_buffer_size + low_count <= target_size:
union_buffer.append(low_split)
union_buffer_size += low_count
low += 1
else:
# Neither the larger nor smaller split fit in the union
# buffer, so we split the smaller split into a subsplit
# that will fit into the union buffer and a leftover
# subsplit that we add back into the candidate split list.
diff = target_size - union_buffer_size
diff_split, new_low_split = low_split.split_at_indices([diff])
union_buffer.append(diff_split)
union_buffer_size += diff
# We overwrite the old low split and don't advance the low
# pointer since (1) the old low split can be discarded,
# (2) the leftover subsplit is guaranteed to be smaller
# than the old low split, and (3) the low split should be
# the smallest split in the candidate split list, which is
# this subsplit.
splits[low] = new_low_split
counts_cache[new_low_split._get_uuid()] = low_count - diff
if union_buffer_size == target_size:
# Once the union buffer is full, we union together the
# splits.
assert len(union_buffer) > 1, union_buffer
first_ds = union_buffer[0]
new_split = first_ds.union(*union_buffer[1:])
new_splits.append(new_split)
# Clear the union buffer.
union_buffer = []
union_buffer_size = 0
if len(new_splits) == num_splits_required:
# Short-circuit if we've reached the desired number of
# splits.
break
return new_splits
def equalize(splits: List[Dataset[T]], num_splits: int) -> List[Dataset[T]]:
if not equal:
return splits
counts = {s._get_uuid(): s.count() for s in splits}
total_rows = sum(counts.values())
# Number of rows for each split.
target_size = total_rows // num_splits
# Partition splits.
smaller_splits, larger_splits = _partition_splits(
splits, target_size, counts
)
if len(smaller_splits) == 0 and num_splits < len(splits):
# All splits are already equal.
return splits
# Split larger splits.
new_splits, leftovers = _equalize_larger_splits(
larger_splits, target_size, counts, num_splits
)
# Short-circuit if we've already reached the desired number of
# splits.
if len(new_splits) == num_splits:
return new_splits
# Add leftovers to small splits and re-sort.
smaller_splits += leftovers
smaller_splits = sorted(smaller_splits, key=lambda s: counts[s._get_uuid()])
# Union smaller splits.
new_splits_small = _equalize_smaller_splits(
smaller_splits, target_size, counts, num_splits - len(new_splits)
)
new_splits.extend(new_splits_small)
return new_splits
block_refs, metadata = zip(*blocks.get_blocks_with_metadata())
metadata_mapping = {b: m for b, m in zip(block_refs, metadata)}
if locality_hints is None:
ds = equalize(
[
Dataset(
ExecutionPlan(
BlockList(
list(blocks), [metadata_mapping[b] for b in blocks]
),
stats,
),
self._epoch,
self._lazy,
)
for blocks in np.array_split(block_refs, n)
],
n,
)
assert len(ds) == n, (ds, n)
return ds
# If the locality_hints is set, we use a two-round greedy algorithm
# to co-locate the blocks with the actors based on block
# and actor's location (node_id).
#
# The split algorithm tries to allocate equally-sized blocks regardless
# of locality. Thus we first calculate the expected number of blocks
# for each split.
#
# In the first round, for each actor, we look for all blocks that
# match the actor's node_id, then allocate those matched blocks to
# this actor until we reach the limit(expected number).
#
# In the second round: fill each actor's allocation with
# remaining unallocated blocks until we reach the limit.
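        # Editorial illustration (not from the original source): for example,
        #     build_allocation_size_map(5, [a1, a2])  # -> {a1: 3, a2: 2}
        # round one then fills each actor's quota with blocks stored on its own
        # node, and round two tops up any shortfall from the unallocated rest.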
def build_allocation_size_map(
num_blocks: int, actors: List[Any]
) -> Dict[Any, int]:
"""Given the total number of blocks and a list of actors, calcuate
the expected number of blocks to allocate for each actor.
"""
num_actors = len(actors)
num_blocks_per_actor = num_blocks // num_actors
            num_blocks_left = num_blocks - num_blocks_per_actor * num_actors
num_blocks_by_actor = {}
for i, actor in enumerate(actors):
num_blocks_by_actor[actor] = num_blocks_per_actor
if i < num_blocks_left:
num_blocks_by_actor[actor] += 1
return num_blocks_by_actor
def build_block_refs_by_node_id(
blocks: List[ObjectRef[Block]],
) -> Dict[str, List[ObjectRef[Block]]]:
"""Build the reverse index from node_id to block_refs. For
simplicity, if the block is stored on multiple nodes we
only pick the first one.
"""
block_ref_locations = ray.experimental.get_object_locations(blocks)
block_refs_by_node_id = collections.defaultdict(list)
for block_ref in blocks:
node_ids = block_ref_locations.get(block_ref, {}).get("node_ids", [])
node_id = node_ids[0] if node_ids else None
block_refs_by_node_id[node_id].append(block_ref)
return block_refs_by_node_id
def build_node_id_by_actor(actors: List[Any]) -> Dict[Any, str]:
"""Build a map from a actor to its node_id."""
actors_state = ray.state.actors()
return {
actor: actors_state.get(actor._actor_id.hex(), {})
.get("Address", {})
.get("NodeID")
for actor in actors
}
# expected number of blocks to be allocated for each actor
expected_block_count_by_actor = build_allocation_size_map(
len(block_refs), locality_hints
)
# the reverse index from node_id to block_refs
block_refs_by_node_id = build_block_refs_by_node_id(block_refs)
# the map from actor to its node_id
node_id_by_actor = build_node_id_by_actor(locality_hints)
allocation_per_actor = collections.defaultdict(list)
# In the first round, for each actor, we look for all blocks that
# match the actor's node_id, then allocate those matched blocks to
# this actor until we reach the limit(expected number)
for actor in locality_hints:
node_id = node_id_by_actor[actor]
matching_blocks = block_refs_by_node_id[node_id]
expected_block_count = expected_block_count_by_actor[actor]
allocation = []
while matching_blocks and len(allocation) < expected_block_count:
allocation.append(matching_blocks.pop())
allocation_per_actor[actor] = allocation
# In the second round: fill each actor's allocation with
# remaining unallocated blocks until we reach the limit
remaining_block_refs = list(
itertools.chain.from_iterable(block_refs_by_node_id.values())
)
for actor in locality_hints:
while (
len(allocation_per_actor[actor]) < expected_block_count_by_actor[actor]
):
allocation_per_actor[actor].append(remaining_block_refs.pop())
assert len(remaining_block_refs) == 0, len(remaining_block_refs)
return equalize(
[
Dataset(
ExecutionPlan(
BlockList(
allocation_per_actor[actor],
[metadata_mapping[b] for b in allocation_per_actor[actor]],
),
stats,
),
self._epoch,
self._lazy,
)
for actor in locality_hints
],
n,
)
def split_at_indices(self, indices: List[int]) -> List["Dataset[T]"]:
"""Split the dataset at the given indices (like np.split).
Examples:
>>> import ray
>>> ds = ray.data.range(10) # doctest: +SKIP
>>> d1, d2, d3 = ds.split_at_indices([2, 5]) # doctest: +SKIP
>>> d1.take() # doctest: +SKIP
[0, 1]
>>> d2.take() # doctest: +SKIP
[2, 3, 4]
>>> d3.take() # doctest: +SKIP
[5, 6, 7, 8, 9]
Time complexity: O(num splits)
See also: ``Dataset.split``
Args:
indices: List of sorted integers which indicate where the dataset
will be split. If an index exceeds the length of the dataset,
an empty dataset will be returned.
Returns:
The dataset splits.
"""
if len(indices) < 1:
raise ValueError("indices must be at least of length 1")
if sorted(indices) != indices:
raise ValueError("indices must be sorted")
if indices[0] < 0:
raise ValueError("indices must be positive")
rest = self
splits = []
prev = 0
for i in indices:
first, rest = rest._split(i - prev, return_right_half=True)
prev = i
splits.append(first)
splits.append(rest)
return splits
def union(self, *other: List["Dataset[T]"]) -> "Dataset[T]":
"""Combine this dataset with others of the same type.
The order of the blocks in the datasets is preserved, as is the
relative ordering between the datasets passed in the argument list.
Args:
other: List of datasets to combine with this one. The datasets
must have the same schema as this dataset, otherwise the
behavior is undefined.
Returns:
A new dataset holding the union of their data.
"""
start_time = time.perf_counter()
context = DatasetContext.get_current()
tasks: List[ReadTask] = []
block_partition_refs: List[ObjectRef[BlockPartition]] = []
block_partition_meta_refs: List[ObjectRef[BlockPartitionMetadata]] = []
datasets = [self] + list(other)
for ds in datasets:
bl = ds._plan.execute()
if isinstance(bl, LazyBlockList):
tasks.extend(bl._tasks)
block_partition_refs.extend(bl._block_partition_refs)
block_partition_meta_refs.extend(bl._block_partition_meta_refs)
else:
tasks.extend([ReadTask(lambda: None, meta) for meta in bl._metadata])
if context.block_splitting_enabled:
block_partition_refs.extend(
[ray.put([(b, m)]) for b, m in bl.get_blocks_with_metadata()]
)
else:
block_partition_refs.extend(bl.get_blocks())
block_partition_meta_refs.extend(
[ray.put(meta) for meta in bl._metadata]
)
epochs = [ds._get_epoch() for ds in datasets]
        max_epoch = max(epochs)  # works even when no other datasets are passed
if len(set(epochs)) > 1:
global _epoch_warned
if not _epoch_warned:
logger.warning(
"Dataset contains data from multiple epochs: {}, "
"likely due to a `rewindow()` call. The higher epoch "
"number {} will be used. This warning will not "
"be shown again.".format(set(epochs), max_epoch)
)
_epoch_warned = True
dataset_stats = DatasetStats(
stages={"union": []},
parent=[d._plan.stats() for d in datasets],
)
dataset_stats.time_total_s = time.perf_counter() - start_time
return Dataset(
ExecutionPlan(
LazyBlockList(tasks, block_partition_refs, block_partition_meta_refs),
dataset_stats,
),
max_epoch,
self._lazy,
)
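    # Editorial example (not from the original source), in the spirit of the
    # other docstrings; the datasets being combined must share the same schema:
    #
    #     >>> import ray                       # doctest: +SKIP
    #     >>> ds1 = ray.data.range(5)          # doctest: +SKIP
    #     >>> ds2 = ray.data.range(5)          # doctest: +SKIP
    #     >>> ds1.union(ds2).count()  # -> 10  # doctest: +SKIP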
def groupby(self, key: Optional[KeyFn]) -> "GroupedDataset[T]":
"""Group the dataset by the key function or column name.
This is a lazy operation.
Examples:
>>> import ray
>>> # Group by a key function and aggregate.
>>> ray.data.range(100).groupby(lambda x: x % 3).count() # doctest: +SKIP
>>> # Group by an Arrow table column and aggregate.
>>> ray.data.from_items([ # doctest: +SKIP
... {"A": x % 3, "B": x} for x in range(100)]).groupby( # doctest: +SKIP
... "A").count() # doctest: +SKIP
Time complexity: O(dataset size * log(dataset size / parallelism))
Args:
key: A key function or Arrow column name. If this is None, the
grouping is global.
Returns:
A lazy GroupedDataset that can be aggregated later.
"""
from ray.data.grouped_dataset import GroupedDataset
# Always allow None since groupby interprets that as grouping all
# records into a single global group.
if key is not None:
_validate_key_fn(self, key)
return GroupedDataset(self, key)
def aggregate(self, *aggs: AggregateFn) -> U:
"""Aggregate the entire dataset as one group.
This is a blocking operation.
Examples:
>>> import ray
>>> from ray.data.aggregate import Max, Mean
>>> ray.data.range(100).aggregate(Max()) # doctest: +SKIP
>>> ray.data.range_arrow(100).aggregate( # doctest: +SKIP
... Max("value"), Mean("value")) # doctest: +SKIP
Time complexity: O(dataset size / parallelism)
Args:
aggs: Aggregations to do.
Returns:
If the input dataset is a simple dataset then the output is
a tuple of ``(agg1, agg2, ...)`` where each tuple element is
the corresponding aggregation result.
If the input dataset is an Arrow dataset then the output is
an ``ArrowRow`` where each column is the corresponding
aggregation result.
If the dataset is empty, return ``None``.
"""
ret = self.groupby(None).aggregate(*aggs).take(1)
return ret[0] if len(ret) > 0 else None
def sum(
self, on: Optional[Union[KeyFn, List[KeyFn]]] = None, ignore_nulls: bool = True
) -> U:
"""Compute sum over entire dataset.
This is a blocking operation.
Examples:
>>> import ray
>>> ray.data.range(100).sum() # doctest: +SKIP
>>> ray.data.from_items([ # doctest: +SKIP
... (i, i**2) # doctest: +SKIP
... for i in range(100)]).sum(lambda x: x[1]) # doctest: +SKIP
>>> ray.data.range_arrow(100).sum("value") # doctest: +SKIP
>>> ray.data.from_items([ # doctest: +SKIP
... {"A": i, "B": i**2} # doctest: +SKIP
... for i in range(100)]).sum(["A", "B"]) # doctest: +SKIP
Args:
on: The data subset on which to compute the sum.
- For a simple dataset: it can be a callable or a list thereof,
and the default is to return a scalar sum of all rows.
- For an Arrow dataset: it can be a column name or a list
thereof, and the default is to return an ``ArrowRow``
containing the column-wise sum of all columns.
ignore_nulls: Whether to ignore null values. If ``True``, null
values will be ignored when computing the sum; if ``False``,
if a null value is encountered, the output will be None.
We consider np.nan, None, and pd.NaT to be null values.
Default is ``True``.
Returns:
The sum result.
For a simple dataset, the output is:
- ``on=None``: a scalar representing the sum of all rows,
- ``on=callable``: a scalar representing the sum of the outputs of
the callable called on each row,
            - ``on=[callable_1, ..., callable_n]``: a tuple of
``(sum_1, ..., sum_n)`` representing the sum of the outputs of
the corresponding callables called on each row.
For an Arrow dataset, the output is:
- ``on=None``: an ArrowRow containing the column-wise sum of all
columns,
- ``on="col"``: a scalar representing the sum of all items in
column ``"col"``,
- ``on=["col_1", ..., "col_n"]``: an n-column ``ArrowRow``
containing the column-wise sum of the provided columns.
If the dataset is empty, all values are null, or any value is null
AND ``ignore_nulls`` is ``False``, then the output will be None.
"""
ret = self._aggregate_on(Sum, on, ignore_nulls)
return self._aggregate_result(ret)
def min(
self, on: Optional[Union[KeyFn, List[KeyFn]]] = None, ignore_nulls: bool = True
) -> U:
"""Compute minimum over entire dataset.
This is a blocking operation.
Examples:
>>> import ray
>>> ray.data.range(100).min() # doctest: +SKIP
>>> ray.data.from_items([ # doctest: +SKIP
... (i, i**2) # doctest: +SKIP
... for i in range(100)]).min(lambda x: x[1]) # doctest: +SKIP
>>> ray.data.range_arrow(100).min("value") # doctest: +SKIP
>>> ray.data.from_items([ # doctest: +SKIP
... {"A": i, "B": i**2} # doctest: +SKIP
... for i in range(100)]).min(["A", "B"]) # doctest: +SKIP
Args:
on: The data subset on which to compute the min.
- For a simple dataset: it can be a callable or a list thereof,
and the default is to return a scalar min of all rows.
- For an Arrow dataset: it can be a column name or a list
thereof, and the default is to return an ``ArrowRow``
containing the column-wise min of all columns.
ignore_nulls: Whether to ignore null values. If ``True``, null
values will be ignored when computing the min; if ``False``,
if a null value is encountered, the output will be None.
We consider np.nan, None, and pd.NaT to be null values.
Default is ``True``.
Returns:
The min result.
For a simple dataset, the output is:
- ``on=None``: a scalar representing the min of all rows,
- ``on=callable``: a scalar representing the min of the outputs
of the callable called on each row,
            - ``on=[callable_1, ..., callable_n]``: a tuple of
``(min_1, ..., min_n)`` representing the min of the outputs
of the corresponding callables called on each row.
For an Arrow dataset, the output is:
- ``on=None``: an ``ArrowRow`` containing the column-wise min of
all columns,
- ``on="col"``: a scalar representing the min of all items in
column ``"col"``,
- ``on=["col_1", ..., "col_n"]``: an n-column ``ArrowRow``
containing the column-wise min of the provided columns.
If the dataset is empty, all values are null, or any value is null
AND ``ignore_nulls`` is ``False``, then the output will be None.
"""
ret = self._aggregate_on(Min, on, ignore_nulls)
return self._aggregate_result(ret)
def max(
self, on: Optional[Union[KeyFn, List[KeyFn]]] = None, ignore_nulls: bool = True
) -> U:
"""Compute maximum over entire dataset.
This is a blocking operation.
Examples:
>>> import ray
>>> ray.data.range(100).max() # doctest: +SKIP
>>> ray.data.from_items([ # doctest: +SKIP
... (i, i**2) # doctest: +SKIP
... for i in range(100)]).max(lambda x: x[1]) # doctest: +SKIP
>>> ray.data.range_arrow(100).max("value") # doctest: +SKIP
>>> ray.data.from_items([ # doctest: +SKIP
... {"A": i, "B": i**2} # doctest: +SKIP
... for i in range(100)]).max(["A", "B"]) # doctest: +SKIP
Args:
on: The data subset on which to compute the max.
- For a simple dataset: it can be a callable or a list thereof,
and the default is to return a scalar max of all rows.
- For an Arrow dataset: it can be a column name or a list
thereof, and the default is to return an ``ArrowRow``
containing the column-wise max of all columns.
ignore_nulls: Whether to ignore null values. If ``True``, null
values will be ignored when computing the max; if ``False``,
if a null value is encountered, the output will be None.
We consider np.nan, None, and pd.NaT to be null values.
Default is ``True``.
Returns:
The max result.
For a simple dataset, the output is:
- ``on=None``: a scalar representing the max of all rows,
- ``on=callable``: a scalar representing the max of the outputs of
the callable called on each row,
            - ``on=[callable_1, ..., callable_n]``: a tuple of
``(max_1, ..., max_n)`` representing the max of the outputs of
the corresponding callables called on each row.
For an Arrow dataset, the output is:
- ``on=None``: an ``ArrowRow`` containing the column-wise max of
all columns,
- ``on="col"``: a scalar representing the max of all items in
column ``"col"``,
- ``on=["col_1", ..., "col_n"]``: an n-column ``ArrowRow``
containing the column-wise max of the provided columns.
If the dataset is empty, all values are null, or any value is null
AND ``ignore_nulls`` is ``False``, then the output will be None.
"""
ret = self._aggregate_on(Max, on, ignore_nulls)
return self._aggregate_result(ret)
def mean(
self, on: Optional[Union[KeyFn, List[KeyFn]]] = None, ignore_nulls: bool = True
) -> U:
"""Compute mean over entire dataset.
This is a blocking operation.
Examples:
>>> import ray
>>> ray.data.range(100).mean() # doctest: +SKIP
>>> ray.data.from_items([ # doctest: +SKIP
... (i, i**2) # doctest: +SKIP
... for i in range(100)]).mean(lambda x: x[1]) # doctest: +SKIP
>>> ray.data.range_arrow(100).mean("value") # doctest: +SKIP
>>> ray.data.from_items([ # doctest: +SKIP
... {"A": i, "B": i**2} # doctest: +SKIP
... for i in range(100)]).mean(["A", "B"]) # doctest: +SKIP
Args:
on: The data subset on which to compute the mean.
- For a simple dataset: it can be a callable or a list thereof,
and the default is to return a scalar mean of all rows.
- For an Arrow dataset: it can be a column name or a list
thereof, and the default is to return an ``ArrowRow``
containing the column-wise mean of all columns.
ignore_nulls: Whether to ignore null values. If ``True``, null
values will be ignored when computing the mean; if ``False``,
if a null value is encountered, the output will be None.
We consider np.nan, None, and pd.NaT to be null values.
Default is ``True``.
Returns:
The mean result.
For a simple dataset, the output is:
- ``on=None``: a scalar representing the mean of all rows,
- ``on=callable``: a scalar representing the mean of the outputs
of the callable called on each row,
            - ``on=[callable_1, ..., callable_n]``: a tuple of
``(mean_1, ..., mean_n)`` representing the mean of the outputs
of the corresponding callables called on each row.
For an Arrow dataset, the output is:
- ``on=None``: an ``ArrowRow`` containing the column-wise mean of
all columns,
- ``on="col"``: a scalar representing the mean of all items in
column ``"col"``,
- ``on=["col_1", ..., "col_n"]``: an n-column ``ArrowRow``
containing the column-wise mean of the provided columns.
If the dataset is empty, all values are null, or any value is null
AND ``ignore_nulls`` is ``False``, then the output will be None.
"""
ret = self._aggregate_on(Mean, on, ignore_nulls)
return self._aggregate_result(ret)
def std(
self,
on: Optional[Union[KeyFn, List[KeyFn]]] = None,
ddof: int = 1,
ignore_nulls: bool = True,
) -> U:
"""Compute standard deviation over entire dataset.
This is a blocking operation.
Examples:
>>> import ray # doctest: +SKIP
>>> ray.data.range(100).std() # doctest: +SKIP
>>> ray.data.from_items([ # doctest: +SKIP
... (i, i**2) # doctest: +SKIP
... for i in range(100)]).std(lambda x: x[1]) # doctest: +SKIP
>>> ray.data.range_arrow(100).std("value", ddof=0) # doctest: +SKIP
>>> ray.data.from_items([ # doctest: +SKIP
... {"A": i, "B": i**2} # doctest: +SKIP
... for i in range(100)]).std(["A", "B"]) # doctest: +SKIP
        NOTE: This uses Welford's online method for an accumulator-style
        computation of the standard deviation. This method was chosen for
        its numerical stability and because it can be computed in a single
        pass (a commented sketch of the update step follows this method).
        This may give different (but more accurate) results than NumPy, Pandas,
        and sklearn, which use a less numerically stable two-pass algorithm.
See
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
Args:
on: The data subset on which to compute the std.
- For a simple dataset: it can be a callable or a list thereof,
and the default is to return a scalar std of all rows.
- For an Arrow dataset: it can be a column name or a list
thereof, and the default is to return an ``ArrowRow``
containing the column-wise std of all columns.
ddof: Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
ignore_nulls: Whether to ignore null values. If ``True``, null
values will be ignored when computing the std; if ``False``,
if a null value is encountered, the output will be None.
We consider np.nan, None, and pd.NaT to be null values.
Default is ``True``.
Returns:
The standard deviation result.
For a simple dataset, the output is:
- ``on=None``: a scalar representing the std of all rows,
- ``on=callable``: a scalar representing the std of the outputs of
the callable called on each row,
- ``on=[callable_1, ..., callable_n]``: a tuple of
``(std_1, ..., std_n)`` representing the std of the outputs of
the corresponding callables called on each row.
For an Arrow dataset, the output is:
- ``on=None``: an ``ArrowRow`` containing the column-wise std of
all columns,
- ``on="col"``: a scalar representing the std of all items in
column ``"col"``,
- ``on=["col_1", ..., "col_n"]``: an n-column ``ArrowRow``
containing the column-wise std of the provided columns.
If the dataset is empty, all values are null, or any value is null
AND ``ignore_nulls`` is ``False``, then the output will be None.
"""
ret = self._aggregate_on(Std, on, ignore_nulls, ddof=ddof)
return self._aggregate_result(ret)
def sort(
self, key: Optional[KeyFn] = None, descending: bool = False
) -> "Dataset[T]":
# TODO ds.sort(lambda ...) fails with:
# Callable key '<function <lambda> at 0x1b07a4cb0>' requires
# dataset format to be 'simple', was 'arrow'.
# How do I create something "simple" here?
"""Sort the dataset by the specified key column or key function.
This is a blocking operation.
Examples:
>>> import ray # doctest: +SKIP
>>> # Sort using the entire record as the key.
>>> ds = ray.data.range(100) # doctest: +SKIP
>>> ds.sort() # doctest: +SKIP
>>> # Sort by a single column in descending order.
>>> ds = ray.data.from_items( # doctest: +SKIP
... [{"value": i} for i in range(1000)])
>>> ds.sort("value", descending=True) # doctest: +SKIP
>>> # Sort by a key function.
>>> ds.sort(lambda record: record["value"]) # doctest: +SKIP
Time complexity: O(dataset size * log(dataset size / parallelism))
Args:
key:
- For Arrow tables, key must be a single column name.
- For datasets of Python objects, key can be either a lambda
function that returns a comparison key to sort by, or None
to sort by the original value.
descending: Whether to sort in descending order.
Returns:
A new, sorted dataset.
"""
def do_sort(block_list, clear_input_blocks: bool, *_):
# Handle empty dataset.
if block_list.initial_num_blocks() == 0:
return block_list, {}
if clear_input_blocks:
blocks = block_list.copy()
block_list.clear()
else:
blocks = block_list
if isinstance(key, list):
if not key:
raise ValueError("`key` must be a list of non-zero length")
for subkey in key:
_validate_key_fn(self, subkey)
else:
_validate_key_fn(self, key)
return sort_impl(blocks, clear_input_blocks, key, descending)
plan = self._plan.with_stage(AllToAllStage("sort", None, do_sort))
return Dataset(plan, self._epoch, self._lazy)
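# Illustrative usage sketch (not part of the Ray source above): sorting an
# Arrow-format dataset by a column in descending order, mirroring the docstring
# examples. Assumes a local ``ray`` install.
import ray

to_sort = ray.data.from_items([{"value": i} for i in range(10)])
top3 = to_sort.sort("value", descending=True).take(3)
print(top3)  # the three largest "value" rows come first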
def zip(self, other: "Dataset[U]") -> "Dataset[(T, U)]":
"""Zip this dataset with the elements of another.
The datasets must have the same number of rows, block types, and block sizes
(e.g., one was produced from a ``.map()`` of another). For Arrow
blocks, the schema will be concatenated, and any duplicate column
names disambiguated with _1, _2, etc. suffixes.
Time complexity: O(dataset size / parallelism)
Args:
other: The dataset to zip with on the right hand side.
Examples:
>>> import ray
>>> ds = ray.data.range(5) # doctest: +SKIP
>>> ds.zip(ds).take() # doctest: +SKIP
[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
Returns:
A Dataset with (k, v) pairs (or concatenated Arrow schema) where k
comes from the first dataset and v comes from the second.
"""
def do_zip_all(block_list, clear_input_blocks: bool, *_):
blocks1 = block_list.get_blocks()
blocks2 = other.get_internal_block_refs()
if clear_input_blocks:
block_list.clear()
if len(blocks1) != len(blocks2):
# TODO(ekl) consider supporting if num_rows are equal.
raise ValueError(
"Cannot zip dataset of different num blocks: {} vs {}".format(
len(blocks1), len(blocks2)
)
)
def do_zip(block1: Block, block2: Block) -> (Block, BlockMetadata):
stats = BlockExecStats.builder()
b1 = BlockAccessor.for_block(block1)
result = b1.zip(block2)
br = BlockAccessor.for_block(result)
return result, br.get_metadata(input_files=[], exec_stats=stats.build())
do_zip_fn = cached_remote_fn(do_zip, num_returns=2)
blocks = []
metadata = []
for b1, b2 in zip(blocks1, blocks2):
res, meta = do_zip_fn.remote(b1, b2)
blocks.append(res)
metadata.append(meta)
# Early release memory.
del blocks1, blocks2
# TODO(ekl) it might be nice to have a progress bar here.
metadata = ray.get(metadata)
blocks = BlockList(blocks, metadata)
return blocks, {}
plan = self._plan.with_stage(AllToAllStage("zip", None, do_zip_all))
return Dataset(plan, self._epoch, self._lazy)
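# Illustrative usage sketch (not part of the Ray source above): zipping two
# datasets built with the same block structure, so row and block counts line up
# as required. Assumes a local ``ray`` install.
import ray

left = ray.data.range(5)
right = ray.data.range(5).map(lambda x: x * 10)
print(left.zip(right).take())  # [(0, 0), (1, 10), (2, 20), (3, 30), (4, 40)]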
def limit(self, limit: int) -> "Dataset[T]":
"""Limit the dataset to the first number of records specified.
Examples:
>>> import ray
>>> ds = ray.data.range(1000) # doctest: +SKIP
>>> ds.limit(100).map(lambda x: x * 2).take() # doctest: +SKIP
Time complexity: O(limit specified)
Args:
limit: The size of the dataset to truncate to.
Returns:
The truncated dataset.
"""
left, _ = self._split(limit, return_right_half=False)
return left
def take(self, limit: int = 20) -> List[T]:
"""Take up to the given number of records from the dataset.
Time complexity: O(limit specified)
Args:
limit: The max number of records to return.
Returns:
A list of up to ``limit`` records from the dataset.
"""
output = []
for row in self.iter_rows():
output.append(row)
if len(output) >= limit:
break
return output
def take_all(self, limit: int = 100000) -> List[T]:
"""Take all the records in the dataset.
Time complexity: O(dataset size)
Args:
limit: Raise an error if the size exceeds the specified limit.
Returns:
A list of all the records in the dataset.
"""
output = []
for row in self.iter_rows():
output.append(row)
if len(output) > limit:
raise ValueError(
"The dataset has more than the given limit of {} records.".format(
limit
)
)
return output
def show(self, limit: int = 20) -> None:
"""Print up to the given number of records from the dataset.
Time complexity: O(limit specified)
Args:
limit: The max number of records to print.
"""
for row in self.take(limit):
print(row)
def count(self) -> int:
"""Count the number of records in the dataset.
Time complexity: O(dataset size / parallelism), O(1) for parquet
Returns:
The number of records in the dataset.
"""
# Handle empty dataset.
if self.num_blocks() == 0:
return 0
# For parquet, we can return the count directly from metadata.
meta_count = self._meta_count()
if meta_count is not None:
return meta_count
get_num_rows = cached_remote_fn(_get_num_rows)
return sum(
ray.get(
[get_num_rows.remote(block) for block in self.get_internal_block_refs()]
)
)
def schema(
self, fetch_if_missing: bool = False
) -> Union[type, "pyarrow.lib.Schema"]:
"""Return the schema of the dataset.
For datasets of Arrow records, this will return the Arrow schema.
For datasets of Python objects, this returns their Python type.
Time complexity: O(1)
Args:
fetch_if_missing: If True, synchronously fetch the schema if it's
not known. Default is False, in which case None is returned if
the schema is not known.
Returns:
The Python type or Arrow schema of the records, or None if the
schema is not known and fetch_if_missing is False.
"""
return self._plan.schema(fetch_if_missing=fetch_if_missing)
def num_blocks(self) -> int:
"""Return the number of blocks of this dataset.
Note that during read and transform operations, the number of blocks
may be dynamically adjusted to respect memory limits, which can
increase the block count at runtime.
Time complexity: O(1)
Returns:
The number of blocks of this dataset.
"""
return self._plan.initial_num_blocks()
def size_bytes(self) -> int:
"""Return the in-memory size of the dataset.
Time complexity: O(1)
Returns:
The in-memory size of the dataset in bytes, or None if the
in-memory size is not known.
"""
metadata = self._plan.execute().get_metadata()
if not metadata or metadata[0].size_bytes is None:
return None
return sum(m.size_bytes for m in metadata)
def input_files(self) -> List[str]:
"""Return the list of input files for the dataset.
Time complexity: O(num input files)
Returns:
The list of input files used to create the dataset, or an empty
list if the input files are not known.
"""
metadata = self._plan.execute().get_metadata()
files = set()
for m in metadata:
for f in m.input_files:
files.add(f)
return list(files)
def write_parquet(
self,
path: str,
*,
filesystem: Optional["pyarrow.fs.FileSystem"] = None,
try_create_dir: bool = True,
arrow_open_stream_args: Optional[Dict[str, Any]] = None,
block_path_provider: BlockWritePathProvider = DefaultBlockWritePathProvider(),
arrow_parquet_args_fn: Callable[[], Dict[str, Any]] = lambda: {},
**arrow_parquet_args,
) -> None:
"""Write the dataset to parquet.
This is only supported for datasets convertible to Arrow records.
To control the number of files, use ``.repartition()``.
Unless a custom block path provider is given, the format of the output
files will be {uuid}_{block_idx}.parquet, where ``uuid`` is a unique
id for the dataset.
Examples:
>>> import ray
>>> ds = ray.data.range(100) # doctest: +SKIP
>>> ds.write_parquet("s3://bucket/path") # doctest: +SKIP
Time complexity: O(dataset size / parallelism)
Args:
path: The path to the destination root directory, where Parquet
files will be written to.
filesystem: The filesystem implementation to write to.
try_create_dir: Try to create all directories in destination path
if True. Does nothing if all directories already exist.
arrow_open_stream_args: kwargs passed to
pyarrow.fs.FileSystem.open_output_stream
block_path_provider: BlockWritePathProvider implementation to
write each dataset block to a custom output path.
arrow_parquet_args_fn: Callable that returns a dictionary of write
arguments to use when writing each block to a file. Overrides
any duplicate keys from arrow_parquet_args. This should be used
instead of arrow_parquet_args if any of your write arguments
cannot be pickled, or if you'd like to lazily resolve the write
arguments for each dataset block.
arrow_parquet_args: Options to pass to
pyarrow.parquet.write_table(), which is used to write out each
block to a file.
"""
self.write_datasource(
ParquetDatasource(),
path=path,
dataset_uuid=self._uuid,
filesystem=filesystem,
try_create_dir=try_create_dir,
open_stream_args=arrow_open_stream_args,
block_path_provider=block_path_provider,
write_args_fn=arrow_parquet_args_fn,
**arrow_parquet_args,
)
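# Illustrative usage sketch (not part of the Ray source above): writing Parquet
# with lazily resolved per-block write options. The output directory is just an
# example path; ``compression`` is forwarded to pyarrow.parquet.write_table().
import ray

parquet_ds = ray.data.range_arrow(100)
parquet_ds.write_parquet(
    "/tmp/ray_parquet_out",  # example destination, adjust as needed
    arrow_parquet_args_fn=lambda: {"compression": "snappy"},
)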
def write_json(
self,
path: str,
*,
filesystem: Optional["pyarrow.fs.FileSystem"] = None,
try_create_dir: bool = True,
arrow_open_stream_args: Optional[Dict[str, Any]] = None,
block_path_provider: BlockWritePathProvider = DefaultBlockWritePathProvider(),
pandas_json_args_fn: Callable[[], Dict[str, Any]] = lambda: {},
**pandas_json_args,
) -> None:
"""Write the dataset to json.
This is only supported for datasets convertible to Arrow records.
To control the number of files, use ``.repartition()``.
Unless a custom block path provider is given, the format of the output
files will be {self._uuid}_{block_idx}.json, where ``uuid`` is a
unique id for the dataset.
Examples:
>>> import ray
>>> ds = ray.data.range(100) # doctest: +SKIP
>>> ds.write_json("s3://bucket/path") # doctest: +SKIP
Time complexity: O(dataset size / parallelism)
Args:
path: The path to the destination root directory, where json
files will be written to.
filesystem: The filesystem implementation to write to.
try_create_dir: Try to create all directories in destination path
if True. Does nothing if all directories already exist.
arrow_open_stream_args: kwargs passed to
pyarrow.fs.FileSystem.open_output_stream
block_path_provider: BlockWritePathProvider implementation to
write each dataset block to a custom output path.
pandas_json_args_fn: Callable that returns a dictionary of write
arguments to use when writing each block to a file. Overrides
any duplicate keys from pandas_json_args. This should be used
instead of pandas_json_args if any of your write arguments
cannot be pickled, or if you'd like to lazily resolve the write
arguments for each dataset block.
pandas_json_args: These args will be passed to
pandas.DataFrame.to_json(), which we use under the hood to
write out each Dataset block. These are
dict(orient="records", lines=True) by default.
"""
self.write_datasource(
JSONDatasource(),
path=path,
dataset_uuid=self._uuid,
filesystem=filesystem,
try_create_dir=try_create_dir,
open_stream_args=arrow_open_stream_args,
block_path_provider=block_path_provider,
write_args_fn=pandas_json_args_fn,
**pandas_json_args,
)
def write_csv(
self,
path: str,
*,
filesystem: Optional["pyarrow.fs.FileSystem"] = None,
try_create_dir: bool = True,
arrow_open_stream_args: Optional[Dict[str, Any]] = None,
block_path_provider: BlockWritePathProvider = DefaultBlockWritePathProvider(),
arrow_csv_args_fn: Callable[[], Dict[str, Any]] = lambda: {},
**arrow_csv_args,
) -> None:
"""Write the dataset to csv.
This is only supported for datasets convertible to Arrow records.
To control the number of files, use ``.repartition()``.
Unless a custom block path provider is given, the format of the output
files will be {uuid}_{block_idx}.csv, where ``uuid`` is a unique id
for the dataset.
Examples:
>>> import ray
>>> ds = ray.data.range(100) # doctest: +SKIP
>>> ds.write_csv("s3://bucket/path") # doctest: +SKIP
Time complexity: O(dataset size / parallelism)
Args:
path: The path to the destination root directory, where csv
files will be written to.
filesystem: The filesystem implementation to write to.
try_create_dir: Try to create all directories in destination path
if True. Does nothing if all directories already exist.
arrow_open_stream_args: kwargs passed to
pyarrow.fs.FileSystem.open_output_stream
block_path_provider: BlockWritePathProvider implementation to
write each dataset block to a custom output path.
arrow_csv_args_fn: Callable that returns a dictionary of write
arguments to use when writing each block to a file. Overrides
any duplicate keys from arrow_csv_args. This should be used
instead of arrow_csv_args if any of your write arguments
cannot be pickled, or if you'd like to lazily resolve the write
arguments for each dataset block.
arrow_csv_args: Other CSV write options to pass to pyarrow.
"""
self.write_datasource(
CSVDatasource(),
path=path,
dataset_uuid=self._uuid,
filesystem=filesystem,
try_create_dir=try_create_dir,
open_stream_args=arrow_open_stream_args,
block_path_provider=block_path_provider,
write_args_fn=arrow_csv_args_fn,
**arrow_csv_args,
)
def write_numpy(
self,
path: str,
*,
column: str = "value",
filesystem: Optional["pyarrow.fs.FileSystem"] = None,
try_create_dir: bool = True,
arrow_open_stream_args: Optional[Dict[str, Any]] = None,
block_path_provider: BlockWritePathProvider = DefaultBlockWritePathProvider(),
) -> None:
"""Write a tensor column of the dataset to npy files.
This is only supported for datasets convertible to Arrow records that
contain a TensorArray column. To control the number of files, use
``.repartition()``.
Unless a custom block path provider is given, the format of the output
files will be {self._uuid}_{block_idx}.npy, where ``uuid`` is a unique
id for the dataset.
Examples:
>>> import ray
>>> ds = ray.data.range(100) # doctest: +SKIP
>>> ds.write_numpy("s3://bucket/path") # doctest: +SKIP
Time complexity: O(dataset size / parallelism)
Args:
path: The path to the destination root directory, where npy
files will be written to.
column: The name of the table column that contains the tensor to
be written. This defaults to "value".
filesystem: The filesystem implementation to write to.
try_create_dir: Try to create all directories in destination path
if True. Does nothing if all directories already exist.
arrow_open_stream_args: kwargs passed to
pyarrow.fs.FileSystem.open_output_stream
block_path_provider: BlockWritePathProvider implementation to
write each dataset block to a custom output path.
"""
self.write_datasource(
NumpyDatasource(),
path=path,
dataset_uuid=self._uuid,
column=column,
filesystem=filesystem,
try_create_dir=try_create_dir,
open_stream_args=arrow_open_stream_args,
block_path_provider=block_path_provider,
)
def write_datasource(self, datasource: Datasource[T], **write_args) -> None:
"""Write the dataset to a custom datasource.
Examples:
>>> import ray
>>> from ray.data.datasource import Datasource
>>> ds = ray.data.range(100) # doctest: +SKIP
>>> class CustomDatasource(Datasource): # doctest: +SKIP
... # define custom data source
... pass # doctest: +SKIP
>>> ds.write_datasource(CustomDatasource(...)) # doctest: +SKIP
Time complexity: O(dataset size / parallelism)
Args:
datasource: The datasource to write to.
write_args: Additional write args to pass to the datasource.
"""
ctx = DatasetContext.get_current()
blocks, metadata = zip(*self._plan.execute().get_blocks_with_metadata())
# TODO(ekl) remove this feature flag.
if "RAY_DATASET_FORCE_LOCAL_METADATA" in os.environ:
write_results: List[ObjectRef[WriteResult]] = datasource.do_write(
blocks, metadata, **write_args
)
else:
# Prepare write in a remote task so that in Ray client mode, we
# don't do metadata resolution from the client machine.
do_write = cached_remote_fn(_do_write, retry_exceptions=False, num_cpus=0)
write_results: List[ObjectRef[WriteResult]] = ray.get(
do_write.remote(
datasource,
ctx,
blocks,
metadata,
_wrap_arrow_serialization_workaround(write_args),
)
)
progress = ProgressBar("Write Progress", len(write_results))
try:
progress.block_until_complete(write_results)
datasource.on_write_complete(ray.get(write_results))
except Exception as e:
datasource.on_write_failed(write_results, e)
raise
finally:
progress.close()
def iter_rows(self, *, prefetch_blocks: int = 0) -> Iterator[Union[T, TableRow]]:
"""Return a local row iterator over the dataset.
If the dataset is a tabular dataset (Arrow/Pandas blocks), dict-like mappings
:py:class:`~ray.data.row.TableRow` are yielded for each row by the iterator.
If the dataset is not tabular, the raw row is yielded.
Examples:
>>> import ray
>>> for i in ray.data.range(1000000).iter_rows(): # doctest: +SKIP
... print(i) # doctest: +SKIP
Time complexity: O(1)
Args:
prefetch_blocks: The number of blocks to prefetch ahead of the
current block during the scan.
Returns:
A local iterator over the entire dataset.
"""
# During row-based ops, we also choose a batch format that lines up with the
# current dataset format in order to eliminate unnecessary copies and type
# conversions.
try:
dataset_format = self._dataset_format()
except ValueError:
# Dataset is empty or cleared, so fall back to "native".
batch_format = "native"
else:
batch_format = (
"pyarrow"
if dataset_format == "arrow"
else "pandas"
if dataset_format == "pandas"
else "native"
)
for batch in self.iter_batches(
prefetch_blocks=prefetch_blocks, batch_format=batch_format
):
batch = BlockAccessor.for_block(batch)
for row in batch.iter_rows():
yield row
def iter_batches(
self,
*,
prefetch_blocks: int = 0,
batch_size: Optional[int] = None,
batch_format: str = "native",
drop_last: bool = False,
) -> Iterator[BatchType]:
"""Return a local batched iterator over the dataset.
Examples:
>>> import ray
>>> for batch in ray.data.range(1000000).iter_batches(): # doctest: +SKIP
... print(batch) # doctest: +SKIP
Time complexity: O(1)
Args:
prefetch_blocks: The number of blocks to prefetch ahead of the
current block during the scan.
batch_size: Record batch size, or None to let the system pick.
batch_format: The format in which to return each batch.
Specify "native" to use the current block format (promoting
Arrow to pandas automatically), "pandas" to
select ``pandas.DataFrame`` or "pyarrow" to select
``pyarrow.Table``. Default is "native".
drop_last: Whether to drop the last batch if it's incomplete.
Returns:
An iterator over record batches.
"""
blocks = self._plan.execute()
stats = self._plan.stats()
time_start = time.perf_counter()
yield from batch_blocks(
blocks.iter_blocks(),
stats,
prefetch_blocks=prefetch_blocks,
batch_size=batch_size,
batch_format=batch_format,
drop_last=drop_last,
)
stats.iter_total_s.add(time.perf_counter() - time_start)
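# Illustrative usage sketch (not part of the Ray source above): iterating over
# fixed-size pandas batches and dropping the trailing partial batch. Assumes a
# local ``ray`` install with pandas available.
import ray

batch_ds = ray.data.range_arrow(1000)
for df_batch in batch_ds.iter_batches(
    batch_size=128, batch_format="pandas", drop_last=True
):
    print(len(df_batch))  # always 128, since drop_last=True discards the remainder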
def to_torch(
self,
*,
label_column: Optional[str] = None,
feature_columns: Optional[
Union[List[str], List[List[str]], Dict[str, List[str]]]
] = None,
label_column_dtype: Optional["torch.dtype"] = None,
feature_column_dtypes: Optional[
Union["torch.dtype", List["torch.dtype"], Dict[str, "torch.dtype"]]
] = None,
batch_size: int = 1,
prefetch_blocks: int = 0,
drop_last: bool = False,
unsqueeze_label_tensor: bool = True,
) -> "torch.utils.data.IterableDataset":
"""Return a Torch IterableDataset over this dataset.
This is only supported for datasets convertible to Arrow records.
It is recommended to use the returned ``IterableDataset`` directly
instead of passing it into a torch ``DataLoader``.
Each element in IterableDataset will be a tuple consisting of 2
elements. The first item contains the feature tensor(s), and the
second item is the label tensor. Those can take on different
forms, depending on the specified arguments.
For the features tensor (N is the ``batch_size`` and n, m, k
are the number of features per tensor):
* If ``feature_columns`` is a ``List[str]``, the features will be
a tensor of shape (N, n), with columns corresponding to
``feature_columns``
* If ``feature_columns`` is a ``List[List[str]]``, the features will be
a list of tensors of shape [(N, m),...,(N, k)], with columns of each
tensor corresponding to the elements of ``feature_columns``
* If ``feature_columns`` is a ``Dict[str, List[str]]``, the features
will be a dict of key-tensor pairs of shape
{key1: (N, m),..., keyN: (N, k)}, with columns of each
tensor corresponding to the value of ``feature_columns`` under the
key.
If ``unsqueeze_label_tensor=True`` (default), the label tensor will be
of shape (N, 1). Otherwise, it will be of shape (N,).
If ``label_column`` is specified as ``None``, then no column from the
``Dataset`` will be treated as the label, and the output label tensor
will be ``None``.
Note that you probably want to call ``.split()`` on this dataset if
there are to be multiple Torch workers consuming the data.
Time complexity: O(1)
Args:
label_column (Optional[str]): The name of the column used as the
label (second element of the output list). Can be None for
prediction, in which case the second element of returned
tuple will also be None.
feature_columns (Union[None, List[str], List[List[str]], \
Dict[str, List[str]]]): The names of the columns
to use as the features. Can be a list of lists or
a dict of string-list pairs for multi-tensor output.
If None, then use all columns except the label column as
the features.
label_column_dtype (Optional[torch.dtype]): The torch dtype to
use for the label column. If None, then automatically infer
the dtype.
feature_column_dtypes (Union[None, torch.dtype, List[torch.dtype],\
Dict[str, torch.dtype]]): The dtypes to use for the feature
tensors. This should match the format of ``feature_columns``,
or be a single dtype, in which case it will be applied to
all tensors. If None, then automatically infer the dtype.
batch_size (int): How many samples per batch to yield at a time.
Defaults to 1.
prefetch_blocks (int): The number of blocks to prefetch ahead of
the current block during the scan.
drop_last (bool): Set to True to drop the last incomplete batch,
if the dataset size is not divisible by the batch size. If
False and the size of dataset is not divisible by the batch
size, then the last batch will be smaller. Defaults to False.
unsqueeze_label_tensor (bool): If set to True, the label tensor
will be unsqueezed (reshaped to (N, 1)). Otherwise, it will
be left as-is, i.e. (N,). In general, regression loss
functions expect an unsqueezed tensor, while classification
loss functions expect a squeezed one. Defaults to True.
Returns:
A torch IterableDataset.
"""
import torch
from ray.data.impl.torch_iterable_dataset import TorchIterableDataset
from ray.ml.utils.torch_utils import convert_pandas_to_torch_tensor
# If an empty collection is passed in, treat it the same as None
if not feature_columns:
feature_columns = None
if feature_column_dtypes and not isinstance(feature_column_dtypes, torch.dtype):
if isinstance(feature_columns, dict):
if not isinstance(feature_column_dtypes, dict):
raise TypeError(
"If `feature_columns` is a dict, "
"`feature_column_dtypes` must be None, `torch.dtype`,"
f" or dict, got {type(feature_column_dtypes)}."
)
if set(feature_columns) != set(feature_column_dtypes):
raise ValueError(
"`feature_columns` and `feature_column_dtypes` "
"must have the same keys."
)
if any(not subcolumns for subcolumns in feature_columns.values()):
raise ValueError("column list may not be empty")
elif isinstance(feature_columns[0], (list, tuple)):
if not isinstance(feature_column_dtypes, (list, tuple)):
raise TypeError(
"If `feature_columns` is a list of lists, "
"`feature_column_dtypes` must be None, `torch.dtype`,"
f" or a sequence, got {type(feature_column_dtypes)}."
)
if len(feature_columns) != len(feature_column_dtypes):
raise ValueError(
"`feature_columns` and `feature_column_dtypes` "
"must have the same length."
)
if any(not subcolumns for subcolumns in feature_columns):
raise ValueError("column list may not be empty")
def make_generator():
for batch in self.iter_batches(
batch_size=batch_size,
batch_format="pandas",
prefetch_blocks=prefetch_blocks,
drop_last=drop_last,
):
if label_column:
label_vals = batch.pop(label_column).values
label_tensor = torch.as_tensor(label_vals, dtype=label_column_dtype)
if unsqueeze_label_tensor:
label_tensor = label_tensor.view(-1, 1)
else:
label_tensor = None
if isinstance(feature_columns, dict):
features_tensor = {
key: convert_pandas_to_torch_tensor(
batch,
feature_columns[key],
feature_column_dtypes[key]
if isinstance(feature_column_dtypes, dict)
else feature_column_dtypes,
)
for key in feature_columns
}
else:
features_tensor = convert_pandas_to_torch_tensor(
batch,
columns=feature_columns,
column_dtypes=feature_column_dtypes,
)
yield (features_tensor, label_tensor)
return TorchIterableDataset(make_generator)
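# Illustrative usage sketch (not part of the Ray source above): consuming a
# small Arrow-format dataset as a Torch IterableDataset with a List[str]
# ``feature_columns``. Assumes local ``ray`` and ``torch`` installs.
import ray

train_ds = ray.data.from_items(
    [{"x1": float(i), "x2": float(2 * i), "y": float(i % 2)} for i in range(8)]
)
torch_ds = train_ds.to_torch(
    label_column="y", feature_columns=["x1", "x2"], batch_size=4
)
for features, labels in torch_ds:
    # features: (4, 2); labels: (4, 1) because unsqueeze_label_tensor=True.
    print(features.shape, labels.shape)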
def to_tf(
self,
*,
output_signature: Union[
TensorflowFeatureTypeSpec, Tuple[TensorflowFeatureTypeSpec, "tf.TypeSpec"]
],
label_column: Optional[str] = None,
feature_columns: Optional[
Union[List[str], List[List[str]], Dict[str, List[str]]]
] = None,
prefetch_blocks: int = 0,
batch_size: int = 1,
) -> "tf.data.Dataset":
"""Return a TF Dataset over this dataset.
The TF Dataset will be created from the generator returned by the
``iter_batches`` method. ``prefetch_blocks`` and ``batch_size``
arguments will be passed to that method.
For the features tensor (N is the ``batch_size`` and n1, ..., nk
are the number of features per tensor):
* If ``feature_columns`` is a ``List[str]``, the features will be
a tensor of shape (N, n), with columns corresponding to
``feature_columns``
* If ``feature_columns`` is a ``List[List[str]]``, the features will be
a list of tensors of shape [(N, n1),...,(N, nk)], with columns of each
tensor corresponding to the elements of ``feature_columns``
* If ``feature_columns`` is a ``Dict[str, List[str]]``, the features
will be a dict of key-tensor pairs of shape
{key1: (N, n1),..., keyN: (N, nk)}, with columns of each
tensor corresponding to the value of ``feature_columns`` under the
key.
This is only supported for datasets convertible to Arrow records.
Requires all datasets to have the same columns.
It is recommended to call ``.split()`` on this dataset if
there are to be multiple TensorFlow workers consuming the data.
The elements generated must be compatible with the given
``output_signature`` argument (same as in
``tf.data.Dataset.from_generator``).
Time complexity: O(1)
Args:
output_signature (Union[TensorflowFeatureTypeSpec, \
Tuple[TensorflowFeatureTypeSpec, "tf.TypeSpec"]]): If ``label_column`` is specified,
a two-element tuple containing a ``FeatureTypeSpec`` and
``tf.TypeSpec`` object corresponding to (features, label). Otherwise, a
single ``TensorflowFeatureTypeSpec`` corresponding to features tensor.
A ``TensorflowFeatureTypeSpec`` is a ``tf.TypeSpec``,
``List["tf.TypeSpec"]``, or ``Dict[str, "tf.TypeSpec"]``.
label_column (Optional[str]): The name of the column used as the label
(second element of the output tuple). If not specified, output
will be just one tensor instead of a tuple.
feature_columns (Optional[Union[List[str], List[List[str]], Dict[str, \
List[str]]]): The names of the columns to use as the features. Can be a list of lists
or a dict of string-list pairs for multi-tensor output. If None, then
use all columns except the label columns as the features.
prefetch_blocks: The number of blocks to prefetch ahead of the
current block during the scan.
batch_size: Record batch size. Defaults to 1.
Returns:
A tf.data.Dataset.
"""
# argument exception checking is done in from_generator
try:
import tensorflow as tf
except ImportError:
raise ValueError("tensorflow must be installed!")
# `output_signature` can be a tuple but not a list. See
# https://stackoverflow.com/questions/59092423/what-is-a-nested-structure-in-tensorflow.
if isinstance(output_signature, list):
output_signature = tuple(output_signature)
def make_generator():
for batch in self.iter_batches(
prefetch_blocks=prefetch_blocks,
batch_size=batch_size,
batch_format="pandas",
):
if label_column:
targets = batch.pop(label_column).values
features = None
if feature_columns is None:
features = batch.values
elif isinstance(feature_columns, list):
if all(isinstance(column, str) for column in feature_columns):
features = batch[feature_columns].values
elif all(isinstance(columns, list) for columns in feature_columns):
features = tuple(
batch[columns].values for columns in feature_columns
)
else:
raise ValueError(
"Expected `feature_columns` to be a list of strings or a "
"list of lists."
)
elif isinstance(feature_columns, dict):
features = {
key: batch[columns].values
for key, columns in feature_columns.items()
}
else:
raise ValueError(
"Expected `feature_columns` to be a list or a dictionary, "
f"but got a `{type(feature_columns).__name__}` instead."
)
# TODO(Clark): Support batches containing our extension array
# TensorArray.
if label_column:
yield features, targets
else:
yield features
dataset = tf.data.Dataset.from_generator(
make_generator, output_signature=output_signature
)
return dataset
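# Illustrative usage sketch (not part of the Ray source above): building a
# tf.data.Dataset with an explicit ``output_signature`` matching the
# (features, label) batches described above. Assumes ``ray`` and ``tensorflow``.
import ray
import tensorflow as tf

tf_source = ray.data.from_items([{"x": float(i), "y": float(i % 2)} for i in range(8)])
tf_ds = tf_source.to_tf(
    label_column="y",
    feature_columns=["x"],
    output_signature=(
        tf.TensorSpec(shape=(None, 1), dtype=tf.float64),
        tf.TensorSpec(shape=(None,), dtype=tf.float64),
    ),
    batch_size=4,
)
for features, labels in tf_ds.take(1):
    print(features.shape, labels.shape)  # (4, 1) and (4,)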
def to_dask(self) -> "dask.DataFrame":
"""Convert this dataset into a Dask DataFrame.
This is only supported for datasets convertible to Arrow records.
Note that this function will set the Dask scheduler to Dask-on-Ray
globally, via the config.
Time complexity: O(dataset size / parallelism)
Returns:
A Dask DataFrame created from this dataset.
"""
import dask
import dask.dataframe as dd
from ray.util.client.common import ClientObjectRef
from ray.util.dask import ray_dask_get
dask.config.set(scheduler=ray_dask_get)
@dask.delayed
def block_to_df(block: Block):
block = BlockAccessor.for_block(block)
if isinstance(block, (ray.ObjectRef, ClientObjectRef)):
raise ValueError(
"Dataset.to_dask() must be used with Dask-on-Ray, please "
"set the Dask scheduler to ray_dask_get (located in "
"ray.util.dask)."
)
return block.to_pandas()
# TODO(Clark): Give Dask a Pandas-esque schema via the Pyarrow schema,
# once that's implemented.
ddf = dd.from_delayed(
[block_to_df(block) for block in self.get_internal_block_refs()]
)
return ddf
def to_mars(self) -> "mars.DataFrame":
"""Convert this dataset into a MARS dataframe.
Time complexity: O(dataset size / parallelism)
Returns:
A MARS dataframe created from this dataset.
"""
raise NotImplementedError # P1
def to_modin(self) -> "modin.DataFrame":
"""Convert this dataset into a Modin dataframe.
This works by first converting this dataset into a distributed set of
Pandas dataframes (using ``.to_pandas_refs()``). Please see caveats
there. Then the individual dataframes are used to create the modin
DataFrame using
``modin.distributed.dataframe.pandas.partitions.from_partitions()``.
This is only supported for datasets convertible to Arrow records.
This function induces a copy of the data. For zero-copy access to the
underlying data, consider using ``.to_arrow()`` or
``.get_internal_block_refs()``.
Time complexity: O(dataset size / parallelism)
Returns:
A Modin dataframe created from this dataset.
"""
from modin.distributed.dataframe.pandas.partitions import from_partitions
pd_objs = self.to_pandas_refs()
return from_partitions(pd_objs, axis=0)
def to_spark(self, spark: "pyspark.sql.SparkSession") -> "pyspark.sql.DataFrame":
"""Convert this dataset into a Spark dataframe.
Time complexity: O(dataset size / parallelism)
Returns:
A Spark dataframe created from this dataset.
"""
import raydp
core_worker = ray.worker.global_worker.core_worker
locations = [
core_worker.get_owner_address(block)
for block in self.get_internal_block_refs()
]
return raydp.spark.ray_dataset_to_spark_dataframe(
spark, self.schema(), self.get_internal_block_refs(), locations
)
def to_pandas(self, limit: int = 100000) -> "pandas.DataFrame":
"""Convert this dataset into a single Pandas DataFrame.
This is only supported for datasets convertible to Arrow or Pandas
records. An error is raised if the number of records exceeds the
provided limit. Note that you can use ``.limit()`` on the dataset
beforehand to truncate the dataset manually.
Time complexity: O(dataset size)
Args:
limit: The maximum number of records to return. An error will be
raised if the limit is exceeded.
Returns:
A Pandas DataFrame created from this dataset, containing a limited
number of records.
"""
if self.count() > limit:
raise ValueError(
"The dataset has more than the given limit of {} records. "
"Use ds.limit(N).to_pandas().".format(limit)
)
blocks = self.get_internal_block_refs()
output = DelegatingBlockBuilder()
for block in blocks:
output.add_block(ray.get(block))
return BlockAccessor.for_block(output.build()).to_pandas()
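# Illustrative usage sketch (not part of the Ray source above): truncating with
# ``.limit()`` before converting, so the ``to_pandas`` row limit is never hit.
# Assumes a local ``ray`` install.
import ray

big_ds = ray.data.range_arrow(1000)
df = big_ds.limit(500).to_pandas()
print(len(df))  # 500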
def to_pandas_refs(self) -> List[ObjectRef["pandas.DataFrame"]]:
"""Convert this dataset into a distributed set of Pandas dataframes.
This is only supported for datasets convertible to Arrow records.
This function induces a copy of the data. For zero-copy access to the
underlying data, consider using ``.to_arrow()`` or
``.get_internal_block_refs()``.
Time complexity: O(dataset size / parallelism)
Returns:
A list of remote Pandas dataframes created from this dataset.
"""
block_to_df = cached_remote_fn(_block_to_df)
return [block_to_df.remote(block) for block in self.get_internal_block_refs()]
def to_numpy_refs(
self, *, column: Optional[str] = None
) -> List[ObjectRef[np.ndarray]]:
"""Convert this dataset into a distributed set of NumPy ndarrays.
This is only supported for datasets convertible to NumPy ndarrays.
This function induces a copy of the data. For zero-copy access to the
underlying data, consider using ``.to_arrow()`` or
``.get_internal_block_refs()``.
Time complexity: O(dataset size / parallelism)
Args:
column: The name of the column to convert to numpy, or None to
specify the entire row. Required for Arrow tables.
Returns:
A list of remote NumPy ndarrays created from this dataset.
"""
block_to_ndarray = cached_remote_fn(_block_to_ndarray)
return [
block_to_ndarray.remote(block, column=column)
for block in self.get_internal_block_refs()
]
def to_arrow_refs(self) -> List[ObjectRef["pyarrow.Table"]]:
"""Convert this dataset into a distributed set of Arrow tables.
This is only supported for datasets convertible to Arrow records.
This function is zero-copy if the existing data is already in Arrow
format. Otherwise, the data will be converted to Arrow format.
Time complexity: O(1) unless conversion is required.
Returns:
A list of remote Arrow tables created from this dataset.
"""
blocks: List[ObjectRef[Block]] = self.get_internal_block_refs()
if self._dataset_format() == "arrow":
# Zero-copy path.
return blocks
block_to_arrow = cached_remote_fn(_block_to_arrow)
return [block_to_arrow.remote(block) for block in blocks]
def to_random_access_dataset(
self,
key: str,
num_workers: Optional[int] = None,
) -> RandomAccessDataset:
"""Convert this Dataset into a distributed RandomAccessDataset (EXPERIMENTAL).
RandomAccessDataset partitions the dataset across the cluster by the given sort
key, providing efficient random access to records via binary search. A number
of worker actors are created, each of which has zero-copy access to the
underlying sorted data blocks of the Dataset.
Note that the key must be unique in the dataset. If there are duplicate keys,
an arbitrary value is returned.
This is only supported for Arrow-format datasets.
Args:
key: The key column over which records can be queried.
num_workers: The number of actors to use to serve random access queries.
By default, this is determined by multiplying the number of Ray nodes
in the cluster by four. As a rule of thumb, you can expect each worker
to provide ~3000 records / second via ``get_async()``, and
~10000 records / second via ``multiget()``.
"""
if num_workers is None:
num_workers = 4 * len(ray.nodes())
return RandomAccessDataset(self, key, num_workers=num_workers)
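# Illustrative usage sketch (not part of the Ray source above): creating a
# RandomAccessDataset keyed by a unique column. The lookup methods mentioned in
# the docstring (``get_async()``/``multiget()``) are then available on ``rad``.
import ray

keyed_ds = ray.data.from_items([{"id": i, "score": i * 0.1} for i in range(100)])
rad = keyed_ds.to_random_access_dataset(key="id", num_workers=2)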
def repeat(self, times: Optional[int] = None) -> "DatasetPipeline[T]":
"""Convert this into a DatasetPipeline by looping over this dataset.
Transformations prior to the call to ``repeat()`` are evaluated once.
Transformations done on the returned pipeline are evaluated on each
loop of the pipeline over the base dataset.
Note that every repeat of the dataset is considered an "epoch" for
the purposes of ``DatasetPipeline.iter_epochs()``.
Examples:
>>> import ray
>>> # Infinite pipeline of numbers [0, 5)
>>> ray.data.range(5).repeat().take() # doctest: +SKIP
[0, 1, 2, 3, 4, 0, 1, 2, 3, 4, ...]
>>> # Can apply transformations to the pipeline.
>>> ray.data.range(5).repeat().map(lambda x: -x).take() # doctest: +SKIP
[0, -1, -2, -3, -4, 0, -1, -2, -3, -4, ...]
>>> # Can shuffle each epoch (dataset) in the pipeline.
>>> ray.data.range(5).repeat().random_shuffle().take() # doctest: +SKIP
[2, 3, 0, 4, 1, 4, 0, 2, 1, 3, ...]
Args:
times: The number of times to loop over this dataset, or None
to repeat indefinitely.
"""
from ray.data.dataset_pipeline import DatasetPipeline
from ray.data.impl.plan import _rewrite_read_stage
ctx = DatasetContext.get_current()
if self._plan.is_read_stage() and ctx.optimize_fuse_read_stages:
blocks, _ = self._plan._get_source_blocks()
blocks.clear()
blocks, outer_stats, read_stage = _rewrite_read_stage(blocks)
else:
blocks = self._plan.execute()
outer_stats = self._plan.stats()
read_stage = None
uuid = self._get_uuid()
outer_stats.dataset_uuid = uuid
if times is not None and times < 1:
raise ValueError("`times` must be >= 1, got {}".format(times))
class Iterator:
def __init__(self, blocks):
self._blocks = blocks
self._i = 0
def __next__(self) -> "Dataset[T]":
if times and self._i >= times:
raise StopIteration
epoch = self._i
blocks = self._blocks
self._i += 1
def gen():
ds = Dataset(
ExecutionPlan(blocks, outer_stats, dataset_uuid=uuid),
epoch,
lazy=False,
)
ds._set_uuid(uuid)
return ds
return gen
class Iterable:
def __init__(self, blocks):
self._blocks = blocks
def __iter__(self):
return Iterator(self._blocks)
pipe = DatasetPipeline(Iterable(blocks), length=times or float("inf"))
if read_stage:
pipe = pipe.foreach_window(
lambda ds, read_stage=read_stage: Dataset(
ds._plan.with_stage(read_stage), ds._epoch, True
)
)
return pipe
def window(
self,
*,
blocks_per_window: Optional[int] = None,
bytes_per_window: Optional[int] = None,
) -> "DatasetPipeline[T]":
"""Convert this into a DatasetPipeline by windowing over data blocks.
Transformations prior to the call to ``window()`` are evaluated in
bulk on the entire dataset. Transformations done on the returned
pipeline are evaluated incrementally per window of blocks as data is
read from the output of the pipeline.
Windowing execution allows for output to be read sooner without
waiting for all transformations to fully execute, and can also improve
efficiency if transforms use different resources (e.g., GPUs).
Without windowing::
[preprocessing......]
[inference.......]
[write........]
Time ----------------------------------------------------------->
With windowing::
[prep1] [prep2] [prep3]
[infer1] [infer2] [infer3]
[write1] [write2] [write3]
Time ----------------------------------------------------------->
Examples:
>>> import ray
>>> # Create an inference pipeline.
>>> ds = ray.data.read_binary_files(dir) # doctest: +SKIP
>>> infer = ... # doctest: +SKIP
>>> pipe = ds.window(blocks_per_window=10).map(infer) # doctest: +SKIP
DatasetPipeline(num_windows=40, num_stages=2)
>>> # The higher the stage parallelism, the shorter the pipeline.
>>> pipe = ds.window(blocks_per_window=20).map(infer) # doctest: +SKIP
DatasetPipeline(num_windows=20, num_stages=2)
>>> # Outputs can be incrementally read from the pipeline.
>>> for item in pipe.iter_rows(): # doctest: +SKIP
... print(item) # doctest: +SKIP
Args:
blocks_per_window: The window size (parallelism) in blocks.
Increasing window size increases pipeline throughput, but also
increases the latency to initial output, since it decreases the
length of the pipeline. Setting this to infinity effectively
disables pipelining.
bytes_per_window: Specify the window size in bytes instead of blocks.
This will be treated as an upper bound for the window size, but each
window will still include at least one block. This is mutually
exclusive with ``blocks_per_window``.
"""
from ray.data.dataset_pipeline import DatasetPipeline
from ray.data.impl.plan import _rewrite_read_stage
if blocks_per_window is not None and bytes_per_window is not None:
raise ValueError("Only one windowing scheme can be specified.")
if blocks_per_window is None:
blocks_per_window = 10
ctx = DatasetContext.get_current()
if self._plan.is_read_stage() and ctx.optimize_fuse_read_stages:
blocks, _ = self._plan._get_source_blocks()
blocks.clear()
blocks, outer_stats, read_stage = _rewrite_read_stage(blocks)
else:
blocks = self._plan.execute()
outer_stats = self._plan.stats()
read_stage = None
class Iterator:
def __init__(self, splits, epoch):
self._splits = splits.copy()
self._epoch = epoch
def __next__(self) -> "Dataset[T]":
if not self._splits:
raise StopIteration
blocks = self._splits.pop(0)
def gen():
ds = Dataset(
ExecutionPlan(blocks, outer_stats), self._epoch, lazy=False
)
return ds
return gen
class Iterable:
def __init__(self, blocks, epoch):
if bytes_per_window:
self._splits = blocks.split_by_bytes(bytes_per_window)
else:
self._splits = blocks.split(split_size=blocks_per_window)
try:
sizes = [s.size_bytes() for s in self._splits]
assert all(s > 0 for s in sizes), sizes
def fmt(size_bytes):
if size_bytes > 10 * 1024:
return "{}MiB".format(round(size_bytes / (1024 * 1024), 2))
else:
return "{}b".format(size_bytes)
logger.info(
"Created DatasetPipeline with {} windows: "
"{} min, {} max, {} mean".format(
len(self._splits),
fmt(min(sizes)),
fmt(max(sizes)),
fmt(int(np.mean(sizes))),
)
)
except Exception as e:
logger.info(
"Created DatasetPipeline with {} windows; "
"error getting sizes: {}".format(
len(self._splits),
e,
)
)
self._epoch = epoch
def __iter__(self):
return Iterator(self._splits, self._epoch)
it = Iterable(blocks, self._epoch)
pipe = DatasetPipeline(it, length=len(it._splits))
if read_stage:
pipe = pipe.foreach_window(
lambda ds, read_stage=read_stage: Dataset(
ds._plan.with_stage(read_stage), ds._epoch, True
)
)
return pipe
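# Illustrative usage sketch (not part of the Ray source above): windowed
# execution lets downstream stages start before the whole dataset has been
# transformed. Assumes a local ``ray`` install.
import ray

windowed = ray.data.range(1000).window(blocks_per_window=10)
pipe = windowed.map(lambda x: x * 2)
for row in pipe.iter_rows():
    pass  # rows stream out window by window instead of all at once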
def fully_executed(self) -> "Dataset[T]":
"""Force full evaluation of the blocks of this dataset.
This can be used to read all blocks into memory. By default, Datasets
doesn't read blocks from the datasource until the first transform.
Returns:
A Dataset with all blocks fully materialized in memory.
"""
plan = self._plan.deep_copy(preserve_uuid=True)
plan.execute(force_read=True)
ds = Dataset(plan, self._epoch, lazy=False)
ds._set_uuid(self._get_uuid())
return ds
def stats(self) -> str:
"""Returns a string containing execution timing information."""
return self._plan.stats().summary_string()
@DeveloperAPI
def get_internal_block_refs(self) -> List[ObjectRef[Block]]:
"""Get a list of references to the underlying blocks of this dataset.
This function can be used for zero-copy access to the data. It blocks
until the underlying blocks are computed.
Time complexity: O(1)
Returns:
A list of references to this dataset's blocks.
"""
return self._plan.execute().get_blocks()
def _experimental_lazy(self) -> "Dataset[T]":
"""Enable lazy evaluation (experimental)."""
self._lazy = True
return self
def _split(
self, index: int, return_right_half: bool
) -> ("Dataset[T]", "Dataset[T]"):
get_num_rows = cached_remote_fn(_get_num_rows)
split_block = cached_remote_fn(_split_block, num_returns=4)
count = 0
left_blocks = []
left_metadata = []
right_blocks = []
right_metadata = []
it = self._plan.execute().get_blocks_with_metadata()
for b, m in it:
if m.num_rows is None:
num_rows = ray.get(get_num_rows.remote(b))
else:
num_rows = m.num_rows
if count >= index:
if not return_right_half:
break
right_blocks.append(b)
right_metadata.append(m)
elif count + num_rows < index:
left_blocks.append(b)
left_metadata.append(m)
elif count + num_rows == index:
left_blocks.append(b)
left_metadata.append(m)
else:
b0, m0, b1, m1 = split_block.remote(
b, m, index - count, return_right_half
)
left_blocks.append(b0)
left_metadata.append(ray.get(m0))
right_blocks.append(b1)
right_metadata.append(ray.get(m1))
count += num_rows
left = Dataset(
ExecutionPlan(
BlockList(left_blocks, left_metadata),
self._plan.stats().child_TODO("split"),
),
self._epoch,
self._lazy,
)
if return_right_half:
right = Dataset(
ExecutionPlan(
BlockList(right_blocks, right_metadata),
self._plan.stats().child_TODO("split"),
),
self._epoch,
self._lazy,
)
else:
right = None
return left, right
def _divide(self, block_idx: int) -> ("Dataset[T]", "Dataset[T]"):
left, right = self._plan.execute().divide(block_idx)
l_ds = Dataset(ExecutionPlan(left, self._plan.stats()), self._epoch, self._lazy)
r_ds = Dataset(
ExecutionPlan(right, self._plan.stats()), self._epoch, self._lazy
)
return l_ds, r_ds
def _dataset_format(self) -> str:
"""Determine the format of the dataset. Possible values are: "arrow",
"pandas", "simple".
This may block; if the schema is unknown, this will synchronously fetch
the schema for the first block.
"""
# We need schema to properly validate, so synchronously
# fetch it if necessary.
schema = self.schema(fetch_if_missing=True)
if schema is None:
raise ValueError(
"Dataset is empty or cleared, can't determine the format of "
"the dataset."
)
try:
import pyarrow as pa
if isinstance(schema, pa.Schema):
return "arrow"
except ModuleNotFoundError:
pass
from ray.data.impl.pandas_block import PandasBlockSchema
if isinstance(schema, PandasBlockSchema):
return "pandas"
return "simple"
def _aggregate_on(
self, agg_cls: type, on: Optional[Union[KeyFn, List[KeyFn]]], *args, **kwargs
):
"""Helper for aggregating on a particular subset of the dataset.
This validates the `on` argument, and converts a list of column names
or lambdas to a multi-aggregation. A null `on` results in a
multi-aggregation on all columns for an Arrow Dataset, and a single
aggregation on the entire row for a simple Dataset.
"""
aggs = self._build_multicolumn_aggs(agg_cls, on, *args, **kwargs)
return self.aggregate(*aggs)
def _build_multicolumn_aggs(
self,
agg_cls: type,
on: Optional[Union[KeyFn, List[KeyFn]]],
ignore_nulls: bool,
*args,
skip_cols: Optional[List[str]] = None,
**kwargs,
):
"""Build set of aggregations for applying a single aggregation to
multiple columns.
"""
# Expand None into an aggregation for each column.
if on is None:
try:
dataset_format = self._dataset_format()
except ValueError:
dataset_format = None
if dataset_format in ["arrow", "pandas"]:
# The schema should already be cached from the ._dataset_format() call
# above, so we don't trigger a fetch here, and we assert that the schema is not None.
schema = self.schema(fetch_if_missing=False)
assert schema is not None
if not skip_cols:
skip_cols = []
if len(schema.names) > 0:
on = [col for col in schema.names if col not in skip_cols]
if not isinstance(on, list):
on = [on]
return [agg_cls(on_, *args, ignore_nulls=ignore_nulls, **kwargs) for on_ in on]
def _aggregate_result(self, result: Union[Tuple, TableRow]) -> U:
if result is not None and len(result) == 1:
if isinstance(result, tuple):
return result[0]
else:
# NOTE (kfstorm): We cannot call `result[0]` directly on
# `PandasRow` because indexing a column with position is not
# supported by pandas.
return list(result.values())[0]
else:
return result
def __repr__(self) -> str:
schema = self.schema()
if schema is None:
schema_str = "Unknown schema"
elif isinstance(schema, type):
schema_str = str(schema)
else:
schema_str = []
for n, t in zip(schema.names, schema.types):
if hasattr(t, "__name__"):
t = t.__name__
schema_str.append("{}: {}".format(n, t))
schema_str = ", ".join(schema_str)
schema_str = "{" + schema_str + "}"
count = self._meta_count()
return "Dataset(num_blocks={}, num_rows={}, schema={})".format(
self._plan.initial_num_blocks(), count, schema_str
)
def __str__(self) -> str:
return repr(self)
def _block_num_rows(self) -> List[int]:
get_num_rows = cached_remote_fn(_get_num_rows)
return ray.get([get_num_rows.remote(b) for b in self.get_internal_block_refs()])
def _block_size_bytes(self) -> List[int]:
get_size_bytes = cached_remote_fn(_get_size_bytes)
return ray.get(
[get_size_bytes.remote(b) for b in self.get_internal_block_refs()]
)
def _meta_count(self) -> Optional[int]:
return self._plan.meta_count()
def _get_uuid(self) -> str:
return self._uuid
def _set_uuid(self, uuid: str) -> None:
self._uuid = uuid
def _get_epoch(self) -> int:
return self._epoch
def _set_epoch(self, epoch: int) -> None:
self._epoch = epoch
def _warn_slow(self):
global _slow_warned
if not _slow_warned:
_slow_warned = True
logger.warning(
"The `map`, `flat_map`, and `filter` operations are unvectorized and "
"can be very slow. Consider using `.map_batches()` instead."
)
def _get_num_rows(block: Block) -> int:
block = BlockAccessor.for_block(block)
return block.num_rows()
def _get_size_bytes(block: Block) -> int:
block = BlockAccessor.for_block(block)
return block.size_bytes()
def _block_to_df(block: Block):
block = BlockAccessor.for_block(block)
return block.to_pandas()
def _block_to_ndarray(block: Block, column: Optional[str]):
block = BlockAccessor.for_block(block)
return block.to_numpy(column)
def _block_to_arrow(block: Block):
block = BlockAccessor.for_block(block)
return block.to_arrow()
def _sliding_window(iterable: Iterable, n: int):
"""Creates an iterator consisting of n-width sliding windows over
iterable. The sliding windows are constructed lazily such that an
element on the base iterator (iterable) isn't consumed until the
first sliding window containing that element is reached.
If n > len(iterable), then a single len(iterable) window is
returned.
Args:
iterable: The iterable on which the sliding window will be
created.
n: The width of the sliding window.
Returns:
An iterator of n-width windows over iterable.
If n > len(iterable), then a single len(iterable) window is
returned.
"""
it = iter(iterable)
window = collections.deque(itertools.islice(it, n), maxlen=n)
if len(window) > 0:
yield tuple(window)
for elem in it:
window.append(elem)
yield tuple(window)
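# Quick illustrative check of the helper above (not part of the original
# module): windows are yielded lazily, and an over-long ``n`` collapses to a
# single short window.
assert list(_sliding_window([1, 2, 3, 4], 2)) == [(1, 2), (2, 3), (3, 4)]
assert list(_sliding_window([1, 2], 5)) == [(1, 2)]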
def _split_block(
block: Block, meta: BlockMetadata, count: int, return_right_half: bool
) -> (Block, BlockMetadata, Optional[Block], Optional[BlockMetadata]):
stats = BlockExecStats.builder()
block = BlockAccessor.for_block(block)
logger.debug("Truncating last block to size: {}".format(count))
b0 = block.slice(0, count, copy=True)
a0 = BlockAccessor.for_block(b0)
m0 = BlockMetadata(
num_rows=a0.num_rows(),
size_bytes=a0.size_bytes(),
schema=meta.schema,
input_files=meta.input_files,
exec_stats=stats.build(),
)
if return_right_half:
b1 = block.slice(count, block.num_rows(), copy=True)
a1 = BlockAccessor.for_block(b1)
m1 = BlockMetadata(
num_rows=a1.num_rows(),
size_bytes=a1.size_bytes(),
schema=meta.schema,
input_files=meta.input_files,
exec_stats=stats.build(),
)
else:
b1 = None
m1 = None
return b0, m0, b1, m1
def _do_write(
ds: Datasource,
ctx: DatasetContext,
blocks: List[Block],
meta: List[BlockMetadata],
write_args: dict,
) -> List[ObjectRef[WriteResult]]:
write_args = _unwrap_arrow_serialization_workaround(write_args)
DatasetContext._set_current(ctx)
return ds.do_write(blocks, meta, **write_args)
scalyr_agent/scalyr_monitor.py
|
# Copyright 2014 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
#
# Contains the base class for all monitor plugins used by the Scalyr agent.
# This class should be used by developers creating their own monitor plugins.
#
# To see how to write your own Scalyr monitor plugin, please see:
# https://www.scalyr.com/help/creating-a-monitor-plugin
#
# author: Steven Czerwinski <[email protected]>
from __future__ import unicode_literals
from __future__ import absolute_import
__author__ = "[email protected]"
if False: # NOSONAR
from typing import Dict
from types import ModuleType
import inspect
import os
import sys
import time
import random
from threading import Lock
import six
import scalyr_agent.scalyr_logging as scalyr_logging
from scalyr_agent.json_lib.objects import ArrayOfStrings
from scalyr_agent.config_util import (
convert_config_param,
get_config_from_env,
BadConfiguration,
)
from scalyr_agent.util import StoppableThread
log = scalyr_logging.getLogger(__name__)
# Maximum value we will use for random sleep before first sample gathering.
# This way we guard against a potentially very large sleep which would be used for monitors
# configured with very large sample intervals.
MAX_INITIAL_GATHER_SAMPLE_SLEEP_SECS = 40
# Minimum value for initial sample gather sleep for monitors which use very large sample gather
# interval.
MIN_INITIAL_GATHER_SAMPLE_SLEEP_SECS = 15
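# Illustrative sketch (not part of this module): one way the constants above
# could bound a jittered sleep before the first sample gather. The helper name
# ``_example_initial_gather_sleep`` is hypothetical.
def _example_initial_gather_sleep(sample_interval_secs):
    upper = min(sample_interval_secs, MAX_INITIAL_GATHER_SAMPLE_SLEEP_SECS)
    lower = min(upper, MIN_INITIAL_GATHER_SAMPLE_SLEEP_SECS)
    return random.uniform(lower, upper)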
class ScalyrMonitor(StoppableThread):
"""The default number of seconds between gathering a sample. This is the global default, which should
be set from the configuration file.
"""
DEFAULT_SAMPLE_INTERVAL_SECS = 30.0
"""The base class for all monitors used by the agent.
An instance of a monitor will be created for every reference to this module in the "monitors"
section in the agent.json configuration file. The monitor will be
executed in its own thread and will be expected to send all output to
provided Loggers. Do not use stdout or stderr.
Public attributes: (must be updated in derived constructor)
log_config: A dict containing the log configuration object that
should be used to configure the agent to copy the log generated
by this module. It has the same format as the entries in the
"logs" section in agent.json. In particular, the module
can use this to specify the path of the log file where all emitted metric values
from this monitor will be sent (using self._logger), set attributes to
associate with all log lines generated by the module, specify a parser
for the log, as well as set sampling rules.
Note, if the "path" field in "log_config" is not absolute, it will be resolved relative to the
directory specified by the "agent_log_path" option in the configuration file.
disabled: A boolean indicating if this module instance should be
run.
"""
def __init__(
self, monitor_config, logger, sample_interval_secs=None, global_config=None
):
"""Constructs an instance of the monitor.
It is optional for derived classes to override this method. They can instead
override ``_initialize``, which is invoked during initialization.
TODO: Determine which approach is preferred by developers and recommend that.
If a derived class overrides __init__, it must invoke this method from the overriding method.
This method will set default values for all public attributes (log_config, disabled, etc). These
may be overwritten by the derived class.
The derived classes must raise an Exception (or something derived from Exception)
in this method if the provided configuration is invalid or if there is any other
error known at this time preventing the module from running.
@param monitor_config: A dict containing the configuration information for this module instance from the
configuration file. The only valid values are strings, ints, longs, floats, and booleans.
@param logger: The logger to use for output.
@param sample_interval_secs: The interval in seconds to wait between gathering samples. If None, it will
set the value from the ``sample_interval`` field in the monitor_config if present, or the default
interval time for all monitors in ``DEFAULT_SAMPLE_INTERVAL_SECS``. Generally, you should probably
pass None here and allow the value to be taken from the configuration files.
@param global_config: the global configuration object. Monitors can use or ignore this as necessary
"""
# The logger instance that this monitor should use to report all information and metric values.
self._logger = logger
self.monitor_name = monitor_config["module"]
# Holds the raw monitor name without the parts which are specific to monitor instances
if "." in monitor_config["module"]:
split = monitor_config["module"].split(".")
self.raw_monitor_name = split[-1]
else:
self.raw_monitor_name = monitor_config["module"]
# save the global config
self._global_config = global_config
# The MonitorConfig object created from the config for this monitor instance.
self._config = MonitorConfig(monitor_config, monitor_module=self.monitor_name)
log_path = self.monitor_name.split(".")[-1] + ".log"
self.disabled = False
# TODO: For now, just leverage the logic in the loggers for naming this monitor. However,
# we should have it be more dynamic where the monitor can override it.
if logger.component.find("monitor:") == 0:
self.monitor_name = logger.component[8:]
else:
self.monitor_name = logger.component
self.log_config = {
"path": log_path,
}
# This lock protects all variables that can be accessed by other threads: reported_lines,
# emitted_lines, and errors. It does not protect _run_state since that already has its own lock.
self.__lock = Lock()
self.__reported_lines = 0
self.__errors = 0
# Set the time between samples for this monitor. We prefer configuration values over the values
# passed into the constructor.
if sample_interval_secs is not None:
self._sample_interval_secs = sample_interval_secs
else:
self._sample_interval_secs = self._config.get(
"sample_interval",
convert_to=float,
default=ScalyrMonitor.DEFAULT_SAMPLE_INTERVAL_SECS,
)
self.__metric_log_open = False
# These variables control the rate limiter on how fast we can write to the metric log.
# The first one is the average number of bytes that can be written per second. This is the bucket fill rate
# in the "leaky bucket" algorithm used to calculate the rate limit. Derived classes may change this.
self._log_write_rate = self._config.get(
"monitor_log_write_rate", convert_to=int, default=2000
)
# This is the maximum size of a write burst to the log. This is the bucket size in the "leaky bucket" algorithm
# used to calculate the rate limit. Derived classes may change this.
self._log_max_write_burst = self._config.get(
"monitor_log_max_write_burst", convert_to=int, default=100000
)
# This is the number of seconds between waiting to flush the metric log (if there are pending bytes that
# need to be flushed to disk). If this is greater than zero, then it will reduce the amount of disk
# flushing, but at the cost of possible loss of data if the agent shuts down unexpectedly.
self._log_flush_delay = self._config.get(
"monitor_log_flush_delay", convert_to=float, default=0.0, min_value=0
)
# List of metrics name which shouldn't be logged and sent to Scalyr
self._metric_name_blacklist = self._config.get(
"metric_name_blacklist", convert_to=ArrayOfStrings, default=[]
)
# If true, will adjust the sleep time between gather_sample calls by the time spent in gather_sample, rather
# than sleeping the full sample_interval_secs time.
self._adjust_sleep_by_gather_time = False
self._initialize()
StoppableThread.__init__(self, name="metric thread")
def _initialize(self):
"""Can be overridden by derived classes to perform initialization functions before the monitor is run.
This is meant to allow derived monitors to perform some initialization and configuration validation
without having to override the __init__ method (and be responsible for passing all of the arguments
to the super class).
The derived classes must raise an Exception (or something derived from Exception)
in this method if the provided configuration is invalid or if there is any other
error known at this time preventing the module from running.
NOTE: This will be called every time the agent script is run, including when *stopping* the agent.
Therefore it is not the best place to do things like create sockets/open files etc.
"""
pass
@property
def module_name(self):
"""Returns the name of the module that will run this monitor.
"""
return self._config["module"]
def reported_lines(self):
"""Returns the number of metric lines emitted to the metric log for this monitor.
This is calculated by counting how many times the report_values method on this monitor's logger
instance was invoked, plus all the times any logger has logged anything with metric_log_for_monitor
set to this monitor.
"""
self.__lock.acquire()
result = self.__reported_lines
self.__lock.release()
return result
def errors(self):
"""Returns the number of errors experienced by the monitor as it is running.
For monitors just implementing the gather_sample method, this will be the number of times
that invocation raised an exception. If a monitor overrides the run method, then it is up to
them to increment the errors as appropriate using increment_counter.
"""
self.__lock.acquire()
result = self.__errors
self.__lock.release()
return result
def increment_counter(self, reported_lines=0, errors=0):
"""Increment some of the counters pertaining to the performance of this monitor.
"""
self.__lock.acquire()
self.__reported_lines += reported_lines
self.__errors += errors
self.__lock.release()
def config_from_monitors(self, manager):
"""
Called directly before running the `run` method.
This method passes in the module manager object to a monitor before
it runs so that the monitor can query the manager about other monitors
that exist.
In order to prevent circular references, callees should *not* retain a
reference to the manager object
"""
pass
def _get_initial_sleep_delay(self):
# type: () -> int
"""
Return initial sleep delay for this monitor.
We sleep this number of seconds before first sample gathering.
By default (on agent start up and when reloading the config and restarting the monitors),
all the monitors are started at the same time, which means that all the monitors with the
same sample gather interval will run at the same time.
To avoid this and potential larger load spike when running many monitors on lower powered
devices, we sleep a random number of seconds before the first sample gather interval for each
monitor.
This way we spread a potential short load spike during sample gathering across a longer time
frame.
"""
# NOTE: self._global_config will be None when using scalyr_agent/run_monitor.py script with
# -c flag
if not self._global_config:
return 0
if not self._global_config.global_monitor_sample_interval_enable_jitter:
return 0
sample_interval_secs = self._sample_interval_secs
if sample_interval_secs >= MAX_INITIAL_GATHER_SAMPLE_SLEEP_SECS:
min_jitter_secs = MIN_INITIAL_GATHER_SAMPLE_SLEEP_SECS
max_jitter_secs = MAX_INITIAL_GATHER_SAMPLE_SLEEP_SECS
else:
# min sleep time is 2/10 of the sample interval and max is 8/10
min_jitter_secs = round((sample_interval_secs / 10) * 2)
max_jitter_secs = round((sample_interval_secs / 10) * 8)
random_jitter_secs = random.randint(min_jitter_secs, max_jitter_secs)
return random_jitter_secs
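# Illustrative example (not part of the original module): with the default
# 30 second sample interval, the branch above yields
# min_jitter_secs = round((30 / 10) * 2) = 6 and
# max_jitter_secs = round((30 / 10) * 8) = 24, so the first gather_sample
# call is delayed by a random value between 6 and 24 seconds.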
def run(self):
"""Begins executing the monitor, writing metric output to logger.
Implements the business logic for this monitor. This method will
be invoked by the agent on its own thread. This method should
only return if the monitor instance should no longer be executed or
if the agent is shutting down.
The default implementation of this method will invoke the
"gather_sample" once every sample_period time, emitting the returned
dict to logger. Derived classes may override this method if they
wish.
This method should use "self._logger" to report any output. It should use
"self._logger.emit_value" to report any metric values generated by the monitor
plugin. See the documentation for 'scalyr_logging.AgentLogger.emit_value' method for more details.
"""
# noinspection PyBroadException
try:
# To avoid all the monitors with the same sample interval running at the same time,
# we add a random sleep delay before the first sample gathering.
initial_sleep_delay = self._get_initial_sleep_delay()
if initial_sleep_delay >= 1:
self._sleep_but_awaken_if_stopped(initial_sleep_delay)
self._logger.debug(
"Sleeping %s seconds before first sample gather interval"
% (initial_sleep_delay)
)
while not self._is_thread_stopped():
sample_interval = self._sample_interval_secs
# noinspection PyBroadException
adjustment = 0
try:
start_time = time.time()
self.gather_sample()
if self._adjust_sleep_by_gather_time:
adjustment = min(time.time() - start_time, sample_interval)
except Exception:
self._logger.exception(
"Failed to gather sample due to the following exception"
)
self.increment_counter(errors=1)
self._sleep_but_awaken_if_stopped(sample_interval - adjustment)
self._logger.info("Monitor has finished")
except Exception:
# TODO: Maybe remove this catch here and let the higher layer catch it. However, we do not
# right now join on the monitor threads, so no one would catch it. We should change that.
self._logger.exception(
"Monitor died from due to exception:", error_code="failedMonitor"
)
def get_extra_server_attributes(self):
"""Derived classes may optionally return a dict of server attributes to be added to the main config
server attributes. Keys already defined by server attributes or other monitors will be dropped with a warning.
You must ensure that this method is thread-safe as it will be invoked by a different thread than the monitor
itself.
@return: A dict or None
"""
return None
def get_user_agent_fragment(self):
"""Derived classes may optionally return a string fragment to be appended to the User-Agent header for all
data sent to Scalyr. Note: User-Agent augmentation applies to all data (not restricted to data from this
monitor).
You must ensure that this method is thread-safe as it will be invoked by a different thread (MonitorsManager).
@return: A string fragment or None (Empty strings are treated as None)
"""
return None
def gather_sample(self):
"""Derived classes should implement this method to gather a data sample for this monitor plugin
and report it.
If the default "run" method implementation is not overridden, then
derived classes must implement this method to actually perform the
monitor-specific work to gather whatever information it should be
collecting.
It is expected that the derived class will report any gathered metric samples
by using the 'emit_value' method on self._logger. They may invoke that method
multiple times in a single 'gather_sample' call to report multiple metrics.
See the documentation for 'scalyr_logging.AgentLogger.emit_value' method for more details.
Any exceptions raised by this method will be reported as an error but will
not stop execution of the monitor.
"""
pass
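# Illustrative sketch (not part of the original module): a minimal derived
# monitor usually only overrides _initialize and gather_sample, e.g.:
#
#   class UptimeMonitor(ScalyrMonitor):  # hypothetical example monitor
#       def _initialize(self):
#           self.__start_time = time.time()
#
#       def gather_sample(self):
#           self._logger.emit_value(
#               "example.uptime_secs", time.time() - self.__start_time
#           )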
def set_sample_interval(self, secs):
"""Sets the number of seconds between calls to gather_sample when running.
This must be invoked before the monitor is started.
@param secs: The number of seconds, which can be fractional.
"""
self._sample_interval_secs = secs
def set_log_watcher(self, log_watcher):
"""Provides a log_watcher object that monitors can use to add/remove log files
"""
pass
def _get_log_rotation_configuration(self):
"""Gets the log rotation backup count and maximum byte settings from the monitor config,
and if not specified by the monitor config then from the global config
@return: A tuple containing the log_rotation_backup_count and the log_rotation_max_bytes
for this monitor.
"""
rotation_count = self._config.get("log_rotation_backup_count")
if rotation_count is None:
# Sometimes global_config can be null if derived monitor did not pass one in.
if self._global_config is not None:
rotation_count = self._global_config.log_rotation_backup_count
else:
rotation_count = 2
max_bytes = self._config.get("log_rotation_max_bytes")
if max_bytes is None:
if self._global_config is not None:
max_bytes = self._global_config.log_rotation_max_bytes
else:
max_bytes = 20 * 1024 * 1024
return (rotation_count, max_bytes)
def open_metric_log(self):
"""Opens the logger for this monitor.
This must be invoked before the monitor is started."""
backup_count, max_bytes = self._get_log_rotation_configuration()
self._logger.openMetricLogForMonitor(
self.log_config["path"],
self,
max_bytes=max_bytes,
backup_count=backup_count,
max_write_burst=self._log_max_write_burst,
log_write_rate=self._log_write_rate,
flush_delay=self._log_flush_delay,
)
self.__metric_log_open = True
return True
def close_metric_log(self):
"""Closes the logger for this monitor.
This must be invoked after the monitor has been stopped."""
if self.__metric_log_open:
self._logger.closeMetricLog()
self.__metric_log_open = False
# 2->TODO '_is_stopped' name is reserved in python3
def _is_thread_stopped(self):
"""Returns whether or not the "stop" method has been invoked."""
return not self._run_state.is_running()
def _sleep_but_awaken_if_stopped(self, time_to_sleep):
"""Sleeps for the specified amount of seconds or until the stop() method on this instance is invoked, whichever
comes first.
@param time_to_sleep: The number of seconds to sleep.
@return: True if the stop() has been invoked.
"""
return self._run_state.sleep_but_awaken_if_stopped(time_to_sleep)
def load_monitor_class(module_name, additional_python_paths):
"""Loads the ScalyrMonitor class from the specified module and return it.
This examines the module, locates the first class derived from ScalyrMonitor (there should only be one),
and returns it.
@param module_name: The name of the module
@param additional_python_paths: A list of paths (separated by os.pathsep) to add to the PYTHONPATH when
instantiating the module in case it needs to read other packages.
@type module_name: six.text_type
@type additional_python_paths: six.text_type
@return: A tuple containing the class for the monitor and the MonitorInformation object for it.
@rtype: (class, MonitorInformation)
"""
original_path = list(sys.path)
# Add in the additional paths.
if additional_python_paths is not None and len(additional_python_paths) > 0:
for x in additional_python_paths.split(os.pathsep):
sys.path.append(x)
MonitorInformation.set_monitor_info(module_name)
# Load monitor.
try:
module = __import__(module_name)
# If this a package name (contains periods) then we have to walk down
# the subparts to get the actual module we wanted.
for n in module_name.split(".")[1:]:
module = getattr(module, n)
# Now find any class that derives from ScalyrMonitor
for attr in module.__dict__:
value = getattr(module, attr)
if not inspect.isclass(value):
continue
if "ScalyrMonitor" in six.text_type(value.__bases__):
description = value.__doc__
if description:
description = description.strip()
MonitorInformation.set_monitor_info(
module_name, description=description
)
return value, MonitorInformation.get_monitor_info(module_name)
return None, None
finally:
# Be sure to reset the PYTHONPATH
sys.path = original_path
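# Illustrative usage (not part of the original module), with a hypothetical
# monitor module name:
#
#   monitor_class, monitor_info = load_monitor_class(
#       "scalyr_agent.builtin_monitors.example_monitor", ""
#   )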
def define_config_option(
monitor_module,
option_name,
option_description,
required_option=False,
max_value=None,
min_value=None,
convert_to=None,
default=None,
env_aware=False,
env_name=None,
):
"""Defines a configuration option for the specified monitor.
Once this is invoked, any validation rules supplied here are applied to all MonitorConfig objects created
with the same monitor name.
Note, this overwrites any previously defined rules for this configuration option.
@param monitor_module: The module the monitor is defined in. This must be the same name that will be supplied
for any MonitorConfig instances for this monitor.
@param option_name: The name of the option field.
@param required_option: If True, then this option is considered to be required, and a BadMonitorConfiguration
exception is raised when the configuration is parsed for the monitor if the field is not present.
@param convert_to: If not None, then will convert the value for the option to the specified type. Only int,
bool, float, long, and six.text_type are supported. If the type conversion cannot be done, a
BadMonitorConfiguration exception is raised during configuration parsing. The only true conversions allowed are
those from six.text_type value to other types such as int, bool, long, float. Trivial conversions are allowed
from int, long to float, but not the other way around. Additionally, any primitive type can be converted to
six.text_type.
@param default: The value to assign to the option if the option is not present in the configuration. This is
ignored if 'required_option' is True.
@param max_value: If not None, the maximum allowed value for option. Raises a BadMonitorConfiguration if the
value is greater during configuration parsing.
@param min_value: If not None, the minimum allowed value for option. Raises a BadMonitorConfiguration if the
value is less than during configuration parsing.
@param env_aware: If True and not defined in config file, look for presence of environment variable.
@param env_name: If provided, will use this name to lookup the environment variable. Otherwise, use
SCALYR_<option_name> (uppercased) as the environment variable name.
Note: A non-empty value also automatically implies env_aware as True, regardless of its value.
"""
option = ConfigOption()
option.option_name = option_name
option.description = option_description
option.required_option = required_option
option.max_value = max_value
option.min_value = min_value
option.convert_to = convert_to
option.default = default
option.env_aware = env_aware
option.env_name = env_name
MonitorInformation.set_monitor_info(monitor_module, option=option)
return None
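# Illustrative usage (not part of the original module): a monitor module would
# typically register its options at import time, e.g.:
#
#   define_config_option(
#       __monitor__,  # hypothetical variable holding the monitor's module name
#       "sample_interval",
#       "Optional (defaults to 30). Seconds to wait between samples.",
#       convert_to=float,
#       default=30.0,
#   )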
def define_metric(
monitor_module,
metric_name,
description,
extra_fields=None,
unit=None,
cumulative=False,
category=None,
):
"""Defines description information for a metric with the specified name and extra fields.
This will overwrite previous metric information recorded for the same ``metric_name`` and ``extra_fields``.
Currently, this information is only used when creating documentation pages for the monitor. Not all of the fields
are used but will be used in the future.
@param monitor_module: The module name for the monitor this metric is defined in.
@param metric_name: The name of the metric.
@param description: The description of the metric.
@param extra_fields: A dict describing the extra fields that are recorded with this metric. It maps from the
extra field name to the values that the description apply to.
@param unit: A string describing the units of the value. For now, this should be 'sec' or 'bytes'. You may also
include a colon after the unit with a scale factor. For example, 'sec:.01' indicates the value represents
1/100ths of a second. You may also specify 'milliseconds', which is mapped to 'sec:.001'
@param cumulative: True if the metric records the sum of all metric values since the monitored process began. For
example, it could be the sum of all request sizes received by a server. In this case, calculating the difference
between two values for the metric is the same as calculating the rate of the non-accumulated metric.
@param category: The category of the metric. Each category will get its own table when printing the documentation.
This should be used when there are many metrics and they need to be broken down into smaller groups.
@type monitor_module: six.text_type
@type metric_name: six.text_type
@type description: six.text_type
@type extra_fields: dict
@type unit: six.text_type
@type cumulative: bool
@type category: six.text_type
"""
info = MetricDescription()
info.metric_name = metric_name
info.description = description
info.extra_fields = extra_fields
info.unit = unit
info.cumulative = cumulative
info.category = category
MonitorInformation.set_monitor_info(monitor_module, metric=info)
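# Illustrative usage (not part of the original module):
#
#   define_metric(
#       __monitor__,  # hypothetical variable holding the monitor's module name
#       "example.request_size",
#       "Total bytes received by the example server since it started.",
#       unit="bytes",
#       cumulative=True,
#       category="network",
#   )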
def define_log_field(monitor_module, field_name, field_description):
"""Defines a field that can be parsed from the log lines generated by the specified monitor.
Note, this overwrites any previously defined rules for this log field.
@param monitor_module: The module the monitor is defined in. This must be the same name that will be supplied
for any MonitorConfig instances for this monitor.
@param field_name: The name of the log field.
@param field_description: The description for the log field.
"""
log_field = LogFieldDescription()
log_field.field = field_name
log_field.description = field_description
MonitorInformation.set_monitor_info(monitor_module, log_field=log_field)
return None
class MonitorInformation(object):
"""Encapsulates all the descriptive information that can be gather for a particular monitor.
This is generally used to create documentation pages for the monitor.
"""
def __init__(self, monitor_module):
self.__monitor_module = monitor_module
self.__description = None
# Maps from option name to the ConfigOption object that describes it.
self.__options = {}
# Maps from metric name with extra fields to the MetricDescription object that describes it.
self.__metrics = {}
# Maps from log field name to the LogFieldDescription object that describes it.
self.__log_fields = {}
# A counter used to determine insert sort order.
self.__counter = 0
@property
def monitor_module(self):
"""Returns the module the monitor is defined in.
@return: The module the monitor is defined in.
@rtype: six.text_type
"""
return self.__monitor_module
@property
def description(self):
"""Returns a description for the monitor using markdown.
@return: The description
@rtype: six.text_type
"""
return self.__description
@property
def config_options(self):
"""Returns the configuration options for this monitor.
@return: A list of the options
@rtype: list of ConfigOption
"""
return sorted(
six.itervalues(self.__options), key=self.__get_insert_sort_position
)
@property
def metrics(self):
"""Returns descriptions for the metrics recorded by this monitor.
@return: A list of metric descriptions
@rtype: list of MetricDescription
"""
return sorted(
six.itervalues(self.__metrics), key=self.__get_insert_sort_position
)
@property
def log_fields(self):
"""Returns the log fields that are parsed from the log lines generated by this monitor.
@return: A list of the log fields.
@rtype: list of LogFieldDescription
"""
return sorted(
six.itervalues(self.__log_fields), key=self.__get_insert_sort_position
)
def __get_insert_sort_position(self, item):
"""Returns the key to use for sorting the item by its insert position.
This relies on the 'sort_pos' attribute added to all ConfigOption, MetricDescription, and
LogFieldDescription objects when added to a monitor's information.
@param item: The object
@type item: object
@return: The insert position of the item
@rtype: int
"""
return getattr(item, "sort_pos")
__monitor_info__ = {} # type: Dict[ModuleType, MonitorInformation]
@staticmethod
def set_monitor_info(
monitor_module, description=None, option=None, metric=None, log_field=None
):
"""Sets information for the specified monitor.
@param monitor_module: The module the monitor is defined in.
@param description: If not None, sets the description for the monitor, using markdown.
@param option: If not None, adds the specified configuration option to the monitor's information.
@param metric: If not None, adds the specified metric description to the monitor's information.
@param log_field: If not None, adds the specified log field description to the monitor's information.
@type monitor_module: six.text_type
@type description: six.text_type
@type option: ConfigOption
@type metric: MetricDescription
@type log_field: LogFieldDescription
"""
if monitor_module not in MonitorInformation.__monitor_info__:
MonitorInformation.__monitor_info__[monitor_module] = MonitorInformation(
monitor_module
)
info = MonitorInformation.__monitor_info__[monitor_module]
if description is not None:
info.__description = description
# Increment the counter we use to record insert order.
info.__counter += 1
if option is not None:
info.__options[option.option_name] = option
# Stash a position attribute to capture what the insert order was for the options.
setattr(option, "sort_pos", info.__counter)
if metric is not None:
if metric.extra_fields is None:
info.__metrics[metric.metric_name] = metric
else:
# If there are extra fields, we use that as part of the key name to store the metric under to
# avoid collisions with the same metric but different extra fields registered.
info.__metrics[
"%s%s" % (metric.metric_name, six.text_type(metric.extra_fields))
] = metric
# Stash a position attribute to capture what the insert order was for the metrics.
setattr(metric, "sort_pos", info.__counter)
if log_field is not None:
info.__log_fields[log_field.field] = log_field
# Stash a position attribute to capture what the insert order was for the log fields.
setattr(log_field, "sort_pos", info.__counter)
@staticmethod
def get_monitor_info(monitor_module):
"""Returns the MonitorInformation object for the monitor defined in ``monitor_module``.
@param monitor_module: The module the monitor is defined in.
@type monitor_module: six.text_type
@return: The information for the specified monitor, or none if it has not been loaded.
@rtype: MonitorInformation
"""
if monitor_module in MonitorInformation.__monitor_info__:
return MonitorInformation.__monitor_info__[monitor_module]
else:
return None
def __repr__(self):
return "<MonitorInformation monitor_module=%s,metrics=%s>" % (
self.__monitor_module,
self.__metrics,
)
class ConfigOption(object):
"""Simple object to hold the fields for a single configuration option.
"""
def __init__(self):
# The name of the option.
self.option_name = None
# The description of the option.
self.description = None
# True if the option is required.
self.required_option = False
# The maximum allowed value for the option, if any.
self.max_value = None
# The minimum allowed value for the option, if any.
self.min_value = None
# The primitive type to convert the value to.
self.convert_to = None
# The default value, if any.
self.default = None
# Whether to look in the environment for a fallback value if not defined in config file
self.env_aware = False
# Custom environment variable name (instead of SCALYR_<option_name>)
self.env_name = None
def __repr__(self):
return "%s %s %s" % (self.option_name, self.env_aware, self.env_name)
class MetricDescription(object):
"""Simple object to hold fields describing a monitor's metric."""
def __init__(self):
# The name of the metric.
self.metric_name = None
# The description for the metric.
self.description = None
# A dict containing a map of the extra fields included in the metric along with the format for the values.
self.extra_fields = None
# A string describing the units of the value. For now, this should be 'sec' or 'bytes'. You may also include
# a colon after the unit with a scale factor. For example, 'sec:.01' indicates the value represents 1/100ths
# of a second. You may also specify 'milliseconds', which is mapped to 'sec:.001'.
self.unit = None
# True if the metric records the sum of all metric values since the monitored process began. For example, it could
# be the sum of all the latencies for all requests received by the server.
self.cumulative = False
# The category for this metric. This needs only to be supplied if the metric list is long for a particular
# monitor.
self.category = None
def __repr__(self):
return (
"<MetricDescription metric_name=%s,unit=%s,cumulative=%s,extra_fields=%s>"
% (self.metric_name, self.unit, self.cumulative, str(self.extra_fields))
)
class LogFieldDescription(object):
"""Simple object to hold fields describing the entries that are parsed from a log line produced by the monitor."""
def __init__(self):
# The name of the field in the log line.
self.field = None
# The meaning of the field.
self.description = None
class MonitorConfig(object):
"""Encapsulates configuration parameters for a single monitor instance and includes helper utilities to
validate configuration values.
This supports most of the operators and methods that dict supports, but has additional support to allow
Monitor developers to easily validate configuration values. See the get method for more details.
This abstraction does not support any mutator operations. The configuration is read-only.
"""
def __init__(self, content=None, monitor_module=None):
"""Initializes MonitorConfig.
@param content: A dict containing the key/values pairs to use.
@param monitor_module: The module containing the monitor. This must be the same as what was previously
used for 'define_config_option' for any options registered for this monitor.
"""
self.__map = {}
self.__monitor_module = monitor_module
self._environment_aware_map = {}
if content is not None:
for x in content:
self.__map[x] = content[x]
info = MonitorInformation.get_monitor_info(monitor_module)
if info is not None:
for x in info.config_options:
# Config option is environment aware if either of the following are Truthy
env_aware = x.env_aware or x.env_name
if env_aware:
env_name = x.env_name or ("SCALYR_%s" % x.option_name.upper())
self._environment_aware_map[x.option_name] = env_name
defined_in_env = env_name in os.environ
else:
defined_in_env = False
if (
x.required_option
or x.default is not None
or x.option_name in self.__map
or defined_in_env
):
self.__map[x.option_name] = self.get(
x.option_name,
required_field=x.required_option,
max_value=x.max_value,
min_value=x.min_value,
convert_to=x.convert_to,
default=x.default,
report_conflicting_environment_value=True,
)
def __len__(self):
"""Returns the number of keys in the JsonObject"""
return len(self.__map)
def get(
self,
field,
required_field=False,
max_value=None,
min_value=None,
convert_to=None,
default=None,
report_conflicting_environment_value=False,
):
"""Returns the value for the requested field.
If the value is not set via config file, also look for it in the environment.
This method will optionally apply some validation rules as indicated by the optional arguments. If any
of these validation operations fail, then a BadMonitorConfiguration exception is raised. Monitor developers are
encouraged to catch this exception at their layer.
@param field: The name of the field.
@param required_field: If True, then will raise a BadMonitorConfiguration exception if the field is not
present.
@param convert_to: If not None, then will convert the value for the field to the specified type. Only int,
bool, float, long, and six.text_type are supported. If the type conversion cannot be done, a
BadMonitorConfiguration exception is raised. The only conversions allowed are those mapped out in
ALLOWED_CONVERSIONS. Trivial conversions are allowed from int, long to
float, but not the other way around. Additionally, any primitive type can be converted to six.text_type.
six.text_type can be converted to complex types such as ArrayOfStrings, JsonArray, JsonObject as long as
they can be correctly parsed.
@param default: The value to return if the field is not present in the configuration. This is ignored if
'required_field' is True.
@param max_value: If not None, the maximum allowed value for field. Raises a BadMonitorConfiguration if the
value is greater than this.
@param min_value: If not None, the minimum allowed value for field. Raises a BadMonitorConfiguration if the
value is less than this.
@param report_conflicting_environment_value: If True, disallows overriding via environment variable.
@return: The value
@raise BadMonitorConfiguration: If any of the conversion or required rules are violated.
"""
try:
result = self.__map.get(field)
if (
result is not None
and convert_to is not None
and type(result) != convert_to
):
result = convert_config_param(field, result, convert_to)
# Param not found in config file, so check environment
if result is None or report_conflicting_environment_value:
envar_name = self._environment_aware_map.get(field)
logger = None
if report_conflicting_environment_value:
logger = log
envar_val = get_config_from_env(
field,
envar_name,
convert_to=convert_to,
logger=logger,
param_val=result,
monitor_name=self.__monitor_module,
)
if result is None:
result = envar_val
# Required field not found in environment nor in config
if result is None:
if required_field and field not in self.__map:
raise BadMonitorConfiguration(
'Missing required field "%s"' % field, field
)
result = self.__map.get(field, default)
if result is None:
return result
# Perform conversion again in case both config-file and environment values were absent and the default
# value requires conversion.
if convert_to is not None and not issubclass(convert_to, type(result)):
result = convert_config_param(field, result, convert_to)
if max_value is not None and result > max_value:
raise BadMonitorConfiguration(
'Value of %s in field "%s" is invalid; maximum is %s'
% (six.text_type(result), field, six.text_type(max_value)),
field,
)
if min_value is not None and result < min_value:
raise BadMonitorConfiguration(
'Value of %s in field "%s" is invalid; minimum is %s'
% (six.text_type(result), field, six.text_type(min_value)),
field,
)
return result
except BadConfiguration as e:
raise BadMonitorConfiguration(message=e.message, field=e.field)
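# Illustrative example (not part of the original module): for an option that was
# registered as environment aware, a call such as
#
#   interval = monitor_config.get("sample_interval", convert_to=float, default=30.0)
#
# resolves in this order: the value from the monitor's stanza in the config file
# (converted to float), otherwise the mapped environment variable (for example
# SCALYR_SAMPLE_INTERVAL), otherwise the supplied default of 30.0.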
def __iter__(self):
return six.iterkeys(self.__map)
def iteritems(self):
"""Returns an iterator over the items (key/value tuple) for this object."""
return six.iteritems(self.__map)
def itervalues(self):
"""Returns an iterator over the values for this object."""
return six.itervalues(self.__map)
def iterkeys(self):
"""Returns an iterator over the keys for this object."""
return six.iterkeys(self.__map)
def items(self):
"""Returns a list of items (key/value tuple) for this object."""
return list(self.__map.items())
def values(self):
"""Returns a list of values for this object."""
return list(self.__map.values())
def keys(self):
"""Returns a list keys for this object."""
return list(self.__map.keys())
def __getitem__(self, field):
if field not in self:
raise KeyError('Missing field "%s" in monitor config.' % field)
return self.__map[field]
def copy(self):
result = MonitorConfig()
result.__map = self.__map.copy()
return result
def __contains__(self, key):
"""Returns True if the JsonObject contains a value for key."""
return key in self.__map
def __eq__(self, other):
if other is None:
return False
if type(self) is not type(other):
return False
assert isinstance(other.__map, dict)
return self.__map == other.__map
def __ne__(self, other):
return not self.__eq__(other)
class BadMonitorConfiguration(Exception):
"""Exception indicating a bad monitor configuration, such as missing a required field."""
def __init__(self, message, field):
self.message = message
self.field = field
Exception.__init__(self, message)
class UnsupportedSystem(Exception):
"""Exception indicating a particular monitor is not supported on this system."""
def __init__(self, monitor_name, message):
"""Constructs an instance of the exception.
@param monitor_name: The name of the monitor
@param message: A message indicating what requirement was violated, such as requiring Python version 2.6 or greater.
"""
Exception.__init__(self, message)
self.monitor_name = monitor_name
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
github/config.go
|
package github
import (
"bufio"
"fmt"
"io/ioutil"
"net/url"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"syscall"
"github.com/github/hub/ui"
"github.com/github/hub/utils"
"github.com/mitchellh/go-homedir"
"golang.org/x/crypto/ssh/terminal"
)
type yamlHost struct {
User string `yaml:"user"`
OAuthToken string `yaml:"oauth_token"`
Protocol string `yaml:"protocol"`
UnixSocket string `yaml:"unix_socket,omitempty"`
}
type Host struct {
Host string `toml:"host"`
User string `toml:"user"`
AccessToken string `toml:"access_token"`
Protocol string `toml:"protocol"`
UnixSocket string `toml:"unix_socket,omitempty"`
}
type Config struct {
Hosts []*Host `toml:"hosts"`
}
func (c *Config) PromptForHost(host string) (h *Host, err error) {
token := c.DetectToken()
tokenFromEnv := token != ""
if host != GitHubHost {
if _, e := url.Parse("https://" + host); e != nil {
err = fmt.Errorf("invalid hostname: %q", host)
return
}
}
h = c.Find(host)
if h != nil {
if h.User == "" {
utils.Check(CheckWriteable(configsFile()))
// User is missing from the config: this is a broken config probably
// because it was created with an old (broken) version of hub. Let's fix
// it now. See issue #1007 for details.
user := c.PromptForUser(host)
if user == "" {
utils.Check(fmt.Errorf("missing user"))
}
h.User = user
err := newConfigService().Save(configsFile(), c)
utils.Check(err)
}
if tokenFromEnv {
h.AccessToken = token
} else {
return
}
} else {
h = &Host{
Host: host,
AccessToken: token,
Protocol: "https",
}
c.Hosts = append(c.Hosts, h)
}
client := NewClientWithHost(h)
if !tokenFromEnv {
utils.Check(CheckWriteable(configsFile()))
err = c.authorizeClient(client, host)
if err != nil {
return
}
}
userFromEnv := os.Getenv("GITHUB_USER")
repoFromEnv := os.Getenv("GITHUB_REPOSITORY")
if userFromEnv == "" && repoFromEnv != "" {
repoParts := strings.SplitN(repoFromEnv, "/", 2)
if len(repoParts) > 0 {
userFromEnv = repoParts[0]
}
}
if tokenFromEnv && userFromEnv != "" {
h.User = userFromEnv
} else {
var currentUser *User
currentUser, err = client.CurrentUser()
if err != nil {
return
}
h.User = currentUser.Login
}
if !tokenFromEnv {
err = newConfigService().Save(configsFile(), c)
}
return
}
func (c *Config) authorizeClient(client *Client, host string) (err error) {
user := c.PromptForUser(host)
pass := c.PromptForPassword(host, user)
var code, token string
for {
token, err = client.FindOrCreateToken(user, pass, code)
if err == nil {
break
}
if ae, ok := err.(*errorInfo); ok && strings.HasPrefix(ae.Response.Header.Get("X-GitHub-OTP"), "required;") {
if code != "" {
ui.Errorln("warning: invalid two-factor code")
}
code = c.PromptForOTP()
} else {
break
}
}
if err == nil {
client.Host.AccessToken = token
}
return
}
func (c *Config) DetectToken() string {
return os.Getenv("GITHUB_TOKEN")
}
func (c *Config) PromptForUser(host string) (user string) {
user = os.Getenv("GITHUB_USER")
if user != "" {
return
}
ui.Printf("%s username: ", host)
user = c.scanLine()
return
}
func (c *Config) PromptForPassword(host, user string) (pass string) {
pass = os.Getenv("GITHUB_PASSWORD")
if pass != "" {
return
}
ui.Printf("%s password for %s (never stored): ", host, user)
if ui.IsTerminal(os.Stdin) {
if password, err := getPassword(); err == nil {
pass = password
}
} else {
pass = c.scanLine()
}
return
}
func (c *Config) PromptForOTP() string {
fmt.Print("two-factor authentication code: ")
return c.scanLine()
}
func (c *Config) scanLine() string {
var line string
scanner := bufio.NewScanner(os.Stdin)
if scanner.Scan() {
line = scanner.Text()
}
utils.Check(scanner.Err())
return line
}
func getPassword() (string, error) {
stdin := int(syscall.Stdin)
initialTermState, err := terminal.GetState(stdin)
if err != nil {
return "", err
}
c := make(chan os.Signal, 1) // buffered so signal.Notify does not drop the signal
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
go func() {
s := <-c
terminal.Restore(stdin, initialTermState)
switch sig := s.(type) {
case syscall.Signal:
if int(sig) == 2 {
fmt.Println("^C")
}
}
os.Exit(1)
}()
passBytes, err := terminal.ReadPassword(stdin)
if err != nil {
return "", err
}
signal.Stop(c)
fmt.Print("\n")
return string(passBytes), nil
}
func (c *Config) Find(host string) *Host {
for _, h := range c.Hosts {
if h.Host == host {
return h
}
}
return nil
}
func (c *Config) selectHost() *Host {
options := len(c.Hosts)
if options == 1 {
return c.Hosts[0]
}
prompt := "Select host:\n"
for idx, host := range c.Hosts {
prompt += fmt.Sprintf(" %d. %s\n", idx+1, host.Host)
}
prompt += fmt.Sprint("> ")
ui.Printf(prompt)
index := c.scanLine()
i, err := strconv.Atoi(index)
if err != nil || i < 1 || i > options {
utils.Check(fmt.Errorf("Error: must enter a number [1-%d]", options))
}
return c.Hosts[i-1]
}
var defaultConfigsFile string
func configsFile() string {
if configFromEnv := os.Getenv("HUB_CONFIG"); configFromEnv != "" {
return configFromEnv
}
if defaultConfigsFile == "" {
var err error
defaultConfigsFile, err = determineConfigLocation()
utils.Check(err)
}
return defaultConfigsFile
}
func homeConfig() (string, error) {
if home, err := homedir.Dir(); err != nil {
return "", err
} else {
return filepath.Join(home, ".config"), nil
}
}
func determineConfigLocation() (string, error) {
var err error
xdgHome := os.Getenv("XDG_CONFIG_HOME")
configDir := xdgHome
if configDir == "" {
if configDir, err = homeConfig(); err != nil {
return "", err
}
}
xdgDirs := os.Getenv("XDG_CONFIG_DIRS")
if xdgDirs == "" {
xdgDirs = "/etc/xdg"
}
searchDirs := append([]string{configDir}, strings.Split(xdgDirs, ":")...)
for _, dir := range searchDirs {
filename := filepath.Join(dir, "hub")
if _, err := os.Stat(filename); err == nil {
return filename, nil
}
}
configFile := filepath.Join(configDir, "hub")
if configDir == xdgHome {
if homeDir, _ := homeConfig(); homeDir != "" {
legacyConfig := filepath.Join(homeDir, "hub")
if _, err = os.Stat(legacyConfig); err == nil {
ui.Errorf("Notice: config file found but not respected at: %s\n", legacyConfig)
ui.Errorf("You might want to move it to `%s' to avoid re-authenticating.\n", configFile)
}
}
}
return configFile, nil
}
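// Illustrative example (not part of the original file): with
// XDG_CONFIG_HOME=/home/alice/.config and XDG_CONFIG_DIRS unset, the lookup
// above checks /home/alice/.config/hub and then /etc/xdg/hub, and returns
// /home/alice/.config/hub as the path for a new config file if neither exists.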
var currentConfig *Config
var configLoadedFrom = ""
func CurrentConfig() *Config {
filename := configsFile()
if configLoadedFrom != filename {
currentConfig = &Config{}
newConfigService().Load(filename, currentConfig)
configLoadedFrom = filename
}
return currentConfig
}
func (c *Config) DefaultHost() (host *Host, err error) {
if GitHubHostEnv != "" {
host, err = c.PromptForHost(GitHubHostEnv)
} else if len(c.Hosts) > 0 {
host = c.selectHost()
// HACK: forces host to inherit GITHUB_TOKEN if applicable
host, err = c.PromptForHost(host.Host)
} else {
host, err = c.PromptForHost(DefaultGitHubHost())
}
return
}
func (c *Config) DefaultHostNoPrompt() (*Host, error) {
if GitHubHostEnv != "" {
return c.PromptForHost(GitHubHostEnv)
} else if len(c.Hosts) > 0 {
host := c.Hosts[0]
// HACK: forces host to inherit GITHUB_TOKEN if applicable
return c.PromptForHost(host.Host)
} else {
return c.PromptForHost(GitHubHost)
}
}
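// Illustrative usage (not part of the original file): callers outside this
// package typically resolve a host before issuing API requests, e.g.:
//
//   config := github.CurrentConfig()
//   host, err := config.DefaultHost()
//   utils.Check(err)
//   ui.Printf("authenticated as %s on %s\n", host.User, host.Host)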
// CheckWriteable checks if config file is writeable. This should
// be called before asking for credentials and only if current
// operation needs to update the file. See issue #1314 for details.
func CheckWriteable(filename string) error {
// Check if the file exists already. If it doesn't, we will delete it after
// checking for writeability.
fileExistsAlready := false
if _, err := os.Stat(filename); err == nil {
fileExistsAlready = true
}
err := os.MkdirAll(filepath.Dir(filename), 0771)
if err != nil {
return err
}
w, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0600)
if err != nil {
return err
}
w.Close()
if !fileExistsAlready {
err := os.Remove(filename)
if err != nil {
return err
}
}
return nil
}
// CreateTestConfigs is exported for testing purposes.
func CreateTestConfigs(user, token string) *Config {
f, _ := ioutil.TempFile("", "test-config")
os.Setenv("HUB_CONFIG", f.Name())
host := &Host{
User: user,
AccessToken: token,
Host: GitHubHost,
}
c := &Config{Hosts: []*Host{host}}
err := newConfigService().Save(f.Name(), c)
if err != nil {
panic(err)
}
return c
}
|
[
"\"GITHUB_USER\"",
"\"GITHUB_REPOSITORY\"",
"\"GITHUB_TOKEN\"",
"\"GITHUB_USER\"",
"\"GITHUB_PASSWORD\"",
"\"HUB_CONFIG\"",
"\"XDG_CONFIG_HOME\"",
"\"XDG_CONFIG_DIRS\""
] |
[] |
[
"XDG_CONFIG_DIRS",
"GITHUB_TOKEN",
"GITHUB_PASSWORD",
"GITHUB_USER",
"GITHUB_REPOSITORY",
"HUB_CONFIG",
"XDG_CONFIG_HOME"
] |
[]
|
["XDG_CONFIG_DIRS", "GITHUB_TOKEN", "GITHUB_PASSWORD", "GITHUB_USER", "GITHUB_REPOSITORY", "HUB_CONFIG", "XDG_CONFIG_HOME"]
|
go
| 7 | 0 | |
docs/conf.py
|
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
]
# TODO: Please Read!
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
autodoc_mock_imports = [
"micropython",
"adafruit_bus_device",
"adafruit_framebuf",
"busio",
]
intersphinx_mapping = {
"python": ("https://docs.python.org/3.4", None),
"BusDevice": (
"https://circuitpython.readthedocs.io/projects/busdevice/en/latest/",
None,
),
"CircuitPython": ("https://circuitpython.readthedocs.io/en/latest/", None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Adafruit SSD1305 Library"
copyright = "2019 Bryan Siepert"
author = "Bryan Siepert"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".env", "CODE_OF_CONDUCT.md"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entry. The default is False.
todo_emit_warnings = True
napoleon_numpy_docstring = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), "."]
except ImportError:
html_theme = "default"
html_theme_path = ["."]
else:
html_theme_path = ["."]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = "_static/favicon.ico"
# Output file base name for HTML help builder.
htmlhelp_basename = "AdafruitSsd1305Librarydoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"AdafruitSSD1305Library.tex",
"AdafruitSSD1305 Library Documentation",
author,
"manual",
),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"AdafruitSSD1305library",
"Adafruit SSD1305 Library Documentation",
[author],
1,
)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"AdafruitSSD1305Library",
"Adafruit SSD1305 Library Documentation",
author,
"AdafruitSSD1305Library",
"One line description of project.",
"Miscellaneous",
),
]
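# Illustrative note (not part of the original file): with this configuration,
# the HTML documentation can typically be built locally with something like:
#
#   sphinx-build -b html . _build/html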
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
storm-core/src/jvm/org/apache/storm/daemon/supervisor/BasicContainer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.storm.daemon.supervisor;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.commons.lang.StringUtils;
import org.apache.storm.Config;
import org.apache.storm.generated.LocalAssignment;
import org.apache.storm.generated.ProfileAction;
import org.apache.storm.generated.ProfileRequest;
import org.apache.storm.generated.StormTopology;
import org.apache.storm.generated.WorkerResources;
import org.apache.storm.utils.ConfigUtils;
import org.apache.storm.utils.LocalState;
import org.apache.storm.utils.Utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
/**
* A container that runs processes on the local box.
*/
public class BasicContainer extends Container {
private static final Logger LOG = LoggerFactory.getLogger(BasicContainer.class);
private static final FilenameFilter jarFilter = new FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
return name.endsWith(".jar");
}
};
private static final Joiner CPJ =
Joiner.on(Utils.CLASS_PATH_SEPARATOR).skipNulls();
protected final LocalState _localState;
protected final String _profileCmd;
protected final String _stormHome = System.getProperty("storm.home");
protected volatile boolean _exitedEarly = false;
protected class ProcessExitCallback implements ExitCodeCallback {
private final String _logPrefix;
public ProcessExitCallback(String logPrefix) {
_logPrefix = logPrefix;
}
@Override
public void call(int exitCode) {
LOG.info("{} exited with code: {}", _logPrefix, exitCode);
_exitedEarly = true;
}
}
/**
* Create a new BasicContainer
* @param type the type of container being made.
* @param conf the supervisor config
* @param supervisorId the ID of the supervisor this is a part of.
* @param port the port the container is on. Should be <= 0 if only a partial recovery
* @param assignment the assignment for this container. Should be null if only a partial recovery.
* @param localState the local state of the supervisor. May be null if partial recovery
* @param workerId the id of the worker to use. Must not be null if doing a partial recovery.
*/
public BasicContainer(ContainerType type, Map<String, Object> conf, String supervisorId, int port,
LocalAssignment assignment,
LocalState localState, String workerId) throws IOException {
this(type, conf, supervisorId, port, assignment, localState, workerId, null, null, null);
}
/**
* Create a new BasicContainer
* @param type the type of container being made.
* @param conf the supervisor config
* @param supervisorId the ID of the supervisor this is a part of.
* @param port the port the container is on. Should be <= 0 if only a partial recovery
* @param assignment the assignment for this container. Should be null if only a partial recovery.
* @param localState the local state of the supervisor. May be null if partial recovery
* @param workerId the id of the worker to use. Must not be null if doing a partial recovery.
* @param ops file system operations (mostly for testing) if null a new one is made
* @param topoConf the config of the topology (mostly for testing) if null
* and not a partial recovery the real conf is read.
* @param profileCmd the command to use when profiling (used for testing)
* @throws IOException on any error
* @throws ContainerRecoveryException if the Container could not be recovered.
*/
BasicContainer(ContainerType type, Map<String, Object> conf, String supervisorId, int port,
LocalAssignment assignment,
LocalState localState, String workerId, Map<String, Object> topoConf,
AdvancedFSOps ops, String profileCmd) throws IOException {
super(type, conf, supervisorId, port, assignment, workerId, topoConf, ops);
assert(localState != null);
_localState = localState;
if (type.isRecovery() && !type.isOnlyKillable()) {
synchronized (localState) {
String wid = null;
Map<String, Integer> workerToPort = localState.getApprovedWorkers();
for (Map.Entry<String, Integer> entry : workerToPort.entrySet()) {
if (port == entry.getValue().intValue()) {
wid = entry.getKey();
}
}
if (wid == null) {
throw new ContainerRecoveryException("Could not find worker id for " + port + " " + assignment);
}
LOG.info("Recovered Worker {}", wid);
_workerId = wid;
}
} else if (_workerId == null){
createNewWorkerId();
}
if (profileCmd == null) {
profileCmd = _stormHome + Utils.FILE_PATH_SEPARATOR + "bin" + Utils.FILE_PATH_SEPARATOR
+ conf.get(Config.WORKER_PROFILER_COMMAND);
}
_profileCmd = profileCmd;
}
/**
* Create a new worker ID for this process and store it in this object and
* in the local state. Never call this if a worker is currently up and running,
* as we will lose track of the process.
*/
protected void createNewWorkerId() {
_type.assertFull();
assert(_workerId == null);
synchronized (_localState) {
_workerId = Utils.uuid();
Map<String, Integer> workerToPort = _localState.getApprovedWorkers();
if (workerToPort == null) {
workerToPort = new HashMap<>(1);
}
removeWorkersOn(workerToPort, _port);
workerToPort.put(_workerId, _port);
_localState.setApprovedWorkers(workerToPort);
LOG.info("Created Worker ID {}", _workerId);
}
}
private static void removeWorkersOn(Map<String, Integer> workerToPort, int _port) {
for (Iterator<Entry<String, Integer>> i = workerToPort.entrySet().iterator(); i.hasNext();) {
Entry<String, Integer> found = i.next();
if (_port == found.getValue().intValue()) {
LOG.warn("Deleting worker {} from state", found.getKey());
i.remove();
}
}
}
@Override
public void cleanUpForRestart() throws IOException {
String origWorkerId = _workerId;
super.cleanUpForRestart();
synchronized (_localState) {
Map<String, Integer> workersToPort = _localState.getApprovedWorkers();
workersToPort.remove(origWorkerId);
removeWorkersOn(workersToPort, _port);
_localState.setApprovedWorkers(workersToPort);
LOG.info("Removed Worker ID {}", origWorkerId);
}
}
@Override
public void relaunch() throws IOException {
_type.assertFull();
//We are launching it now...
_type = ContainerType.LAUNCH;
createNewWorkerId();
setup();
launch();
}
@Override
public boolean didMainProcessExit() {
return _exitedEarly;
}
/**
* Run the given command for profiling
*
* @param command
* the command to run
* @param env
* the environment to run the command
* @param logPrefix
* the prefix to include in the logs
* @param targetDir
* the working directory to run the command in
* @return true if it ran successfully, else false
* @throws IOException
* on any error
* @throws InterruptedException
if interrupted while waiting for the process to exit.
*/
protected boolean runProfilingCommand(List<String> command, Map<String, String> env, String logPrefix,
File targetDir) throws IOException, InterruptedException {
_type.assertFull();
Process p = SupervisorUtils.launchProcess(command, env, logPrefix, null, targetDir);
int ret = p.waitFor();
return ret == 0;
}
@Override
public boolean runProfiling(ProfileRequest request, boolean stop) throws IOException, InterruptedException {
_type.assertFull();
String targetDir = ConfigUtils.workerArtifactsRoot(_conf, _topologyId, _port);
@SuppressWarnings("unchecked")
Map<String, String> env = (Map<String, String>) _topoConf.get(Config.TOPOLOGY_ENVIRONMENT);
if (env == null) {
env = new HashMap<String, String>();
}
String str = ConfigUtils.workerArtifactsPidPath(_conf, _topologyId, _port);
String workerPid = _ops.slurpString(new File(str)).trim();
ProfileAction profileAction = request.get_action();
String logPrefix = "ProfilerAction process " + _topologyId + ":" + _port + " PROFILER_ACTION: " + profileAction
+ " ";
List<String> command = mkProfileCommand(profileAction, stop, workerPid, targetDir);
File targetFile = new File(targetDir);
if (command.size() > 0) {
return runProfilingCommand(command, env, logPrefix, targetFile);
}
LOG.warn("PROFILING REQUEST NOT SUPPORTED {} IGNORED...", request);
return true;
}
/**
* Get the command to run when doing profiling
* @param action the profiling action to perform
* @param stop if this is meant to stop the profiling or start it
* @param workerPid the PID of the process to profile
* @param targetDir the current working directory of the worker process
* @return the command to run for profiling.
*/
private List<String> mkProfileCommand(ProfileAction action, boolean stop, String workerPid, String targetDir) {
switch(action) {
case JMAP_DUMP:
return jmapDumpCmd(workerPid, targetDir);
case JSTACK_DUMP:
return jstackDumpCmd(workerPid, targetDir);
case JPROFILE_DUMP:
return jprofileDump(workerPid, targetDir);
case JVM_RESTART:
return jprofileJvmRestart(workerPid);
case JPROFILE_STOP:
if (stop) {
return jprofileStop(workerPid, targetDir);
}
return jprofileStart(workerPid);
default:
return Lists.newArrayList();
}
}
private List<String> jmapDumpCmd(String pid, String targetDir) {
return Lists.newArrayList(_profileCmd, pid, "jmap", targetDir);
}
private List<String> jstackDumpCmd(String pid, String targetDir) {
return Lists.newArrayList(_profileCmd, pid, "jstack", targetDir);
}
private List<String> jprofileStart(String pid) {
return Lists.newArrayList(_profileCmd, pid, "start");
}
private List<String> jprofileStop(String pid, String targetDir) {
return Lists.newArrayList(_profileCmd, pid, "stop", targetDir);
}
private List<String> jprofileDump(String pid, String targetDir) {
return Lists.newArrayList(_profileCmd, pid, "dump", targetDir);
}
private List<String> jprofileJvmRestart(String pid) {
return Lists.newArrayList(_profileCmd, pid, "kill");
}
/**
* Compute the java.library.path that should be used for the worker.
* This helps it to load JNI libraries that are packaged in the uber jar.
* @param stormRoot the root directory of the worker process
* @param conf the config for the supervisor.
* @return the java.library.path/LD_LIBRARY_PATH to use so native libraries load correctly.
*/
protected String javaLibraryPath(String stormRoot, Map<String, Object> conf) {
String resourceRoot = stormRoot + Utils.FILE_PATH_SEPARATOR + ConfigUtils.RESOURCES_SUBDIR;
String os = System.getProperty("os.name").replaceAll("\\s+", "_");
String arch = System.getProperty("os.arch");
String archResourceRoot = resourceRoot + Utils.FILE_PATH_SEPARATOR + os + "-" + arch;
String ret = CPJ.join(archResourceRoot, resourceRoot,
conf.get(Config.JAVA_LIBRARY_PATH));
return ret;
}
/**
* Returns the absolute paths of the jar files found under the given directory.
* @param dir the directory to search
* @return the absolute jar file paths
*/
protected List<String> getFullJars(File dir) {
File[] files = dir.listFiles(jarFilter);
if (files == null) {
return Collections.emptyList();
}
ArrayList<String> ret = new ArrayList<>(files.length);
for (File f: files) {
ret.add(f.getAbsolutePath());
}
return ret;
}
protected List<String> frameworkClasspath() {
File stormLibDir = new File(_stormHome, "lib");
String stormConfDir =
System.getenv("STORM_CONF_DIR") != null ?
System.getenv("STORM_CONF_DIR") :
new File(_stormHome, "conf").getAbsolutePath();
File stormExtlibDir = new File(_stormHome, "extlib");
String extcp = System.getenv("STORM_EXT_CLASSPATH");
List<String> pathElements = new LinkedList<>();
pathElements.addAll(getFullJars(stormLibDir));
pathElements.addAll(getFullJars(stormExtlibDir));
pathElements.add(extcp);
pathElements.add(stormConfDir);
return pathElements;
}
@SuppressWarnings("unchecked")
private List<String> asStringList(Object o) {
if (o instanceof String) {
return Arrays.asList((String)o);
} else if (o instanceof List) {
return (List<String>)o;
}
return Collections.emptyList();
}
/**
* Compute the classpath for the worker process
* @param stormJar the topology jar
* @param dependencyLocations any dependencies from the topology
* @return the full classpath
*/
protected String getWorkerClassPath(String stormJar, List<String> dependencyLocations) {
List<String> workercp = new ArrayList<>();
workercp.addAll(frameworkClasspath());
workercp.add(stormJar);
workercp.addAll(dependencyLocations);
workercp.addAll(asStringList(_topoConf.get(Config.TOPOLOGY_CLASSPATH)));
return CPJ.join(workercp);
}
private String substituteChildOptsInternal(String string, int memOnheap) {
if (StringUtils.isNotBlank(string)) {
String p = String.valueOf(_port);
string = string.replace("%ID%", p);
string = string.replace("%WORKER-ID%", _workerId);
string = string.replace("%TOPOLOGY-ID%", _topologyId);
string = string.replace("%WORKER-PORT%", p);
if (memOnheap > 0) {
string = string.replace("%HEAP-MEM%", String.valueOf(memOnheap));
}
}
return string;
}
protected List<String> substituteChildopts(Object value) {
return substituteChildopts(value, -1);
}
protected List<String> substituteChildopts(Object value, int memOnheap) {
List<String> rets = new ArrayList<>();
if (value instanceof String) {
String string = substituteChildOptsInternal((String) value, memOnheap);
if (StringUtils.isNotBlank(string)) {
String[] strings = string.split("\\s+");
for (String s: strings) {
if (StringUtils.isNotBlank(s)) {
rets.add(s);
}
}
}
} else if (value instanceof List) {
@SuppressWarnings("unchecked")
List<String> objects = (List<String>) value;
for (String object : objects) {
String str = substituteChildOptsInternal(object, memOnheap);
if (StringUtils.isNotBlank(str)) {
rets.add(str);
}
}
}
return rets;
}
/**
* Launch the worker process (non-blocking)
*
* @param command
* the command to run
* @param env
* the environment to run the command
* @param processExitCallback
* a callback for when the process exits
* @param logPrefix
* the prefix to include in the logs
* @param targetDir
* the working directory to run the command in
* @throws IOException
* on any error
*/
protected void launchWorkerProcess(List<String> command, Map<String, String> env, String logPrefix,
ExitCodeCallback processExitCallback, File targetDir) throws IOException {
SupervisorUtils.launchProcess(command, env, logPrefix, processExitCallback, targetDir);
}
private String getWorkerLoggingConfigFile() {
String log4jConfigurationDir = (String) (_conf.get(Config.STORM_LOG4J2_CONF_DIR));
if (StringUtils.isNotBlank(log4jConfigurationDir)) {
if (!Utils.isAbsolutePath(log4jConfigurationDir)) {
log4jConfigurationDir = _stormHome + Utils.FILE_PATH_SEPARATOR + log4jConfigurationDir;
}
} else {
log4jConfigurationDir = _stormHome + Utils.FILE_PATH_SEPARATOR + "log4j2";
}
if (Utils.IS_ON_WINDOWS && !log4jConfigurationDir.startsWith("file:")) {
log4jConfigurationDir = "file:///" + log4jConfigurationDir;
}
return log4jConfigurationDir + Utils.FILE_PATH_SEPARATOR + "worker.xml";
}
private static class DependencyLocations {
private List<String> _data = null;
private final Map<String, Object> _conf;
private final String _topologyId;
private final AdvancedFSOps _ops;
private final String _stormRoot;
public DependencyLocations(final Map<String, Object> conf, final String topologyId, final AdvancedFSOps ops, final String stormRoot) {
_conf = conf;
_topologyId = topologyId;
_ops = ops;
_stormRoot = stormRoot;
}
public String toString() {
List<String> data;
synchronized(this) {
data = _data;
}
return "DEP_LOCS for " + _topologyId +" => " + data;
}
public synchronized List<String> get() throws IOException {
if (_data != null) {
return _data;
}
final StormTopology stormTopology = ConfigUtils.readSupervisorTopology(_conf, _topologyId, _ops);
final List<String> dependencyLocations = new ArrayList<>();
if (stormTopology.get_dependency_jars() != null) {
for (String dependency : stormTopology.get_dependency_jars()) {
dependencyLocations.add(new File(_stormRoot, dependency).getAbsolutePath());
}
}
if (stormTopology.get_dependency_artifacts() != null) {
for (String dependency : stormTopology.get_dependency_artifacts()) {
dependencyLocations.add(new File(_stormRoot, dependency).getAbsolutePath());
}
}
_data = dependencyLocations;
return _data;
}
}
static class DepLRUCache {
public final int _maxSize = 100; //We could make this configurable in the future...
@SuppressWarnings("serial")
private LinkedHashMap<String, DependencyLocations> _cache = new LinkedHashMap<String, DependencyLocations>() {
@Override
protected boolean removeEldestEntry(Map.Entry<String,DependencyLocations> eldest) {
return (size() > _maxSize);
}
};
public synchronized DependencyLocations get(final Map<String, Object> conf, final String topologyId, final AdvancedFSOps ops, String stormRoot) {
//Only go off of the topology id for now.
DependencyLocations dl = _cache.get(topologyId);
if (dl == null) {
_cache.put(topologyId, new DependencyLocations(conf, topologyId, ops, stormRoot));
dl = _cache.get(topologyId);
}
return dl;
}
public synchronized void clear() {
_cache.clear();
}
}
static final DepLRUCache DEP_LOC_CACHE = new DepLRUCache();
public static List<String> getDependencyLocationsFor(final Map<String, Object> conf, final String topologyId, final AdvancedFSOps ops, String stormRoot) throws IOException {
return DEP_LOC_CACHE.get(conf, topologyId, ops, stormRoot).get();
}
/**
* Get parameters for the class path of the worker process. Also used by the
* LogWriter.
* @param stormRoot the root dist dir for the topology
* @return the classpath for the topology as command line arguments.
* @throws IOException on any error.
*/
protected List<String> getClassPathParams(final String stormRoot) throws IOException {
final String stormJar = ConfigUtils.supervisorStormJarPath(stormRoot);
final List<String> dependencyLocations = getDependencyLocationsFor(_conf, _topologyId, _ops, stormRoot);
final String workerClassPath = getWorkerClassPath(stormJar, dependencyLocations);
List<String> classPathParams = new ArrayList<>();
classPathParams.add("-cp");
classPathParams.add(workerClassPath);
return classPathParams;
}
/**
* Get a set of java properties that are common to both the log writer and the worker processes.
* These are mostly system properties that are used by logging.
* @return a list of command line options
*/
protected List<String> getCommonParams() {
final String workersArtifacts = ConfigUtils.workerArtifactsRoot(_conf);
String stormLogDir = ConfigUtils.getLogDir();
String log4jConfigurationFile = getWorkerLoggingConfigFile();
List<String> commonParams = new ArrayList<>();
commonParams.add("-Dlogging.sensitivity=" + OR((String) _topoConf.get(Config.TOPOLOGY_LOGGING_SENSITIVITY), "S3"));
commonParams.add("-Dlogfile.name=worker.log");
commonParams.add("-Dstorm.home=" + OR(_stormHome, ""));
commonParams.add("-Dworkers.artifacts=" + workersArtifacts);
commonParams.add("-Dstorm.id=" + _topologyId);
commonParams.add("-Dworker.id=" + _workerId);
commonParams.add("-Dworker.port=" + _port);
commonParams.add("-Dstorm.log.dir=" + stormLogDir);
commonParams.add("-Dlog4j.configurationFile=" + log4jConfigurationFile);
commonParams.add("-DLog4jContextSelector=org.apache.logging.log4j.core.selector.BasicContextSelector");
commonParams.add("-Dstorm.local.dir=" + _conf.get(Config.STORM_LOCAL_DIR));
return commonParams;
}
protected int getMemOnHeap(WorkerResources resources) {
int memOnheap = 0;
if (resources != null && resources.is_set_mem_on_heap() &&
resources.get_mem_on_heap() > 0) {
memOnheap = (int) Math.ceil(resources.get_mem_on_heap());
} else {
// set the default heap memory size for supervisor-test
memOnheap = Utils.getInt(_topoConf.get(Config.WORKER_HEAP_MEMORY_MB), 768);
}
return memOnheap;
}
protected List<String> getWorkerProfilerChildOpts(int memOnheap) {
List<String> workerProfilerChildopts = new ArrayList<>();
if (Utils.getBoolean(_conf.get(Config.WORKER_PROFILER_ENABLED), false)) {
workerProfilerChildopts = substituteChildopts(_conf.get(Config.WORKER_PROFILER_CHILDOPTS), memOnheap);
}
return workerProfilerChildopts;
}
/**
* Return a if it is not null, otherwise return b.
* @param a the preferred value
* @param b the fallback value
* @return the first of a and b that is not null
*/
protected <V> V OR(V a, V b) {
return a == null ? b : a;
}
protected String javaCmd(String cmd) {
String ret = null;
String javaHome = System.getenv().get("JAVA_HOME");
if (StringUtils.isNotBlank(javaHome)) {
ret = javaHome + Utils.FILE_PATH_SEPARATOR + "bin" + Utils.FILE_PATH_SEPARATOR + cmd;
} else {
ret = cmd;
}
return ret;
}
/**
* Create the command to launch the worker process
* @param memOnheap the on heap memory for the worker
* @param stormRoot the root dist dir for the topology
* @param jlp java library path for the topology
* @return the command to run
* @throws IOException on any error.
*/
private List<String> mkLaunchCommand(final int memOnheap, final String stormRoot,
final String jlp) throws IOException {
final String javaCmd = javaCmd("java");
final String stormOptions = ConfigUtils.concatIfNotNull(System.getProperty("storm.options"));
final String stormConfFile = ConfigUtils.concatIfNotNull(System.getProperty("storm.conf.file"));
final String workerTmpDir = ConfigUtils.workerTmpRoot(_conf, _workerId);
List<String> classPathParams = getClassPathParams(stormRoot);
List<String> commonParams = getCommonParams();
List<String> commandList = new ArrayList<>();
//Log Writer Command...
commandList.add(javaCmd);
commandList.addAll(classPathParams);
commandList.addAll(substituteChildopts(_topoConf.get(Config.TOPOLOGY_WORKER_LOGWRITER_CHILDOPTS)));
commandList.addAll(commonParams);
commandList.add("org.apache.storm.LogWriter"); //The LogWriter in turn launches the actual worker.
//Worker Command...
commandList.add(javaCmd);
commandList.add("-server");
commandList.addAll(commonParams);
commandList.addAll(substituteChildopts(_conf.get(Config.WORKER_CHILDOPTS), memOnheap));
commandList.addAll(substituteChildopts(_topoConf.get(Config.TOPOLOGY_WORKER_CHILDOPTS), memOnheap));
commandList.addAll(substituteChildopts(OR(
_topoConf.get(Config.TOPOLOGY_WORKER_GC_CHILDOPTS),
_conf.get(Config.WORKER_GC_CHILDOPTS)), memOnheap));
commandList.addAll(getWorkerProfilerChildOpts(memOnheap));
commandList.add("-Djava.library.path=" + jlp);
commandList.add("-Dstorm.conf.file=" + stormConfFile);
commandList.add("-Dstorm.options=" + stormOptions);
commandList.add("-Djava.io.tmpdir=" + workerTmpDir);
commandList.addAll(classPathParams);
commandList.add("org.apache.storm.daemon.worker");
commandList.add(_topologyId);
commandList.add(_supervisorId);
commandList.add(String.valueOf(_port));
commandList.add(_workerId);
return commandList;
}
@Override
public void launch() throws IOException {
_type.assertFull();
LOG.info("Launching worker with assignment {} for this supervisor {} on port {} with id {}", _assignment,
_supervisorId, _port, _workerId);
String logPrefix = "Worker Process " + _workerId;
ProcessExitCallback processExitCallback = new ProcessExitCallback(logPrefix);
_exitedEarly = false;
final WorkerResources resources = _assignment.get_resources();
final int memOnheap = getMemOnHeap(resources);
final String stormRoot = ConfigUtils.supervisorStormDistRoot(_conf, _topologyId);
final String jlp = javaLibraryPath(stormRoot, _conf);
List<String> commandList = mkLaunchCommand(memOnheap, stormRoot, jlp);
Map<String, String> topEnvironment = new HashMap<String, String>();
@SuppressWarnings("unchecked")
Map<String, String> environment = (Map<String, String>) _topoConf.get(Config.TOPOLOGY_ENVIRONMENT);
if (environment != null) {
topEnvironment.putAll(environment);
}
topEnvironment.put("LD_LIBRARY_PATH", jlp);
LOG.info("Launching worker with command: {}. ", Utils.shellCmd(commandList));
String workerDir = ConfigUtils.workerRoot(_conf, _workerId);
launchWorkerProcess(commandList, topEnvironment, logPrefix, processExitCallback, new File(workerDir));
}
}
|
[
"\"STORM_CONF_DIR\"",
"\"STORM_CONF_DIR\"",
"\"STORM_EXT_CLASSPATH\""
] |
[] |
[
"STORM_EXT_CLASSPATH",
"STORM_CONF_DIR"
] |
[]
|
["STORM_EXT_CLASSPATH", "STORM_CONF_DIR"]
|
java
| 2 | 0 | |
exkaldirt/base.py
|
# coding=utf-8
#
# Yu Wang (University of Yamanashi)
# Apr, 2021
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import queue
import subprocess
import numpy as np
import sys
import threading
import multiprocessing
from multiprocessing import RawValue
import ctypes
import time
import random
import datetime
import random
from collections import namedtuple
import glob
from exkaldirt.version import version
from exkaldirt.utils import *
#from version import version
#from utils import *
class Info:
'''
An object that defines some parameters of ExKaldi-RT.
'''
def __init__(self):
self.__timeout = 1800
self.__timescale = 0.01
self.__max_socket_buffer_size = 10000
# Check Kaldi root directory and ExKaldi-RT tool directory
self.__find_ctool_root()
# Get the float floor
self.__epsilon = self.__get_floot_floor()
def __find_ctool_root(self):
'''Look for the ExKaldi-RT C++ command root path.'''
self.__kaldi_root = None
if "KALDI_ROOT" in os.environ.keys():
self.__kaldi_root = os.environ["KALDI_ROOT"]
else:
cmd = "which copy-matrix"
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if out == b'':
print( "Warning: Kaldi root directory was not found automatically. " + \
"Module, exkaldirt.feature and exkaldirt.decode, are unavaliable."
)
else:
out = out.decode().strip()
# out is a string like "/yourhome/kaldi/src/bin/copy-matrix"
self.__kaldi_root = os.path.dirname( os.path.dirname( os.path.dirname(out)) )
if self.__kaldi_root is None:
self.__cmdroot = None
else:
decoder = glob.glob( os.path.join(self.__kaldi_root,"src","exkaldirtcbin","exkaldi-online-decoder") )
tools = glob.glob( os.path.join(self.__kaldi_root,"src","exkaldirtcbin","cutils.*.so") )
if len(decoder) == 0 or len(tools) == 0:
print("Warning: ExKaldi-RT C++ source files have not been compiled sucessfully. " + \
"Please consult the Installation in github: https://github.com/wangyu09/exkaldi-rt ." + \
"Otherwise, the exkaldi.feature and exkaldi.decode modules are not available."
)
self.__cmdroot = None
else:
self.__cmdroot = os.path.join(self.__kaldi_root,"src","exkaldirtcbin")
def __get_floot_floor(self):
'''Get the float floor value.'''
if self.__cmdroot is None:
return 1.19209e-07
else:
sys.path.append( self.__cmdroot )
try:
import cutils
except ModuleNotFoundError:
raise Exception("ExKaldi-RT Pybind library have not been compiled sucessfully. " + \
"Please consult the Installation in github: https://github.com/wangyu09/exkaldi-rt .")
return cutils.get_float_floor()
@property
def VERSION(self):
return version
@property
def CMDROOT(self):
return self.__cmdroot
@property
def KALDI_ROOT(self):
return self.__kaldi_root
@property
def TIMEOUT(self):
return self.__timeout
@property
def TIMESCALE(self):
return self.__timescale
@property
def EPSILON(self):
return self.__epsilon
@property
def SOCKET_RETRY(self):
'''Maximum number of times to resend a packet if it is lost.'''
return 10
@property
def MAX_SOCKET_BUFFER_SIZE(self):
return self.__max_socket_buffer_size
def set_MAX_SOCKET_BUFFER_SIZE(self,size:int):
assert isinstance(size,int) and size > 4
self.__max_socket_buffer_size = size
def set_TIMEOUT(self,value):
assert isinstance(value,int) and value > 0, "TIMEOUT must be an int value."
self.__timeout = value
def set_TIMESCALE(self,value):
assert isinstance(value,float) and 0 < value < 1.0, "TIMESCALE should be a float value in (0,1)."
self.__timescale = value
# Instantiate this object.
info = Info()
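########################################
# The following is an illustrative sketch, not part of the original module.
# It shows how the global `info` object defined above can be tuned before
# building a pipeline. The helper name `_example_configure_info` is hypothetical.
def _example_configure_info():
  # Wait at most 10 minutes for blocked get() calls (default is 1800 s).
  info.set_TIMEOUT(600)
  # Poll idle PIPEs every 20 ms instead of the default 10 ms.
  info.set_TIMESCALE(0.02)
  # Enlarge the socket buffer used for remote transmission.
  info.set_MAX_SOCKET_BUFFER_SIZE(20000)
  return info.TIMEOUT, info.TIMESCALE, info.MAX_SOCKET_BUFFER_SIZE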
class ExKaldiRTBase:
'''
Base class of ExKaldi-RT.
'''
# Object Counter
OBJ_COUNTER = 0
def __init__(self,name=None):
# Give a unique ID to this object.
self.__objid = ExKaldiRTBase.OBJ_COUNTER
ExKaldiRTBase.OBJ_COUNTER += 1
# Name it
self.__naming(name=name)
def __naming(self,name=None):
'''
Name it.
'''
if name is None:
name = self.__class__.__name__
else:
assert isinstance(name,str) and len(name) > 0, f"_name_ must be a string but got: {name}."
#assert " " not in name, "_name_ can not include space."
self.__name = name
@property
def objid(self):
return self.__objid
@property
def name(self):
return self.__name + f"[{self.__objid}]"
@property
def basename(self):
return self.__name
########################################
class Packet:
'''
Packet objects are used to hold various stream data, such as audio streams, features and probabilities.
These data are processed by Components and passed through PIPEs.
Only four kinds of data are supported: int, float, str and np.ndarray.
'''
def __init__(self,items,cid,idmaker,mainKey=None):
assert isinstance(items,dict), f"_items_ must be a dict object."
self.__data = {}
# Set items
if mainKey is not None:
assert mainKey in items.keys()
self.__mainKey = mainKey
for key,value in items.items():
self.add(key,value)
else:
self.__mainKey = None
for key,value in items.items():
self.add(key,value)
if self.__mainKey is None:
self.__mainKey = key
# Set chunk id
assert isinstance(cid,int) and cid >= 0
self.__cid = cid
assert isinstance(idmaker,int)
self.__idmaker = idmaker
@property
def cid(self):
return self.__cid
@property
def idmaker(self):
return self.__idmaker
@property
def mainKey(self):
return self.__mainKey
def __getitem__(self,key=None):
assert key in self.__data.keys()
return self.__data[key]
def add(self,key,data,asMainKey=False):
'''
Add one record. If the key already exists in the Packet, replace its record; otherwise append the new record.
'''
# Verify key name
assert isinstance(key,str), "_key_ must be a string."
assert " " not in key and key.strip() != "", "_key_ can not include space."
# Verify value
if isinstance(data,int):
data = np.int16(data)
elif isinstance(data,float):
data = np.float32(data)
elif isinstance(data,(np.signedinteger,np.floating)):
pass
elif isinstance(data,np.ndarray):
assert len(data.shape) in [1,2]
assert 0 not in data.shape, "Invalid data."
data = data.copy()
elif isinstance(data,str):
assert data != ""
else:
raise Exception("Unsupported data type")
self.__data[key] = data
if asMainKey:
self.__mainKey = key
def encode(self)->bytes:
'''
Encode packet.
'''
result = b""
#Encode class name
result += ( self.__class__.__name__.encode() + b" " )
# Encode idmaker and cid
result += uint_to_bytes(self.idmaker, length=4)
result += uint_to_bytes(self.cid, length=4)
# If this is not an empty packet
if self.mainKey is not None:
# Encode main key
result += (self.mainKey.encode() + b" ")
# Encode data
for key,value in self.__data.items():
# Encode key
result += key.encode() + b" "
if isinstance( value,(np.signedinteger,np.floating) ):
bvalue = element_to_bytes( value )
flag = b"E"
elif isinstance(value,np.ndarray):
if len(value.shape) == 1:
bvalue = vector_to_bytes( value )
flag = b"V"
else:
bvalue = matrix_to_bytes( value )
flag = b"M"
elif isinstance(value,str):
bvalue = value.encode()
flag = b"S"
else:
raise Exception("Unsupported data type.")
result += ( flag + uint_to_bytes( len(bvalue),length=4 ) + bvalue )
return result
@classmethod
def decode(cls,bstr):
'''
Generate a packet object.
'''
with BytesIO(bstr) as sp:
# Read class name
className = read_string( sp )
# Read chunk ID
idmaker = uint_from_bytes( sp.read(4) )
cid = uint_from_bytes( sp.read(4) )
# Read main key
mainKey = read_string( sp )
result = {}
# If this is not an empty packet
if mainKey != "":
# Read data
while True:
key = read_string(sp)
if key == "":
break
flag = sp.read(1).decode()
if flag == "E":
size = uint_from_bytes( sp.read(4) )
data = element_from_bytes( sp.read(size) )
elif flag == "V":
size = uint_from_bytes( sp.read(4) )
data = vector_from_bytes( sp.read(size) )
elif flag == "M":
size = uint_from_bytes( sp.read(4) )
data = matrix_from_bytes( sp.read(size) )
elif flag == "S":
size = uint_from_bytes( sp.read(4) )
data = sp.read(size).decode()
else:
raise Exception(f"Unknown flag: {flag}")
result[ key ] = data
# otherwise, this is an empty packet
else:
mainKey = None
return globals()[className](items=result,cid=cid,idmaker=idmaker,mainKey=mainKey)
def keys(self):
return self.__data.keys()
def values(self):
return self.__data.values()
def items(self):
return self.__data.items()
def is_empty(self):
return len(self.keys()) == 0
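########################################
# Illustrative sketch (not part of the original module): a minimal use of the
# Packet API defined above, building a packet that holds a feature matrix,
# adding an extra record, then running an encode/decode round trip.
# The helper name `_example_packet_roundtrip` is hypothetical.
def _example_packet_roundtrip():
  feat = np.random.randn(10,13).astype(np.float32)
  packet = Packet(items={"data":feat}, cid=0, idmaker=0, mainKey="data")
  # add() converts a Python int to np.int16 before storing it.
  packet.add("frameNums", 10)
  bstr = packet.encode()
  restored = Packet.decode(bstr)
  assert restored.mainKey == "data"
  assert set(restored.keys()) == {"data","frameNums"}
  return restored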
# ENDPOINT is a special packet.
class Endpoint(Packet):
def __init__(self,cid,idmaker,items={},mainKey=None):
super().__init__(items,cid,idmaker,mainKey)
def is_endpoint(obj):
'''
If this is Endpoint, return True.
'''
return isinstance(obj, Endpoint)
# Standard output lock
stdout_lock = threading.Lock()
def print_(*args,**kwargs):
with stdout_lock:
print(*args,**kwargs)
########################################
mark = namedtuple("Mark",["silent","active","terminated","wrong","stranded","endpoint","inPIPE","outPIPE"])(
0,1,2,3,4,5,6,7,)
# silent : PIPE is unavailable until it is activated.
# active | stranded : There might be new packets appended to it later.
# wrong | terminated : Can not add new packets to the PIPE but can still get packets from it.
class PIPE(ExKaldiRTBase):
'''
PIPE is used to connect Components and pass Packets.
It is a first-in-first-out queue.
It is designed to exchange data and state between multiple processes.
Note that we will forcibly:
1. remove consecutive Endpoint flags.
2. discard the head packet if it is an Endpoint flag.
'''
def __init__(self,name=None):
# Initialize state and name
super().__init__(name=name)
# Set a cache to pass data
self.__cache = queue.Queue()
self.__cacheSize = 0
# Flags used to communicate between different components
self.__state = mark.silent
self.__inlocked = False
self.__outlocked = False
self.__last_added_endpoint = False
self.__firstPut = 0.0
self.__lastPut = 0.0
self.__firstGet = 0.0
self.__lastGet = 0.0
self.__lastID = (-1,-1)
self.__time_stamp = time.time()
# Password to access this PIPE
self.__password = random.randint(0,100)
# Callback functions
self.__callbacks = []
def state_is_(self,*m) -> bool:
return self.__state in m
def __shift_state_to_(self,m):
assert m in mark
self.__state = m
self.__time_stamp = time.time()
@property
def state(self):
return self.__state
@property
def timestamp(self):
return self.__time_stamp
#############
# Lock input or output port
#############
def is_inlocked(self)->bool:
return self.__inlocked
def is_outlocked(self)->bool:
return self.__outlocked
def lock_in(self)->int:
'''
Lock the input port of this PIPE.
'''
if self.is_inlocked():
return None
self.__inlocked = True
return self.__password
def lock_out(self)->int:
'''
Lock the output port of this PIPE.
'''
if self.is_outlocked():
return None
self.__outlocked = True
return self.__password
def release_in(self,password):
if self.is_inlocked:
if password == self.__password:
self.__inlocked = False
else:
print_(f"{self.name}: Wrong password to release input port!")
def release_out(self,password):
if self.is_outlocked:
if password == self.__password:
self.__outlocked = False
else:
print_(f"{self.name}: Wrong password to release output port!")
#############
# Some operations
#############
def clear(self):
assert not self.state_is_(mark.active), f"{self.name}: Can not clear an active PIPE."
# Clear
#size = self.size()
#for i in range(size):
# self.__cache.get()
self.__cache.queue.clear()
def reset(self):
'''
Do:
1. clear data,
2. reset state to silent,
3. reset endpoint and time information.
Do not:
1. reset input lock and output lock flags.
2. reset the callbacks.
'''
if self.state_is_(mark.silent):
return None
assert not (self.state_is_(mark.active) or self.state_is_(mark.stranded)), \
f"{self.name}: Can not reset a active or stranded PIPE."
# Clear cache
self.clear()
# Reset state
self.__shift_state_to_(mark.silent)
# A flag to remove consecutive ENDPOINTs or a head ENDPOINT
self.__last_added_endpoint = False
# flags to report time points
self.__firstPut = 0.0
self.__lastPut = 0.0
self.__firstGet = 0.0
self.__lastGet = 0.0
def activate(self):
'''
State: silent -> active
'''
if not self.state_is_(mark.active):
assert self.state_is_(mark.silent,mark.stranded)
self.__shift_state_to_(mark.active)
def kill(self):
'''
Kill this PIPE with state: wrong.
'''
if not self.state_is_(mark.wrong):
assert self.state_is_(mark.active) or self.state_is_(mark.stranded)
self.__shift_state_to_(mark.wrong)
def stop(self):
'''
Stop this PIPE with state: terminated.
'''
if not self.state_is_(mark.terminated):
assert self.state_is_(mark.active) or self.state_is_(mark.stranded)
# Append an endpoint flag
if not self.__last_added_endpoint:
self.__cache.put( Endpoint(cid=self.__lastID[0]+1,idmaker=self.__lastID[1]) )
self.__cacheSize += 1
self.__last_added_endpoint = True
# Shift state
self.__shift_state_to_(mark.terminated)
def pause(self):
if not self.state_is_(mark.stranded):
assert self.state_is_(mark.active), f"{self.name}: Can only pause active PIPE."
self.__shift_state_to_(mark.stranded)
def size(self):
'''
Get the size.
'''
return self.__cacheSize
def is_empty(self)->bool:
'''
If there is no data in the PIPE, return True.
'''
return self.__cacheSize == 0
def get(self,password=None,timeout=info.TIMEOUT)->Packet:
'''
Pop a packet from head.
Can get packet from: active, wrong, terminated PIPE.
Can not get packet from: silent and stranded PIPE.
'''
if self.state_is_(mark.silent,mark.stranded):
print_( f"Warning, {self.name}: Failed to get packet in PIPE. PIPE state is or silent or stranded." )
return False
#print( "arrived here 1" )
assert not (self.state_is_(mark.silent) or self.state_is_(mark.stranded)), \
f"{self.name}: Can not get packet from silent or stranded PIPE."
# If PIPE is active and output port is locked
if self.state_is_(mark.active) and self.is_outlocked():
if password is None:
raise Exception(f"{self.name}: Output of PIPE is clocked. Unlock or give the password to access it.")
elif password != self.__password:
raise Exception(f"{self.name}: Wrong password to access the PIPE.")
#print( "arrived here 2:",self.__cache.qsize() )
packet = self.__cache.get(timeout=timeout)
# Record time stamp
if self.__firstGet == 0.0:
self.__firstGet = datetime.datetime.now()
self.__lastGet = datetime.datetime.now()
#print( "arrived here 3" )
# Return
self.__cacheSize -= 1
return packet
def put(self,packet,password=None):
'''
Push a new packet to tail.
Note that we will remove consecutive Endpoints.
Can put packets to: silent, active PIPE.
Can not put packet to: wrong, terminated and stranded PIPE.
If this is a silent PIPE, activate it automatically.
'''
if self.state_is_(mark.wrong,mark.terminated,mark.stranded):
print_( f"{self.name}: Failed to put packet in PIPE. PIPE state is not active or silent." )
return False
# If input port is locked
if self.is_inlocked():
if password is None:
raise Exception(f"{self.name}: Input of PIPE is clocked. Unlock or give the password to access it.")
elif password != self.__password:
raise Exception(f"{self.name}: Wrong password to access the PIPE.")
if self.state_is_(mark.silent):
self.__shift_state_to_(mark.active)
assert isinstance(packet,Packet), f"{self.name}: Only Packet can be appended in PIPE."
# record time stamp
if self.__firstPut == 0.0:
self.__firstPut = datetime.datetime.now()
self.__lastPut = datetime.datetime.now()
# remove consecutive endpoint flags and run callbacks
if is_endpoint(packet):
if not self.__last_added_endpoint:
self.__cache.put(packet)
self.__last_added_endpoint = True
self.__cacheSize += 1
self.__lastID = (packet.cid,packet.idmaker)
elif not packet.is_empty():
print_("Warning: An endpoint Packet has been discarded, even though it is not empty.")
else:
self.__cache.put(packet)
self.__cacheSize += 1
self.__last_added_endpoint = False
self.__lastID = (packet.cid,packet.idmaker)
for func in self.__callbacks:
func(packet)
return True
def to_list(self,mapFunc=None)->list:
'''
Convert PIPE to lists divided by Endpoint.
Only terminated and wrong PIPE can be converted.
'''
assert self.state_is_(mark.terminated) or self.state_is_(mark.wrong), \
f"{self.name}: Only terminated or wrong PIPE can be converted to list."
# Check map function
if mapFunc is None:
mapFunc = lambda x:x[x.mainKey]
else:
assert callable(mapFunc)
size = self.size()
result = []
partial = []
for i in range(size):
packet = self.__cache.get()
if is_endpoint(packet):
if not packet.is_empty():
partial.append( mapFunc(packet) )
if len(partial) > 0:
result.append( partial )
partial = []
elif not packet.is_empty():
partial.append( mapFunc(packet) )
if len(partial)>0:
result.append( partial )
return result[0] if len(result) == 1 else result
def report_time(self):
'''
Report time information.
'''
keys = ["name",]
values = [self.name,]
for name in ["firstPut","lastPut","firstGet","lastGet"]:
value = getattr(self, f"_{type(self).__name__}__{name}")
if value != 0.0:
keys.append(name)
values.append(value)
return namedtuple("TimeReport",keys)(*values)
def callback(self,func):
'''
Add a callback function that is executed when a new packet is appended to the PIPE.
If _func_ is None, clear the callback functions.
'''
assert self.state_is_(mark.silent)
if func is None:
self.__callbacks.clear()
else:
assert callable(func)
self.__callbacks.append( func )
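########################################
# Illustrative sketch (not part of the original module): the PIPE life cycle
# described above. put() activates a silent PIPE, stop() appends an Endpoint
# and marks it terminated, and to_list() drains a terminated PIPE.
# The helper name `_example_pipe_lifecycle` is hypothetical.
def _example_pipe_lifecycle():
  pipe = PIPE(name="DemoPIPE")
  for i in range(3):
    pipe.put( Packet(items={"data":np.float32(i)}, cid=i, idmaker=0) )
  pipe.stop()   # append an Endpoint and shift the state to terminated
  assert pipe.state_is_(mark.terminated)
  # The default map function extracts the main-key value of each packet.
  return pipe.to_list()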
class NullPIPE(PIPE):
def __init__(self,name=None):
super().__init__(name=name)
def clear(self):
return None
def reset(self):
if self.state_is_(mark.silent):
return None
assert not (self.state_is_(mark.active) or self.state_is_(mark.stranded)), \
f"{self.name}: Can not reset a active or stranded PIPE."
# Reset state
self.__shift_state_to_(mark.silent)
# A flag to remove consecutive ENDPOINTs or a head ENDPOINT
def size(self):
return 0
def is_empty(self)->bool:
return True
def get(self,password=None,timeout=info.TIMEOUT)->Packet:
raise Exception("Null PIPE can not return packet.")
def put(self,packet,password=None):
raise Exception("Null PIPE can not storage packet.")
def to_list(self,mapFunc=None)->list:
raise Exception("Null PIPE can not convert to list.")
def report_time(self):
raise Exception("Null PIPE can not report time info.")
def callback(self,func):
raise Exception("Null PIPE can not add callback functions.")
def is_nullpipe(pipe):
'''
If this is a NullPIPE, return True.
'''
return isinstance(pipe,NullPIPE)
class Component(ExKaldiRTBase):
'''
Components are used to process Packets.
A Component can only link to one input PIPE and has one output PIPE.
'''
def __init__(self,oKey="data",name=None):
# Initial state and name
super().__init__(name=name)
# Define input and output PIPE
# Input PIPE need to be linked
self.__inPIPE = None
self.__inPassword = None
self.__outPIPE = PIPE(name=f"The output PIPE of "+self.name)
self.__outPassword = self.__outPIPE.lock_in() # Lock the in-port of output PIPE
# Each component has a core process to run a function to handle packets.
self.__coreThread = None
# If need to redirect the input PIPE
# We will stop the core process firstly and then link a new input PIPE and restart core process.
self.__redirect_flag = False
# process over flag
self.__core_thread_over = False
# The key
if isinstance(oKey,str):
self.__oKey = (oKey,)
else:
assert isinstance(oKey,(tuple,list))
for i in oKey:
assert isinstance(i,str)
self.__oKey = tuple(oKey)
self.__iKey = None
@property
def iKey(self):
return self.__iKey
@property
def oKey(self):
return self.__oKey
def reset(self):
'''
Clear and reset Component.
'''
if self.coreThread is None:
return None
elif self.coreThread.is_alive():
raise Exception(f"{self.name}: Component is active and can not reset. Please stop it firstly.")
else:
self.__coreThread = None
self.__outPIPE.reset()
if not self.__inPIPE.state_is_(mark.silent):
self.__inPIPE.reset()
self.__core_thread_over = False
self.__redirect_flag = False
@property
def coreThread(self)->threading.Thread:
'''
Get the core thread.
'''
return self.__coreThread
@property
def inPIPE(self)->PIPE:
return self.__inPIPE
@property
def outPIPE(self)->PIPE:
return self.__outPIPE
def link(self,inPIPE:PIPE,iKey=None):
assert isinstance(inPIPE,PIPE)
if iKey is not None:
assert isinstance(iKey,str)
self.__iKey = iKey
# Release
if self.coreThread is not None:
assert not self.coreThread.is_alive(), f"{self.name}: Can not redirect a new input PIPE when the component is running."
if inPIPE == self.__inPIPE:
return None
# Release the original input PIPE
self.__inPIPE.release_out(password=self.__inPassword)
#
assert not inPIPE.is_outlocked(), "The output port of PIPE has already been locked. Please release it firstly."
# Lock out port of this input PIPE
self.__inPIPE = inPIPE
self.__inPassword = inPIPE.lock_out() # Lock the output port of PIPE
def start(self,inPIPE:PIPE=None,iKey=None):
'''
Start running a process to handle Packets in inPIPE.
'''
# If this is a silent component
if self.coreThread is None:
if inPIPE is None:
if self.__inPIPE is None:
raise Exception(f"{self.name}: Please give the input PIPE.")
else:
# If input PIPE has been linked
inPIPE = self.__inPIPE
else:
# Link (or redirect) the input PIPE
self.link( inPIPE,iKey )
# Activate the output PIPE
self.__outPIPE.activate()
# Try to activate input PIPE
if inPIPE.state_is_(mark.silent):
inPIPE.activate()
# Run core process
self.__coreThread = self._create_thread(func=self.__core_thread_loop_wrapper)
# If this is not silent component
elif self.coreThread.is_alive():
# If this component is stranded
if self.__outPIPE.state_is_(mark.stranded):
## If do not need to redirect
if inPIPE is None or inPIPE.objid == self.__inPIPE.objid:
self.__inPIPE.activate()
self.__outPIPE.activate()
## If need to redirect input PIPE
else:
# Close the core process
self.__redirect_flag = True
self.wait()
self.__redirect_flag = False
# Link the new input PIPE
self.link(inPIPE,iKey)
# Activate
self.__outPIPE.activate()
# Run core process
if inPIPE.state_is_(mark.silent):
inPIPE.activate()
# Run core process
self.__coreThread = self._create_thread(func=self.__core_thread_loop_wrapper)
else:
raise Exception(f"{self.name}: Can only start a silent or restart a stranded Component.")
def _create_thread(self,func):
coreThread = threading.Thread(target=func)
coreThread.setDaemon(True)
coreThread.start()
return coreThread
def decide_state(self):
assert (not self.inPIPE.state_is_(mark.silent)) and (not self.outPIPE.state_is_(mark.silent)), \
"Can not decide state because the input PIPE or output PIPE has not been activated."
#print("Start to decide....")
# If input and output PIPE have the same state
if self.inPIPE.state == self.outPIPE.state:
#print("Debug: 1")
return mark.inPIPE, self.inPIPE.state
# firstly check whether there is wrong state
# if there is, terminate input and output PIPE instantly
if self.inPIPE.state_is_(mark.wrong):
if not self.outPIPE.state_is_(mark.terminated):
self.outPIPE.kill()
#print("Debug: 2")
return mark.inPIPE, mark.wrong
elif self.outPIPE.state_is_(mark.wrong):
if not self.inPIPE.state_is_(mark.terminated):
self.inPIPE.kill()
#print("Debug: 3")
return mark.outPIPE, mark.wrong
else:
# if output PIPE is terminated
# also terminate input PIPE instantly
if self.outPIPE.state_is_(mark.terminated):
self.inPIPE.stop()
#print("Debug: 4")
return mark.outPIPE, mark.terminated
else:
# in state might be: active, terminated, stranded
# out state might be: active, stranded
# and they do not have the same state
if self.inPIPE.state_is_(mark.active):
# the output state must be stranded
if self.inPIPE.timestamp > self.outPIPE.timestamp:
self.outPIPE.activate()
#print("Debug: 5")
return mark.inPIPE, mark.active
else:
self.inPIPE.pause()
#print("Debug: 6")
return mark.outPIPE, mark.stranded
elif self.inPIPE.state_is_(mark.terminated):
if self.outPIPE.state_is_(mark.active):
#print("Debug: 7")
return mark.inPIPE, mark.terminated
else:
#print("Debug: 8")
return mark.outPIPE, mark.stranded
else:
# the output state must be active
if self.inPIPE.timestamp > self.outPIPE.timestamp:
self.outPIPE.pause()
#print("Debug: 9")
return mark.inPIPE, mark.stranded
else:
self.inPIPE.activate()
#print("Debug: 10")
return mark.outPIPE, mark.active
def decide_action(self):
'''
A standard function to decide how to get a packet from inPIPE according to the states of inPIPE and outPIPE.
This function will return:
1. True -> It is OK to get a packet from inPIPE.
2. False -> Can not get a new packet because of an error or other reasons.
3. None -> No packet is available.
This function may change the states of inPIPE and outPIPE.
'''
timecost = 0
while True:
master, state = self.decide_state()
#print( master, state )
if state == mark.active:
if self.inPIPE.is_empty():
time.sleep(info.TIMESCALE)
timecost += info.TIMESCALE
if timecost > info.TIMEOUT:
print(f"{self.name}: Timeout!")
self.inPIPE.kill()
self.outPIPE.kill()
return False
else:
continue
else:
return True
elif state == mark.wrong:
return False
elif state == mark.stranded:
time.sleep( info.TIMESCALE )
continue
elif state == mark.terminated:
if master == mark.outPIPE:
return False
else:
if self.inPIPE.is_empty():
return None
else:
return True
def core_loop(self):
raise Exception(f"{self.name}: Please implement the core_loop function.")
def __core_thread_loop_wrapper(self):
self.__core_thread_over = False
print_(f"{self.name}: Start...")
try:
self.core_loop()
except Exception as e:
if not self.inPIPE.state_is_(mark.wrong,mark.terminated):
self.inPIPE.kill()
if not self.outPIPE.state_is_(mark.wrong,mark.terminated):
self.outPIPE.kill()
raise e
else:
if not self.inPIPE.state_is_(mark.wrong,mark.terminated):
self.inPIPE.stop()
if not self.outPIPE.state_is_(mark.wrong,mark.terminated):
self.outPIPE.stop()
finally:
print_(f"{self.name}: Stop!")
self.__core_thread_over = True
def stop(self):
'''
Terminate this component normally.
Note that we do not terminate the core thread by this function.
We hope the core thread can be terminated in a mild way.
'''
# Stop input PIPE
assert self.__inPIPE is not None
self.__inPIPE.stop()
def kill(self):
'''
Terminate this component with state: wrong.
It means errors occurred somewhere.
Note that we do not kill the core thread by this function.
We hope the core thread can be terminated in a mild way.
'''
# Kill input PIPE
assert self.__inPIPE is not None
self.__inPIPE.kill()
def pause(self):
'''
Pause the Component.
'''
# Pause input PIPE
assert self.__inPIPE is not None
self.__inPIPE.pause()
def wait(self):
'''
Wait until the core thread is finished
'''
if self.__coreThread is None:
raise Exception(f"{self.name}: Component has not been started.")
else:
self.__coreThread.join()
#while not self.__core_thread_over:
# time.sleep(info.TIMESCALE)
#self.__coreThread.terminate()
#self.__coreThread.join()
def get_packet(self):
'''
Get packet from input PIPE
'''
assert self.__inPIPE is not None
return self.__inPIPE.get(password=self.__inPassword)
def put_packet(self,packet):
self.__outPIPE.put(packet,password=self.__outPassword)
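########################################
# Illustrative sketch (not part of the original module): a minimal custom
# Component following the core_loop pattern implied by decide_action(),
# get_packet() and put_packet() above. It scales the main-key record of every
# packet and forwards Endpoints unchanged. `_ScaleComponent` is a hypothetical
# class used only for illustration; it assumes the incoming records are numeric.
class _ScaleComponent(Component):
  def __init__(self,scale=2.0,oKey="data",name=None):
    super().__init__(oKey=oKey,name=name)
    self.__scale = scale
  def core_loop(self):
    while True:
      action = self.decide_action()
      if action is True:
        packet = self.get_packet()
        if (not is_endpoint(packet)) and (not packet.is_empty()):
          iKey = self.iKey if self.iKey is not None else packet.mainKey
          packet.add( self.oKey[0], packet[iKey]*self.__scale, asMainKey=True )
        self.put_packet(packet)
      else:
        # False -> an error occurred; None -> input exhausted. Stop either way.
        break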
class Chain(ExKaldiRTBase):
'''
Chain is a container to easily manage the sequential Component-PIPEs.
'''
def __init__(self,name=None):
# Initial state and name
super().__init__(name=name)
# A container to hold components
self.__chain = []
self.__inPIPE_Pool = []
self.__outPIPE_Pool = []
# Component name -> Index
self.__name2id = {}
self.__id = 0
def add(self,node,inPIPE=None,iKey=None):
'''
Add a new component or joint into chain.
'''
# Verify chain's state
# Only allow adding new node to a silent chain.
for pipe in self.__outPIPE_Pool:
assert pipe.state_is_(mark.silent), f"{self.name}: Chain has been activated. Can not add new components."
assert isinstance(node,(Component,Joint))
# 1. if input PIPE is not specified
if inPIPE is None:
# 1.1. if node is a component
if isinstance(node,Component):
# 1.1.1 if this component has already been linked to an input PIPE
if node.inPIPE is not None:
# 1.1.1.1 if the input PIPE is one of PIPEs in outPIPE pool,
# remove the cache; no need to take a backup of this inPIPE
if node.inPIPE in self.__outPIPE_Pool:
self.__outPIPE_Pool.remove( node.inPIPE )
# 1.1.1.2 if the input PIPE is an external PIPE ( not included in the chain ),
# we need to add this input PIPE in Pool to take a backup of this PIPE
else:
self.__inPIPE_Pool.append( node.inPIPE )
# store the output PIPE in the pool
self.__outPIPE_Pool.append( node.outPIPE )
# 1.1.2 if the input PIPE has not been linked. We will try to link it automatically.
else:
# 1.1.2.1 if output PIPE pool is empty
if len(self.__outPIPE_Pool) == 0:
raise Exception(f"No pipe is avaliable in poll. We expect this component should be linked an input PIPE in advance: {node.name}.")
# 1.1.2.2 if output PIPE pool is not empty
else:
assert len(self.__outPIPE_Pool) == 1, \
f"More than one output port was found in chain input pool. Please specify the input PIPE of this component: {node.name}."
node.link( self.__outPIPE_Pool[0], iKey=iKey )
# take a backup
self.__outPIPE_Pool[0] = node.outPIPE
# 1.2 if node is a joint
else:
# 1.2.1 if the input PIPE has already been linked
if node.inNums > 0:
# for pipe existed in outPIPE Pool, remove it
# or take a backup
for pipe in node.inPIPE:
if pipe in self.__outPIPE_Pool:
self.__outPIPE_Pool.remove( pipe )
else:
self.__inPIPE_Pool.append( pipe )
for pipe in node.outPIPE:
self.__outPIPE_Pool.append( pipe )
else:
if len(self.__outPIPE_Pool) == 0:
raise Exception(f"We expect this component should be linked an input PIPE in advance: {node.name}.")
else:
node.link( self.__outPIPE_Pool )
self.__outPIPE_Pool = list( node.outPIPE )
# 2. if the input PIPE is specified
else:
# 2.1 if node is component
if isinstance(node,Component):
# 2.1.1 if the input PIPE is one of PIPEs in outPIPE pool,
# remove the cache; no need to take a backup of this inPIPE
# if the node has already been linked
assert isinstance(inPIPE, PIPE)
if node.inPIPE is not None:
if node.inPIPE != inPIPE:
print_( f"Warning: Component {node.name} has already been linked to another PIPE. We will try to redirect it." )
if inPIPE in self.__outPIPE_Pool:
self.__outPIPE_Pool.remove( inPIPE )
# 2.1.2 if the input PIPE is an external PIPE ( not included in the chain ),
# we need to add this input PIPE in Pool to take a backup of this PIPE
else:
self.__inPIPE_Pool.append( inPIPE )
# link input PIPE
node.link( inPIPE, iKey=iKey )
# store the output PIPE in the pool
self.__outPIPE_Pool.append( node.outPIPE )
# Joint
else:
assert isinstance(inPIPE,(tuple,list))
inPIPE = list(set(inPIPE))
if node.inNums > 0:
print_( f"Warning: Joint {node.name} has already been linked to another PIPE. We will try to redirect it." )
for pipe in inPIPE:
assert isinstance(pipe, PIPE)
if pipe in self.__outPIPE_Pool:
self.__outPIPE_Pool.remove( pipe )
else:
self.__inPIPE_Pool.append( pipe )
node.link( inPIPE )
# store the output PIPEs
self.__outPIPE_Pool.extend( node.outPIPE )
# Remove repeated inPIPE and outPIPE
self.__inPIPE_Pool = list(set(self.__inPIPE_Pool))
self.__outPIPE_Pool = list(set(self.__outPIPE_Pool))
# Storage and numbering this node
self.__chain.append( node )
self.__name2id[ node.name ] = (node.basename,self.__id)
self.__id += 1
def get_node(self,name=None,ID=None):
'''
Get a component or joint by its name or index number.
Args:
_name_: the name of Component.
_ID_: the index number of Component.
'''
assert not (name is None and ID is None), f"{self.name}: Both <name> and <ID> are None."
if name is not None:
assert ID is None
if name in self.__name2id.keys():
ID = self.__name2id[name][1]
return self.__chain[ID]
else:
for basename,ID in self.__name2id.values():
if basename == name:
return self.__chain[ID]
raise Exception(f"{self.name}: No such Node: {name}")
else:
assert isinstance(ID,int)
return self.__chain[ID]
def start(self):
#
assert len(self.__chain) > 0
assert len(self.__inPIPE_Pool) > 0
for pipe in self.__outPIPE_Pool:
assert pipe.state_is_(mark.silent,mark.stranded)
# Run all components and joints
for node in self.__chain:
node.start()
def stop(self):
assert len(self.__chain) > 0
# Stop
for pipe in self.__inPIPE_Pool:
#print("here 1",pipe.state)
pipe.stop()
#if pipe.state_is_(mark.active,mark.stranded):
# print("here 2")
# pipe.stop()
def kill(self):
assert len(self.__chain) > 0
# Stop
for node in self.__chain:
node.kill()
def pause(self):
assert len(self.__chain) > 0
# Stop
for pipe in self.__inPIPE_Pool:
if pipe.state_is_(mark.active):
pipe.pause()
def wait(self):
assert len(self.__chain) > 0
for node in self.__chain:
node.wait()
@property
def inPIPE(self):
return self.__inPIPE_Pool[0] if len(self.__inPIPE_Pool) == 1 else self.__inPIPE_Pool
@property
def outPIPE(self):
return self.__outPIPE_Pool[0] if len(self.__outPIPE_Pool) == 1 else self.__outPIPE_Pool
def reset(self):
'''
Reset the Chain.
We will reset all components in it.
'''
for pipe in self.__outPIPE_Pool:
assert not pipe.state_is_(mark.active,mark.stranded)
# Reset all nodes
for node in self.__chain:
node.reset()
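########################################
# Illustrative sketch (not part of the original module): driving a Chain with
# the hypothetical _ScaleComponent defined earlier. We feed a few packets into
# a source PIPE, stop it, run the chain, and collect the scaled results from
# the chain's output PIPE. `_example_run_chain` is a hypothetical helper.
def _example_run_chain():
  source = PIPE(name="SourcePIPE")
  for i in range(5):
    source.put( Packet(items={"data":np.float32(i)}, cid=i, idmaker=0) )
  source.stop()   # no more input will arrive
  comp = _ScaleComponent(scale=10.0)
  comp.link(source)
  chain = Chain(name="DemoChain")
  chain.add(comp)
  chain.start()
  chain.wait()    # block until the component has drained the source PIPE
  return chain.outPIPE.to_list()   # e.g. [0.0, 10.0, 20.0, 30.0, 40.0]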
############################################
# Joint is designed to merge or separate pipelines
# so that the chain can execute multiple tasks
############################################
class Joint(ExKaldiRTBase):
'''
Joints are used to process Packets.
Joints can link to multiple input PIPEs and output PIPEs.
'''
def __init__(self,jointFunc,outNums=1,name=None):
# Initial state and name
super().__init__(name=name)
# Define input and output PIPE
# Input PIPE need to be linked
self.__inPIPE_Pool = []
self.__inPassword_Pool = []
self.__outPIPE_Pool = []
self.__outPassword_Pool = []
assert isinstance(outNums,int) and outNums > 0
self.__inNums = 0
self.__outNums = outNums
for i in range(outNums):
self.__outPIPE_Pool.append( PIPE( name=f"{i}th output PIPE of "+self.basename ) )
self.__outPassword_Pool.append( self.__outPIPE_Pool[i].lock_in() ) # Lock the in-port of output PIPE
# Each joint has a core process to run a function to handle packets.
self.__coreThread = None
# If need to redirect the input PIPE
# We will stop the core process firstly and then link a new input PIPE and restart core process.
self.__redirect_flag = False
# process-over flag, used to terminate the core thread forcibly
self.__core_thread_over = False
# define a joint function
assert callable(jointFunc)
self.__joint_function = jointFunc
@property
def inNums(self):
return self.__inNums
@property
def outNums(self):
return self.__outNums
def reset(self):
'''
Clear and reset joint.
'''
if self.coreThread is None:
return None
elif self.coreThread.is_alive():
raise Exception(f"{self.name}: Component is active and can not reset. Please stop it firstly.")
else:
self.__coreThread = None
for pipe in self.__outPIPE_Pool:
pipe.reset()
self.__core_thread_over = False
@property
def coreThread(self)->threading.Thread:
'''
Get the core thread.
'''
return self.__coreThread
@property
def inPIPE(self)->list:
return tuple(self.__inPIPE_Pool)
@property
def outPIPE(self)->list:
return tuple(self.__outPIPE_Pool)
def link(self,inPIPE):
'''
Add a new inPIPE into input PIPE pool.
Or replace the input PIPE pool with a list of PIPEs.
'''
if self.coreThread is not None:
assert not self.coreThread.is_alive(), f"{self.name}: Can not redirect a new input PIPE when the joint is running."
# 1. If this is a list/tuple of PIPEs
if isinstance(inPIPE, (list,tuple)):
assert len(set(inPIPE)) == len(inPIPE)
# 1.1 release the input PIPEs in Pool
for i in range(self.__inNums):
self.__inPIPE_Pool[i].release_out(password=self.__inPassword_Pool[i])
# 1.2 storage new PIPEs
self.__inPassword_Pool = []
for pipe in inPIPE:
assert isinstance(pipe, PIPE)
self.__inPassword_Pool.append( pipe.lock_out() )
self.__inPIPE_Pool = inPIPE
self.__inNums = len(inPIPE)
else:
assert isinstance(inPIPE, PIPE)
assert not inPIPE.is_outlocked(), "The output port of PIPE has already been locked. Please release it firstly."
password = inPIPE.lock_out()
assert password is not None
self.__inPIPE_Pool.append( inPIPE )
self.__inPassword_Pool.append( password )
self.__inNums += 1
def start(self,inPIPE=None):
'''
Start running a process to handle Packets in inPIPE.
'''
# 1. If this is a silent joint
if self.coreThread is None:
if inPIPE is None:
assert self.__inNums > 0, f"{self.name}: No input PIPEs available."
else:
# Link (or redirect) the input PIPE
self.link( inPIPE )
# Activate the output PIPE
for pipe in self.__outPIPE_Pool:
pipe.activate()
# Run core process
for pipe in self.__inPIPE_Pool:
if pipe.state_is_(mark.silent):
pipe.activate()
# Run core process
self.__coreThread = self._create_thread(self.__core_thread_loop_wrapper)
# 2. If this is not silent component
elif self.coreThread.is_alive():
# 2.1 If this joint is stranded
if self.__outPIPE_Pool[0].state_is_(mark.stranded):
## Check whether it is necessary to redirect
needRedirect = False
if inPIPE is not None:
if isinstance(inPIPE,PIPE):
inPIPE = [inPIPE,]
else:
assert isinstance(inPIPE,(list,tuple))
inPIPE = list(set(inPIPE))
if len(inPIPE) != self.__inNums:
needRedirect = True
else:
inObjIDs = [ pipe.objid for pipe in self.__inPIPE_Pool ]
for pipe in inPIPE:
if pipe.objid not in inObjIDs:
needRedirect = True
break
##
if needRedirect is False:
for pipe in self.__inPIPE_Pool + self.__outPIPE_Pool:
pipe.activate()
## If need to redirect input PIPE
else:
# Close the core process
self.__redirect_flag = True
self.wait()
self.__redirect_flag = False
# Link the new input PIPE
self.link( inPIPE )
# Activate
for pipe in self.__outPIPE_Pool:
pipe.activate()
# Activate
for pipe in self.__inPIPE_Pool:
if pipe.state_is_(mark.silent):
pipe.activate()
# Run core process
self.__coreThread = self._create_thread(self.__core_thread_loop_wrapper)
else:
raise Exception(f"{self.name}: Can only start a silent or restart a stranded Component.")
def _create_thread(self,func):
coreThread = threading.Thread(target=func)
coreThread.setDaemon(True)
coreThread.start()
return coreThread
def decide_state(self):
# Check whether there is silent PIPE
states = set()
for pipe in self.__inPIPE_Pool + self.__outPIPE_Pool:
states.add( pipe.state )
assert mark.silent not in states, "Can not decide state because some input or output PIPEs have not been activated."
# If all PIPEs are the same state
if len(states) == 1:
return None, states.pop()
# firstly check whether there is wrong state
# if there is, terminate all input and output PIPEs instantly
# in state might be: active, wrong, terminated, stranded
# out state might be: active, wrong, terminated, stranded
if mark.wrong in states:
for pipe in self.__inPIPE_Pool + self.__outPIPE_Pool:
if not pipe.state_is_(mark.wrong,mark.terminated):
pipe.kill()
return None, mark.wrong
else:
# collect state flags
inStates = [ pipe.state for pipe in self.__inPIPE_Pool ]
outStates = [ pipe.state for pipe in self.__outPIPE_Pool ]
# in state might be: active, terminated, stranded
# out state might be: active, terminated, stranded
# if output PIPEs has "terminated"
if mark.terminated in outStates:
for pipe in self.__inPIPE_Pool + self.__outPIPE_Pool:
if not pipe.state_is_(mark.terminated):
pipe.stop()
return mark.outPIPE, mark.terminated
# in state might be: active, terminated, stranded
# out state might be: active, stranded
else:
# firstly, compare the latest active flag and stranded flag
strandedStamps = []
activeStamps = []
for pipe in self.__inPIPE_Pool + self.__outPIPE_Pool:
if pipe.state_is_(mark.stranded):
strandedStamps.append( pipe.timestamp )
elif pipe.state_is_(mark.active):
activeStamps.append( pipe.timestamp )
# if no stranded flag existed
if len(strandedStamps) == 0:
# if terminated in in PIPEs
if mark.terminated in inStates:
return mark.inPIPE, mark.terminated
# if all flags are active
else:
return None, mark.active
# if no active flag existed
elif len(activeStamps) == 0:
return None, mark.stranded
# if active and stranded flag existed at the same time
else:
# if stranded flag is later than active flag
if max(strandedStamps) > max(activeStamps):
for pipe in self.__inPIPE_Pool + self.__outPIPE_Pool:
if pipe.state_is_(mark.active):
pipe.pause()
return None, mark.stranded
# if active flag is later than stranded flag
else:
for pipe in self.__inPIPE_Pool + self.__outPIPE_Pool:
if pipe.state_is_(mark.stranded):
pipe.activate()
if mark.terminated in inStates:
return mark.inPIPE, mark.terminated
else:
return None, mark.active
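# Editor's note (descriptive summary, not in the original source): decide_state()
# resolves the pooled PIPE flags with the following precedence:
#   1. all PIPEs share one state         -> report that state directly
#   2. any PIPE is wrong                 -> kill the rest, report wrong
#   3. any output PIPE is terminated     -> stop the rest, report (outPIPE, terminated)
#   4. no stranded flag                  -> (inPIPE, terminated) if an input ended, else active
#   5. no active flag                    -> stranded
#   6. mixed active/stranded             -> the newer flag wins; the others are paused or re-activated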
def __core_thread_loop_wrapper(self):
self.__core_thread_over = False
print_(f"{self.name}: Start...")
try:
self.core_loop()
except Exception as e:
for pipe in self.__inPIPE_Pool + self.__outPIPE_Pool:
if not pipe.state_is_(mark.wrong,mark.terminated):
pipe.kill()
raise e
else:
for pipe in self.__inPIPE_Pool + self.__outPIPE_Pool:
if not pipe.state_is_(mark.wrong,mark.terminated):
pipe.stop()
finally:
print_(f"{self.name}: Stop!")
self.__core_thread_over = True
def core_loop(self):
timecost = 0
idmaker = None
buffer = [ None for i in range(self.__inNums) ]
while True:
###########################################
# Decide state
############################################
master,state = self.decide_state()
###########################################
# Decide whether to get a packet or to terminate
############################################
if state == mark.active:
# If joint is active, skip to picking step
pass
elif state == mark.wrong:
# If joint is wrong, break loop and terminate
break
elif state == mark.stranded:
# If joint is stranded, wait (or terminate)
time.sleep( info.TIMESCALE )
if self.__redirect_flag == True:
break
continue
else:
# if joint is terminated
## if the output PIPE is terminated, break the loop and terminate
if master == mark.outPIPE:
break
## get packet
else:
## If packets are exhausted in (at least) one PIPE, stop the joint and terminate
over = False
for pipe in self.__inPIPE_Pool:
if pipe.state_is_(mark.terminated) and pipe.is_empty():
for pipe in self.__inPIPE_Pool + self.__outPIPE_Pool:
pipe.stop()
over = True
break
if over:
break
## else continue to pick packets
pass # -> pick packets
###########################################
# Picking step
############################################
# fill input buffer with packets
for i, buf in enumerate(buffer):
if buf is None:
if self.__inPIPE_Pool[i].is_empty():
## skip one time
continue
else:
## Get a packet
packet = self.get_packet(i)
## Verify the idmaker
## Only match packets whose chunk IDs were made by the same idmaker.
if not is_endpoint( packet ):
if idmaker is None:
idmaker = packet.idmaker
else:
assert idmaker == packet.idmaker, "id makers of all input PIPEs do not match."
## store the packet
buffer[i] = packet
# If buffer has not been filled fully
if None in buffer:
time.sleep( info.TIMESCALE )
timecost += info.TIMESCALE
## If timeout, break loop and terminate
if timecost > info.TIMEOUT:
print(f"{self.name}: Timeout!")
for pipe in self.__inPIPE_Pool + self.__outPIPE_Pool:
pipe.kill()
break
## try to fill again
else:
continue
## If buffer has been filled fully
else:
#### Match the chunk id
cids = [ x.cid for x in buffer ]
maxcid = max( cids )
for i,pack in enumerate(buffer):
if pack.cid != maxcid:
buffer[i] = None
##### If chunk IDs do not match, only keep the latest packets
##### Remove mismatched packets and try to fill again
if None in buffer:
continue
##### If chunk ids matched
else:
### If all packets are empty (very likely when they are endpoints).
numsEndpoint = sum( [ int(is_endpoint(pack)) for pack in buffer ] )
assert numsEndpoint == 0 or numsEndpoint == self.__inNums
numsEmpty = sum( [ int(pack.is_empty()) for pack in buffer ] )
if numsEmpty == self.__inNums:
if is_endpoint(buffer[0]):
for i in range( self.__outNums ):
self.put_packet( i, Endpoint(cid=maxcid,idmaker=idmaker) )
else:
for i in range( self.__outNums ):
self.put_packet( i, Packet(items={},cid=maxcid,idmaker=idmaker) )
else:
###### Do joint operation according to specified rules.
inputs = [ dict(pack.items()) for pack in buffer ]
outputs = self.__joint_function( inputs )
###### Verify results
if isinstance(outputs,dict):
outputs = [ outputs, ]
else:
assert isinstance(outputs,(tuple,list))
for output in outputs:
assert isinstance(output,dict)
assert len(outputs) == self.__outNums
###### Append results into output PIPEs
if is_endpoint(buffer[0]):
for i in range(self.__outNums):
self.put_packet( i, Endpoint( items=outputs[i], cid=maxcid, idmaker=idmaker) )
else:
for i in range(self.__outNums):
self.put_packet( i, Packet( items=outputs[i], cid=maxcid, idmaker=idmaker) )
###### clear buffer and fill again
for i in range(self.__inNums):
buffer[i] = None
continue
def stop(self):
'''
Terminate this component normally.
Note that this function does not terminate the core thread directly.
We expect the core thread to finish gracefully on its own.
'''
# Stop input PIPE
for pipe in self.__inPIPE_Pool:
pipe.stop()
def kill(self):
'''
Terminate this component with state: wrong.
It means errors occurred somewhere.
Note that this function does not kill the core thread directly.
We expect the core thread to finish gracefully on its own.
'''
# Kill input PIPE
for pipe in self.__inPIPE_Pool:
pipe.kill()
def pause(self):
'''
Pause the Component.
'''
# Pause input PIPEs
for pipe in self.__inPIPE_Pool:
pipe.pause()
def wait(self):
'''
Wait until the core thread is finished.
'''
if self.__coreThread is None:
raise Exception(f"{self.name}: Component has not been started.")
else:
self.__coreThread.join()
#while not self.__core_thread_over:
# time.sleep(info.TIMESCALE)
#self.__coreThread.terminate()
def get_packet(self,inID):
'''
Get packet from input PIPE.
'''
assert len(self.__inPIPE_Pool) > 0
return self.__inPIPE_Pool[inID].get(password=self.__inPassword_Pool[inID])
def put_packet(self,outID,packet):
self.__outPIPE_Pool[outID].put(packet,password=self.__outPassword_Pool[outID])
def dynamic_display(pipe,mapFunc=None):
'''
This is a tool for debugging or testing.
'''
assert isinstance(pipe,PIPE), "<pipe> should be a PIPE object."
assert not pipe.is_outlocked(), "The out port of <pipe> is locked. Please release it first."
assert not pipe.state_is_(mark.silent), "<pipe> is not activated."
if pipe.state_is_(mark.stranded):
print_( "Warning: the PIPE is stranded!" )
def default_function(pac):
out = []
for key,value in pac.items():
if isinstance(value,np.ndarray):
#temp = " ".join( [ str(v) for v in value[:10] ] )
#out.append( f"{key}: [ {temp} ...] " )
out.append( f"{key}: {value} " )
else:
out.append( f"{key}: {value} " )
out = "\n".join(out)
print_(out)
if mapFunc is None:
mapFunc = default_function
else:
assert callable( mapFunc )
# active, stranded, wrong, terminated
timecost = 0
while True:
if pipe.state_is_(mark.active):
if pipe.is_empty():
time.sleep( info.TIMESCALE )
timecost += info.TIMESCALE
if timecost > info.TIMEOUT:
raise Exception( f"{pipe.name}: Time out!" )
continue
else:
#print( "debug:", pipe.is_outlocked() )
packet = pipe.get()
elif pipe.state_is_(mark.stranded):
time.sleep( info.TIMESCALE )
continue
else:
if pipe.is_empty():
break
else:
#print( "debug:", pipe.is_outlocked() )
packet = pipe.get()
if is_endpoint( packet ):
if not packet.is_empty():
print_()
mapFunc( packet )
print_(f"----- Endpoint -----")
continue
else:
print_()
mapFunc( packet )
lastState = "terminated" if pipe.state_is_(mark.terminated) else "wrong"
print_( f"Final state of this PIPE: {lastState} \n Time report: {pipe.report_time()}" )
def dynamic_run(target,inPIPE=None,items=["data"]):
print_("exkaldirt.base.dynamic_run has been removed from version 1.2.0. See exkaldirt.base.dynamic_display function.")
class ContextManager(ExKaldiRTBase):
'''
Context manager.
'''
def __init__(self,left,right,name=None):
super().__init__(name=name)
assert isinstance(left,int) and left >= 0
assert isinstance(right,int) and right >= 0
self.__left = left
self.__right = right
self.__buffer = None
@property
def left(self):
return self.__left
@property
def right(self):
return self.__right
def __compute_size(self,center):
assert center >= self.__left and center >= self.__right
self.__center = center
if self.__right > 0:
self.__width = self.__left + center + center
else:
self.__width = self.__left + center
self.__tail = self.__left + center + self.__right
def wrap(self, batch):
'''
Store a batch of frames (a matrix) and return the frames wrapped with left and right context.
If the right context > 0, this batch is buffered and the previous batch is returned,
so None is returned at the first step.
'''
assert isinstance(batch,np.ndarray) and len(batch.shape) == 2
assert 0 not in batch.shape
if self.__buffer is None:
frames, dim = batch.shape
self.__compute_size(frames)
self.__buffer = np.zeros([self.__width,dim],dtype=batch.dtype)
if self.__right == 0:
self.__buffer[self.__left:,:] = batch
return self.__buffer.copy()
else:
self.__buffer[-self.__center:,:] = batch
return None
else:
assert len(batch) == self.__center
if self.__right == 0:
self.__buffer[0:self.__left,:] = self.__buffer[ self.__center: ]
self.__buffer[self.__left:,:] = batch
return self.__buffer.copy()
else:
self.__buffer[ 0:-self.__center,:] = self.__buffer[ self.__center:,: ]
self.__buffer[ -self.__center:,:] = batch
return self.__buffer[0:self.__tail,:].copy()
def strip(self,batch):
assert isinstance(batch,np.ndarray) and len(batch.shape) == 2
assert batch.shape[0] == self.__tail
return batch[ self.__left: self.__left + self.__center ]
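# --- Editor's note: minimal usage sketch, not part of the original library. ---
# It only illustrates how ContextManager.wrap()/strip() pad a feature matrix with
# left/right context; the array sizes and the helper name are hypothetical.
def _context_manager_example():
    import numpy as np  # local import so the sketch is self-contained
    cm = ContextManager(left=2, right=0)
    batch = np.arange(12, dtype=np.float32).reshape(4, 3)  # 4 frames, feature dim 3
    wrapped = cm.wrap(batch)     # shape (2 + 4, 3): 2 zero context rows plus the batch
    center = cm.strip(wrapped)   # recovers the 4 original frames
    return wrapped.shape, center.shape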
|
[] |
[] |
[
"KALDI_ROOT"
] |
[]
|
["KALDI_ROOT"]
|
python
| 1 | 0 | |
pypi.wsgi
|
#!/usr/bin/python
import sys
import os
prefix = os.path.dirname(__file__)
sys.path.insert(0, prefix)
import cStringIO
import webui
import store
import config
import re
from functools import partial
store.keep_conn = True
CONFIG_FILE = os.environ.get("PYPI_CONFIG", os.path.join(prefix, 'config.ini'))
class Request:
def __init__(self, environ, start_response):
self.start_response = start_response
try:
length = int(environ.get('CONTENT_LENGTH', 0))
except ValueError:
length = 0
self.rfile = cStringIO.StringIO(environ['wsgi.input'].read(length))
self.wfile = cStringIO.StringIO()
self.config = config.Config(CONFIG_FILE)
self.status = None
self.headers = []
def set_status(self, status):
self.status = status
def send_response(self, code, message='no details available'):
self.status = '%s %s' % (code, message)
self.headers = []
def send_header(self, keyword, value):
self.headers.append((keyword, value))
def set_content_type(self, content_type):
self.send_header('Content-Type', content_type)
def end_headers(self):
self.start_response(self.status, self.headers)
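# Editor's note (descriptive comment, not in the original source): Request adapts the
# WSGI environ/start_response pair to the handler-style interface consumed by
# webui.WebUI below (send_response / send_header / end_headers plus rfile/wfile
# buffers); end_headers() is the point where start_response is finally invoked.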
class CacheControlMiddleware(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
def _start_response(status, headers, exc_info=None):
script = environ.get("SCRIPT_NAME", None)
if script in set(["/simple"]):
# Cache for a day in Fastly, but 10 minutes in browsers
headers += [
("Surrogate-Control", "max-age=86400"),
("Cache-Control", "max-age=600, public"),
]
elif script in set(["/packages"]):
if status[:3] in ["200", "304"]:
# Cache for a year
headers += [("Cache-Control", "max-age=31557600, public")]
else:
# Cache for an hour
headers += [("Cache-Control", "max-age=3600, public")]
elif script in set(["/mirrors", "/security"]):
# Cache these for a week
headers += [("Cache-Control", "max-age=604800, public")]
# http://www.gnuterrypratchett.com/
headers += [("X-Clacks-Overhead", "GNU Terry Pratchett")]
return start_response(status, headers, exc_info)
return self.app(environ, _start_response)
class SecurityHeaderMiddleware(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
def _start_response(status, headers, exc_info=None):
headers += [
("X-Frame-Options", "deny"),
("X-XSS-Protection", "1; mode=block"),
("X-Content-Type-Options", "nosniff"),
]
return start_response(status, headers, exc_info)
return self.app(environ, _start_response)
def debug(environ, start_response):
if environ['PATH_INFO'].startswith("/auth") and \
"HTTP_AUTHORIZATION" not in environ:
start_response("401 login",
[('WWW-Authenticate', 'Basic realm="foo"')])
return
start_response("200 ok", [('Content-type', 'text/plain')])
environ = environ.items()
environ.sort()
for k, v in environ:
yield "%s=%s\n" % (k, v)
return
def application(environ, start_response):
if "HTTP_AUTHORIZATION" in environ:
environ["HTTP_CGI_AUTHORIZATION"] = environ["HTTP_AUTHORIZATION"]
try:
r = Request(environ, start_response)
webui.WebUI(r, environ).run()
return [r.wfile.getvalue()]
except Exception, e:
import traceback;traceback.print_exc()
return ['Ooops, there was a problem (%s)' % e]
#application=debug
# Handle Caching at the WSGI layer
application = CacheControlMiddleware(application)
# Add some Security Headers to every response
application = SecurityHeaderMiddleware(application)
# pretend to be like the UWSGI configuration - set SCRIPT_NAME to the first
# part of the PATH_INFO if it's valid and remove that part from the PATH_INFO
def site_fake(app, environ, start_response):
PATH_INFO = environ['PATH_INFO']
m = re.match('^/(pypi|simple|daytime|serversig|mirrors|id|oauth|google_login|'
'security|packages|openid_login|openid_claim)(.*)', PATH_INFO)
if not m:
start_response("404 not found", [('Content-type', 'text/plain')])
return ['Not Found: %s' % PATH_INFO]
environ['SCRIPT_NAME'] = '/' + m.group(1)
environ['PATH_INFO'] = m.group(2)
return app(environ, start_response)
if __name__ == '__main__':
# very simple wsgi server so we can play locally
from wsgiref.simple_server import make_server
httpd = make_server('', 8000, partial(site_fake, application))
print "Serving on port 8000..."
httpd.serve_forever()
|
[] |
[] |
[
"PYPI_CONFIG"
] |
[]
|
["PYPI_CONFIG"]
|
python
| 1 | 0 | |
launcher.py
|
import os
import sys
import subprocess
import tkinter
from tkinter import ttk, N, E, W, S
from datetime import datetime
ENEMY_TEAM = os.environ.get('ENEMY_TEAM', 'jx:JXTeam')
root = tkinter.Tk()
root.title(u"jx_pong - pyponjp2019 ")
main = tkinter.Frame(root, padx=10, pady=8, bg='#00A6BC')
main.grid()
tkinter.Label(main, text='Entry JX Pong', bg='#00A6BC', fg="white", font=("Helvetica", 20, "bold")).grid(row=0, column=0)
form = tkinter.Frame(main, padx=10, pady=16)
form.grid()
tkinter.Label(form, text='github').grid(row=1, column=0)
github = tkinter.Entry(form)
github.grid(row=1, column=1)
output = tkinter.Label(main, text='\n', font=("Helvetica", 10), bg='#eeeeee')
output.grid(row=3, column=0, sticky=(W, E))
def write_result(proc):
result = proc.stdout.decode("utf8")
now = datetime.now().isoformat()
with open('result.txt', 'a') as f:
f.write(f'{now} {result}')
def on_click_start():
github_name = github.get()
proc = subprocess.run(
['bash', 'challenge.sh', github_name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
output['text'] = proc.stdout.decode("utf8") + proc.stderr.decode("utf8")
write_result(proc)
def on_click_manual():
proc = subprocess.run(
['pongpy', 'pongpy.teams.manual_team:ManualTeam', ENEMY_TEAM],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
output['text'] = proc.stdout.decode("utf8") + proc.stderr.decode("utf8")
write_result(proc)
start_btn = ttk.Button(form, text='Start', command=on_click_start)
start_btn.grid(row=2, column=1, sticky=E)
tkinter.Label(form, text='Manual').grid(row=3, column=0)
manual_btn = ttk.Button(form, text='Start', command=on_click_manual)
manual_btn.grid(row=4, column=1, sticky=E)
root.mainloop()
|
[] |
[] |
[
"ENEMY_TEAM"
] |
[]
|
["ENEMY_TEAM"]
|
python
| 1 | 0 | |
neurovault/celery.py
|
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'neurovault.settings')
nvcelery = Celery('neurovault')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
nvcelery.config_from_object('django.conf:settings')
nvcelery.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
from raven.contrib.django.raven_compat.models import client
from raven.contrib.celery import register_signal, register_logger_signal
# register a custom filter to filter out duplicate logs
register_logger_signal(client)
# hook into the Celery error handler
register_signal(client)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
server/web/tests/index.go
|
package tests
import (
"fmt"
"os"
"strings"
"github.com/bwmarrin/snowflake"
"github.com/snowlyg/helper/str"
"github.com/snowlyg/iris-admin/migration"
"github.com/snowlyg/iris-admin/server/cache"
"github.com/snowlyg/iris-admin/server/database"
"github.com/snowlyg/iris-admin/server/web"
"github.com/snowlyg/iris-admin/server/web/web_gin"
"github.com/snowlyg/iris-admin/server/web/web_iris"
"github.com/snowlyg/iris-admin/server/zap_server"
)
func BeforeTestMainGin(party func(wi *web_gin.WebServer), seed func(wi *web_gin.WebServer, mc *migration.MigrationCmd)) (string, *web_gin.WebServer) {
fmt.Println("+++++ before test +++++")
mysqlPwd := os.Getenv("mysqlPwd")
redisPwd := os.Getenv("redisPwd")
if strings.TrimSpace(mysqlPwd) != database.CONFIG.Password {
err := database.Remove()
if err != nil {
zap_server.ZAPLOG.Error(str.Join("删除数据库配置文件失败:", err.Error()))
}
}
if strings.TrimSpace(redisPwd) != cache.CONFIG.Password {
err := cache.Remove()
if err != nil {
zap_server.ZAPLOG.Error(str.Join("删除缓存配置文件失败:", err.Error()))
}
}
node, _ := snowflake.NewNode(1)
uuid := str.Join("gin", "_", node.Generate().String())
fmt.Printf("+++++ %s +++++\n\n", uuid)
web.CONFIG.System.DbType = "mysql"
web.InitWeb()
database.CONFIG.Dbname = uuid
database.CONFIG.Password = strings.TrimSpace(mysqlPwd)
database.CONFIG.LogMode = true
database.InitMysql()
wi := web_gin.Init()
party(wi)
web.StartTest(wi)
mc := migration.New()
// Add the v1 built-in module tables and seed data
fmt.Println("++++++ add model ++++++")
seed(wi, mc)
err := mc.Migrate()
if err != nil {
fmt.Printf("migrate get error [%s]", err.Error())
return uuid, nil
}
err = mc.Seed()
if err != nil {
fmt.Printf("seed get error [%s]", err.Error())
return uuid, nil
}
return uuid, wi
}
func BeforeTestMainIris(party func(wi *web_iris.WebServer), seed func(wi *web_iris.WebServer, mc *migration.MigrationCmd)) (string, *web_iris.WebServer) {
fmt.Println("+++++ before test +++++")
mysqlPwd := os.Getenv("mysqlPwd")
redisPwd := os.Getenv("redisPwd")
if strings.TrimSpace(mysqlPwd) != database.CONFIG.Password {
err := database.Remove()
if err != nil {
zap_server.ZAPLOG.Error(str.Join("删除数据库配置文件失败:", err.Error()))
}
}
if strings.TrimSpace(redisPwd) != cache.CONFIG.Password {
err := cache.Remove()
if err != nil {
zap_server.ZAPLOG.Error(str.Join("删除缓存配置文件失败:", err.Error()))
}
}
node, _ := snowflake.NewNode(1)
uuid := str.Join("iris", "_", node.Generate().String())
fmt.Printf("+++++ %s +++++\n\n", uuid)
web.CONFIG.System.DbType = "mysql"
web.InitWeb()
database.CONFIG.Dbname = uuid
database.CONFIG.Password = strings.TrimSpace(mysqlPwd)
database.CONFIG.LogMode = true
database.InitMysql()
wi := web_iris.Init()
party(wi)
web.StartTest(wi)
mc := migration.New()
// Add the v1 built-in module tables and seed data
fmt.Println("++++++ add model ++++++")
seed(wi, mc)
err := mc.Migrate()
if err != nil {
fmt.Printf("migrate get error [%s]", err.Error())
return uuid, nil
}
err = mc.Seed()
if err != nil {
fmt.Printf("seed get error [%s]", err.Error())
return uuid, nil
}
return uuid, wi
}
func AfterTestMain(uuid string, isDelDb bool) {
fmt.Println("++++++++ after test main ++++++++")
if isDelDb {
err := database.DorpDB(database.CONFIG.BaseDsn(), "mysql", uuid)
if err != nil {
text := str.Join("删除数据库 '", uuid, "' 错误: ", err.Error(), "\n")
zap_server.ZAPLOG.Error(text)
}
}
fmt.Println("++++++++ dorp db ++++++++")
db, err := database.Instance().DB()
if err != nil {
zap_server.ZAPLOG.Error(str.Join("获取数据库连接失败:", err.Error()))
}
if db != nil {
db.Close()
}
err = database.Remove()
if err != nil {
zap_server.ZAPLOG.Error(str.Join("删除数据库配置文件失败:", err.Error()))
}
err = cache.Remove()
if err != nil {
zap_server.ZAPLOG.Error(str.Join("failed to remove the cache config file: ", err.Error()))
}
err = web.Remove()
if err != nil {
zap_server.ZAPLOG.Error(str.Join("删除web配置文件失败:", err.Error()))
}
}
|
[
"\"mysqlPwd\"",
"\"redisPwd\"",
"\"mysqlPwd\"",
"\"redisPwd\""
] |
[] |
[
"redisPwd",
"mysqlPwd"
] |
[]
|
["redisPwd", "mysqlPwd"]
|
go
| 2 | 0 | |
ephys/ephysanalysis/PSCAnalyzer.py
|
"""
Analyze EPSCs or IPSCs
Or EPSPs and IPSPs...
This module provides the following analyses:
1. Amplitudes from a train
2. Paired pulse facilitation for pulse pairs, and the first pair in a train.
3. Current-voltage relationship in voltage clamp measured over a time window
The results of the analysis are stored in the class variable analysis_summary
Note: if the analyzer is called with update_regions set True, then traces will be
sent to cursor_plot to get start and end times. (this might be broken now - need to test)
"""
import sys
from pathlib import Path
import os #legacy
import scipy.signal
import pandas as pd
import lmfit
from collections import OrderedDict
from cycler import cycler
from itertools import cycle
import numpy as np
from . import acq4read
from . import metaarray as EM # need to use this version for Python 3
from ..tools import cursor_plot as CP
import matplotlib.pyplot as mpl
import matplotlib.colors
import seaborn as sns
import pylibrary.plotting.plothelpers as PH
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
from pyqtgraph.Point import Point
os.environ['QT_MAC_WANTS_LAYER'] = '1'
def make_key(pathname):
"""
Make a key string using the date, slice, cell and protocol from the path name
"""
p = pathname.parts
return(str('~'.join([p[i] for i in range(-4, 0)])))
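# Editor's note: illustrative example only; the acq4-style path below is hypothetical.
# make_key(Path('/data/2020.01.01_000/slice_000/cell_000/Stim_IO_001'))
# -> '2020.01.01_000~slice_000~cell_000~Stim_IO_001'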
class PSC_Fitter():
"""
Provide fitting functions for PSCs:
1. decay tau only
2. PSC full fit (1-exp(tau_rise))^4 * exp(tau_fall)
"""
def __init__(self):
pass # nothing to do
def _fcn_tau(self, params, x, data):
"""Model single exponential"""
v = params.valuesdict()
model = v['amp'] * np.exp(-x/v['tau_fall']) + v['DC']
return model - data
def fitTau(self):
# create a set of Parameters
params = lmfit.Parameters()
params.add('amp', value=self.ptreedata.param('Initial Fit Parameters').param('amp').value(), min=-self.dmax, max=self.dmax)
params.add('tau_fall', value=self.ptreedata.param('Initial Fit Parameters').param('taufall').value(), min=1e-4, max=1e-1)
params.add('DC', value=self.ptreedata.param('Initial Fit Parameters').param('DC').value(), min=-1e3, max=1e3)
t0 = self.T0.value()
t1 = self.T1.value()
it0 = int(t0/self.dt)
it1 = int(t1/self.dt)
if it0 > it1:
t = it0
it0 = it1
it1 = t
time_zero = int(self.time_zero/self.dt)
print('timezero: ', time_zero, self.dataX[time_zero])
# do fit, here with the default leastsq algorithm
minner = lmfit.Minimizer(self._fcn_tau, params, fcn_args=(self.dataX[it0:it1]-self.dataX[time_zero], self.dataY[it0:it1]))
self.fitresult = minner.minimize('leastsq')
# calculate final result
final = self.dataY[it0:it1] + self.fitresult.residual
# write error report
lmfit.report_fit(self.fitresult)
def _fcn_EPSC(self, params, x, data):
"""Model EPSC"""
v = params.valuesdict()
model = v['amp'] * (((1. - np.exp(-x/v['tau_rise']))**4.0)*np.exp(-x/v['tau_fall'])) + v['DC']
return model - data
def fitEPSC(self):
# create a set of Parameters
params = lmfit.Parameters()
params.add('amp', value=self.ptreedata.param('Initial Fit Parameters').param('amp').value(), min=-self.dmax, max=self.dmax)
params.add('tau_rise', value=self.ptreedata.param('Initial Fit Parameters').param('taurise').value(), min=1e-4, max=1e-1)
params.add('tau_fall', value=self.ptreedata.param('Initial Fit Parameters').param('taufall').value(), min=1e-4, max=1e-1)
params.add('DC', value=self.ptreedata.param('Initial Fit Parameters').param('DC').value(), min=-1e3, max=1e3)
dc = np.mean(self.dataY[0:10])
params.add('DC', value=dc, min=dc-dc*1, max=dc+dc*1)
t0 = self.T0.value()
t1 = self.T1.value()
it0 = int(t0/self.dt)
it1 = int(t1/self.dt)
if it0 > it1:
t = it0
it0 = it1
it1 = t
# do fit, here with the default leastsq algorithm
time_zero = int(self.time_zero/self.dt)
print('timezero: ', time_zero, self.dataX[time_zero])
print(self.dataX[it0:it1]-self.time_zero)
print(self.dataY[it0:it1])
minner = lmfit.Minimizer(self._fcn_EPSC, params, fcn_args=(self.dataX[it0:it1]-self.dataX[time_zero], self.dataY[it0:it1]))
self.fitresult = minner.minimize(method='least_squares', )
# calculate final result
final = self.dataY[it0:it1] + self.fitresult.residual
# write error report
lmfit.report_fit(self.fitresult)
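# --- Editor's note: standalone sketch of the PSC model fitted by PSC_Fitter above. ---
# Illustrative only; the parameter values are hypothetical and this helper is not part
# of the original module (it relies on the module-level numpy import).
def _epsc_model_sketch():
    t = np.linspace(0.0, 0.02, 200)  # 20 ms time base, in seconds
    amp, tau_rise, tau_fall, dc = -100e-12, 0.5e-3, 5e-3, 0.0
    # same functional form as PSC_Fitter._fcn_EPSC: a (1-exp)^4 rising envelope times an exponential decay
    i_model = amp * ((1.0 - np.exp(-t / tau_rise)) ** 4.0) * np.exp(-t / tau_fall) + dc
    return t, i_model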
class PSCAnalyzer():
def __init__(self, datapath, plot=True, update_regions=False):
"""
Analyze PSCs in a few different formats:
IO - a stimulus sequence with increasing stimulation current,
all collected at a single holding voltage
VDEP - a measurement of EPSCs across voltage, targeted at obtaining
an NMDA/AMPA current ratio from currents at +50 and -90 mV. Data may include
averaging of repeated trials.
PPF - Paired pulse facilitation over several intervals; may include repeated
trials
Parameters
----------
datapath : path to the data protocol (Path or string)
plot : boolean (default: True)
Flag to control plotting of the data
update_regions: Boolean (default: False)
A flag that forces the routines to plot data so that a time window for the
analysis can be defined and saved.
"""
self.datapath = datapath
self.AR = acq4read.Acq4Read() # make our own private version of the analysis and reader
self.plot = plot
self.db = None
self.db_filename = None
self.update_regions = update_regions
self.JunctionPotential = -8.0 * 1e-3 # junction potential for correction
self.NMDA_voltage = 0.050 # in V positive
self.AMPA_voltage = -0.0741 # in V - this is the Cl eq potential to minimize GABA interference
self.NMDA_delay = 0.050 # delay in s to make measurement
def setup(self, clamps=None, spikes=None, baseline=[0, 0.001]):
"""
Set up for the fitting
Parameters
----------
clamps : A datamodel structure (required)
Brings the data to the module. This usually will be a PatchEphys object.
spikes : A spikeAnalysis structure (optional)
Has information about which traces have spikes
Use this when analyzing events that may be contaminated by spikes
baseline : list (2 elements)
times over which baseline is measured (in seconds)
"""
if clamps is None:
raise ValueError("VC analysis requires defined clamps ")
self.Clamps = clamps
self.spikes = spikes
self.set_baseline_times(baseline)
self.analysis_summary = {} # init the result structure
def check_protocol(self, protocol):
"""
Verify that the protocol we are examining is complete.
Returns True or False
"""
return(self.AR.checkProtocol(protocol))
def read_database(self, filename):
"""
Read the database that will be used for analysis
The database is a pandas pickled file with columns
date, protocol, T0 and T1
Parameters
----------
filename : str or Path
The name of the database file (full path or file if in the current
working directory)
"""
self.db_filename = Path(filename)
if self.db_filename.is_file():
with(open(self.db_filename, 'rb')) as fh:
self.db = pd.read_pickle(fh, compression=None)
else:
self.db = pd.DataFrame(columns=['date', 'protocol', 'T0', 'T1'])
def update_database(self):
"""
Write the database
"""
if self.db is not None:
self.db.to_pickle(self.db_filename)
def measure_PSC(self, protocolName, plot=True, savetimes=False, ignore_important_flag=True):
"""
Direct the analysis
Uses the beginning of the protocol name to select which analysis to use
Parameters:
protocolName : str
Name of the protocol to analyze, underneath the datapath
plot : boolean (default: True)
Flag to plot data
"""
dp_s = str(self.datapath)
date, name, cellname, proto, sliceid = self.AR.file_cell_protocol(dp_s)
dk = list(self.AR.getIndex(dp_s).keys())
# if 'important' in dk:
# print(str(Path(date, name, cellname, proto)), self.AR.getIndex(dp_s)['important'])
# else:
# print('No important flag in dk')
# return False
self.AR.setProtocol(self.datapath) # define the protocol path where the data is
self.setup(clamps=self.AR)
self.read_database(f"{protocolName:s}.p")
if self.AR.getData(): # get that data.
print('Protocol important: ', self.AR.protocol_important)
if not self.AR.protocol_important and not ignore_important_flag:
return False
ok = False
if protocolName.startswith('Stim_IO'):
ok = self.analyze_IO()
elif protocolName.startswith('VC-EPSC_3'):
ok = self.analyze_VDEP()
elif protocolName.startswith('PPF'):
print('analyzing ppf')
ok = self.analyze_PPF()
if not ok:
print('Failed on protocol in IV: ', self.datapath, protocolName)
return False
if plot:
self.plot_vciv()
if savetimes:
date = make_key(self.datapath)
if date not in self.db['date'].tolist():
self.db.loc[len(self.db)] = [date, protocolName, self.T0, self.T1]
print('new date added')
else:
self.db.loc[date, 'date'] = date
self.db.loc[date, 'protocol'] = protocolName
self.db.loc[date, 'T0'] = self.T0
self.db.loc[date, 'T1'] = self.T1
print('old date data updated')
self.update_database()
# print('db head: ', self.db.head())
return True
else:
return False
def get_stimtimes(self):
"""
This should get the stimulus times. Right now, it does nothing
"""
pass
def set_baseline_times(self, baseline):
"""
baseline: 2-element list or numpy array
"""
if len(baseline) != 2:
raise ValueError('Baseline must be a 2-element array')
if isinstance(baseline, list):
baseline = np.array(baseline)
self.baseline = np.sort(baseline)
def get_baseline(self):
""" Return the mean values in the data over the baseline region.
"""
bl = self.mean_I_analysis(region=self.baseline, reps=[0])
return bl
def analyze_IO(self, rmpregion=[0., 0.05], twidth=0.05, deadwin=0.001, protocolName=None, device='Stim0'):
"""Analyze in input=output relationship for a specific driving device
"""
pulse_train = self.AR.getStim(device) # get the stimulus information
# stim dict in pulse_train will look like:
# {'start': [0.05, 0.1], 'duration': [0.0001, 0.0001],
# 'amplitude': [0.00025, 0.00025], 'npulses': [2], 'period': [0.05], 'type': ['pulseTrain']}
# try:
devicedata = self.AR.getDeviceData(device=device, devicename='command')
if devicedata is None:
print('No device data? name command, ', device)
return False
filekey = Path(make_key(self.datapath))
# check the db to see if we have parameters already
dfiles = self.db['date'].tolist()
if filekey in dfiles:
delay = self.db.loc[filekey, 'date']['T0']
t1 = self.db.loc[filekey, 'date']['T1']
width = t1-delay
else:
delay = 1.0*1e-3
width = 15.0*1e-3
self.sign = -1
stim_io = self.AR.sequence[(device, 'command.PulseTrain_amplitude')]
reps = self.AR.sequence[('protocol', 'repetitions')]
Stim_IO = np.tile(stim_io, len(reps)) # stimuli in order
self.analysis_summary[f'PSP_IO'] = [[]]*len(pulse_train['start']) # create space for amplitude results, per pulse
self.analysis_summary[f'psc_stim_amplitudes'] = [[]]*len(pulse_train['start']) # create space for amplitude results, per pulse
stimintvl = []
idat = [None]*len(pulse_train['start'])
for i in range(len(idat)): # across each of the pulses in the train
idat[i] = OrderedDict() # storage for data for each stimulus level
pdelay = pulse_train['start'][i] + delay
if i == 0 and self.update_regions: # if self.update_region is set, then use cursor plot to get the regions
rgn = self.set_region([pulse_train['start'][i], pulse_train['start'][i]+twidth], baseline=bl)
else: # normal operation, just use stock values
rgn = [delay, delay+width]
self.T0 = rgn[0] # kind of bogus
self.T1 = rgn[1]
region = np.array(rgn)+pulse_train['start'][i] # get region relative to start of this pulse
for j in range(len(self.AR.traces)): # for all traces
mi = self.AR.trace_index[j] # get index into marked traces then compute the min value minus the baseline
da = np.min(self.Clamps.traces['Time': region[0]:region[1]][j]) - np.mean(self.Clamps.traces['Time': rmpregion[0]:rmpregion[1]][j])
if Stim_IO[mi] not in list(idat[i].keys()):
idat[i][Stim_IO[mi]] = [da]
else:
idat[i][Stim_IO[mi]].append(da)
for j in range(len(self.AR.traces)):
mi = self.AR.trace_index[j]
idat[i][Stim_IO[mi]] = np.mean(idat[i][Stim_IO[mi]]) # replace with the mean value for that stimulus level within the protocol
self.analysis_summary[f'PSP_IO'][i] = self.sign*1e12*np.array([idat[i][k] for k in idat[i].keys()])
self.analysis_summary[f'psc_stim_amplitudes'][i] = 1e6*np.array([k for k in idat[i].keys()])
stimintvl.append(pulse_train['period'][0])
stim_dt = np.diff(pulse_train['start'])
# self.analysis_summary['psc_stim_amplitudes'] = 1e6*np.array(stim_io)
self.analysis_summary['psc_intervals'] = np.array(stimintvl)
self.analysis_summary['ppf_dt'] = np.array(stim_dt)
self.analysis_summary['stim_times'] = pulse_train['start']
self.analysis_summary['window'] = [self.T0, self.T1]
return True
def _clean_array(self, rgn):
"""
Just make sure that the rgn array is a list of two values
"""
if isinstance(rgn[0], list) or isinstance(rgn[0], np.ndarray):
rgn = [x[0] for x in rgn]
return rgn
def _compute_interval(self, x0: float=0., artifact_delay: float=0.0,
index: int=0, stim_intvl=list, max_width:float=25.,
pre_time:float=1.0e-3, pflag=False):
"""
Compute the interval over which to measure an event.
The interval cannot be longer than the interval to the next event.
x0 : float
starting time for the interval
artifact_delay: float
duration to remove from the start of the trace as the stimulus
artifact
index: int
index into the stim_intvl list
stim_intvl:
list of stimulus intervals, in order
max_width : float
width of the trace to return, starting at x0
pre_time : float
Time to clip at the end of the trace (in case the next stim is
not exactly where it is expected to be)
Returns
-------
2-element list of times [start, end], adjusted for the delays and widths.
"""
num_intervals = len(stim_intvl)
if index < num_intervals-1:
nxt_intvl = stim_intvl[index+1] - stim_intvl[index] # check interval sequence
max_w = np.min((nxt_intvl, max_width-pre_time))
if nxt_intvl > 0: # still ascending
t_stim = [x0+artifact_delay, x0+max_w-pre_time] # limit width if interval is
if pflag:
print('nxt>0: ', t_stim)
else:
t_stim = [x0+artifact_delay, x0+max_width-pre_time]
if pflag:
print('nxt < 0: ', t_stim)
else:
t_stim = [x0+artifact_delay, x0+max_width - pre_time]
if pflag:
print('last index: ', t_stim)
t_stim = self._clean_array(t_stim)
return t_stim
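# Editor's note: illustrative numbers only (hypothetical values, not from any dataset).
# With an ascending interval list, the measurement window is clipped so that it cannot
# outrun the next entry:
#   self._compute_interval(x0=0.05, artifact_delay=1.5e-3, index=0,
#                          stim_intvl=[0.05, 0.10], max_width=0.02, pre_time=1e-3)
#   -> [0.0515, 0.068]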
def analyze_PPF(self, rmpregion:list=[0., 0.045], twidth:float=0.02,
protocolName:str=None, device:str='Stim0',
measure_func:object=np.min):
"""
Analyze paired-pulse facilitation
Notes:
The PPF protocol always involves 2 pulses, the second of which varies in time.
Here, we compute the ratio between the 2 pulses for each time,
and also save clips of the data waveforms for plotting
stim dict in pulse_train will look like:
{'start': [0.05, 0.1], 'duration': [0.0001, 0.0001],
'amplitude': [0.00025, 0.00025],
'npulses': [2], 'period': [0.05],
'type': ['pulseTrain']}
Parameters
----------
rmpregion : 2 element list (default: [0., 0.045])
The region of the trace used to measure the resting membrane potential,
in seconds.
protocolName : str (default: None)
The name of the protocol (not used here)
device : str (default: 'Stim0')
The name of the stimulus device
"""
pulse_train = self.AR.getStim(device)
dd = self.AR.getDeviceData(device=device, devicename='command')
reps = self.AR.sequence[('protocol', 'repetitions')]
stim_I = [pulse_train['amplitude'][0]]
mode = 'PPF'
filekey = make_key(self.datapath)
# check the db to see if we have parameters already
dfiles = self.db['date'].tolist() # protocols matching our prefix
baseline = []
meani = []
stimamp = []
stimintvl = []
cmdv = []
self.sign = 1
self.set_baseline_times(rmpregion)
self.i_mean = []
self.i_mean = []
if not (device, 'command.PulseTrain_period') in self.AR.sequence.keys():
raise ValueError('Cannot find PulseTrain_period in stimulus command')
stim_dt = self.AR.sequence[(device, 'command.PulseTrain_period')]
Stim_Intvl = np.tile(stim_dt, len(reps)) # stimuli in order
self.analysis_summary[f'PPF'] = [[]]*len(stim_dt)
self.analysis_summary['iHold'] = []
self.analysis_summary['ppf_dt'] = [None]*len(stim_dt)
self.i_mean = []
ppf_traces_T1 = OrderedDict([(k, []) for k in stim_dt]) # control response for each dt
ppf_traces_R1 = OrderedDict([(k, []) for k in stim_dt]) # Response to the second stimulus at dt
ppf_traces_T2 = OrderedDict([(k, []) for k in stim_dt])
ppf_traces_R2 = OrderedDict([(k, []) for k in stim_dt])
ppf_dat = OrderedDict([(k, []) for k in stim_dt]) # calculated PPF for each trial.
num_intervals = len(Stim_Intvl)
dead_time =1.5e-3 # time before start of response measure
# f, axx = mpl.subplots(1,1)
for j in range(len(self.AR.traces)): # for all (accepted) traces
mi = self.AR.trace_index[j] # get index into marked/accepted traces then compute the min value minus the baseline
t_stim1 = self._compute_interval(x0=pulse_train['start'][0], artifact_delay=dead_time,
index=mi, stim_intvl=Stim_Intvl, max_width=twidth, pre_time=1e-3, pflag=False)
t_stim2 = self._compute_interval(x0=Stim_Intvl[mi] + pulse_train['start'][0], artifact_delay=dead_time,
index=mi, stim_intvl=Stim_Intvl, max_width=twidth, pre_time=1e-3, pflag=False)
self.T0 = t_stim2[0] # kind of bogus
self.T1 = t_stim2[1]
bl = np.mean(self.Clamps.traces['Time': rmpregion[0]:rmpregion[1]][j])
i_pp1 = self.Clamps.traces['Time': t_stim1[0]:t_stim1[1]][j] - bl # first pulse trace
tb_ref = self.Clamps.time_base[np.where((self.Clamps.time_base >= t_stim1[0]) & (self.Clamps.time_base < t_stim1[1]))]
i_pp2 = self.Clamps.traces['Time': t_stim2[0]:t_stim2[1]][j] - bl # second pulse trace
tb_p2= self.Clamps.time_base[np.where((self.Clamps.time_base >= t_stim2[0]) & (self.Clamps.time_base < t_stim2[1]))]
da1 = measure_func(i_pp1)
da2 = measure_func(i_pp2)
ppf_tr = da2/da1 # get facilitation for this trace and interval
ppf_dat[Stim_Intvl[mi]].append(ppf_tr) # accumulate
ppf_traces_T1[Stim_Intvl[mi]].append(tb_ref)
ppf_traces_R1[Stim_Intvl[mi]].append(i_pp1)
ppf_traces_T2[Stim_Intvl[mi]].append(tb_p2)
ppf_traces_R2[Stim_Intvl[mi]].append(i_pp2)
# print(np.min(tb_ref), np.max(tb_ref), np.min(tb_p2), np.max(tb_p2))
# plotWidget = pg.plot(title="traces")
# si = Stim_Intvl[mi]
# plotWidget.plot(ppf_traces_T1[si][-1], ppf_traces_R1[si][-1], pen='g')
# plotWidget.plot(ppf_traces_T2[si][-1], ppf_traces_R2[si][-1], pen='r')
# plotWidget.plot(tb_ref, np.ones_like(tb_ref)*da1, pen='b')
# plotWidget.plot(tb_p2, np.ones_like(tb_p2)*da2, pen='m')
# if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
# QtGui.QApplication.instance().exec_()
# axx.plot(tb_ref, i_pp1, 'k-')
# axx.plot(tb_p2, i_pp2, 'r-')
self.analysis_summary[f'PPF'] = ppf_dat
self.analysis_summary['PPF_traces_T1'] = ppf_traces_T1
self.analysis_summary['PPF_traces_R1'] = ppf_traces_R1
self.analysis_summary['PPF_traces_T2'] = ppf_traces_T2
self.analysis_summary['PPF_traces_R2'] = ppf_traces_R2
self.analysis_summary['psc_stim_amplitudes'] = np.array(stim_I)
self.analysis_summary['psc_intervals'] = np.array(stim_dt)
self.analysis_summary['stim_times'] = pulse_train['start']
self.analysis_summary['window'] = [self.T0, self.T1]
# fname = Path(self.datapath).parts
# fname = '/'.join(fname[-4:]).replace('_', '\_')
# f.suptitle(f"{fname:s}")
# mpl.show()
return True
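# Editor's note: worked example of the facilitation index computed above (hypothetical
# amplitudes). With measure_func=np.min, da1 = -150 pA for the first pulse and
# da2 = -225 pA for the second give ppf_tr = da2 / da1 = 1.5, i.e. 50% facilitation.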
def analyze_VDEP(self, rmpregion=[0., 0.05], protocolName=None, device='Stim0'):
"""
Analyze the voltage-dependence of EPSCs
When selecting the analysis window, choose a window that encompasses
the peak of the inward EPSC in the negative voltage range.
Do not try to balance the trace (the slope should be turned off)
Parameters
----------
rmpregion : 2 element list (default: [0., 0.05])
The region of the trace used to measure the resting membrane potential,
in seconds.
protocolName : str (default: None)
The name of the protocol (not used here)
device : str (default: 'Stim0')
The name of the stimulus device
"""
print('\n'+'******'*4)
pulse_train = self.AR.getStim(device)
dt = self.Clamps.sample_interval
# stim dict in pulse_train will look like:
# {'start': [0.05, 0.1], 'duration': [0.0001, 0.0001],
# 'amplitude': [0.00025, 0.00025], 'npulses': [2], 'period': [0.05], 'type': ['pulseTrain']}
dd = self.AR.getDeviceData(device=device, devicename='command')
# print('pulse_train: ', pulse_train)
# print('dd: ', dd)
reps = self.AR.sequence[('protocol', 'repetitions')]
stim_dt = np.diff(pulse_train['start'])
stim_I = [pulse_train['amplitude'][0]]
mode = '?'
if not ('MultiClamp1', 'Pulse_amplitude') in self.AR.sequence.keys():
raise ValueError('Cannot find (MultiClamp1, Pulse_amplitude) in stimulus command')
stim_V = self.AR.sequence[('MultiClamp1', 'Pulse_amplitude')]
filekey = make_key(self.datapath)
# check the db to see if we have parameters already
dfiles = self.db['date'].tolist()
width = 20.
if filekey in dfiles:
# print(self.db.loc[self.db['date'] == filekey])
# print(self.db.head())
delays = self.db.loc[self.db['date'] == filekey]['T0'].values
t1s = self.db.loc[self.db['date'] == filekey]['T1'].values
if isinstance(delays, np.ndarray) and len(delays) > 1:
delay = delays[0]
else:
delay = delays
if isinstance(t1s, np.ndarray) and len(t1s) > 1:
t1 = t1s[0]
else:
t1 = t1s
print('delay from file', delay, t1)
else:
delay = 1.0*1e-3
t1 = (width-1.0)*1e-3
print('auto delay', delay, t1)
ndelay = self.NMDA_delay
nwidth = 0.0025
bl_region = [pulse_train['start'][0]-0.060, pulse_train['start'][0]-0.010] # time just before stimulus
baseline = []
self.baseline = bl_region
meani = []
stimamp = []
stimintvl = []
cmdv = []
self.sign = 1
self.i_mean = []
# self.set_baseline_times(rmpregion)
self.analysis_summary['iHold'] = []
self.analysis_summary[f'PSP_VDEP_AMPA'] = [[]]*len(pulse_train['start'])
self.analysis_summary[f'PSP_VDEP_NMDA'] = [[]]*len(pulse_train['start'])
bl = self.mean_I_analysis(region=bl_region, mode='baseline', reps=[0])
# print('bl: ', bl)
rgn = [delay, t1]
# print('rgn: ', rgn)
if self.update_regions:
rgn = self.set_region([pulse_train['start'][0], pulse_train['start'][0]+self.NMDA_delay+0.010], baseline=bl, slope=True)
self.T0 = float(rgn[0])
self.T1 = float(rgn[1])
if rgn[0] > 0.012:
rgn[0] = 0.004
rgn[1] = 0.20
slope_region = rgn
self.T0 = float(rgn[0])
self.T1 = float(rgn[1])
print('t0, t1: ', self.T0, self.T1)
# two pass approach:
# 1 find min, and look at the most negative traces (-100 to -60) to get the time of the peak
# 2. average those times and make a new window
# 3. use the new window to do the analysis by taking the mean in a 1msec wide window
# around the mean time
# print(delay, t1)
slope_region=np.array(slope_region)+pulse_train['start'][0]
# print('slope region: ', slope_region)
cmds = np.array(self.V_cmd)+self.AR.holding+self.JunctionPotential
bl = self.mean_I_analysis(region=[pulse_train['start'][0]+self.T0-0.0005, pulse_train['start'][0]+self.T0], mode='baseline', reps=[0])
data1, tb = self.get_traces(region=slope_region,
trlist=None, baseline=bl, intno=0, nint=1, reps=reps, slope=False)
if data1.ndim == 1:
return False
# self.plot_data(tb, data1)
ind = np.argmin(np.fabs(cmds-self.AMPA_voltage))
self.T1 = self.T0 + 0.010
print('p1min: ', self.T0)
p1delay = pulse_train['start'][0] + self.T0
p1end = pulse_train['start'][0] + self.T1 # note that this is a narrow
nmdelay = pulse_train['start'][0] + ndelay
i_mean = self.mean_I_analysis(region=[p1delay, p1end], mode='min',
baseline=bl, reps=reps, slope=False)
print('IMEAN ARGMIN: ', i_mean, self.i_argmin)
if i_mean is None:
return False
if len(self.i_argmin) < 1:
return False
mintime = self.i_argmin[ind]*dt # get AMPA peak index in the window
print(f'AMPA mintime @ {self.AMPA_voltage*1e3:.1f} mV: {mintime*1e3:.3f} ms')
# values for nmda analysis are currently fixed
i_nmda_mean = self.mean_I_analysis(region=[nmdelay-nwidth, nmdelay+nwidth], mode='mean',
baseline=bl, reps=reps, slope=False)
self.analysis_summary[f'PSP_VDEP_AMPA'][0] = self.sign*i_mean
self.analysis_summary[f'PSP_VDEP_NMDA'][0] = self.sign*i_nmda_mean
stimamp.append(pulse_train['amplitude'][0])
stimintvl.append(pulse_train['period'][0])
# print('ampa window & mean: ', [p1delay, p1end], i_mean)
# print('nmda window & mean: ', [nmdelay-nwidth, nmdelay+nwidth], i_nmda_mean)
# find -80 and +30 voltage indices (so we can save them and save the data)
iAMPA = np.argmin(np.fabs(-self.AMPA_voltage+cmds))
iNMDA = np.argmin(np.fabs(-self.NMDA_voltage+cmds))
# print(iAMPA, iNMDA)
# print('-90 mV found closest command: ', cmds[iAMPA])
# print('+50 mV found closest command: ', cmds[iNMDA])
if data1 is None or iNMDA >= data1.shape[0]:
self.analysis_summary['Vindices'] = {'vAMPA': np.nan, 'vNMDA': np.nan}
self.analysis_summary['NMDAAMPARatio'] = np.nan
self.analysis_summary['AMPA_NMDA_traces'] = {'T': None, 'iAMPA': None, 'iNMDA': None}
else:
# print('data1 shape: ', data1.shape, iAMPA, iNMDA, cmds[iAMPA], cmds[iNMDA])
# print(self.analysis_summary[f'PSP_VDEP_AMPA'])
self.analysis_summary['Vindices'] = {'-90': iAMPA, '50': iNMDA}
self.analysis_summary['NMDAAMPARatio'] = self.analysis_summary[f'PSP_VDEP_NMDA'][0][iNMDA]/self.analysis_summary[f'PSP_VDEP_AMPA'][0][iAMPA]
self.analysis_summary['AMPA_NMDA_traces'] = {'T': tb, 'iAMPA': data1[iAMPA], 'iNMDA': data1[iNMDA]}
self.analysis_summary['meas_times'] = {'tAMPA': mintime, 'tNMDA': ndelay}
self.analysis_summary['psc_stim_amplitudes'] = np.array(stim_I)
self.analysis_summary['psc_intervals'] = np.array(stimintvl)
self.analysis_summary['stim_times'] = pulse_train['start']
self.analysis_summary['Vcmd'] = cmds
self.analysis_summary['Window'] = [self.T0, self.T1]
return True
def plot_data(self, tb, data1, title=''):
"""
Quick plot of data for testing purposes
Parameters
----------
tb : np.array (no default)
the time base (one dimension)
data1 : np.array (no default)
The data, can be [m traces x npoints]
title : str (default: '')
A title to put on the plot
Return
------
Nothing
"""
f, ax = mpl.subplots(1)
ax = np.array(ax).ravel()
ie = data1.shape[1]
it = tb.shape[0]
if ie > it:
ie = it
if it > ie:
it = ie
print(it, ie)
for i in range(data1.shape[0]):
ax[0].plot(tb[:it], data1[i,:ie])
ax[0].set_title(str(self.datapath).replace('_', '\_')+' '+title, fontsize=8)
mpl.show()
def set_region(self, region=None, baseline=None, slope=True):
print('set region')
if region is None:
raise ValueError("PSCAnalyzer, set_region requires a region beginning and end to measure the current")
data1 = self.Clamps.traces['Time': region[0]:region[1]]
if baseline is None:
baseline = [0.]
tb = np.arange(0, data1.shape[1]*self.Clamps.sample_interval, self.Clamps.sample_interval)
data1 = data1.view(np.ndarray)
newCP = CP.CursorPlot(str(self.datapath))
setline = True
# for i in range(data1.shape[0]):
newCP.plotData(x=tb, y=np.array([data1[i]-baseline[i] for i in range(data1.shape[0])])*1e12, setline=setline, slope=slope)
# setline = False # only do on first plot
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
print('done with cp')
self.T0, self.T1 = newCP.selectedRegion
if self.T0 == None:
return(None)
return(newCP.selectedRegion)
def mean_I_analysis(self, region=None, t0=0., mode='mean', baseline=None, intno=0, nint=1, reps=[0], slope=False, slopewin=None):
"""
Get the mean current in a window
Works with the current Clamps object
Parameters
----------
region : tuple, list or numpy array with 2 values (default: None)
start and end time of a trace used to measure the RMP across
traces. Note that if slopewin is set, it may replace the region
t0 : float (default=0.)
start time for the mean current analysis, offset from the region ("deadwin")
mode : str (default='mean')
How to measure the value (valid values: 'mean', 'baseline' both compute mean,
'min' gives the minimum current in the window).
baseline: np.array (default None)
an array of precomputed baseline values to subtract from the data; one value
per trace
intno : int (default=0)
first trace to do in a group
nint : int (default=1)
# of traces to skip (e.g., for repeats or different values across the array)
reps: list (default=[0])
# of repetitions (used to reshape data in computation)
slope: bool (default=False)
set to subtract a slope from trace
slopewin: list or np.array of 2 elements (default=None)
Time window to use to compute slope, [start, stop], in seconds
Return
------
the mean current in the window
Stores computed mean current in the variable "name".
"""
if region is None:
raise ValueError("PSPSummary, mean_I_analysis requires a region beginning and end to measure the current")
analysis_region = region.copy()
if slope and slopewin is not None:
region = slopewin
data1 = self.Clamps.traces['Time': region[0]:region[1]]
# print('data shape: ', data1.shape)
# data2 = self.Clamps.traces.view(np.ndarray)
rgn = [int(region[i]/self.Clamps.sample_interval) for i in range(len(region))]
self.V_cmd = self.Clamps.cmd_wave[:,rgn[0]:rgn[1]].mean(axis=1).view(np.ndarray)
tb = np.arange(0, data1.shape[1]*self.Clamps.sample_interval, self.Clamps.sample_interval)
# tb2 = np.arange(0, data2.shape[1]*self.Clamps.sample_interval, self.Clamps.sample_interval)
data1 = data1.view(np.ndarray)
if baseline is not None:
data1 = np.array([data1[i]-baseline[i] for i in range(data1.shape[0])])
# data2 = np.array([data2[i]-baseline[i] for i in range(data2.shape[0])])
# subtract the "baseline" from the beginning of the interval to the end.
if slope:
slrgn = region
if slopewin is not None:
slrgn = slopewin
data1 = self.slope_subtraction(tb, data1, region, mode=mode)
# if not slope and slopewin is not None: # just first slope point to align current
# data1 = self.slope_subtraction(tb, data1, region, mode='point')
print('slope, slopewin: ', slope, slopewin, mode)
sh = data1.shape
nreps = len(reps)
if nint > 1:
dindx = range(intno, sh[0], nint)
if data1.ndim == 3:
data1 = data1[dindx,:,:]
elif data1.ndim == 2:
data1 = data1[dindx,:]
else:
raise ValueError('Data must have 2 or 3 dimensions')
# print(sh, data1.shape, nint)
self.i_mean_index = None
self.i_data = data1.mean(axis=0)
self.i_tb = tb+region[0]
nx = int(sh[0]/len(reps))
if mode in ['mean', 'baseline']: # just return the mean value
i_mean = data1.mean(axis=1) # all traces, average over specified time window
if nint == 1:
nx = int(sh[0]/len(reps))
try:
i_mean = np.reshape(i_mean, (len(reps), nx)) # reshape by repetition
except:
return i_mean
i_mean = i_mean.mean(axis=0) # average over reps
return i_mean
elif mode == 'min':
# mpl.plot(data1.T)
# mpl.show()
i_mina = data1.min(axis=1) # all traces, average over specified time window
if nint == 1:
nx = int(sh[0]/len(reps))
try:
i_mina = np.reshape(i_mina, (len(reps), nx)) # reshape by repetition
except:
raise ValueError("Reshape failed on min")
print('imina: ', i_mina)
i_min = i_mina.min(axis=0) # average over reps
self.i_argmin = np.argmin(i_mina, axis=0)
# return i_min
# dfilt = data1 # scipy.signal.savgol_filter(data1, 5, 2, axis=1, mode='nearest')
# print('data1.shpae 0: ', data1.shape)
# ist = int(t0/self.Clamps.sample_interval) # points in deadwin
# ien = int((analysis_region[1]-analysis_region[0])/self.Clamps.sample_interval)
# print('region 0: ', analysis_region[0])
# print('analysis time: ', ist*self.Clamps.sample_interval, ien*self.Clamps.sample_interval)
# print('dfilt shape: ', dfilt.shape)
# print('ist, ien: ', ist, ien)
# i_min = dfilt[:, ist:ien].min(axis=1) # all traces, get the minimum over specified time window
# print('nint: ', nint)
# print('imin shape: ', i_min.shape)
# print('reps: ', nreps)
# if nint == 1:
# nx = int(sh[0]/nreps)
# print('nx: ', nx)
# print(i_min.shape)
# print((nreps, nx))
# try:
# i_min = np.reshape(i_min, (nreps, nx)) # reshape by repetition
# print('rehsape ok')
# except:
# print('reshape failed!!!!')
# i_min = i_min.mean(axis=0) # average over reps
print('imin shape: ', i_min.shape)
# data2 = np.reshape(data1, (nreps, nx, data1.shape[1])).mean(axis=0)
# print(data2.shape)
# f, ax = mpl.subplots(1,1)
# sns.set_palette("coolwarm_r",data2.shape[0])
# cp = sns.color_palette("muted",data2.shape[0])
# device = 'Stim0'
# # stim_I = np.array(self.AR.sequence[(device, 'command.PulseTrain_amplitude')])*1e6
# for i in range(data2.shape[0]):
# ax.plot(data2[i,:], color = cp[i])
# data3 = np.reshape(data1, (nreps, nx, data1.shape[1]))
# for j in range(data3.shape[0]):
# for i in range(data3.shape[1]):
# csel = i
# print('csel: ', csel)
# ax.plot(data3[j, i,:], color = cp[csel], linestyle='--', linewidth=1, alpha=1)
# mpl.show()
# # print(ist, ien)
# print(dfilt.shape, nreps)
# dfw = [[]]*nreps
# nvs = int(sh[0]/nreps)
# print('nreps, nvs: ', nreps, nvs)
# for i in range(nreps):
# dfw[i] = data1[i*nvs:i*nvs + nvs,: ]
# dfw = np.array(dfw)
# print(dfw.shape)
# dfw = np.array(dfw).mean(axis=0)
# # dfw = dfw.reshape((nreps, -1, int(sh[0]/nreps)))
# # dfw = dfw.mean(axis=0)
# # for i in range(dfw.shape[0]):
# # mpl.plot(dfw[i])
# # mpl.show()
#
# # print(dfw.shape, ist, ien)
# self.i_argmin = dfw[:, ist:ien].argmin(axis=1) +ist
print('iargmin: ', self.i_argmin)
return i_min
def slope_subtraction(self, tb, data1, region, mode='mean'):
"""
Subtract a slope from the data; the slope is calculated from a time region
Parameters
----------
tb : np.array
time base, in seconds. Must be of same size as data1 2nd dimension
data1 : np.array
data array; 2 dimensional (traces x time)
region : 2 element list or np.array
time region for computation of the slope, in seconds
mode : str (default: 'mean')
Either 'point' (does nothing to the data)
or 'mean'
Return
------
slope-subtracted data
"""
dt = tb[1]-tb[0]
minX = 0 #int((region[0])/dt)
maxX = int((region[1]-region[0])/dt)
if mode == 'point': # do nothing...
# for i in range(data1.shape[0]):
# data1[i,:] -= data1[i,0]
return data1
print('SLOPE SUBTRACTION')
for i in range(data1.shape[0]):
x0 = list(range(minX,minX+3))
ml = maxX
x0.extend(list(range(ml-10, ml)))
fdx = np.array([tb[j] for j in x0])
fdy = np.array([data1[i][j] for j in x0])
pf = np.polyfit(fdx, fdy, 1)
bline = np.polyval(pf, tb)
if bline.shape[0] > data1[i].shape[0]:
bline = bline[:data1[i].shape[0]]
if mode != 'baseline':
data1[i,:] -= bline
return data1
def get_traces(self, region=None, trlist=None, baseline=None, order=0, intno=0, nint=1, reps=[0], mode='baseline', slope=True):
"""
Get the mean current (averages) in a window
Parameters
----------
region : tuple, list or numpy array with 2 values (default: None)
start and end time of a trace used to measure the RMP across
traces.
Return
------
Nothing
Stores computed mean current in the variable "name".
"""
if region is None:
raise ValueError("PSPSummary, mean_I_analysis requires a region beginning and end to measure the current")
data1 = self.Clamps.traces['Time': region[0]:region[1]]
tb = np.arange(0, data1.shape[1]*self.Clamps.sample_interval, self.Clamps.sample_interval)
data1 = data1.view(np.ndarray)
nreps = len(reps)
sh = data1.shape
# print('data1 initial shape: ', sh)
# print('tb shape: ', tb.shape)
# print('nint, nreps, order: ', nint, nreps, order)
if nint > 1:
# if order == 0:
dindx = range(intno, sh[0], nint)
data1 = data1[dindx]
# else:
# pass
# dindx = range(sh[0], intno, nreps)
# data1 = data1[dindx]
# print('unshaped data1: ', data1.shape)
# print('slope: ', slope, region)
# subtract the "baseline" from the beginning of the interval to the end.
if slope:
data1 = self.slope_subtraction(tb, data1, region, mode=mode)
if baseline is not None:
data1 = np.array([data1[i]-baseline[i] for i in range(data1.shape[0])])
nx = int(sh[0]/nreps)
if nx < 13:
nreps = 1
if order == 0 and nreps > 1:
try:
print('gettraces reshaping: data shape, reps, nx, nint: ', data1.shape, nreps, nx, data1.shape[0]/nreps, nint)
data2 = np.reshape(data1, (len(reps), nx, -1))
except:
print('Failed to reshape: data shape, reps, nx: ', data1.shape, len(reps), nx, data1.shape[0]/len(reps))
if data1.shape[0] > 1:
data2 = data1
return data2, tb
else:
return None, None
elif order == 1 or nreps == 1:
data2 = data1 # np.reshape(data1, (len(reps), nx, sh[1]))
# print('data 1 reshaped: data2: ', data2.shape)
### check data by plotting
# prop_cycle = mpl.rcParams['axes.prop_cycle']
# colors = prop_cycle.by_key()['color']
#
# colors = ['r', 'g', 'b', 'c', 'y', 'm', 'k']
# if len(colors) > data1.shape[0]:
# colors = colors[:data1.shape[0]]
# color_cycle = cycler(c=colors)
# f, ax = mpl.subplots(1, 1)
# k = 0
# print(data1.shape, nx, reps)
# if order == 0:
# for j in range(len(reps)):
# for i, sty in zip(range(nx), cycle(color_cycle)):
# # for i in range(data1.shape[0]):
# ax.plot(tb, data2[k], linewidth=0.5, **sty)
# k += 1
# elif order == 1:
# sty = zip(range(nint), cycle(color_cycle))
# for i in range(nint):
# # for i in range(data1.shape[0]):
# ax.plot(tb, data2[k], colors[intno], linewidth=0.5)
# k += 1
# print('nx: ', nx, ' reps: ', reps, ' len reps: ', len(reps))
###
#data1 = np.reshape(data1, (nx, len(reps), -1))
# print('reshaped data: ', data1.shape)
data2 = data2.mean(axis=0)
# print('mean data data: ', data1.shape)
### check data by plotting
# print('data2 mean shape: ', data2.shape)
# if order == 0:
# for i, sty in zip(range(data2.shape[0]), cycle(color_cycle)):
# ax.plot(tb, data2[i], '--', **sty)
# elif order == 1:
# ax.plot(tb, data2, '--k')
# mpl.show()
###
# if nint == 1:
# nx = int(sh[0]/len(reps))
# try:
# i_mean = np.reshape(i_mean, (len(reps), nx)) # reshape by repetition
# except:
# return None
# i_mean = i_mean.mean(axis=0) # average over reps
return data2, tb
def vcss_analysis(self, region=None):
"""
compute steady-state IV curve - from the mean current
across the stimulus set over the defined time region
(this usually will be the last half or third of the trace)
Parameters
----------
region : list or tuple
Start and end times for the analysis
"""
data1 = self.Clamps.traces['Time': region[0]:region[1]]
icmds = EM.MetaArray(self.Clamps.cmd_wave, # easiest = turn in to a matching metaarray...
info=[{'name': 'Command', 'units': 'A',
'values': np.array(self.Clamps.clampValues)},
self.Clamps.traces.infoCopy('Time'), self.Clamps.traces.infoCopy(-1)])
self.vcss_vcmd = icmds['Time': region[0]:region[1]].mean(axis=1)
self.r_in = np.nan
self.analysis_summary['Rin'] = np.nan
self.vcss_v = []
if data1.shape[1] == 0 or data1.shape[0] == 1:
return # skip it
ntr = len(self.Clamps.traces)
self.vcss_Im = data1.mean(axis=1) # steady-state, all traces
self.analysis_summary['Rin'] = np.nan
# self.Clamps.plotClampData()
isort = np.argsort(self.vcss_vcmd)
self.vcss_Im= self.vcss_Im[isort]
self.vcss_vcmd = self.vcss_vcmd[isort]
bl = self.vcbaseline[isort]
self.vcss_bl = bl
# compute Rin from the SS IV:
# this makes the assumption that:
# successive trials are in order so we sort above
# commands are not repeated...
if len(self.vcss_vcmd) > 1 and len(self.vcss_v) > 1:
pf = np.polyfit(self.vcss_vcmd, self.vcss_v, 3, rcond=None, full=False, w=None, cov=False)
pval = np.polyval(pf, self.vcss_vcmd)
#print('pval: ', pval)
slope = np.diff(pval) / np.diff(self.vcss_vcmd) # local slopes
imids = np.array((self.vcss_vcmd[1:] + self.vcss_vcmd[:-1]) / 2.)
self.rss_fit ={'I': imids, 'V': np.polyval(pf, imids)}
#print('fit V: ', self.rss_fit['V'])
#slope = slope[[slope > 0 ] and [self.vcss_vcmd[:-1] > -0.8] ] # only consider positive slope points
l = int(len(slope)/2)
maxloc = np.argmax(slope[l:]) + l
self.r_in = slope[maxloc]
self.r_in_loc = [self.vcss_vcmd[maxloc], self.vcss_v[maxloc], maxloc] # where it was found
minloc = np.argmin(slope[:l])
self.r_in_min = slope[minloc]
self.r_in_minloc = [self.vcss_vcmd[minloc], self.vcss_v[minloc], minloc] # where it was found
self.analysis_summary['Rin'] = self.r_in*1.0e-6
def plot_vciv(self):
"""
Plot the current voltage-clamp IV function
"""
print('vciv')
P = PH.regular_grid(2 , 2, order='columnsfirst', figsize=(8., 6.), showgrid=False,
verticalspacing=0.1, horizontalspacing=0.1,
margins={'leftmargin': 0.12, 'rightmargin': 0.12, 'topmargin': 0.08, 'bottommargin': 0.1},
labelposition=(-0.12, 0.95))
(date, sliceid, cell, proto, p3) = self.file_cell_protocol(self.datapath)
P.figure_handle.suptitle(str(Path(date, sliceid, cell, proto)), fontsize=12)
bl = self.get_baseline()
if "PPF" in self.analysis_summary.keys():
maxt = 250.
else:
maxt = 150.
for i in range(self.AR.traces.shape[0]):
P.axdict['A'].plot(self.AR.time_base*1e3, (self.AR.traces[i,:]-bl[i])*1e12, 'k-', linewidth=0.5)
if 'PSP_VDEP' in self.analysis_summary.keys():
P.axdict['A'].set_xlim(self.analysis_summary['stim_times'][0]*1e3-10, self.analysis_summary['stim_times'][0]*1e3+50)
# P.axdict['A'].set_ylim(-2500, 2500)
else:
P.axdict['A'].set_xlim(40, maxt)
P.axdict['A'].set_ylim(-2000, 500)
# PH.talbotTicks(P.axdict['A'], tickPlacesAdd={'x': 0, 'y': 0}, floatAdd={'x': 0, 'y': 0})
P.axdict['A'].set_xlabel('T (ms)')
P.axdict['A'].set_ylabel('I (pA)')
if 'PSP_IO' in self.analysis_summary.keys(): # io function
for i in range(len(self.analysis_summary['stim_times'])):
try:
P.axdict['C'].plot(self.analysis_summary['psc_stim_amplitudes'],
np.array(self.analysis_summary[f'PSP_IO'][i]), linewidth=1, markersize=4, marker='s')
except Exception:
print('Plot Failed on protocol: ', self.datapath, proto)
P.axdict['C'].set_xlabel('Istim (microAmps)')
P.axdict['C'].set_ylabel('EPSC I (pA)')
PH.talbotTicks(P.axdict['C'], tickPlacesAdd={'x': 0, 'y': 0}, floatAdd={'x': 0, 'y': 0})
elif 'PSP_VDEP' in self.analysis_summary.keys(): # io function
for i in range(len(self.analysis_summary['stim_times'])):
P.axdict['C'].plot(self.analysis_summary['Vcmd']*1e3,
self.sign*np.array(self.analysis_summary[f'PSP_VDEP'][i])*1e12, marker='o', linewidth=1, markersize=4)
P.axdict['C'].plot(self.analysis_summary['Vcmd']*1e3,
self.sign*np.array(self.analysis_summary[f'PSP_VDEP_NMDA'][i])*1e12, marker='s', linewidth=1, markersize=4)
P.axdict['C'].set_xlabel('V (mV)')
P.axdict['C'].set_ylabel('EPSC I (pA)')
PH.crossAxes(P.axdict['C'], xyzero=(-60., 0.))
PH.talbotTicks(P.axdict['C'], tickPlacesAdd={'x': 0, 'y': 0}, floatAdd={'x': 0, 'y': 0})
elif 'PPF' in self.analysis_summary.keys():
for i in range(len(self.analysis_summary['stim_times'])):
P.axdict['C'].plot(self.analysis_summary['ppf_dt']*1e3,
self.sign*np.array(self.analysis_summary[f'PPF']), linewidth=1, markersize=4)
P.axdict['C'].set_xlim(0, 200.)
P.axdict['C'].set_ylim(0, 2.0)
PH.referenceline(P.axdict['C'], 1.0)
P.axdict['C'].set_xlabel('Interval (ms)')
P.axdict['C'].set_ylabel('PPF (R2/R1)')
PH.talbotTicks(P.axdict['C'], tickPlacesAdd={'x': 0, 'y': 1}, floatAdd={'x': 0, 'y': 1})
P.axdict['B'].set_xlabel('I (nA)')
P.axdict['B'].set_ylabel('V (mV)')
PH.talbotTicks(P.axdict['B'], tickPlacesAdd={'x': 1, 'y': 0}, floatAdd={'x': 2, 'y': 0})
P.axdict['D'].set_xlabel('I (pA)')
P.axdict['D'].set_ylabel('Latency (ms)')
self.IVFigure = P.figure_handle
if self.plot:
mpl.show()
def file_cell_protocol(self, filename):
"""
file_cell_protocol breaks the current filename down and returns a
tuple: (date, sliceid, cell, protocol, remaining path)
Parameters
----------
filename : str
Name of the protocol to break down
Returns
-------
tuple : (date, sliceid, cell, protocol, any other...)
last argument returned is the rest of the path...
"""
(p0, proto) = os.path.split(filename)
(p1, cell) = os.path.split(p0)
(p2, sliceid) = os.path.split(p1)
(p3, date) = os.path.split(p2)
return (date, sliceid, cell, proto, p3)
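# Illustrative note (added for clarity): the decomposition above is just repeated
# os.path.split calls. For a hypothetical path
#   /data/2019.03.19_000/slice_000/cell_001/Stim_IO_1
# the returned tuple would be
#   ('2019.03.19_000', 'slice_000', 'cell_001', 'Stim_IO_1', '/data')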
def test(self):
"""
This is for testing - normally an instance of PSCAnalyzer would be
created and these values would be filled in.
"""
import matplotlib
matplotlib.use('Qt5Agg')
from matplotlib import rc
#rc('font',**{'family':'sans-serif','sans-serif':['Arial']})
#rcParams['font.sans-serif'] = ['Arial']
#rcParams['font.family'] = 'sans-serif'
rc('text', usetex=False)
rcParams = matplotlib.rcParams
rcParams['svg.fonttype'] = 'none' # No text as paths. Assume font installed.
rcParams['pdf.fonttype'] = 42
rcParams['ps.fonttype'] = 42
rcParams['text.latex.unicode'] = True
# disk = '/Volumes/Pegasus/ManisLab_Data3'
# disk = '/Volumes/PBM_005/data'
disk = '/Volumes/Pegasus/ManisLab_Data3'
middir = 'Kasten_Michael'
directory = 'Maness_PFC_stim'
cell = '2019.03.19_000/slice_000/cell_001'
# cell = '2019.03.19_000/slice_001/cell_000'
ddc = Path(disk, middir, directory, cell)
protocol = 'Stim_IO_1'
# protocol = 'PPF_2_001'
# protocol = 'VC-EPSC_3_ver2_003'
fn = Path(ddc, protocol)
PSC = PSCAnalyzer(fn)
PSC.measure_PSC(protocol[:-4], savetimes=True)
if __name__ == '__main__':
pass
|
[] |
[] |
[
"QT_MAC_WANTS_LAYER"
] |
[]
|
["QT_MAC_WANTS_LAYER"]
|
python
| 1 | 0 | |
python/ray/tune/tests/test_commands.py
|
import click
import os
import pytest
import subprocess
import sys
import time
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import ray
from ray import tune
from ray.rllib import _register_all
from ray.tune import commands
from ray.tune.result import CONFIG_PREFIX
class Capturing:
def __enter__(self):
self._stdout = sys.stdout
sys.stdout = self._stringio = StringIO()
self.captured = []
return self
def __exit__(self, *args):
self.captured.extend(self._stringio.getvalue().splitlines())
del self._stringio # free up some memory
sys.stdout = self._stdout
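# Illustrative sketch (added for clarity; not part of the test suite): typical
# use of the Capturing helper. The printed text is only an example.
def _capturing_usage_sketch():
    with Capturing() as output:
        print("hello")
    return output.captured  # e.g. ["hello"]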
@pytest.fixture
def start_ray():
ray.init(log_to_driver=False, local_mode=True)
_register_all()
yield
ray.shutdown()
def test_time(start_ray, tmpdir):
experiment_name = "test_time"
experiment_path = os.path.join(str(tmpdir), experiment_name)
num_samples = 2
tune.run_experiments(
{
experiment_name: {
"run": "__fake",
"stop": {"training_iteration": 1},
"num_samples": num_samples,
"local_dir": str(tmpdir),
}
}
)
times = []
for i in range(5):
start = time.time()
subprocess.check_call(["tune", "ls", experiment_path])
times += [time.time() - start]
assert sum(times) / len(times) < 6.0, "CLI is taking too long!"
def test_ls(start_ray, tmpdir):
"""This test captures output of list_trials."""
experiment_name = "test_ls"
experiment_path = os.path.join(str(tmpdir), experiment_name)
num_samples = 3
tune.run(
"__fake",
name=experiment_name,
stop={"training_iteration": 1},
num_samples=num_samples,
local_dir=str(tmpdir),
)
columns = ["episode_reward_mean", "training_iteration", "trial_id"]
limit = 2
with Capturing() as output:
commands.list_trials(experiment_path, info_keys=columns, limit=limit)
lines = output.captured
assert all(col in lines[1] for col in columns)
assert lines[1].count("|") == len(columns) + 1
assert len(lines) == 3 + limit + 1
with Capturing() as output:
commands.list_trials(
experiment_path,
sort=["trial_id"],
info_keys=("trial_id", "training_iteration"),
filter_op="training_iteration == 1",
)
lines = output.captured
assert len(lines) == 3 + num_samples + 1
with pytest.raises(click.ClickException):
commands.list_trials(
experiment_path, sort=["trial_id"], info_keys=("training_iteration",)
)
with pytest.raises(click.ClickException):
commands.list_trials(experiment_path, info_keys=("asdf",))
def test_ls_with_cfg(start_ray, tmpdir):
experiment_name = "test_ls_with_cfg"
experiment_path = os.path.join(str(tmpdir), experiment_name)
tune.run(
"__fake",
name=experiment_name,
stop={"training_iteration": 1},
config={"test_variable": tune.grid_search(list(range(5)))},
local_dir=str(tmpdir),
)
columns = [CONFIG_PREFIX + "/test_variable", "trial_id"]
limit = 4
with Capturing() as output:
commands.list_trials(experiment_path, info_keys=columns, limit=limit)
lines = output.captured
assert all(col in lines[1] for col in columns)
assert lines[1].count("|") == len(columns) + 1
assert len(lines) == 3 + limit + 1
def test_lsx(start_ray, tmpdir):
"""This test captures output of list_experiments."""
project_path = str(tmpdir)
num_experiments = 3
for i in range(num_experiments):
experiment_name = "test_lsx{}".format(i)
tune.run(
"__fake",
name=experiment_name,
stop={"training_iteration": 1},
num_samples=1,
local_dir=project_path,
)
limit = 2
with Capturing() as output:
commands.list_experiments(
project_path, info_keys=("total_trials",), limit=limit
)
lines = output.captured
assert "total_trials" in lines[1]
assert lines[1].count("|") == 2
assert len(lines) == 3 + limit + 1
with Capturing() as output:
commands.list_experiments(
project_path,
sort=["total_trials"],
info_keys=("total_trials",),
filter_op="total_trials == 1",
)
lines = output.captured
assert sum("1" in line for line in lines) >= num_experiments
assert len(lines) == 3 + num_experiments + 1
if __name__ == "__main__":
# Make click happy in bazel.
os.environ["LC_ALL"] = "en_US.UTF-8"
os.environ["LANG"] = "en_US.UTF-8"
sys.exit(pytest.main([__file__]))
|
[] |
[] |
[
"LC_ALL",
"LANG"
] |
[]
|
["LC_ALL", "LANG"]
|
python
| 2 | 0 | |
rabbit_python/sender.py
|
import pika
import os
URL_RABBIT = os.getenv('URL_RABBIT')
connection = pika.BlockingConnection(pika.ConnectionParameters(URL_RABBIT))
channel = connection.channel()
channel.queue_declare(queue='hello')
channel.basic_publish(exchange='',
routing_key='hello',
body='Hello World')
print('[x] "Hello World" sent')
connection.close()
|
[] |
[] |
[
"URL_RABBIT"
] |
[]
|
["URL_RABBIT"]
|
python
| 1 | 0 | |
demo.py
|
import asyncio
import logging
import wx
from aio_wx_widgets.frame import DefaultFrame
from aio_wx_widgets.widgets import button, text
from winrtqrabber.controller import Controller
from winrtqrabber.view import ScannerView
from winrtqrabber.winrtcapture import WinrtCapture
from wxasync import WxAsyncApp
_LOGGER = logging.getLogger(__name__)
class MainWindow(DefaultFrame):
def __init__(self):
super().__init__("Main window")
model = WinrtCapture()
view = ScannerView(self, width=650, height=480)
self.controller = Controller(model, view)
self.add(view, layout=wx.ALIGN_CENTER, create=False)
self.add(button.async_button("start", self._on_start))
self.add(button.async_button("stop", self._on_stop))
self.scan_results: text.Text = self.add(text.Text("unknown"))
async def _on_start(self, event):
result = await self.controller.start_scan()
if result:
self.scan_results.set_text(result)
async def _on_stop(self, event):
_LOGGER.info("Stopping")
await self.controller.stop_scan()
if __name__ == "__main__":
# os.environ["DEBUGGING"] = "1"
logging.basicConfig(level=logging.DEBUG)
loop = asyncio.get_event_loop()
app = WxAsyncApp()
main_window = MainWindow()
main_window.Show()
app.SetTopWindow(main_window)
loop.run_until_complete(app.MainLoop())
|
[] |
[] |
[
"DEBUGGING"
] |
[]
|
["DEBUGGING"]
|
python
| 1 | 0 | |
tensorflow_examples/lite/model_maker/core/task/model_spec/object_detector_spec.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model specification for object detection."""
import collections
import os
import tempfile
from absl import logging
import tensorflow as tf
from tensorflow_examples.lite.model_maker.core import compat
from tensorflow_examples.lite.model_maker.third_party.efficientdet import coco_metric
from tensorflow_examples.lite.model_maker.third_party.efficientdet import hparams_config
from tensorflow_examples.lite.model_maker.third_party.efficientdet import utils
from tensorflow_examples.lite.model_maker.third_party.efficientdet.keras import efficientdet_keras
from tensorflow_examples.lite.model_maker.third_party.efficientdet.keras import inference
from tensorflow_examples.lite.model_maker.third_party.efficientdet.keras import label_util
from tensorflow_examples.lite.model_maker.third_party.efficientdet.keras import postprocess
from tensorflow_examples.lite.model_maker.third_party.efficientdet.keras import train
from tensorflow_examples.lite.model_maker.third_party.efficientdet.keras import train_lib
from tensorflow_examples.lite.model_maker.third_party.efficientdet.keras import util_keras
def _get_ordered_label_map(label_map):
"""Gets label_map as an OrderedDict instance with ids sorted."""
if not label_map:
return label_map
ordered_label_map = collections.OrderedDict()
for idx in sorted(label_map.keys()):
ordered_label_map[idx] = label_map[idx]
return ordered_label_map
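# Illustrative note (added for clarity): the helper simply re-inserts entries in
# ascending key order, e.g.
#   _get_ordered_label_map({2: 'car', 1: 'person'})
#   -> OrderedDict([(1, 'person'), (2, 'car')])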
class EfficientDetModelSpec(object):
"""A specification of the EfficientDet model."""
compat_tf_versions = compat.get_compat_tf_versions(2)
def __init__(self,
model_name,
uri,
hparams='',
model_dir=None,
epochs=50,
batch_size=64,
steps_per_execution=1,
moving_average_decay=0,
var_freeze_expr='(efficientnet|fpn_cells|resample_p6)',
strategy=None,
tpu=None,
gcp_project=None,
tpu_zone=None,
use_xla=False,
profile=False,
debug=False,
tf_random_seed=111111):
"""Initialize an instance with model parameters.
Args:
model_name: Model name.
uri: TF-Hub path/url to EfficientDet module.
hparams: Hyperparameters used to overwrite the default configuration. Can be
1) Dict, containing parameter names and values; 2) String, comma-separated
k=v pairs of hyperparameters; 3) String, name of a yaml file whose
attributes are used as hyperparameters.
model_dir: The location to save the model checkpoint files.
epochs: Default training epochs.
batch_size: Training & Evaluation batch size.
steps_per_execution: Number of steps per training execution.
moving_average_decay: Float. The decay to use for maintaining moving
averages of the trained parameters.
var_freeze_expr: Expression to freeze variables.
strategy: A string specifying which distribution strategy to use.
Accepted values are 'tpu', 'gpus', None. 'tpu' means to use TPUStrategy.
'gpus' means to use MirroredStrategy for multi-gpus. If None, use TF
default with OneDeviceStrategy.
tpu: The Cloud TPU to use for training. This should be either the name
used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470
url.
gcp_project: Project name for the Cloud TPU-enabled project. If not
specified, we will attempt to automatically detect the GCE project from
metadata.
tpu_zone: GCE zone where the Cloud TPU is located. If not specified, we
will attempt to automatically detect the zone from metadata.
use_xla: Use XLA even if strategy is not tpu. If strategy is tpu, always
use XLA, and this flag has no effect.
profile: Enable profile mode.
debug: Enable debug mode.
tf_random_seed: Fixed random seed for deterministic execution across runs
for debugging.
"""
self.model_name = model_name
self.uri = uri
self.batch_size = batch_size
config = hparams_config.get_efficientdet_config(model_name)
config.override(hparams)
config.image_size = utils.parse_image_size(config.image_size)
config.var_freeze_expr = var_freeze_expr
config.moving_average_decay = moving_average_decay
if epochs:
config.num_epochs = epochs
if use_xla and strategy != 'tpu':
tf.config.optimizer.set_jit(True)
for gpu in tf.config.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(gpu, True)
if debug:
tf.config.experimental_run_functions_eagerly(True)
tf.debugging.set_log_device_placement(True)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
tf.random.set_seed(tf_random_seed)
logging.set_verbosity(logging.DEBUG)
if strategy == 'tpu':
tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
tpu, zone=tpu_zone, project=gcp_project)
tf.config.experimental_connect_to_cluster(tpu_cluster_resolver)
tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)
ds_strategy = tf.distribute.TPUStrategy(tpu_cluster_resolver)
logging.info('All devices: %s', tf.config.list_logical_devices('TPU'))
tf.config.set_soft_device_placement(True)
elif strategy == 'gpus':
ds_strategy = tf.distribute.MirroredStrategy()
logging.info('All devices: %s', tf.config.list_physical_devices('GPU'))
else:
if tf.config.list_physical_devices('GPU'):
ds_strategy = tf.distribute.OneDeviceStrategy('device:GPU:0')
else:
ds_strategy = tf.distribute.OneDeviceStrategy('device:CPU:0')
self.ds_strategy = ds_strategy
if model_dir is None:
model_dir = tempfile.mkdtemp()
params = dict(
profile=profile,
model_name=model_name,
steps_per_execution=steps_per_execution,
model_dir=model_dir,
strategy=strategy,
batch_size=batch_size,
tf_random_seed=tf_random_seed,
debug=debug)
config.override(params, True)
self.config = config
# set mixed precision policy by keras api.
precision = utils.get_precision(config.strategy, config.mixed_precision)
policy = tf.keras.mixed_precision.experimental.Policy(precision)
tf.keras.mixed_precision.experimental.set_policy(policy)
def create_model(self):
"""Creates the EfficientDet model."""
return train_lib.EfficientDetNetTrainHub(
config=self.config, hub_module_url=self.uri)
def train(self,
model,
train_dataset,
steps_per_epoch,
val_dataset,
validation_steps,
epochs=None,
batch_size=None,
val_json_file=None):
"""Run EfficientDet training."""
config = self.config
if not epochs:
epochs = config.num_epochs
if not batch_size:
batch_size = config.batch_size
config.update(
dict(
steps_per_epoch=steps_per_epoch,
eval_samples=batch_size * validation_steps,
val_json_file=val_json_file,
batch_size=batch_size))
train.setup_model(model, config)
train.init_experimental(config)
model.fit(
train_dataset,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
callbacks=train_lib.get_callbacks(config.as_dict(), val_dataset),
validation_data=val_dataset,
validation_steps=validation_steps)
return model
def evaluate(self, model, dataset, steps, json_file=None):
"""Evaluate the EfficientDet keras model."""
label_map = label_util.get_label_map(self.config.label_map)
# Sorts label_map.keys since pycocotools.cocoeval uses sorted catIds
# (category ids) in COCOeval class.
label_map = _get_ordered_label_map(label_map)
evaluator = coco_metric.EvaluationMetric(
filename=json_file, label_map=label_map)
evaluator.reset_states()
dataset = dataset.take(steps)
@tf.function
def _get_detections(images, labels):
cls_outputs, box_outputs = model(images, training=False)
detections = postprocess.generate_detections(self.config, cls_outputs,
box_outputs,
labels['image_scales'],
labels['source_ids'])
tf.numpy_function(evaluator.update_state, [
labels['groundtruth_data'],
postprocess.transform_detections(detections)
], [])
dataset = self.ds_strategy.experimental_distribute_dataset(dataset)
for (images, labels) in dataset:
self.ds_strategy.run(_get_detections, (images, labels))
metrics = evaluator.result()
metric_dict = {}
for i, name in enumerate(evaluator.metric_names):
metric_dict[name] = metrics[i]
if label_map:
for i, cid in enumerate(label_map.keys()):
name = 'AP_/%s' % label_map[cid]
metric_dict[name] = metrics[i + len(evaluator.metric_names)]
return metric_dict
def export_saved_model(self,
saved_model_dir,
batch_size=None,
pre_mode='infer',
post_mode='global'):
"""Saves the model to Tensorflow SavedModel.
Args:
saved_model_dir: Folder path for saved model.
batch_size: Batch size to be saved in saved_model.
pre_mode: Pre-processing Mode in ExportModel, must be {None, 'infer'}.
post_mode: Post-processing Mode in ExportModel, must be {None, 'global',
'per_class'}.
"""
# Create EfficientDetModel with latest checkpoint.
config = self.config
model = efficientdet_keras.EfficientDetModel(config=config)
model.build((batch_size, *config.image_size, 3))
if config.model_dir:
util_keras.restore_ckpt(
model,
config.model_dir,
config['moving_average_decay'],
skip_mismatch=False)
else:
# EfficientDetModel is randomly initialized without restoring the
# checkpoint. This is mainly used in object_detector_test and shouldn't be
# used if we want to export a trained model.
tf.compat.v1.logging.warn('Need to restore the checkpoint for '
'EfficientDet.')
# Gets tf.TensorSpec.
if pre_mode is None:
# Input is the preprocessed image that's already resized to a certain
# input shape.
input_spec = tf.TensorSpec(
shape=[batch_size, *config.image_size, 3],
dtype=tf.float32,
name='images')
else:
# Input is the raw image, which can have any input shape,
input_spec = tf.TensorSpec(
shape=[batch_size, None, None, 3], dtype=tf.uint8, name='images')
export_model = inference.ExportModel(
model, pre_mode=pre_mode, post_mode=post_mode)
tf.saved_model.save(
export_model,
saved_model_dir,
signatures=export_model.__call__.get_concrete_function(input_spec))
def export_tflite(self, tflite_filepath, quantization_config=None):
"""Converts the retrained model to tflite format and saves it.
The exported TFLite model has the following inputs & outputs:
One input:
image: a float32 tensor of shape[1, height, width, 3] containing the
normalized input image. `self.config.image_size` is [height, width].
Four Outputs:
detection_boxes: a float32 tensor of shape [1, num_boxes, 4] with box
locations.
detection_classes: a float32 tensor of shape [1, num_boxes] with class
indices.
detection_scores: a float32 tensor of shape [1, num_boxes] with class
scores.
num_boxes: a float32 tensor of size 1 containing the number of detected
boxes.
Args:
tflite_filepath: File path to save tflite model.
quantization_config: Configuration for post-training quantization.
"""
with tempfile.TemporaryDirectory() as temp_dir:
self.export_saved_model(
temp_dir, batch_size=1, pre_mode=None, post_mode='tflite')
converter = tf.lite.TFLiteConverter.from_saved_model(temp_dir)
if quantization_config:
converter = quantization_config.get_converter_with_quantization(
converter, model_spec=self)
# TFLITE_BUILTINS is needed for TFLite's custom NMS op for integer only
# quantization.
if tf.lite.OpsSet.TFLITE_BUILTINS not in converter.target_spec.supported_ops:
converter.target_spec.supported_ops += [tf.lite.OpsSet.TFLITE_BUILTINS]
tflite_model = converter.convert()
with tf.io.gfile.GFile(tflite_filepath, 'wb') as f:
f.write(tflite_model)
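# Illustrative sketch (added for clarity; not part of the original module): the
# exported .tflite file can be loaded with the standard TFLite interpreter. The
# model path and input tensor below are hypothetical.
#
#   interpreter = tf.lite.Interpreter(model_path='model.tflite')
#   interpreter.allocate_tensors()
#   input_details = interpreter.get_input_details()
#   interpreter.set_tensor(input_details[0]['index'], image_batch)
#   interpreter.invoke()
#   boxes = interpreter.get_tensor(interpreter.get_output_details()[0]['index'])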
|
[] |
[] |
[
"TF_DETERMINISTIC_OPS"
] |
[]
|
["TF_DETERMINISTIC_OPS"]
|
python
| 1 | 0 | |
hail/python/hail/backend/spark_backend.py
|
import pkg_resources
import sys
import os
import json
import socket
import socketserver
from threading import Thread
import py4j
import pyspark
from hail.utils.java import Env, scala_package_object, scala_object
from hail.expr.types import dtype
from hail.expr.table_type import ttable
from hail.expr.matrix_type import tmatrix
from hail.expr.blockmatrix_type import tblockmatrix
from hail.ir.renderer import CSERenderer
from hail.ir import JavaIR
from hail.table import Table
from hail.matrixtable import MatrixTable
from .py4j_backend import Py4JBackend, handle_java_exception
from ..hail_logging import Logger
if pyspark.__version__ < '3' and sys.version_info > (3, 8):
raise EnvironmentError('Hail with spark {} requires Python 3.6 or 3.7, found {}.{}'.format(
pyspark.__version__, sys.version_info.major, sys.version_info.minor))
_installed = False
_original = None
def install_exception_handler():
global _installed
global _original
if not _installed:
_original = py4j.protocol.get_return_value
_installed = True
# The original `get_return_value` is not patched, it's idempotent.
patched = handle_java_exception(_original)
# only patch the one used in py4j.java_gateway (call Java API)
py4j.java_gateway.get_return_value = patched
def uninstall_exception_handler():
global _installed
global _original
if _installed:
_installed = False
py4j.protocol.get_return_value = _original
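# Illustrative sketch (added for clarity; not part of the original module): the
# same wrap-and-patch idea applied to an arbitrary module function. json.loads
# is used purely as an example target.
def _patch_sketch():
    import json
    original = json.loads

    def wrapped(*args, **kwargs):
        # extra behaviour (logging, error translation, ...) would go here
        return original(*args, **kwargs)

    json.loads = wrapped    # install the wrapper
    json.loads = original   # restore the original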
class LoggingTCPHandler(socketserver.StreamRequestHandler):
def handle(self):
for line in self.rfile:
sys.stderr.write(line.decode("ISO-8859-1"))
class SimpleServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
daemon_threads = True
allow_reuse_address = True
def __init__(self, server_address, handler_class):
socketserver.TCPServer.__init__(self, server_address, handler_class)
def connect_logger(utils_package_object, host, port):
"""
This method starts a simple server which listens on a port for a
client to connect and start writing messages. Whenever a message
is received, it is written to sys.stderr. The server is run in
a daemon thread from the caller, which is killed when the caller
thread dies.
If the socket is in use, then the server tries to listen on the
next port (port + 1). After 25 tries, it gives up.
:param str host: Hostname for server.
:param int port: Port to listen on.
"""
server = None
tries = 0
max_tries = 25
while not server:
try:
server = SimpleServer((host, port), LoggingTCPHandler)
except socket.error:
port += 1
tries += 1
if tries >= max_tries:
sys.stderr.write(
'WARNING: Could not find a free port for logger, maximum retries {} exceeded.'.format(max_tries))
return
t = Thread(target=server.serve_forever, args=())
# The thread should be a daemon so that it shuts down when the parent thread is killed
t.daemon = True
t.start()
utils_package_object.addSocketAppender(host, port)
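# Illustrative sketch (added for clarity; not part of the original module): the
# same "try the next port" pattern, shown with a bare TCP socket.
def _next_free_port_sketch(host='127.0.0.1', start_port=12888, max_tries=25):
    import socket as _socket
    for offset in range(max_tries):
        candidate = start_port + offset
        try:
            s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
            s.bind((host, candidate))
            s.close()
            return candidate
        except OSError:
            continue
    return None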
class Log4jLogger(Logger):
def __init__(self, log_pkg):
self._log_pkg = log_pkg
def error(self, msg):
self._log_pkg.error(msg)
def warning(self, msg):
self._log_pkg.warn(msg)
def info(self, msg):
self._log_pkg.info(msg)
class SparkBackend(Py4JBackend):
def __init__(self, idempotent, sc, spark_conf, app_name, master,
local, log, quiet, append, min_block_size,
branching_factor, tmpdir, local_tmpdir, skip_logging_configuration, optimizer_iterations):
if pkg_resources.resource_exists(__name__, "hail-all-spark.jar"):
hail_jar_path = pkg_resources.resource_filename(__name__, "hail-all-spark.jar")
assert os.path.exists(hail_jar_path), f'{hail_jar_path} does not exist'
conf = pyspark.SparkConf()
base_conf = spark_conf or {}
for k, v in base_conf.items():
conf.set(k, v)
jars = [hail_jar_path]
if os.environ.get('HAIL_SPARK_MONITOR'):
import sparkmonitor
jars.append(os.path.join(os.path.dirname(sparkmonitor.__file__), 'listener.jar'))
conf.set("spark.extraListeners", "sparkmonitor.listener.JupyterSparkMonitorListener")
conf.set('spark.jars', ','.join(jars))
conf.set('spark.driver.extraClassPath', ','.join(jars))
conf.set('spark.executor.extraClassPath', './hail-all-spark.jar')
if sc is None:
pyspark.SparkContext._ensure_initialized(conf=conf)
elif not quiet:
sys.stderr.write(
'pip-installed Hail requires additional configuration options in Spark referring\n'
' to the path to the Hail Python module directory HAIL_DIR,\n'
' e.g. /path/to/python/site-packages/hail:\n'
' spark.jars=HAIL_DIR/hail-all-spark.jar\n'
' spark.driver.extraClassPath=HAIL_DIR/hail-all-spark.jar\n'
' spark.executor.extraClassPath=./hail-all-spark.jar')
else:
pyspark.SparkContext._ensure_initialized()
self._gateway = pyspark.SparkContext._gateway
self._jvm = pyspark.SparkContext._jvm
hail_package = getattr(self._jvm, 'is').hail
self._hail_package = hail_package
self._utils_package_object = scala_package_object(hail_package.utils)
jsc = sc._jsc.sc() if sc else None
if idempotent:
self._jbackend = hail_package.backend.spark.SparkBackend.getOrCreate(
jsc, app_name, master, local, True, min_block_size, tmpdir, local_tmpdir)
self._jhc = hail_package.HailContext.getOrCreate(
self._jbackend, log, True, append, branching_factor, skip_logging_configuration, optimizer_iterations)
else:
self._jbackend = hail_package.backend.spark.SparkBackend.apply(
jsc, app_name, master, local, True, min_block_size, tmpdir, local_tmpdir)
self._jhc = hail_package.HailContext.apply(
self._jbackend, log, True, append, branching_factor, skip_logging_configuration, optimizer_iterations)
self._jsc = self._jbackend.sc()
if sc:
self.sc = sc
else:
self.sc = pyspark.SparkContext(gateway=self._gateway, jsc=self._jvm.JavaSparkContext(self._jsc))
self._jspark_session = self._jbackend.sparkSession()
self._spark_session = pyspark.sql.SparkSession(self.sc, self._jspark_session)
# This has to go after creating the SparkSession. Unclear why.
# Maybe it does its own patch?
install_exception_handler()
from hail.context import version
py_version = version()
jar_version = self._jhc.version()
if jar_version != py_version:
raise RuntimeError(f"Hail version mismatch between JAR and Python library\n"
f" JAR: {jar_version}\n"
f" Python: {py_version}")
self._fs = None
self._logger = None
if not quiet:
sys.stderr.write('Running on Apache Spark version {}\n'.format(self.sc.version))
if self._jsc.uiWebUrl().isDefined():
sys.stderr.write('SparkUI available at {}\n'.format(self._jsc.uiWebUrl().get()))
connect_logger(self._utils_package_object, 'localhost', 12888)
self._jbackend.startProgressBar()
def jvm(self):
return self._jvm
def hail_package(self):
return self._hail_package
def utils_package_object(self):
return self._utils_package_object
def stop(self):
self._jbackend.close()
self._jhc.stop()
self._jhc = None
self.sc.stop()
self.sc = None
uninstall_exception_handler()
def _parse_value_ir(self, code, ref_map={}, ir_map={}):
return self._jbackend.parse_value_ir(
code,
{k: t._parsable_string() for k, t in ref_map.items()},
ir_map)
def _parse_table_ir(self, code, ref_map={}, ir_map={}):
return self._jbackend.parse_table_ir(code, ref_map, ir_map)
def _parse_matrix_ir(self, code, ref_map={}, ir_map={}):
return self._jbackend.parse_matrix_ir(code, ref_map, ir_map)
def _parse_blockmatrix_ir(self, code, ref_map={}, ir_map={}):
return self._jbackend.parse_blockmatrix_ir(code, ref_map, ir_map)
@property
def logger(self):
if self._logger is None:
self._logger = Log4jLogger(self._utils_package_object)
return self._logger
@property
def fs(self):
if self._fs is None:
from hail.fs.hadoop_fs import HadoopFS
self._fs = HadoopFS(self._utils_package_object, self._jbackend.fs())
return self._fs
def _to_java_ir(self, ir, parse):
if not hasattr(ir, '_jir'):
r = CSERenderer(stop_at_jir=True)
# FIXME parse should be static
ir._jir = parse(r(ir), ir_map=r.jirs)
return ir._jir
def _to_java_value_ir(self, ir):
return self._to_java_ir(ir, self._parse_value_ir)
def _to_java_table_ir(self, ir):
return self._to_java_ir(ir, self._parse_table_ir)
def _to_java_matrix_ir(self, ir):
return self._to_java_ir(ir, self._parse_matrix_ir)
def _to_java_blockmatrix_ir(self, ir):
return self._to_java_ir(ir, self._parse_blockmatrix_ir)
def value_type(self, ir):
jir = self._to_java_value_ir(ir)
return dtype(jir.typ().toString())
def table_type(self, tir):
jir = self._to_java_table_ir(tir)
return ttable._from_java(jir.typ())
def matrix_type(self, mir):
jir = self._to_java_matrix_ir(mir)
return tmatrix._from_java(jir.typ())
def persist_table(self, t, storage_level):
return Table._from_java(self._jbackend.pyPersistTable(storage_level, self._to_java_table_ir(t._tir)))
def unpersist_table(self, t):
return Table._from_java(self._to_java_table_ir(t._tir).pyUnpersist())
def persist_matrix_table(self, mt, storage_level):
return MatrixTable._from_java(self._jbackend.pyPersistMatrix(storage_level, self._to_java_matrix_ir(mt._mir)))
def unpersist_matrix_table(self, mt):
return MatrixTable._from_java(self._to_java_matrix_ir(mt._mir).pyUnpersist())
def unpersist_block_matrix(self, id):
self._jhc.backend().unpersist(id)
def blockmatrix_type(self, bmir):
jir = self._to_java_blockmatrix_ir(bmir)
return tblockmatrix._from_java(jir.typ())
def from_spark(self, df, key):
return Table._from_java(self._jbackend.pyFromDF(df._jdf, key))
def to_spark(self, t, flatten):
t = t.expand_types()
if flatten:
t = t.flatten()
return pyspark.sql.DataFrame(self._jbackend.pyToDF(self._to_java_table_ir(t._tir)), Env.spark_session()._wrapped)
def to_pandas(self, t, flatten):
return self.to_spark(t, flatten).toPandas()
def from_pandas(self, df, key):
return Table.from_spark(Env.spark_session().createDataFrame(df), key)
def add_reference(self, config):
Env.hail().variant.ReferenceGenome.fromJSON(json.dumps(config))
def load_references_from_dataset(self, path):
return json.loads(Env.hail().variant.ReferenceGenome.fromHailDataset(self.fs._jfs, path))
def from_fasta_file(self, name, fasta_file, index_file, x_contigs, y_contigs, mt_contigs, par):
self._jbackend.pyFromFASTAFile(
name, fasta_file, index_file, x_contigs, y_contigs, mt_contigs, par)
def remove_reference(self, name):
Env.hail().variant.ReferenceGenome.removeReference(name)
def get_reference(self, name):
return json.loads(Env.hail().variant.ReferenceGenome.getReference(name).toJSONString())
def add_sequence(self, name, fasta_file, index_file):
self._jbackend.pyAddSequence(name, fasta_file, index_file)
def remove_sequence(self, name):
scala_object(Env.hail().variant, 'ReferenceGenome').removeSequence(name)
def add_liftover(self, name, chain_file, dest_reference_genome):
self._jbackend.pyReferenceAddLiftover(name, chain_file, dest_reference_genome)
def remove_liftover(self, name, dest_reference_genome):
scala_object(Env.hail().variant, 'ReferenceGenome').referenceRemoveLiftover(
name, dest_reference_genome)
def parse_vcf_metadata(self, path):
return json.loads(self._jhc.pyParseVCFMetadataJSON(self.fs._jfs, path))
def index_bgen(self, files, index_file_map, rg, contig_recoding, skip_invalid_loci):
self._jbackend.pyIndexBgen(files, index_file_map, rg, contig_recoding, skip_invalid_loci)
def import_fam(self, path: str, quant_pheno: bool, delimiter: str, missing: str):
return json.loads(self._jbackend.pyImportFam(path, quant_pheno, delimiter, missing))
def register_ir_function(self, name, type_parameters, argument_names, argument_types, return_type, body):
r = CSERenderer(stop_at_jir=True)
code = r(body._ir)
jbody = (self._parse_value_ir(code, ref_map=dict(zip(argument_names, argument_types)), ir_map=r.jirs))
Env.hail().expr.ir.functions.IRFunctionRegistry.pyRegisterIR(
name,
[ta._parsable_string() for ta in type_parameters],
argument_names, [pt._parsable_string() for pt in argument_types],
return_type._parsable_string(),
jbody)
def persist_ir(self, ir):
return JavaIR(self._jhc.backend().executeLiteral(self._to_java_value_ir(ir)))
|
[] |
[] |
[
"HAIL_SPARK_MONITOR"
] |
[]
|
["HAIL_SPARK_MONITOR"]
|
python
| 1 | 0 | |
extend_euserv/login_euserv.py
|
"""Login to https://support.euserv.com/."""
# pylint:
from typing import Optional
# import os
from time import sleep
from random import randint
import asyncio
# import dotenv
import pyppeteer
from logzero import logger
from extend_euserv.get_ppbrowser import get_ppbrowser, BROWSER
from extend_euserv.config import Settings
# load .env to os.environ
# dotenv.load_dotenv()
CONFIG = Settings()
URL = "https://www.noip.com/members/dns/"
URL = "https://support.euserv.com/"
# fmt: off
async def login_euserv(
email: Optional[str] = "",
password: Optional[str] = "",
browser=BROWSER,
) -> pyppeteer.page.Page:
# fmt: on
"""Login to https://support.euserv.com/.
return a pyppeteer.page.Page for subsequent processing.
"""
try:
_ = await browser.newPage()
except Exception as exc:
logger.error(exc)
logger.info("Getting a new ppbrowser...")
browser = await get_ppbrowser()
try:
page = await browser.newPage()
except Exception as exc:
logger.error(exc)
raise
ubound = 3
idx = 0
err_flag = False
while idx < ubound:
idx += 1 # retry ubound times
logger.debug("Going to %s", URL)
done, pending = await asyncio.wait([
page.goto(URL),
page.waitForNavigation(),
])
err_flag = False
for task in done:
try:
await task
except Exception as exc:
logger.error(exc)
err_flag = True
if err_flag:
logger.info("Retry #%s", idx)
sleep(randint(1, 10))
else:
break
if err_flag:
raise SystemError("err_flag: %s, check previous error messages in the log" % err_flag) # return
# We give it another try
try:
_ = await page.waitForSelector(".td-title", {"timeout": 20000})
# already logged in
if "Logout" in (await page.content()):
logger.info("Already logged in.")
raise SystemExit(" Change this to return page ")
# return page
except Exception as exc:
logger.error("Not logged in yet, exc: %s, proceed", exc)
# proceed
# wait for form/submit
logger.debug("Trying logging in...")
try:
await page.waitForSelector(".td-title", {"timeout": 20000})
except TimeoutError as exc:
logger.error("Timed out waiting for selector '.td-title': %s", exc)
raise
except Exception as exc:
logger.error("Unable to fetch the page, network problem or euserv has changed page layout, %s, exiting", exc)
raise SystemExit(1) from exc
if not email:
# email = os.environ.get("EUSERV_EMAIL")
email = CONFIG.email
if not password:
# password = os.environ.get("EUSERV_PASSWORD")
password = CONFIG.password
if not email:
logger.error('Supply an email address, e.g., login_euserv(email="...") or set it in .env or as an environment variable (set/export EUSERV_EMAIL="...")')
raise SystemExit(1)
if not password:
logger.error('Supply a password, e.g., login_euserv(password="...") or set it in .env or as an environment variable (set/export EUSERV_PASSWORD="...")')
raise SystemExit(1)
logger.info("\nemail: %s \npassword: %s", "*" * 6 + email[6:], "*" * (len(password) + 3))
logger.debug("Logging in with email and password")
try:
await page.type('input[name="email"]', email, {"delay": 20})
await page.type('input[name="password"]', password + "\n", {"delay": 20})
# await handle.type('input[name="email"]', email, {"delay": 20})
# await handle.type('input[name="password"]', password, {"delay": 20})
# bhandle = await page.xpath('//*[@id="clogs"]/button')
# await bhandle[0].click()
except Exception as exc:
logger.error("Oh no, exc: %s, exiting", exc)
raise SystemExit(1)
# wait for page to load
# kc2_order_customer_orders_tab_1 vServer
logger.info("Waiting for 'Cover Page' to load...")
try:
# _ = await page.waitForSelector('#kc2_order_customer_orders_tab_1', {"timeout": 45000})
# _ = await page.waitForSelector('#kc2_order_customer_orders_tab_1', {"timeout": 45000})
_ = await page.waitForXPath('//*[contains(text(),"Cover Page")]', {"timeout": 45000})
except Exception as exc:
logger.error("No go, exc: %s, exiting", exc)
if "Login failed" in (await page.content()):
logger.error("""
Login failed.
Please check email address/customer ID and password.""")
# raise Exception(str(exc))
logger.warning("Bad news: we are _not_ in, closing the page")
await page.close()
return page # use page.isClosed() to check
# if "vServer" in (await page.content()):
if "Cover Page" in (await page.content()):
logger.info("Good news: we are in.")
else:
logger.warning("Something is not right, maybe euserv's page layout has changed?")
return page
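# Illustrative sketch (added for clarity; not part of the original module): the
# bounded retry-with-random-sleep pattern used above, in isolation. The coroutine
# factory argument is hypothetical.
async def _bounded_retry_sketch(coro_factory, retries=3):
    import asyncio as _asyncio
    from random import randint as _randint
    for attempt in range(1, retries + 1):
        try:
            return await coro_factory()
        except Exception as exc:
            logger.error("attempt %s failed: %s", attempt, exc)
            if attempt == retries:
                raise
            await _asyncio.sleep(_randint(1, 10))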
|
[] |
[] |
[
"EUSERV_EMAIL",
"EUSERV_PASSWORD"
] |
[]
|
["EUSERV_EMAIL", "EUSERV_PASSWORD"]
|
python
| 2 | 0 | |
apps/counter/main.go
|
package main
import (
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"sync/atomic"
"github.com/gorilla/mux"
)
func main() {
port := os.Getenv("PORT")
if port == "" {
port = "80"
}
portWithColon := fmt.Sprintf(":%s", port)
router := mux.NewRouter()
router.HandleFunc("/health", HealthHandler)
var index uint64
router.PathPrefix("/").Handler(CountHandler{index: &index})
// Serve!
fmt.Printf("Serving at http://localhost:%s\n(Pass as PORT environment variable)\n", port)
log.Fatal(http.ListenAndServe(portWithColon, router))
}
// HealthHandler returns a successful status and a message.
// For use by Consul or other processes that need to verify service health.
func HealthHandler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, "Hello, you've hit %s\n", r.URL.Path)
}
// Count stores a number that is being counted and other data to
// return as JSON in the API.
type Count struct {
Count uint64 `json:"count"`
Hostname string `json:"hostname"`
}
// CountHandler serves a JSON feed that contains a number that increments each time
// the API is called.
type CountHandler struct {
index *uint64
}
func (h CountHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
atomic.AddUint64(h.index, 1)
hostname, _ := os.Hostname()
index := atomic.LoadUint64(h.index)
count := Count{Count: index, Hostname: hostname}
responseJSON, _ := json.Marshal(count)
fmt.Fprint(w, string(responseJSON))
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
site/main.go
|
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
)
func main() {
port := os.Getenv("PORT")
if port == "" {
port = "8080"
log.Printf("Defaulting to port %s", port)
}
var configPath string
switch len(os.Args) {
case 1:
configPath = "vanity.yaml"
case 2:
configPath = os.Args[1]
default:
log.Fatal("usage: govanityurls [CONFIG]")
}
log.Printf("loading: %s", configPath)
vanity, err := ioutil.ReadFile(configPath)
if err != nil {
log.Fatal(err)
}
h, err := newHandler(vanity)
if err != nil {
log.Fatal(err)
}
http.Handle("/", h)
log.Fatal(http.ListenAndServe(fmt.Sprintf(":%s", port), nil))
}
func defaultHost(r *http.Request) string {
return r.Host
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
sunpy/util/config.py
|
"""SunPy configuration file functionality"""
import os
import configparser
from pathlib import Path
import sunpy
from sunpy.extern.appdirs import AppDirs
__all__ = ['load_config', 'print_config', 'CONFIG_DIR']
# This is to avoid creating a new config dir for each new dev version.
# We use AppDirs to locate and create the config directory.
dirs = AppDirs("sunpy", "sunpy")
# Default one set by AppDirs
CONFIG_DIR = dirs.user_config_dir
def load_config():
"""
Read the sunpyrc configuration file. If one does not exist in the user's
home directory, then read in the defaults from the module.
"""
config = configparser.RawConfigParser()
# Get locations of SunPy configuration files to be loaded
config_files = _find_config_files()
# Read in configuration files
config.read(config_files)
# Specify the working directory as a default so that the user's home
# directory can be located in an OS-independent manner
if not config.has_option('general', 'working_dir'):
config.set('general', 'working_dir', str(Path.home() / "sunpy"))
# Specify the database url as a default so that the user's home
# directory can be located in an OS-independent manner
if not config.has_option('database', 'url'):
config.set('database', 'url', "sqlite:///" + str(Path.home() / "sunpy" / "sunpydb.sqlite"))
working_dir = Path(config.get('general', 'working_dir'))
sample_dir = Path(config.get('downloads', 'sample_dir'))
download_dir = Path(config.get('downloads', 'download_dir'))
config.set('downloads', 'sample_dir', str((working_dir / sample_dir).expanduser().resolve()))
config.set('downloads', 'download_dir', str((working_dir / download_dir).expanduser().resolve()))
return config
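# Illustrative sketch (added for clarity; not part of sunpy): files passed later
# to ConfigParser.read() override values from earlier ones, which is how a user
# sunpyrc overrides the packaged defaults. The file names below are hypothetical.
def _layered_config_sketch():
    cfg = configparser.RawConfigParser()
    cfg.read(['defaults.cfg', 'user_overrides.cfg'])  # the later file wins on conflicts
    return cfg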
def _find_config_files():
"""Finds locations of SunPy configuration files"""
config_files = []
config_filename = 'sunpyrc'
# find default configuration file
module_dir = Path(sunpy.__file__).parent
config_files.append(str(module_dir / 'data' / 'sunpyrc'))
# if a user configuration file exists, add that to list of files to read
# so that any values set there will override ones specified in the default
# config file
config_path = Path(_get_user_configdir())
if config_path.joinpath(config_filename).exists():
config_files.append(str(config_path.joinpath(config_filename)))
return config_files
def get_and_create_download_dir():
"""
Get the configured download directory and create it if not present.
"""
download_dir = Path(sunpy.config.get('downloads', 'download_dir')).expanduser().resolve()
if not _is_writable_dir(download_dir):
raise RuntimeError(f'Could not write to SunPy downloads directory="{download_dir}"')
return sunpy.config.get('downloads', 'download_dir')
def get_and_create_sample_dir():
"""
Get the configured sample-data directory and create it if not present.
"""
sample_dir = Path(sunpy.config.get('downloads', 'sample_dir')).expanduser().resolve()
if not _is_writable_dir(sample_dir):
raise RuntimeError(f'Could not write to SunPy sample data directory="{sample_dir}"')
return sunpy.config.get('downloads', 'sample_dir')
def print_config():
"""Print current configuration options"""
print("FILES USED:")
for file_ in _find_config_files():
print(" " + file_)
print("\nCONFIGURATION:")
for section in sunpy.config.sections():
print(" [{0}]".format(section))
for option in sunpy.config.options(section):
print(" {} = {}".format(option, sunpy.config.get(section, option)))
print("")
def _is_writable_dir(p):
"""Checks to see if a directory exists and is writable."""
if not Path(p).exists():
Path(p).mkdir(parents=True)
return Path(p).is_dir() and os.access(p, os.W_OK)
def _get_user_configdir():
"""
Return the string representing the configuration dir.
The default is set by "AppDirs" and can be accessed by importing
``sunpy.util.config.CONFIG_DIR``.
You can override this with the "SUNPY_CONFIGDIR" environment variable.
"""
configdir = os.environ.get('SUNPY_CONFIGDIR', CONFIG_DIR)
if not _is_writable_dir(configdir):
raise RuntimeError(f'Could not write to SUNPY_CONFIGDIR="{configdir}"')
return configdir
|
[] |
[] |
[
"SUNPY_CONFIGDIR"
] |
[]
|
["SUNPY_CONFIGDIR"]
|
python
| 1 | 0 | |
bin/Pan.go
|
package main
import (
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"regexp"
"strings"
)
const (
Controller = "`@Controller`"
Service = "`@Service`"
Aspect = "`@Aspect`"
Filter = "`@Filter`"
Been = "`@Been`"
AnnController = `//@Controller`
AnnService = `//@Service`
AnnAspect = `//@Aspect`
AnnFilter = `//@Filter`
AnnBeen = `//@Been`
)
// Annotation is the annotation struct.
type Annotation struct {
filePkg string // package (directory) of the current file
RowDataStr string // raw data as originally scanned
StructName string
Type string
Pkg string
}
func (a *Annotation) GetPkgString() string {
return a.Pkg
}
func (a *Annotation) String() string {
return "\n\t" + a.StructName + " \t" + a.Type + "\r"
}
const (
all_reg = `//@\w+\stype\s\w+\sstruct`
been_reg = `//@Been\([\w\W]+?\)\sfunc?\s[\w\W]+?\(?\)`
)
var Anns = []Annotation{}
var goFiles = []string{}
var rootPath string
var src string
var conf string
var resourse string
var initPro string
var confFile string
var initProFile string
var mainFile string
var initProFileContent = `package initPro
import (
."github.com/CP-Panizza/Panizza"
)
var App = New()
type Components struct {
}
func init() {
RegisterComponents(new(Components))
}`
var mainFileContent = `package main
import(
."./initPro"
)
func main() {
App.StartServer()
}`
var confFileContent string
const (
FILE = 0
DIR = 1
)
type Obj struct {
path string
kind int
content string
}
func newObj(path string, kind int, content string) Obj {
return Obj{path, kind, content}
}
var ObjList = []Obj{}
func CreateProj(pro []Obj) {
for _, v := range pro {
if v.kind == FILE {
file, err := os.Create(v.path)
mustPanic(err)
file.WriteString(v.content)
file.Close()
} else {
mustPanic(os.Mkdir(v.path, os.ModeDir))
}
log.Println("build\t" + v.path + "\tsuccess!")
}
}
func mustPanic(err error) {
if err != nil {
panic(err)
}
}
// Existe reports whether the given file path exists.
func Existe(path string) bool {
_, err := os.Stat(path)
if err == nil {
return true
}
if os.IsNotExist(err) {
return false
}
return false
}
func IsDir(path string) bool {
s, err := os.Stat(path)
if err != nil {
return false
}
return s.IsDir()
}
var projectName *string
func main() {
complie := flag.Bool("c", false, "complie project!")
projectName = flag.String("n", "awesomePanizza", "Your application name!")
flag.Parse()
if *complie == true {
Complie()
} else {
NewProject()
}
}
// NewProject creates a new project skeleton.
func NewProject() {
confFileContent = `PROJECT_PACKAGE=` + *projectName + `
PORT=8080
#FILE_SERVER=/resourses`
fmt.Println(*projectName)
dir, err := os.Getwd()
mustPanic(err)
dir = strings.Replace(dir, `\`, `/`, -1) + `/` + *projectName
rootPath = dir
ObjList = append(ObjList, newObj(rootPath, DIR, ""))
src = dir + "/src"
ObjList = append(ObjList, newObj(src, DIR, ""))
conf = src + "/conf"
ObjList = append(ObjList, newObj(conf, DIR, ""))
resourse = src + "/resourses"
ObjList = append(ObjList, newObj(resourse, DIR, ""))
initPro = src + "/initPro"
ObjList = append(ObjList, newObj(initPro, DIR, ""))
confFile = conf + "/application.conf"
ObjList = append(ObjList, newObj(confFile, FILE, confFileContent))
initProFile = initPro + "/initPro.go"
ObjList = append(ObjList, newObj(initProFile, FILE, initProFileContent))
mainFile = src + "/" + *projectName + ".go"
ObjList = append(ObjList, newObj(mainFile, FILE, mainFileContent))
CreateProj(ObjList)
log.Println("Project build success!")
}
type BeenAnn struct {
FilePkg string
RowDataStr string
FuncName string
BeenName string
Pkg string
}
func (b *BeenAnn) String() string {
return "\n\t" + "App.AddBeen(" + b.BeenName + "," + b.FuncName + ")" + "\r"
}
var beens = []BeenAnn{}
var targetFile string
var targetPkg string
// Complie compiles the project.
func Complie() {
filePath, err := FindFileAbsPath("initPro.go")
if err != nil {
panic(err)
}
targetFile = filePath
targetPkg = targetFile[:strings.LastIndex(targetFile, "/")]
fmt.Println(targetFile)
//fmt.Println("targetPkg:",targetPkg)
reg, err := regexp.Compile(all_reg)
if err != nil {
panic(err)
}
beenReg, err := regexp.Compile(been_reg)
if err != nil {
panic(err)
}
dir, err := os.Getwd()
if err != nil {
panic(err)
}
//fmt.Println("start path:", dir)
SerachSrcFile(dir, &goFiles)
for _, f := range goFiles {
absPkgPath := f[:strings.LastIndex(f, "/")]
data, err := ioutil.ReadFile(f)
if err != nil {
panic(err)
}
been := beenReg.FindAllString(string(data), len(data))
if len(been) != 0 {
for _, v := range been {
b := BeenAnn{
FilePkg: absPkgPath,
RowDataStr: v,
}
beens = append(beens, b)
}
}
strs := reg.FindAllString(string(data), len(data))
if len(strs) != 0 {
for _, val := range strs {
ann := Annotation{
filePkg: absPkgPath,
RowDataStr: val,
}
Anns = append(Anns, ann)
}
}
}
for k, v := range Anns {
a, err := ParserAnns(v)
if err != nil {
panic(err)
}
Anns[k] = a
}
for k, v := range beens {
b, err := ParserBeens(v)
if err != nil {
panic(err)
}
beens[k] = b
}
pkgsString := ""
componentsString := ""
for _, v := range Anns {
componentsString += v.String()
if !strings.Contains(pkgsString, v.Pkg) && v.Pkg != `."."` {
pkgsString += "\n\t" + v.Pkg + "\r"
}
}
beensString := ""
for _, v := range beens {
beensString += v.String()
if !strings.Contains(pkgsString, v.Pkg) && v.Pkg != `."."` {
pkgsString += "\n\t" + v.Pkg + "\r"
}
}
panizzaPath, err := FindFileAbsPath("Panizza.go")
if err != nil {
fmt.Println(err)
goPath := os.Getenv("GOPATH")
if goPath == "" {
panic(errors.New("Please set GOPATH!!!"))
} else {
file := []string{}
FindFilesFromStartPath(goPath, "Panizza.go", &file)
if len(file) == 0 {
panic("can not find Panizza from GOPATH!!!")
}
panizzaPath = "." + `"` + file[0][strings.Index(file[0], "github"):strings.LastIndex(file[0], "/")] + `"`
}
} else {
panizzaPath, err = GetRelPath(targetPkg, panizzaPath)
if err != nil {
panic(err)
}
panizzaPath = "." + `"` + panizzaPath[:strings.LastIndex(panizzaPath, "/")] + `"`
}
pkgsString = "\n\t" + panizzaPath + "\r" + pkgsString
content := CreateInitProContent(pkgsString, componentsString, beensString)
// os.Create deletes and recreates the file if it already exists.
f, err := os.Create(targetFile)
if err != nil {
panic(err)
}
f.WriteString(content)
f.Close()
log.Println("COMPILE SUCCESS!")
}
// CreateInitProContent generates the content of the initPro file.
func CreateInitProContent(pkgs, comps, beens string) string {
temp := `package initPro
import (
` + pkgs + `
)
var App = New()
type Components struct {
` + comps + `
}
func init() {
` + beens + `
RegisterComponents(new(Components))
}`
return temp
}
// GetBeensNameFromStr extracts the been name from the annotation string.
func GetBeensNameFromStr(str string) (string, error) {
regName, err := regexp.Compile(`\([\w\W]+?\)`)
if err != nil {
panic(err)
}
name := string(regName.Find([]byte(str)))
if len(name) == 0 {
return "", errors.New("can not getBeensName at :" + str)
}
name = name[1 : len(name)-1]
name = strings.TrimSpace(strings.Split(name, "=")[1])
return name, nil
}
// GetBeenFuncNameFromStr extracts the name of the function that returns the been.
func GetBeenFuncNameFromStr(str string) (string, error) {
funcName := strings.TrimSpace(str[strings.Index(str, "func")+4:])
if len(funcName) == 0 {
return "", errors.New("can not getFuncName at:" + str)
}
return funcName, nil
}
// ParserBeens parses a Been annotation.
func ParserBeens(b BeenAnn) (BeenAnn, error) {
beenName, err := GetBeensNameFromStr(b.RowDataStr)
if err != nil {
return BeenAnn{}, err
}
b.BeenName = beenName
beenFuncName, err := GetBeenFuncNameFromStr(b.RowDataStr)
if err != nil {
return BeenAnn{}, err
}
b.FuncName = beenFuncName
pkg, err := GetRelPath(targetPkg, b.FilePkg)
if err != nil {
return BeenAnn{}, err
}
pkg = "." + `"` + pkg + `"`
b.Pkg = pkg
return b, nil
}
// StringArrToString joins a string slice into a single string.
func StringArrToString(s []string) string {
str := ""
for _, v := range s {
str += v
}
return str
}
// SerachSrcFile walks the given directory and collects all .go files.
func SerachSrcFile(startPath string, files *[]string) {
fs, err := ioutil.ReadDir(startPath)
if err != nil {
panic(err)
}
for _, file := range fs {
if file.IsDir() {
next := startPath + "/" + file.Name()
SerachSrcFile(next, files)
} else if strings.Contains(file.Name(), ".go") {
right := startPath + "/" + file.Name()
*files = append(*files, strings.Replace(right, "\\", "/", -1))
}
}
}
// ParserAnns builds an Annotation from the scanned string and its path.
func ParserAnns(a Annotation) (Annotation, error) {
structName, err := GetStructNameFromString(a.RowDataStr)
if err != nil {
return Annotation{}, err
}
a.StructName = structName
pkg, err := GetRelPath(targetPkg, a.filePkg)
if err != nil {
return Annotation{}, err
}
pkg = "." + `"` + pkg + `"`
a.Pkg = pkg
switch {
case strings.Contains(a.RowDataStr, AnnController):
a.Type = Controller
break
case strings.Contains(a.RowDataStr, AnnService):
a.Type = Service
break
case strings.Contains(a.RowDataStr, AnnAspect):
a.Type = Aspect
break
case strings.Contains(a.RowDataStr, AnnFilter):
a.Type = Filter
break
}
return a, nil
}
// GetStructNameFromString parses the struct name out of a string.
func GetStructNameFromString(str string) (string, error) {
reg, err := regexp.Compile(`type\s\w+\s`)
if err != nil {
return "", err
}
byte := reg.Find([]byte(str))
if len(byte) == 0 {
return "", errors.New("not find string!")
}
s := strings.Replace(string(byte), "type", "", 1)
s = strings.TrimSpace(s)
return s, nil
}
// GetRelPath computes the relative path between two absolute paths.
func GetRelPath(basePath, targetPath string) (string, error) {
relPath, err := filepath.Rel(basePath, targetPath)
if err != nil {
return "", err
}
return strings.Replace(relPath, `\`, `/`, len(relPath)), nil
}
// FindFileAbsPath searches subdirectories of the current working directory for a file and returns its absolute path
func FindFileAbsPath(FileName string) (string, error) {
dir, _ := os.Getwd()
var files []string
FindFilesFromStartPath(dir, FileName, &files)
if len(files) == 0 {
return "", errors.New("can not find" + FileName + "file from project!")
}
return files[0], nil
}
// FindFilesFromStartPath searches under the given directory for a file and returns its absolute path
func FindFilesFromStartPath(startPath string, rightFileName string, outStr *[]string) {
fs, err := ioutil.ReadDir(startPath)
if err != nil {
fmt.Println(err)
return
}
for _, file := range fs {
if file.IsDir() {
next := startPath + "/" + file.Name()
FindFilesFromStartPath(next, rightFileName, outStr)
} else if file.Name() == rightFileName {
right := startPath + "/" + file.Name()
*outStr = append(*outStr, strings.Replace(right, "\\", "/", -1))
return
}
}
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
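The scanner above leans on two small helpers: GetStructNameFromString pulls the struct name out of a "type X struct" declaration with the regex type\s\w+\s, and GetRelPath turns two absolute paths into a slash-separated relative path used to build import lines. The following is a minimal, self-contained sketch of both behaviors; the sample struct line and directory paths are invented for illustration and are not taken from the project above.

package main

import (
	"fmt"
	"path/filepath"
	"regexp"
	"strings"
)

func main() {
	// Struct-name extraction, mirroring GetStructNameFromString: the regex
	// matches "type <Name> ", then the keyword and whitespace are stripped.
	line := "type UserController struct { // hypothetical annotated struct"
	found := regexp.MustCompile(`type\s\w+\s`).Find([]byte(line))
	name := strings.TrimSpace(strings.TrimPrefix(string(found), "type"))
	fmt.Println(name) // UserController

	// Relative-path computation, mirroring GetRelPath: filepath.Rel plus a
	// backslash-to-slash normalization so the result is portable on Windows.
	rel, err := filepath.Rel("/home/dev/project/initPro", "/home/dev/project/web/controller")
	if err != nil {
		panic(err)
	}
	fmt.Println(strings.Replace(rel, `\`, `/`, -1)) // ../web/controller
}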
vendor/github.com/hashicorp/consul/agent/util_windows.go
|
// +build windows
package agent
import (
"os"
"os/exec"
"strings"
"syscall"
)
// ExecScript returns a command to execute a script through a shell.
func ExecScript(script string) (*exec.Cmd, error) {
shell := "cmd"
if other := os.Getenv("SHELL"); other != "" {
shell = other
}
script = "\"" + script + "\""
cmd := exec.Command(shell, "/C", script)
cmd.SysProcAttr = &syscall.SysProcAttr{
CmdLine: strings.Join(cmd.Args, " "),
}
return cmd, nil
}
func SetSysProcAttr(cmd *exec.Cmd) {}
func KillCommandSubtree(cmd *exec.Cmd) error {
return cmd.Process.Kill()
}
|
[
"\"SHELL\""
] |
[] |
[
"SHELL"
] |
[]
|
["SHELL"]
|
go
| 1 | 0 | |
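ExecScript above quotes the script and hands the raw command line to Windows through SysProcAttr.CmdLine, so cmd.exe performs its own argument splitting rather than relying on Go's default quoting. Below is a hedged, standalone sketch of the same pattern; the script content is invented, and the build constraint is needed because the CmdLine field exists only in the Windows variant of syscall.SysProcAttr.

// +build windows

package main

import (
	"log"
	"os"
	"os/exec"
	"strings"
	"syscall"
)

func main() {
	shell := "cmd"
	if other := os.Getenv("SHELL"); other != "" {
		shell = other
	}
	script := "\"echo hello\"" // invented script, just for the demo
	cmd := exec.Command(shell, "/C", script)
	// Pass the joined command line verbatim so cmd.exe splits it itself.
	cmd.SysProcAttr = &syscall.SysProcAttr{CmdLine: strings.Join(cmd.Args, " ")}
	out, err := cmd.CombinedOutput()
	if err != nil {
		log.Fatal(err)
	}
	os.Stdout.Write(out)
}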
pkg/jx/cmd/create_micro.go
|
package cmd
import (
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"github.com/spf13/cobra"
"github.com/jenkins-x/jx/pkg/jx/cmd/templates"
cmdutil "github.com/jenkins-x/jx/pkg/jx/cmd/util"
"github.com/jenkins-x/jx/pkg/log"
"github.com/jenkins-x/jx/pkg/util"
)
var (
createMicroLong = templates.LongDesc(`
Creates a new micro application and then optionally setups CI/CD pipelines and GitOps promotion.
Micro is an application generator for gRPC services in Go with a set of tools/libraries.
This command is expected to be run within your '$GOPATH' directory, e.g. at '$GOPATH/src/github.com/myOrgOrUser/'
For more documentation about micro see: [https://github.com/microio/micro](https://github.com/microio/micro)
`)
createMicroExample = templates.Examples(`
# Create a micro application and be prompted for the folder name
jx create micro
# Create a micro application under test1
jx create micro -o test1
`)
)
// CreateMicroOptions the options for the create micro command
type CreateMicroOptions struct {
CreateProjectOptions
}
// NewCmdCreateMicro creates a command object for the "create" command
func NewCmdCreateMicro(f cmdutil.Factory, out io.Writer, errOut io.Writer) *cobra.Command {
options := &CreateMicroOptions{
CreateProjectOptions: CreateProjectOptions{
ImportOptions: ImportOptions{
CommonOptions: CommonOptions{
Factory: f,
Out: out,
Err: errOut,
},
},
},
}
cmd := &cobra.Command{
Use: "micro [github.com/myuser/myapp]",
Short: "Create a new micro based application and import the generated code into git and Jenkins for CI/CD",
Long: createMicroLong,
Example: createMicroExample,
Run: func(cmd *cobra.Command, args []string) {
options.Cmd = cmd
options.Args = args
err := options.Run()
cmdutil.CheckErr(err)
},
}
return cmd
}
// checkMicroInstalled lazily installs micro if it's not already installed
func (o CreateMicroOptions) checkMicroInstalled() error {
_, err := o.getCommandOutput("", "micro", "help")
if err != nil {
log.Infoln("Installing micro's dependencies...")
// lets install micro
err = o.installBrewIfRequired()
if err != nil {
return err
}
if runtime.GOOS == "darwin" && !o.NoBrew {
err = o.runCommand("brew", "install", "protobuf")
if err != nil {
return err
}
}
log.Infoln("Downloading and building micro dependencies...")
packages := []string{"github.com/golang/protobuf/proto", "github.com/golang/protobuf/protoc-gen-go", "github.com/micro/protoc-gen-micro"}
for _, p := range packages {
log.Infof("Installing %s\n", p)
err = o.runCommand("go", "get", "-u", p)
if err != nil {
return fmt.Errorf("Failed to install %s: %s", p, err)
}
}
log.Infoln("Installed micro dependencies")
log.Infoln("Downloading and building micro - this can take a minute or so...")
err = o.runCommand("go", "get", "-u", "github.com/micro/micro")
if err == nil {
log.Infoln("Installed micro and its dependencies!")
}
}
return err
}
// GenerateMicro creates a fresh micro project by running micro on local shell
func (o CreateMicroOptions) GenerateMicro(dir string) error {
return o.runCommand("micro", "new", dir)
}
// Run implements the command
func (o *CreateMicroOptions) Run() error {
gopath := os.Getenv("GOPATH")
if gopath == "" {
log.Warnf(`No $GOPATH found.
You need to have installed go on your machine to be able to create micro services.
For instructions please see: %s
`, util.ColorInfo("https://golang.org/doc/install#install"))
return nil
}
err := o.checkMicroInstalled()
if err != nil {
return err
}
dir := ""
args := o.Args
if len(args) > 0 {
dir = args[0]
}
if dir == "" {
if o.BatchMode {
return util.MissingOption(optionOutputDir)
}
dir, err = util.PickValue("Pick a fully qualified name for the new project:", "github.com/myuser/myapp", true)
if err != nil {
return err
}
if dir == "" || dir == "." {
return fmt.Errorf("Invalid project name: %s", dir)
}
}
log.Blank()
// generate micro project
err = o.GenerateMicro(dir)
if err != nil {
return err
}
path := filepath.Join(gopath, "src", dir)
log.Infof("Created micro project at %s\n\n", util.ColorInfo(path))
return o.ImportCreatedProject(path)
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
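Run above resolves the freshly generated project as $GOPATH/src/<fully-qualified-name> before handing it to ImportCreatedProject. A tiny sketch of that resolution under a conventional GOPATH layout; the project name below is a placeholder, not something the command generates.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	gopath := os.Getenv("GOPATH")
	if gopath == "" {
		// jx prints an install hint and returns nil in this case.
		fmt.Println("GOPATH is not set")
		return
	}
	dir := "github.com/myuser/myapp" // hypothetical fully qualified project name
	fmt.Println(filepath.Join(gopath, "src", dir))
}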
mrjob/inline.py
|
# -*- coding: utf-8 -*-
# Copyright 2011 Matthew Tai and Yelp
# Copyright 2012-2015 Yelp and Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run an MRJob inline by running all mappers and reducers through the same
process. Useful for debugging."""
import logging
import os
from io import BytesIO
from mrjob.job import MRJob
from mrjob.parse import parse_mr_job_stderr
from mrjob.sim import SimMRJobRunner
from mrjob.sim import SimRunnerOptionStore
from mrjob.util import save_current_environment
from mrjob.util import save_cwd
__author__ = 'Matthew Tai <[email protected]>'
log = logging.getLogger(__name__)
class InlineMRJobRunner(SimMRJobRunner):
"""Runs an :py:class:`~mrjob.job.MRJob` in the same process, so it's easy
to attach a debugger.
This is the default way to run jobs (we assume you'll spend some time
debugging your job before you're ready to run it on EMR or Hadoop).
To more accurately simulate your environment prior to running on
Hadoop/EMR, use ``-r local`` (see
:py:class:`~mrjob.local.LocalMRJobRunner`).
"""
alias = 'inline'
OPTION_STORE_CLASS = SimRunnerOptionStore
# stick to a single split for efficiency
_DEFAULT_MAP_TASKS = 1
_DEFAULT_REDUCE_TASKS = 1
def __init__(self, mrjob_cls=None, **kwargs):
""":py:class:`~mrjob.inline.InlineMRJobRunner` takes the same keyword
args as :py:class:`~mrjob.runner.MRJobRunner`. However, please note:
* *hadoop_extra_args*, *hadoop_input_format*, *hadoop_output_format*,
*hadoop_streaming_jar*, and *partitioner* are ignored
because they require Java. If you need to test these, consider
starting up a standalone Hadoop instance and running your job with
``-r hadoop``.
* *python_bin*, *setup*, *setup_cmds*, *setup_scripts* and
*steps_python_bin* are ignored because we don't invoke
subprocesses.
"""
super(InlineMRJobRunner, self).__init__(**kwargs)
assert ((mrjob_cls) is None or issubclass(mrjob_cls, MRJob))
self._mrjob_cls = mrjob_cls
# options that we ignore because they involve running subprocesses
IGNORED_LOCAL_OPTS = [
'bootstrap_mrjob',
'python_bin',
'setup',
'setup_cmds',
'setup_scripts',
'steps_python_bin',
]
def _check_step_works_with_runner(self, step_dict):
for key in ('mapper', 'combiner', 'reducer'):
if key in step_dict:
substep = step_dict[key]
if substep['type'] != 'script':
raise Exception(
"InlineMRJobRunner cannot run %s steps." %
substep['type'])
if 'pre_filter' in substep:
raise Exception(
"InlineMRJobRunner cannot run filters.")
def _create_setup_wrapper_script(self):
# Inline mode does not use a wrapper script (no subprocesses)
pass
def warn_ignored_opts(self):
""" Warn the user of opts being ignored by this runner.
"""
super(InlineMRJobRunner, self).warn_ignored_opts()
for ignored_opt in self.IGNORED_LOCAL_OPTS:
if ((not self._opts.is_default(ignored_opt)) and
self._opts[ignored_opt]):
log.warning('ignoring %s option (use -r local instead): %r' %
(ignored_opt, self._opts[ignored_opt]))
def _get_steps(self):
"""Redefine this so that we can get step descriptions without
calling a subprocess."""
if self._steps is None:
job_args = ['--steps'] + self._mr_job_extra_args(local=True)
self._steps = self._mrjob_cls(args=job_args)._steps_desc()
return self._steps
def _run_step(self, step_num, step_type, input_path, output_path,
working_dir, env, child_stdin=None):
step = self._get_step(step_num)
# Passing local=False ensures the job uses proper names for file
# options (see issue #851 on github)
common_args = (['--step-num=%d' % step_num] +
self._mr_job_extra_args(local=False))
if step_type == 'mapper':
child_args = (
['--mapper'] + [input_path] + common_args)
elif step_type == 'reducer':
child_args = (
['--reducer'] + [input_path] + common_args)
elif step_type == 'combiner':
child_args = ['--combiner'] + common_args + ['-']
has_combiner = (step_type == 'mapper' and 'combiner' in step)
try:
# Use custom stdout
if has_combiner:
child_stdout = BytesIO()
else:
child_stdout = open(output_path, 'wb')
with save_current_environment():
with save_cwd():
os.environ.update(env)
os.chdir(working_dir)
child_instance = self._mrjob_cls(args=child_args)
child_instance.sandbox(stdin=child_stdin,
stdout=child_stdout)
child_instance.execute()
if has_combiner:
sorted_lines = sorted(child_stdout.getvalue().splitlines())
combiner_stdin = BytesIO(b'\n'.join(sorted_lines))
else:
child_stdout.flush()
finally:
child_stdout.close()
while len(self._counters) <= step_num:
self._counters.append({})
parse_mr_job_stderr(child_instance.stderr.getvalue(),
counters=self._counters[step_num])
if has_combiner:
self._run_step(step_num, 'combiner', None, output_path,
working_dir, env, child_stdin=combiner_stdin)
combiner_stdin.close()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
cmd/report.go
|
/*
Copyright © 2021 NAME HERE <EMAIL ADDRESS>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"github.com/spf13/cobra"
)
// reportCmd represents the report command
var reportCmd = &cobra.Command{
Use: "report",
Short: "A brief description of your command",
Long: `A longer description that spans multiple lines and likely contains examples
and usage of using your command. For example:
Cobra is a CLI library for Go that empowers applications.
This application is a tool to generate the needed files
to quickly create a Cobra application.`,
Run: func(cmd *cobra.Command, args []string) {
fmt.Println("report called")
},
}
func init() {
rootCmd.AddCommand(reportCmd)
// Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command
// and all subcommands, e.g.:
// reportCmd.PersistentFlags().String("foo", "", "A help for foo")
// Cobra supports local flags which will only run when this command
// is called directly, e.g.:
// reportCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
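The generated init above only marks in comments where persistent and local flags would be registered. As a minimal sketch, here is how both kinds attach to a cobra command; the flag names are placeholders, not part of the generated file. Persistent flags are inherited by subcommands, while local flags apply only to the command that declares them.

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	reportCmd := &cobra.Command{
		Use: "report",
		Run: func(cmd *cobra.Command, args []string) {
			format, _ := cmd.Flags().GetString("format")
			verbose, _ := cmd.Flags().GetBool("verbose")
			fmt.Println("report called, format:", format, "verbose:", verbose)
		},
	}
	// Persistent flag: visible to reportCmd and any subcommands added later.
	reportCmd.PersistentFlags().Bool("verbose", false, "enable verbose output")
	// Local flag: only valid when reportCmd itself is invoked.
	reportCmd.Flags().String("format", "text", "output format")

	if err := reportCmd.Execute(); err != nil {
		fmt.Println(err)
	}
}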
cmd/block-watch/main.go
|
// Watch blocks and report issues (to terminal and to Discord)
//
// Issues:
// 1. Failed Flashbots (or other 0-gas) transaction
// 2. Bundle out of order by effective-gasprice
// 3. Bundle effective-gasprice is lower than lowest non-fb tx gasprice
package main
import (
"context"
"flag"
"fmt"
"log"
"os"
"time"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/metachris/flashbots/api"
"github.com/metachris/flashbots/blockcheck"
"github.com/metachris/go-ethutils/blockswithtx"
"github.com/metachris/go-ethutils/utils"
"github.com/pkg/errors"
)
var silent bool
var sendErrorsToDiscord bool
// Backlog of new blocks that are not yet present in the mev-blocks API (it has ~5 blocks delay)
var BlockBacklog map[int64]*blockswithtx.BlockWithTxReceipts = make(map[int64]*blockswithtx.BlockWithTxReceipts)
var dailyErrorSummary blockcheck.ErrorSummary = blockcheck.NewErrorSummary()
var weeklyErrorSummary blockcheck.ErrorSummary = blockcheck.NewErrorSummary()
func main() {
log.SetOutput(os.Stdout)
ethUri := flag.String("eth", os.Getenv("ETH_NODE"), "Ethereum node URI")
// recentBundleOrdersPtr := flag.Bool("recentBundleOrder", false, "check recent bundle orders blocks")
blockHeightPtr := flag.Int64("block", 0, "specific block to check")
watchPtr := flag.Bool("watch", false, "watch and process new blocks")
silentPtr := flag.Bool("silent", false, "don't print info about every block")
discordPtr := flag.Bool("discord", false, "send errors to Discord")
flag.Parse()
silent = *silentPtr
if *discordPtr {
if len(os.Getenv("DISCORD_WEBHOOK")) == 0 {
log.Fatal("No DISCORD_WEBHOOK environment variable found!")
}
sendErrorsToDiscord = true
}
// Connect to the geth node and start the BlockCheckService
if *ethUri == "" {
log.Fatal("Pass a valid eth node with -eth argument or ETH_NODE env var.")
}
fmt.Printf("Connecting to %s ...", *ethUri)
client, err := ethclient.Dial(*ethUri)
utils.Perror(err)
fmt.Printf(" ok\n")
if *blockHeightPtr != 0 {
// get block with receipts
block, err := blockswithtx.GetBlockWithTxReceipts(client, *blockHeightPtr)
utils.Perror(err)
// check the block
check, err := blockcheck.CheckBlock(block, false)
if err != nil {
fmt.Println("Check at height error:", err)
}
msg := check.Sprint(true, false, true)
print(msg)
}
if *watchPtr {
log.Println("Start watching...")
watch(client)
}
}
func watch(client *ethclient.Client) {
headers := make(chan *types.Header)
sub, err := client.SubscribeNewHead(context.Background(), headers)
utils.Perror(err)
var errorCountSerious int
var errorCountNonSerious int
for {
select {
case err := <-sub.Err():
log.Fatal(err)
case header := <-headers:
// New block header received. Download block with tx-receipts
b, err := blockswithtx.GetBlockWithTxReceipts(client, header.Number.Int64())
if err != nil {
err = errors.Wrap(err, "error in GetBlockWithTxReceipts")
log.Printf("%+v\n", err)
continue
}
if !silent {
fmt.Println("Queueing new block", b.Block.Number())
}
// Add to backlog, because it can only be processed when the Flashbots API has caught up
BlockBacklog[header.Number.Int64()] = b
// Query flashbots API to get latest block it has processed
opts := api.GetBlocksOptions{BlockNumber: header.Number.Int64()}
flashbotsResponse, err := api.GetBlocks(&opts)
if err != nil {
log.Println("Flashbots API error:", err)
continue
}
// Go through block-backlog, and process those within Flashbots API range
for height, blockFromBacklog := range BlockBacklog {
if height <= flashbotsResponse.LatestBlockNumber {
if !silent {
utils.PrintBlock(blockFromBacklog.Block)
}
check, err := blockcheck.CheckBlock(blockFromBacklog, false)
if err != nil {
log.Println("CheckBlock from backlog error:", err, "block:", blockFromBacklog.Block.Number())
break
}
// no checking error, can process and remove from backlog
delete(BlockBacklog, blockFromBacklog.Block.Number().Int64())
// Handle errors in the bundle (print, Discord, etc.)
if check.HasErrors() {
if check.HasSeriousErrors() { // only serious errors are printed and sent to Discord
errorCountSerious += 1
msg := check.Sprint(true, false, true)
fmt.Println(msg)
// if sendErrorsToDiscord {
// if len(check.Errors) == 1 && check.HasBundleWith0EffectiveGasPrice {
// // Short message if only 1 error and that is a 0-effective-gas-price
// msg := check.SprintHeader(false, true)
// msg += " - Error: " + check.Errors[0]
// SendToDiscord(msg)
// } else {
// SendToDiscord(check.Sprint(false, true))
// }
// }
fmt.Println("")
} else if check.HasLessSeriousErrors() { // less serious errors are only counted
errorCountNonSerious += 1
}
// Send failed TX to Discord
// if sendErrorsToDiscord && check.TriggerAlertOnFailedTx {
// SendToDiscord(check.Sprint(false, true, false))
// }
// Count errors
if check.HasSeriousErrors() || check.HasLessSeriousErrors() { // update and print miner error count on serious and less-serious errors
log.Printf("stats - 50p_errors: %d, 25p_errors: %d\n", errorCountSerious, errorCountNonSerious)
weeklyErrorSummary.AddCheckErrors(check)
dailyErrorSummary.AddCheckErrors(check)
fmt.Println(dailyErrorSummary.String())
}
}
// IS IT TIME TO RESET DAILY & WEEKLY ERRORS?
now := time.Now()
// Daily summary at 3pm ET
dailySummaryTriggerHourUtc := 19 // 3pm ET
// log.Println(now.UTC().Hour(), dailySummaryTriggerHourUtc, time.Since(dailyErrorSummary.TimeStarted).Hours())
if now.UTC().Hour() == dailySummaryTriggerHourUtc && time.Since(dailyErrorSummary.TimeStarted).Hours() >= 2 {
log.Println("trigger daily summary")
if sendErrorsToDiscord {
msg := dailyErrorSummary.String()
if msg != "" {
fmt.Println(msg)
SendToDiscord("Daily miner summary: ```" + msg + "```")
}
}
// reset daily summary
dailyErrorSummary.Reset()
}
// Weekly summary on Friday at 10am ET
weeklySummaryTriggerHourUtc := 14 // 10am ET
if now.UTC().Weekday() == time.Friday && now.UTC().Hour() == weeklySummaryTriggerHourUtc && time.Since(weeklyErrorSummary.TimeStarted).Hours() >= 2 {
log.Println("trigger weekly summary")
if sendErrorsToDiscord {
msg := weeklyErrorSummary.String()
if msg != "" {
fmt.Println(msg)
SendToDiscord("Weekly miner summary: ```" + msg + "```")
}
}
// reset weekly summary
weeklyErrorSummary.Reset()
}
// // -------- Send daily summary to Discord ---------
// if sendErrorsToDiscord {
// // Check if it's time to send to Discord: first block after 3pm ET (7pm UTC)
// // triggerHourUtc := 19
// // dateLastSent := lastSummarySentToDiscord.Format("01-02-2006")
// // dateToday := now.Format("01-02-2006")
// // For testing, send at specific interval
// if time.Since(dailyErrorSummary.TimeStarted).Hours() >= 3 {
// // if dateToday != dateLastSent && now.UTC().Hour() == triggerHourUtc {
// log.Println("Sending summary to Discord:")
// msg := dailyErrorSummary.String()
// if msg != "" {
// fmt.Println(msg)
// SendToDiscord("```" + msg + "```")
// }
// // Reset errors
// dailyErrorSummary.Reset()
// log.Println("Done, errors are reset.")
// }
// }
time.Sleep(1 * time.Second)
}
}
}
}
}
|
[
"\"ETH_NODE\"",
"\"DISCORD_WEBHOOK\""
] |
[] |
[
"ETH_NODE",
"DISCORD_WEBHOOK"
] |
[]
|
["ETH_NODE", "DISCORD_WEBHOOK"]
|
go
| 2 | 0 | |
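The watcher above gates its daily and weekly Discord summaries on two conditions: the current UTC hour equals the trigger hour, and at least two hours have passed since the summary was last reset, which keeps the same window from firing more than once. A standalone sketch of that predicate with invented timestamps:

package main

import (
	"fmt"
	"time"
)

// shouldTrigger reports whether a summary should fire: we are inside the
// trigger hour (UTC) and at least minGap has elapsed since the last reset.
func shouldTrigger(now, started time.Time, triggerHourUTC int, minGap time.Duration) bool {
	return now.UTC().Hour() == triggerHourUTC && now.Sub(started) >= minGap
}

func main() {
	started := time.Date(2021, 8, 1, 10, 0, 0, 0, time.UTC)
	now := time.Date(2021, 8, 1, 19, 5, 0, 0, time.UTC) // 3pm ET == 19:00 UTC
	fmt.Println(shouldTrigger(now, started, 19, 2*time.Hour)) // true: in window, 9h since reset
	fmt.Println(shouldTrigger(now, now, 19, 2*time.Hour))     // false: summary just reset
}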
tests/trainer/connectors/test_checkpoint_connector.py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import mock
from unittest.mock import Mock
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.trainer.states import TrainerFn
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf
class HPCHookdedModel(BoringModel):
def __init__(self):
super().__init__()
self.hpc_save_called = 0
self.hpc_load_called = 0
def on_hpc_save(self, checkpoint):
assert "state_dict" in checkpoint
self.hpc_save_called += 1
def on_hpc_load(self, checkpoint):
assert "state_dict" in checkpoint
self.hpc_load_called += 1
def test_hpc_hook_calls(tmpdir):
model = HPCHookdedModel()
trainer = Trainer(default_root_dir=tmpdir, max_steps=1, enable_checkpointing=False, logger=False)
trainer.fit(model)
connector = trainer.checkpoint_connector
connector.hpc_save(tmpdir, logger=Mock())
assert model.hpc_save_called == 1
assert model.hpc_load_called == 0
# new training run, restore from hpc checkpoint file automatically
assert set(os.listdir(tmpdir)) == {"hpc_ckpt_1.ckpt"}
trainer = Trainer(default_root_dir=tmpdir, max_steps=1, enable_checkpointing=False, logger=False)
trainer.fit(model)
assert model.hpc_save_called == 1
assert model.hpc_load_called == 1
def test_preloaded_checkpoint_lifecycle(tmpdir):
"""Tests that the preloaded checkpoint contents gets cleared from memory when it is not required anymore."""
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, max_steps=1)
trainer.fit(model)
connector = trainer.checkpoint_connector
assert not trainer.resume_from_checkpoint
assert not connector.resume_checkpoint_path
assert not connector._loaded_checkpoint
connector.resume_start()
assert not connector.resume_checkpoint_path
assert not connector._loaded_checkpoint
connector.resume_end()
assert not connector.resume_checkpoint_path
assert not connector._loaded_checkpoint
ckpt_path = trainer.checkpoint_callback.best_model_path
trainer = Trainer(default_root_dir=tmpdir, max_steps=2, resume_from_checkpoint=ckpt_path)
connector = trainer.checkpoint_connector
connector.resume_start()
assert connector.resume_checkpoint_path == ckpt_path
assert connector._loaded_checkpoint
assert isinstance(connector._loaded_checkpoint, dict)
connector.resume_end()
assert not connector.resume_checkpoint_path
assert not connector._loaded_checkpoint
def test_hpc_restore_attempt(tmpdir):
"""Test that restore() attempts to restore the hpc_ckpt with highest priority."""
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, max_steps=1, enable_checkpointing=False, logger=False)
trainer.fit(model)
hpc_ckpt_path = tmpdir / "hpc_ckpt_3.ckpt"
trainer.save_checkpoint(hpc_ckpt_path)
assert os.listdir(tmpdir) == ["hpc_ckpt_3.ckpt"]
# set weights to zero
for param in model.parameters():
torch.nn.init.constant_(param, 0)
# case 1: restore hpc first, no explicit resume path provided
trainer = Trainer(default_root_dir=tmpdir, max_steps=2, enable_checkpointing=False, logger=False)
trainer.fit(model)
for param in model.parameters():
assert param.abs().sum() > 0
torch.nn.init.constant_(param, 0)
# case 2: explicit resume path provided, restore hpc anyway
trainer = Trainer(default_root_dir=tmpdir, max_steps=3, resume_from_checkpoint="not existing")
trainer.fit(model)
for param in model.parameters():
assert param.abs().sum() > 0
def test_hpc_max_ckpt_version(tmpdir):
"""Test that the CheckpointConnector is able to find the hpc checkpoint file with the highest version."""
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, max_steps=1)
trainer.fit(model)
trainer.save_checkpoint(tmpdir / "hpc_ckpt.ckpt")
trainer.save_checkpoint(tmpdir / "hpc_ckpt_0.ckpt")
trainer.save_checkpoint(tmpdir / "hpc_ckpt_3.ckpt")
trainer.save_checkpoint(tmpdir / "hpc_ckpt_33.ckpt")
assert trainer.checkpoint_connector.hpc_resume_path == str(tmpdir / "hpc_ckpt_33.ckpt")
assert trainer.checkpoint_connector.max_ckpt_version_in_folder(tmpdir) == 33
assert trainer.checkpoint_connector.max_ckpt_version_in_folder(tmpdir / "not" / "existing") is None
@mock.patch.dict(os.environ, {"PL_FAULT_TOLERANT_TRAINING": "1"})
@RunIf(min_torch="1.7.0")
def test_loops_restore(tmpdir):
"""Test that required loop state_dict is loaded correctly by checkpoint connector."""
model = BoringModel()
checkpoint_callback = ModelCheckpoint(dirpath=tmpdir, save_last=True)
trainer_args = dict(
default_root_dir=tmpdir,
max_epochs=1,
limit_train_batches=1,
limit_val_batches=1,
logger=False,
callbacks=[checkpoint_callback],
num_sanity_val_steps=0,
)
trainer = Trainer(**trainer_args)
trainer.fit(model)
trainer_args["resume_from_checkpoint"] = str(tmpdir / "last.ckpt")
trainer = Trainer(**trainer_args)
for fn in TrainerFn:
if fn != TrainerFn.TUNING:
trainer_fn = getattr(trainer, f"{fn}_loop")
trainer_fn.load_state_dict = Mock()
for fn in TrainerFn:
if fn != TrainerFn.TUNING:
trainer.state.fn = fn
trainer.checkpoint_connector.resume_start()
trainer.checkpoint_connector.restore_loops()
trainer_loop = getattr(trainer, f"{fn}_loop")
trainer_loop.load_state_dict.assert_called()
trainer_loop.load_state_dict.reset_mock()
for fn2 in TrainerFn:
if fn2 not in (fn, TrainerFn.TUNING):
trainer_loop2 = getattr(trainer, f"{fn2}_loop")
trainer_loop2.load_state_dict.assert_not_called()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
vendor/github.com/danielgtaylor/openapi-cli-generator/cli/cli.go
|
package cli
import (
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"strings"
colorable "github.com/mattn/go-colorable"
isatty "github.com/mattn/go-isatty"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
gentleman "gopkg.in/h2non/gentleman.v2"
)
// Root command (entrypoint) of the CLI.
var Root *cobra.Command
// Cache is used to store temporary data between runs.
var Cache *viper.Viper
// Client makes HTTP requests and parses the responses.
var Client *gentleman.Client
// Formatter is the currently configured response output formatter.
var Formatter ResponseFormatter
// PreRun is a function that will run after flags are parsed but before the
// command handler has been called.
var PreRun func(cmd *cobra.Command, args []string) error
// Stdout is a cross-platform, color-safe writer if colors are enabled,
// otherwise it defaults to `os.Stdout`.
var Stdout io.Writer = os.Stdout
// Stderr is a cross-platform, color-safe writer if colors are enabled,
// otherwise it defaults to `os.Stderr`.
var Stderr io.Writer = os.Stderr
var tty bool
// Config is used to pass settings to the CLI.
type Config struct {
AppName string
EnvPrefix string
Version string
}
// Init will set up the CLI.
func Init(config *Config) {
initConfig(config.AppName, config.EnvPrefix)
initCache(config.AppName)
authInitialized = false
// Determine if we are using a TTY or colored output is forced-on.
tty = false
if isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd()) || viper.GetBool("color") {
tty = true
}
if viper.GetBool("nocolor") {
// If forced off, ignore all of the above!
tty = false
}
if tty {
// Support colored output across operating systems.
Stdout = colorable.NewColorableStdout()
Stderr = colorable.NewColorableStderr()
}
zerolog.SetGlobalLevel(zerolog.WarnLevel)
log.Logger = log.Output(ConsoleWriter{Out: Stderr, NoColor: !tty}).With().Caller().Logger()
Client = gentleman.New()
UserAgentMiddleware()
LogMiddleware(tty)
Formatter = NewDefaultFormatter(tty)
Root = &cobra.Command{
Use: filepath.Base(os.Args[0]),
Version: config.Version,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if viper.GetBool("verbose") {
zerolog.SetGlobalLevel(zerolog.DebugLevel)
settings := viper.AllSettings()
// Hide any secret values
for k := range settings {
if strings.Contains(k, "secret") || strings.Contains(k, "password") {
settings[k] = "**HIDDEN**"
}
}
log.Info().Fields(settings).Msg("Configuration")
}
if PreRun != nil {
if err := PreRun(cmd, args); err != nil {
return err
}
}
return nil
},
}
Root.SetOutput(Stdout)
Root.AddCommand(&cobra.Command{
Use: "help-config",
Short: "Show CLI configuration help",
Run: showHelpConfig,
})
Root.AddCommand(&cobra.Command{
Use: "help-input",
Short: "Show CLI input help",
Run: showHelpInput,
})
AddGlobalFlag("verbose", "", "Enable verbose log output", false)
AddGlobalFlag("output-format", "o", "Output format [json, yaml]", "json")
AddGlobalFlag("query", "q", "Filter / project results using JMESPath", "")
AddGlobalFlag("raw", "", "Output result of query as raw rather than an escaped JSON string or list", false)
AddGlobalFlag("server", "", "Override server URL", "")
}
func userHomeDir() string {
if runtime.GOOS == "windows" {
home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
if home == "" {
home = os.Getenv("USERPROFILE")
}
return home
}
return os.Getenv("HOME")
}
func initConfig(appName, envPrefix string) {
// One-time setup to ensure the path exists so we can write files into it
// later as needed.
configDir := path.Join(userHomeDir(), "."+appName)
if err := os.MkdirAll(configDir, 0700); err != nil {
panic(err)
}
// Load configuration from file(s) if provided.
viper.SetConfigName("config")
viper.AddConfigPath("/etc/" + appName + "/")
viper.AddConfigPath("$HOME/." + appName + "/")
viper.ReadInConfig()
// Load configuration from the environment if provided. Flags below get
// transformed automatically, e.g. `client-id` -> `PREFIX_CLIENT_ID`.
viper.SetEnvPrefix(envPrefix)
viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
viper.AutomaticEnv()
// Save a few things that will be useful elsewhere.
viper.Set("app-name", appName)
viper.Set("config-directory", configDir)
viper.SetDefault("server-index", 0)
}
func initCache(appName string) {
Cache = viper.New()
Cache.SetConfigName("cache")
Cache.AddConfigPath("$HOME/." + appName + "/")
// Write a blank cache if no file is already there. Later you can use
// cli.Cache.SaveConfig() to write new values.
filename := path.Join(viper.GetString("config-directory"), "cache.json")
if _, err := os.Stat(filename); os.IsNotExist(err) {
if err := ioutil.WriteFile(filename, []byte("{}"), 0600); err != nil {
panic(err)
}
}
Cache.ReadInConfig()
}
func showHelpConfig(cmd *cobra.Command, args []string) {
help := `# CLI Configuration
Configuration for the CLI comes from the following places:
1. Command options
2. Environment variables
3. Configuration files
## Global Command Options
Command options are passed when invoking the command. For example, ¬--verbose¬ configures the CLI to run with additional output for debugging. Using the top level ¬--help¬ shows a list of available options:
$flags
## Environment Variables
Environment variables must be capitalized, prefixed with ¬$APP¬, and words are separated by an underscore rather than a dash. For example, setting ¬$APP_VERBOSE=1¬ is equivalent to passing ¬--verbose¬ to the command.
## Configuration Files
Configuration files can be used to configure the CLI and can be written using JSON, YAML, or TOML. The CLI searches in your home directory first (e.g. ¬$config-dir/config.json¬) and on Mac/Linux also looks in e.g. ¬/etc/$app/config.json¬. The following is equivalent to passing ¬--verbose¬ to the command:
¬¬¬json
{
"verbose": true
}
¬¬¬
## Special Cases
Some configuration values are not exposed as command options but can be set via prefixed environment variables or in configuration files. They are documented here.
Name | Type | Description
--------- | ------ | -----------
¬color¬ | ¬bool¬ | Force colorized output.
¬nocolor¬ | ¬bool¬ | Disable colorized output.
`
help = strings.Replace(help, "¬", "`", -1)
help = strings.Replace(help, "$APP", strings.ToUpper(viper.GetString("app-name")), -1)
help = strings.Replace(help, "$app", viper.GetString("app-name"), -1)
help = strings.Replace(help, "$config-dir", viper.GetString("config-directory"), -1)
flags := make([]string, 0)
flags = append(flags, "Name | Type | Description")
flags = append(flags, "--------------- | -------- | -----------")
Root.PersistentFlags().VisitAll(func(f *pflag.Flag) {
flags = append(flags, fmt.Sprintf("%-15s", "`"+f.Name+"`")+" | `"+fmt.Sprintf("%-7s", f.Value.Type()+"`")+" | "+f.Usage)
})
help = strings.Replace(help, "$flags", strings.Join(flags, "\n"), -1)
fmt.Fprintln(Stdout, Markdown(help))
}
func showHelpInput(cmd *cobra.Command, args []string) {
help := `# CLI Request Input
Input to the CLI is handled via parameters, arguments, and standard input. The help for an individual command shows the available optional parameters and required arguments. Optional parameters can be passed like ¬--option=value¬ or ¬--option value¬.
For requests that require a body, standard input and a CLI shorthand can complement each other to supply the request data.
## Standard Input
Standard input allows you to send in whatever data is required to make a successful request against the API. For example: ¬my-cli command <input.json¬ or ¬echo '{\"hello\": \"world\"}' | my-cli command¬.
Note: Windows PowerShell and other shells that do not support input redirection via ¬<¬ will need to pipe input instead, for example: ¬cat input.json | my-cli command¬. This may load the entire input file into memory.
## CLI Shorthand Syntax
Any arguments beyond those that are required for a command are treated as CLI shorthand and used to generate structured data for requests. Shorthand objects are specified as key/value pairs. They complement standard input, so they can be used to override or to add additional fields as needed. For example: ¬my-cli command <input.json field: value, other: value2¬.
Null, booleans, integers, and floats are automatically coerced into the appropriate type. Use the ¬~¬ modifier after the ¬:¬ to force a string, like ¬field:~ true¬.
Nested objects use a ¬.¬ separator. Properties can be grouped inside of ¬{¬ and ¬}¬. For example, ¬foo.bar{id: 1, count: 5}¬ will become:
¬¬¬json
{
"foo": {
"bar": {
"id": 1,
"count": 5
}
}
}
¬¬¬
Simple scalar arrays use a ¬,¬ to separate values, like ¬key: 1, 2, 3¬. Appending to an array is possible like ¬key[]: 1, key[]: 2, key[]: 3¬. For nested arrays you specify multiple square bracket sets like ¬key[][]: value¬. You can directly reference an index by including one like ¬key[2]: value¬.
Both objects and arrays can use backreferences. An object backref starts with a ¬.¬ and an array backref starts with ¬[¬. For example, ¬foo{id: 1, count: 5}¬ can be rewritten as ¬foo.id: 1, .count: 5¬.
Use an ¬@¬ to load the contents of a file as the value, like ¬key: @filename¬. Use the ¬~¬ modifier to disable this behavior: ¬key:~ @user¬. By default structured data is loaded when recognized. Use the ¬~¬ filename modifier to force a string: ¬key: @~filename¬. Use the ¬%¬ modifier to load as base-64 data: ¬key: @%filename¬.
See https://github.com/danielgtaylor/openapi-cli-generator/tree/master/shorthand#readme for more info.`
fmt.Fprintln(Stdout, Markdown(strings.Replace(help, "¬", "`", -1)))
}
|
[
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"USERPROFILE\"",
"\"HOME\""
] |
[] |
[
"USERPROFILE",
"HOME",
"HOMEPATH",
"HOMEDRIVE"
] |
[]
|
["USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE"]
|
go
| 4 | 0 |
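initConfig above is what makes the PREFIX_CLIENT_ID-style variables described in help-config work: viper uppercases the key, prepends the EnvPrefix, and swaps dashes for underscores before consulting the environment. A hedged standalone sketch of that mapping; the prefix and variable name below are invented rather than taken from any particular generated CLI.

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.SetEnvPrefix("myapp") // hypothetical EnvPrefix passed to cli.Init
	v.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
	v.AutomaticEnv()

	// The key "client-id" is looked up as the MYAPP_CLIENT_ID variable.
	os.Setenv("MYAPP_CLIENT_ID", "abc123")
	fmt.Println(v.GetString("client-id")) // abc123
}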