filename (string, 4–198 chars) | content (string, 25–939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 distinct value) | constargjson (string, 2–3.9k chars) | lang (string, 3 distinct values) | constargcount (float64, 0–129, nullable) | variableargcount (float64, 0–0, nullable) | sentence (string, 1 distinct value) |
---|---|---|---|---|---|---|---|---|---|---|
pkg/runner/run_context.go | package runner
import (
"context"
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
"github.com/mitchellh/go-homedir"
log "github.com/sirupsen/logrus"
"github.com/nektos/act/pkg/common"
"github.com/nektos/act/pkg/container"
"github.com/nektos/act/pkg/model"
)
// RunContext contains info about current job
type RunContext struct {
Name string
Config *Config
Matrix map[string]interface{}
Run *model.Run
EventJSON string
Env map[string]string
ExtraPath []string
CurrentStep string
StepResults map[string]*stepResult
ExprEval ExpressionEvaluator
JobContainer container.Container
OutputMappings map[MappableOutput]MappableOutput
}
type MappableOutput struct {
StepID string
OutputName string
}
func (rc *RunContext) String() string {
return fmt.Sprintf("%s/%s", rc.Run.Workflow.Name, rc.Name)
}
type stepResult struct {
Success bool `json:"success"`
Outputs map[string]string `json:"outputs"`
}
// GetEnv returns the env for the context
func (rc *RunContext) GetEnv() map[string]string {
if rc.Env == nil {
rc.Env = mergeMaps(rc.Config.Env, rc.Run.Workflow.Env, rc.Run.Job().Env)
}
rc.Env["ACT"] = "true"
return rc.Env
}
func (rc *RunContext) jobContainerName() string {
return createContainerName("act", rc.String())
}
func (rc *RunContext) startJobContainer() common.Executor {
image := rc.platformImage()
return func(ctx context.Context) error {
rawLogger := common.Logger(ctx).WithField("raw_output", true)
logWriter := common.NewLineWriter(rc.commandHandler(ctx), func(s string) bool {
if rc.Config.LogOutput {
rawLogger.Infof("%s", s)
} else {
rawLogger.Debugf("%s", s)
}
return true
})
common.Logger(ctx).Infof("\U0001f680 Start image=%s", image)
name := rc.jobContainerName()
envList := make([]string, 0)
bindModifiers := ""
if runtime.GOOS == "darwin" {
bindModifiers = ":delegated"
}
envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_TOOL_CACHE", "/opt/hostedtoolcache"))
envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_OS", "Linux"))
envList = append(envList, fmt.Sprintf("%s=%s", "RUNNER_TEMP", "/tmp"))
binds := []string{
fmt.Sprintf("%s:%s", "/var/run/docker.sock", "/var/run/docker.sock"),
}
if rc.Config.BindWorkdir {
binds = append(binds, fmt.Sprintf("%s:%s%s", rc.Config.Workdir, rc.Config.Workdir, bindModifiers))
}
if rc.Config.ContainerArchitecture == "" {
rc.Config.ContainerArchitecture = fmt.Sprintf("%s/%s", "linux", runtime.GOARCH)
}
rc.JobContainer = container.NewContainer(&container.NewContainerInput{
Cmd: nil,
Entrypoint: []string{"/usr/bin/tail", "-f", "/dev/null"},
WorkingDir: rc.Config.Workdir,
Image: image,
Name: name,
Env: envList,
Mounts: map[string]string{
name: filepath.Dir(rc.Config.Workdir),
"act-toolcache": "/toolcache",
"act-actions": "/actions",
},
NetworkMode: "host",
Binds: binds,
Stdout: logWriter,
Stderr: logWriter,
Privileged: rc.Config.Privileged,
UsernsMode: rc.Config.UsernsMode,
Platform: rc.Config.ContainerArchitecture,
})
var copyWorkspace bool
var copyToPath string
if !rc.Config.BindWorkdir {
copyToPath, copyWorkspace = rc.localCheckoutPath()
copyToPath = filepath.Join(rc.Config.Workdir, copyToPath)
}
return common.NewPipelineExecutor(
rc.JobContainer.Pull(rc.Config.ForcePull),
rc.stopJobContainer(),
rc.JobContainer.Create(),
rc.JobContainer.Start(false),
rc.JobContainer.CopyDir(copyToPath, rc.Config.Workdir+string(filepath.Separator)+".").IfBool(copyWorkspace),
rc.JobContainer.Copy(filepath.Dir(rc.Config.Workdir), &container.FileEntry{
Name: "workflow/event.json",
Mode: 0644,
Body: rc.EventJSON,
}, &container.FileEntry{
Name: "workflow/envs.txt",
Mode: 0644,
Body: "",
}, &container.FileEntry{
Name: "home/.act",
Mode: 0644,
Body: "",
}),
)(ctx)
}
}
func (rc *RunContext) execJobContainer(cmd []string, env map[string]string) common.Executor {
return func(ctx context.Context) error {
return rc.JobContainer.Exec(cmd, env)(ctx)
}
}
// stopJobContainer removes the job container (if it exists) and its volume (if it exists) if !rc.Config.ReuseContainers
func (rc *RunContext) stopJobContainer() common.Executor {
return func(ctx context.Context) error {
if rc.JobContainer != nil && !rc.Config.ReuseContainers {
return rc.JobContainer.Remove().
Then(container.NewDockerVolumeRemoveExecutor(rc.jobContainerName(), false))(ctx)
}
return nil
}
}
// ActionCacheDir returns the directory where remote actions are cached for this run context
func (rc *RunContext) ActionCacheDir() string {
var xdgCache string
var ok bool
if xdgCache, ok = os.LookupEnv("XDG_CACHE_HOME"); !ok || xdgCache == "" {
if home, err := homedir.Dir(); err == nil {
xdgCache = filepath.Join(home, ".cache")
} else if xdgCache, err = filepath.Abs("."); err != nil {
log.Fatal(err)
}
}
return filepath.Join(xdgCache, "act")
}
// Executor returns a pipeline executor for all the steps in the job
func (rc *RunContext) Executor() common.Executor {
steps := make([]common.Executor, 0)
steps = append(steps, func(ctx context.Context) error {
if len(rc.Matrix) > 0 {
common.Logger(ctx).Infof("\U0001F9EA Matrix: %v", rc.Matrix)
}
return nil
})
steps = append(steps, rc.startJobContainer())
for i, step := range rc.Run.Job().Steps {
if step.ID == "" {
step.ID = fmt.Sprintf("%d", i)
}
steps = append(steps, rc.newStepExecutor(step))
}
steps = append(steps, rc.stopJobContainer())
return common.NewPipelineExecutor(steps...).If(rc.isEnabled)
}
func (rc *RunContext) newStepExecutor(step *model.Step) common.Executor {
sc := &StepContext{
RunContext: rc,
Step: step,
}
return func(ctx context.Context) error {
rc.CurrentStep = sc.Step.ID
rc.StepResults[rc.CurrentStep] = &stepResult{
Success: true,
Outputs: make(map[string]string),
}
exprEval, err := sc.setupEnv(ctx)
if err != nil {
return err
}
rc.ExprEval = exprEval
runStep, err := rc.EvalBool(sc.Step.If)
if err != nil {
common.Logger(ctx).Errorf(" \u274C Error in if: expression - %s", sc.Step)
rc.StepResults[rc.CurrentStep].Success = false
return err
}
if !runStep {
log.Debugf("Skipping step '%s' due to '%s'", sc.Step.String(), sc.Step.If)
return nil
}
common.Logger(ctx).Infof("\u2B50 Run %s", sc.Step)
err = sc.Executor()(ctx)
if err == nil {
common.Logger(ctx).Infof(" \u2705 Success - %s", sc.Step)
} else {
common.Logger(ctx).Errorf(" \u274C Failure - %s", sc.Step)
if sc.Step.ContinueOnError {
common.Logger(ctx).Infof("Failed but continue next step")
err = nil
rc.StepResults[rc.CurrentStep].Success = true
} else {
rc.StepResults[rc.CurrentStep].Success = false
}
}
return err
}
}
func (rc *RunContext) platformImage() string {
job := rc.Run.Job()
c := job.Container()
if c != nil {
return rc.ExprEval.Interpolate(c.Image)
}
if job.RunsOn() == nil {
log.Errorf("'runs-on' key not defined in %s", rc.String())
}
for _, runnerLabel := range job.RunsOn() {
platformName := rc.ExprEval.Interpolate(runnerLabel)
image := rc.Config.Platforms[strings.ToLower(platformName)]
if image != "" {
return image
}
}
return ""
}
func (rc *RunContext) isEnabled(ctx context.Context) bool {
job := rc.Run.Job()
l := common.Logger(ctx)
runJob, err := rc.EvalBool(job.If)
if err != nil {
common.Logger(ctx).Errorf(" \u274C Error in if: expression - %s", job.Name)
return false
}
if !runJob {
l.Debugf("Skipping job '%s' due to '%s'", job.Name, job.If)
return false
}
img := rc.platformImage()
if img == "" {
if job.RunsOn() == nil {
log.Errorf("'runs-on' key not defined in %s", rc.String())
}
for _, runnerLabel := range job.RunsOn() {
platformName := rc.ExprEval.Interpolate(runnerLabel)
l.Infof("\U0001F6A7 Skipping unsupported platform '%+v'", platformName)
}
return false
}
return true
}
var splitPattern *regexp.Regexp
// EvalBool evaluates an expression against the current run context
func (rc *RunContext) EvalBool(expr string) (bool, error) {
if splitPattern == nil {
splitPattern = regexp.MustCompile(fmt.Sprintf(`%s|%s|\S+`, expressionPattern.String(), operatorPattern.String()))
}
if strings.HasPrefix(strings.TrimSpace(expr), "!") {
return false, errors.New("expressions starting with ! must be wrapped in ${{ }}")
}
if expr != "" {
parts := splitPattern.FindAllString(expr, -1)
var evaluatedParts []string
for i, part := range parts {
if operatorPattern.MatchString(part) {
evaluatedParts = append(evaluatedParts, part)
continue
}
interpolatedPart, isString := rc.ExprEval.InterpolateWithStringCheck(part)
// This peculiar transformation has to be done because the GitHub parser
// treats "false" returned from contexts as a string, not a boolean.
// Hence env.SOMETHING will evaluate to true in an if: expression
// regardless of whether SOMETHING is set to false, true or any other string.
// It also handles some other weirdness that was found by trial and error.
if (expressionPattern.MatchString(part) && // it is an expression
!strings.Contains(part, "!")) && // but it's not negated
interpolatedPart == "false" && // and the interpolated string is false
(isString || previousOrNextPartIsAnOperator(i, parts)) { // and it's of type string or has a logical operator before or after
interpolatedPart = fmt.Sprintf("'%s'", interpolatedPart) // then we have to quote the false expression
}
evaluatedParts = append(evaluatedParts, interpolatedPart)
}
joined := strings.Join(evaluatedParts, " ")
v, _, err := rc.ExprEval.Evaluate(fmt.Sprintf("Boolean(%s)", joined))
if err != nil {
return false, err
}
log.Debugf("expression '%s' evaluated to '%s'", expr, v)
return v == "true", nil
}
return true, nil
}
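// exampleQuoteFalse is an illustrative sketch (not part of the original file): it
// mirrors the quoting rule applied above to a single interpolated part, so that a
// context value of "false" survives as the non-empty (truthy) string 'false'
// instead of being parsed as the boolean false.
func exampleQuoteFalse(interpolatedPart string, isString bool) string {
	if interpolatedPart == "false" && isString {
		// quote it, exactly as EvalBool does for expression parts
		return fmt.Sprintf("'%s'", interpolatedPart)
	}
	return interpolatedPart
}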
func previousOrNextPartIsAnOperator(i int, parts []string) bool {
operator := false
if i > 0 {
operator = operatorPattern.MatchString(parts[i-1])
}
if i+1 < len(parts) {
operator = operator || operatorPattern.MatchString(parts[i+1])
}
return operator
}
func mergeMaps(maps ...map[string]string) map[string]string {
rtnMap := make(map[string]string)
for _, m := range maps {
for k, v := range m {
rtnMap[k] = v
}
}
return rtnMap
}
func createContainerName(parts ...string) string {
name := make([]string, 0)
pattern := regexp.MustCompile("[^a-zA-Z0-9]")
partLen := (30 / len(parts)) - 1
for i, part := range parts {
if i == len(parts)-1 {
name = append(name, pattern.ReplaceAllString(part, "-"))
} else {
// If any part has a '-<number>' on the end it is likely part of a matrix job.
// Let's preserve the number to prevent clashes in container names.
re := regexp.MustCompile("-[0-9]+$")
num := re.FindStringSubmatch(part)
if len(num) > 0 {
name = append(name, trimToLen(pattern.ReplaceAllString(part, "-"), partLen-len(num[0])))
name = append(name, num[0])
} else {
name = append(name, trimToLen(pattern.ReplaceAllString(part, "-"), partLen))
}
}
}
return strings.ReplaceAll(strings.Trim(strings.Join(name, "-"), "-"), "--", "-")
}
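// exampleMatrixContainerNames is an illustrative sketch (not part of the original
// file): two matrix-style names that differ only in their trailing "-1"/"-2" are
// slugged into distinct container names, because the numeric suffix is preserved
// as described in the comment above.
func exampleMatrixContainerNames() (string, string) {
	return createContainerName("push/build-1", "act"),
		createContainerName("push/build-2", "act")
}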
func trimToLen(s string, l int) string {
if l < 0 {
l = 0
}
if len(s) > l {
return s[:l]
}
return s
}
type jobContext struct {
Status string `json:"status"`
Container struct {
ID string `json:"id"`
Network string `json:"network"`
} `json:"container"`
Services map[string]struct {
ID string `json:"id"`
} `json:"services"`
}
func (rc *RunContext) getJobContext() *jobContext {
jobStatus := "success"
for _, stepStatus := range rc.StepResults {
if !stepStatus.Success {
jobStatus = "failure"
break
}
}
return &jobContext{
Status: jobStatus,
}
}
func (rc *RunContext) getStepsContext() map[string]*stepResult {
return rc.StepResults
}
type githubContext struct {
Event map[string]interface{} `json:"event"`
EventPath string `json:"event_path"`
Workflow string `json:"workflow"`
RunID string `json:"run_id"`
RunNumber string `json:"run_number"`
Actor string `json:"actor"`
Repository string `json:"repository"`
EventName string `json:"event_name"`
Sha string `json:"sha"`
Ref string `json:"ref"`
HeadRef string `json:"head_ref"`
BaseRef string `json:"base_ref"`
Token string `json:"token"`
Workspace string `json:"workspace"`
Action string `json:"action"`
}
func (rc *RunContext) getGithubContext() *githubContext {
token, ok := rc.Config.Secrets["GITHUB_TOKEN"]
if !ok {
token = os.Getenv("GITHUB_TOKEN")
}
runID := rc.Config.Env["GITHUB_RUN_ID"]
if runID == "" {
runID = "1"
}
runNumber := rc.Config.Env["GITHUB_RUN_NUMBER"]
if runNumber == "" {
runNumber = "1"
}
ghc := &githubContext{
Event: make(map[string]interface{}),
EventPath: fmt.Sprintf("%s/%s", filepath.Dir(rc.Config.Workdir), "workflow/event.json"),
Workflow: rc.Run.Workflow.Name,
RunID: runID,
RunNumber: runNumber,
Actor: rc.Config.Actor,
EventName: rc.Config.EventName,
Token: token,
Workspace: rc.Config.Workdir,
Action: rc.CurrentStep,
}
// Backwards compatibility for configs that require
// a default rather than being run as a cmd
if ghc.Actor == "" {
ghc.Actor = "nektos/act"
}
repoPath := rc.Config.Workdir
repo, err := common.FindGithubRepo(repoPath)
if err != nil {
log.Warningf("unable to get git repo: %v", err)
} else {
ghc.Repository = repo
}
_, sha, err := common.FindGitRevision(repoPath)
if err != nil {
log.Warningf("unable to get git revision: %v", err)
} else {
ghc.Sha = sha
}
ref, err := common.FindGitRef(repoPath)
if err != nil {
log.Warningf("unable to get git ref: %v", err)
} else {
log.Debugf("using github ref: %s", ref)
ghc.Ref = ref
}
if rc.EventJSON != "" {
err = json.Unmarshal([]byte(rc.EventJSON), &ghc.Event)
if err != nil {
log.Errorf("Unable to Unmarshal event '%s': %v", rc.EventJSON, err)
}
}
// set the branch in the event data
if rc.Config.DefaultBranch != "" {
ghc.Event = withDefaultBranch(rc.Config.DefaultBranch, ghc.Event)
} else {
ghc.Event = withDefaultBranch("master", ghc.Event)
}
if ghc.EventName == "pull_request" {
ghc.BaseRef = asString(nestedMapLookup(ghc.Event, "pull_request", "base", "ref"))
ghc.HeadRef = asString(nestedMapLookup(ghc.Event, "pull_request", "head", "ref"))
}
return ghc
}
func (ghc *githubContext) isLocalCheckout(step *model.Step) bool {
if step.Type() != model.StepTypeUsesActionRemote {
return false
}
remoteAction := newRemoteAction(step.Uses)
if !remoteAction.IsCheckout() {
return false
}
if repository, ok := step.With["repository"]; ok && repository != ghc.Repository {
return false
}
if repository, ok := step.With["ref"]; ok && repository != ghc.Ref {
return false
}
return true
}
func asString(v interface{}) string {
if v == nil {
return ""
} else if s, ok := v.(string); ok {
return s
}
return ""
}
func nestedMapLookup(m map[string]interface{}, ks ...string) (rval interface{}) {
var ok bool
if len(ks) == 0 { // degenerate input
return nil
}
if rval, ok = m[ks[0]]; !ok {
return nil
} else if len(ks) == 1 { // we've reached the final key
return rval
} else if m, ok = rval.(map[string]interface{}); !ok {
return nil
} else { // 1+ more keys
return nestedMapLookup(m, ks[1:]...)
}
}
func withDefaultBranch(b string, event map[string]interface{}) map[string]interface{} {
repoI, ok := event["repository"]
if !ok {
repoI = make(map[string]interface{})
}
repo, ok := repoI.(map[string]interface{})
if !ok {
log.Warnf("unable to set default branch to %v", b)
return event
}
// if the branch is already there return with no changes
if _, ok = repo["default_branch"]; ok {
return event
}
repo["default_branch"] = b
event["repository"] = repo
return event
}
func (rc *RunContext) withGithubEnv(env map[string]string) map[string]string {
github := rc.getGithubContext()
env["CI"] = "true"
env["GITHUB_ENV"] = fmt.Sprintf("%s/%s", filepath.Dir(rc.Config.Workdir), "workflow/envs.txt")
env["GITHUB_WORKFLOW"] = github.Workflow
env["GITHUB_RUN_ID"] = github.RunID
env["GITHUB_RUN_NUMBER"] = github.RunNumber
env["GITHUB_ACTION"] = github.Action
env["GITHUB_ACTIONS"] = "true"
env["GITHUB_ACTOR"] = github.Actor
env["GITHUB_REPOSITORY"] = github.Repository
env["GITHUB_EVENT_NAME"] = github.EventName
env["GITHUB_EVENT_PATH"] = github.EventPath
env["GITHUB_WORKSPACE"] = github.Workspace
env["GITHUB_SHA"] = github.Sha
env["GITHUB_REF"] = github.Ref
env["GITHUB_TOKEN"] = github.Token
env["GITHUB_SERVER_URL"] = "https://github.com"
env["GITHUB_API_URL"] = "https://api.github.com"
env["GITHUB_GRAPHQL_URL"] = "https://api.github.com/graphql"
job := rc.Run.Job()
if job.RunsOn() != nil {
for _, runnerLabel := range job.RunsOn() {
platformName := rc.ExprEval.Interpolate(runnerLabel)
if platformName != "" {
if platformName == "ubuntu-latest" {
// hardcode current ubuntu-latest since we have no way to check that 'on the fly'
env["ImageOS"] = "ubuntu20"
} else {
platformName = strings.SplitN(strings.Replace(platformName, `-`, ``, 1), `.`, 1)[0]
env["ImageOS"] = platformName
}
}
}
}
return env
}
func (rc *RunContext) localCheckoutPath() (string, bool) {
ghContext := rc.getGithubContext()
for _, step := range rc.Run.Job().Steps {
if ghContext.isLocalCheckout(step) {
return step.With["path"], true
}
}
return "", false
}
| ["\"GITHUB_TOKEN\""] | [] | ["GITHUB_TOKEN"] | [] | ["GITHUB_TOKEN"] | go | 1 | 0 | |
core/etrace/ejaeger/config.go | package ejaeger
import (
"github.com/gotomicro/ego/core/eapp"
"os"
"time"
"github.com/gotomicro/ego/core/econf"
"github.com/gotomicro/ego/core/elog"
"github.com/opentracing/opentracing-go"
"github.com/uber/jaeger-client-go"
jconfig "github.com/uber/jaeger-client-go/config"
)
// Config ...
type Config struct {
ServiceName string
Sampler *jconfig.SamplerConfig
Reporter *jconfig.ReporterConfig
Headers *jaeger.HeadersConfig
EnableRPCMetrics bool
tags []opentracing.Tag
options []jconfig.Option
PanicOnError bool
closer func() error
}
// Load ...
func Load(key string) *Config {
var config = DefaultConfig()
if err := econf.UnmarshalKey(key, config); err != nil {
elog.Panic("unmarshal key", elog.FieldErr(err))
}
return config
}
// DefaultConfig ...
func DefaultConfig() *Config {
agentAddr := "127.0.0.1:6831"
headerName := eapp.EgoTraceIDName()
if addr := os.Getenv("JAEGER_AGENT_ADDR"); addr != "" {
agentAddr = addr
}
return &Config{
ServiceName: eapp.Name(),
Sampler: &jconfig.SamplerConfig{
Type: "const",
Param: 0.001,
},
Reporter: &jconfig.ReporterConfig{
LogSpans: false,
BufferFlushInterval: 1 * time.Second,
LocalAgentHostPort: agentAddr,
},
EnableRPCMetrics: true,
Headers: &jaeger.HeadersConfig{
TraceBaggageHeaderPrefix: "ctx-",
TraceContextHeaderName: headerName,
},
tags: []opentracing.Tag{
{Key: "hostname", Value: eapp.HostName()},
},
PanicOnError: true,
}
}
// WithTag ...
func (config *Config) WithTag(tags ...opentracing.Tag) *Config {
if config.tags == nil {
config.tags = make([]opentracing.Tag, 0)
}
config.tags = append(config.tags, tags...)
return config
}
// WithOption ...
func (config *Config) WithOption(options ...jconfig.Option) *Config {
if config.options == nil {
config.options = make([]jconfig.Option, 0)
}
config.options = append(config.options, options...)
return config
}
// Build ...
func (config *Config) Build(options ...jconfig.Option) opentracing.Tracer {
var configuration = jconfig.Configuration{
ServiceName: config.ServiceName,
Sampler: config.Sampler,
Reporter: config.Reporter,
RPCMetrics: config.EnableRPCMetrics,
Headers: config.Headers,
Tags: config.tags,
}
tracer, closer, err := configuration.NewTracer(config.options...)
if err != nil {
if config.PanicOnError {
elog.Panic("new jaeger", elog.FieldComponent("jaeger"), elog.FieldErr(err))
} else {
elog.Error("new jaeger", elog.FieldComponent("jaeger"), elog.FieldErr(err))
}
}
config.closer = closer.Close
return tracer
}
func (config *Config) Stop() error {
return config.closer()
}
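// exampleUsage is an illustrative sketch (not part of the original file): the config
// key "trace.jaeger" and the extra tag are assumptions. It shows the intended
// Load -> Build -> Stop lifecycle of this component.
func exampleUsage() func() error {
	config := Load("trace.jaeger").WithTag(opentracing.Tag{Key: "region", Value: "local"})
	tracer := config.Build()
	opentracing.SetGlobalTracer(tracer)
	// return the shutdown hook so the caller can flush spans on exit
	return config.Stop
}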
| ["\"JAEGER_AGENT_ADDR\""] | [] | ["JAEGER_AGENT_ADDR"] | [] | ["JAEGER_AGENT_ADDR"] | go | 1 | 0 | |
test/test_utils/__init__.py | import json
import logging
import os
import re
import subprocess
import sys
import time
import boto3
import git
import pytest
from botocore.exceptions import ClientError
from glob import glob
from invoke import run
from invoke.context import Context
from packaging.version import LegacyVersion, Version, parse
from packaging.specifiers import SpecifierSet
from retrying import retry
from src import config
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
LOGGER.addHandler(logging.StreamHandler(sys.stderr))
# Constant to represent default region for boto3 commands
DEFAULT_REGION = "us-west-2"
# Constant to represent region where p3dn tests can be run
P3DN_REGION = "us-east-1"
UBUNTU_18_BASE_DLAMI_US_WEST_2 = "ami-0150e36b3f936a26e"
UBUNTU_18_BASE_DLAMI_US_EAST_1 = "ami-044971d381e6a1109"
AML2_GPU_DLAMI_US_WEST_2 = "ami-071cb1e434903a577"
AML2_GPU_DLAMI_US_EAST_1 = "ami-044264d246686b043"
AML2_CPU_ARM64_US_WEST_2 = "ami-0bccd90b9db95e2e5"
AML2_CPU_ARM64_US_EAST_1 = "ami-01c47f32b27ed7fa0"
PT_GPU_PY3_BENCHMARK_IMAGENET_AMI_US_EAST_1 = "ami-0673bb31cc62485dd"
PT_GPU_PY3_BENCHMARK_IMAGENET_AMI_US_WEST_2 = "ami-02d9a47bc61a31d43"
NEURON_UBUNTU_18_BASE_DLAMI_US_WEST_2 = "ami-0b5d270a84e753c18"
UL_AMI_LIST = [
UBUNTU_18_BASE_DLAMI_US_EAST_1,
UBUNTU_18_BASE_DLAMI_US_WEST_2,
PT_GPU_PY3_BENCHMARK_IMAGENET_AMI_US_EAST_1,
PT_GPU_PY3_BENCHMARK_IMAGENET_AMI_US_WEST_2,
NEURON_UBUNTU_18_BASE_DLAMI_US_WEST_2,
]
# ECS images are maintained here: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html
ECS_AML2_GPU_USWEST2 = "ami-09ef8c43fa060063d"
ECS_AML2_CPU_USWEST2 = "ami-014a2e30da708ee8b"
ECS_AML2_GRAVITON_CPU_USWEST2 = "ami-0fb32cf53e5ab7686"
NEURON_AL2_DLAMI = "ami-03c4cdc89eca4dbcb"
DLAMI_PYTHON_MAPPING = {
UBUNTU_18_BASE_DLAMI_US_WEST_2: "/usr/bin/python3.7",
UBUNTU_18_BASE_DLAMI_US_EAST_1: "/usr/bin/python3.7",
}
# Used for referencing tests scripts from container_tests directory (i.e. from ECS cluster)
CONTAINER_TESTS_PREFIX = os.path.join(os.sep, "test", "bin")
# S3 Bucket to use to transfer tests into an EC2 instance
TEST_TRANSFER_S3_BUCKET = "s3://dlinfra-tests-transfer-bucket"
# S3 Bucket to use to record benchmark results for further retrieving
BENCHMARK_RESULTS_S3_BUCKET = "s3://dlinfra-dlc-cicd-performance"
# Ubuntu ami home dir
UBUNTU_HOME_DIR = "/home/ubuntu"
# Reason string for skipping tests in PR context
SKIP_PR_REASON = "Skipping test in PR context to speed up iteration time. Test will be run in nightly/release pipeline."
# Reason string for skipping tests in non-PR context
PR_ONLY_REASON = "Skipping test that doesn't need to be run outside of PR context."
KEYS_TO_DESTROY_FILE = os.path.join(os.sep, "tmp", "keys_to_destroy.txt")
# Sagemaker test types
SAGEMAKER_LOCAL_TEST_TYPE = "local"
SAGEMAKER_REMOTE_TEST_TYPE = "sagemaker"
PUBLIC_DLC_REGISTRY = "763104351884"
SAGEMAKER_EXECUTION_REGIONS = ["us-west-2", "us-east-1", "eu-west-1"]
class MissingPythonVersionException(Exception):
"""
When the Python Version is missing from an image_uri where it is expected to exist
"""
pass
def get_dockerfile_path_for_image(image_uri):
"""
For a given image_uri, find the path within the repository to its corresponding dockerfile
:param image_uri: str Image URI
:return: str Absolute path to dockerfile
"""
github_repo_path = os.path.abspath(os.path.curdir).split("test", 1)[0]
framework, framework_version = get_framework_and_version_from_tag(image_uri)
framework_path = framework.replace("_", os.path.sep) if "huggingface" in framework else framework
job_type = get_job_type_from_image(image_uri)
short_framework_version = re.search(r"(\d+\.\d+)", image_uri).group(1)
framework_version_path = os.path.join(github_repo_path, framework_path, job_type, "docker", short_framework_version)
if not os.path.isdir(framework_version_path):
long_framework_version = re.search(r"\d+(\.\d+){2}", image_uri).group()
framework_version_path = os.path.join(
github_repo_path, framework_path, job_type, "docker", long_framework_version
)
python_version = re.search(r"py\d+", image_uri).group()
python_version_path = os.path.join(framework_version_path, python_version)
if not os.path.isdir(python_version_path):
python_version_path = os.path.join(framework_version_path, "py3")
device_type = get_processor_from_image_uri(image_uri)
cuda_version = get_cuda_version_from_tag(image_uri)
dockerfile_name = get_expected_dockerfile_filename(device_type, image_uri)
dockerfiles_list = [
path
for path in glob(os.path.join(python_version_path, "**", dockerfile_name), recursive=True)
if "example" not in path
]
if device_type in ["gpu"]:
if not cuda_version and len(dockerfiles_list) > 1:
raise LookupError(
f"dockerfiles_list has more than one result, and needs cuda_version to be in image_uri to "
f"uniquely identify the right dockerfile:\n"
f"{dockerfiles_list}"
)
for dockerfile_path in dockerfiles_list:
if cuda_version in dockerfile_path:
return dockerfile_path
raise LookupError(f"Failed to find a dockerfile path for {cuda_version} in:\n{dockerfiles_list}")
assert len(dockerfiles_list) == 1, f"No unique dockerfile path in:\n{dockerfiles_list}\nfor image: {image_uri}"
return dockerfiles_list[0]
def get_expected_dockerfile_filename(device_type, image_uri):
if is_e3_image(image_uri):
return f"Dockerfile.diy.{device_type}"
if is_sagemaker_image(image_uri):
return f"Dockerfile.sagemaker.{device_type}"
return f"Dockerfile.{device_type}"
def get_customer_type():
return os.getenv("CUSTOMER_TYPE")
def get_python_invoker(ami_id):
return DLAMI_PYTHON_MAPPING.get(ami_id, "/usr/bin/python3")
def is_tf_version(required_version, image_uri):
"""
Validate that image_uri has framework version equal to required_version
:param required_version: str Framework version which is required from the image_uri
:param image_uri: str ECR Image URI for the image to be validated
:return: bool True if image_uri has same framework version as required_version, else False
"""
image_framework_name, image_framework_version = get_framework_and_version_from_tag(image_uri)
required_version_specifier_set = SpecifierSet(f"=={required_version}.*")
return image_framework_name == "tensorflow" and image_framework_version in required_version_specifier_set
def is_below_framework_version(version_upper_bound, image_uri, framework):
"""
Validate that image_uri has framework version strictly less than version_upper_bound
:param version_upper_bound: str Framework version that image_uri is required to be below
:param image_uri: str ECR Image URI for the image to be validated
:return: bool True if image_uri has framework version less than version_upper_bound, else False
"""
image_framework_name, image_framework_version = get_framework_and_version_from_tag(image_uri)
required_version_specifier_set = SpecifierSet(f"<{version_upper_bound}")
return image_framework_name == framework and image_framework_version in required_version_specifier_set
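# Illustrative sketch (not part of the original module); the image URI below is a
# hypothetical DLC-style URI used only to demonstrate the two version checks above.
def _example_version_checks():
    tf_uri = "763104351884.dkr.ecr.us-east-1.amazonaws.com/tensorflow-inference:2.4.1-gpu"
    assert is_tf_version("2.4", tf_uri)
    assert is_below_framework_version("2.5", tf_uri, "tensorflow")
    assert not is_below_framework_version("2.4", tf_uri, "tensorflow")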
def is_image_incompatible_with_instance_type(image_uri, ec2_instance_type):
"""
Check for all compatibility issues between DLC Image Types and EC2 Instance Types.
Currently configured to fail on the following checks:
1. p4d.24xlarge instance type is used with a cuda<11.0 image
2. p2.8xlarge instance type is used with a cuda=11.0 image for MXNET framework
:param image_uri: ECR Image URI in valid DLC-format
:param ec2_instance_type: EC2 Instance Type
:return: bool True if there are incompatibilities, False if there aren't
"""
image_is_cuda10_on_incompatible_p4d_instance = (
get_processor_from_image_uri(image_uri) == "gpu"
and get_cuda_version_from_tag(image_uri).startswith("cu10")
and ec2_instance_type in ["p4d.24xlarge"]
)
framework, _ = get_framework_and_version_from_tag(image_uri)
image_is_cuda11_on_incompatible_p2_instance_mxnet = (
framework == "mxnet"
and get_processor_from_image_uri(image_uri) == "gpu"
and get_cuda_version_from_tag(image_uri).startswith("cu11")
and ec2_instance_type in ["p2.8xlarge"]
)
return image_is_cuda10_on_incompatible_p4d_instance or image_is_cuda11_on_incompatible_p2_instance_mxnet
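# Illustrative sketch (not part of the original module); the image URI below is a
# hypothetical DLC-style URI exercising the two incompatibility rules documented above.
def _example_incompatibility_checks():
    uri = "763104351884.dkr.ecr.us-west-2.amazonaws.com/pytorch-training:1.4.0-gpu-py36-cu101-ubuntu16.04"
    # a cu10x GPU image on p4d.24xlarge is flagged as incompatible
    assert is_image_incompatible_with_instance_type(uri, "p4d.24xlarge")
    # the same image on p3.2xlarge is fine
    assert not is_image_incompatible_with_instance_type(uri, "p3.2xlarge")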
def get_repository_local_path():
git_repo_path = os.getcwd().split("/test/")[0]
return git_repo_path
def get_inference_server_type(image_uri):
if "pytorch" not in image_uri:
return "mms"
if "neuron" in image_uri:
return "ts"
image_tag = image_uri.split(":")[1]
pytorch_ver = parse(image_tag.split("-")[0])
if isinstance(pytorch_ver, LegacyVersion) or pytorch_ver < Version("1.6"):
return "mms"
return "ts"
def get_build_context():
return os.getenv("BUILD_CONTEXT")
def is_pr_context():
return os.getenv("BUILD_CONTEXT") == "PR"
def is_canary_context():
return os.getenv("BUILD_CONTEXT") == "CANARY"
def is_mainline_context():
return os.getenv("BUILD_CONTEXT") == "MAINLINE"
def is_nightly_context():
return os.getenv("BUILD_CONTEXT") == "NIGHTLY"
def is_empty_build_context():
return not os.getenv("BUILD_CONTEXT")
def is_dlc_cicd_context():
return os.getenv("BUILD_CONTEXT") in ["PR", "CANARY", "NIGHTLY", "MAINLINE"]
def is_benchmark_dev_context():
return config.is_benchmark_mode_enabled()
def is_rc_test_context():
sm_remote_tests_val = config.get_sagemaker_remote_tests_config_value()
return sm_remote_tests_val == config.AllowedSMRemoteConfigValues.RC.value
def is_e3_image(image_uri):
return "-e3" in image_uri
def is_sagemaker_image(image_uri):
return "-sagemaker" in image_uri
def is_time_for_canary_safety_scan():
"""
Canary tests run every 15 minutes.
A 20-minute window is used so that this scan runs only once a day, around 9 am PST (10 am during winter time).
"""
current_utc_time = time.gmtime()
return current_utc_time.tm_hour == 16 and (0 < current_utc_time.tm_min < 20)
def _get_remote_override_flags():
try:
s3_client = boto3.client("s3")
sts_client = boto3.client("sts")
account_id = sts_client.get_caller_identity().get("Account")
result = s3_client.get_object(Bucket=f"dlc-cicd-helper-{account_id}", Key="override_tests_flags.json")
json_content = json.loads(result["Body"].read().decode("utf-8"))
except ClientError as e:
LOGGER.warning("ClientError when performing S3/STS operation: {}".format(e))
json_content = {}
return json_content
# Now we can skip EFA tests on pipeline without making any source code change
def are_efa_tests_disabled():
disable_efa_tests = is_pr_context() and os.getenv("DISABLE_EFA_TESTS", "False").lower() == "true"
remote_override_flags = _get_remote_override_flags()
override_disable_efa_tests = remote_override_flags.get("disable_efa_tests", "false").lower() == "true"
return disable_efa_tests or override_disable_efa_tests
def is_safety_test_context():
return config.is_safety_check_test_enabled()
def is_test_disabled(test_name, build_name, version):
"""
Expected format of remote_override_flags:
{
"CB Project Name for Test Type A": {
"CodeBuild Resolved Source Version": ["test_type_A_test_function_1", "test_type_A_test_function_2"]
},
"CB Project Name for Test Type B": {
"CodeBuild Resolved Source Version": ["test_type_B_test_function_1", "test_type_B_test_function_2"]
}
}
:param test_name: str Test Function node name (includes parametrized values in string)
:param build_name: str Build Project name of current execution
:param version: str Source Version of current execution
:return: bool True if test is disabled as per remote override, False otherwise
"""
remote_override_flags = _get_remote_override_flags()
remote_override_build = remote_override_flags.get(build_name, {})
if version in remote_override_build:
return not remote_override_build[version] or any(
[test_keyword in test_name for test_keyword in remote_override_build[version]]
)
return False
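# Illustrative sketch (not part of the original module): the build name and source
# version below are hypothetical. This is the shape of override_tests_flags.json that
# _get_remote_override_flags() is expected to return, which is_test_disabled() then
# matches against the current test's node name.
_EXAMPLE_OVERRIDE_FLAGS = {
    "dlc-pr-pytorch-test": {
        "1a2b3c4d": ["test_ecr_scan", "test_safety"],
    },
}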
def run_subprocess_cmd(cmd, failure="Command failed"):
command = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
if command.returncode:
pytest.fail(f"{failure}. Error log:\n{command.stdout.decode()}")
return command
def login_to_ecr_registry(context, account_id, region):
"""
Function to log into an ecr registry
:param context: either invoke context object or fabric connection object
:param account_id: Account ID with the desired ecr registry
:param region: i.e. us-west-2
"""
context.run(
f"aws ecr get-login-password --region {region} | docker login --username AWS "
f"--password-stdin {account_id}.dkr.ecr.{region}.amazonaws.com"
)
def retry_if_result_is_false(result):
"""Return True if we should retry (in this case retry if the result is False), False otherwise"""
return result is False
@retry(
stop_max_attempt_number=10, wait_fixed=10000, retry_on_result=retry_if_result_is_false,
)
def request_mxnet_inference(ip_address="127.0.0.1", port="80", connection=None, model="squeezenet"):
"""
Send request to container to test inference on kitten.jpg
:param ip_address:
:param port:
:connection: ec2_connection object to run the commands remotely over ssh
:return: <bool> True/False based on result of inference
"""
conn_run = connection.run if connection is not None else run
# Check if image already exists
run_out = conn_run("[ -f kitten.jpg ]", warn=True)
if run_out.return_code != 0:
conn_run("curl -O https://s3.amazonaws.com/model-server/inputs/kitten.jpg", hide=True)
run_out = conn_run(f"curl -X POST http://{ip_address}:{port}/predictions/{model} -T kitten.jpg", warn=True)
# The run_out.return_code is not reliable, since sometimes predict request may succeed but the returned result
# is 404. Hence the extra check.
if run_out.return_code != 0 or "probability" not in run_out.stdout:
return False
return True
@retry(stop_max_attempt_number=10, wait_fixed=10000, retry_on_result=retry_if_result_is_false)
def request_mxnet_inference_gluonnlp(ip_address="127.0.0.1", port="80", connection=None):
"""
Send request to container to test inference for predicting sentiments.
:param ip_address:
:param port:
:connection: ec2_connection object to run the commands remotely over ssh
:return: <bool> True/False based on result of inference
"""
conn_run = connection.run if connection is not None else run
run_out = conn_run(
(
f"curl -X POST http://{ip_address}:{port}/predictions/bert_sst/predict -F "
'\'data=["Positive sentiment", "Negative sentiment"]\''
),
warn=True,
)
# The run_out.return_code is not reliable, since sometimes predict request may succeed but the returned result
# is 404. Hence the extra check.
if run_out.return_code != 0 or "1" not in run_out.stdout:
return False
return True
@retry(
stop_max_attempt_number=10, wait_fixed=10000, retry_on_result=retry_if_result_is_false,
)
def request_pytorch_inference_densenet(
ip_address="127.0.0.1", port="80", connection=None, model_name="pytorch-densenet", server_type="ts"
):
"""
Send request to container to test inference on flower.jpg
:param ip_address: str
:param port: str
:param connection: obj
:param model_name: str
:return: <bool> True/False based on result of inference
"""
conn_run = connection.run if connection is not None else run
# Check if image already exists
run_out = conn_run("[ -f flower.jpg ]", warn=True)
if run_out.return_code != 0:
conn_run("curl -O https://s3.amazonaws.com/model-server/inputs/flower.jpg", hide=True)
run_out = conn_run(
f"curl -X POST http://{ip_address}:{port}/predictions/{model_name} -T flower.jpg", hide=True, warn=True
)
# The run_out.return_code is not reliable, since sometimes predict request may succeed but the returned result
# is 404. Hence the extra check.
if run_out.return_code != 0:
LOGGER.error("run_out.return_code != 0")
return False
else:
inference_output = json.loads(run_out.stdout.strip("\n"))
if not (
("neuron" in model_name and isinstance(inference_output, list) and len(inference_output) == 3)
or (server_type == "ts" and isinstance(inference_output, dict) and len(inference_output) == 5)
or (server_type == "mms" and isinstance(inference_output, list) and len(inference_output) == 5)
):
return False
LOGGER.info(f"Inference Output = {json.dumps(inference_output, indent=4)}")
return True
@retry(stop_max_attempt_number=20, wait_fixed=10000, retry_on_result=retry_if_result_is_false)
def request_tensorflow_inference(model_name, ip_address="127.0.0.1", port="8501", inference_string = "'{\"instances\": [1.0, 2.0, 5.0]}'"):
"""
Method to run tensorflow inference on half_plus_two model using CURL command
:param model_name:
:param ip_address:
:param port:
:connection: ec2_connection object to run the commands remotely over ssh
:return:
"""
run_out = run(
f"curl -d {inference_string} -X POST http://{ip_address}:{port}/v1/models/{model_name}:predict", warn=True
)
# The run_out.return_code is not reliable, since sometimes predict request may succeed but the returned result
# is 404. Hence the extra check.
if run_out.return_code != 0 or "predictions" not in run_out.stdout:
return False
return True
@retry(stop_max_attempt_number=20, wait_fixed=10000, retry_on_result=retry_if_result_is_false)
def request_tensorflow_inference_nlp(model_name, ip_address="127.0.0.1", port="8501"):
"""
Method to run tensorflow inference on half_plus_two model using CURL command
:param model_name:
:param ip_address:
:param port:
:connection: ec2_connection object to run the commands remotely over ssh
:return:
"""
inference_string = "'{\"instances\": [[2,1952,25,10901,3]]}'"
run_out = run(
f"curl -d {inference_string} -X POST http://{ip_address}:{port}/v1/models/{model_name}:predict", warn=True
)
# The run_out.return_code is not reliable, since sometimes predict request may succeed but the returned result
# is 404. Hence the extra check.
if run_out.return_code != 0 or "predictions" not in run_out.stdout:
return False
return True
def request_tensorflow_inference_grpc(
script_file_path, ip_address="127.0.0.1", port="8500", connection=None, ec2_instance_ami=None
):
"""
Method to run tensorflow inference on MNIST model using gRPC protocol
:param script_file_path:
:param ip_address:
:param port:
:param connection:
:return:
"""
python_invoker = get_python_invoker(ec2_instance_ami)
conn_run = connection.run if connection is not None else run
conn_run(f"{python_invoker} {script_file_path} --num_tests=1000 --server={ip_address}:{port}", hide=True)
def get_inference_run_command(image_uri, model_names, processor="cpu"):
"""
Helper function to format run command for MMS
:param image_uri:
:param model_names:
:param processor:
:return: <str> Command to start MMS server with given model
"""
server_type = get_inference_server_type(image_uri)
if processor == "eia":
multi_model_location = {
"resnet-152-eia": "https://s3.amazonaws.com/model-server/model_archive_1.0/resnet-152-eia-1-7-0.mar",
"resnet-152-eia-1-5-1": "https://s3.amazonaws.com/model-server/model_archive_1.0/resnet-152-eia-1-5-1.mar",
"pytorch-densenet": "https://aws-dlc-sample-models.s3.amazonaws.com/pytorch/densenet_eia/densenet_eia_v1_5_1.mar",
"pytorch-densenet-v1-3-1": "https://aws-dlc-sample-models.s3.amazonaws.com/pytorch/densenet_eia/densenet_eia_v1_3_1.mar",
}
elif server_type == "ts":
multi_model_location = {
"squeezenet": "https://torchserve.s3.amazonaws.com/mar_files/squeezenet1_1.mar",
"pytorch-densenet": "https://torchserve.s3.amazonaws.com/mar_files/densenet161.mar",
"pytorch-resnet-neuron": "https://aws-dlc-sample-models.s3.amazonaws.com/pytorch/Resnet50-neuron.mar",
}
else:
multi_model_location = {
"squeezenet": "https://s3.amazonaws.com/model-server/models/squeezenet_v1.1/squeezenet_v1.1.model",
"pytorch-densenet": "https://dlc-samples.s3.amazonaws.com/pytorch/multi-model-server/densenet/densenet.mar",
"bert_sst": "https://aws-dlc-sample-models.s3.amazonaws.com/bert_sst/bert_sst.mar",
"mxnet-resnet-neuron": "https://aws-dlc-sample-models.s3.amazonaws.com/mxnet/Resnet50-neuron.mar",
}
if not isinstance(model_names, list):
model_names = [model_names]
for model_name in model_names:
if model_name not in multi_model_location:
raise Exception("No entry found for model {} in dictionary".format(model_name))
parameters = ["{}={}".format(name, multi_model_location[name]) for name in model_names]
if server_type == "ts":
server_cmd = "torchserve"
else:
server_cmd = "multi-model-server"
if processor != "neuron":
mms_command = (
f"{server_cmd} --start --{server_type}-config /home/model-server/config.properties --models "
+ " ".join(parameters)
)
else:
# Temp till the mxnet dockerfile also have the neuron entrypoint file
if server_type == "ts":
mms_command = (
f"{server_cmd} --start --{server_type}-config /home/model-server/config.properties --models "
+ " ".join(parameters)
)
else:
mms_command = f"/usr/local/bin/entrypoint.sh -t /home/model-server/config.properties -m " + " ".join(
parameters
)
return mms_command
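# Illustrative sketch (not part of the original module); the image URI is hypothetical.
# For a PyTorch >= 1.6 image the server type resolves to torchserve ("ts"), so the
# generated command starts torchserve with the squeezenet model archive registered.
def _example_inference_run_command():
    uri = "763104351884.dkr.ecr.us-west-2.amazonaws.com/pytorch-inference:1.8.1-cpu-py36-ubuntu18.04"
    cmd = get_inference_run_command(uri, "squeezenet")
    assert cmd.startswith("torchserve --start --ts-config /home/model-server/config.properties --models ")
    return cmd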
def get_tensorflow_model_name(processor, model_name):
"""
Helper function to get tensorflow model name
:param processor: Processor Type
:param model_name: Name of model to be used
:return: File name for model being used
"""
tensorflow_models = {
"saved_model_half_plus_two": {
"cpu": "saved_model_half_plus_two_cpu",
"gpu": "saved_model_half_plus_two_gpu",
"eia": "saved_model_half_plus_two",
},
"albert": {"cpu": "albert", "gpu": "albert", "eia": "albert",},
"saved_model_half_plus_three": {"eia": "saved_model_half_plus_three"},
}
if model_name in tensorflow_models:
return tensorflow_models[model_name][processor]
else:
raise Exception(f"No entry found for model {model_name} in dictionary")
def generate_ssh_keypair(ec2_client, key_name):
pwd = run("pwd", hide=True).stdout.strip("\n")
key_filename = os.path.join(pwd, f"{key_name}.pem")
if os.path.exists(key_filename):
run(f"chmod 400 {key_filename}")
return key_filename
try:
key_pair = ec2_client.create_key_pair(KeyName=key_name)
except ClientError as e:
if "InvalidKeyPair.Duplicate" in f"{e}":
# Wait 10 seconds for key to be created to avoid race condition
time.sleep(10)
if os.path.exists(key_filename):
run(f"chmod 400 {key_filename}")
return key_filename
raise e
run(f"echo '{key_pair['KeyMaterial']}' > {key_filename}")
run(f"chmod 400 {key_filename}")
return key_filename
def destroy_ssh_keypair(ec2_client, key_filename):
key_name = os.path.basename(key_filename).split(".pem")[0]
response = ec2_client.delete_key_pair(KeyName=key_name)
run(f"rm -f {key_filename}")
return response, key_name
def upload_tests_to_s3(testname_datetime_suffix):
"""
Upload test-related artifacts to unique s3 location.
Allows each test to have a unique remote location for test scripts and files.
These uploaded files and folders are copied into a container running an ECS test.
:param testname_datetime_suffix: test name and datetime suffix that is unique to a test
:return: <bool> True if upload was successful, False if any failure during upload
"""
s3_test_location = os.path.join(TEST_TRANSFER_S3_BUCKET, testname_datetime_suffix)
run_out = run(f"aws s3 ls {s3_test_location}", warn=True)
if run_out.return_code == 0:
raise FileExistsError(f"{s3_test_location} already exists. Skipping upload and failing the test.")
path = run("pwd", hide=True).stdout.strip("\n")
if "dlc_tests" not in path:
raise EnvironmentError("Test is being run from wrong path")
while os.path.basename(path) != "dlc_tests":
path = os.path.dirname(path)
container_tests_path = os.path.join(path, "container_tests")
run(f"aws s3 cp --recursive {container_tests_path}/ {s3_test_location}/")
return s3_test_location
def delete_uploaded_tests_from_s3(s3_test_location):
"""
Delete s3 bucket data related to current test after test is completed
:param s3_test_location: S3 URI for test artifacts to be removed
:return: <bool> True/False based on success/failure of removal
"""
run(f"aws s3 rm --recursive {s3_test_location}")
def get_dlc_images():
if is_pr_context() or is_empty_build_context():
return os.getenv("DLC_IMAGES")
elif is_canary_context():
return parse_canary_images(os.getenv("FRAMEWORK"), os.getenv("AWS_REGION"))
test_env_file = os.path.join(os.getenv("CODEBUILD_SRC_DIR_DLC_IMAGES_JSON"), "test_type_images.json")
with open(test_env_file) as test_env:
test_images = json.load(test_env)
for dlc_test_type, images in test_images.items():
if dlc_test_type == "sanity":
return " ".join(images)
raise RuntimeError(f"Cannot find any images for in {test_images}")
def get_canary_default_tag_py3_version(framework, version):
"""
Currently, only TF2.2 images and above have major/minor python version in their canary tag. Creating this function
to conditionally choose a python version based on framework version ranges. If we move up to py38, for example,
this is the place to make the conditional change.
:param framework: tensorflow1, tensorflow2, mxnet, pytorch
:param version: fw major.minor version, i.e. 2.2
:return: default tag python version
"""
if framework == "tensorflow2" or framework == "huggingface_tensorflow":
if Version("2.2") <= Version(version) < Version("2.6"):
return "py37"
if Version(version) >= Version("2.6"):
return "py38"
if framework == "mxnet":
if Version(version) == Version("1.8"):
return "py37"
if Version(version) >= Version("1.9"):
return "py38"
if framework == "pytorch":
if Version(version) >= Version("1.9"):
return "py38"
return "py3"
def get_e3_addon_tags(framework, version):
"""
# TODO: Remove this when we add the sagemaker-like e3 tags
Get e3 addon tags (os, dlc_major_version, cuda_version)
@param framework: tensorflow2, mxnet, pytorch
@param version: major.minor version
@return: tuple of os, dlc major version, cuda version
"""
fw_map = {
"tensorflow1": {},
"tensorflow2": {},
"pytorch": {
"latest": {
"cuda": "cu113",
"os": "ubuntu20.04",
"major_version": "v1"
}
},
"mxnet": {},
}
image_e3_components = fw_map.get(framework, {}).get(version, {})
if not image_e3_components:
image_e3_components = fw_map.get(framework, {}).get("latest", {})
# This message is common across all assertion statements, so defining it only once
common_assertion_msg = f"for {framework} in {image_e3_components}. Full framework map: {fw_map}"
operating_system = image_e3_components.get("os")
assert operating_system, f"Cannot find OS {common_assertion_msg}"
major_version = image_e3_components.get("major_version")
assert major_version, f"Cannot find DLC major version {common_assertion_msg}"
cuda_version = image_e3_components.get("cuda", "")
assert cuda_version, f"Cannot find CUDA version {common_assertion_msg}"
return operating_system, major_version, cuda_version
def parse_canary_images(framework, region):
"""
Return which canary images to run canary tests on for a given framework and AWS region
:param framework: ML framework (mxnet, tensorflow, pytorch)
:param region: AWS region
:return: dlc_images string (space separated string of image URIs)
"""
if framework == "tensorflow":
if "tensorflow2" in os.getenv("CODEBUILD_BUILD_ID") or "tensorflow2" in os.getenv("CODEBUILD_INITIATOR"):
framework = "tensorflow2"
else:
framework = "tensorflow1"
customer_type = get_customer_type()
customer_type_tag = f"-{customer_type}" if customer_type else ""
version_regex = {
"tensorflow1": r"tf-(1.\d+)",
"tensorflow2": rf"tf{customer_type_tag}-(2.\d+)",
"mxnet": rf"mx{customer_type_tag}-(\d+.\d+)",
"pytorch": rf"pt{customer_type_tag}-(\d+.\d+)",
"huggingface_pytorch": r"hf-pt-(\d+.\d+)",
"huggingface_tensorflow": r"hf-tf-(\d+.\d+)",
"autogluon": r"ag-(\d+.\d+)",
}
repo = git.Repo(os.getcwd(), search_parent_directories=True)
versions_counter = {}
for tag in repo.tags:
tag_str = str(tag)
match = re.search(version_regex[framework], tag_str)
if match:
version = match.group(1)
if not versions_counter.get(version):
versions_counter[version] = {"tr": False, "inf": False}
if "tr" not in tag_str and "inf" not in tag_str:
versions_counter[version]["tr"] = True
versions_counter[version]["inf"] = True
elif "tr" in tag_str:
versions_counter[version]["tr"] = True
elif "inf" in tag_str:
versions_counter[version]["inf"] = True
# Adding huggingface here since we don't have inference HF containers for now
versions = []
for v, inf_train in versions_counter.items():
if (inf_train["inf"] and inf_train["tr"]) or framework.startswith("huggingface"):
versions.append(v)
# Sort versions in descending order; the key ensures numeric comparison so that, e.g., 2.2 < 2.15
versions.sort(key=lambda version_str: [int(point) for point in version_str.split(".")], reverse=True)
registry = PUBLIC_DLC_REGISTRY
framework_versions = versions if len(versions) < 4 else versions[:3]
dlc_images = []
for fw_version in framework_versions:
py3_version = get_canary_default_tag_py3_version(framework, fw_version)
# TODO: Remove this when we add the "sagemaker-like" e3 tag
operating_system, dlc_major_version, cuda = ("", "", "")
if customer_type == "e3":
operating_system, dlc_major_version, cuda = get_e3_addon_tags(framework, fw_version)
images = {
"tensorflow1": {
"e3": [],
"sagemaker": [],
},
"tensorflow2": {
"e3": [],
"sagemaker": [
f"{registry}.dkr.ecr.{region}.amazonaws.com/tensorflow-training:{fw_version}-gpu-{py3_version}",
f"{registry}.dkr.ecr.{region}.amazonaws.com/tensorflow-training:{fw_version}-cpu-{py3_version}",
f"{registry}.dkr.ecr.{region}.amazonaws.com/tensorflow-inference:{fw_version}-gpu",
f"{registry}.dkr.ecr.{region}.amazonaws.com/tensorflow-inference:{fw_version}-cpu",
],
},
"mxnet": {
"e3": [],
"sagemaker": [
f"{registry}.dkr.ecr.{region}.amazonaws.com/mxnet-training:{fw_version}-gpu-{py3_version}",
f"{registry}.dkr.ecr.{region}.amazonaws.com/mxnet-training:{fw_version}-cpu-{py3_version}",
f"{registry}.dkr.ecr.{region}.amazonaws.com/mxnet-inference:{fw_version}-gpu-{py3_version}",
f"{registry}.dkr.ecr.{region}.amazonaws.com/mxnet-inference:{fw_version}-cpu-{py3_version}",
],
},
"pytorch": {
"e3": [
f"{registry}.dkr.ecr.{region}.amazonaws.com/pytorch-training:{fw_version}-gpu-{py3_version}-{cuda}-{operating_system}-e3-{dlc_major_version}",
f"{registry}.dkr.ecr.{region}.amazonaws.com/pytorch-training:{fw_version}-cpu-{py3_version}-{operating_system}-e3-{dlc_major_version}",
f"{registry}.dkr.ecr.{region}.amazonaws.com/pytorch-inference:{fw_version}-gpu-{py3_version}-{cuda}-{operating_system}-e3-{dlc_major_version}",
f"{registry}.dkr.ecr.{region}.amazonaws.com/pytorch-inference:{fw_version}-cpu-{py3_version}-{operating_system}-e3-{dlc_major_version}",
],
"sagemaker": [
f"{registry}.dkr.ecr.{region}.amazonaws.com/pytorch-training:{fw_version}-gpu-{py3_version}",
f"{registry}.dkr.ecr.{region}.amazonaws.com/pytorch-training:{fw_version}-cpu-{py3_version}",
f"{registry}.dkr.ecr.{region}.amazonaws.com/pytorch-inference:{fw_version}-gpu-{py3_version}",
f"{registry}.dkr.ecr.{region}.amazonaws.com/pytorch-inference:{fw_version}-cpu-{py3_version}",
],
},
# TODO: uncomment once cpu training and inference images become available
"huggingface_pytorch": {
"e3": [],
"sagemaker": [
f"{registry}.dkr.ecr.{region}.amazonaws.com/huggingface-pytorch-training:{fw_version}-gpu-{py3_version}",
# f"{registry}.dkr.ecr.{region}.amazonaws.com/huggingface-pytorch-training:{fw_version}-cpu-{py3_version}",
# f"{registry}.dkr.ecr.{region}.amazonaws.com/huggingface-pytorch-inference:{fw_version}-gpu-{py3_version}",
# f"{registry}.dkr.ecr.{region}.amazonaws.com/huggingface-pytorch-inference:{fw_version}-cpu-{py3_version}",
],
},
"huggingface_tensorflow": {
"e3": [],
"sagemaker": [
f"{registry}.dkr.ecr.{region}.amazonaws.com/huggingface-tensorflow-training:{fw_version}-gpu-{py3_version}",
# f"{registry}.dkr.ecr.{region}.amazonaws.com/huggingface-tensorflow-training:{fw_version}-cpu-{py3_version}",
# f"{registry}.dkr.ecr.{region}.amazonaws.com/huggingface-tensorflow-inference:{fw_version}-gpu-{py3_version}",
# f"{registry}.dkr.ecr.{region}.amazonaws.com/huggingface-tensorflow-inference:{fw_version}-cpu-{py3_version}",
],
},
"autogluon": {
"e3": [],
"sagemaker": [
f"{registry}.dkr.ecr.{region}.amazonaws.com/autogluon-training:{fw_version}-gpu-{py3_version}",
f"{registry}.dkr.ecr.{region}.amazonaws.com/autogluon-training:{fw_version}-cpu-{py3_version}",
],
},
}
imgs_key = customer_type if customer_type else "sagemaker"
dlc_images += images[framework][imgs_key]
return " ".join(dlc_images)
def setup_sm_benchmark_tf_train_env(resources_location, setup_tf1_env, setup_tf2_env):
"""
Create a virtual environment for benchmark tests if it doesn't already exist, and download all necessary scripts
:param resources_location: <str> directory in which test resources should be placed
:param setup_tf1_env: <bool> True if tf1 resources need to be setup
:param setup_tf2_env: <bool> True if tf2 resources need to be setup
:return: absolute path to the location of the virtual environment
"""
ctx = Context()
tf_resource_dir_list = []
if setup_tf1_env:
tf_resource_dir_list.append("tensorflow1")
if setup_tf2_env:
tf_resource_dir_list.append("tensorflow2")
for resource_dir in tf_resource_dir_list:
with ctx.cd(os.path.join(resources_location, resource_dir)):
if not os.path.isdir(os.path.join(resources_location, resource_dir, "horovod")):
# v0.19.4 is the last version for which horovod example tests are py2 compatible
ctx.run("git clone -b v0.19.4 https://github.com/horovod/horovod.git")
if not os.path.isdir(os.path.join(resources_location, resource_dir, "deep-learning-models")):
# We clone branch tf2 for both 1.x and 2.x tests because tf2 branch contains all necessary files
ctx.run(f"git clone -b tf2 https://github.com/aws-samples/deep-learning-models.git")
venv_dir = os.path.join(resources_location, "sm_benchmark_venv")
if not os.path.isdir(venv_dir):
ctx.run(f"virtualenv {venv_dir}")
with ctx.prefix(f"source {venv_dir}/bin/activate"):
ctx.run("pip install 'sagemaker>=2,<3' awscli boto3 botocore six==1.11")
# SageMaker TF estimator is coded to only accept framework versions up to 2.1.0 as py2 compatible.
# Fixing this through the following changes:
estimator_location = ctx.run(
"echo $(pip3 show sagemaker |grep 'Location' |sed s/'Location: '//g)/sagemaker/tensorflow/estimator.py"
).stdout.strip("\n")
system = ctx.run("uname -s").stdout.strip("\n")
sed_input_arg = "'' " if system == "Darwin" else ""
ctx.run(f"sed -i {sed_input_arg}'s/\[2, 1, 0\]/\[2, 1, 1\]/g' {estimator_location}")
return venv_dir
def setup_sm_benchmark_mx_train_env(resources_location):
"""
Create a virtual environment for benchmark tests if it doesn't already exist, and download all necessary scripts
:param resources_location: <str> directory in which test resources should be placed
:return: absolute path to the location of the virtual environment
"""
ctx = Context()
venv_dir = os.path.join(resources_location, "sm_benchmark_venv")
if not os.path.isdir(venv_dir):
ctx.run(f"virtualenv {venv_dir}")
with ctx.prefix(f"source {venv_dir}/bin/activate"):
ctx.run("pip install sagemaker awscli boto3 botocore")
return venv_dir
def setup_sm_benchmark_hf_infer_env(resources_location):
"""
Create a virtual environment for benchmark tests if it doesn't already exist, and download all necessary scripts
:param resources_location: <str> directory in which test resources should be placed
:return: absolute path to the location of the virtual environment
"""
ctx = Context()
venv_dir = os.path.join(resources_location, "sm_benchmark_hf_venv")
if not os.path.isdir(venv_dir):
ctx.run(f"python3 -m virtualenv {venv_dir}")
with ctx.prefix(f"source {venv_dir}/bin/activate"):
ctx.run("pip install sagemaker awscli boto3 botocore")
return venv_dir
def get_account_id_from_image_uri(image_uri):
"""
Find the account ID where the image is located
:param image_uri: <str> ECR image URI
:return: <str> AWS Account ID
"""
return image_uri.split(".")[0]
def get_region_from_image_uri(image_uri):
"""
Find the region where the image is located
:param image_uri: <str> ECR image URI
:return: <str> AWS Region Name
"""
region_pattern = r"(us(-gov)?|ap|ca|cn|eu|sa)-(central|(north|south)?(east|west)?)-\d+"
region_search = re.search(region_pattern, image_uri)
assert region_search, f"{image_uri} must have region that matches {region_pattern}"
return region_search.group()
def get_unique_name_from_tag(image_uri):
"""
Return a unique name derived from the image URI.
:param image_uri: ECR image URI
:return: unique name
"""
return re.sub("[^A-Za-z0-9]+", "", image_uri)
def get_framework_and_version_from_tag(image_uri):
"""
Return the framework and version from the image tag.
:param image_uri: ECR image URI
:return: framework name, framework version
"""
tested_framework = get_framework_from_image_uri(image_uri)
allowed_frameworks = (
"huggingface_tensorflow",
"huggingface_pytorch",
"tensorflow",
"mxnet",
"pytorch",
"autogluon",
)
if not tested_framework:
raise RuntimeError(
f"Cannot find framework in image uri {image_uri} " f"from allowed frameworks {allowed_frameworks}"
)
tag_framework_version = re.search(r"(\d+(\.\d+){1,2})", image_uri).groups()[0]
return tested_framework, tag_framework_version
# For the time being, keep this static table. We need a way to get this from the
# Neuron GitHub repo once their version manifest file is updated to the latest release.
# Keys such as "1.15.2" represent the Neuron SDK version.
# Each SDK version maps to frameworks (pytorch, tensorflow, mxnet), and each framework
# maps a framework version to the actual Neuron framework version shipped in the container.
# If a framework version is absent, it is not supported by that Neuron SDK version.
NEURON_VERSION_MANIFEST = {
"1.15.2": {
"pytorch": {
"1.5.1": "1.5.1.1.5.21.0",
"1.6.0": "1.6.0.1.5.21.0",
"1.7.1": "1.7.1.1.5.21.0",
"1.8.1": "1.8.1.1.5.21.0",
},
"tensorflow": {
"2.1.4" : "2.1.4.1.6.10.0",
"2.2.3" : "2.2.3.1.6.10.0",
"2.3.3": "2.3.3.1.6.10.0",
"2.4.2": "2.4.2.1.6.10.0",
"2.4.2": "2.4.2.1.6.10.0",
"2.5.0": "2.5.0.1.6.10.0",
},
"mxnet" : {
"1.8.0": "1.8.0.1.3.4.0",
}
}
}
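# Illustrative sketch (not part of the original module): resolving the in-container
# framework version for a given Neuron SDK release from the static table above.
def _example_neuron_version_lookup():
    # PyTorch 1.7.1 under Neuron SDK 1.15.2 maps to this full in-container version string
    assert NEURON_VERSION_MANIFEST["1.15.2"]["pytorch"]["1.7.1"] == "1.7.1.1.5.21.0"
    # A framework version missing from the table is not supported by that SDK release
    assert "1.4.0" not in NEURON_VERSION_MANIFEST["1.15.2"]["pytorch"]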
def get_neuron_sdk_version_from_tag(image_uri):
"""
Return the neuron sdk version from the image tag.
:param image_uri: ECR image URI
:return: neuron sdk version
"""
neuron_sdk_version = None
if "sdk" in image_uri:
neuron_sdk_version = re.search(r"sdk([\d\.]+)", image_uri).group(1)
return neuron_sdk_version
def get_neuron_framework_and_version_from_tag(image_uri):
"""
Return the framework version and expected framework version for the neuron tag from the image tag.
:param image_uri: ECR image URI
:return: framework version, expected framework version from neuron sdk version
"""
tested_framework, tag_framework_version = get_framework_and_version_from_tag(image_uri)
neuron_sdk_version = get_neuron_sdk_version_from_tag(image_uri)
if neuron_sdk_version is None:
return tag_framework_version, None
if neuron_sdk_version not in NEURON_VERSION_MANIFEST:
raise KeyError(f"Cannot find neuron sdk version {neuron_sdk_version} ")
neuron_framework_versions = NEURON_VERSION_MANIFEST[neuron_sdk_version][tested_framework]
neuron_tag_framework_version = neuron_framework_versions.get(tag_framework_version)
    return tag_framework_version, neuron_tag_framework_version
def get_framework_from_image_uri(image_uri):
return (
"huggingface_tensorflow"
if "huggingface-tensorflow" in image_uri
else "huggingface_pytorch"
if "huggingface-pytorch" in image_uri
else "mxnet"
if "mxnet" in image_uri
else "pytorch"
if "pytorch" in image_uri
else "tensorflow"
if "tensorflow" in image_uri
else "autogluon"
if "autogluon" in image_uri
else None
)
def get_cuda_version_from_tag(image_uri):
"""
Return the cuda version from the image tag.
:param image_uri: ECR image URI
:return: cuda version
"""
cuda_framework_version = None
cuda_str = ["cu", "gpu"]
if all(keyword in image_uri for keyword in cuda_str):
cuda_framework_version = re.search(r"(cu\d+)-", image_uri).groups()[0]
return cuda_framework_version
def get_job_type_from_image(image_uri):
"""
Return the Job type from the image tag.
:param image_uri: ECR image URI
:return: Job Type
"""
tested_job_type = None
allowed_job_types = ("training", "inference")
for job_type in allowed_job_types:
if job_type in image_uri:
tested_job_type = job_type
break
if not tested_job_type and "eia" in image_uri:
tested_job_type = "inference"
if not tested_job_type:
raise RuntimeError(
f"Cannot find Job Type in image uri {image_uri} " f"from allowed frameworks {allowed_job_types}"
)
return tested_job_type
def get_repository_and_tag_from_image_uri(image_uri):
"""
    Return the name of the repository holding the image, along with the image tag
    :param image_uri: URI of the image
    :return: <str> repository name, <str> image tag
"""
repository_uri, tag = image_uri.split(":")
_, repository_name = repository_uri.split("/")
return repository_name, tag
def get_processor_from_image_uri(image_uri):
"""
Return processor from the image URI
    Assumes image uri includes -<processor> in its tag, where <processor> is one of cpu, gpu, eia, neuron, or graviton.
    :param image_uri: ECR image URI
    :return: one of eia, neuron, graviton, cpu, or gpu
"""
allowed_processors = ["eia", "neuron", "graviton", "cpu", "gpu"]
for processor in allowed_processors:
match = re.search(rf"-({processor})", image_uri)
if match:
return match.group(1)
raise RuntimeError("Cannot find processor")
def get_python_version_from_image_uri(image_uri):
"""
Return the python version from the image URI
:param image_uri: ECR image URI
    :return: str py36, py37, py38, etc., based on information available in the image URI
"""
python_version_search = re.search(r"py\d+", image_uri)
if not python_version_search:
raise MissingPythonVersionException(f"{image_uri} does not have python version in the form 'py\\d+'")
python_version = python_version_search.group()
return "py36" if python_version == "py3" else python_version
def get_container_name(prefix, image_uri):
"""
Create a unique container name based off of a test related prefix and the image uri
:param prefix: test related prefix, like "emacs" or "pip-check"
:param image_uri: ECR image URI
:return: container name
"""
return f"{prefix}-{image_uri.split('/')[-1].replace('.', '-').replace(':', '-')}"
def stop_and_remove_container(container_name, context):
"""
Helper function to stop a container locally
:param container_name: Name of the docker container
:param context: Invoke context object
"""
context.run(
f"docker rm -f {container_name}", hide=True,
)
def start_container(container_name, image_uri, context):
"""
Helper function to start a container locally
:param container_name: Name of the docker container
:param image_uri: ECR image URI
:param context: Invoke context object
"""
context.run(
f"docker run --entrypoint='/bin/bash' --name {container_name} -itd {image_uri}", hide=True,
)
def run_cmd_on_container(container_name, context, cmd, executable="bash", warn=False):
"""
Helper function to run commands on a locally running container
:param container_name: Name of the docker container
    :param context: Invoke context object
:param cmd: Command to run on the container
:param executable: Executable to run on the container (bash or python)
:param warn: Whether to only warn as opposed to exit if command fails
:return: invoke output, can be used to parse stdout, etc
"""
if executable not in ("bash", "python"):
        LOGGER.warning(f"Unrecognized executable {executable}. It will be run as {executable} -c '{cmd}'")
return context.run(
f"docker exec --user root {container_name} {executable} -c '{cmd}'", hide=True, warn=warn, timeout=60
)
| []
| []
| [
"BUILD_CONTEXT",
"CUSTOMER_TYPE",
"AWS_REGION",
"FRAMEWORK",
"CODEBUILD_SRC_DIR_DLC_IMAGES_JSON",
"DLC_IMAGES",
"DISABLE_EFA_TESTS",
"CODEBUILD_BUILD_ID",
"CODEBUILD_INITIATOR"
]
| [] | ["BUILD_CONTEXT", "CUSTOMER_TYPE", "AWS_REGION", "FRAMEWORK", "CODEBUILD_SRC_DIR_DLC_IMAGES_JSON", "DLC_IMAGES", "DISABLE_EFA_TESTS", "CODEBUILD_BUILD_ID", "CODEBUILD_INITIATOR"] | python | 9 | 0 | |
cmd/watcher/main.go | package main
import (
"fmt"
"github.com/yametech/yamecloud/pkg/action/api"
apiService "github.com/yametech/yamecloud/pkg/action/api/watcher"
"github.com/yametech/yamecloud/pkg/action/service"
"github.com/yametech/yamecloud/pkg/configure"
"github.com/yametech/yamecloud/pkg/install"
"github.com/yametech/yamecloud/pkg/k8s"
"github.com/yametech/yamecloud/pkg/k8s/datasource"
"github.com/yametech/yamecloud/pkg/k8s/types"
"os"
"strings"
)
/*
# if the app runs inside Kubernetes
export IN_CLUSTER=true
# if using the etcd discovery server, add these arguments:
--registry etcd --registry_address ${etcd_addr}
# environment variables
SUBLIST="";SUBTOPIC=tekton,ovn
SUBTOPIC=[tekton,ovn,istio] or *
*/
const serviceName = "watcher"
const version = "latest"
var defaultResources = []string{
k8s.Water,
k8s.StatefulSet1,
k8s.Stone,
k8s.Injector,
k8s.Deployment,
k8s.StatefulSet,
k8s.DaemonSet,
k8s.Pod,
k8s.Job,
k8s.CronJob,
k8s.ReplicaSet,
k8s.Event,
k8s.Node,
k8s.ConfigMap,
k8s.Secret,
k8s.ResourceQuota,
k8s.Service,
k8s.Ingress,
k8s.NetworkPolicy,
k8s.HorizontalPodAutoscaler,
k8s.CustomResourceDefinition,
k8s.PersistentVolume,
k8s.PersistentVolumeClaims,
k8s.StorageClass,
k8s.ServiceAccount,
k8s.Role,
k8s.ClusterRole,
k8s.RoleBinding,
k8s.Namespace,
k8s.PodSecurityPolicie,
k8s.ClusterRoleBinding,
k8s.Endpoint,
// deployment resource workload template for CaaS
k8s.Workloads,
// tenant for PaaS
k8s.BaseDepartment,
k8s.BaseTenant,
k8s.BaseRole,
k8s.BaseUser,
k8s.BaseRoleUser,
}
var needDescSubscribeMap = map[string]k8s.ResourceType{
"ip": k8s.IP,
"subnet": k8s.SubNet,
"vlan": k8s.Vlan,
"networkAttachmentDefinition": k8s.NetworkAttachmentDefinition,
//tekton
"pipeline": k8s.Pipeline,
"pipelinerun": k8s.PipelineRun,
"task": k8s.Task,
"taskrun": k8s.TaskRun,
"pipelineresource": k8s.PipelineResource,
"tektongraph": k8s.TektonGraph,
"tektonconfig": k8s.TektonConfig,
"tektonwebhook": k8s.TektonWebHook,
"tektonstore": k8s.TektonStore,
//Istio NetWorking
"gateway": k8s.Gateway,
"destinationrule": k8s.DestinationRule,
"serviceentry": k8s.ServiceEntry,
"sidecar": k8s.Sidecar,
"virtualservice": k8s.VirtualService,
"workloadentry": k8s.WorkloadEntry,
}
var needDescSubscribeOVNList = []k8s.ResourceType{
k8s.IP,
k8s.SubNet,
k8s.Vlan,
k8s.NetworkAttachmentDefinition,
}
var needDescSubscribeTEKTONList = []k8s.ResourceType{
k8s.Pipeline,
k8s.PipelineRun,
k8s.Task,
k8s.TaskRun,
k8s.PipelineResource,
k8s.TektonGraph,
k8s.TektonConfig,
k8s.TektonWebHook,
k8s.TektonStore,
}
var needDescSubscribeISTIOList = []k8s.ResourceType{
k8s.Gateway,
k8s.DestinationRule,
k8s.ServiceEntry,
k8s.Sidecar,
k8s.VirtualService,
k8s.WorkloadEntry,
}
func subscribeMapList(includes ...string) []string {
result := make([]string, 0)
for _, v := range needDescSubscribeMap {
pass := false
if len(includes) != 0 {
for _, item := range includes {
if v == item {
pass = true
}
}
}
if len(includes) != 0 && !pass {
continue
}
result = append(result, v)
}
return result
}
var subscribeList []string
func main() {
subscribeListString := os.Getenv("SUBLIST")
subscribeTopicString := os.Getenv("SUBTOPIC")
subscribeList = append(subscribeList, defaultResources...)
if subscribeListString == "*" {
subscribeList = append(subscribeList, subscribeMapList()...)
} else if subscribeListString != "" {
needSubscribeList := strings.Split(subscribeListString, ",")
subscribeList = append(subscribeList, subscribeMapList(needSubscribeList...)...)
}
if subscribeTopicString == "*" {
subscribeList = append(subscribeList, subscribeMapList(needDescSubscribeOVNList...)...)
subscribeList = append(subscribeList, subscribeMapList(needDescSubscribeTEKTONList...)...)
subscribeList = append(subscribeList, subscribeMapList(needDescSubscribeISTIOList...)...)
} else if subscribeTopicString != "" {
topicList := strings.Split(subscribeTopicString, ",")
for _, topic := range topicList {
switch topic {
case "tekton":
subscribeList = append(subscribeList, subscribeMapList(needDescSubscribeTEKTONList...)...)
case "istio":
subscribeList = append(subscribeList, subscribeMapList(needDescSubscribeISTIOList...)...)
case "ovn":
subscribeList = append(subscribeList, subscribeMapList(needDescSubscribeOVNList...)...)
}
}
}
subscribeList = unique(subscribeList)
fmt.Printf("[INFO] service watch resource: %v\n", subscribeList)
resourceList := k8s.GVRMaps.Subscribe(subscribeList...)
config, err := configure.NewInstallConfigure(types.NewResourceITypes(resourceList))
if err != nil {
panic(fmt.Sprintf("new install configure error %s", err))
}
_datasource := datasource.NewInterface(config)
apiServer := api.NewServer(service.NewService(_datasource))
apiServer.SetExtends(apiService.NewWatcherServer(serviceName, apiServer))
microService, err := install.WebServiceInstall(serviceName, version, _datasource, apiServer)
if err != nil {
panic(fmt.Sprintf("web service install error %s", err))
}
if err := microService.Run(); err != nil {
panic(err)
}
}
func unique(src []string) []string {
keys := make(map[string]bool)
list := make([]string, 0)
for _, entry := range src {
if _, value := keys[entry]; !value {
keys[entry] = true
list = append(list, entry)
}
}
return list
}
| [
"\"SUBLIST\"",
"\"SUBTOPIC\""
]
| []
| [
"SUBTOPIC",
"SUBLIST"
]
| [] | ["SUBTOPIC", "SUBLIST"] | go | 2 | 0 | |
scripts/performance/blas/alltime.py | #!/usr/bin/python3
import sys, getopt
sys.path.append('../../../clients/common/')
import numpy as np
from math import *
import subprocess
import os
import re # regexp package
import shutil
import tempfile
import rocblas_gentest as gt
import getspecs
usage = '''A timing script for rocBLAS that generates plots
Usage:
\talltime.py
\t\t-A working directory A
\t\t-B working directory B (optional)
\t\t-i input yaml
\t\t-o output directory
\t\t-b output directory for the base run
\t\t-T do not perform BLAS functions; just generate document
\t\t-f document format: pdf (default) or docx (Need forked docx plugin)
\t\t-d device number (default: 0)
\t\t-g generate graphs via Asymptote: 0(default) or 1
\t\t-S plot speedup (default: 1, disabled: 0)
\t\t-X do not generate figures
'''
# \t\t-t data type: gflops #Maybe use option to plot time graphs too
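# Illustrative invocation (all paths and file names below are placeholders, not part of this script):
#   ./alltime.py -A /path/to/rocBLAS/build/release/clients/staging -i blas_config.yaml -o ./results -g 1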
def nextpow(val, radix):
x = 1
while(x <= val):
x *= radix
return x
class rundata:
def __init__(self, wdir, odir, diridx, label,
data, hwinfo):
self.wdir = wdir
self.odir = odir
self.diridx = diridx
self.minnsize = data['n']
self.maxNsize = data['N']
self.minmsize = data['m']
self.maxMsize = data['M']
self.minksize = data['k']
self.maxKsize = data['K']
self.nbatch = data['batch_count']
self.function = data['function']
self.precision = data['compute_type'] #This picks precision
self.label = label
self.incx = data['incx']
self.incy = data['incy']
self.alpha = data['alpha']
self.beta = data['beta']
self.iters = data['iters']
self.cold_iters = data['cold_iters']
self.samples = data['samples']
self.lda = data['lda']
self.ldb = data['ldb']
self.ldc = data['ldc']
self.LDA = data['LDA']
self.LDB = data['LDB']
self.LDC = data['LDC']
self.transA = data['transA']
self.transB = data['transB']
self.initialization = data['initialization']
self.step_size = data['step_size']
self.step_mult = data['step_mult']
self.side = data['side']
self.uplo = data['uplo']
self.diag = data['diag']
self.theoMax = -1
flopseq = data['flops']
memeq = data['mem']
precisionBits = int(re.search(r'\d+', self.precision).group())
if(self.function == 'trsm' or self.function == 'gemm'): #TODO better logic to decide memory bound vs compute bound
self.theoMax = hwinfo['theoMaxCompute'] * 32.00 / precisionBits #scaling to appropriate precision
        elif flopseq and memeq:  # Memory bound
            try:
                n = 100000
                flops = eval(flopseq)
                mem = eval(memeq)
                self.theoMax = hwinfo['Bandwidth'] / float(mem) * flops * 32 / precisionBits / 4  # 4 bytes scaled to type
            except Exception:
                print("flops and mem equations produce errors")
def outfilename(self):
outfile = str(self.function)
outfile += "_" + self.precision
outfile += "_" + self.label.replace(' ', '_').replace('/', '_')
outfile += ".dat"
outfile = os.path.join(self.odir, outfile)
return outfile
def runcmd(self, nsample):
cmd = ["./timing.py"]
cmd.append("-w")
cmd.append(self.wdir)
cmd.append("-i")
cmd.append(str(self.iters))
cmd.append("-j")
cmd.append(str(self.cold_iters))
cmd.append("-a")
cmd.append(str(self.samples))
cmd.append("-b")
cmd.append(str(self.nbatch))
cmd.append("-m")
cmd.append(str(self.minmsize))
cmd.append("-M")
cmd.append(str(self.maxMsize))
cmd.append("-n")
cmd.append(str(self.minnsize))
cmd.append("-N")
cmd.append(str(self.maxNsize))
cmd.append("-k")
cmd.append(str(self.minksize))
cmd.append("-K")
cmd.append(str(self.maxKsize))
cmd.append("-f")
cmd.append(self.function)
cmd.append("-p")
cmd.append(self.precision)
cmd.append("--lda")
cmd.append(str(self.lda))
cmd.append("--LDA")
cmd.append(str(self.LDA))
cmd.append("--ldb")
cmd.append(str(self.ldb))
cmd.append("--LDB")
cmd.append(str(self.LDB))
cmd.append("--ldc")
cmd.append(str(self.ldc))
cmd.append("--LDC")
cmd.append(str(self.LDC))
cmd.append("--incx")
cmd.append(str(self.incx))
cmd.append("--incy")
cmd.append(str(self.incy))
cmd.append("--alpha")
cmd.append(str(self.alpha))
cmd.append("--beta")
cmd.append(str(self.beta))
cmd.append("--transA")
cmd.append(str(self.transA))
cmd.append("--transB")
cmd.append(str(self.transB))
cmd.append("--side")
cmd.append(str(self.side))
cmd.append("--uplo")
cmd.append(str(self.uplo))
cmd.append("--diag")
cmd.append(str(self.diag))
cmd.append("-s")
cmd.append(str(self.step_size))
if self.step_mult == 1:
cmd.append("-x")
cmd.append("-o")
cmd.append(self.outfilename())
# cmd.append("-t")
# cmd.append("gflops")
return cmd
def executerun(self, nsample):
fout = tempfile.TemporaryFile(mode="w+")
ferr = tempfile.TemporaryFile(mode="w+")
proc = subprocess.Popen(self.runcmd(nsample), stdout=fout, stderr=ferr,
env=os.environ.copy())
proc.wait()
rc = proc.returncode
if rc != 0:
print("****fail****")
return rc
class yamldata:
def __init__(self, configFile):
self.configFile = configFile
self.testcases = []
self.executerun()
def reorderdata(self):
oldData = self.testcases
newData = []
names = []
for test in oldData:
name = test['function']
precision = test['compute_type']
side = test['side']
if (name,precision) not in names:
type = [ x for x in oldData if x['function']==name and x['compute_type'] == precision and x['side'] == side ]
newData.append(type)
names.append((name,precision, side))
self.testcases = newData
#Monkey Patch
def write_test(self, test):
self.testcases.append(test)
#Monkey Patch
def process_doc(self, doc):
"""Process one document in the YAML file"""
# Ignore empty documents
if not doc or not doc.get('Tests'):
return
# Clear datatypes and params from previous documents
gt.datatypes.clear()
gt.param.clear()
# Return dictionary of all known datatypes
gt.datatypes.update(gt.get_datatypes(doc))
# Arguments structure corresponding to C/C++ structure
gt.param['Arguments'] = type('Arguments', (gt.ctypes.Structure,),
{'_fields_': gt.get_arguments(doc)})
# Special names which get expanded as lists of arguments
gt.param['dict_lists_to_expand'] = doc.get('Dictionary lists to expand') or ()
# Lists which are not expanded
gt.param['lists_to_not_expand'] = doc.get('Lists to not expand') or ()
# Defaults
defaults = doc.get('Defaults') or {}
default_add_ons = {"m": 1, "M": 1, "n": 1, "N": 1, "k": 1, "K": 1, "lda": 1, "ldb": 1, "ldc": 1, "LDA": 1, "LDB": 1, "LDC": 1, "iters": 1, "flops": '', "mem": '', "samples": 1, "step_mult": 0}
defaults.update(default_add_ons)
# Known Bugs
gt.param['known_bugs'] = doc.get('Known bugs') or []
# Functions
gt.param['Functions'] = doc.get('Functions') or {}
# Instantiate all of the tests, starting with defaults
for test in doc['Tests']:
case = defaults.copy()
case.update(test)
gt.generate(case, gt.instantiate)
def importdata(self):
gt.args['includes'] = []
gt.args['infile'] = self.configFile
gt.write_test = self.write_test
for doc in gt.get_yaml_docs():
self.process_doc(doc)
# timeCases.extend(self.testcases)
# print(timeCases)
def executerun(self):
self.importdata()
self.reorderdata()
class getVersion:
def __init__(self, wdir):
self.wdir = wdir
self.prog = os.path.join(self.wdir, "rocblas-bench")
self.version = ""
self.executerun()
def runcmd(self):
cmd = [self.prog]
cmd.append("--version")
return cmd
def executerun(self):
fout = os.popen(" ".join(self.runcmd())).read()
#self.version = fout.split(":",1)[0] + ": " + fout.split("rel-",1)[1]
self.version = fout
class figure:
def __init__(self, name, caption):
self.name = name
self.runs = []
self.caption = caption
def inputfiles(self):
files = []
for run in self.runs:
files.append(run.outfilename())
return files
def labels(self):
labels = []
for run in self.runs:
labels.append(run.label)
return labels
def filename(self, outdir, docformat):
outfigure = self.name
outfigure += ".pdf"
# if docformat == "pdf":
# outfigure += ".pdf"
# if docformat == "docx":
# outfigure += ".png"
return os.path.join(outdir, outfigure)
def asycmd(self, outdir, docformat, datatype, ncompare):
asycmd = ["asy"]
asycmd.append("-f")
asycmd.append("pdf")
# if docformat == "pdf":
# asycmd.append("-f")
# asycmd.append("pdf")
# if docformat == "docx":
# asycmd.append("-f")
# asycmd.append("png")
# asycmd.append("-render")
# asycmd.append("8")
asycmd.append("datagraphs.asy")
asycmd.append("-u")
asycmd.append('filenames="' + ",".join(self.inputfiles()) + '"')
asycmd.append("-u")
asycmd.append('legendlist="' + ",".join(self.labels()) + '"')
# if dirB != None and speedup: # disabled for now
# asycmd.append("-u")
# asycmd.append('speedup=true')
# else:
# asycmd.append("-u")
# asycmd.append('speedup=false')
asycmd.append("-u")
asycmd.append('speedup=' + str(ncompare))
if datatype == "gflops":
asycmd.append("-u")
asycmd.append('ylabel="GFLOP/sec"')
if self.runs[0].theoMax != -1:
asycmd.append("-u")
asycmd.append('theoMax=' + str(self.runs[0].theoMax))
asycmd.append("-u")
asycmd.append('xlabel='+'"Size: ' + getXLabel(self.runs[0])+'"')
asycmd.append("-o")
asycmd.append(self.filename(outdir, docformat) )
print(" ".join(asycmd))
return asycmd
def executeasy(self, outdir, docformat, datatype, ncompare):
asyproc = subprocess.Popen(self.asycmd(outdir, docformat, datatype, ncompare),
env=os.environ.copy())
asyproc.wait()
asyrc = asyproc.returncode
if asyrc != 0:
print("****asy fail****")
return asyrc
def getLabel(test):
if test['function']=='gemm':
return 'transA ' + test['transA']+ ' transB ' + test['transB']
elif test['function']=='axpy':
return 'alpha '+str(test['alpha'])+' incx '+str(test['incx'])+' incy '+str(test['incy'])
elif test['function']=='gemv':
return 'transA ' + test['transA']+' incx '+str(test['incx'])+' incy '+str(test['incy'])
elif test['function'] in ['dot', 'copy', 'swap', 'ger', 'gerc', 'geru']:
return 'incx '+str(test['incx'])+' incy '+str(test['incy'])
elif test['function'] in ['asum', 'nrm2', 'scal']:
return 'incx '+str(test['incx'])
elif test['function']=='trsm':
if test['side']=='R':
return 'N/lda '+str(test['N'])+' alpha '+ str(test['alpha']) + ' side ' + str(test['side']) + ' uplo ' + str(test['uplo']) + ' transA ' + test['transA'] + ' diag ' + str(test['diag'])
else:
return 'M/lda/ldb '+str(test['M'])+' alpha '+ str(test['alpha']) + ' side ' + str(test['side']) + ' uplo ' + str(test['uplo']) + ' transA ' + test['transA'] + ' diag ' + str(test['diag'])
else:
print('Legend label not defined for '+test['function'])
sys.exit(1)
def getXLabel(test):
if test.function=='gemm':
return 'M=N=K=lda=ldb=ldc'
elif test.function in ['axpy', 'asum', 'dot', 'copy', 'nrm2', 'scal', 'swap']:
return 'N'
elif test.function in ['gemv', 'ger', 'gerc', 'geru']:
return 'M=N=lda'
elif test.function=='trsm':
if test.side == 'R':
return 'M=ldb'
else:
return 'N'
else:
print('Xlabel not defined for ' + test.function)
sys.exit(1)
def getFunctionPreFix(computeType):
if "32_r" in computeType:
return "s"
elif "64_r" in computeType:
return "d"
elif "32_c" in computeType:
return "c"
elif "64_c" in computeType:
return "z"
elif "bf16_r" in computeType:
return "bf"
elif "f16_r" in computeType:
return "h"
else:
print("Error - Cannot detect precision preFix: "+computeType)
def getDeviceSpecs(device, sclk):
hwinfo = {}
hwinfo["theoMaxCompute"] = -1
hwinfo["sclk"] = int(sclk.split('M')[0])
# print(hwinfo["sclk"])
# print(hwinfo["sclk"]/1000.00 * 64 * 128)
if 'Vega 20' in device:
hwinfo["theoMaxCompute"] = hwinfo["sclk"]/1000.00 * 64 * 128 # 64 CU, 128 ops/ clk
hwinfo["Bandwidth"] = 1000
hwinfo["Device"] = 'Vega 20'
elif 'Vega 10' in device:
hwinfo["theoMaxCompute"] = hwinfo["sclk"]/1000.00 * 60 * 128
hwinfo["Bandwidth"] = 484
hwinfo["Device"] = 'Vega 10'
else:
print("Device type not supported or found - needed to display theoretical max")
return hwinfo
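# Worked example for the formula above (numbers are illustrative): a Vega 20 reporting
# sclk = "1700Mhz" gives theoMaxCompute = 1700/1000 * 64 * 128 = 13926.4 GFLOP/s (FP32);
# rundata later rescales this by 32/precisionBits for gemm/trsm at other precisions.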
def main(argv):
dirA = "."
dirB = None
dryrun = False
inputYaml = ""
outdir = "."
baseOutDir = "."
speedup = False
datatype = "gflops"
# shortrun = False
docformat = "pdf"
devicenum = 0
doAsy = False
noFigures = False
nsample = 10
try:
opts, args = getopt.getopt(argv,"hA:f:B:Tt:a:b:o:S:sg:d:N:i:X")
except getopt.GetoptError:
print("error in parsing arguments.")
print(usage)
sys.exit(2)
for opt, arg in opts:
if opt in ("-h"):
print(usage)
exit(0)
elif opt in ("-A"):
dirA = arg
elif opt in ("-B"):
dirB = arg
elif opt in ("-i"):
inputYaml = arg
elif opt in ("-o"):
outdir = arg
elif opt in ("-b"):
baseOutDir = arg
elif opt in ("-T"):
dryrun = True
# elif opt in ("-s"):
# shortrun = True
elif opt in ("-X"):
noFigures = True
elif opt in ("-g"):
if int(arg) == 0:
doAsy = False
if int(arg) == 1:
doAsy = True
elif opt in ("-d"):
devicenum = int(arg)
elif opt in ("-N"):
nsample = int(arg)
elif opt in ("-S"):
if int(arg) == 0:
speedup = False
if int(arg) == 1:
speedup = True
elif opt in ("-t"):
if arg not in ["time", "gflops"]:
print("data type must be time or gflops")
print(usage)
sys.exit(1)
datatype = arg
elif opt in ("-f"):
goodvals = ["pdf", "docx"]
if arg not in goodvals:
print("error: format must in " + " ".join(goodvals))
print(usage)
sys.exit(1)
docformat = arg
if os.path.isfile(inputYaml)==False:
print("unable to find input yaml file: " + inputYaml)
sys.exit(1)
print("dirA: "+ dirA)
if not dryrun and not binaryisok(dirA, "rocblas-bench"):
print("unable to find " + "rocblas-bench" + " in " + dirA)
print("please specify with -A")
sys.exit(1)
dirlist = [[dirA, outdir]]
if not dirB == None:
print("dirB: "+ dirB)
if not dryrun and not binaryisok(dirB, "rocblas-bench"):
print("unable to find " + "rocblas-bench" + " in " + dirB)
print("please specify with -B")
sys.exit(1)
if not os.path.exists(baseOutDir):
os.makedirs(baseOutDir)
dirlist.append([dirB, baseOutDir])
elif dryrun:
dirlist.append([dirB, baseOutDir])
print("outdir: " + outdir)
# if shortrun:
# print("short run")
print("output format: " + docformat)
print("device number: " + str(devicenum))
if not os.path.exists(outdir):
os.makedirs(outdir)
rocBlasVersion = getVersion(dirA)
sclk = getspecs.getsclk(devicenum)
device = getspecs.getdeviceinfo(devicenum)
if not dryrun:
specs = "Host info:\n"
specs += "\thostname: " + getspecs.gethostname() + "\n"
specs += "\tcpu info: " + getspecs.getcpu() + "\n"
specs += "\tram: " + getspecs.getram() + "\n"
specs += "\tdistro: " + getspecs.getdistro() + "\n"
specs += "\tkernel version: " + getspecs.getkernel() + "\n"
specs += "\trocm version: " + getspecs.getrocmversion() + "\n"
specs += "\t" + rocBlasVersion.version + "\n"
specs += "Device info:\n"
specs += "\tdevice: " + device + "\n"
specs += "\tvbios version: " + getspecs.getvbios(devicenum) + "\n"
specs += "\tvram: " + getspecs.getvram(devicenum) + "\n"
specs += "\tperformance level: " + getspecs.getperflevel(devicenum) + "\n"
specs += "\tsystem clock: " + sclk + "\n"
specs += "\tmemory clock: " + getspecs.getmclk(devicenum) + "\n"
with open(os.path.join(outdir, "specs.txt"), "w+") as f:
f.write(specs)
hwinfo = getDeviceSpecs(device, sclk)
figs = []
#load yaml then create fig for every test
f = open(inputYaml, 'r')
data = yamldata(f)
f.close()
#only generate data
if noFigures:
benchruns = []
for tests in data.testcases:
for test in tests:
for idx, lwdir in enumerate(dirlist):
wdir = lwdir[0]
odir = lwdir[1]
label = getLabel(test)
benchruns.append( rundata(wdir, odir, idx, label, test, hwinfo) )
for run in benchruns:
print(" ".join(run.runcmd(nsample)))
run.executerun(nsample)
return
    # set up tests, sorted by their respective figures
for tests in data.testcases:
name = getFunctionPreFix(tests[0]['compute_type']) + tests[0]['function'].split('_')[0] + " Performance"
fig = figure(name , name.replace('_', '\_'))
for test in tests:
for idx, lwdir in enumerate(dirlist):
wdir = lwdir[0]
odir = lwdir[1]
label = getLabel(test)
fig.runs.append( rundata(wdir, odir, idx, label,
test, hwinfo) )
figs.append(fig)
#print and launch blas functions
for fig in figs:
print(fig.name)
for run in fig.runs:
if not dryrun:
print(" ".join(run.runcmd(nsample)))
run.executerun(nsample)
#generate plots
if doAsy:
print("")
for fig in figs:
ncompare = len(dirlist) if speedup else 0
print(fig.labels())
print(fig.asycmd(outdir, docformat, datatype, ncompare))
fig.executeasy(outdir, docformat, datatype, ncompare)
if docformat == "pdf":
maketex(figs, outdir, nsample)
if docformat == "docx":
makedocx(figs, outdir, nsample)
def binaryisok(dirname, progname):
prog = os.path.join(dirname, progname)
return os.path.isfile(prog)
def maketex(figs, outdir, nsample):
    header = '''\\documentclass[12pt]{article}
\\usepackage{graphicx}
\\usepackage{url}
\\author{Wasiq Mahmood}
\\begin{document}
'''
texstring = header
texstring += "\n\\section{Introduction}\n"
texstring += "Each data point represents the median of " + str(nsample) + " values, with error bars showing the 95\\% confidence interval for the median.\n\n"
#TODO change message
# texstring += "The following figures display the performance of the user specified rocBLAS functions \n\n"
texstring += "\\vspace{1cm}\n"
# texstring += "\\begin{tabular}{ll}"
# texstring += labelA +" &\\url{"+ dirA+"} \\\\\n"
# if not dirB == None:
# texstring += labelB +" &\\url{"+ dirB+"} \\\\\n"
# texstring += "\\end{tabular}\n\n"
# texstring += "\\vspace{1cm}\n"
specfilename = os.path.join(outdir, "specs.txt")
if os.path.isfile(specfilename):
specs = ""
with open(specfilename, "r") as f:
specs = f.read()
for line in specs.split("\n"):
if line.startswith("Host info"):
texstring += "\\noindent " + line
texstring += "\\begin{itemize}\n"
elif line.startswith("Device info"):
texstring += "\\end{itemize}\n"
texstring += line
texstring += "\\begin{itemize}\n"
else:
if line.strip() != "":
texstring += "\\item " + line + "\n"
texstring += "\\end{itemize}\n"
texstring += "\n"
texstring += "\\clearpage\n"
texstring += "\n\\section{Figures}\n"
for fig in figs:
print(fig.filename(outdir, "pdf"))
print(fig.caption)
texstring += '''
\\centering
\\begin{figure}[htbp]
\\includegraphics[width=\\textwidth]{'''
texstring += fig.filename("", "pdf")
texstring += '''}
\\caption{''' + fig.caption + '''}
\\end{figure}
'''
texstring += "\n\\end{document}\n"
fname = os.path.join(outdir, 'figs.tex')
with open(fname, 'w') as outfile:
outfile.write(texstring)
fout = open(os.path.join(outdir, "texcmd.log"), 'w+')
ferr = open(os.path.join(outdir, "texcmd.err"), 'w+')
latexcmd = ["latexmk", "-pdf", 'figs.tex']
print(" ".join(latexcmd))
texproc = subprocess.Popen(latexcmd, cwd=outdir, stdout=fout, stderr=ferr,
env=os.environ.copy())
texproc.wait()
fout.close()
ferr.close()
texrc = texproc.returncode
if texrc != 0:
print("****tex fail****")
def pdf2emf(pdfname):
svgname = pdfname.replace(".pdf",".svg")
cmd_pdf2svg = ["pdf2svg", pdfname, svgname]
proc = subprocess.Popen(cmd_pdf2svg, env=os.environ.copy())
proc.wait()
if proc.returncode != 0:
print("pdf2svg failed!")
sys.exit(1)
emfname = pdfname.replace(".pdf",".emf")
cmd_svg2emf = ["inkscape", svgname, "-M", emfname]
proc = subprocess.Popen(cmd_svg2emf, env=os.environ.copy())
proc.wait()
if proc.returncode != 0:
print("svg2emf failed!")
sys.exit(1)
return emfname
def makedocx(figs, outdir, nsample):
import docx
document = docx.Document()
document.add_heading('rocBLAS benchmarks', 0)
document.add_paragraph("Each data point represents the median of " + str(nsample) + " values, with error bars showing the 95% confidence interval for the median.")
specfilename = os.path.join(outdir, "specs.txt")
if os.path.isfile(specfilename):
with open(specfilename, "r") as f:
specs = f.read()
for line in specs.split("\n"):
document.add_paragraph(line)
for fig in figs:
print(fig.filename(outdir, "docx"))
print(fig.caption)
emfname = pdf2emf(fig.filename(outdir, "docx"))
# NB: emf support does not work; adding the filename as a placeholder
# document.add_paragraph(emfname)
document.add_picture(emfname, width=docx.shared.Inches(6))
document.add_paragraph((fig.caption).replace('\\', ''))
document.save(os.path.join(outdir,'figs.docx'))
if __name__ == "__main__":
main(sys.argv[1:])
| []
| []
| []
| [] | [] | python | 0 | 0 | |
retrieveDataFromDB.py | import sqlite3
def readSqliteTable(db_name, table_name, order_col):
"""
Execute SQLite SELECT Query from Python application to retrieve data from the SQLite table.
:param db_name: The name of the DataBase
:param table_name: The name of the Table in <db_name>
    :param order_col: The name of the column to order selected rows by
:return: list of the selected rows
"""
    # init the empty array to store all the records from the table
    records = []
    sqliteConnection = None
try:
# Open a connection to an SQLite database
sqliteConnection = sqlite3.connect(db_name)
cursor = sqliteConnection.cursor()
print("Connected to SQLite")
# Define the SQLite SELECT statement query (by default in chronological order)
sqlite_select_query = "SELECT * FROM " + table_name + " ORDER BY " + order_col + ";"
# Execute the SELECT query
cursor.execute(sqlite_select_query)
# Get rows from the cursor object
records = cursor.fetchall()
print("Total rows are: ", len(records))
# Close the Cursor connection
cursor.close()
except sqlite3.Error as error:
print("Failed to read data from sqlite table", error)
finally:
        if sqliteConnection:
sqliteConnection.close()
print("The SQLite connection is closed")
return records
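# Minimal usage sketch (database, table, and column names below are illustrative only):
#   rows = readSqliteTable("app.db", "events", "created_at")
#   for row in rows:
#       print(row)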
| []
| []
| []
| [] | [] | python | null | null | null |
src/core/mcir/spec.go | package mcir
import (
"encoding/json"
"fmt"
"os"
"sort"
"strconv"
"strings"
//"strings"
"github.com/cloud-barista/cb-spider/interface/api"
"github.com/cloud-barista/poc-cicd-tumblebug/src/core/common"
"github.com/go-resty/resty/v2"
//"github.com/cloud-barista/poc-cicd-tumblebug/src/core/mcis"
_ "github.com/go-sql-driver/mysql"
)
type SpiderSpecInfo struct { // Spider
// https://github.com/cloud-barista/cb-spider/blob/master/cloud-control-manager/cloud-driver/interfaces/resources/VMSpecHandler.go
Region string
Name string
VCpu SpiderVCpuInfo
Mem string
Gpu []SpiderGpuInfo
KeyValueList []common.KeyValue
}
type SpiderVCpuInfo struct { // Spider
Count string
Clock string // GHz
}
type SpiderGpuInfo struct { // Spider
Count string
Mfr string
Model string
Mem string
}
type TbSpecReq struct { // Tumblebug
Name string `json:"name"`
ConnectionName string `json:"connectionName"`
CspSpecName string `json:"cspSpecName"`
Description string `json:"description"`
}
type TbSpecInfo struct { // Tumblebug
Namespace string `json:"namespace"` // required to save in RDB
Id string `json:"id"`
Name string `json:"name"`
ConnectionName string `json:"connectionName"`
CspSpecName string `json:"cspSpecName"`
Os_type string `json:"os_type"`
Num_vCPU uint16 `json:"num_vCPU"`
Num_core uint16 `json:"num_core"`
Mem_GiB uint16 `json:"mem_GiB"`
Storage_GiB uint32 `json:"storage_GiB"`
Description string `json:"description"`
Cost_per_hour float32 `json:"cost_per_hour"`
Num_storage uint8 `json:"num_storage"`
Max_num_storage uint8 `json:"max_num_storage"`
Max_total_storage_TiB uint16 `json:"max_total_storage_TiB"`
Net_bw_Gbps uint16 `json:"net_bw_Gbps"`
Ebs_bw_Mbps uint32 `json:"ebs_bw_Mbps"`
Gpu_model string `json:"gpu_model"`
Num_gpu uint8 `json:"num_gpu"`
Gpumem_GiB uint16 `json:"gpumem_GiB"`
Gpu_p2p string `json:"gpu_p2p"`
OrderInFilteredResult uint16 `json:"orderInFilteredResult"`
EvaluationStatus string `json:"evaluationStatus"`
EvaluationScore_01 float32 `json:"evaluationScore_01"`
EvaluationScore_02 float32 `json:"evaluationScore_02"`
EvaluationScore_03 float32 `json:"evaluationScore_03"`
EvaluationScore_04 float32 `json:"evaluationScore_04"`
EvaluationScore_05 float32 `json:"evaluationScore_05"`
EvaluationScore_06 float32 `json:"evaluationScore_06"`
EvaluationScore_07 float32 `json:"evaluationScore_07"`
EvaluationScore_08 float32 `json:"evaluationScore_08"`
EvaluationScore_09 float32 `json:"evaluationScore_09"`
EvaluationScore_10 float32 `json:"evaluationScore_10"`
AssociatedObjectList []string `json:"associatedObjectList"`
IsAutoGenerated bool `json:"isAutoGenerated"`
}
// ConvertSpiderSpecToTumblebugSpec accepts an Spider spec object, converts to and returns an TB spec object
func ConvertSpiderSpecToTumblebugSpec(spiderSpec SpiderSpecInfo) (TbSpecInfo, error) {
if spiderSpec.Name == "" {
err := fmt.Errorf("ConvertSpiderSpecToTumblebugSpec failed; spiderSpec.Name == \"\" ")
emptyTumblebugSpec := TbSpecInfo{}
return emptyTumblebugSpec, err
}
tumblebugSpec := TbSpecInfo{}
tumblebugSpec.Name = spiderSpec.Name
tumblebugSpec.CspSpecName = spiderSpec.Name
tempUint64, _ := strconv.ParseUint(spiderSpec.VCpu.Count, 10, 16)
tumblebugSpec.Num_vCPU = uint16(tempUint64)
tempFloat64, _ := strconv.ParseFloat(spiderSpec.Mem, 32)
tumblebugSpec.Mem_GiB = uint16(tempFloat64 / 1024) //fmt.Sprintf("%.0f", tempFloat64/1024)
return tumblebugSpec, nil
}
type SpiderSpecList struct {
Vmspec []SpiderSpecInfo `json:"vmspec"`
}
// LookupSpecList accepts Spider conn config,
// lookups and returns the list of all specs in the region of conn config
// in the form of the list of Spider spec objects
func LookupSpecList(connConfig string) (SpiderSpecList, error) {
if connConfig == "" {
content := SpiderSpecList{}
		err := fmt.Errorf("LookupSpecList() called with empty connConfig.")
common.CBLog.Error(err)
return content, err
}
if os.Getenv("SPIDER_CALL_METHOD") == "REST" {
url := common.SPIDER_REST_URL + "/vmspec"
// Create Req body
tempReq := common.SpiderConnectionName{}
tempReq.ConnectionName = connConfig
client := resty.New().SetCloseConnection(true)
client.SetAllowGetMethodPayload(true)
resp, err := client.R().
SetHeader("Content-Type", "application/json").
SetBody(tempReq).
SetResult(&SpiderSpecList{}). // or SetResult(AuthSuccess{}).
//SetError(&AuthError{}). // or SetError(AuthError{}).
Get(url)
if err != nil {
common.CBLog.Error(err)
content := SpiderSpecList{}
err := fmt.Errorf("an error occurred while requesting to CB-Spider")
return content, err
}
fmt.Println(string(resp.Body()))
fmt.Println("HTTP Status code: " + strconv.Itoa(resp.StatusCode()))
switch {
case resp.StatusCode() >= 400 || resp.StatusCode() < 200:
err := fmt.Errorf(string(resp.Body()))
common.CBLog.Error(err)
content := SpiderSpecList{}
return content, err
}
temp := resp.Result().(*SpiderSpecList)
return *temp, nil
} else {
		// Configure the CCM API
ccm := api.NewCloudResourceHandler()
err := ccm.SetConfigPath(os.Getenv("CBTUMBLEBUG_ROOT") + "/conf/grpc_conf.yaml")
if err != nil {
common.CBLog.Error("ccm failed to set config : ", err)
return SpiderSpecList{}, err
}
err = ccm.Open()
if err != nil {
common.CBLog.Error("ccm api open failed : ", err)
return SpiderSpecList{}, err
}
defer ccm.Close()
result, err := ccm.ListVMSpecByParam(connConfig)
if err != nil {
common.CBLog.Error(err)
return SpiderSpecList{}, err
}
temp := SpiderSpecList{}
err = json.Unmarshal([]byte(result), &temp)
if err != nil {
common.CBLog.Error(err)
return SpiderSpecList{}, err
}
return temp, nil
}
}
// LookupSpec accepts Spider conn config and CSP spec name, lookups and returns the Spider spec object
func LookupSpec(connConfig string, specName string) (SpiderSpecInfo, error) {
if connConfig == "" {
content := SpiderSpecInfo{}
err := fmt.Errorf("LookupSpec() called with empty connConfig.")
common.CBLog.Error(err)
return content, err
} else if specName == "" {
content := SpiderSpecInfo{}
err := fmt.Errorf("LookupSpec() called with empty specName.")
common.CBLog.Error(err)
return content, err
}
if os.Getenv("SPIDER_CALL_METHOD") == "REST" {
//url := common.SPIDER_REST_URL + "/vmspec/" + u.CspSpecName
url := common.SPIDER_REST_URL + "/vmspec/" + specName
// Create Req body
tempReq := common.SpiderConnectionName{}
tempReq.ConnectionName = connConfig
client := resty.New().SetCloseConnection(true)
client.SetAllowGetMethodPayload(true)
resp, err := client.R().
SetHeader("Content-Type", "application/json").
SetBody(tempReq).
SetResult(&SpiderSpecInfo{}). // or SetResult(AuthSuccess{}).
//SetError(&AuthError{}). // or SetError(AuthError{}).
Get(url)
if err != nil {
common.CBLog.Error(err)
content := SpiderSpecInfo{}
err := fmt.Errorf("an error occurred while requesting to CB-Spider")
return content, err
}
fmt.Println(string(resp.Body()))
fmt.Println("HTTP Status code: " + strconv.Itoa(resp.StatusCode()))
switch {
case resp.StatusCode() >= 400 || resp.StatusCode() < 200:
err := fmt.Errorf(string(resp.Body()))
common.CBLog.Error(err)
content := SpiderSpecInfo{}
return content, err
}
temp := resp.Result().(*SpiderSpecInfo)
return *temp, nil
} else {
		// Configure the CCM API
ccm := api.NewCloudResourceHandler()
err := ccm.SetConfigPath(os.Getenv("CBTUMBLEBUG_ROOT") + "/conf/grpc_conf.yaml")
if err != nil {
common.CBLog.Error("ccm failed to set config : ", err)
return SpiderSpecInfo{}, err
}
err = ccm.Open()
if err != nil {
common.CBLog.Error("ccm api open failed : ", err)
return SpiderSpecInfo{}, err
}
defer ccm.Close()
result, err := ccm.GetVMSpecByParam(connConfig, specName)
if err != nil {
common.CBLog.Error(err)
return SpiderSpecInfo{}, err
}
temp := SpiderSpecInfo{}
err2 := json.Unmarshal([]byte(result), &temp)
if err2 != nil {
			return SpiderSpecInfo{}, fmt.Errorf("an error occurred while unmarshaling: " + err2.Error())
}
return temp, nil
}
}
func RefineSpecName(specName string) string {
out := strings.ToLower(specName)
out = strings.ReplaceAll(out, ".", "-")
out = strings.ReplaceAll(out, "_", "-")
return out
}
// FetchSpecsForConnConfig lookups all specs for region of conn config, and saves into TB spec objects
func FetchSpecsForConnConfig(connConfig string, nsId string) (specCount uint, err error) {
fmt.Println("FetchSpecsForConnConfig(" + connConfig + ")")
spiderSpecList, err := LookupSpecList(connConfig)
if err != nil {
common.CBLog.Error(err)
return 0, err
}
for _, spiderSpec := range spiderSpecList.Vmspec {
tumblebugSpec, err := ConvertSpiderSpecToTumblebugSpec(spiderSpec)
if err != nil {
common.CBLog.Error(err)
return 0, err
}
tumblebugSpecId := connConfig + "-" + RefineSpecName(tumblebugSpec.Name)
//fmt.Println("tumblebugSpecId: " + tumblebugSpecId) // for debug
check, err := CheckResource(nsId, common.StrSpec, tumblebugSpecId)
if check {
common.CBLog.Infoln("The spec " + tumblebugSpecId + " already exists in TB; continue")
continue
} else if err != nil {
common.CBLog.Infoln("Cannot check the existence of " + tumblebugSpecId + " in TB; continue")
continue
} else {
tumblebugSpec.Name = tumblebugSpecId
tumblebugSpec.ConnectionName = connConfig
_, err := RegisterSpecWithInfo(nsId, &tumblebugSpec)
if err != nil {
common.CBLog.Error(err)
return 0, err
}
specCount++
}
}
return specCount, nil
}
// FetchSpecsForAllConnConfigs gets all conn configs from Spider, lookups all specs for each region of conn config, and saves into TB spec objects
func FetchSpecsForAllConnConfigs(nsId string) (connConfigCount uint, specCount uint, err error) {
err = common.CheckString(nsId)
if err != nil {
common.CBLog.Error(err)
return 0, 0, err
}
connConfigs, err := common.GetConnConfigList()
if err != nil {
common.CBLog.Error(err)
return 0, 0, err
}
for _, connConfig := range connConfigs.Connectionconfig {
temp, _ := FetchSpecsForConnConfig(connConfig.ConfigName, nsId)
specCount += temp
connConfigCount++
}
return connConfigCount, specCount, nil
}
// RegisterSpecWithCspSpecName accepts spec creation request, creates and returns an TB spec object
func RegisterSpecWithCspSpecName(nsId string, u *TbSpecReq) (TbSpecInfo, error) {
resourceType := common.StrSpec
err := common.CheckString(nsId)
if err != nil {
temp := TbSpecInfo{}
common.CBLog.Error(err)
return temp, err
}
err = common.CheckString(u.Name)
if err != nil {
temp := TbSpecInfo{}
common.CBLog.Error(err)
return temp, err
}
check, _ := CheckResource(nsId, resourceType, u.Name)
if check {
temp := TbSpecInfo{}
err := fmt.Errorf("The spec " + u.Name + " already exists.")
return temp, err
}
res, err := LookupSpec(u.ConnectionName, u.CspSpecName)
if err != nil {
common.CBLog.Error(err)
		err := fmt.Errorf("an error occurred while looking up the spec via CB-Spider")
emptySpecInfoObj := TbSpecInfo{}
return emptySpecInfoObj, err
}
content := TbSpecInfo{}
content.Namespace = nsId
//content.Id = common.GenUuid()
content.Id = u.Name
content.Name = u.Name
content.CspSpecName = res.Name
content.ConnectionName = u.ConnectionName
content.AssociatedObjectList = []string{}
tempUint64, _ := strconv.ParseUint(res.VCpu.Count, 10, 16)
content.Num_vCPU = uint16(tempUint64)
//content.Num_core = res.Num_core
tempFloat64, _ := strconv.ParseFloat(res.Mem, 32)
content.Mem_GiB = uint16(tempFloat64 / 1024)
//content.Storage_GiB = res.Storage_GiB
//content.Description = res.Description
// cb-store
fmt.Println("=========================== PUT registerSpec")
Key := common.GenResourceKey(nsId, resourceType, content.Id)
Val, _ := json.Marshal(content)
err = common.CBStore.Put(string(Key), string(Val))
if err != nil {
common.CBLog.Error(err)
return content, err
}
keyValue, _ := common.CBStore.Get(string(Key))
fmt.Println("<" + keyValue.Key + "> \n" + keyValue.Value)
fmt.Println("===========================")
// register information related with MCIS recommendation
err = RegisterRecommendList(nsId, content.ConnectionName, content.Num_vCPU, content.Mem_GiB, content.Storage_GiB, content.Id, content.Cost_per_hour)
if err != nil {
common.CBLog.Error(err)
return content, err
}
// "INSERT INTO `spec`(`namespace`, `id`, ...) VALUES ('nsId', 'content.Id', ...);
_, err = common.ORM.Insert(&content)
if err != nil {
fmt.Println(err.Error())
} else {
fmt.Println("Data inserted successfully..")
}
return content, nil
}
// RegisterSpecWithInfo accepts spec creation request, creates and returns an TB spec object
func RegisterSpecWithInfo(nsId string, content *TbSpecInfo) (TbSpecInfo, error) {
resourceType := common.StrSpec
err := common.CheckString(nsId)
if err != nil {
temp := TbSpecInfo{}
common.CBLog.Error(err)
return temp, err
}
err = common.CheckString(content.Name)
if err != nil {
temp := TbSpecInfo{}
common.CBLog.Error(err)
return temp, err
}
check, _ := CheckResource(nsId, resourceType, content.Name)
if check {
temp := TbSpecInfo{}
err := fmt.Errorf("The spec " + content.Name + " already exists.")
return temp, err
}
content.Namespace = nsId
content.Id = content.Name
content.AssociatedObjectList = []string{}
// cb-store
fmt.Println("=========================== PUT registerSpec")
Key := common.GenResourceKey(nsId, resourceType, content.Id)
Val, _ := json.Marshal(content)
err = common.CBStore.Put(string(Key), string(Val))
if err != nil {
common.CBLog.Error(err)
return *content, err
}
keyValue, _ := common.CBStore.Get(string(Key))
fmt.Println("<" + keyValue.Key + "> \n" + keyValue.Value)
fmt.Println("===========================")
// register information related with MCIS recommendation
err = RegisterRecommendList(nsId, content.ConnectionName, content.Num_vCPU, content.Mem_GiB, content.Storage_GiB, content.Id, content.Cost_per_hour)
if err != nil {
common.CBLog.Error(err)
return *content, err
}
// "INSERT INTO `spec`(`namespace`, `id`, ...) VALUES ('nsId', 'content.Id', ...);
_, err = common.ORM.Insert(content)
if err != nil {
fmt.Println(err.Error())
} else {
fmt.Println("Data inserted successfully..")
}
return *content, nil
}
// RegisterRecommendList creates the spec recommendation info
func RegisterRecommendList(nsId string, connectionName string, cpuSize uint16, memSize uint16, diskSize uint32, specId string, price float32) error {
err := common.CheckString(nsId)
if err != nil {
common.CBLog.Error(err)
return err
}
//fmt.Println("[Get MCISs")
key := common.GenMcisKey(nsId, "", "") + "/cpuSize/" + strconv.Itoa(int(cpuSize)) + "/memSize/" + strconv.Itoa(int(memSize)) + "/diskSize/" + strconv.Itoa(int(diskSize)) + "/specId/" + specId
fmt.Println(key)
mapA := map[string]string{"id": specId, "price": fmt.Sprintf("%.6f", price), "connectionName": connectionName}
Val, _ := json.Marshal(mapA)
err = common.CBStore.Put(string(key), string(Val))
if err != nil {
common.CBLog.Error(err)
return err
}
fmt.Println("===============================================")
return nil
}
// DelRecommendSpec deletes the spec recommendation info
func DelRecommendSpec(nsId string, specId string, cpuSize uint16, memSize uint16, diskSize uint32) error {
err := common.CheckString(nsId)
if err != nil {
common.CBLog.Error(err)
return err
}
fmt.Println("DelRecommendSpec()")
key := common.GenMcisKey(nsId, "", "") + "/cpuSize/" + strconv.Itoa(int(cpuSize)) + "/memSize/" + strconv.Itoa(int(memSize)) + "/diskSize/" + strconv.Itoa(int(diskSize)) + "/specId/" + specId
err = common.CBStore.Delete(key)
if err != nil {
common.CBLog.Error(err)
return err
}
return nil
}
// FilterSpecs accepts criteria for filtering, and returns the list of filtered TB spec objects
func FilterSpecs(nsId string, filter TbSpecInfo) ([]TbSpecInfo, error) {
err := common.CheckString(nsId)
if err != nil {
common.CBLog.Error(err)
return nil, err
}
tempList := []TbSpecInfo{}
//sqlQuery := "SELECT * FROM `spec` WHERE `namespace`='" + nsId + "'"
sqlQuery := common.ORM.Where("Namespace = ?", nsId)
if filter.Id != "" {
//sqlQuery += " AND `id` LIKE '%" + filter.Id + "%'"
filter.Id = RefineSpecName(filter.Id)
sqlQuery = sqlQuery.And("Id LIKE ?", "%"+filter.Id+"%")
}
if filter.Name != "" {
//sqlQuery += " AND `name` LIKE '%" + filter.Name + "%'"
filter.Name = RefineSpecName(filter.Name)
sqlQuery = sqlQuery.And("Name LIKE ?", "%"+filter.Name+"%")
}
if filter.ConnectionName != "" {
//sqlQuery += " AND `connectionName` LIKE '%" + filter.ConnectionName + "%'"
filter.ConnectionName = RefineSpecName(filter.ConnectionName)
sqlQuery = sqlQuery.And("ConnectionName LIKE ?", "%"+filter.ConnectionName+"%")
}
if filter.CspSpecName != "" {
//sqlQuery += " AND `cspSpecName` LIKE '%" + filter.CspSpecName + "%'"
filter.CspSpecName = RefineSpecName(filter.CspSpecName)
sqlQuery = sqlQuery.And("CspSpecName LIKE ?", "%"+filter.CspSpecName+"%")
}
if filter.Os_type != "" {
//sqlQuery += " AND `os_type` LIKE '%" + filter.Os_type + "%'"
filter.Os_type = RefineSpecName(filter.Os_type)
sqlQuery = sqlQuery.And("Os_type LIKE ?", "%"+filter.Os_type+"%")
}
if filter.Num_vCPU > 0 {
//sqlQuery += " AND `num_vCPU`=" + strconv.Itoa(int(filter.Num_vCPU))
sqlQuery = sqlQuery.And("Num_vCPU = ?", filter.Num_vCPU)
}
if filter.Num_core > 0 {
//sqlQuery += " AND `num_core`=" + strconv.Itoa(int(filter.Num_core))
sqlQuery = sqlQuery.And("Num_core = ?", filter.Num_core)
}
if filter.Mem_GiB > 0 {
//sqlQuery += " AND `mem_GiB`=" + strconv.Itoa(int(filter.Mem_GiB))
sqlQuery = sqlQuery.And("Mem_GiB = ?", filter.Mem_GiB)
}
if filter.Storage_GiB > 0 {
//sqlQuery += " AND `storage_GiB`=" + strconv.Itoa(int(filter.Storage_GiB))
sqlQuery = sqlQuery.And("Storage_GiB = ?", filter.Storage_GiB)
}
if filter.Description != "" {
//sqlQuery += " AND `description` LIKE '%" + filter.Description + "%'"
filter.Description = RefineSpecName(filter.Description)
sqlQuery = sqlQuery.And("Description LIKE ?", "%"+filter.Description+"%")
}
if filter.Cost_per_hour > 0 {
//sqlQuery += " AND `cost_per_hour`=" + fmt.Sprintf("%.6f", filter.Cost_per_hour)
sqlQuery = sqlQuery.And("Cost_per_hour = ?", filter.Cost_per_hour)
}
if filter.Num_storage > 0 {
//sqlQuery += " AND `num_storage`=" + strconv.Itoa(int(filter.Num_storage))
sqlQuery = sqlQuery.And("Num_storage = ?", filter.Num_storage)
}
if filter.Max_num_storage > 0 {
//sqlQuery += " AND `max_num_storage`=" + strconv.Itoa(int(filter.Max_num_storage))
sqlQuery = sqlQuery.And("Max_num_storage = ?", filter.Max_num_storage)
}
if filter.Max_total_storage_TiB > 0 {
//sqlQuery += " AND `max_total_storage_TiB`=" + strconv.Itoa(int(filter.Max_total_storage_TiB))
sqlQuery = sqlQuery.And("Max_total_storage_TiB = ?", filter.Max_total_storage_TiB)
}
if filter.Net_bw_Gbps > 0 {
//sqlQuery += " AND `net_bw_Gbps`=" + strconv.Itoa(int(filter.Net_bw_Gbps))
sqlQuery = sqlQuery.And("Net_bw_Gbps = ?", filter.Net_bw_Gbps)
}
if filter.Ebs_bw_Mbps > 0 {
//sqlQuery += " AND `ebs_bw_Mbps`=" + strconv.Itoa(int(filter.Ebs_bw_Mbps))
sqlQuery = sqlQuery.And("Ebs_bw_Mbps = ?", filter.Ebs_bw_Mbps)
}
if filter.Gpu_model != "" {
//sqlQuery += " AND `gpu_model` LIKE '%" + filter.Gpu_model + "%'"
filter.Gpu_model = RefineSpecName(filter.Gpu_model)
sqlQuery = sqlQuery.And("Gpu_model LIKE ?", "%"+filter.Gpu_model+"%")
}
if filter.Num_gpu > 0 {
//sqlQuery += " AND `num_gpu`=" + strconv.Itoa(int(filter.Num_gpu))
sqlQuery = sqlQuery.And("Num_gpu = ?", filter.Num_gpu)
}
if filter.Gpumem_GiB > 0 {
//sqlQuery += " AND `gpumem_GiB`=" + strconv.Itoa(int(filter.Gpumem_GiB))
sqlQuery = sqlQuery.And("Gpumem_GiB = ?", filter.Gpumem_GiB)
}
if filter.Gpu_p2p != "" {
//sqlQuery += " AND `gpu_p2p` LIKE '%" + filter.Gpu_p2p + "%'"
filter.Gpu_p2p = RefineSpecName(filter.Gpu_p2p)
sqlQuery = sqlQuery.And("Gpu_p2p LIKE ?", "%"+filter.Gpu_p2p+"%")
}
if filter.EvaluationStatus != "" {
//sqlQuery += " AND `evaluationStatus` LIKE '%" + filter.EvaluationStatus + "%'"
filter.EvaluationStatus = RefineSpecName(filter.EvaluationStatus)
sqlQuery = sqlQuery.And("EvaluationStatus LIKE ?", "%"+filter.EvaluationStatus+"%")
}
if filter.EvaluationScore_01 > 0 {
//sqlQuery += " AND `evaluationScore_01`=" + fmt.Sprintf("%.6f", filter.EvaluationScore_01)
sqlQuery = sqlQuery.And("EvaluationScore_01 = ?", filter.EvaluationScore_01)
}
if filter.EvaluationScore_02 > 0 {
//sqlQuery += " AND `evaluationScore_02`=" + fmt.Sprintf("%.6f", filter.EvaluationScore_02)
sqlQuery = sqlQuery.And("EvaluationScore_02 = ?", filter.EvaluationScore_02)
}
if filter.EvaluationScore_03 > 0 {
//sqlQuery += " AND `evaluationScore_03`=" + fmt.Sprintf("%.6f", filter.EvaluationScore_03)
sqlQuery = sqlQuery.And("EvaluationScore_03 = ?", filter.EvaluationScore_03)
}
if filter.EvaluationScore_04 > 0 {
//sqlQuery += " AND `evaluationScore_04`=" + fmt.Sprintf("%.6f", filter.EvaluationScore_04)
sqlQuery = sqlQuery.And("EvaluationScore_04 = ?", filter.EvaluationScore_04)
}
if filter.EvaluationScore_05 > 0 {
//sqlQuery += " AND `evaluationScore_05`=" + fmt.Sprintf("%.6f", filter.EvaluationScore_05)
sqlQuery = sqlQuery.And("EvaluationScore_05 = ?", filter.EvaluationScore_05)
}
if filter.EvaluationScore_06 > 0 {
//sqlQuery += " AND `evaluationScore_06`=" + fmt.Sprintf("%.6f", filter.EvaluationScore_06)
sqlQuery = sqlQuery.And("EvaluationScore_06 = ?", filter.EvaluationScore_06)
}
if filter.EvaluationScore_07 > 0 {
//sqlQuery += " AND `evaluationScore_07`=" + fmt.Sprintf("%.6f", filter.EvaluationScore_07)
sqlQuery = sqlQuery.And("EvaluationScore_07 = ?", filter.EvaluationScore_07)
}
if filter.EvaluationScore_08 > 0 {
//sqlQuery += " AND `evaluationScore_08`=" + fmt.Sprintf("%.6f", filter.EvaluationScore_08)
sqlQuery = sqlQuery.And("EvaluationScore_08 = ?", filter.EvaluationScore_08)
}
if filter.EvaluationScore_09 > 0 {
//sqlQuery += " AND `evaluationScore_09`=" + fmt.Sprintf("%.6f", filter.EvaluationScore_09)
sqlQuery = sqlQuery.And("EvaluationScore_09 = ?", filter.EvaluationScore_09)
}
if filter.EvaluationScore_10 > 0 {
//sqlQuery += " AND `evaluationScore_10`=" + fmt.Sprintf("%.6f", filter.EvaluationScore_10)
sqlQuery = sqlQuery.And("EvaluationScore_10 = ?", filter.EvaluationScore_10)
}
err = sqlQuery.Find(&tempList)
if err != nil {
common.CBLog.Error(err)
return tempList, err
}
return tempList, nil
}
type Range struct {
Min float32 `json:"min"`
Max float32 `json:"max"`
}
type FilterSpecsByRangeRequest struct {
Id string `json:"id"`
Name string `json:"name"`
ConnectionName string `json:"connectionName"`
CspSpecName string `json:"cspSpecName"`
Os_type string `json:"os_type"`
Num_vCPU Range `json:"num_vCPU"`
Num_core Range `json:"num_core"`
Mem_GiB Range `json:"mem_GiB"`
Storage_GiB Range `json:"storage_GiB"`
Description string `json:"description"`
Cost_per_hour Range `json:"cost_per_hour"`
Num_storage Range `json:"num_storage"`
Max_num_storage Range `json:"max_num_storage"`
Max_total_storage_TiB Range `json:"max_total_storage_TiB"`
Net_bw_Gbps Range `json:"net_bw_Gbps"`
Ebs_bw_Mbps Range `json:"ebs_bw_Mbps"`
Gpu_model string `json:"gpu_model"`
Num_gpu Range `json:"num_gpu"`
Gpumem_GiB Range `json:"gpumem_GiB"`
Gpu_p2p string `json:"gpu_p2p"`
EvaluationStatus string `json:"evaluationStatus"`
EvaluationScore_01 Range `json:"evaluationScore_01"`
EvaluationScore_02 Range `json:"evaluationScore_02"`
EvaluationScore_03 Range `json:"evaluationScore_03"`
EvaluationScore_04 Range `json:"evaluationScore_04"`
EvaluationScore_05 Range `json:"evaluationScore_05"`
EvaluationScore_06 Range `json:"evaluationScore_06"`
EvaluationScore_07 Range `json:"evaluationScore_07"`
EvaluationScore_08 Range `json:"evaluationScore_08"`
EvaluationScore_09 Range `json:"evaluationScore_09"`
EvaluationScore_10 Range `json:"evaluationScore_10"`
}
// FilterSpecsByRange accepts criteria ranges for filtering, and returns the list of filtered TB spec objects
func FilterSpecsByRange(nsId string, filter FilterSpecsByRangeRequest) ([]TbSpecInfo, error) {
err := common.CheckString(nsId)
if err != nil {
common.CBLog.Error(err)
return nil, err
}
tempList := []TbSpecInfo{}
//sqlQuery := "SELECT * FROM `spec` WHERE `namespace`='" + nsId + "'"
sqlQuery := common.ORM.Where("Namespace = ?", nsId)
if filter.Id != "" {
//sqlQuery += " AND `id` LIKE '%" + filter.Id + "%'"
filter.Id = RefineSpecName(filter.Id)
sqlQuery = sqlQuery.And("Id LIKE ?", "%"+filter.Id+"%")
}
if filter.Name != "" {
//sqlQuery += " AND `name` LIKE '%" + filter.Name + "%'"
filter.Name = RefineSpecName(filter.Name)
sqlQuery = sqlQuery.And("Name LIKE ?", "%"+filter.Name+"%")
}
if filter.ConnectionName != "" {
//sqlQuery += " AND `connectionName` LIKE '%" + filter.ConnectionName + "%'"
filter.ConnectionName = RefineSpecName(filter.ConnectionName)
sqlQuery = sqlQuery.And("ConnectionName LIKE ?", "%"+filter.ConnectionName+"%")
}
if filter.CspSpecName != "" {
//sqlQuery += " AND `cspSpecName` LIKE '%" + filter.CspSpecName + "%'"
filter.CspSpecName = RefineSpecName(filter.CspSpecName)
sqlQuery = sqlQuery.And("CspSpecName LIKE ?", "%"+filter.CspSpecName+"%")
}
if filter.Os_type != "" {
//sqlQuery += " AND `os_type` LIKE '%" + filter.Os_type + "%'"
filter.Os_type = RefineSpecName(filter.Os_type)
sqlQuery = sqlQuery.And("Os_type LIKE ?", "%"+filter.Os_type+"%")
}
if filter.Num_vCPU.Min > 0 {
//sqlQuery += " AND `num_vCPU`>=" + fmt.Sprintf("%.6f", filter.Num_vCPU.Min)
sqlQuery = sqlQuery.And("Num_vCPU >= ?", filter.Num_vCPU.Min)
}
if filter.Num_vCPU.Max > 0 {
//sqlQuery += " AND `num_vCPU`<=" + fmt.Sprintf("%.6f", filter.Num_vCPU.Max)
sqlQuery = sqlQuery.And("Num_vCPU <= ?", filter.Num_vCPU.Max)
}
if filter.Num_core.Min > 0 {
//sqlQuery += " AND `num_core`>=" + fmt.Sprintf("%.6f", filter.Num_core.Min)
sqlQuery = sqlQuery.And("Num_core >= ?", filter.Num_core.Min)
}
if filter.Num_core.Max > 0 {
//sqlQuery += " AND `num_core`<=" + fmt.Sprintf("%.6f", filter.Num_core.Max)
sqlQuery = sqlQuery.And("Num_core <= ?", filter.Num_core.Max)
}
if filter.Mem_GiB.Min > 0 {
//sqlQuery += " AND `mem_GiB`>=" + fmt.Sprintf("%.6f", filter.Mem_GiB.Min)
sqlQuery = sqlQuery.And("Mem_GiB >= ?", filter.Mem_GiB.Min)
}
if filter.Mem_GiB.Max > 0 {
//sqlQuery += " AND `mem_GiB`<=" + fmt.Sprintf("%.6f", filter.Mem_GiB.Max)
sqlQuery = sqlQuery.And("Mem_GiB <= ?", filter.Mem_GiB.Max)
}
if filter.Storage_GiB.Min > 0 {
//sqlQuery += " AND `storage_GiB`>=" + fmt.Sprintf("%.6f", filter.Storage_GiB.Min)
sqlQuery = sqlQuery.And("Storage_GiB >= ?", filter.Storage_GiB.Min)
}
if filter.Storage_GiB.Max > 0 {
//sqlQuery += " AND `storage_GiB`<=" + fmt.Sprintf("%.6f", filter.Storage_GiB.Max)
sqlQuery = sqlQuery.And("Storage_GiB <= ?", filter.Storage_GiB.Max)
}
if filter.Description != "" {
//sqlQuery += " AND `description` LIKE '%" + filter.Description + "%'"
filter.Description = RefineSpecName(filter.Description)
sqlQuery = sqlQuery.And("Description LIKE ?", "%"+filter.Description+"%")
}
if filter.Cost_per_hour.Min > 0 {
//sqlQuery += " AND `cost_per_hour`>=" + fmt.Sprintf("%.6f", filter.Cost_per_hour.Min)
sqlQuery = sqlQuery.And("Cost_per_hour >= ?", filter.Cost_per_hour.Min)
}
if filter.Cost_per_hour.Max > 0 {
//sqlQuery += " AND `cost_per_hour`<=" + fmt.Sprintf("%.6f", filter.Cost_per_hour.Max)
sqlQuery = sqlQuery.And("Cost_per_hour <= ?", filter.Cost_per_hour.Max)
}
if filter.Num_storage.Min > 0 {
//sqlQuery += " AND `num_storage`>=" + fmt.Sprintf("%.6f", filter.Num_storage.Min)
sqlQuery = sqlQuery.And("Num_storage >= ?", filter.Num_storage.Min)
}
if filter.Num_storage.Max > 0 {
//sqlQuery += " AND `num_storage`<=" + fmt.Sprintf("%.6f", filter.Num_storage.Max)
sqlQuery = sqlQuery.And("Num_storage <= ?", filter.Num_storage.Max)
}
if filter.Max_num_storage.Min > 0 {
//sqlQuery += " AND `max_num_storage`>=" + fmt.Sprintf("%.6f", filter.Max_num_storage.Min)
sqlQuery = sqlQuery.And("Max_num_storage >= ?", filter.Max_num_storage.Min)
}
if filter.Max_num_storage.Max > 0 {
//sqlQuery += " AND `max_num_storage`<=" + fmt.Sprintf("%.6f", filter.Max_num_storage.Max)
sqlQuery = sqlQuery.And("Max_num_storage <= ?", filter.Max_num_storage.Max)
}
if filter.Max_total_storage_TiB.Min > 0 {
//sqlQuery += " AND `max_total_storage_TiB`>=" + fmt.Sprintf("%.6f", filter.Max_total_storage_TiB.Min)
sqlQuery = sqlQuery.And("Max_total_storage_TiB >= ?", filter.Max_total_storage_TiB.Min)
}
if filter.Max_total_storage_TiB.Max > 0 {
//sqlQuery += " AND `max_total_storage_TiB`<=" + fmt.Sprintf("%.6f", filter.Max_total_storage_TiB.Max)
sqlQuery = sqlQuery.And("Max_total_storage_TiB <= ?", filter.Max_total_storage_TiB.Max)
}
if filter.Net_bw_Gbps.Min > 0 {
//sqlQuery += " AND `net_bw_Gbps`>=" + fmt.Sprintf("%.6f", filter.Net_bw_Gbps.Min)
sqlQuery = sqlQuery.And("Net_bw_Gbps >= ?", filter.Net_bw_Gbps.Min)
}
if filter.Net_bw_Gbps.Max > 0 {
//sqlQuery += " AND `net_bw_Gbps`<=" + fmt.Sprintf("%.6f", filter.Net_bw_Gbps.Max)
sqlQuery = sqlQuery.And("Net_bw_Gbps <= ?", filter.Net_bw_Gbps.Max)
}
if filter.Ebs_bw_Mbps.Min > 0 {
//sqlQuery += " AND `ebs_bw_Mbps`>=" + fmt.Sprintf("%.6f", filter.Ebs_bw_Mbps.Min)
sqlQuery = sqlQuery.And("Ebs_bw_Mbps >= ?", filter.Ebs_bw_Mbps.Min)
}
if filter.Ebs_bw_Mbps.Max > 0 {
//sqlQuery += " AND `ebs_bw_Mbps`<=" + fmt.Sprintf("%.6f", filter.Ebs_bw_Mbps.Max)
sqlQuery = sqlQuery.And("Ebs_bw_Mbps <= ?", filter.Ebs_bw_Mbps.Max)
}
if filter.Gpu_model != "" {
//sqlQuery += " AND `gpu_model` LIKE '%" + filter.Gpu_model + "%'"
filter.Gpu_model = RefineSpecName(filter.Gpu_model)
sqlQuery = sqlQuery.And("Gpu_model LIKE ?", "%"+filter.Gpu_model+"%")
}
if filter.Num_gpu.Min > 0 {
//sqlQuery += " AND `num_gpu`>=" + fmt.Sprintf("%.6f", filter.Num_gpu.Min)
sqlQuery = sqlQuery.And("Num_gpu >= ?", filter.Num_gpu.Min)
}
if filter.Num_gpu.Max > 0 {
//sqlQuery += " AND `num_gpu`<=" + fmt.Sprintf("%.6f", filter.Num_gpu.Max)
sqlQuery = sqlQuery.And("Num_gpu <= ?", filter.Num_gpu.Max)
}
if filter.Gpumem_GiB.Min > 0 {
//sqlQuery += " AND `gpumem_GiB`>=" + fmt.Sprintf("%.6f", filter.Gpumem_GiB.Min)
sqlQuery = sqlQuery.And("Gpumem_GiB >= ?", filter.Gpumem_GiB.Min)
}
if filter.Gpumem_GiB.Max > 0 {
//sqlQuery += " AND `gpumem_GiB`<=" + fmt.Sprintf("%.6f", filter.Gpumem_GiB.Max)
sqlQuery = sqlQuery.And("Gpumem_GiB <= ?", filter.Gpumem_GiB.Max)
}
if filter.Gpu_p2p != "" {
//sqlQuery += " AND `gpu_p2p` LIKE '%" + filter.Gpu_p2p + "%'"
filter.Gpu_p2p = RefineSpecName(filter.Gpu_p2p)
sqlQuery = sqlQuery.And("Gpu_p2p LIKE ?", "%"+filter.Gpu_p2p+"%")
}
if filter.EvaluationStatus != "" {
//sqlQuery += " AND `evaluationStatus` LIKE '%" + filter.EvaluationStatus + "%'"
filter.EvaluationStatus = RefineSpecName(filter.EvaluationStatus)
sqlQuery = sqlQuery.And("EvaluationStatus LIKE ?", "%"+filter.EvaluationStatus+"%")
}
if filter.EvaluationScore_01.Min > 0 {
//sqlQuery += " AND `evaluationScore_01`>=" + fmt.Sprintf("%.6f", filter.EvaluationScore_01.Min)
sqlQuery = sqlQuery.And("EvaluationScore_01 >= ?", filter.EvaluationScore_01.Min)
}
if filter.EvaluationScore_01.Max > 0 {
//sqlQuery += " AND `evaluationScore_01`<=" + fmt.Sprintf("%.6f", filter.EvaluationScore_01.Max)
sqlQuery = sqlQuery.And("EvaluationScore_01 <= ?", filter.EvaluationScore_01.Max)
}
if filter.EvaluationScore_02.Min > 0 {
//sqlQuery += " AND `evaluationScore_02`>=" + fmt.Sprintf("%.6f", filter.EvaluationScore_02.Min)
sqlQuery = sqlQuery.And("EvaluationScore_02 >= ?", filter.EvaluationScore_02.Min)
}
if filter.EvaluationScore_02.Max > 0 {
//sqlQuery += " AND `evaluationScore_02`<=" + fmt.Sprintf("%.6f", filter.EvaluationScore_02.Max)
sqlQuery = sqlQuery.And("EvaluationScore_02 <= ?", filter.EvaluationScore_02.Max)
}
if filter.EvaluationScore_03.Min > 0 {
//sqlQuery += " AND `evaluationScore_03`>=" + fmt.Sprintf("%.6f", filter.EvaluationScore_03.Min)
sqlQuery = sqlQuery.And("EvaluationScore_03 >= ?", filter.EvaluationScore_03.Min)
}
if filter.EvaluationScore_03.Max > 0 {
//sqlQuery += " AND `evaluationScore_03`<=" + fmt.Sprintf("%.6f", filter.EvaluationScore_03.Max)
sqlQuery = sqlQuery.And("EvaluationScore_03 <= ?", filter.EvaluationScore_03.Max)
}
if filter.EvaluationScore_04.Min > 0 {
//sqlQuery += " AND `evaluationScore_04`>=" + fmt.Sprintf("%.6f", filter.EvaluationScore_04.Min)
sqlQuery = sqlQuery.And("EvaluationScore_04 >= ?", filter.EvaluationScore_04.Min)
}
if filter.EvaluationScore_04.Max > 0 {
//sqlQuery += " AND `evaluationScore_04`<=" + fmt.Sprintf("%.6f", filter.EvaluationScore_04.Max)
sqlQuery = sqlQuery.And("EvaluationScore_04 <= ?", filter.EvaluationScore_04.Max)
}
if filter.EvaluationScore_05.Min > 0 {
//sqlQuery += " AND `evaluationScore_05`>=" + fmt.Sprintf("%.6f", filter.EvaluationScore_05.Min)
sqlQuery = sqlQuery.And("EvaluationScore_05 >= ?", filter.EvaluationScore_05.Min)
}
if filter.EvaluationScore_05.Max > 0 {
//sqlQuery += " AND `evaluationScore_05`<=" + fmt.Sprintf("%.6f", filter.EvaluationScore_05.Max)
sqlQuery = sqlQuery.And("EvaluationScore_05 <= ?", filter.EvaluationScore_05.Max)
}
if filter.EvaluationScore_06.Min > 0 {
//sqlQuery += " AND `evaluationScore_06`>=" + fmt.Sprintf("%.6f", filter.EvaluationScore_06.Min)
sqlQuery = sqlQuery.And("EvaluationScore_06 >= ?", filter.EvaluationScore_06.Min)
}
if filter.EvaluationScore_06.Max > 0 {
//sqlQuery += " AND `evaluationScore_06`<=" + fmt.Sprintf("%.6f", filter.EvaluationScore_06.Max)
sqlQuery = sqlQuery.And("EvaluationScore_06 <= ?", filter.EvaluationScore_06.Max)
}
if filter.EvaluationScore_07.Min > 0 {
//sqlQuery += " AND `evaluationScore_07`>=" + fmt.Sprintf("%.6f", filter.EvaluationScore_07.Min)
sqlQuery = sqlQuery.And("EvaluationScore_07 >= ?", filter.EvaluationScore_07.Min)
}
if filter.EvaluationScore_07.Max > 0 {
//sqlQuery += " AND `evaluationScore_07`<=" + fmt.Sprintf("%.6f", filter.EvaluationScore_07.Max)
sqlQuery = sqlQuery.And("EvaluationScore_07 <= ?", filter.EvaluationScore_07.Max)
}
if filter.EvaluationScore_08.Min > 0 {
//sqlQuery += " AND `evaluationScore_08`>=" + fmt.Sprintf("%.6f", filter.EvaluationScore_08.Min)
sqlQuery = sqlQuery.And("EvaluationScore_08 >= ?", filter.EvaluationScore_08.Min)
}
if filter.EvaluationScore_08.Max > 0 {
//sqlQuery += " AND `evaluationScore_08`<=" + fmt.Sprintf("%.6f", filter.EvaluationScore_08.Max)
sqlQuery = sqlQuery.And("EvaluationScore_08 <= ?", filter.EvaluationScore_08.Max)
}
if filter.EvaluationScore_09.Min > 0 {
//sqlQuery += " AND `evaluationScore_09`>=" + fmt.Sprintf("%.6f", filter.EvaluationScore_09.Min)
sqlQuery = sqlQuery.And("EvaluationScore_09 >= ?", filter.EvaluationScore_09.Min)
}
if filter.EvaluationScore_09.Max > 0 {
//sqlQuery += " AND `evaluationScore_09`<=" + fmt.Sprintf("%.6f", filter.EvaluationScore_09.Max)
sqlQuery = sqlQuery.And("EvaluationScore_09 <= ?", filter.EvaluationScore_09.Max)
}
if filter.EvaluationScore_10.Min > 0 {
//sqlQuery += " AND `evaluationScore_10`>=" + fmt.Sprintf("%.6f", filter.EvaluationScore_10.Min)
sqlQuery = sqlQuery.And("EvaluationScore_10 >= ?", filter.EvaluationScore_10.Min)
}
if filter.EvaluationScore_10.Max > 0 {
//sqlQuery += " AND `evaluationScore_10`<=" + fmt.Sprintf("%.6f", filter.EvaluationScore_10.Max)
sqlQuery = sqlQuery.And("EvaluationScore_10 <= ?", filter.EvaluationScore_10.Max)
}
err = sqlQuery.Find(&tempList)
if err != nil {
common.CBLog.Error(err)
return tempList, err
}
return tempList, nil
}
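// Example (illustrative sketch, not part of the original source): a caller
// could filter the specs of a namespace by vCPU and memory ranges. The
// namespace id and range values below are placeholders.
//
//	filterReq := FilterSpecsByRangeRequest{
//		Num_vCPU: Range{Min: 2, Max: 8},
//		Mem_GiB:  Range{Min: 4},
//	}
//	specs, err := FilterSpecsByRange("ns01", filterReq)
//	if err != nil {
//		common.CBLog.Error(err)
//	}
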
// SortSpecs accepts the list of TB spec objects, criteria and sorting direction,
// sorts and returns the sorted list of TB spec objects
func SortSpecs(specList []TbSpecInfo, orderBy string, direction string) ([]TbSpecInfo, error) {
var err error = nil
sort.Slice(specList, func(i, j int) bool {
if orderBy == "num_vCPU" {
if direction == "descending" {
return specList[i].Num_vCPU > specList[j].Num_vCPU
} else if direction == "ascending" {
return specList[i].Num_vCPU < specList[j].Num_vCPU
} else {
err = fmt.Errorf("'direction' should one of these: ascending, descending")
return true
}
} else if orderBy == "mem_GiB" {
if direction == "descending" {
return specList[i].Mem_GiB > specList[j].Mem_GiB
} else if direction == "ascending" {
return specList[i].Mem_GiB < specList[j].Mem_GiB
} else {
err = fmt.Errorf("'direction' should one of these: ascending, descending")
return true
}
} else if orderBy == "storage_GiB" {
if direction == "descending" {
return specList[i].Storage_GiB > specList[j].Storage_GiB
} else if direction == "ascending" {
return specList[i].Storage_GiB < specList[j].Storage_GiB
} else {
err = fmt.Errorf("'direction' should one of these: ascending, descending")
return true
}
} else if orderBy == "evaluationScore_01" {
if direction == "descending" {
return specList[i].EvaluationScore_01 > specList[j].EvaluationScore_01
} else if direction == "ascending" {
return specList[i].EvaluationScore_01 < specList[j].EvaluationScore_01
} else {
err = fmt.Errorf("'direction' should one of these: ascending, descending")
return true
}
} else if orderBy == "evaluationScore_02" {
if direction == "descending" {
return specList[i].EvaluationScore_02 > specList[j].EvaluationScore_02
} else if direction == "ascending" {
return specList[i].EvaluationScore_02 < specList[j].EvaluationScore_02
} else {
err = fmt.Errorf("'direction' should one of these: ascending, descending")
return true
}
} else if orderBy == "evaluationScore_03" {
if direction == "descending" {
return specList[i].EvaluationScore_03 > specList[j].EvaluationScore_03
} else if direction == "ascending" {
return specList[i].EvaluationScore_03 < specList[j].EvaluationScore_03
} else {
err = fmt.Errorf("'direction' should one of these: ascending, descending")
return true
}
} else if orderBy == "evaluationScore_04" {
if direction == "descending" {
return specList[i].EvaluationScore_04 > specList[j].EvaluationScore_04
} else if direction == "ascending" {
return specList[i].EvaluationScore_04 < specList[j].EvaluationScore_04
} else {
err = fmt.Errorf("'direction' should one of these: ascending, descending")
return true
}
} else if orderBy == "evaluationScore_05" {
if direction == "descending" {
return specList[i].EvaluationScore_05 > specList[j].EvaluationScore_05
} else if direction == "ascending" {
return specList[i].EvaluationScore_05 < specList[j].EvaluationScore_05
} else {
err = fmt.Errorf("'direction' should one of these: ascending, descending")
return true
}
} else if orderBy == "evaluationScore_06" {
if direction == "descending" {
return specList[i].EvaluationScore_06 > specList[j].EvaluationScore_06
} else if direction == "ascending" {
return specList[i].EvaluationScore_06 < specList[j].EvaluationScore_06
} else {
err = fmt.Errorf("'direction' should one of these: ascending, descending")
return true
}
} else if orderBy == "evaluationScore_07" {
if direction == "descending" {
return specList[i].EvaluationScore_07 > specList[j].EvaluationScore_07
} else if direction == "ascending" {
return specList[i].EvaluationScore_07 < specList[j].EvaluationScore_07
} else {
err = fmt.Errorf("'direction' should one of these: ascending, descending")
return true
}
} else if orderBy == "evaluationScore_08" {
if direction == "descending" {
return specList[i].EvaluationScore_08 > specList[j].EvaluationScore_08
} else if direction == "ascending" {
return specList[i].EvaluationScore_08 < specList[j].EvaluationScore_08
} else {
err = fmt.Errorf("'direction' should one of these: ascending, descending")
return true
}
} else if orderBy == "evaluationScore_09" {
if direction == "descending" {
return specList[i].EvaluationScore_09 > specList[j].EvaluationScore_09
} else if direction == "ascending" {
return specList[i].EvaluationScore_09 < specList[j].EvaluationScore_09
} else {
err = fmt.Errorf("'direction' should one of these: ascending, descending")
return true
}
} else if orderBy == "evaluationScore_10" {
if direction == "descending" {
return specList[i].EvaluationScore_10 > specList[j].EvaluationScore_10
} else if direction == "ascending" {
return specList[i].EvaluationScore_10 < specList[j].EvaluationScore_10
} else {
err = fmt.Errorf("'direction' should one of these: ascending, descending")
return true
}
} else {
err = fmt.Errorf("'orderBy' should one of these: num_vCPU, mem_GiB, storage_GiB")
return true
}
})
for i := range specList {
specList[i].OrderInFilteredResult = uint16(i + 1)
}
return specList, err
}
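// Example (illustrative sketch, not part of the original source): sorting a
// previously filtered list by vCPU count in ascending order; the variable
// name is a placeholder.
//
//	sorted, err := SortSpecs(filteredSpecs, "num_vCPU", "ascending")
//	if err != nil {
//		common.CBLog.Error(err)
//	}
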
// UpdateSpec accepts to-be TB spec objects,
// updates and returns the updated TB spec objects
func UpdateSpec(nsId string, newSpec TbSpecInfo) (TbSpecInfo, error) {
resourceType := common.StrSpec
err := common.CheckString(nsId)
if err != nil {
temp := TbSpecInfo{}
common.CBLog.Error(err)
return temp, err
}
err = common.CheckString(newSpec.Id)
if err != nil {
temp := TbSpecInfo{}
common.CBLog.Error(err)
return temp, err
}
check, _ := CheckResource(nsId, resourceType, newSpec.Id)
if !check {
temp := TbSpecInfo{}
err := fmt.Errorf("The spec " + newSpec.Id + " does not exist.")
return temp, err
}
tempInterface, err := GetResource(nsId, resourceType, newSpec.Id)
if err != nil {
temp := TbSpecInfo{}
err := fmt.Errorf("Failed to get the spec " + newSpec.Id + ".")
return temp, err
}
tempSpec := TbSpecInfo{}
err = common.CopySrcToDest(&tempInterface, &tempSpec)
if err != nil {
temp := TbSpecInfo{}
err := fmt.Errorf("Failed to CopySrcToDest() " + newSpec.Id + ".")
return temp, err
}
// cb-store
fmt.Println("=========================== PUT registerSpec")
Key := common.GenResourceKey(nsId, resourceType, tempSpec.Id)
Val, _ := json.Marshal(tempSpec)
err = common.CBStore.Put(string(Key), string(Val))
if err != nil {
temp := TbSpecInfo{}
common.CBLog.Error(err)
return temp, err
}
keyValue, _ := common.CBStore.Get(string(Key))
fmt.Println("<" + keyValue.Key + "> \n" + keyValue.Value)
fmt.Println("===========================")
// register information related with MCIS recommendation
err = RegisterRecommendList(nsId, tempSpec.ConnectionName, tempSpec.Num_vCPU, tempSpec.Mem_GiB, tempSpec.Storage_GiB, tempSpec.Id, tempSpec.Cost_per_hour)
if err != nil {
common.CBLog.Error(err)
return tempSpec, err
}
// "UPDATE `spec` SET `id`='" + newSpec.Id + "', ... WHERE `namespace`='" + nsId + "' AND `id`='" + newSpec.Id + "';"
_, err = common.ORM.Update(&newSpec, &TbSpecInfo{Namespace: nsId, Id: newSpec.Id})
if err != nil {
fmt.Println(err.Error())
} else {
fmt.Println("Data inserted successfully..")
}
return tempSpec, nil
}
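// Example (illustrative sketch, not part of the original source): updating the
// hourly cost of an existing spec; the namespace, id, and cost values are
// placeholders.
//
//	newSpec := TbSpecInfo{Id: "aws-us-east-1-t3-large", Cost_per_hour: 0.0832}
//	updated, err := UpdateSpec("ns01", newSpec)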
| ["\"SPIDER_CALL_METHOD\"", "\"CBTUMBLEBUG_ROOT\"", "\"SPIDER_CALL_METHOD\"", "\"CBTUMBLEBUG_ROOT\""] | [] | ["CBTUMBLEBUG_ROOT", "SPIDER_CALL_METHOD"] | [] | ["CBTUMBLEBUG_ROOT", "SPIDER_CALL_METHOD"] | go | 2 | 0 |
web-server/check-server.py | # Copyright 2021 The Fraud Detection Framework Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import os
import sys
import threading
import time
import json
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
import pg
import constants
sys.path.append(constants.FDF_PYD_PATH)
os.environ["PATH"] += ";" + constants.FDF_PYD_PATH
import fdf
from MultiProcessingLog import MultiProcessingLog
logging.getLogger().addHandler(MultiProcessingLog("check-server.txt", "a", 0, 0))
class CheckServer(object):
def __init__(self):
self.info = fdf.Photos.All.INFO()
self.cnn = fdf.Photos.All.CNN(json.dumps({
"modelPath": "./fdf/m88-1.pb",
"modelInput": "input_input",
"modelOutput": "softmax_tensor/Softmax" }))
self.pca = fdf.Photos.All.PCA()
self.benford = fdf.Photos.Jpeg.BENFORD()
self.exif = fdf.Photos.Jpeg.EXIF()
self.quality = fdf.Photos.Jpeg.QUALITY()
def check(self):
while True:
            task = (None, None, None, None, None)  # (file path, source id, task id, user id, methods)
try:
if os.path.exists("server.stop"):
exit()
task = pg.getTask()
                if task[0] is not None:
                    # Run the methods requested by the task, or every available checker by default.
                    methods = json.loads(task[4]) if task[4] is not None and task[4].lower() != "null" else ["info", "cnn", "pca", "benford", "exif", "quality"]
result = dict()
for m in methods:
if m == "info":
result[m] = self.info.check(task[0]).as_dict
elif m == "cnn":
result[m] = self.cnn.check(task[0]).as_dict
elif m == "pca":
result[m] = self.pca.check(task[0]).as_dict
elif m == "benford":
result[m] = self.benford.check(task[0]).as_dict
elif m == "exif":
result[m] = self.exif.check(task[0]).as_dict
elif m == "quality":
result[m] = self.quality.check(task[0]).as_dict
pg.setTaskResult(task[2], json.dumps(result), constants.STATUS_COMPLETED)
except Exception as e:
message = "CHECK EXCEPTION: {}".format(json.dumps({
"exception": MultiProcessingLog.exception2string(e),
"user_id": task[3],
"source_id": task[1],
"task_id": task[2]
}))
logging.info(message)
pg.addMessage(message, constants.MESSAGE_TYPE_ERROR, task[3])
finally:
time.sleep(3)
def run(self):
checkThread = threading.Thread(target=self.check, args=())
checkThread.start()
if __name__ == "__main__":
CheckServer().run()
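# Illustrative sketch (not part of the original script): each fdf checker can
# also be called directly on a single file, assuming the fdf bindings load and
# the file exists; "sample.jpg" is a placeholder.
#
#   checker = fdf.Photos.Jpeg.EXIF()
#   print(checker.check("sample.jpg").as_dict)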
| [] | [] | ["PATH"] | [] | ["PATH"] | python | 1 | 0 |
pkg/clipboard/clipboard.go | package clipboard
import (
"context"
"fmt"
"os"
"os/exec"
"github.com/atotto/clipboard"
"github.com/fatih/color"
"github.com/gopasspw/gopass/internal/notify"
"github.com/gopasspw/gopass/internal/out"
"github.com/gopasspw/gopass/pkg/debug"
)
var (
// Helpers can be overridden at compile time, e.g. go build \
	// -ldflags="-X github.com/gopasspw/gopass/pkg/clipboard.Helpers=termux-api".
Helpers = "xsel or xclip"
// ErrNotSupported is returned when the clipboard is not accessible.
ErrNotSupported = fmt.Errorf("WARNING: No clipboard available. Install " + Helpers + ", provide $GOPASS_CLIPBOARD_COPY_CMD and $GOPASS_CLIPBOARD_CLEAR_CMD or use -f to print to console")
)
// CopyTo copies the given data to the clipboard and enqueues automatic
// clearing of the clipboard.
func CopyTo(ctx context.Context, name string, content []byte, timeout int) error {
debug.Log("Copying to clipboard: %s for %ds", name, timeout)
clipboardCopyCMD := os.Getenv("GOPASS_CLIPBOARD_COPY_CMD")
if clipboardCopyCMD != "" {
if err := callCommand(ctx, clipboardCopyCMD, name, content); err != nil {
_ = notify.Notify(ctx, "gopass - clipboard", "failed to call clipboard copy command")
return fmt.Errorf("failed to call clipboard copy command: %w", err)
}
} else if clipboard.Unsupported {
out.Errorf(ctx, "%s", ErrNotSupported)
_ = notify.Notify(ctx, "gopass - clipboard", fmt.Sprintf("%s", ErrNotSupported))
return nil
} else if err := copyToClipboard(ctx, content); err != nil {
_ = notify.Notify(ctx, "gopass - clipboard", "failed to write to clipboard")
return fmt.Errorf("failed to write to clipboard: %w", err)
}
if timeout < 1 {
timeout = 45
}
if err := clear(ctx, name, content, timeout); err != nil {
_ = notify.Notify(ctx, "gopass - clipboard", "failed to clear clipboard")
return fmt.Errorf("failed to clear clipboard: %w", err)
}
out.Printf(ctx, "✔ Copied %s to clipboard. Will clear in %d seconds.", color.YellowString(name), timeout)
_ = notify.Notify(ctx, "gopass - clipboard", fmt.Sprintf("✔ Copied %s to clipboard. Will clear in %d seconds.", name, timeout))
return nil
}
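// Example (illustrative sketch, not part of the original source): copying a
// secret to the clipboard for 45 seconds; the entry name and value are
// placeholders.
//
//	if err := clipboard.CopyTo(ctx, "websites/example.com", []byte("s3cr3t"), 45); err != nil {
//		return err
//	}
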
func callCommand(ctx context.Context, cmd string, parameter string, stdinValue []byte) error {
clipboardProcess := exec.Command(cmd, parameter)
	stdin, err := clipboardProcess.StdinPipe()
	if err != nil {
		return fmt.Errorf("failed to create stdin pipe: %w", err)
	}
	// Defer the close only after the error check, otherwise stdin could be nil.
	defer func() {
		_ = stdin.Close()
	}()
if err = clipboardProcess.Start(); err != nil {
return fmt.Errorf("failed to start clipboard process: %w", err)
}
if _, err = stdin.Write(stdinValue); err != nil {
return fmt.Errorf("failed to write to STDIN: %w", err)
}
// Force STDIN close before we wait for the process to finish, so we avoid deadlocks
if err = stdin.Close(); err != nil {
return fmt.Errorf("failed to close STDIN: %w", err)
}
if err := clipboardProcess.Wait(); err != nil {
return fmt.Errorf("failed to call clipboard command: %w", err)
}
return nil
}
func killProc(pid int) {
// err should be always nil, but just to be sure
proc, err := os.FindProcess(pid)
if err != nil {
return
}
// we ignore this error as we're going to return nil anyway
_ = proc.Kill()
}
| ["\"GOPASS_CLIPBOARD_COPY_CMD\""] | [] | ["GOPASS_CLIPBOARD_COPY_CMD"] | [] | ["GOPASS_CLIPBOARD_COPY_CMD"] | go | 1 | 0 |
lib/discover/debug.go | // Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package discover
import (
"os"
"strings"
"github.com/syncthing/syncthing/lib/logger"
)
var (
l = logger.DefaultLogger.NewFacility("discover", "Remote device discovery")
)
func init() {
l.SetDebug("discover", strings.Contains(os.Getenv("STTRACE"), "discover") || os.Getenv("STTRACE") == "all")
}
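// Illustrative note (not part of the original source): starting Syncthing with
// STTRACE=discover (or STTRACE=all) in the environment enables the debug
// logging configured above, e.g.
//
//	STTRACE=discover syncthing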
| ["\"STTRACE\"", "\"STTRACE\""] | [] | ["STTRACE"] | [] | ["STTRACE"] | go | 1 | 0 |
quickstart/python/sms/example-2/send_notifications.6.x.py | #!/usr/bin/env python
# Download the twilio-python library from twilio.com/docs/libraries/python
import os
from twilio.rest import Client
# Find these values at https://twilio.com/user/account
# To set up environmental variables, see http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = "YYYYYYYYYYYYYYYYYY"
client = Client(account_sid, auth_token)
message = client.api.account.messages.create(
to="+12316851234",
from_="+15555555555",
body="Hello there!",
media_url=['https://demo.twilio.com/owl.png',
'https://demo.twilio.com/logo.png'])
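# Illustrative note (not part of the original sample): the account SID is read
# from the environment, so export it before running this script, for example:
#   export TWILIO_ACCOUNT_SID=ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
# The auth token above is a placeholder and must be replaced with a real value.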
| [] | [] | ["TWILIO_ACCOUNT_SID"] | [] | ["TWILIO_ACCOUNT_SID"] | python | 1 | 0 |
tests/testutils/base.py | import os
import unittest
import django
import django.test
from google.appengine.ext import testbed
from testutils import data_generator
class ServerTestsBase(unittest.TestCase):
def init_testbed_stubs(self):
"""Initializes the App Engine testbed stubs.
Subclasses can override this, but should include the user stub even if
they don't need it directly (it seems to be required).
"""
self.testbed.init_user_stub()
self.testbed.init_datastore_v3_stub()
def setUp(self):
self.data_generator = data_generator.TestDataGenerator()
self.testbed = testbed.Testbed()
self.testbed.activate()
self.init_testbed_stubs()
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
django.setup()
django.test.utils.setup_test_environment()
self.client = django.test.Client()
def tearDown(self):
self.testbed.deactivate()
django.test.utils.teardown_test_environment()
| [] | [] | [] | [] | [] | python | 0 | 0 |
pkg/gasper/login.go | package gasper
import (
"errors"
"fmt"
"github.com/docker/cli/cli/config"
"github.com/docker/cli/cli/config/types"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
"io/ioutil"
"os"
"strings"
)
func Login(serverAddress, username, password string, passwordStdin bool) error {
if passwordStdin {
contents, err := ioutil.ReadAll(os.Stdin)
if err != nil {
return err
}
password = strings.TrimSuffix(string(contents), "\n")
password = strings.TrimSuffix(password, "\r")
}
if username == "" || password == "" {
return errors.New("Username and password required")
}
cf, err := config.Load(os.Getenv("DOCKER_CONFIG"))
if err != nil {
return err
}
creds := cf.GetCredentialsStore(serverAddress)
if serverAddress == name.DefaultRegistry {
serverAddress = authn.DefaultAuthKey
}
if err := creds.Store(types.AuthConfig{
ServerAddress: serverAddress,
Username: username,
Password: password,
}); err != nil {
return err
}
fmt.Println("Login succeeded")
return nil
}
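// Example (illustrative sketch, not part of the original source): logging in to
// the default registry with credentials passed as arguments; the values are
// placeholders.
//
//	if err := gasper.Login("index.docker.io", "someuser", "somepassword", false); err != nil {
//		log.Fatal(err)
//	}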
| ["\"DOCKER_CONFIG\""] | [] | ["DOCKER_CONFIG"] | [] | ["DOCKER_CONFIG"] | go | 1 | 0 |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fb_bot.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [] | [] | [] | [] | [] | python | 0 | 0 |
src/main/java/com/amazonaws/kvstranscribestreaming/lambda/TranscriptionWebSocketIntegrationLambda.java | package com.amazonaws.kvstranscribestreaming.lambda;
import com.amazonaws.kvstranscribestreaming.constants.WebSocketMappingDDBConstants;
import com.amazonaws.kvstranscribestreaming.constants.WebsocketConnectionDDBConstants;
import com.amazonaws.regions.Regions;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder;
import com.amazonaws.services.dynamodbv2.document.DynamoDB;
import com.amazonaws.services.dynamodbv2.document.Item;
import com.amazonaws.services.dynamodbv2.document.Table;
import com.amazonaws.services.dynamodbv2.document.spec.DeleteItemSpec;
import com.amazonaws.services.dynamodbv2.document.spec.GetItemSpec;
import com.amazonaws.services.lambda.runtime.Context;
import com.amazonaws.services.lambda.runtime.RequestHandler;
import com.amazonaws.services.lambda.runtime.events.APIGatewayV2WebSocketEvent;
import com.amazonaws.services.lambda.runtime.events.APIGatewayV2WebSocketResponse;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.time.Instant;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
/**
* Integration function that processes and responds web socket connection request through API Gateway.
*
* 1. When client connects to API Gateway (route key: $connect), Lambda does nothing.
* 2. When client is ready to receive transcription (route key: transcribe), Lambda stores both from and to numbers in the mapping table, also
* stores connection and associated numbers in the connection table.
* 3. When client disconnects, Lambda removes the numbers and connection from mapping and connection table.
*/
public class TranscriptionWebSocketIntegrationLambda implements RequestHandler<APIGatewayV2WebSocketEvent, APIGatewayV2WebSocketResponse> {
private static final Logger logger = LoggerFactory.getLogger(TranscriptionWebSocketIntegrationLambda.class);
private static final String LAMBDA_KEY_PREFIX = "TranscriptionWebSocketIntegrationLambda:";
private static final ObjectMapper objectMapper = new ObjectMapper()
.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
private static final String WEB_SOCKET_MAPPING_TABLE = System.getenv("WEB_SOCKET_MAPPING_TABLE");
private static final String WEB_SOCKET_CONNECTION_TABLE = System.getenv("WEB_SOCKET_CONNECTION_TABLE");
private static final String TRANSCRIBE_ROUTE_KEY = System.getenv("TRANSCRIBE_ROUTE_KEY");
private static final String DISCONNECT_ROUTE_KEY = "$disconnect";
private static final Regions AWS_REGION = Regions.fromName(System.getenv("AWS_REGION"));
private static final DynamoDB dynamoDB = new DynamoDB(
AmazonDynamoDBClientBuilder.standard().withRegion(AWS_REGION.getName()).build());
@Override
public APIGatewayV2WebSocketResponse handleRequest(APIGatewayV2WebSocketEvent requestEvent, Context context) {
try {
logger.info(LAMBDA_KEY_PREFIX + " received request : " + objectMapper.writeValueAsString(requestEvent));
} catch (JsonProcessingException e) {
logger.error(LAMBDA_KEY_PREFIX + " Error happened where serializing the event", e);
}
logger.info(LAMBDA_KEY_PREFIX + " received context: " + context.toString());
APIGatewayV2WebSocketResponse responseEvent = new APIGatewayV2WebSocketResponse();
try {
String routeKey = requestEvent.getRequestContext().getRouteKey(), connectionId = requestEvent.getRequestContext().getConnectionId();
if (!routeKey.equals(DISCONNECT_ROUTE_KEY) && !routeKey.equals(TRANSCRIBE_ROUTE_KEY)) {
generateResponse(responseEvent, 200, "Success");
return responseEvent;
}
Table mappingTable = dynamoDB.getTable(WEB_SOCKET_MAPPING_TABLE);
Table connectionTable = dynamoDB.getTable(WEB_SOCKET_CONNECTION_TABLE);
// Release all resources when client calls disconnect.
if (routeKey.equals(DISCONNECT_ROUTE_KEY)) {
GetItemSpec spec = new GetItemSpec()
.withPrimaryKey(WebsocketConnectionDDBConstants.CONNECTION_ID, connectionId)
.withConsistentRead(true);
Item item = dynamoDB.getTable(WEB_SOCKET_CONNECTION_TABLE).getItem(spec);
DeleteItemSpec deleteNumberSpec;
if(item.hasAttribute(WebsocketConnectionDDBConstants.ASSOCIATED_NUMBERS)) {
Set<String> numbers = (Set) item.get(WebsocketConnectionDDBConstants.ASSOCIATED_NUMBERS);
// Remove number mappings from the mapping table.
for(String n : numbers) {
deleteNumberSpec = new DeleteItemSpec().withPrimaryKey(WebSocketMappingDDBConstants.NUMBER, n);
mappingTable.deleteItem(deleteNumberSpec);
}
}
// Remove the connection from the connection table.
deleteNumberSpec = new DeleteItemSpec().withPrimaryKey(WebsocketConnectionDDBConstants.CONNECTION_ID, connectionId);
connectionTable.deleteItem(deleteNumberSpec);
generateResponse(responseEvent, 200, "Web socket connection " + connectionId + " with route key " + routeKey + " has been disconnected");
}
// Put DDB resources when client is ready to receive transcription
if (routeKey.equals(TRANSCRIBE_ROUTE_KEY)) {
if(requestEvent.getBody() == null) {
generateResponse(responseEvent, 400, "Must specify body");
return responseEvent;
}
Map<String, String> eventBodyMap = objectMapper.readValue(requestEvent.getBody(), Map.class);
if(eventBodyMap.get("from") == null && eventBodyMap.get("to") == null) {
generateResponse(responseEvent, 400, "Must specify from or to numbers");
return responseEvent;
}
String fromNumber = eventBodyMap.get("from"), toNumber = eventBodyMap.get("to");
Item numberItem, connectionItem;
Set<String> numbers = new HashSet<>();
if(fromNumber != null) {
numbers.add(fromNumber);
numberItem = new Item()
.withPrimaryKey(WebSocketMappingDDBConstants.NUMBER, fromNumber)
.withString(WebSocketMappingDDBConstants.CONNECTION_ID, connectionId)
.withString(WebSocketMappingDDBConstants.UPDATE_TIME, Instant.now().toString());
mappingTable.putItem(numberItem);
}
if(toNumber != null) {
numbers.add(toNumber);
numberItem = new Item()
.withPrimaryKey(WebSocketMappingDDBConstants.NUMBER, toNumber)
.withString(WebSocketMappingDDBConstants.CONNECTION_ID, connectionId)
.withString(WebSocketMappingDDBConstants.UPDATE_TIME, Instant.now().toString());
mappingTable.putItem(numberItem);
}
connectionItem = new Item()
.withPrimaryKey(WebsocketConnectionDDBConstants.CONNECTION_ID, connectionId)
.withStringSet(WebsocketConnectionDDBConstants.ASSOCIATED_NUMBERS, numbers);
connectionTable.putItem(connectionItem);
generateResponse(responseEvent, 200,
"Web socket connection " + connectionId + " with route key " + routeKey + " associated with numbers " + numbers.toString() + " has been established");
}
} catch (Exception e) {
logger.error("{} transcription integration failed. Reason: {}", LAMBDA_KEY_PREFIX, e.getMessage(), e);
            generateResponse(responseEvent, 500, "Transcription web socket integration failed: " + e.getMessage());
}
logger.info("{} response event {}", LAMBDA_KEY_PREFIX, responseEvent);
return responseEvent;
}
private void generateResponse(APIGatewayV2WebSocketResponse responseEvent, Integer statusCode, String message) {
responseEvent.setHeaders(Collections.singletonMap("timeStamp", String.valueOf(System.currentTimeMillis())));
responseEvent.setStatusCode(statusCode);
responseEvent.setBody(message);
}
}
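// Illustrative sketch (not part of the original source): after connecting, a
// client would send a frame like the following to the "transcribe" route so
// that its connection id gets mapped to the given phone numbers. The numbers
// and the route-selection key ("action") are assumptions about how the API
// Gateway web socket API is configured.
//
//   { "action": "transcribe", "from": "+12065550100", "to": "+12065550101" }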
| ["\"WEB_SOCKET_MAPPING_TABLE\"", "\"WEB_SOCKET_CONNECTION_TABLE\"", "\"TRANSCRIBE_ROUTE_KEY\"", "\"AWS_REGION\""] | [] | ["AWS_REGION", "WEB_SOCKET_MAPPING_TABLE", "TRANSCRIBE_ROUTE_KEY", "WEB_SOCKET_CONNECTION_TABLE"] | [] | ["AWS_REGION", "WEB_SOCKET_MAPPING_TABLE", "TRANSCRIBE_ROUTE_KEY", "WEB_SOCKET_CONNECTION_TABLE"] | java | 4 | 0 |
pkg/controller/statusmanager/pod_status.go | package statusmanager
import (
"context"
"encoding/json"
"fmt"
"log"
"os"
"reflect"
"strings"
"time"
configv1 "github.com/openshift/api/config/v1"
operv1 "github.com/openshift/api/operator/v1"
"github.com/openshift/cluster-network-operator/pkg/names"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
crclient "sigs.k8s.io/controller-runtime/pkg/client"
)
const (
// if a rollout has not made any progress by this time,
// mark ourselves as Degraded
ProgressTimeout = 10 * time.Minute
// lastSeenAnnotation - the annotation where we stash our state
lastSeenAnnotation = "network.operator.openshift.io/last-seen-state"
)
// podState is a snapshot of the last-seen-state and last-changed-times
// for pod-creating entities, as marshalled to json in an annotation
type podState struct {
// "public" for marshalling to json, since we can't have complex keys
DaemonsetStates []daemonsetState
DeploymentStates []deploymentState
StatefulsetStates []statefulsetState
}
// daemonsetState is the internal state we use to check if a rollout has
// stalled.
type daemonsetState struct {
ClusteredName
LastSeenStatus appsv1.DaemonSetStatus
LastChangeTime time.Time
}
// deploymentState is the same as daemonsetState.. but for deployments!
type deploymentState struct {
ClusteredName
LastSeenStatus appsv1.DeploymentStatus
LastChangeTime time.Time
}
// statefulsetState is the same as daemonsetState.. but for statefulsets!
type statefulsetState struct {
ClusteredName
LastSeenStatus appsv1.StatefulSetStatus
LastChangeTime time.Time
}
// SetFromPods sets the operator Degraded/Progressing/Available status, based on
// the current status of the manager's DaemonSets, Deployments and StatefulSets.
func (status *StatusManager) SetFromPods() {
status.Lock()
defer status.Unlock()
daemonSets, deployments, statefulSets := status.listAllStatusObjects()
targetLevel := os.Getenv("RELEASE_VERSION")
reachedAvailableLevel := (len(daemonSets) + len(deployments) + len(statefulSets)) > 0
progressing := []string{}
hung := []string{}
daemonsetStates, deploymentStates, statefulsetStates := status.getLastPodState()
if (len(daemonSets) + len(deployments) + len(statefulSets)) == 0 {
progressing = append(progressing, "Deploying")
}
for _, ds := range daemonSets {
dsName := NewClusteredName(ds)
dsProgressing := false
if isNonCritical(ds) && ds.Status.NumberReady == 0 && !status.installComplete {
progressing = append(progressing, fmt.Sprintf("DaemonSet %q is waiting for other operators to become ready", dsName.String()))
dsProgressing = true
} else if ds.Status.UpdatedNumberScheduled < ds.Status.DesiredNumberScheduled {
progressing = append(progressing, fmt.Sprintf("DaemonSet %q update is rolling out (%d out of %d updated)", dsName.String(), ds.Status.UpdatedNumberScheduled, ds.Status.DesiredNumberScheduled))
dsProgressing = true
} else if ds.Status.NumberUnavailable > 0 {
progressing = append(progressing, fmt.Sprintf("DaemonSet %q is not available (awaiting %d nodes)", dsName.String(), ds.Status.NumberUnavailable))
dsProgressing = true
// Check for any pods in CrashLoopBackOff state and mark the operator as degraded if so.
if !isNonCritical(ds) {
hung = append(hung, status.CheckCrashLoopBackOffPods(dsName, ds.Spec.Selector.MatchLabels, "DaemonSet")...)
}
} else if ds.Status.NumberAvailable == 0 { // NOTE: update this if we ever expect empty (unscheduled) daemonsets ~cdc
progressing = append(progressing, fmt.Sprintf("DaemonSet %q is not yet scheduled on any nodes", dsName.String()))
dsProgressing = true
} else if ds.Generation > ds.Status.ObservedGeneration {
progressing = append(progressing, fmt.Sprintf("DaemonSet %q update is being processed (generation %d, observed generation %d)", dsName.String(), ds.Generation, ds.Status.ObservedGeneration))
dsProgressing = true
}
if ds.Annotations["release.openshift.io/version"] != targetLevel {
reachedAvailableLevel = false
}
var dsHung *string
if dsProgressing && !isNonCritical(ds) {
reachedAvailableLevel = false
dsState, exists := daemonsetStates[dsName]
if !exists || !reflect.DeepEqual(dsState.LastSeenStatus, ds.Status) {
dsState.LastChangeTime = time.Now()
ds.Status.DeepCopyInto(&dsState.LastSeenStatus)
daemonsetStates[dsName] = dsState
}
// Catch hung rollouts
if exists && (time.Since(dsState.LastChangeTime)) > ProgressTimeout {
hung = append(hung, fmt.Sprintf("DaemonSet %q rollout is not making progress - last change %s", dsName.String(), dsState.LastChangeTime.Format(time.RFC3339)))
empty := ""
dsHung = &empty
}
} else {
delete(daemonsetStates, dsName)
}
if err := status.setAnnotation(context.TODO(), ds, names.RolloutHungAnnotation, dsHung); err != nil {
log.Printf("Error setting DaemonSet %q annotation: %v", dsName, err)
}
}
for _, ss := range statefulSets {
ssName := NewClusteredName(ss)
ssProgressing := false
if isNonCritical(ss) && ss.Status.ReadyReplicas == 0 && !status.installComplete {
progressing = append(progressing, fmt.Sprintf("StatefulSet %q is waiting for other operators to become ready", ssName.String()))
ssProgressing = true
} else if ss.Status.ReadyReplicas > 0 && ss.Status.ReadyReplicas < ss.Status.Replicas {
progressing = append(progressing, fmt.Sprintf("StatefulSet %q is not available (awaiting %d nodes)", ssName.String(), (ss.Status.Replicas-ss.Status.ReadyReplicas)))
ssProgressing = true
// Check for any pods in CrashLoopBackOff state and mark the operator as degraded if so.
if !isNonCritical(ss) {
hung = append(hung, status.CheckCrashLoopBackOffPods(ssName, ss.Spec.Selector.MatchLabels, "StatefulSet")...)
}
} else if ss.Status.AvailableReplicas == 0 {
progressing = append(progressing, fmt.Sprintf("StatefulSet %q is not yet scheduled on any nodes", ssName.String()))
ssProgressing = true
} else if ss.Status.ObservedGeneration < ss.Generation {
progressing = append(progressing, fmt.Sprintf("StatefulSet %q update is being processed (generation %d, observed generation %d)", ssName.String(), ss.Generation, ss.Status.ObservedGeneration))
ssProgressing = true
}
if ss.Annotations["release.openshift.io/version"] != targetLevel {
reachedAvailableLevel = false
}
var ssHung *string
if ssProgressing && !isNonCritical(ss) {
reachedAvailableLevel = false
ssState, exists := statefulsetStates[ssName]
if !exists || !reflect.DeepEqual(ssState.LastSeenStatus, ss.Status) {
ssState.LastChangeTime = time.Now()
ss.Status.DeepCopyInto(&ssState.LastSeenStatus)
statefulsetStates[ssName] = ssState
}
// Catch hung rollouts
if exists && (time.Since(ssState.LastChangeTime)) > ProgressTimeout {
hung = append(hung, fmt.Sprintf("StatefulSet %q rollout is not making progress - last change %s", ssName.String(), ssState.LastChangeTime.Format(time.RFC3339)))
empty := ""
ssHung = &empty
}
} else {
delete(statefulsetStates, ssName)
}
if err := status.setAnnotation(context.TODO(), ss, names.RolloutHungAnnotation, ssHung); err != nil {
log.Printf("Error setting StatefulSet %q annotation: %v", ssName, err)
}
}
for _, dep := range deployments {
depName := NewClusteredName(dep)
depProgressing := false
if isNonCritical(dep) && dep.Status.UnavailableReplicas > 0 && !status.installComplete {
progressing = append(progressing, fmt.Sprintf("Deployment %q is waiting for other operators to become ready", depName.String()))
depProgressing = true
} else if dep.Status.UnavailableReplicas > 0 {
progressing = append(progressing, fmt.Sprintf("Deployment %q is not available (awaiting %d nodes)", depName.String(), dep.Status.UnavailableReplicas))
depProgressing = true
// Check for any pods in CrashLoopBackOff state and mark the operator as degraded if so.
if !isNonCritical(dep) {
hung = append(hung, status.CheckCrashLoopBackOffPods(depName, dep.Spec.Selector.MatchLabels, "Deployment")...)
}
} else if dep.Status.AvailableReplicas == 0 {
progressing = append(progressing, fmt.Sprintf("Deployment %q is not yet scheduled on any nodes", depName.String()))
depProgressing = true
} else if dep.Status.ObservedGeneration < dep.Generation {
progressing = append(progressing, fmt.Sprintf("Deployment %q update is being processed (generation %d, observed generation %d)", depName.String(), dep.Generation, dep.Status.ObservedGeneration))
depProgressing = true
}
if dep.Annotations["release.openshift.io/version"] != targetLevel {
reachedAvailableLevel = false
}
var depHung *string
if depProgressing && !isNonCritical(dep) {
reachedAvailableLevel = false
depState, exists := deploymentStates[depName]
if !exists || !reflect.DeepEqual(depState.LastSeenStatus, dep.Status) {
depState.LastChangeTime = time.Now()
dep.Status.DeepCopyInto(&depState.LastSeenStatus)
deploymentStates[depName] = depState
}
// Catch hung rollouts
if exists && (time.Since(depState.LastChangeTime)) > ProgressTimeout {
hung = append(hung, fmt.Sprintf("Deployment %q rollout is not making progress - last change %s", depName.String(), depState.LastChangeTime.Format(time.RFC3339)))
empty := ""
depHung = &empty
}
} else {
delete(deploymentStates, depName)
}
if err := status.setAnnotation(context.TODO(), dep, names.RolloutHungAnnotation, depHung); err != nil {
log.Printf("Error setting Deployment %q annotation: %v", depName, err)
}
}
status.setNotDegraded(PodDeployment)
if err := status.setLastPodState(daemonsetStates, deploymentStates, statefulsetStates); err != nil {
log.Printf("Failed to set pod state (continuing): %+v\n", err)
}
if len(progressing) > 0 {
status.setProgressing(PodDeployment, "Deploying", strings.Join(progressing, "\n"))
} else {
status.unsetProgressing(PodDeployment)
}
if reachedAvailableLevel {
status.set(reachedAvailableLevel, operv1.OperatorCondition{
Type: operv1.OperatorStatusTypeAvailable,
Status: operv1.ConditionTrue})
}
if reachedAvailableLevel && len(progressing) == 0 {
status.installComplete = true
}
if len(hung) > 0 {
status.setDegraded(RolloutHung, "RolloutHung", strings.Join(hung, "\n"))
} else {
status.setNotDegraded(RolloutHung)
}
}
// getLastPodState reads the last-seen daemonset + deployment + statefulset
// states from the clusteroperator annotation and parses it. On error, it
// returns an empty state, since this should not block updating operator status.
func (status *StatusManager) getLastPodState() (map[ClusteredName]daemonsetState, map[ClusteredName]deploymentState, map[ClusteredName]statefulsetState) {
	// Start from an empty state with maps allocated; errors below fall back to this.
daemonsetStates := map[ClusteredName]daemonsetState{}
deploymentStates := map[ClusteredName]deploymentState{}
statefulsetStates := map[ClusteredName]statefulsetState{}
// Load the last-seen snapshot from our annotation
co := &configv1.ClusterOperator{ObjectMeta: metav1.ObjectMeta{Name: status.name}}
err := status.client.ClientFor("").CRClient().Get(context.TODO(), types.NamespacedName{Name: status.name}, co)
if err != nil {
log.Printf("Failed to get ClusterOperator: %v", err)
return daemonsetStates, deploymentStates, statefulsetStates
}
lsbytes := co.Annotations[lastSeenAnnotation]
if lsbytes == "" {
return daemonsetStates, deploymentStates, statefulsetStates
}
out := podState{}
err = json.Unmarshal([]byte(lsbytes), &out)
if err != nil {
// No need to return error; just move on
log.Printf("failed to unmashal last-seen-status: %v", err)
return daemonsetStates, deploymentStates, statefulsetStates
}
for _, ds := range out.DaemonsetStates {
daemonsetStates[ds.ClusteredName] = ds
}
for _, ds := range out.DeploymentStates {
deploymentStates[ds.ClusteredName] = ds
}
for _, ss := range out.StatefulsetStates {
statefulsetStates[ss.ClusteredName] = ss
}
return daemonsetStates, deploymentStates, statefulsetStates
}
func (status *StatusManager) setLastPodState(
dss map[ClusteredName]daemonsetState,
deps map[ClusteredName]deploymentState,
sss map[ClusteredName]statefulsetState) error {
ps := podState{
DaemonsetStates: make([]daemonsetState, 0, len(dss)),
DeploymentStates: make([]deploymentState, 0, len(deps)),
StatefulsetStates: make([]statefulsetState, 0, len(sss)),
}
for nsn, ds := range dss {
ds.ClusteredName = nsn
ps.DaemonsetStates = append(ps.DaemonsetStates, ds)
}
for nsn, ds := range deps {
ds.ClusteredName = nsn
ps.DeploymentStates = append(ps.DeploymentStates, ds)
}
for nsn, ss := range sss {
ss.ClusteredName = nsn
ps.StatefulsetStates = append(ps.StatefulsetStates, ss)
}
lsbytes, err := json.Marshal(ps)
if err != nil {
return err
}
co := &configv1.ClusterOperator{ObjectMeta: metav1.ObjectMeta{Name: status.name}}
anno := string(lsbytes)
return status.setAnnotation(context.TODO(), co, lastSeenAnnotation, &anno)
}
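// Illustrative sketch (not part of the original source): the annotation written
// above holds a JSON snapshot roughly of the following shape; the field names of
// the embedded ClusteredName and all values are assumptions for illustration.
//
//	{
//	  "DaemonsetStates": [
//	    {"Namespace": "openshift-multus", "Name": "multus",
//	     "LastSeenStatus": {"desiredNumberScheduled": 3, "updatedNumberScheduled": 2},
//	     "LastChangeTime": "2021-01-01T00:00:00Z"}
//	  ],
//	  "DeploymentStates": [],
//	  "StatefulsetStates": []
//	}
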
// CheckCrashLoopBackOffPods checks for pods (matching the label selector) with
// any containers in the CrashLoopBackoff state. It returns a human-readable string
// for any pod in such a state.
// name should be the name of a DaemonSet or Deployment or StatefulSet.
func (status *StatusManager) CheckCrashLoopBackOffPods(name ClusteredName, selector map[string]string, kind string) []string {
hung := []string{}
pods := &v1.PodList{}
err := status.client.ClientFor(name.ClusterName).CRClient().List(context.TODO(), pods, crclient.InNamespace(name.Namespace), crclient.MatchingLabels(selector))
if err != nil {
log.Printf("Error getting pods from %s %q: %v", kind, name.String(), err)
}
for _, pod := range pods.Items {
for _, container := range pod.Status.ContainerStatuses {
if container.State.Waiting != nil {
if container.State.Waiting.Reason == "CrashLoopBackOff" {
hung = append(hung, fmt.Sprintf("%s %q rollout is not making progress - pod %s is in CrashLoopBackOff State", kind, name.String(), pod.Name))
// we can break once we find at least one container crashing in this pod
break
}
}
}
}
return hung
}
func isNonCritical(obj metav1.Object) bool {
_, exists := obj.GetAnnotations()[names.NonCriticalAnnotation]
return exists
}
func (status *StatusManager) listAllStatusObjects() (dss []*appsv1.DaemonSet, deps []*appsv1.Deployment, sss []*appsv1.StatefulSet) {
selector, err := labels.Parse(generateStatusSelector)
if err != nil {
panic(err) // selector is guaranteed valid, unreachable
}
// these lists can't fail, they're backed by informers
for _, lister := range status.dsListers {
l, _ := lister.List(selector)
dss = append(dss, l...)
}
for _, lister := range status.depListers {
l, _ := lister.List(selector)
deps = append(deps, l...)
}
for _, lister := range status.ssListers {
l, _ := lister.List(selector)
sss = append(sss, l...)
}
return
}
| ["\"RELEASE_VERSION\""] | [] | ["RELEASE_VERSION"] | [] | ["RELEASE_VERSION"] | go | 1 | 0 |
pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/CmdFunctions.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.pulsar.admin.cli;
import static org.apache.commons.lang.StringUtils.isBlank;
import static org.apache.commons.lang.StringUtils.isNotBlank;
import static org.apache.pulsar.common.naming.TopicName.DEFAULT_NAMESPACE;
import static org.apache.pulsar.common.naming.TopicName.PUBLIC_TENANT;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.ParameterException;
import com.beust.jcommander.Parameters;
import com.beust.jcommander.converters.StringConverter;
import com.fasterxml.jackson.core.type.TypeReference;
import com.google.common.annotations.VisibleForTesting;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.reflect.TypeToken;
import java.io.File;
import java.lang.reflect.Field;
import java.lang.reflect.Type;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.function.Supplier;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang.StringUtils;
import org.apache.pulsar.admin.cli.utils.CmdUtils;
import org.apache.pulsar.client.admin.PulsarAdmin;
import org.apache.pulsar.client.admin.PulsarAdminException;
import org.apache.pulsar.client.api.PulsarClientException;
import org.apache.pulsar.client.api.SubscriptionInitialPosition;
import org.apache.pulsar.common.functions.ConsumerConfig;
import org.apache.pulsar.common.functions.FunctionConfig;
import org.apache.pulsar.common.functions.FunctionState;
import org.apache.pulsar.common.functions.ProducerConfig;
import org.apache.pulsar.common.functions.Resources;
import org.apache.pulsar.common.functions.UpdateOptionsImpl;
import org.apache.pulsar.common.functions.Utils;
import org.apache.pulsar.common.functions.WindowConfig;
import org.apache.pulsar.common.util.ObjectMapperFactory;
@Slf4j
@Parameters(commandDescription = "Interface for managing Pulsar Functions "
+ "(lightweight, Lambda-style compute processes that work with Pulsar)")
public class CmdFunctions extends CmdBase {
private final LocalRunner localRunner;
private final CreateFunction creater;
private final DeleteFunction deleter;
private final UpdateFunction updater;
private final GetFunction getter;
private final GetFunctionStatus functionStatus;
@Getter
private final GetFunctionStats functionStats;
private final RestartFunction restart;
private final StopFunction stop;
private final StartFunction start;
private final ListFunctions lister;
private final StateGetter stateGetter;
private final StatePutter statePutter;
private final TriggerFunction triggerer;
private final UploadFunction uploader;
private final DownloadFunction downloader;
/**
* Base command.
*/
@Getter
abstract class BaseCommand extends CliCommand {
@Override
void run() throws Exception {
try {
processArguments();
} catch (Exception e) {
System.err.println(e.getMessage());
System.err.println();
String chosenCommand = jcommander.getParsedCommand();
getUsageFormatter().usage(chosenCommand);
return;
}
runCmd();
}
void processArguments() throws Exception {}
abstract void runCmd() throws Exception;
}
/**
* Namespace level command.
*/
@Getter
abstract class NamespaceCommand extends BaseCommand {
@Parameter(names = "--tenant", description = "The tenant of a Pulsar Function")
protected String tenant;
@Parameter(names = "--namespace", description = "The namespace of a Pulsar Function")
protected String namespace;
@Override
public void processArguments() {
if (tenant == null) {
tenant = PUBLIC_TENANT;
}
if (namespace == null) {
namespace = DEFAULT_NAMESPACE;
}
}
}
/**
* Function level command.
*/
@Getter
abstract class FunctionCommand extends BaseCommand {
@Parameter(names = "--fqfn", description = "The Fully Qualified Function Name (FQFN) for the function")
protected String fqfn;
@Parameter(names = "--tenant", description = "The tenant of a Pulsar Function")
protected String tenant;
@Parameter(names = "--namespace", description = "The namespace of a Pulsar Function")
protected String namespace;
@Parameter(names = "--name", description = "The name of a Pulsar Function")
protected String functionName;
@Override
void processArguments() throws Exception {
super.processArguments();
boolean usesSetters = (null != tenant || null != namespace || null != functionName);
boolean usesFqfn = (null != fqfn);
// Throw an exception if --fqfn is set alongside any combination of --tenant, --namespace, and --name
if (usesFqfn && usesSetters) {
throw new RuntimeException("You must specify either a Fully Qualified Function Name (FQFN) "
+ "or tenant, namespace, and function name");
} else if (usesFqfn) {
// If the --fqfn flag is used, parse tenant, namespace, and name using that flag
String[] fqfnParts = fqfn.split("/");
if (fqfnParts.length != 3) {
throw new RuntimeException(
"Fully qualified function names (FQFNs) must be of the form tenant/namespace/name");
}
tenant = fqfnParts[0];
namespace = fqfnParts[1];
functionName = fqfnParts[2];
} else {
if (tenant == null) {
tenant = PUBLIC_TENANT;
}
if (namespace == null) {
namespace = DEFAULT_NAMESPACE;
}
if (null == functionName) {
throw new RuntimeException(
"You must specify a name for the function or a Fully Qualified Function Name (FQFN)");
}
}
}
}
/**
* Commands that require a function config.
*/
@Getter
abstract class FunctionDetailsCommand extends BaseCommand {
@Parameter(names = "--fqfn", description = "The Fully Qualified Function Name (FQFN) for the function")
protected String fqfn;
@Parameter(names = "--tenant", description = "The tenant of a Pulsar Function")
protected String tenant;
@Parameter(names = "--namespace", description = "The namespace of a Pulsar Function")
protected String namespace;
@Parameter(names = "--name", description = "The name of a Pulsar Function")
protected String functionName;
// for backwards compatibility purposes
@Parameter(names = "--className", description = "The class name of a Pulsar Function", hidden = true)
protected String deprecatedClassName;
@Parameter(names = "--classname", description = "The class name of a Pulsar Function")
protected String className;
@Parameter(names = { "-t", "--function-type" }, description = "The built-in Pulsar Function type")
protected String functionType;
@Parameter(names = "--jar", description = "Path to the JAR file for the function "
+ "(if the function is written in Java). It also supports URL path [http/https/file "
+ "(file protocol assumes that file already exists on worker host)/function "
+ "(package URL from packages management service)] from which worker can download the package.",
listConverter = StringConverter.class)
protected String jarFile;
@Parameter(names = "--py", description = "Path to the main Python file/Python Wheel file for the function "
+ "(if the function is written in Python). It also supports URL path [http/https/file "
+ "(file protocol assumes that file already exists on worker host)/function "
+ "(package URL from packages management service)] from which worker can download the package.",
listConverter = StringConverter.class)
protected String pyFile;
@Parameter(names = "--go", description = "Path to the main Go executable binary for the function "
+ "(if the function is written in Go). It also supports URL path [http/https/file "
+ "(file protocol assumes that file already exists on worker host)/function "
+ "(package URL from packages management service)] from which worker can download the package.")
protected String goFile;
@Parameter(names = {"-i", "--inputs"}, description = "The input topic or "
+ "topics (multiple topics can be specified as a comma-separated list) of a Pulsar Function")
protected String inputs;
// for backwards compatibility purposes
@Parameter(names = "--topicsPattern", description = "TopicsPattern to consume from list of topics "
+ "under a namespace that match the pattern. [--input] and [--topic-pattern] are mutually exclusive. "
+ "Add SerDe class name for a pattern in --custom-serde-inputs (supported for java fun only)",
hidden = true)
protected String deprecatedTopicsPattern;
@Parameter(names = "--topics-pattern", description = "The topic pattern to consume from list of topics "
+ "under a namespace that match the pattern. [--input] and [--topic-pattern] are mutually exclusive. "
+ "Add SerDe class name for a pattern in --custom-serde-inputs (supported for java fun only)")
protected String topicsPattern;
@Parameter(names = {"-o", "--output"},
description = "The output topic of a Pulsar Function (If none is specified, no output is written)")
protected String output;
@Parameter(names = "--producer-config", description = "The custom producer configuration (as a JSON string)")
protected String producerConfig;
// for backwards compatibility purposes
@Parameter(names = "--logTopic",
description = "The topic to which the logs of a Pulsar Function are produced", hidden = true)
protected String deprecatedLogTopic;
@Parameter(names = "--log-topic", description = "The topic to which the logs of a Pulsar Function are produced")
protected String logTopic;
@Parameter(names = {"-st", "--schema-type"}, description = "The builtin schema type or "
+ "custom schema class name to be used for messages output by the function")
protected String schemaType = "";
// for backwards compatibility purposes
@Parameter(names = "--customSerdeInputs",
description = "The map of input topics to SerDe class names (as a JSON string)", hidden = true)
protected String deprecatedCustomSerdeInputString;
@Parameter(names = "--custom-serde-inputs",
description = "The map of input topics to SerDe class names (as a JSON string)")
protected String customSerdeInputString;
@Parameter(names = "--custom-schema-inputs",
description = "The map of input topics to Schema properties (as a JSON string)")
protected String customSchemaInputString;
@Parameter(names = "--custom-schema-outputs",
description = "The map of input topics to Schema properties (as a JSON string)")
protected String customSchemaOutputString;
@Parameter(names = "--input-specs",
description = "The map of inputs to custom configuration (as a JSON string)")
protected String inputSpecs;
// for backwards compatibility purposes
@Parameter(names = "--outputSerdeClassName",
description = "The SerDe class to be used for messages output by the function", hidden = true)
protected String deprecatedOutputSerdeClassName;
@Parameter(names = "--output-serde-classname",
description = "The SerDe class to be used for messages output by the function")
protected String outputSerdeClassName;
// for backwards compatibility purposes
@Parameter(names = "--functionConfigFile", description = "The path to a YAML config file that specifies "
+ "the configuration of a Pulsar Function", hidden = true)
protected String deprecatedFnConfigFile;
@Parameter(names = "--function-config-file",
description = "The path to a YAML config file that specifies the configuration of a Pulsar Function")
protected String fnConfigFile;
// for backwards compatibility purposes
@Parameter(names = "--processingGuarantees", description = "The processing guarantees (aka delivery semantics) "
+ "applied to the function", hidden = true)
protected FunctionConfig.ProcessingGuarantees deprecatedProcessingGuarantees;
@Parameter(names = "--processing-guarantees",
description = "The processing guarantees (aka delivery semantics) applied to the function")
protected FunctionConfig.ProcessingGuarantees processingGuarantees;
// for backwards compatibility purposes
@Parameter(names = "--userConfig", description = "User-defined config key/values", hidden = true)
protected String deprecatedUserConfigString;
@Parameter(names = "--user-config", description = "User-defined config key/values")
protected String userConfigString;
@Parameter(names = "--retainOrdering",
description = "Function consumes and processes messages in order", hidden = true)
protected Boolean deprecatedRetainOrdering;
@Parameter(names = "--retain-ordering", description = "Function consumes and processes messages in order")
protected Boolean retainOrdering;
@Parameter(names = "--retain-key-ordering",
description = "Function consumes and processes messages in key order")
protected Boolean retainKeyOrdering;
@Parameter(names = "--batch-builder", description = "BatcherBuilder provides two types of "
+ "batch construction methods, DEFAULT and KEY_BASED. The default value is: DEFAULT")
protected String batchBuilder;
@Parameter(names = "--forward-source-message-property", description = "Forwarding input message's properties "
+ "to output topic when processing (use false to disable it)", arity = 1)
protected Boolean forwardSourceMessageProperty = true;
@Parameter(names = "--subs-name", description = "Pulsar source subscription name if user wants a specific "
+ "subscription-name for input-topic consumer")
protected String subsName;
@Parameter(names = "--subs-position", description = "Pulsar source subscription position if user wants to "
+ "consume messages from the specified location")
protected SubscriptionInitialPosition subsPosition;
@Parameter(names = "--parallelism", description = "The parallelism factor of a Pulsar Function "
+ "(i.e. the number of function instances to run)")
protected Integer parallelism;
@Parameter(names = "--cpu", description = "The cpu in cores that need to be allocated "
+ "per function instance(applicable only to docker runtime)")
protected Double cpu;
@Parameter(names = "--ram", description = "The ram in bytes that need to be allocated "
+ "per function instance(applicable only to process/docker runtime)")
protected Long ram;
@Parameter(names = "--disk", description = "The disk in bytes that need to be allocated "
+ "per function instance(applicable only to docker runtime)")
protected Long disk;
// for backwards compatibility purposes
@Parameter(names = "--windowLengthCount", description = "The number of messages per window", hidden = true)
protected Integer deprecatedWindowLengthCount;
@Parameter(names = "--window-length-count", description = "The number of messages per window")
protected Integer windowLengthCount;
// for backwards compatibility purposes
@Parameter(names = "--windowLengthDurationMs",
description = "The time duration of the window in milliseconds", hidden = true)
protected Long deprecatedWindowLengthDurationMs;
@Parameter(names = "--window-length-duration-ms",
description = "The time duration of the window in milliseconds")
protected Long windowLengthDurationMs;
// for backwards compatibility purposes
@Parameter(names = "--slidingIntervalCount",
description = "The number of messages after which the window slides", hidden = true)
protected Integer deprecatedSlidingIntervalCount;
@Parameter(names = "--sliding-interval-count",
description = "The number of messages after which the window slides")
protected Integer slidingIntervalCount;
// for backwards compatibility purposes
@Parameter(names = "--slidingIntervalDurationMs",
description = "The time duration after which the window slides", hidden = true)
protected Long deprecatedSlidingIntervalDurationMs;
@Parameter(names = "--sliding-interval-duration-ms",
description = "The time duration after which the window slides")
protected Long slidingIntervalDurationMs;
// for backwards compatibility purposes
@Parameter(names = "--autoAck",
description = "Whether or not the framework acknowledges messages automatically", hidden = true)
protected Boolean deprecatedAutoAck = null;
@Parameter(names = "--auto-ack",
description = "Whether or not the framework acknowledges messages automatically", arity = 1)
protected Boolean autoAck;
// for backwards compatibility purposes
@Parameter(names = "--timeoutMs", description = "The message timeout in milliseconds", hidden = true)
protected Long deprecatedTimeoutMs;
@Parameter(names = "--timeout-ms", description = "The message timeout in milliseconds")
protected Long timeoutMs;
@Parameter(names = "--max-message-retries",
description = "How many times should we try to process a message before giving up")
protected Integer maxMessageRetries;
@Parameter(names = "--custom-runtime-options", description = "A string that encodes options to "
+ "customize the runtime, see docs for configured runtime for details")
protected String customRuntimeOptions;
@Parameter(names = "--secrets", description = "The map of secretName to an object that encapsulates "
+ "how the secret is fetched by the underlying secrets provider")
protected String secretsString;
@Parameter(names = "--dead-letter-topic",
description = "The topic where messages that are not processed successfully are sent to")
protected String deadLetterTopic;
protected FunctionConfig functionConfig;
protected String userCodeFile;
private void mergeArgs() {
if (isBlank(className) && !isBlank(deprecatedClassName)) {
className = deprecatedClassName;
}
if (isBlank(topicsPattern) && !isBlank(deprecatedTopicsPattern)) {
topicsPattern = deprecatedTopicsPattern;
}
if (isBlank(logTopic) && !isBlank(deprecatedLogTopic)) {
logTopic = deprecatedLogTopic;
}
if (isBlank(outputSerdeClassName) && !isBlank(deprecatedOutputSerdeClassName)) {
outputSerdeClassName = deprecatedOutputSerdeClassName;
}
if (isBlank(customSerdeInputString) && !isBlank(deprecatedCustomSerdeInputString)) {
customSerdeInputString = deprecatedCustomSerdeInputString;
}
if (isBlank(fnConfigFile) && !isBlank(deprecatedFnConfigFile)) {
fnConfigFile = deprecatedFnConfigFile;
}
if (processingGuarantees == null && deprecatedProcessingGuarantees != null) {
processingGuarantees = deprecatedProcessingGuarantees;
}
if (isBlank(userConfigString) && !isBlank(deprecatedUserConfigString)) {
userConfigString = deprecatedUserConfigString;
}
if (retainOrdering == null && deprecatedRetainOrdering != null) {
retainOrdering = deprecatedRetainOrdering;
}
if (windowLengthCount == null && deprecatedWindowLengthCount != null) {
windowLengthCount = deprecatedWindowLengthCount;
}
if (windowLengthDurationMs == null && deprecatedWindowLengthDurationMs != null) {
windowLengthDurationMs = deprecatedWindowLengthDurationMs;
}
if (slidingIntervalCount == null && deprecatedSlidingIntervalCount != null) {
slidingIntervalCount = deprecatedSlidingIntervalCount;
}
if (slidingIntervalDurationMs == null && deprecatedSlidingIntervalDurationMs != null) {
slidingIntervalDurationMs = deprecatedSlidingIntervalDurationMs;
}
if (autoAck == null && deprecatedAutoAck != null) {
autoAck = deprecatedAutoAck;
}
if (timeoutMs == null && deprecatedTimeoutMs != null) {
timeoutMs = deprecatedTimeoutMs;
}
}
@Override
void processArguments() throws Exception {
super.processArguments();
// merge deprecated args with new args
mergeArgs();
// Initialize config builder either from a supplied YAML config file or from scratch
if (null != fnConfigFile) {
functionConfig = CmdUtils.loadConfig(fnConfigFile, FunctionConfig.class);
} else {
functionConfig = new FunctionConfig();
}
if (null != fqfn) {
parseFullyQualifiedFunctionName(fqfn, functionConfig);
} else {
if (null != tenant) {
functionConfig.setTenant(tenant);
}
if (null != namespace) {
functionConfig.setNamespace(namespace);
}
if (null != functionName) {
functionConfig.setName(functionName);
}
}
if (null != inputs) {
List<String> inputTopics = Arrays.asList(inputs.split(","));
functionConfig.setInputs(inputTopics);
}
if (null != customSerdeInputString) {
Type type = new TypeToken<Map<String, String>>() {}.getType();
Map<String, String> customSerdeInputMap = new Gson().fromJson(customSerdeInputString, type);
functionConfig.setCustomSerdeInputs(customSerdeInputMap);
}
if (null != customSchemaInputString) {
Type type = new TypeToken<Map<String, String>>() {}.getType();
Map<String, String> customschemaInputMap = new Gson().fromJson(customSchemaInputString, type);
functionConfig.setCustomSchemaInputs(customschemaInputMap);
}
if (null != customSchemaOutputString) {
Type type = new TypeToken<Map<String, String>>() {}.getType();
Map<String, String> customSchemaOutputMap = new Gson().fromJson(customSchemaOutputString, type);
functionConfig.setCustomSchemaOutputs(customSchemaOutputMap);
}
if (null != inputSpecs) {
Type type = new TypeToken<Map<String, ConsumerConfig>>() {}.getType();
functionConfig.setInputSpecs(new Gson().fromJson(inputSpecs, type));
}
if (null != topicsPattern) {
functionConfig.setTopicsPattern(topicsPattern);
}
if (null != output) {
functionConfig.setOutput(output);
}
if (null != producerConfig) {
Type type = new TypeToken<ProducerConfig>() {}.getType();
functionConfig.setProducerConfig(new Gson().fromJson(producerConfig, type));
}
if (null != logTopic) {
functionConfig.setLogTopic(logTopic);
}
if (null != className) {
functionConfig.setClassName(className);
}
if (null != outputSerdeClassName) {
functionConfig.setOutputSerdeClassName(outputSerdeClassName);
}
if (null != schemaType) {
functionConfig.setOutputSchemaType(schemaType);
}
if (null != processingGuarantees) {
functionConfig.setProcessingGuarantees(processingGuarantees);
}
if (null != retainOrdering) {
functionConfig.setRetainOrdering(retainOrdering);
}
if (null != retainKeyOrdering) {
functionConfig.setRetainKeyOrdering(retainKeyOrdering);
}
if (isNotBlank(batchBuilder)) {
functionConfig.setBatchBuilder(batchBuilder);
}
if (null != forwardSourceMessageProperty) {
functionConfig.setForwardSourceMessageProperty(forwardSourceMessageProperty);
}
if (isNotBlank(subsName)) {
functionConfig.setSubName(subsName);
}
if (null != subsPosition) {
functionConfig.setSubscriptionPosition(subsPosition);
}
if (null != userConfigString) {
Type type = new TypeToken<Map<String, Object>>() {}.getType();
Map<String, Object> userConfigMap = new Gson().fromJson(userConfigString, type);
if (userConfigMap == null) {
userConfigMap = new HashMap<>();
}
functionConfig.setUserConfig(userConfigMap);
}
if (parallelism != null) {
functionConfig.setParallelism(parallelism);
}
Resources resources = functionConfig.getResources();
if (cpu != null) {
if (resources == null) {
resources = new Resources();
}
resources.setCpu(cpu);
}
if (ram != null) {
if (resources == null) {
resources = new Resources();
}
resources.setRam(ram);
}
if (disk != null) {
if (resources == null) {
resources = new Resources();
}
resources.setDisk(disk);
}
if (resources != null) {
functionConfig.setResources(resources);
}
if (timeoutMs != null) {
functionConfig.setTimeoutMs(timeoutMs);
}
if (customRuntimeOptions != null) {
functionConfig.setCustomRuntimeOptions(customRuntimeOptions);
}
if (secretsString != null) {
Type type = new TypeToken<Map<String, Object>>() {}.getType();
Map<String, Object> secretsMap = new Gson().fromJson(secretsString, type);
if (secretsMap == null) {
secretsMap = Collections.emptyMap();
}
functionConfig.setSecrets(secretsMap);
}
// window configs
WindowConfig windowConfig = functionConfig.getWindowConfig();
if (null != windowLengthCount) {
if (windowConfig == null) {
windowConfig = new WindowConfig();
}
windowConfig.setWindowLengthCount(windowLengthCount);
}
if (null != windowLengthDurationMs) {
if (windowConfig == null) {
windowConfig = new WindowConfig();
}
windowConfig.setWindowLengthDurationMs(windowLengthDurationMs);
}
if (null != slidingIntervalCount) {
if (windowConfig == null) {
windowConfig = new WindowConfig();
}
windowConfig.setSlidingIntervalCount(slidingIntervalCount);
}
if (null != slidingIntervalDurationMs) {
if (windowConfig == null) {
windowConfig = new WindowConfig();
}
windowConfig.setSlidingIntervalDurationMs(slidingIntervalDurationMs);
}
functionConfig.setWindowConfig(windowConfig);
if (autoAck != null) {
functionConfig.setAutoAck(autoAck);
}
if (null != maxMessageRetries) {
functionConfig.setMaxMessageRetries(maxMessageRetries);
}
if (null != deadLetterTopic) {
functionConfig.setDeadLetterTopic(deadLetterTopic);
}
if (jarFile != null && functionType != null) {
throw new ParameterException("Cannot specify both jar and function-type");
}
if (null != jarFile) {
functionConfig.setJar(jarFile);
}
if (functionType != null) {
functionConfig.setJar("builtin://" + functionType);
} else if (functionConfig.getFunctionType() != null) {
functionConfig.setJar("builtin://" + functionConfig.getFunctionType());
}
if (null != pyFile) {
functionConfig.setPy(pyFile);
}
if (null != goFile) {
functionConfig.setGo(goFile);
}
if (functionConfig.getJar() != null) {
userCodeFile = functionConfig.getJar();
} else if (functionConfig.getPy() != null) {
userCodeFile = functionConfig.getPy();
} else if (functionConfig.getGo() != null) {
userCodeFile = functionConfig.getGo();
}
// check if configs are valid
validateFunctionConfigs(functionConfig);
}
protected void validateFunctionConfigs(FunctionConfig functionConfig) {
// go doesn't need className
if (functionConfig.getPy() != null
|| (functionConfig.getJar() != null && !functionConfig.getJar().startsWith("builtin://"))) {
if (StringUtils.isEmpty(functionConfig.getClassName())) {
throw new ParameterException("No Function Classname specified");
}
}
if (StringUtils.isEmpty(functionConfig.getName())) {
org.apache.pulsar.common.functions.Utils.inferMissingFunctionName(functionConfig);
}
if (StringUtils.isEmpty(functionConfig.getName())) {
throw new IllegalArgumentException("No Function name specified");
}
if (StringUtils.isEmpty(functionConfig.getTenant())) {
org.apache.pulsar.common.functions.Utils.inferMissingTenant(functionConfig);
}
if (StringUtils.isEmpty(functionConfig.getNamespace())) {
org.apache.pulsar.common.functions.Utils.inferMissingNamespace(functionConfig);
}
            if ((isNotBlank(functionConfig.getJar()) && isNotBlank(functionConfig.getPy()))
                    || (isNotBlank(functionConfig.getJar()) && isNotBlank(functionConfig.getGo()))
                    || (isNotBlank(functionConfig.getPy()) && isNotBlank(functionConfig.getGo()))) {
                throw new ParameterException("Either a Java jar or a Python file or a Go executable binary needs to"
                        + " be specified for the function. Cannot specify more than one.");
            }
if (isBlank(functionConfig.getJar()) && isBlank(functionConfig.getPy())
&& isBlank(functionConfig.getGo())) {
throw new ParameterException("Either a Java jar or a Python file or a Go executable binary needs to"
+ " be specified for the function. Please specify one.");
}
if (!isBlank(functionConfig.getJar()) && !functionConfig.getJar().startsWith("builtin://")
&& !Utils.isFunctionPackageUrlSupported(functionConfig.getJar())
&& !new File(functionConfig.getJar()).exists()) {
throw new ParameterException("The specified jar file does not exist");
}
if (!isBlank(functionConfig.getPy()) && !Utils.isFunctionPackageUrlSupported(functionConfig.getPy())
&& !new File(functionConfig.getPy()).exists()) {
throw new ParameterException("The specified python file does not exist");
}
if (!isBlank(functionConfig.getGo()) && !Utils.isFunctionPackageUrlSupported(functionConfig.getGo())
&& !new File(functionConfig.getGo()).exists()) {
throw new ParameterException("The specified go executable binary does not exist");
}
}
}
@Parameters(commandDescription = "Run a Pulsar Function locally, rather than deploy to a Pulsar cluster)")
class LocalRunner extends FunctionDetailsCommand {
// TODO: this should become BookKeeper URL and it should be fetched from Pulsar client.
// for backwards compatibility purposes
@Parameter(names = "--stateStorageServiceUrl", description = "The URL for the state storage service "
+ "(the default is Apache BookKeeper)", hidden = true)
protected String deprecatedStateStorageServiceUrl;
@Parameter(names = "--state-storage-service-url", description = "The URL for the state storage service "
+ "(the default is Apache BookKeeper)")
protected String stateStorageServiceUrl;
// for backwards compatibility purposes
@Parameter(names = "--brokerServiceUrl", description = "The URL for Pulsar broker", hidden = true)
protected String deprecatedBrokerServiceUrl;
@Parameter(names = "--broker-service-url", description = "The URL for Pulsar broker")
protected String brokerServiceUrl;
@Parameter(names = "--web-service-url", description = "The URL for Pulsar web service")
protected String webServiceUrl = null;
// for backwards compatibility purposes
@Parameter(names = "--clientAuthPlugin", description = "Client authentication plugin using "
+ "which function-process can connect to broker", hidden = true)
protected String deprecatedClientAuthPlugin;
@Parameter(names = "--client-auth-plugin",
description = "Client authentication plugin using which function-process can connect to broker")
protected String clientAuthPlugin;
// for backwards compatibility purposes
@Parameter(names = "--clientAuthParams", description = "Client authentication param", hidden = true)
protected String deprecatedClientAuthParams;
@Parameter(names = "--client-auth-params", description = "Client authentication param")
protected String clientAuthParams;
// for backwards compatibility purposes
@Parameter(names = "--use_tls", description = "Use tls connection", hidden = true)
protected Boolean deprecatedUseTls = null;
@Parameter(names = "--use-tls", description = "Use tls connection")
protected boolean useTls;
// for backwards compatibility purposes
@Parameter(names = "--tls_allow_insecure", description = "Allow insecure tls connection", hidden = true)
protected Boolean deprecatedTlsAllowInsecureConnection = null;
@Parameter(names = "--tls-allow-insecure", description = "Allow insecure tls connection")
protected boolean tlsAllowInsecureConnection;
// for backwards compatibility purposes
@Parameter(names = "--hostname_verification_enabled",
description = "Enable hostname verification", hidden = true)
protected Boolean deprecatedTlsHostNameVerificationEnabled = null;
@Parameter(names = "--hostname-verification-enabled", description = "Enable hostname verification")
protected boolean tlsHostNameVerificationEnabled;
// for backwards compatibility purposes
@Parameter(names = "--tls_trust_cert_path", description = "tls trust cert file path", hidden = true)
protected String deprecatedTlsTrustCertFilePath;
@Parameter(names = "--tls-trust-cert-path", description = "tls trust cert file path")
protected String tlsTrustCertFilePath;
// for backwards compatibility purposes
@Parameter(names = "--instanceIdOffset", description = "Start the instanceIds from this offset", hidden = true)
protected Integer deprecatedInstanceIdOffset = null;
@Parameter(names = "--instance-id-offset", description = "Start the instanceIds from this offset")
protected Integer instanceIdOffset = 0;
@Parameter(names = "--runtime", description = "either THREAD or PROCESS. Only applies for Java functions")
protected String runtime;
@Parameter(names = "--secrets-provider-classname", description = "Whats the classname for secrets provider")
protected String secretsProviderClassName;
@Parameter(names = "--secrets-provider-config",
description = "Config that needs to be passed to secrets provider")
protected String secretsProviderConfig;
@Parameter(names = "--metrics-port-start", description = "The starting port range for metrics server")
protected String metricsPortStart;
private void mergeArgs() {
if (isBlank(stateStorageServiceUrl) && !isBlank(deprecatedStateStorageServiceUrl)) {
stateStorageServiceUrl = deprecatedStateStorageServiceUrl;
}
if (isBlank(brokerServiceUrl) && !isBlank(deprecatedBrokerServiceUrl)) {
brokerServiceUrl = deprecatedBrokerServiceUrl;
}
if (isBlank(clientAuthPlugin) && !isBlank(deprecatedClientAuthPlugin)) {
clientAuthPlugin = deprecatedClientAuthPlugin;
}
if (isBlank(clientAuthParams) && !isBlank(deprecatedClientAuthParams)) {
clientAuthParams = deprecatedClientAuthParams;
}
if (!useTls && deprecatedUseTls != null) {
useTls = deprecatedUseTls;
}
if (!tlsAllowInsecureConnection && deprecatedTlsAllowInsecureConnection != null) {
tlsAllowInsecureConnection = deprecatedTlsAllowInsecureConnection;
}
if (!tlsHostNameVerificationEnabled && deprecatedTlsHostNameVerificationEnabled != null) {
tlsHostNameVerificationEnabled = deprecatedTlsHostNameVerificationEnabled;
}
if (isBlank(tlsTrustCertFilePath) && !isBlank(deprecatedTlsTrustCertFilePath)) {
tlsTrustCertFilePath = deprecatedTlsTrustCertFilePath;
}
if (instanceIdOffset == null && deprecatedInstanceIdOffset != null) {
instanceIdOffset = deprecatedInstanceIdOffset;
}
}
@Override
void runCmd() throws Exception {
// merge deprecated args with new args
mergeArgs();
List<String> localRunArgs = new LinkedList<>();
localRunArgs.add(System.getenv("PULSAR_HOME") + "/bin/function-localrunner");
localRunArgs.add("--functionConfig");
localRunArgs.add(new Gson().toJson(functionConfig));
for (Field field : this.getClass().getDeclaredFields()) {
if (field.getName().startsWith("DEPRECATED")) {
continue;
}
if (field.getName().contains("$")) {
continue;
}
Object value = field.get(this);
if (value != null) {
localRunArgs.add("--" + field.getName());
localRunArgs.add(value.toString());
}
}
ProcessBuilder processBuilder = new ProcessBuilder(localRunArgs).inheritIO();
Process process = processBuilder.start();
process.waitFor();
}
}
@Parameters(commandDescription = "Create a Pulsar Function in cluster mode (deploy it on a Pulsar cluster)")
class CreateFunction extends FunctionDetailsCommand {
@Override
void runCmd() throws Exception {
if (Utils.isFunctionPackageUrlSupported(functionConfig.getJar())) {
getAdmin().functions().createFunctionWithUrl(functionConfig, functionConfig.getJar());
} else if (Utils.isFunctionPackageUrlSupported(functionConfig.getPy())) {
getAdmin().functions().createFunctionWithUrl(functionConfig, functionConfig.getPy());
} else if (Utils.isFunctionPackageUrlSupported(functionConfig.getGo())) {
getAdmin().functions().createFunctionWithUrl(functionConfig, functionConfig.getGo());
} else {
getAdmin().functions().createFunction(functionConfig, userCodeFile);
}
print("Created successfully");
}
}
@Parameters(commandDescription = "Fetch information about a Pulsar Function")
class GetFunction extends FunctionCommand {
@Override
void runCmd() throws Exception {
FunctionConfig functionConfig = getAdmin().functions().getFunction(tenant, namespace, functionName);
Gson gson = new GsonBuilder().setPrettyPrinting().create();
System.out.println(gson.toJson(functionConfig));
}
}
@Parameters(commandDescription = "Check the current status of a Pulsar Function")
class GetFunctionStatus extends FunctionCommand {
@Parameter(names = "--instance-id", description = "The function instanceId "
+ "(Get-status of all instances if instance-id is not provided)")
protected String instanceId;
@Override
void runCmd() throws Exception {
if (isBlank(instanceId)) {
print(getAdmin().functions().getFunctionStatus(tenant, namespace, functionName));
} else {
print(getAdmin().functions()
.getFunctionStatus(tenant, namespace, functionName, Integer.parseInt(instanceId)));
}
}
}
@Parameters(commandDescription = "Get the current stats of a Pulsar Function")
class GetFunctionStats extends FunctionCommand {
@Parameter(names = "--instance-id", description = "The function instanceId "
+ "(Get-stats of all instances if instance-id is not provided)")
protected String instanceId;
@Override
void runCmd() throws Exception {
if (isBlank(instanceId)) {
print(getAdmin().functions().getFunctionStats(tenant, namespace, functionName));
} else {
print(getAdmin().functions()
.getFunctionStats(tenant, namespace, functionName, Integer.parseInt(instanceId)));
}
}
}
@Parameters(commandDescription = "Restart function instance")
class RestartFunction extends FunctionCommand {
@Parameter(names = "--instance-id", description = "The function instanceId "
+ "(restart all instances if instance-id is not provided)")
protected String instanceId;
@Override
void runCmd() throws Exception {
if (isNotBlank(instanceId)) {
try {
getAdmin().functions()
.restartFunction(tenant, namespace, functionName, Integer.parseInt(instanceId));
} catch (NumberFormatException e) {
System.err.println("instance-id must be a number");
}
} else {
getAdmin().functions().restartFunction(tenant, namespace, functionName);
}
System.out.println("Restarted successfully");
}
}
@Parameters(commandDescription = "Stops function instance")
class StopFunction extends FunctionCommand {
@Parameter(names = "--instance-id", description = "The function instanceId "
+ "(stop all instances if instance-id is not provided)")
protected String instanceId;
@Override
void runCmd() throws Exception {
if (isNotBlank(instanceId)) {
try {
getAdmin().functions().stopFunction(tenant, namespace, functionName, Integer.parseInt(instanceId));
} catch (NumberFormatException e) {
System.err.println("instance-id must be a number");
}
} else {
getAdmin().functions().stopFunction(tenant, namespace, functionName);
}
System.out.println("Stopped successfully");
}
}
@Parameters(commandDescription = "Starts a stopped function instance")
class StartFunction extends FunctionCommand {
@Parameter(names = "--instance-id", description = "The function instanceId "
+ "(start all instances if instance-id is not provided)")
protected String instanceId;
@Override
void runCmd() throws Exception {
if (isNotBlank(instanceId)) {
try {
getAdmin().functions().startFunction(tenant, namespace, functionName, Integer.parseInt(instanceId));
} catch (NumberFormatException e) {
System.err.println("instance-id must be a number");
}
} else {
getAdmin().functions().startFunction(tenant, namespace, functionName);
}
System.out.println("Started successfully");
}
}
@Parameters(commandDescription = "Delete a Pulsar Function that is running on a Pulsar cluster")
class DeleteFunction extends FunctionCommand {
@Override
void runCmd() throws Exception {
getAdmin().functions().deleteFunction(tenant, namespace, functionName);
print("Deleted successfully");
}
}
@Parameters(commandDescription = "Update a Pulsar Function that has been deployed to a Pulsar cluster")
class UpdateFunction extends FunctionDetailsCommand {
@Parameter(names = "--update-auth-data", description = "Whether or not to update the auth data")
protected boolean updateAuthData;
@Override
protected void validateFunctionConfigs(FunctionConfig functionConfig) {
if (StringUtils.isEmpty(functionConfig.getName())) {
org.apache.pulsar.common.functions.Utils.inferMissingFunctionName(functionConfig);
}
if (StringUtils.isEmpty(functionConfig.getName())) {
throw new ParameterException("Function Name not provided");
}
if (StringUtils.isEmpty(functionConfig.getTenant())) {
org.apache.pulsar.common.functions.Utils.inferMissingTenant(functionConfig);
}
if (StringUtils.isEmpty(functionConfig.getNamespace())) {
org.apache.pulsar.common.functions.Utils.inferMissingNamespace(functionConfig);
}
}
@Override
void runCmd() throws Exception {
UpdateOptionsImpl updateOptions = new UpdateOptionsImpl();
updateOptions.setUpdateAuthData(updateAuthData);
if (Utils.isFunctionPackageUrlSupported(functionConfig.getJar())) {
getAdmin().functions().updateFunctionWithUrl(functionConfig, functionConfig.getJar(), updateOptions);
} else {
getAdmin().functions().updateFunction(functionConfig, userCodeFile, updateOptions);
}
print("Updated successfully");
}
}
@Parameters(commandDescription = "List all Pulsar Functions running under a specific tenant and namespace")
class ListFunctions extends NamespaceCommand {
@Override
void runCmd() throws Exception {
print(getAdmin().functions().getFunctions(tenant, namespace));
}
}
@Parameters(commandDescription = "Fetch the current state associated with a Pulsar Function")
class StateGetter extends FunctionCommand {
@Parameter(names = { "-k", "--key" }, description = "Key name of State")
private String key = null;
@Parameter(names = { "-w", "--watch" }, description = "Watch for changes in the value associated with a key "
+ "for a Pulsar Function")
private boolean watch = false;
@Override
void runCmd() throws Exception {
if (isBlank(key)) {
throw new ParameterException("State key needs to be specified");
}
do {
try {
FunctionState functionState = getAdmin().functions()
.getFunctionState(tenant, namespace, functionName, key);
Gson gson = new GsonBuilder().setPrettyPrinting().create();
System.out.println(gson.toJson(functionState));
} catch (PulsarAdminException pae) {
if (pae.getStatusCode() == 404 && watch) {
System.err.println(pae.getMessage());
} else {
throw pae;
}
}
if (watch) {
Thread.sleep(1000);
}
} while (watch);
}
}
@Parameters(commandDescription = "Put the state associated with a Pulsar Function")
class StatePutter extends FunctionCommand {
@Parameter(names = { "-s", "--state" }, description = "The FunctionState that needs to be put", required = true)
private String state = null;
@Override
void runCmd() throws Exception {
TypeReference<FunctionState> typeRef = new TypeReference<FunctionState>() {};
FunctionState stateRepr = ObjectMapperFactory.getThreadLocal().readValue(state, typeRef);
getAdmin().functions()
.putFunctionState(tenant, namespace, functionName, stateRepr);
}
}
@Parameters(commandDescription = "Trigger the specified Pulsar Function with a supplied value")
class TriggerFunction extends FunctionCommand {
// for backward compatibility purposes
@Parameter(names = "--triggerValue",
description = "The value with which you want to trigger the function", hidden = true)
protected String deprecatedTriggerValue;
@Parameter(names = "--trigger-value", description = "The value with which you want to trigger the function")
protected String triggerValue;
// for backward compatibility purposes
@Parameter(names = "--triggerFile", description = "The path to the file that contains the data with which "
+ "you want to trigger the function", hidden = true)
protected String deprecatedTriggerFile;
@Parameter(names = "--trigger-file", description = "The path to the file that contains the data with which "
+ "you want to trigger the function")
protected String triggerFile;
@Parameter(names = "--topic", description = "The specific topic name that the function consumes from that"
+ " you want to inject the data to")
protected String topic;
public void mergeArgs() {
if (isBlank(triggerValue) && !isBlank(deprecatedTriggerValue)) {
triggerValue = deprecatedTriggerValue;
}
if (isBlank(triggerFile) && !isBlank(deprecatedTriggerFile)) {
triggerFile = deprecatedTriggerFile;
}
}
@Override
void runCmd() throws Exception {
// merge deprecated args with new args
mergeArgs();
if (triggerFile == null && triggerValue == null) {
throw new ParameterException("Either a trigger value or a trigger filepath needs to be specified");
}
String retval = getAdmin().functions()
.triggerFunction(tenant, namespace, functionName, topic, triggerValue, triggerFile);
System.out.println(retval);
}
}
@Parameters(commandDescription = "Upload File Data to Pulsar", hidden = true)
class UploadFunction extends BaseCommand {
// for backward compatibility purposes
@Parameter(
names = "--sourceFile",
description = "The file whose contents need to be uploaded",
listConverter = StringConverter.class, hidden = true)
protected String deprecatedSourceFile;
@Parameter(
names = "--source-file",
description = "The file whose contents need to be uploaded",
listConverter = StringConverter.class)
protected String sourceFile;
@Parameter(
names = "--path",
description = "Path or functionPkgUrl where the contents need to be stored",
listConverter = StringConverter.class, required = true)
protected String path;
private void mergeArgs() {
if (isBlank(sourceFile) && !isBlank(deprecatedSourceFile)) {
sourceFile = deprecatedSourceFile;
}
}
@Override
void runCmd() throws Exception {
// merge deprecated args with new args
mergeArgs();
if (StringUtils.isBlank(sourceFile)) {
throw new ParameterException("--source-file needs to be specified");
}
getAdmin().functions().uploadFunction(sourceFile, path);
print("Uploaded successfully");
}
}
@Parameters(commandDescription = "Download File Data from Pulsar", hidden = true)
class DownloadFunction extends FunctionCommand {
// for backward compatibility purposes
@Parameter(
names = "--destinationFile",
description = "The file to store downloaded content",
listConverter = StringConverter.class, hidden = true)
protected String deprecatedDestinationFile;
@Parameter(
names = "--destination-file",
description = "The file to store downloaded content",
listConverter = StringConverter.class)
protected String destinationFile;
@Parameter(
names = "--path",
description = "Path or functionPkgUrl to store the content",
listConverter = StringConverter.class, required = false, hidden = true)
protected String path;
private void mergeArgs() {
if (isBlank(destinationFile) && !isBlank(deprecatedDestinationFile)) {
destinationFile = deprecatedDestinationFile;
}
}
@Override
void processArguments() throws Exception {
if (path == null) {
super.processArguments();
}
}
@Override
void runCmd() throws Exception {
// merge deprecated args with new args
mergeArgs();
if (StringUtils.isBlank(destinationFile)) {
throw new ParameterException("--destination-file needs to be specified");
}
if (path != null) {
getAdmin().functions().downloadFunction(destinationFile, path);
} else {
getAdmin().functions().downloadFunction(destinationFile, tenant, namespace, functionName);
}
print("Downloaded successfully");
}
}
public CmdFunctions(Supplier<PulsarAdmin> admin) throws PulsarClientException {
super("functions", admin);
localRunner = new LocalRunner();
creater = new CreateFunction();
deleter = new DeleteFunction();
updater = new UpdateFunction();
getter = new GetFunction();
functionStatus = new GetFunctionStatus();
functionStats = new GetFunctionStats();
lister = new ListFunctions();
stateGetter = new StateGetter();
statePutter = new StatePutter();
triggerer = new TriggerFunction();
uploader = new UploadFunction();
downloader = new DownloadFunction();
restart = new RestartFunction();
stop = new StopFunction();
start = new StartFunction();
jcommander.addCommand("localrun", getLocalRunner());
jcommander.addCommand("create", getCreater());
jcommander.addCommand("delete", getDeleter());
jcommander.addCommand("update", getUpdater());
jcommander.addCommand("get", getGetter());
jcommander.addCommand("restart", getRestarter());
jcommander.addCommand("stop", getStopper());
jcommander.addCommand("start", getStarter());
        // TODO deprecate getstatus
jcommander.addCommand("status", getStatuser(), "getstatus");
jcommander.addCommand("stats", getFunctionStats());
jcommander.addCommand("list", getLister());
jcommander.addCommand("querystate", getStateGetter());
jcommander.addCommand("putstate", getStatePutter());
jcommander.addCommand("trigger", getTriggerer());
jcommander.addCommand("upload", getUploader());
jcommander.addCommand("download", getDownloader());
}
@VisibleForTesting
LocalRunner getLocalRunner() {
return localRunner;
}
@VisibleForTesting
CreateFunction getCreater() {
return creater;
}
@VisibleForTesting
DeleteFunction getDeleter() {
return deleter;
}
@VisibleForTesting
UpdateFunction getUpdater() {
return updater;
}
@VisibleForTesting
GetFunction getGetter() {
return getter;
}
@VisibleForTesting
GetFunctionStatus getStatuser() {
return functionStatus;
}
@VisibleForTesting
ListFunctions getLister() {
return lister;
}
@VisibleForTesting
StatePutter getStatePutter() {
return statePutter;
}
@VisibleForTesting
StateGetter getStateGetter() {
return stateGetter;
}
@VisibleForTesting
TriggerFunction getTriggerer() {
return triggerer;
}
@VisibleForTesting
UploadFunction getUploader() {
return uploader;
}
@VisibleForTesting
DownloadFunction getDownloader() {
return downloader;
}
@VisibleForTesting
RestartFunction getRestarter() {
return restart;
}
@VisibleForTesting
StopFunction getStopper() {
return stop;
}
@VisibleForTesting
StartFunction getStarter() {
return start;
}
private void parseFullyQualifiedFunctionName(String fqfn, FunctionConfig functionConfig) {
String[] args = fqfn.split("/");
if (args.length != 3) {
throw new ParameterException("Fully qualified function names (FQFNs) must "
+ "be of the form tenant/namespace/name");
} else {
functionConfig.setTenant(args[0]);
functionConfig.setNamespace(args[1]);
functionConfig.setName(args[2]);
}
}
}
| ["\"PULSAR_HOME\""] | [] | ["PULSAR_HOME"] | [] | ["PULSAR_HOME"] | java | 1 | 0 | |
test/e2e/network/scale/localrun/ingress_scale.go | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"fmt"
"os"
"path/filepath"
"sort"
"strconv"
"k8s.io/klog"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
gcecloud "k8s.io/legacy-cloud-providers/gce"
"github.com/divinerapier/learn-kubernetes/test/e2e/framework"
"github.com/divinerapier/learn-kubernetes/test/e2e/framework/ingress"
"github.com/divinerapier/learn-kubernetes/test/e2e/framework/providers/gce"
"github.com/divinerapier/learn-kubernetes/test/e2e/network/scale"
)
var (
kubeconfig string
enableTLS bool
numIngressesTest numIngressesSlice
testNamespace string
cloudConfig framework.CloudConfig
outputFile string
cleanup bool
)
type numIngressesSlice []int
func (i *numIngressesSlice) String() string {
return fmt.Sprintf("%d", *i)
}
func (i *numIngressesSlice) Set(value string) error {
v, err := strconv.Atoi(value)
if err != nil {
return err
}
*i = append(*i, v)
sort.Ints(*i)
return nil
}
func registerFlags() {
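	// Default the kubeconfig flag to $HOME/.kube/config when the HOME environment variable is set;
	// otherwise the flag defaults to empty and the path must be supplied explicitly.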
if home := os.Getenv("HOME"); home != "" {
flag.StringVar(&kubeconfig, "kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) Absolute path to the kubeconfig file")
} else {
flag.StringVar(&kubeconfig, "kubeconfig", "", "Absolute path to the kubeconfig file")
}
flag.StringVar(&cloudConfig.ProjectID, "project", "", "GCE project being used")
flag.StringVar(&cloudConfig.Zone, "zone", "", "GCE zone being used")
flag.StringVar(&cloudConfig.Region, "region", "", "GCE region being used")
flag.Var(&numIngressesTest, "num-ingresses", "The number of ingresses to test, specify multiple times for step testing (e.g. 5 ingresses -> 20 ingresses -> 100 ingresses)")
flag.BoolVar(&enableTLS, "enable-tls", true, "Whether to enable TLS on ingress")
flag.StringVar(&testNamespace, "namespace", "ingress-test-scale", "Namespace for testing")
flag.StringVar(&outputFile, "output", "", "If specify, dump latencies to the specified file")
flag.BoolVar(&cleanup, "cleanup", true, "Whether to cleanup resources after test")
}
func verifyFlags() error {
if cloudConfig.ProjectID == "" || cloudConfig.Zone == "" || cloudConfig.Region == "" {
return fmt.Errorf("must set all of --project, --zone and --region")
}
return nil
}
func main() {
registerFlags()
flag.Parse()
if err := verifyFlags(); err != nil {
klog.Errorf("Failed to verify flags: %v", err)
os.Exit(1)
}
// Initializing a k8s client.
config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
klog.Errorf("Failed to build kubeconfig: %v", err)
os.Exit(1)
}
cs, err := clientset.NewForConfig(config)
if err != nil {
klog.Errorf("Failed to create kubeclient: %v", err)
os.Exit(1)
}
// Initializing a GCE client.
gceAlphaFeatureGate := gcecloud.NewAlphaFeatureGate([]string{})
gceCloud, err := gcecloud.CreateGCECloud(&gcecloud.CloudConfig{
ProjectID: cloudConfig.ProjectID,
Region: cloudConfig.Region,
Zone: cloudConfig.Zone,
AlphaFeatureGate: gceAlphaFeatureGate,
})
if err != nil {
klog.Errorf("Error building GCE provider: %v", err)
os.Exit(1)
}
cloudConfig.Provider = gce.NewProvider(gceCloud)
testSuccessFlag := true
defer func() {
if !testSuccessFlag {
klog.Errorf("Ingress scale test failed.")
os.Exit(1)
}
}()
ns := &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: testNamespace,
},
}
klog.Infof("Creating namespace %s...", ns.Name)
if _, err := cs.CoreV1().Namespaces().Create(ns); err != nil {
klog.Errorf("Failed to create namespace %s: %v", ns.Name, err)
testSuccessFlag = false
return
}
if cleanup {
defer func() {
klog.Infof("Deleting namespace %s...", ns.Name)
if err := cs.CoreV1().Namespaces().Delete(ns.Name, nil); err != nil {
klog.Errorf("Failed to delete namespace %s: %v", ns.Name, err)
testSuccessFlag = false
}
}()
}
// Setting up a localized scale test framework.
f := scale.NewIngressScaleFramework(cs, ns.Name, cloudConfig)
f.Logger = &ingress.GLogger{}
// Customizing scale test.
f.EnableTLS = enableTLS
f.OutputFile = outputFile
if len(numIngressesTest) != 0 {
f.NumIngressesTest = numIngressesTest
}
// Real test begins.
if cleanup {
defer func() {
if errs := f.CleanupScaleTest(); len(errs) != 0 {
klog.Errorf("Failed to cleanup scale test: %v", errs)
testSuccessFlag = false
}
}()
}
err = f.PrepareScaleTest()
if err != nil {
klog.Errorf("Failed to prepare scale test: %v", err)
testSuccessFlag = false
return
}
if errs := f.RunScaleTest(); len(errs) != 0 {
klog.Errorf("Failed while running scale test: %v", errs)
testSuccessFlag = false
}
}
| ["\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 | |
src/core/main.go | // Copyright 2018 Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"encoding/gob"
"fmt"
"github.com/goharbor/harbor/src/migration"
"os"
"os/signal"
"strconv"
"syscall"
"time"
"github.com/astaxie/beego"
_ "github.com/astaxie/beego/session/redis"
"github.com/goharbor/harbor/src/common/dao"
"github.com/goharbor/harbor/src/common/job"
"github.com/goharbor/harbor/src/common/models"
common_quota "github.com/goharbor/harbor/src/common/quota"
"github.com/goharbor/harbor/src/common/utils"
"github.com/goharbor/harbor/src/common/utils/log"
"github.com/goharbor/harbor/src/core/api"
quota "github.com/goharbor/harbor/src/core/api/quota"
_ "github.com/goharbor/harbor/src/core/api/quota/chart"
_ "github.com/goharbor/harbor/src/core/api/quota/registry"
_ "github.com/goharbor/harbor/src/core/auth/authproxy"
_ "github.com/goharbor/harbor/src/core/auth/db"
_ "github.com/goharbor/harbor/src/core/auth/ldap"
_ "github.com/goharbor/harbor/src/core/auth/oidc"
_ "github.com/goharbor/harbor/src/core/auth/uaa"
"github.com/goharbor/harbor/src/core/config"
"github.com/goharbor/harbor/src/core/filter"
"github.com/goharbor/harbor/src/core/middlewares"
"github.com/goharbor/harbor/src/core/service/token"
"github.com/goharbor/harbor/src/pkg/notification"
_ "github.com/goharbor/harbor/src/pkg/notifier/topic"
"github.com/goharbor/harbor/src/pkg/scan"
"github.com/goharbor/harbor/src/pkg/scan/dao/scanner"
"github.com/goharbor/harbor/src/pkg/scan/event"
"github.com/goharbor/harbor/src/pkg/scheduler"
"github.com/goharbor/harbor/src/pkg/types"
"github.com/goharbor/harbor/src/pkg/version"
"github.com/goharbor/harbor/src/replication"
"github.com/goharbor/harbor/src/server"
)
const (
adminUserID = 1
)
func updateInitPassword(userID int, password string) error {
queryUser := models.User{UserID: userID}
user, err := dao.GetUser(queryUser)
if err != nil {
return fmt.Errorf("Failed to get user, userID: %d %v", userID, err)
}
if user == nil {
return fmt.Errorf("user id: %d does not exist", userID)
}
if user.Salt == "" {
salt := utils.GenerateRandomString()
user.Salt = salt
user.Password = password
err = dao.ChangeUserPassword(*user)
if err != nil {
return fmt.Errorf("Failed to update user encrypted password, userID: %d, err: %v", userID, err)
}
log.Infof("User id: %d updated its encrypted password successfully.", userID)
} else {
log.Infof("User id: %d already has its encrypted password.", userID)
}
return nil
}
// Quota migration
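// quotaSync back-fills per-project quota usage records when the earlier migration
// left them missing or only partially inserted.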
func quotaSync() error {
projects, err := dao.GetProjects(nil)
if err != nil {
log.Errorf("list project error, %v", err)
return err
}
var pids []string
for _, project := range projects {
pids = append(pids, strconv.FormatInt(project.ProjectID, 10))
}
usages, err := dao.ListQuotaUsages(&models.QuotaUsageQuery{Reference: "project", ReferenceIDs: pids})
if err != nil {
log.Errorf("list quota usage error, %v", err)
return err
}
	// The condition handles these two cases:
	// 1, len(projects) > 1 && len(usages) == 1: existing projects without usage, as we always have the 'library' usage in DB.
	// 2, migration fails at the phase of inserting usage into DB, and only parts of them are inserted successfully.
if len(projects) != len(usages) {
log.Info("Start to sync quota data .....")
if err := quota.Sync(config.GlobalProjectMgr, true); err != nil {
log.Errorf("Fail to sync quota data, %v", err)
return err
}
log.Info("Success to sync quota data .....")
return nil
}
// Only has one project without usage
zero := common_quota.ResourceList{
common_quota.ResourceCount: 0,
common_quota.ResourceStorage: 0,
}
if len(projects) == 1 && len(usages) == 1 {
		totalRepo, err := dao.GetTotalOfRepositories()
		if err != nil {
			log.Error(err)
			return err
		}
		if totalRepo == 0 {
			return nil
		}
refID, err := strconv.ParseInt(usages[0].ReferenceID, 10, 64)
if err != nil {
log.Error(err)
return err
}
usedRes, err := types.NewResourceList(usages[0].Used)
if err != nil {
log.Error(err)
return err
}
if types.Equals(usedRes, zero) && refID == projects[0].ProjectID {
log.Info("Start to sync quota data .....")
if err := quota.Sync(config.GlobalProjectMgr, true); err != nil {
log.Errorf("Fail to sync quota data, %v", err)
return err
}
log.Info("Success to sync quota data .....")
}
}
return nil
}
func gracefulShutdown(closing, done chan struct{}) {
signals := make(chan os.Signal, 1)
signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
log.Infof("capture system signal %s, to close \"closing\" channel", <-signals)
close(closing)
select {
case <-done:
log.Infof("Goroutines exited normally")
case <-time.After(time.Second * 3):
log.Infof("Timeout waiting goroutines to exit")
}
os.Exit(0)
}
func main() {
beego.BConfig.WebConfig.Session.SessionOn = true
beego.BConfig.WebConfig.Session.SessionName = config.SessionCookieName
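	// When the _REDIS_URL environment variable is set, back Beego sessions with Redis
	// instead of the default in-memory session provider.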
redisURL := os.Getenv("_REDIS_URL")
if len(redisURL) > 0 {
gob.Register(models.User{})
beego.BConfig.WebConfig.Session.SessionProvider = "redis"
beego.BConfig.WebConfig.Session.SessionProviderConfig = redisURL
}
beego.AddTemplateExt("htm")
log.Info("initializing configurations...")
config.Init()
log.Info("configurations initialization completed")
token.InitCreators()
database, err := config.Database()
if err != nil {
log.Fatalf("failed to get database configuration: %v", err)
}
if err := dao.InitDatabase(database); err != nil {
log.Fatalf("failed to initialize database: %v", err)
}
if err = migration.Migrate(database); err != nil {
log.Fatalf("failed to migrate: %v", err)
}
if err := config.Load(); err != nil {
log.Fatalf("failed to load config: %v", err)
}
// init the jobservice client
job.Init()
// init the scheduler
scheduler.Init()
password, err := config.InitialAdminPassword()
if err != nil {
log.Fatalf("failed to get admin's initial password: %v", err)
}
if err := updateInitPassword(adminUserID, password); err != nil {
log.Error(err)
}
// Init API handler
if err := api.Init(); err != nil {
log.Fatalf("Failed to initialize API handlers with error: %s", err.Error())
}
registerScanners()
closing := make(chan struct{})
done := make(chan struct{})
go gracefulShutdown(closing, done)
if err := replication.Init(closing, done); err != nil {
log.Fatalf("failed to init for replication: %v", err)
}
log.Info("initializing notification...")
notification.Init()
// Initialize the event handlers for handling artifact cascade deletion
event.Init()
filter.Init()
beego.InsertFilter("/api/*", beego.BeforeStatic, filter.SessionCheck)
beego.InsertFilter("/*", beego.BeforeRouter, filter.SecurityFilter)
beego.InsertFilter("/*", beego.BeforeRouter, filter.ReadonlyFilter)
server.RegisterRoutes()
log.Infof("Version: %s, Git commit: %s", version.ReleaseVersion, version.GitCommit)
beego.RunWithMiddleWares("", middlewares.MiddleWares()...)
}
func registerScanners() {
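	// Register the scanner adapters enabled in the configuration (Trivy and/or Clair),
	// remove the ones that are disabled, and set a default scanner when one is available.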
wantedScanners := make([]scanner.Registration, 0)
uninstallURLs := make([]string, 0)
if config.WithTrivy() {
log.Info("Registering Trivy scanner")
wantedScanners = append(wantedScanners, scanner.Registration{
Name: "Trivy",
Description: "The Trivy scanner adapter",
URL: config.TrivyAdapterURL(),
UseInternalAddr: true,
Immutable: true,
})
} else {
log.Info("Removing Trivy scanner")
uninstallURLs = append(uninstallURLs, config.TrivyAdapterURL())
}
if config.WithClair() {
clairDB, err := config.ClairDB()
if err != nil {
log.Fatalf("failed to load clair database information: %v", err)
}
if err := dao.InitClairDB(clairDB); err != nil {
log.Fatalf("failed to initialize clair database: %v", err)
}
log.Info("Registering Clair scanner")
wantedScanners = append(wantedScanners, scanner.Registration{
Name: "Clair",
Description: "The Clair scanner adapter",
URL: config.ClairAdapterEndpoint(),
UseInternalAddr: true,
Immutable: true,
})
} else {
log.Info("Removing Clair scanner")
uninstallURLs = append(uninstallURLs, config.ClairAdapterEndpoint())
}
if err := scan.EnsureScanners(wantedScanners); err != nil {
log.Fatalf("failed to register scanners: %v", err)
}
if defaultScannerURL := getDefaultScannerURL(); defaultScannerURL != "" {
log.Infof("Setting %s as default scanner", defaultScannerURL)
if err := scan.EnsureDefaultScanner(defaultScannerURL); err != nil {
log.Fatalf("failed to set default scanner: %v", err)
}
}
if err := scan.RemoveImmutableScanners(uninstallURLs); err != nil {
log.Warningf("failed to remove scanners: %v", err)
}
}
func getDefaultScannerURL() string {
if config.WithTrivy() {
return config.TrivyAdapterURL()
}
if config.WithClair() {
return config.ClairAdapterEndpoint()
}
return ""
}
| ["\"_REDIS_URL\""] | [] | ["_REDIS_URL"] | [] | ["_REDIS_URL"] | go | 1 | 0 | |
Src/.ipynb_checkpoints/config-checkpoint.py | # **************************************************************************** #
# #
# ::: :::::::: #
# config.py :+: :+: :+: #
# +:+ +:+ +:+ #
# By: winshare <[email protected]> +#+ +:+ +#+ #
# +#+#+#+#+#+ +#+ #
# Created: 2020/02/28 11:45:40 by winshare #+# #+# #
# Updated: 2020/05/28 15:01:24 by winshare ### ########.fr #
# #
# **************************************************************************** #
# Copyright 2020 winshare
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import json
import numpy as np
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torch
import torchvision.datasets as dataset
# ---------------------------- Official Reference ---------------------------- #
from Data.DataSets.NPY.segmentation_dataset import Costum_NPY_DataSet
from Data.DataSets.CitysCapes.cityscapes import CityscapesSegmentation
from Data.DataSets.COCO.coco import CocoDataset
from Data.DataSets.PascalVoc.pascal import VOCSegmentation
# ------------------------------ Local Reference ----------------------------- #
class CFG():
def __init__(self):
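        # NOTE (assumption): self.configfile is expected to be set by the caller/subclass
        # before CFG.__init__ runs; it is read below to locate and decode the JSON configuration.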
# ---------------------------------------------------------------------------- #
# init process #
# ---------------------------------------------------------------------------- #
for i in range(5):
print("#####------------------------------------------------------------------#####")
print("#####-------------------------<===== WSNET =====>----------------------#####")
for i in range(5):
print("#####------------------------------------------------------------------#####")
print("\n\n# -----Decode Config File :",self.configfile,"-----#")
# ---------------------------------------------------------------------------- #
# init process #
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
# Pytorch Function Dictionary #
# ---------------------------------------------------------------------------- #
self.datasets_function_dict={
"Classification":{
"MINST":dataset.MNIST,
"FashionMINST":dataset.FashionMNIST,
"KMINST":dataset.KMNIST,
"EMINST":dataset.EMNIST,
"CIFAR10":dataset.CIFAR10,
"CIFAR100":dataset.CIFAR100,
"ImageNet":dataset.ImageNet
},
"Detection":{
"CocoDetection":CocoDataset,
"VOC_Detection":dataset.VOCDetection
},
"Segmentation":{
"VOC_Segmentation":dataset.VOCSegmentation,
"Cityscapes":dataset.Cityscapes,
"Costum_NPY_DataSet":Costum_NPY_DataSet,
"CocoSegmentation":CocoDataset
},
"Caption":{
"CocoCaptions":dataset.CocoCaptions
},
"InstenceSegmentation":{
"CocoDetection":CocoDataset
}
}
self.dataset_support_list=self.datasets_function_dict.keys()
# ---------------------------------------------------------------------------- #
self.OptimDict={
"SGD":optim.SGD,
"ASGD":optim.ASGD,
"Adam":optim.Adam,
"Adadelta":optim.Adadelta,
"Adagrad":optim.Adagrad,
"AdamW":optim.AdamW,
"LBFGS":optim.LBFGS,
"RMSprop":optim.RMSprop,
"SparseAdam":optim.SparseAdam,
"Adamax":optim.Adamax
}
# ---------------------------------------------------------------------------- #
self.Loss_Function_Dict={
"AdaptiveLogSoftmaxWithLoss":nn.AdaptiveLogSoftmaxWithLoss
,"BCELoss":nn.BCELoss
,"BCEWithLogitsLoss":nn.BCEWithLogitsLoss
,"CosineEmbeddingLoss":nn.CosineEmbeddingLoss
,"CrossEntropyLoss":nn.CrossEntropyLoss
,"CTCLoss":nn.CTCLoss
,"cosine_embedding_loss":F.cosine_embedding_loss
,"ctc_loss":F.ctc_loss
,"hinge_embedding_loss":F.hinge_embedding_loss
,"l1_loss":F.l1_loss
,"margin_ranking_loss":F.margin_ranking_loss
,"mse_loss":F.mse_loss
,"multi_margin_loss":F.mse_loss
,"multilabel_margin_loss":F.multilabel_margin_loss
,"multilabel_soft_margin_loss":F.multilabel_margin_loss
,"nll_loss":F.nll_loss
,"poisson_nll_loss":F.poisson_nll_loss
,"smooth_l1_loss":F.smooth_l1_loss
,"soft_margin_loss":F.soft_margin_loss
,"triplet_margin_loss":F.triplet_margin_loss
,"HingeEmbeddingLoss":nn.HingeEmbeddingLoss
,"KLDivLoss":nn.KLDivLoss
,"L1Loss":nn.L1Loss
,"MarginRankingLoss":nn.MarginRankingLoss
,"MSELoss":nn.MSELoss
,"MultiLabelMarginLoss":nn.MultiLabelMarginLoss
,"MultiLabelSoftMarginLoss":nn.MultiLabelSoftMarginLoss
,"MultiMarginLoss":nn.MultiMarginLoss
,"NLLLoss":nn.MultiMarginLoss
,"PoissonNLLLoss":nn.PoissonNLLLoss
,"SmoothL1Loss":nn.SmoothL1Loss
,"SoftMarginLoss":nn.SoftMarginLoss
,"TripletMarginLoss":nn.TripletMarginLoss
}
# ---------------------------------------------------------------------------- #
self.Lr_Dict={
"StepLR":optim.lr_scheduler.StepLR,
"MultiStepLR":optim.lr_scheduler.MultiStepLR,
"ExponentialLR":optim.lr_scheduler.ExponentialLR,
"CosineAnnealingLR":optim.lr_scheduler.CosineAnnealingLR,
"ReduceLROnPlateau":optim.lr_scheduler.ReduceLROnPlateau,
"CyclicLR":optim.lr_scheduler.CyclicLR,
"OneCycleLR":optim.lr_scheduler.OneCycleLR,
"CosineAnnealingWarmRestarts":optim.lr_scheduler.CosineAnnealingWarmRestarts
}
# ---------------------------------------------------------------------------- #
# Config in 3 Level #
# ---------------------------------------------------------------------------- #
# -------------------------------- File Level -------------------------------- #
self.__configfile=self.configfile
self.__json=json.load(open(self.__configfile,'r'))
self.usegpu=False
self.MissionType=self.__json['MissionType']
self.InstanceID=self.__json['instance_id']
self.Content=self.__json['content']
# ------------------------------- Second Level ------------------------------- #
self.Net=self.Content['Net']
self.DataSetConfig=self.Content['Dataset']
self.Config=self.Content['Config']
print('\n\n# ---------------------------------- config ---------------------------------- #')
print("# ------------------------------ NETWORK CONFIG ------------------------------ #")
self.print_dict(self.Net)
print("# ------------------------------ NETWORK CONFIG ------------------------------ #")
print("# ------------------------------ DATASET CONFIG ------------------------------ #")
self.print_dict(self.DataSetConfig)
print("# ------------------------------ DATASET CONFIG ------------------------------ #")
print("# ------------------------------ GENERAL CONFIG ------------------------------ #")
self.print_dict(self.Config)
print("# ------------------------------ GENERAL CONFIG ------------------------------ #")
print('# ---------------------------------- config ---------------------------------- #')
# -------------------------------- Third Level ------------------------------- #
# ---------------------------------------------------------------------------- #
# NET #
# ---------------------------------------------------------------------------- #
# self.NetType=self.Net['NetType']
self.DefaultNetwork=self.Net["DefaultNetwork"]
self.BatchSize=self.Net['BatchSize']
if self.Net['BackBone']=='None':
self.BackBone=None
else:
self.BackBone=self.Net['BackBone']
self.NetType=self.Net["NetType"]
# --------------------------------- Optimizer -------------------------------- #
self.optimizer=self.OptimDict[self.Net['Optimizer']]
self.learning_rate=self.Net['learning_rate']
self.momentum=self.Net['momentum']
self.weight_decay=self.Net['weight_decay']
# ------------------------------- lr_scheduler ------------------------------- #
self.lr_scheduler=self.Net['lr_scheduler']
self.lr_steps=self.Net['lr_steps']
self.lr_gamma=self.Net['lr_gamma']
self.lr_scheduler=self.Lr_Dict[self.lr_scheduler]
self.class_num=self.Net['class_num']
# ------------------------------- Loss Function ------------------------------ #
self.Loss_Function=self.Loss_Function_Dict[self.Net['Loss_Function']]()
# ---------------------------------------------------------------------------- #
# Dataset #
# ---------------------------------------------------------------------------- #
self.DataSetType=self.DataSetConfig['Type']
self.DataSet_Root=self.DataSetConfig['root']
self.Dataset_Train_file=os.path.join(self.DataSet_Root,self.DataSetConfig['train_index_file'])
self.Dataset_Val_file=os.path.join(self.DataSet_Root,self.DataSetConfig['val_index_file'])
self.DefaultDataset=self.DataSetConfig['DefaultDataset']
self.NPY=self.DataSetConfig["NPY"]
if os.path.exists(self.NPY):
self.NPY_Data=np.load(self.NPY,allow_pickle=True)
self.SFT_Enable=self.DataSetConfig["SFT_Enable"]
# --------------------------------- Transform (Aborted)------------------------#
# ---------------------------------------------------------------------------- #
"""
Because the default detection network has a transform flow
so the image list should include 3d tensors
[
[C, H, W],
[C, H, W].....
]
Target should be
list of dict :
{
boxes: list of box tensor[n,4] (float32)
masks: list of segmentation mask points [n,n] (float32)
keypoints: list of key points[n,n] (float32)
labels: list of index of label[n] (int64)
}
For Default Detection:
The transformations it perform are:
- input normalization (mean subtraction and std division)
- input / target resizing to match min_size / max_size
It returns a ImageList for the inputs, and a List[Dict[Tensor]] for the targets
"""
# print('\n\n--------------------------------- Transform --------------------------------')
# self.TransformDict=self.DataSetConfig['Transform']
# functionlist=[list(i.keys())[0] for i in self.TransformDict]
# paralist=[list(i.values())[0] for i in self.TransformDict]
# self.transforms=GeneralTransform(self.TransformDict)
# ---------------------------------------------------------------------------- #
# Config #
# ---------------------------------------------------------------------------- #
self.DistributedDataParallel=self.Config['DistributedDataParallel']
self.resume=self.Config['Resume']
self.checkpoint=self.Config['checkpoint_path']
self.MultiScale_Training=self.Config['multiscale_training']
self.logdir=self.Config['logdir']
self.devices=self.Config['devices']
self.pre_estimation=self.Config['pre_estimation']
if not os.path.exists(self.checkpoint):
os.makedirs(self.checkpoint)
if self.devices=='GPU':
self.usegpu=True
self.gpu_id=self.Config['gpu_id']
# os.environ['CUDA_VISIBLE_DEVICES']=str(self.gpu_id)
self.device = torch.device("cuda:"+str(self.gpu_id) if torch.cuda.is_available() else "cpu")
print('#-----Device:\n',self.device)
if self.devices=='CPU':
self.device=torch.device("cpu")
self.download_pretrain_model=self.Config['down_pretrain_model']
self.visualization=self.Config['visualization']
self.worker_num=self.Config['worker_num']
self.epochs=self.Config['epochs']
self.aspect_ratio_factor=self.Config['group_factor']
print("# ---------------------------------------------------------------------------- #")
print("# Configure Class Init Successful #")
print("# ---------------------------------------------------------------------------- #")
self.Enviroment_Info()
# ---------------------------------------------------------------------------- #
# Config Class Function #
# ---------------------------------------------------------------------------- #
def GenerateDefaultConfig(self,mode='detection'):
print('Generate Default Config with mode :',mode)
def configinfo(self):
print('***** Already read Config file ,'+self.__configfile,'*****')
print('***** Instance ID : ',self.InstanceID,'*****')
print('***** Mission Type : ',self.MissionType,'*****')
def Enviroment_Info(self):
print("\n\n# --------------------------------- NVCC INFO -------------------------------- #\n\n")
os.system('nvcc -V')
print("\n\n# --------------------------------- NVCC INFO -------------------------------- #\n\n")
print("\n\n# --------------------------------- GPU INFO --------------------------------- #")
os.system('nvidia-smi')
print("# --------------------------------- GPU INFO --------------------------------- #\n\n")
def print_dict(self,d,n=0):
length=74
for k,v in d.items():
# print ('\t'*n)
if type(v)==type({}):
print("%s : {" % k)
self.print_dict(v,n+1)
else:
strl=len(str(k))+len(str(v))
space=length-strl
print("# %s : %s" % (k,v)+" "*space+"#")
if n!=0:
print('\t'*(n-1)+ '}')
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
server/http/main.go | package main
import (
"context"
"flag"
"fmt"
"log"
"net"
"net/http"
"os"
"os/signal"
"syscall"
"time"
"github.com/holger-hoffmann/dev.httpfileserver/service"
)
func main() {
var (
bindAddress = flag.String("bind-address", "0.0.0.0", "Address the server will bind to.")
port = flag.String("port", "8080", "The port the server will listen on.")
dir = flag.String("dir", "/tmp", "The directory from where the files are served and where to they are uploaded.")
)
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "Usage: %s [options]\n\n", os.Args[0])
flag.PrintDefaults()
return
}
flag.Parse()
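// A PORT environment variable (commonly injected by hosting platforms) overrides the -port flag.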
if os.Getenv("PORT") != "" {
*port = os.Getenv("PORT")
}
fmt.Printf("dev.httpfileserver - Starting.\n")
fmt.Printf("dev.httpfileserver - bind-address: %s\n", *bindAddress)
fmt.Printf("dev.httpfileserver - port: %s\n", *port)
fmt.Printf("dev.httpfileserver - dir: %s\n", *dir)
ctx := context.Background()
s, err := service.New(ctx, *dir)
if err != nil {
log.Printf("dev.httpfileserver - Failed to initialize service: %s", err)
log.Fatalf("dev.httpfileserver - Exiting.")
}
srv := &http.Server{
Addr: net.JoinHostPort(*bindAddress, *port),
Handler: s.GetHandler(),
}
errChan := make(chan error, 10)
go func() {
fmt.Printf("dev.httpfileserver - Listening on %s.\n", net.JoinHostPort(*bindAddress, *port))
err := srv.ListenAndServe()
if err != nil {
errChan <- err
return
}
}()
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)
for {
select {
case err := <-errChan:
if err != nil {
log.Printf("dev.httpfileserver - Error while serving, exiting: %s", err)
os.Exit(1)
}
case <-signalChan:
fmt.Println()
log.Printf("dev.httpfileserver - Shutdown signal received, stopping gracefully.")
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
srv.Shutdown(ctx)
cancel()
log.Printf("dev.httpfileserver - Exiting.")
os.Exit(0)
}
}
}
| [
"\"PORT\"",
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
setup.py | #!/usr/bin/env python3
import sys
import os
import platform
import setuptools
from setuptools.command.build_ext import build_ext as _build_ext
if 'Windows' == platform.system():
if 'AMD64' == platform.machine():
vixpath = os.path.join(os.getenv('PROGRAMFILES(x86)'), 'VMware\\VMware VIX')
libs = ['Vix64AllProductsDyn','kernel32','user32','advapi32','ole32','oleaut32','ws2_32','shell32']
else:
vixpath = os.path.join(os.getenv('PROGRAMFILES'), 'VMware\\VMware VIX')
libs = ['VixAllProductsDyn','kernel32','user32','advapi32','ole32','oleaut32','ws2_32','shell32']
defines = [('WIN32', None)]
include_dirs = [vixpath]
library_dirs = [vixpath]
elif 'Darwin' == platform.system():
VIX_PATH = '/Applications/VMware Fusion.app/Contents/Public'
VIX_LIB = 'vixAllProducts'
defines = []
include_dirs = [VIX_PATH + '/include']
library_dirs = [VIX_PATH]
libs = [VIX_LIB]
elif 'Linux' == platform.system():
defines = []
include_dirs = ['/usr/include/vmware-vix']
library_dirs = ['/usr/lib/vmware-vix/lib']
libs = ['vixAllProducts', 'dl']
# hack to run install_name_tool to fix path to vix library
class build_ext(_build_ext):
def build_extension(self, ext):
_build_ext.build_extension(self, ext)
if 'Darwin' == platform.system():
ext_name = self.get_ext_fullpath(ext.name)
fmt = 'install_name_tool -change lib%s.dylib "%s/lib%s.dylib" %s'
os.system(fmt % (VIX_LIB, VIX_PATH, VIX_LIB, ext_name))
setuptools.setup(
cmdclass = {'build_ext': build_ext},
name = 'pyvix',
author = 'Matthew Oertle',
author_email = '[email protected]',
version = '0.4.3',
license = 'MIT',
url = 'https://github.com/moertle/pyvix',
description = 'Python bindings for VIX library.',
long_description = open('README.rst').read(),
ext_modules = [
setuptools.Extension(
'pyvix',
['src/pyvix.c', 'src/pyvix_host.c', 'src/pyvix_vm.c'],
define_macros = defines,
include_dirs = include_dirs,
library_dirs = library_dirs,
libraries = libs,
)
],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
]
)
| []
| []
| [
"PROGRAMFILES(x8",
"PROGRAMFILES"
]
| [] | ["PROGRAMFILES(x8", "PROGRAMFILES"] | python | 2 | 0 | |
job/storage/mysql/mysql_test.go | package mysql
import (
"database/sql"
"encoding/json"
"os"
"testing"
"time"
"github.com/DATA-DOG/go-sqlmock"
"github.com/jmoiron/sqlx"
mysqltest "github.com/lestrrat-go/test-mysqld"
"github.com/stretchr/testify/assert"
"github.com/primedata-ai/kala/job"
)
func NewTestDb() (*DB, sqlmock.Sqlmock) {
connection, m, _ := sqlmock.New()
var db = &DB{
conn: sqlx.NewDb(connection, "sqlmock"),
}
return db, m
}
func TestSaveAndGetJob(t *testing.T) {
db, m := NewTestDb()
cache := job.NewLockFreeJobCache(db)
defer db.Close()
genericMockJob := job.GetMockJobWithGenericSchedule(time.Now())
genericMockJob.Init(cache)
j, err := json.Marshal(genericMockJob)
if assert.NoError(t, err) {
m.ExpectBegin()
m.ExpectPrepare("replace .*").
ExpectExec().
WithArgs(genericMockJob.Id, string(j)).
WillReturnResult(sqlmock.NewResult(1, 1))
m.ExpectCommit()
err := db.Save(genericMockJob)
if assert.NoError(t, err) {
m.ExpectQuery("select .*").
WithArgs(genericMockJob.Id).
WillReturnRows(sqlmock.NewRows([]string{"job"}).AddRow(j))
j2, err := db.Get(genericMockJob.Id)
if assert.Nil(t, err) {
assert.WithinDuration(t, j2.NextRunAt, genericMockJob.NextRunAt, 400*time.Microsecond)
assert.Equal(t, j2.Name, genericMockJob.Name)
assert.Equal(t, j2.Id, genericMockJob.Id)
assert.Equal(t, j2.Command, genericMockJob.Command)
assert.Equal(t, j2.Schedule, genericMockJob.Schedule)
assert.Equal(t, j2.Owner, genericMockJob.Owner)
assert.Equal(t, j2.Metadata.SuccessCount, genericMockJob.Metadata.SuccessCount)
}
}
}
}
func TestDeleteJob(t *testing.T) {
db, m := NewTestDb()
cache := job.NewLockFreeJobCache(db)
genericMockJob := job.GetMockJobWithGenericSchedule(time.Now())
genericMockJob.Init(cache)
j, err := json.Marshal(genericMockJob)
if assert.NoError(t, err) {
m.ExpectBegin()
m.ExpectPrepare("replace .*").
ExpectExec().
WithArgs(genericMockJob.Id, string(j)).
WillReturnResult(sqlmock.NewResult(1, 1))
m.ExpectCommit()
err := db.Save(genericMockJob)
if assert.NoError(t, err) {
// Delete it
m.ExpectExec("delete .*").
WithArgs(genericMockJob.Id).
WillReturnResult(sqlmock.NewResult(1, 1))
err = db.Delete(genericMockJob.Id)
assert.Nil(t, err)
// Verify deletion
m.ExpectQuery("select .*").
WithArgs(genericMockJob.Id).
WillReturnError(sql.ErrNoRows)
k, err := db.Get(genericMockJob.Id)
assert.Error(t, err)
assert.Nil(t, k)
}
}
}
func TestSaveAndGetAllJobs(t *testing.T) {
db, m := NewTestDb()
genericMockJobOne := job.GetMockJobWithGenericSchedule(time.Now())
genericMockJobTwo := job.GetMockJobWithGenericSchedule(time.Now())
jobOne, err := json.Marshal(genericMockJobOne)
if assert.NoError(t, err) {
jobTwo, err := json.Marshal(genericMockJobTwo)
if assert.NoError(t, err) {
m.ExpectQuery(".*").WillReturnRows(sqlmock.NewRows([]string{"jobs"}).AddRow(jobOne).AddRow(jobTwo))
jobs, err := db.GetAll()
assert.Nil(t, err)
assert.Equal(t, 2, len(jobs))
}
}
}
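// TestRealDb runs against the MySQL instance named by MYSQL_DSN, or spins up a throwaway test mysqld when the variable is unset.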
func TestRealDb(t *testing.T) {
dsn := os.Getenv("MYSQL_DSN")
if dsn == "" {
mysqld, err := mysqltest.NewMysqld(nil)
if assert.NoError(t, err) {
dsn = mysqld.Datasource("test", "", "", 0)
defer mysqld.Stop()
} else {
t.FailNow()
}
}
db := New(dsn, nil)
cache := job.NewLockFreeJobCache(db)
genericMockJobOne := job.GetMockJobWithGenericSchedule(time.Now().Add(time.Hour))
genericMockJobTwo := job.GetMockJobWithGenericSchedule(time.Now().Add(time.Hour))
genericMockJobOne.Init(cache)
genericMockJobTwo.Init(cache)
t.Logf("Mock job one: %s", genericMockJobOne.Id)
t.Logf("Mock job two: %s", genericMockJobTwo.Id)
err := db.Save(genericMockJobOne)
if assert.NoError(t, err) {
err := db.Save(genericMockJobTwo)
if assert.NoError(t, err) {
jobs, err := db.GetAll()
if assert.NoError(t, err) {
assert.Equal(t, 2, len(jobs))
if jobs[0].Id == genericMockJobTwo.Id {
jobs[0], jobs[1] = jobs[1], jobs[0]
}
assert.WithinDuration(t, jobs[0].NextRunAt, genericMockJobOne.NextRunAt, 400*time.Microsecond)
assert.Equal(t, jobs[0].Name, genericMockJobOne.Name)
assert.Equal(t, jobs[0].Id, genericMockJobOne.Id)
assert.Equal(t, jobs[0].Command, genericMockJobOne.Command)
assert.Equal(t, jobs[0].Schedule, genericMockJobOne.Schedule)
assert.Equal(t, jobs[0].Owner, genericMockJobOne.Owner)
assert.Equal(t, jobs[0].Metadata.SuccessCount, genericMockJobOne.Metadata.SuccessCount)
assert.WithinDuration(t, jobs[1].NextRunAt, genericMockJobTwo.NextRunAt, 400*time.Microsecond)
assert.Equal(t, jobs[1].Name, genericMockJobTwo.Name)
assert.Equal(t, jobs[1].Id, genericMockJobTwo.Id)
assert.Equal(t, jobs[1].Command, genericMockJobTwo.Command)
assert.Equal(t, jobs[1].Schedule, genericMockJobTwo.Schedule)
assert.Equal(t, jobs[1].Owner, genericMockJobTwo.Owner)
assert.Equal(t, jobs[1].Metadata.SuccessCount, genericMockJobTwo.Metadata.SuccessCount)
job2, err := db.Get(genericMockJobTwo.Id)
if assert.NoError(t, err) {
assert.WithinDuration(t, job2.NextRunAt, genericMockJobTwo.NextRunAt, 400*time.Microsecond)
assert.Equal(t, job2.Name, genericMockJobTwo.Name)
assert.Equal(t, job2.Id, genericMockJobTwo.Id)
assert.Equal(t, job2.Command, genericMockJobTwo.Command)
assert.Equal(t, job2.Schedule, genericMockJobTwo.Schedule)
assert.Equal(t, job2.Owner, genericMockJobTwo.Owner)
assert.Equal(t, job2.Metadata.SuccessCount, genericMockJobTwo.Metadata.SuccessCount)
t.Logf("Deleting job with id %s", job2.Id)
err := db.Delete(job2.Id)
if assert.NoError(t, err) {
jobs, err := db.GetAll()
if assert.NoError(t, err) {
assert.Equal(t, 1, len(jobs))
assert.WithinDuration(t, jobs[0].NextRunAt, genericMockJobOne.NextRunAt, 400*time.Microsecond)
assert.Equal(t, jobs[0].Name, genericMockJobOne.Name)
assert.Equal(t, jobs[0].Id, genericMockJobOne.Id)
assert.Equal(t, jobs[0].Command, genericMockJobOne.Command)
assert.Equal(t, jobs[0].Schedule, genericMockJobOne.Schedule)
assert.Equal(t, jobs[0].Owner, genericMockJobOne.Owner)
assert.Equal(t, jobs[0].Metadata.SuccessCount, genericMockJobOne.Metadata.SuccessCount)
}
}
}
}
}
}
}
func TestPersistEpsilon(t *testing.T) {
dsn := os.Getenv("MYSQL_DSN")
if dsn == "" {
mysqld, err := mysqltest.NewMysqld(nil)
if assert.NoError(t, err) {
dsn = mysqld.Datasource("test", "", "", 0)
defer mysqld.Stop()
} else {
t.FailNow()
}
}
db := New(dsn, nil)
defer db.Close()
cache := job.NewMemoryJobCache(db)
mockJob := job.GetMockRecurringJobWithSchedule(time.Now().Add(time.Second*1), "PT1H")
mockJob.Epsilon = "PT1H"
mockJob.Command = "asdf"
mockJob.Retries = 2
err := mockJob.Init(cache)
if assert.NoError(t, err) {
err := db.Save(mockJob)
if assert.NoError(t, err) {
retrieved, err := db.GetAll()
if assert.NoError(t, err) {
retrieved[0].Run(cache)
}
}
}
}
| [
"\"MYSQL_DSN\"",
"\"MYSQL_DSN\""
]
| []
| [
"MYSQL_DSN"
]
| [] | ["MYSQL_DSN"] | go | 1 | 0 | |
test/e2e/instrumentation/monitoring/stackdriver.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package monitoring
import (
"context"
"fmt"
"math"
"os"
"time"
"golang.org/x/oauth2/google"
. "github.com/onsi/ginkgo"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
gcm "google.golang.org/api/monitoring/v3"
)
var (
// Stackdriver container metrics, as described here:
// https://cloud.google.com/monitoring/api/metrics#gcp-container
stackdriverMetrics = []string{
"uptime",
"memory/bytes_total",
"memory/bytes_used",
"cpu/reserved_cores",
"cpu/usage_time",
"memory/page_fault_count",
"disk/bytes_used",
"disk/bytes_total",
"cpu/utilization",
}
pollFrequency = time.Second * 5
pollTimeout = time.Minute * 7
rcName = "resource-consumer"
memoryUsed = 64
memoryLimit int64 = 200
tolerance = 0.25
)
var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() {
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
})
f := framework.NewDefaultFramework("stackdriver-monitoring")
It("should have cluster metrics [Feature:StackdriverMonitoring]", func() {
testStackdriverMonitoring(f, 1, 100, 200)
})
})
func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, perPodCPU int64) {
projectId := framework.TestContext.CloudConfig.ProjectID
ctx := context.Background()
client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope)
gcmService, err := gcm.New(client)
// set this env var if accessing Stackdriver test endpoint (default is prod):
// $ export STACKDRIVER_API_ENDPOINT_OVERRIDE=https://test-monitoring.sandbox.googleapis.com/
basePathOverride := os.Getenv("STACKDRIVER_API_ENDPOINT_OVERRIDE")
if basePathOverride != "" {
gcmService.BasePath = basePathOverride
}
framework.ExpectNoError(err)
rc := common.NewDynamicResourceConsumer(rcName, common.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f)
defer rc.CleanUp()
rc.WaitForReplicas(pods, 15*time.Minute)
metricsMap := map[string]bool{}
pollingFunction := checkForMetrics(projectId, gcmService, time.Now(), metricsMap, allPodsCPU, perPodCPU)
err = wait.Poll(pollFrequency, pollTimeout, pollingFunction)
if err != nil {
framework.Logf("Missing metrics: %+v\n", metricsMap)
}
framework.ExpectNoError(err)
}
func checkForMetrics(projectId string, gcmService *gcm.Service, start time.Time, metricsMap map[string]bool, cpuUsed int, cpuLimit int64) func() (bool, error) {
return func() (bool, error) {
counter := 0
correctUtilization := false
for _, metric := range stackdriverMetrics {
metricsMap[metric] = false
}
for _, metric := range stackdriverMetrics {
// TODO: check only for metrics from this cluster
ts, err := fetchTimeSeries(projectId, gcmService, metric, start, time.Now())
framework.ExpectNoError(err)
if len(ts) > 0 {
counter = counter + 1
metricsMap[metric] = true
framework.Logf("Received %v timeseries for metric %v\n", len(ts), metric)
} else {
framework.Logf("No timeseries for metric %v\n", metric)
}
var sum float64 = 0
switch metric {
case "cpu/utilization":
for _, t := range ts {
max := t.Points[0]
maxEnd, _ := time.Parse(time.RFC3339, max.Interval.EndTime)
for _, p := range t.Points {
pEnd, _ := time.Parse(time.RFC3339, p.Interval.EndTime)
if pEnd.After(maxEnd) {
max = p
maxEnd, _ = time.Parse(time.RFC3339, max.Interval.EndTime)
}
}
sum = sum + *max.Value.DoubleValue
framework.Logf("Received %v points for metric %v\n",
len(t.Points), metric)
}
framework.Logf("Most recent cpu/utilization sum*cpu/limit: %v\n", sum*float64(cpuLimit))
if math.Abs(sum*float64(cpuLimit)-float64(cpuUsed)) > tolerance*float64(cpuUsed) {
return false, nil
} else {
correctUtilization = true
}
}
}
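// All nine stackdriverMetrics must have produced timeseries and the cpu/utilization check must have passed.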
if counter < 9 || !correctUtilization {
return false, nil
}
return true, nil
}
}
func createMetricFilter(metric string, container_name string) string {
return fmt.Sprintf(`metric.type="container.googleapis.com/container/%s" AND
resource.label.container_name="%s"`, metric, container_name)
}
func fetchTimeSeries(projectId string, gcmService *gcm.Service, metric string, start time.Time, end time.Time) ([]*gcm.TimeSeries, error) {
response, err := gcmService.Projects.TimeSeries.
List(fullProjectName(projectId)).
Filter(createMetricFilter(metric, rcName)).
IntervalStartTime(start.Format(time.RFC3339)).
IntervalEndTime(end.Format(time.RFC3339)).
Do()
if err != nil {
return nil, err
}
return response.TimeSeries, nil
}
func fullProjectName(name string) string {
return fmt.Sprintf("projects/%s", name)
}
| [
"\"STACKDRIVER_API_ENDPOINT_OVERRIDE\""
]
| []
| [
"STACKDRIVER_API_ENDPOINT_OVERRIDE"
]
| [] | ["STACKDRIVER_API_ENDPOINT_OVERRIDE"] | go | 1 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "EarthquakeSpy.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
glightning/tests/interop_test.go | package glightning_test
import (
"bufio"
"errors"
"fmt"
"github.com/vertiond/glightning/gbitcoin"
"github.com/vertiond/glightning/glightning"
"github.com/stretchr/testify/assert"
"io"
"io/ioutil"
"log"
"net"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime/debug"
"syscall"
"testing"
"time"
)
const defaultTimeout int = 10
func check(t *testing.T, err error) {
if err != nil {
debug.PrintStack()
t.Fatal(err)
}
}
func advanceChain(t *testing.T, n *Node, btc *gbitcoin.Bitcoin, numBlocks uint) {
timeout := time.Now().Add(time.Duration(defaultTimeout) * time.Second)
info, _ := n.rpc.GetInfo()
blockheight := info.Blockheight
mineBlocks(t, numBlocks, btc)
for {
info, _ = n.rpc.GetInfo()
if info.Blockheight >= uint(blockheight)+numBlocks {
return
}
if time.Now().After(timeout) {
t.Fatal("timed out waiting for chain to advance")
}
}
}
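// waitForChannelActive polls until both directions of the channel identified by scid are reported active, or times out.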
func waitForChannelActive(n *Node, scid string) error {
timeout := time.Now().Add(time.Duration(defaultTimeout) * time.Second)
for {
chans, _ := n.rpc.GetChannel(scid)
// both need to be active
active := 0
for i := 0; i < len(chans); i++ {
if chans[i].IsActive {
active += 1
}
}
if active == 2 {
return nil
}
if time.Now().After(timeout) {
return errors.New(fmt.Sprintf("timed out waiting for scid %s", scid))
}
time.Sleep(100 * time.Millisecond)
}
}
func waitForChannelReady(t *testing.T, from, to *Node) {
timeout := time.Now().Add(time.Duration(defaultTimeout) * time.Second)
for {
info, err := to.rpc.GetInfo()
check(t, err)
peer, err := from.rpc.GetPeer(info.Id)
check(t, err)
if peer.Channels == nil {
t.Fatal("no channels for peer")
}
if peer.Channels[0].State == "CHANNELD_NORMAL" {
return
}
if time.Now().After(timeout) {
t.Fatal("timed out waiting for channel normal")
}
time.Sleep(100 * time.Millisecond)
}
}
func Init(t *testing.T) (string, string, int, *gbitcoin.Bitcoin) {
// let's put it in a temporary directory
testDir, err := ioutil.TempDir("", "gltests-")
check(t, err)
dataDir, _, btcPort, btc := SpinUpBitcoind(t, testDir)
return testDir, dataDir, btcPort, btc
}
func CleanUp(testDir string) {
os.Remove(testDir)
}
type BNode struct {
rpc *gbitcoin.Bitcoin
dir string
port uint
pid uint
}
// Returns a bitcoin node w/ RPC client
func SpinUpBitcoind(t *testing.T, dir string) (string, int, int, *gbitcoin.Bitcoin) {
// make some dirs!
bitcoindDir := filepath.Join(dir, "bitcoind")
err := os.Mkdir(bitcoindDir, os.ModeDir|0755)
check(t, err)
bitcoinPath, err := exec.LookPath("bitcoind")
check(t, err)
btcPort, err := getPort()
check(t, err)
btcUser := "btcuser"
btcPass := "btcpass"
bitcoind := exec.Command(bitcoinPath, "-regtest",
fmt.Sprintf("-datadir=%s", bitcoindDir),
"-server", "-logtimestamps", "-nolisten",
fmt.Sprintf("-rpcport=%d", btcPort),
"-addresstype=bech32", "-logtimestamps", "-txindex",
fmt.Sprintf("-rpcpassword=%s", btcPass),
fmt.Sprintf("-rpcuser=%s", btcUser))
bitcoind.SysProcAttr = &syscall.SysProcAttr{
Pdeathsig: syscall.SIGKILL,
}
log.Printf("starting %s on %d...", bitcoinPath, btcPort)
err = bitcoind.Start()
check(t, err)
log.Printf(" bitcoind started (%d)!\n", bitcoind.Process.Pid)
btc := gbitcoin.NewBitcoin(btcUser, btcPass)
btc.SetTimeout(uint(2))
// Waits til bitcoind is up
btc.StartUp("", bitcoindDir, uint(btcPort))
// Go ahead and run 50 blocks
addr, err := btc.GetNewAddress(gbitcoin.Bech32)
check(t, err)
_, err = btc.GenerateToAddress(addr, 101)
check(t, err)
return bitcoindDir, bitcoind.Process.Pid, btcPort, btc
}
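// waitForLog tails the node's log file until a line matching phrase appears; a timeoutSec of 0 waits indefinitely.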
func (node *Node) waitForLog(t *testing.T, phrase string, timeoutSec int) {
timeout := time.Now().Add(time.Duration(timeoutSec) * time.Second)
// at startup we need to wait for the file to open
logfilePath := filepath.Join(node.dir, "log")
for time.Now().Before(timeout) || timeoutSec == 0 {
if _, err := os.Stat(logfilePath); os.IsNotExist(err) {
time.Sleep(100 * time.Millisecond)
continue
}
break
}
logfile, _ := os.Open(logfilePath)
defer logfile.Close()
reader := bufio.NewReader(logfile)
for timeoutSec == 0 || time.Now().Before(timeout) {
line, err := reader.ReadString('\n')
if err != nil {
if err == io.EOF {
time.Sleep(100 * time.Millisecond)
} else {
check(t, err)
}
}
m, err := regexp.MatchString(phrase, line)
check(t, err)
if m {
return
}
}
t.Fatal(fmt.Sprintf("Unable to find \"%s\" in %s/log", phrase, node.dir))
}
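// getPort asks the OS for a free TCP port by binding to port 0 and releasing it immediately.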
func getPort() (int, error) {
addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
if err != nil {
return 0, err
}
l, err := net.ListenTCP("tcp", addr)
if err != nil {
return 0, err
}
defer l.Close()
return l.Addr().(*net.TCPAddr).Port, nil
}
type Node struct {
rpc *glightning.Lightning
dir string
}
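// LnNode starts a lightningd regtest node in its own directory; LIGHTNINGD_PATH overrides the binary found on PATH, and extraOps entries become additional --key=value flags.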
func LnNode(t *testing.T, testDir, dataDir string, btcPort int, name string, extraOps map[string]string) *Node {
var err error
lightningPath := os.Getenv("LIGHTNINGD_PATH")
if lightningPath == "" {
// assume it's just a thing i can call
lightningPath, err = exec.LookPath("lightningd")
check(t, err)
}
lightningdDir := filepath.Join(testDir, fmt.Sprintf("lightningd-%s", name))
err = os.Mkdir(lightningdDir, os.ModeDir|0755)
check(t, err)
port, err := getPort()
check(t, err)
args := []string{
fmt.Sprintf("--lightning-dir=%s", lightningdDir),
fmt.Sprintf("--bitcoin-datadir=%s", dataDir),
"--network=regtest", "--funding-confirms=3",
fmt.Sprintf("--addr=localhost:%d", port),
fmt.Sprintf("--bitcoin-rpcport=%d", btcPort),
"--log-file=log",
"--log-level=debug",
"--bitcoin-rpcuser=btcuser",
"--bitcoin-rpcpassword=btcpass",
"--dev-fast-gossip",
"--dev-bitcoind-poll=1",
"--allow-deprecated-apis=false",
}
if extraOps != nil {
for arg, val := range extraOps {
if val == "" {
args = append(args, fmt.Sprintf("--%s", arg))
} else {
args = append(args, fmt.Sprintf("--%s=%s", arg, val))
}
}
}
lightningd := exec.Command(lightningPath, args...)
lightningd.SysProcAttr = &syscall.SysProcAttr{
Pdeathsig: syscall.SIGKILL,
}
stderr, err := lightningd.StderrPipe()
check(t, err)
stdout, err := lightningd.StdoutPipe()
check(t, err)
log.Printf("starting %s on %d...", lightningPath, port)
err = lightningd.Start()
check(t, err)
go func() {
// print any stderr output to the test log
log.Printf("Starting stderr scanner")
scanner := bufio.NewScanner(stderr)
for scanner.Scan() {
log.Println(scanner.Text())
}
}()
go func() {
// print any stderr output to the test log
log.Printf("Starting stdout scanner")
scanner := bufio.NewScanner(stdout)
for scanner.Scan() {
log.Println(scanner.Text())
}
}()
go func() {
err := lightningd.Wait()
if err != nil {
t.Fatal(fmt.Sprintf("lightningd exited with error %s", err))
}
log.Printf("process exited normally")
}()
time.Sleep(200 * time.Millisecond)
lightningdDir = filepath.Join(lightningdDir, "regtest")
node := &Node{nil, lightningdDir}
log.Printf("starting node in %s\n", lightningdDir)
node.waitForLog(t, "Server started with public key", 30)
log.Printf(" lightningd started (%d)!\n", lightningd.Process.Pid)
node.rpc = glightning.NewLightning()
node.rpc.StartUp("lightning-rpc", lightningdDir)
return node
}
func short(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
}
func TestBitcoinProxy(t *testing.T) {
short(t)
testDir, _, _, btc := Init(t)
defer CleanUp(testDir)
addr, err := btc.GetNewAddress(gbitcoin.Bech32)
check(t, err)
assert.NotNil(t, addr)
}
func TestConnectRpc(t *testing.T) {
short(t)
testDir, dataDir, btcPid, _ := Init(t)
defer CleanUp(testDir)
l1 := LnNode(t, testDir, dataDir, btcPid, "one", nil)
l1Info, _ := l1.rpc.GetInfo()
assert.Equal(t, 1, len(l1Info.Binding))
l1Addr := l1Info.Binding[0]
l2 := LnNode(t, testDir, dataDir, btcPid, "two", nil)
peerId, err := l2.rpc.Connect(l1Info.Id, l1Addr.Addr, uint(l1Addr.Port))
check(t, err)
assert.Equal(t, peerId, l1Info.Id)
}
func TestConfigsRpc(t *testing.T) {
short(t)
testDir, dataDir, btcPid, _ := Init(t)
defer CleanUp(testDir)
l1 := LnNode(t, testDir, dataDir, btcPid, "one", nil)
configs, err := l1.rpc.ListConfigs()
check(t, err)
assert.Equal(t, "lightning-rpc", configs["rpc-file"])
assert.Equal(t, false, configs["always-use-proxy"])
network, err := l1.rpc.GetConfig("network")
check(t, err)
assert.Equal(t, "regtest", network)
}
func TestHelpRpc(t *testing.T) {
short(t)
testDir, dataDir, btcPid, _ := Init(t)
defer CleanUp(testDir)
l1 := LnNode(t, testDir, dataDir, btcPid, "one", nil)
commands, err := l1.rpc.Help()
check(t, err)
if len(commands) == 0 {
t.Error("No help commands returned")
}
cmd, err := l1.rpc.HelpFor("help")
check(t, err)
assert.Equal(t, "help [command]", cmd.NameAndUsage)
}
func TestSignCheckMessage(t *testing.T) {
short(t)
msg := "hello there"
testDir, dataDir, btcPid, _ := Init(t)
defer CleanUp(testDir)
l1 := LnNode(t, testDir, dataDir, btcPid, "one", nil)
l2 := LnNode(t, testDir, dataDir, btcPid, "two", nil)
l1Info, _ := l1.rpc.GetInfo()
signed, err := l1.rpc.SignMessage(msg)
check(t, err)
v, err := l2.rpc.CheckMessageVerify(msg, signed.ZBase, l1Info.Id)
check(t, err)
assert.True(t, v)
}
func TestListTransactions(t *testing.T) {
short(t)
testDir, dataDir, btcPid, btc := Init(t)
defer CleanUp(testDir)
l1 := LnNode(t, testDir, dataDir, btcPid, "one", nil)
fundNode(t, "1.0", l1, btc)
fundNode(t, "1.0", l1, btc)
waitToSync(l1)
trans, err := l1.rpc.ListTransactions()
check(t, err)
assert.Equal(t, len(trans), 2)
}
func fundNode(t *testing.T, amount string, n *Node, b *gbitcoin.Bitcoin) {
addr, err := n.rpc.NewAddr()
check(t, err)
_, err = b.SendToAddress(addr, amount)
check(t, err)
mineBlocks(t, 1, b)
}
// n is number of blocks to mine
func mineBlocks(t *testing.T, n uint, b *gbitcoin.Bitcoin) {
addr, err := b.GetNewAddress(gbitcoin.Bech32)
check(t, err)
_, err = b.GenerateToAddress(addr, n)
check(t, err)
}
func waitToSync(n *Node) {
for {
info, _ := n.rpc.GetInfo()
if info.IsLightningdSync() {
break
}
time.Sleep(100 * time.Millisecond)
}
}
func TestCreateOnion(t *testing.T) {
short(t)
testDir, dataDir, btcPid, _ := Init(t)
defer CleanUp(testDir)
l1 := LnNode(t, testDir, dataDir, btcPid, "one", nil)
hops := []glightning.Hop{
glightning.Hop{
Pubkey: "02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619",
Payload: "000000000000000000000000000000000000000000000000000000000000000000",
},
glightning.Hop{
Pubkey: "0324653eac434488002cc06bbfb7f10fe18991e35f9fe4302dbea6d2353dc0ab1c",
Payload: "140101010101010101000000000000000100000001",
},
glightning.Hop{
Pubkey: "027f31ebc5462c1fdce1b737ecff52d37d75dea43ce11c74d25aa297165faa2007",
Payload: "fd0100000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f606162636465666768696a6b6c6d6e6f707172737475767778797a7b7c7d7e7f808182838485868788898a8b8c8d8e8f909192939495969798999a9b9c9d9e9fa0a1a2a3a4a5a6a7a8a9aaabacadaeafb0b1b2b3b4b5b6b7b8b9babbbcbdbebfc0c1c2c3c4c5c6c7c8c9cacbcccdcecfd0d1d2d3d4d5d6d7d8d9dadbdcdddedfe0e1e2e3e4e5e6e7e8e9eaebecedeeeff0f1f2f3f4f5f6f7f8f9fafbfcfdfeff",
},
glightning.Hop{
Pubkey: "032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991",
Payload: "140303030303030303000000000000000300000003",
},
glightning.Hop{
Pubkey: "02edabbd16b41c8371b92ef2f04c1185b4f03b6dcd52ba9b78d9d7c89c8f221145",
Payload: "000404040404040404000000000000000400000004000000000000000000000000",
},
}
privateHash := "BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB"
resp, err := l1.rpc.CreateOnion(hops, privateHash, "")
check(t, err)
assert.Equal(t, len(resp.SharedSecrets), len(hops))
assert.Equal(t, len(resp.Onion), 2*1366)
privateHash = "4242424242424242424242424242424242424242424242424242424242424242"
sessionKey := "4141414141414141414141414141414141414141414141414141414141414141"
resp, err = l1.rpc.CreateOnion(hops, privateHash, sessionKey)
check(t, err)
firstHop := glightning.FirstHop{
ShortChannelId: "100x1x1",
Direction: 1,
AmountMsat: "1000sat",
Delay: 8,
}
// Ideally we'd do a 'real' send onion but we don't
// need to know if c-lightning works, only that the API
// functions correctly...
_, err = l1.rpc.SendOnionWithDetails(resp.Onion, firstHop, privateHash, "label", resp.SharedSecrets, nil)
// ... which means we expect an error back!
assert.NotNil(t, err)
assert.Equal(t, err.Error(), "204:No connection to first peer found")
}
func getShortChannelId(t *testing.T, node1, node2 *Node) string {
info, err := node2.rpc.GetInfo()
check(t, err)
peer, err := node1.rpc.GetPeer(info.Id)
check(t, err)
if peer == nil || len(peer.Channels) == 0 {
t.Fatal(fmt.Sprintf("peer %s not found", info.Id))
}
return peer.Channels[0].ShortChannelId
}
func TestPluginOptions(t *testing.T) {
short(t)
testDir, dataDir, btcPid, _ := Init(t)
defer CleanUp(testDir)
// try with the defaults
l1 := LnNode(t, testDir, dataDir, btcPid, "one", nil)
exPlugin := pluginPath(t, "plugin_example")
_, err := l1.rpc.StartPlugin(exPlugin)
check(t, err)
l1.waitForLog(t, `Is this initial node startup\? false`, 1)
l1.waitForLog(t, `the bool option is set to true`, 1)
l1.waitForLog(t, `the int option is set to 11`, 1)
l1.waitForLog(t, `the flag option is set\? false`, 1)
// now try with some different values!
optsMap := make(map[string]string)
optsMap["plugin"] = exPlugin
optsMap["int_opt"] = "-55"
optsMap["bool_opt"] = "false"
optsMap["flag_opt"] = ""
l2 := LnNode(t, testDir, dataDir, btcPid, "two", optsMap)
l2.waitForLog(t, `Is this initial node startup\? true`, 1)
l2.waitForLog(t, `the bool option is set to false`, 1)
l2.waitForLog(t, `the int option is set to -55`, 1)
l2.waitForLog(t, `the flag option is set\? true`, 1)
}
// ok, now let's check the plugin subs+hooks etc
func TestPlugins(t *testing.T) {
short(t)
testDir, dataDir, btcPid, btc := Init(t)
defer CleanUp(testDir)
l1 := LnNode(t, testDir, dataDir, btcPid, "one", nil)
plugins, err := l1.rpc.ListPlugins()
check(t, err)
pluginCount := len(plugins)
exPlugin := pluginPath(t, "plugin_example")
plugins, err = l1.rpc.StartPlugin(exPlugin)
check(t, err)
assert.Equal(t, pluginCount+1, len(plugins))
l1.waitForLog(t, `Is this initial node startup\? false`, 1)
l1Info, _ := l1.rpc.GetInfo()
assert.Equal(t, 1, len(l1Info.Binding))
l1Addr := l1Info.Binding[0]
l2 := LnNode(t, testDir, dataDir, btcPid, "two", nil)
plugins, err = l2.rpc.StartPlugin(exPlugin)
check(t, err)
l2.waitForLog(t, `Is this initial node startup\? false`, 1)
// We should have a third node!
l3 := LnNode(t, testDir, dataDir, btcPid, "three", nil)
check(t, err)
peerId, err := l2.rpc.Connect(l1Info.Id, "localhost", uint(l1Addr.Port))
check(t, err)
l3Info, _ := l3.rpc.GetInfo()
peer3, err := l2.rpc.Connect(l3Info.Id, "localhost", uint(l3Info.Binding[0].Port))
check(t, err)
fundNode(t, "1.0", l2, btc)
waitToSync(l1)
waitToSync(l2)
// open a channel
amount := glightning.NewSat(10000000)
feerate := glightning.NewFeeRate(glightning.PerKw, uint(253))
pushSats := glightning.NewMsat(10000)
_, err = l2.rpc.FundChannelExt(peerId, amount, feerate, true, nil, pushSats)
check(t, err)
// wait til the change is onchain
advanceChain(t, l2, btc, 1)
// fund a second channel!
_, err = l2.rpc.FundChannelAtFee(peer3, amount, feerate)
check(t, err)
mineBlocks(t, 6, btc)
waitForChannelReady(t, l2, l3)
waitForChannelReady(t, l2, l1)
// there's two now??
scid23 := getShortChannelId(t, l2, l3)
l2.waitForLog(t, fmt.Sprintf(`Received channel_update for channel %s/. now ACTIVE`, scid23), 20)
scid21 := getShortChannelId(t, l2, l1)
l2.waitForLog(t, fmt.Sprintf(`Received channel_update for channel %s/. now ACTIVE`, scid21), 20)
// wait for everybody to know about other channels
waitForChannelActive(l1, scid23)
waitForChannelActive(l3, scid21)
// warnings go off because of feerate misfires
l1.waitForLog(t, "Got a warning!!", 1)
// channel opened notification
l1.waitForLog(t, "channel opened", 1)
invAmt := uint64(100000)
invAmt2 := uint64(10000)
inv, err := l1.rpc.CreateInvoice(invAmt, "push pay", "money", 100, nil, "", false)
inv2, err := l3.rpc.CreateInvoice(invAmt, "push pay two", "money two", 100, nil, "", false)
check(t, err)
route, err := l2.rpc.GetRouteSimple(peerId, invAmt, 1.0)
check(t, err)
// l2 -> l1
_, err = l2.rpc.SendPayLite(route, inv.PaymentHash)
check(t, err)
_, err = l2.rpc.WaitSendPay(inv.PaymentHash, 0)
check(t, err)
// SEND PAY SUCCESS
l2.waitForLog(t, "send pay success!", 1)
l1.waitForLog(t, "invoice paid", 1)
/* ?? why no work
l2.waitForLog(t, "invoice payment called", 1)
*/
// now try to route from l1 -> l3 (but with broken middle)
route2, err := l1.rpc.GetRouteSimple(peer3, invAmt2, 1.0)
check(t, err)
_, err = l2.rpc.CloseNormal(peer3)
check(t, err)
mineBlocks(t, 1, btc)
_, err = l1.rpc.SendPayLite(route2, inv2.PaymentHash)
check(t, err)
_, failure := l1.rpc.WaitSendPay(inv2.PaymentHash, 0)
pe, ok := failure.(*glightning.PaymentError)
if !ok {
t.Fatal(failure)
}
data := pe.Data
assert.Equal(t, data.Status, "failed")
assert.Equal(t, data.AmountSentMilliSatoshi, "10001msat")
// SEND PAY FAILURE
l1.waitForLog(t, "send pay failure!", 1)
}
func TestAcceptWithClose(t *testing.T) {
short(t)
testDir, dataDir, btcPid, btc := Init(t)
defer CleanUp(testDir)
l1 := LnNode(t, testDir, dataDir, btcPid, "one", nil)
exPlugin := pluginPath(t, "plugin_openchan")
_, err := l1.rpc.StartPlugin(exPlugin)
l1.waitForLog(t, "successfully init'd!", 1)
l1Info, _ := l1.rpc.GetInfo()
assert.Equal(t, 1, len(l1Info.Binding))
l1Addr := l1Info.Binding[0]
l2 := LnNode(t, testDir, dataDir, btcPid, "two", nil)
peerId, err := l2.rpc.Connect(l1Info.Id, "localhost", uint(l1Addr.Port))
check(t, err)
fundNode(t, "1.0", l2, btc)
waitToSync(l1)
waitToSync(l2)
feerate := glightning.NewFeeRate(glightning.PerKw, uint(253))
amount := uint64(100000)
starter, err := l2.rpc.StartFundChannel(peerId, amount, true, feerate, "")
check(t, err)
// build a transaction
outs := []*gbitcoin.TxOut{
&gbitcoin.TxOut{
Address: starter.Address,
Satoshi: amount,
},
}
rawtx, err := btc.CreateRawTx(nil, outs, nil, nil)
check(t, err)
fundedtx, err := btc.FundRawTx(rawtx)
check(t, err)
tx, err := btc.DecodeRawTx(fundedtx.TxString)
check(t, err)
txout, err := tx.FindOutputIndex(starter.Address)
check(t, err)
_, err = l2.rpc.CompleteFundChannel(peerId, tx.TxId, txout)
check(t, err)
l1.waitForLog(t, "openchannel called", 1)
l2info, _ := l2.rpc.GetInfo()
peer, err := l1.rpc.GetPeer(l2info.Id)
check(t, err)
closeTo := "bcrt1q8q4xevfuwgsm7mxant8aadz50xt67768s4332d"
assert.Equal(t, closeTo, peer.Channels[0].CloseToAddress)
}
func TestCloseTo(t *testing.T) {
short(t)
testDir, dataDir, btcPid, btc := Init(t)
defer CleanUp(testDir)
l1 := LnNode(t, testDir, dataDir, btcPid, "one", nil)
l1Info, _ := l1.rpc.GetInfo()
assert.Equal(t, 1, len(l1Info.Binding))
l1Addr := l1Info.Binding[0]
l2 := LnNode(t, testDir, dataDir, btcPid, "two", nil)
peerId, err := l2.rpc.Connect(l1Info.Id, "localhost", uint(l1Addr.Port))
check(t, err)
fundNode(t, "1.0", l2, btc)
waitToSync(l1)
waitToSync(l2)
closeTo, err := btc.GetNewAddress(gbitcoin.Bech32)
check(t, err)
feerate := glightning.NewFeeRate(glightning.PerKw, uint(253))
amount := uint64(100000)
starter, err := l2.rpc.StartFundChannel(peerId, amount, true, feerate, closeTo)
check(t, err)
// build a transaction
outs := []*gbitcoin.TxOut{
&gbitcoin.TxOut{
Address: starter.Address,
Satoshi: amount,
},
}
rawtx, err := btc.CreateRawTx(nil, outs, nil, nil)
check(t, err)
fundedtx, err := btc.FundRawTx(rawtx)
check(t, err)
tx, err := btc.DecodeRawTx(fundedtx.TxString)
check(t, err)
txout, err := tx.FindOutputIndex(starter.Address)
check(t, err)
_, err = l2.rpc.CompleteFundChannel(peerId, tx.TxId, txout)
check(t, err)
peer, err := l2.rpc.GetPeer(peerId)
check(t, err)
assert.Equal(t, closeTo, peer.Channels[0].CloseToAddress)
}
func TestInvoiceFieldsOnPaid(t *testing.T) {
short(t)
testDir, dataDir, btcPid, btc := Init(t)
defer CleanUp(testDir)
l1 := LnNode(t, testDir, dataDir, btcPid, "one", nil)
l1Info, _ := l1.rpc.GetInfo()
assert.Equal(t, 1, len(l1Info.Binding))
l1Addr := l1Info.Binding[0]
l2 := LnNode(t, testDir, dataDir, btcPid, "two", nil)
peerId, err := l2.rpc.Connect(l1Info.Id, "localhost", uint(l1Addr.Port))
check(t, err)
fundNode(t, "1.0", l2, btc)
waitToSync(l1)
waitToSync(l2)
// open a channel
amount := glightning.NewSat(10000000)
feerate := glightning.NewFeeRate(glightning.PerKw, uint(253))
_, err = l2.rpc.FundChannelAtFee(peerId, amount, feerate)
check(t, err)
// wait til the change is onchain
advanceChain(t, l2, btc, 6)
waitForChannelReady(t, l2, l1)
invAmt := uint64(100000)
invO, err := l1.rpc.CreateInvoice(invAmt, "pay me", "money", 100, nil, "", false)
check(t, err)
_, err = l2.rpc.PayBolt(invO.Bolt11)
check(t, err)
invA, err := l1.rpc.GetInvoice("pay me")
check(t, err)
assert.True(t, len(invA.PaymentHash) > 0)
}
func TestBtcBackend(t *testing.T) {
short(t)
testDir, dataDir, btcPid, btc := Init(t)
defer CleanUp(testDir)
exPlugin := pluginPath(t, "plugin_btc")
optsMap := make(map[string]string)
optsMap["disable-plugin"] = "bcli"
optsMap["plugin"] = exPlugin
l1 := LnNode(t, testDir, dataDir, btcPid, "one", optsMap)
l2 := LnNode(t, testDir, dataDir, btcPid, "two", optsMap)
l1.waitForLog(t, "All Bitcoin plugin commands registered", 1)
l1.waitForLog(t, "called getchaininfo", 1)
l1.waitForLog(t, "called blockbyheight", 1)
fundNode(t, "1.0", l1, btc)
waitToSync(l1)
// send yourself some funds, so sendrawtransaction gets called
addr, err := l1.rpc.NewAddr()
check(t, err)
amt := glightning.NewSat(10000)
rate := glightning.NewFeeRate(glightning.PerKw, 253)
_, err = l1.rpc.Withdraw(addr, amt, rate, nil)
check(t, err)
l1.waitForLog(t, "called sendrawtransaction", 1)
mineBlocks(t, 1, btc)
// try to open a channel and then cancel it, so getutxo gets called
connectInfo := connectNode(t, l1, l2)
channelfunds := uint64(100000)
starter, err := l1.rpc.StartFundChannel(connectInfo.Id, channelfunds, true, rate, "")
check(t, err)
// build a transaction
outs := []*gbitcoin.TxOut{
&gbitcoin.TxOut{
Address: starter.Address,
Satoshi: channelfunds,
},
}
rawtx, err := btc.CreateRawTx(nil, outs, nil, nil)
check(t, err)
fundedtx, err := btc.FundRawTx(rawtx)
check(t, err)
tx, err := btc.DecodeRawTx(fundedtx.TxString)
check(t, err)
txout, err := tx.FindOutputIndex(starter.Address)
check(t, err)
_, err = l1.rpc.CompleteFundChannel(connectInfo.Id, tx.TxId, txout)
check(t, err)
// ok this will call a check for the utxo...
canceled, err := l1.rpc.CancelFundChannel(connectInfo.Id)
check(t, err)
assert.True(t, canceled)
l1.waitForLog(t, "called getutxo", 1)
l1.waitForLog(t, "called estimatefees", 1)
}
// let's try out some hooks!
func TestHooks(t *testing.T) {
short(t)
testDir, dataDir, btcPid, _ := Init(t)
defer CleanUp(testDir)
l1 := LnNode(t, testDir, dataDir, btcPid, "one", nil)
l2 := LnNode(t, testDir, dataDir, btcPid, "two", nil)
exPlugin := pluginPath(t, "plugin_example")
loadPlugin(t, l1, exPlugin)
l1Info, _ := l1.rpc.GetInfo()
l2Info, _ := l2.rpc.GetInfo()
peer := connectNode(t, l1, l2)
assert.Equal(t, peer.Id, l2Info.Id)
l1.waitForLog(t, "peer connected called", 1)
l2.rpc.Disconnect(l1Info.Id, true)
l1.waitForLog(t, "disconnect called for", 1)
}
func TestDbWrites(t *testing.T) {
short(t)
testDir, dataDir, btcPid, _ := Init(t)
defer CleanUp(testDir)
exPlugin := pluginPath(t, "plugin_dbwrites")
optsMap := make(map[string]string)
optsMap["plugin"] = exPlugin
l1 := LnNode(t, testDir, dataDir, btcPid, "one", optsMap)
waitToSync(l1)
l1.waitForLog(t, "dbwrite called 1", 1)
}
func TestRpcCmd(t *testing.T) {
short(t)
testDir, dataDir, btcPid, _ := Init(t)
defer CleanUp(testDir)
l1 := LnNode(t, testDir, dataDir, btcPid, "one", nil)
l2 := LnNode(t, testDir, dataDir, btcPid, "two", nil)
exPlugin := pluginPath(t, "plugin_rpccmd")
loadPlugin(t, l1, exPlugin)
connectInfo := connectNode(t, l1, l2)
addr, err := l1.rpc.NewAddress(glightning.P2SHSegwit)
check(t, err)
// we pass in segwit but the rpc_command hook always gives
// us back bech32
assert.NotNil(t, addr.Bech32, "tb1")
assert.Equal(t, "", addr.P2SHSegwit)
amt := glightning.NewSat(10000)
rate := glightning.NewFeeRate(glightning.PerKw, 253)
res, err := l1.rpc.Withdraw(addr.Bech32, amt, rate, nil)
assert.Equal(t, "-401:withdrawals not allowed", err.Error())
assert.Equal(t, &glightning.WithdrawResult{}, res)
// this fails because we can't handle random responses
_, err = l1.rpc.Ping(connectInfo.Id)
assert.NotNil(t, err)
}
func pluginPath(t *testing.T, pluginName string) string {
// Get the path to our current test binary
val, ok := os.LookupEnv("PLUGINS_PATH")
if !ok {
t.Fatal("No plugin example path (PLUGINS_PATH) passed in")
}
return filepath.Join(val, pluginName)
}
func loadPlugin(t *testing.T, n *Node, exPlugin string) {
_, err := n.rpc.StartPlugin(exPlugin)
check(t, err)
//n.waitForLog(t, `successfully init'd`, 5)
}
func connectNode(t *testing.T, from, to *Node) *glightning.ConnectResult {
info, _ := to.rpc.GetInfo()
conn, err := from.rpc.ConnectPeer(info.Id, info.Binding[0].Addr, uint(info.Binding[0].Port))
check(t, err)
return conn
}
func openChannel(t *testing.T, btc *gbitcoin.Bitcoin, from, to *Node, amt uint64, waitTilReady bool) {
connectInfo := connectNode(t, from, to)
amount := glightning.NewSat64(amt)
feerate := glightning.NewFeeRate(glightning.PerKw, uint(253))
_, err := from.rpc.FundChannelAtFee(connectInfo.Id, amount, feerate)
check(t, err)
mineBlocks(t, 6, btc)
if waitTilReady {
waitForChannelReady(t, from, to)
}
}
func TestFeatureBits(t *testing.T) {
short(t)
testDir, dataDir, btcPid, btc := Init(t)
defer CleanUp(testDir)
pp := pluginPath(t, "plugin_featurebits")
optsMap := make(map[string]string)
optsMap["plugin"] = pp
l1 := LnNode(t, testDir, dataDir, btcPid, "one", optsMap)
l2 := LnNode(t, testDir, dataDir, btcPid, "two", nil)
info := connectNode(t, l2, l1)
// check for init feature bits in connect response (1 << 101)
assert.NotNil(t, info.Features)
assert.True(t, info.Features.IsSet(101))
// open a channel + wait til active
l1Info, _ := l1.rpc.GetInfo()
fundNode(t, "1.0", l2, btc)
waitToSync(l2)
amount := glightning.NewSat(10000000)
feerate := glightning.NewFeeRate(glightning.PerKw, uint(253))
_, err := l2.rpc.FundChannelAtFee(l1Info.Id, amount, feerate)
check(t, err)
mineBlocks(t, 6, btc)
waitForChannelReady(t, l2, l1)
waitForChannelReady(t, l1, l2)
scid21 := getShortChannelId(t, l2, l1)
err = waitForChannelActive(l2, scid21)
check(t, err)
err = waitForChannelActive(l1, scid21)
check(t, err)
// check for init message bits (1 << 101)
peer, _ := l2.rpc.GetPeer(l1Info.Id)
assert.True(t, peer.Features.IsSet(101))
// check for 1 << 105
inv, err := l1.rpc.Invoice(uint64(10000), "test", "desc")
check(t, err)
decoded, err := l1.rpc.DecodeBolt11(inv.Bolt11)
assert.True(t, decoded.Features.IsSet(105))
time.Sleep(1 * time.Second)
node, err := l1.rpc.GetNode(l1Info.Id)
check(t, err)
assert.NotNil(t, node.Features)
assert.True(t, node.Features.IsSet(103))
}
func TestHtlcAcceptedHook(t *testing.T) {
short(t)
testDir, dataDir, btcPid, btc := Init(t)
defer CleanUp(testDir)
l1 := LnNode(t, testDir, dataDir, btcPid, "one", nil)
l2 := LnNode(t, testDir, dataDir, btcPid, "two", nil)
l3 := LnNode(t, testDir, dataDir, btcPid, "three", nil)
// 2nd + 3rd node listens for htlc accepts
exPlugin := pluginPath(t, "plugin_htlcacc")
loadPlugin(t, l2, exPlugin)
loadPlugin(t, l3, exPlugin)
// fund l1 + l2
fundNode(t, "1.0", l1, btc)
fundNode(t, "1.0", l2, btc)
waitToSync(l1)
waitToSync(l2)
// open a channel
openChannel(t, btc, l2, l3, uint64(10000000), true)
openChannel(t, btc, l1, l2, uint64(10000000), true)
// wait for everybody to know about other channels
scid23 := getShortChannelId(t, l2, l3)
l2.waitForLog(t, fmt.Sprintf(`Received channel_update for channel %s/. now ACTIVE`, scid23), 20)
scid21 := getShortChannelId(t, l1, l2)
l2.waitForLog(t, fmt.Sprintf(`Received channel_update for channel %s/. now ACTIVE`, scid21), 20)
err := waitForChannelActive(l1, scid23)
check(t, err)
err = waitForChannelActive(l3, scid21)
check(t, err)
invAmt := uint64(100000)
inv, err := l3.rpc.CreateInvoice(invAmt, "push pay", "money", 100, nil, "", false)
check(t, err)
// now route from l1 -> l3
_, err = l1.rpc.PayBolt(inv.Bolt11)
check(t, err)
_, err = l1.rpc.WaitSendPay(inv.PaymentHash, 0)
check(t, err)
// l2 should have gotten an htlc_accept hook call
l2.waitForLog(t, "htlc_accepted called", 1)
l2.waitForLog(t, `has perhop\? false`, 1)
l2.waitForLog(t, `type is tlv`, 1)
l2.waitForLog(t, `payment secret is empty`, 1)
l2.waitForLog(t, `amount is empty`, 1)
// l3 should have gotten an htlc_accept hook call, with different info
l3.waitForLog(t, "htlc_accepted called", 1)
l3.waitForLog(t, `type is tlv`, 1)
l3.waitForLog(t, `has perhop\? false`, 1)
l3.waitForLog(t, `payment secret is not empty`, 1)
l3.waitForLog(t, `amount is 10000\dmsat`, 1)
}
| [
"\"LIGHTNINGD_PATH\""
]
| []
| [
"LIGHTNINGD_PATH"
]
| [] | ["LIGHTNINGD_PATH"] | go | 1 | 0 | |
bluelog/__init__.py |
import logging
import os
from logging.handlers import SMTPHandler, RotatingFileHandler
import click
from flask import Flask, render_template, request
from flask_login import current_user
from flask_sqlalchemy import get_debug_queries
from flask_wtf.csrf import CSRFError
from bluelog.blueprints.admin import admin_bp
from bluelog.blueprints.auth import auth_bp
from bluelog.blueprints.blog import blog_bp
from bluelog.extensions import bootstrap, db, login_manager, csrf, ckeditor, mail, moment, toolbar, migrate
from bluelog.models import Admin, Post, Category, Comment, Link
from bluelog.settings import config
basedir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
def create_app(config_name=None):
if config_name is None:
config_name = os.getenv('FLASK_CONFIG', 'development')
app = Flask('bluelog')
app.config.from_object(config[config_name])
register_logging(app)
register_extensions(app)
register_blueprints(app)
register_commands(app)
register_errors(app)
register_shell_context(app)
register_template_context(app)
register_request_handlers(app)
return app
def register_logging(app):
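# RequestFormatter enriches each log record with the request URL and client address so the handlers below can include them.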
class RequestFormatter(logging.Formatter):
def format(self, record):
record.url = request.url
record.remote_addr = request.remote_addr
return super(RequestFormatter, self).format(record)
request_formatter = RequestFormatter(
'[%(asctime)s] %(remote_addr)s requested %(url)s\n'
'%(levelname)s in %(module)s: %(message)s'
)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler = RotatingFileHandler(os.path.join(basedir, 'logs/bluelog.log'),
maxBytes=10 * 1024 * 1024, backupCount=10)
file_handler.setFormatter(formatter)
file_handler.setLevel(logging.INFO)
mail_handler = SMTPHandler(
mailhost=app.config['MAIL_SERVER'],
fromaddr=app.config['MAIL_USERNAME'],
toaddrs=['ADMIN_EMAIL'],
subject='Bluelog Application Error',
credentials=(app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD']))
mail_handler.setLevel(logging.ERROR)
mail_handler.setFormatter(request_formatter)
if not app.debug:
app.logger.addHandler(mail_handler)
app.logger.addHandler(file_handler)
def register_extensions(app):
bootstrap.init_app(app)
db.init_app(app)
login_manager.init_app(app)
csrf.init_app(app)
ckeditor.init_app(app)
mail.init_app(app)
moment.init_app(app)
toolbar.init_app(app)
migrate.init_app(app, db)
def register_blueprints(app):
app.register_blueprint(blog_bp)
app.register_blueprint(admin_bp, url_prefix='/admin')
app.register_blueprint(auth_bp, url_prefix='/auth')
def register_shell_context(app):
@app.shell_context_processor
def make_shell_context():
return dict(db=db, Admin=Admin, Post=Post, Category=Category, Comment=Comment)
def register_template_context(app):
@app.context_processor
def make_template_context():
admin = Admin.query.first()
categories = Category.query.order_by(Category.name).all()
links = Link.query.order_by(Link.name).all()
if current_user.is_authenticated:
unread_comments = Comment.query.filter_by(reviewed=False).count()
else:
unread_comments = None
return dict(
admin=admin, categories=categories,
links=links, unread_comments=unread_comments)
def register_errors(app):
@app.errorhandler(400)
def bad_request(e):
return render_template('errors/400.html'), 400
@app.errorhandler(404)
def page_not_found(e):
return render_template('errors/404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
return render_template('errors/500.html'), 500
@app.errorhandler(CSRFError)
def handle_csrf_error(e):
return render_template('errors/400.html', description=e.description), 400
def register_commands(app):
@app.cli.command()
@click.option('--drop', is_flag=True, help='Create after drop.')
def initdb(drop):
"""Initialize the database."""
if drop:
click.confirm('This operation will delete the database, do you want to continue?', abort=True)
db.drop_all()
click.echo('Dropped tables.')
db.create_all()
click.echo('Initialized database.')
@app.cli.command()
@click.option('--username', prompt=True, help='The username used to login.')
@click.option('--password', prompt=True, hide_input=True,
confirmation_prompt=True, help='The password used to login.')
def init(username, password):
"""Building Bluelog, just for you."""
click.echo('Initializing the database...')
db.create_all()
admin = Admin.query.first()
if admin is not None:
click.echo('The administrator already exists, updating...')
admin.username = username
admin.set_password(password)
else:
click.echo('Creating the temporary administrator account...')
admin = Admin(
username=username,
blog_title='Bluelog',
blog_sub_title="No, I'm the real thing.",
name='Admin',
about='Anything about you.'
)
admin.set_password(password)
db.session.add(admin)
category = Category.query.first()
if category is None:
click.echo('Creating the default category...')
category = Category(name='Default')
db.session.add(category)
db.session.commit()
click.echo('Done.')
@app.cli.command()
@click.option('--category', default=10, help='Quantity of categories, default is 10.')
@click.option('--post', default=50, help='Quantity of posts, default is 50.')
@click.option('--comment', default=500, help='Quantity of comments, default is 500.')
def forge(category, post, comment):
"""Generate fake data."""
from bluelog.fakes import fake_admin, fake_categories, fake_posts, fake_comments, fake_links
db.drop_all()
db.create_all()
click.echo('Generating the administrator...')
fake_admin()
click.echo('Generating %d categories...' % category)
fake_categories(category)
click.echo('Generating %d posts...' % post)
fake_posts(post)
click.echo('Generating %d comments...' % comment)
fake_comments(comment)
click.echo('Generating links...')
fake_links()
click.echo('Done.')
def register_request_handlers(app):
@app.after_request
def query_profiler(response):
for q in get_debug_queries():
if q.duration >= app.config['BLUELOG_SLOW_QUERY_THRESHOLD']:
app.logger.warning(
'Slow query: Duration: %fs\n Context: %s\nQuery: %s\n '
% (q.duration, q.context, q.statement)
)
return response
| []
| []
| [
"FLASK_CONFIG"
]
| [] | ["FLASK_CONFIG"] | python | 1 | 0 | |
pkg/provider/kubernetes/crd/kubernetes.go | package crd
import (
"bufio"
"bytes"
"context"
"crypto/sha256"
"errors"
"fmt"
"os"
"sort"
"strings"
"time"
"github.com/cenkalti/backoff/v4"
"github.com/containous/traefik/v2/pkg/config/dynamic"
"github.com/containous/traefik/v2/pkg/job"
"github.com/containous/traefik/v2/pkg/log"
"github.com/containous/traefik/v2/pkg/provider"
"github.com/containous/traefik/v2/pkg/provider/kubernetes/crd/traefik/v1alpha1"
"github.com/containous/traefik/v2/pkg/safe"
"github.com/containous/traefik/v2/pkg/tls"
"github.com/containous/traefik/v2/pkg/types"
"github.com/mitchellh/hashstructure"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
)
const (
annotationKubernetesIngressClass = "kubernetes.io/ingress.class"
traefikDefaultIngressClass = "traefik"
)
const (
providerName = "kubernetescrd"
providerNamespaceSeparator = "@"
)
// Provider holds configurations of the provider.
type Provider struct {
Endpoint string `description:"Kubernetes server endpoint (required for external cluster client)." json:"endpoint,omitempty" toml:"endpoint,omitempty" yaml:"endpoint,omitempty"`
Token string `description:"Kubernetes bearer token (not needed for in-cluster client)." json:"token,omitempty" toml:"token,omitempty" yaml:"token,omitempty"`
CertAuthFilePath string `description:"Kubernetes certificate authority file path (not needed for in-cluster client)." json:"certAuthFilePath,omitempty" toml:"certAuthFilePath,omitempty" yaml:"certAuthFilePath,omitempty"`
DisablePassHostHeaders bool `description:"Kubernetes disable PassHost Headers." json:"disablePassHostHeaders,omitempty" toml:"disablePassHostHeaders,omitempty" yaml:"disablePassHostHeaders,omitempty" export:"true"`
Namespaces []string `description:"Kubernetes namespaces." json:"namespaces,omitempty" toml:"namespaces,omitempty" yaml:"namespaces,omitempty" export:"true"`
LabelSelector string `description:"Kubernetes label selector to use." json:"labelSelector,omitempty" toml:"labelSelector,omitempty" yaml:"labelSelector,omitempty" export:"true"`
IngressClass string `description:"Value of kubernetes.io/ingress.class annotation to watch for." json:"ingressClass,omitempty" toml:"ingressClass,omitempty" yaml:"ingressClass,omitempty" export:"true"`
ThrottleDuration types.Duration `description:"Ingress refresh throttle duration" json:"throttleDuration,omitempty" toml:"throttleDuration,omitempty" yaml:"throttleDuration,omitempty"`
lastConfiguration safe.Safe
}
func (p *Provider) newK8sClient(ctx context.Context, labelSelector string) (*clientWrapper, error) {
labelSel, err := labels.Parse(labelSelector)
if err != nil {
return nil, fmt.Errorf("invalid label selector: %q", labelSelector)
}
log.FromContext(ctx).Infof("label selector is: %q", labelSel)
withEndpoint := ""
if p.Endpoint != "" {
withEndpoint = fmt.Sprintf(" with endpoint %s", p.Endpoint)
}
var client *clientWrapper
switch {
case os.Getenv("KUBERNETES_SERVICE_HOST") != "" && os.Getenv("KUBERNETES_SERVICE_PORT") != "":
log.FromContext(ctx).Infof("Creating in-cluster Provider client%s", withEndpoint)
client, err = newInClusterClient(p.Endpoint)
case os.Getenv("KUBECONFIG") != "":
log.FromContext(ctx).Infof("Creating cluster-external Provider client from KUBECONFIG %s", os.Getenv("KUBECONFIG"))
client, err = newExternalClusterClientFromFile(os.Getenv("KUBECONFIG"))
default:
log.FromContext(ctx).Infof("Creating cluster-external Provider client%s", withEndpoint)
client, err = newExternalClusterClient(p.Endpoint, p.Token, p.CertAuthFilePath)
}
if err == nil {
client.labelSelector = labelSel
}
return client, err
}
// Init the provider.
func (p *Provider) Init() error {
return nil
}
// Provide allows the k8s provider to provide configurations to traefik
// using the given configuration channel.
func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.Pool) error {
ctxLog := log.With(context.Background(), log.Str(log.ProviderName, providerName))
logger := log.FromContext(ctxLog)
logger.Debugf("Using label selector: %q", p.LabelSelector)
k8sClient, err := p.newK8sClient(ctxLog, p.LabelSelector)
if err != nil {
return err
}
pool.GoCtx(func(ctxPool context.Context) {
operation := func() error {
eventsChan, err := k8sClient.WatchAll(p.Namespaces, ctxPool.Done())
if err != nil {
logger.Errorf("Error watching kubernetes events: %v", err)
timer := time.NewTimer(1 * time.Second)
select {
case <-timer.C:
return err
case <-ctxPool.Done():
return nil
}
}
throttleDuration := time.Duration(p.ThrottleDuration)
throttledChan := throttleEvents(ctxLog, throttleDuration, pool, eventsChan)
if throttledChan != nil {
eventsChan = throttledChan
}
for {
select {
case <-ctxPool.Done():
return nil
case event := <-eventsChan:
// Note that event is the *first* event that came in during this throttling interval -- if we're hitting our throttle, we may have dropped events.
// This is fine, because we don't treat different event types differently.
// But if we do in the future, we'll need to track more information about the dropped events.
conf := p.loadConfigurationFromCRD(ctxLog, k8sClient)
confHash, err := hashstructure.Hash(conf, nil)
switch {
case err != nil:
logger.Error("Unable to hash the configuration")
case p.lastConfiguration.Get() == confHash:
logger.Debugf("Skipping Kubernetes event kind %T", event)
default:
p.lastConfiguration.Set(confHash)
configurationChan <- dynamic.Message{
ProviderName: providerName,
Configuration: conf,
}
}
// If we're throttling,
// we sleep here for the throttle duration to enforce that we don't refresh faster than our throttle.
// time.Sleep returns immediately if p.ThrottleDuration is 0 (no throttle).
time.Sleep(throttleDuration)
}
}
}
notify := func(err error, time time.Duration) {
logger.Errorf("Provider connection error: %v; retrying in %s", err, time)
}
err := backoff.RetryNotify(safe.OperationWithRecover(operation), backoff.WithContext(job.NewBackOff(backoff.NewExponentialBackOff()), ctxPool), notify)
if err != nil {
logger.Errorf("Cannot connect to Provider: %v", err)
}
})
return nil
}
func (p *Provider) loadConfigurationFromCRD(ctx context.Context, client Client) *dynamic.Configuration {
tlsConfigs := make(map[string]*tls.CertAndStores)
conf := &dynamic.Configuration{
HTTP: p.loadIngressRouteConfiguration(ctx, client, tlsConfigs),
TCP: p.loadIngressRouteTCPConfiguration(ctx, client, tlsConfigs),
UDP: p.loadIngressRouteUDPConfiguration(ctx, client),
TLS: &dynamic.TLSConfiguration{
Certificates: getTLSConfig(tlsConfigs),
Options: buildTLSOptions(ctx, client),
Stores: buildTLSStores(ctx, client),
},
}
for _, middleware := range client.GetMiddlewares() {
id := provider.Normalize(makeID(middleware.Namespace, middleware.Name))
ctxMid := log.With(ctx, log.Str(log.MiddlewareName, id))
basicAuth, err := createBasicAuthMiddleware(client, middleware.Namespace, middleware.Spec.BasicAuth)
if err != nil {
log.FromContext(ctxMid).Errorf("Error while reading basic auth middleware: %v", err)
continue
}
digestAuth, err := createDigestAuthMiddleware(client, middleware.Namespace, middleware.Spec.DigestAuth)
if err != nil {
log.FromContext(ctxMid).Errorf("Error while reading digest auth middleware: %v", err)
continue
}
forwardAuth, err := createForwardAuthMiddleware(client, middleware.Namespace, middleware.Spec.ForwardAuth)
if err != nil {
log.FromContext(ctxMid).Errorf("Error while reading forward auth middleware: %v", err)
continue
}
errorPage, errorPageService, err := createErrorPageMiddleware(client, middleware.Namespace, middleware.Spec.Errors)
if err != nil {
log.FromContext(ctxMid).Errorf("Error while reading error page middleware: %v", err)
continue
}
if errorPage != nil && errorPageService != nil {
serviceName := id + "-errorpage-service"
errorPage.Service = serviceName
conf.HTTP.Services[serviceName] = errorPageService
}
conf.HTTP.Middlewares[id] = &dynamic.Middleware{
AddPrefix: middleware.Spec.AddPrefix,
StripPrefix: middleware.Spec.StripPrefix,
StripPrefixRegex: middleware.Spec.StripPrefixRegex,
ReplacePath: middleware.Spec.ReplacePath,
ReplacePathRegex: middleware.Spec.ReplacePathRegex,
Chain: createChainMiddleware(ctxMid, middleware.Namespace, middleware.Spec.Chain),
IPWhiteList: middleware.Spec.IPWhiteList,
Headers: middleware.Spec.Headers,
Errors: errorPage,
RateLimit: middleware.Spec.RateLimit,
RedirectRegex: middleware.Spec.RedirectRegex,
RedirectScheme: middleware.Spec.RedirectScheme,
BasicAuth: basicAuth,
DigestAuth: digestAuth,
ForwardAuth: forwardAuth,
InFlightReq: middleware.Spec.InFlightReq,
Buffering: middleware.Spec.Buffering,
CircuitBreaker: middleware.Spec.CircuitBreaker,
Compress: middleware.Spec.Compress,
PassTLSClientCert: middleware.Spec.PassTLSClientCert,
Retry: middleware.Spec.Retry,
}
}
cb := configBuilder{client}
for _, service := range client.GetTraefikServices() {
err := cb.buildTraefikService(ctx, service, conf.HTTP.Services)
if err != nil {
log.FromContext(ctx).WithField(log.ServiceName, service.Name).
Errorf("Error while building TraefikService: %v", err)
continue
}
}
return conf
}
func getServicePort(svc *corev1.Service, port int32) (*corev1.ServicePort, error) {
if svc == nil {
return nil, errors.New("service is not defined")
}
if port == 0 {
return nil, errors.New("ingressRoute service port not defined")
}
hasValidPort := false
for _, p := range svc.Spec.Ports {
if p.Port == port {
return &p, nil
}
if p.Port != 0 {
hasValidPort = true
}
}
if svc.Spec.Type != corev1.ServiceTypeExternalName {
return nil, fmt.Errorf("service port not found: %d", port)
}
if hasValidPort {
log.WithoutContext().
Warnf("The port %d from IngressRoute doesn't match with ports defined in the ExternalName service %s/%s.", port, svc.Namespace, svc.Name)
}
return &corev1.ServicePort{Port: port}, nil
}
func createErrorPageMiddleware(client Client, namespace string, errorPage *v1alpha1.ErrorPage) (*dynamic.ErrorPage, *dynamic.Service, error) {
if errorPage == nil {
return nil, nil, nil
}
errorPageMiddleware := &dynamic.ErrorPage{
Status: errorPage.Status,
Query: errorPage.Query,
}
balancerServerHTTP, err := configBuilder{client}.buildServersLB(namespace, errorPage.Service.LoadBalancerSpec)
if err != nil {
return nil, nil, err
}
return errorPageMiddleware, balancerServerHTTP, nil
}
func createForwardAuthMiddleware(k8sClient Client, namespace string, auth *v1alpha1.ForwardAuth) (*dynamic.ForwardAuth, error) {
if auth == nil {
return nil, nil
}
if len(auth.Address) == 0 {
return nil, fmt.Errorf("forward authentication requires an address")
}
forwardAuth := &dynamic.ForwardAuth{
Address: auth.Address,
TrustForwardHeader: auth.TrustForwardHeader,
AuthResponseHeaders: auth.AuthResponseHeaders,
}
if auth.TLS == nil {
return forwardAuth, nil
}
forwardAuth.TLS = &dynamic.ClientTLS{
CAOptional: auth.TLS.CAOptional,
InsecureSkipVerify: auth.TLS.InsecureSkipVerify,
}
if len(auth.TLS.CASecret) > 0 {
caSecret, err := loadCASecret(namespace, auth.TLS.CASecret, k8sClient)
if err != nil {
return nil, fmt.Errorf("failed to load auth ca secret: %w", err)
}
forwardAuth.TLS.CA = caSecret
}
if len(auth.TLS.CertSecret) > 0 {
authSecretCert, authSecretKey, err := loadAuthTLSSecret(namespace, auth.TLS.CertSecret, k8sClient)
if err != nil {
return nil, fmt.Errorf("failed to load auth secret: %w", err)
}
forwardAuth.TLS.Cert = authSecretCert
forwardAuth.TLS.Key = authSecretKey
}
return forwardAuth, nil
}
func loadCASecret(namespace, secretName string, k8sClient Client) (string, error) {
secret, ok, err := k8sClient.GetSecret(namespace, secretName)
if err != nil {
return "", fmt.Errorf("failed to fetch secret '%s/%s': %w", namespace, secretName, err)
}
if !ok {
return "", fmt.Errorf("secret '%s/%s' not found", namespace, secretName)
}
if secret == nil {
return "", fmt.Errorf("data for secret '%s/%s' must not be nil", namespace, secretName)
}
if len(secret.Data) != 1 {
return "", fmt.Errorf("found %d elements for secret '%s/%s', must be single element exactly", len(secret.Data), namespace, secretName)
}
for _, v := range secret.Data {
return string(v), nil
}
return "", nil
}
func loadAuthTLSSecret(namespace, secretName string, k8sClient Client) (string, string, error) {
secret, exists, err := k8sClient.GetSecret(namespace, secretName)
if err != nil {
return "", "", fmt.Errorf("failed to fetch secret '%s/%s': %w", namespace, secretName, err)
}
if !exists {
return "", "", fmt.Errorf("secret '%s/%s' does not exist", namespace, secretName)
}
if secret == nil {
return "", "", fmt.Errorf("data for secret '%s/%s' must not be nil", namespace, secretName)
}
if len(secret.Data) != 2 {
return "", "", fmt.Errorf("found %d elements for secret '%s/%s', must be two elements exactly", len(secret.Data), namespace, secretName)
}
return getCertificateBlocks(secret, namespace, secretName)
}
func createBasicAuthMiddleware(client Client, namespace string, basicAuth *v1alpha1.BasicAuth) (*dynamic.BasicAuth, error) {
if basicAuth == nil {
return nil, nil
}
credentials, err := getAuthCredentials(client, basicAuth.Secret, namespace)
if err != nil {
return nil, err
}
return &dynamic.BasicAuth{
Users: credentials,
Realm: basicAuth.Realm,
RemoveHeader: basicAuth.RemoveHeader,
HeaderField: basicAuth.HeaderField,
}, nil
}
func createDigestAuthMiddleware(client Client, namespace string, digestAuth *v1alpha1.DigestAuth) (*dynamic.DigestAuth, error) {
if digestAuth == nil {
return nil, nil
}
credentials, err := getAuthCredentials(client, digestAuth.Secret, namespace)
if err != nil {
return nil, err
}
return &dynamic.DigestAuth{
Users: credentials,
Realm: digestAuth.Realm,
RemoveHeader: digestAuth.RemoveHeader,
HeaderField: digestAuth.HeaderField,
}, nil
}
func getAuthCredentials(k8sClient Client, authSecret, namespace string) ([]string, error) {
if authSecret == "" {
return nil, fmt.Errorf("auth secret must be set")
}
auth, err := loadAuthCredentials(namespace, authSecret, k8sClient)
if err != nil {
return nil, fmt.Errorf("failed to load auth credentials: %w", err)
}
return auth, nil
}
func loadAuthCredentials(namespace, secretName string, k8sClient Client) ([]string, error) {
secret, ok, err := k8sClient.GetSecret(namespace, secretName)
if err != nil {
return nil, fmt.Errorf("failed to fetch secret '%s/%s': %w", namespace, secretName, err)
}
if !ok {
return nil, fmt.Errorf("secret '%s/%s' not found", namespace, secretName)
}
if secret == nil {
return nil, fmt.Errorf("data for secret '%s/%s' must not be nil", namespace, secretName)
}
if len(secret.Data) != 1 {
return nil, fmt.Errorf("found %d elements for secret '%s/%s', must be single element exactly", len(secret.Data), namespace, secretName)
}
var firstSecret []byte
for _, v := range secret.Data {
firstSecret = v
break
}
var credentials []string
scanner := bufio.NewScanner(bytes.NewReader(firstSecret))
for scanner.Scan() {
if cred := scanner.Text(); len(cred) > 0 {
credentials = append(credentials, cred)
}
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("error reading secret for %s/%s: %w", namespace, secretName, err)
}
if len(credentials) == 0 {
return nil, fmt.Errorf("secret '%s/%s' does not contain any credentials", namespace, secretName)
}
return credentials, nil
}
func createChainMiddleware(ctx context.Context, namespace string, chain *v1alpha1.Chain) *dynamic.Chain {
if chain == nil {
return nil
}
var mds []string
for _, mi := range chain.Middlewares {
if strings.Contains(mi.Name, providerNamespaceSeparator) {
if len(mi.Namespace) > 0 {
log.FromContext(ctx).
Warnf("namespace %q is ignored in cross-provider context", mi.Namespace)
}
mds = append(mds, mi.Name)
continue
}
ns := mi.Namespace
if len(ns) == 0 {
ns = namespace
}
mds = append(mds, makeID(ns, mi.Name))
}
return &dynamic.Chain{Middlewares: mds}
}
func buildTLSOptions(ctx context.Context, client Client) map[string]tls.Options {
tlsOptionsCRD := client.GetTLSOptions()
var tlsOptions map[string]tls.Options
if len(tlsOptionsCRD) == 0 {
return tlsOptions
}
tlsOptions = make(map[string]tls.Options)
var nsDefault []string
for _, tlsOption := range tlsOptionsCRD {
logger := log.FromContext(log.With(ctx, log.Str("tlsOption", tlsOption.Name), log.Str("namespace", tlsOption.Namespace)))
var clientCAs []tls.FileOrContent
for _, secretName := range tlsOption.Spec.ClientAuth.SecretNames {
secret, exists, err := client.GetSecret(tlsOption.Namespace, secretName)
if err != nil {
logger.Errorf("Failed to fetch secret %s/%s: %v", tlsOption.Namespace, secretName, err)
continue
}
if !exists {
logger.Warnf("Secret %s/%s does not exist", tlsOption.Namespace, secretName)
continue
}
cert, err := getCABlocks(secret, tlsOption.Namespace, secretName)
if err != nil {
logger.Errorf("Failed to extract CA from secret %s/%s: %v", tlsOption.Namespace, secretName, err)
continue
}
clientCAs = append(clientCAs, tls.FileOrContent(cert))
}
id := makeID(tlsOption.Namespace, tlsOption.Name)
// If the name is default, we override the default config.
if tlsOption.Name == "default" {
id = tlsOption.Name
nsDefault = append(nsDefault, tlsOption.Namespace)
}
tlsOptions[id] = tls.Options{
MinVersion: tlsOption.Spec.MinVersion,
MaxVersion: tlsOption.Spec.MaxVersion,
CipherSuites: tlsOption.Spec.CipherSuites,
CurvePreferences: tlsOption.Spec.CurvePreferences,
ClientAuth: tls.ClientAuth{
CAFiles: clientCAs,
ClientAuthType: tlsOption.Spec.ClientAuth.ClientAuthType,
},
SniStrict: tlsOption.Spec.SniStrict,
PreferServerCipherSuites: tlsOption.Spec.PreferServerCipherSuites,
}
}
if len(nsDefault) > 1 {
delete(tlsOptions, "default")
log.FromContext(ctx).Errorf("Default TLS Options defined in multiple namespaces: %v", nsDefault)
}
return tlsOptions
}
func buildTLSStores(ctx context.Context, client Client) map[string]tls.Store {
tlsStoreCRD := client.GetTLSStores()
var tlsStores map[string]tls.Store
if len(tlsStoreCRD) == 0 {
return tlsStores
}
tlsStores = make(map[string]tls.Store)
var nsDefault []string
for _, tlsStore := range tlsStoreCRD {
namespace := tlsStore.Namespace
secretName := tlsStore.Spec.DefaultCertificate.SecretName
logger := log.FromContext(log.With(ctx, log.Str("tlsStore", tlsStore.Name), log.Str("namespace", namespace), log.Str("secretName", secretName)))
secret, exists, err := client.GetSecret(namespace, secretName)
if err != nil {
logger.Errorf("Failed to fetch secret %s/%s: %v", namespace, secretName, err)
continue
}
if !exists {
logger.Errorf("Secret %s/%s does not exist", namespace, secretName)
continue
}
cert, key, err := getCertificateBlocks(secret, namespace, secretName)
if err != nil {
logger.Errorf("Could not get certificate blocks: %v", err)
continue
}
id := makeID(tlsStore.Namespace, tlsStore.Name)
// If the name is default, we override the default config.
if tlsStore.Name == "default" {
id = tlsStore.Name
nsDefault = append(nsDefault, tlsStore.Namespace)
}
tlsStores[id] = tls.Store{
DefaultCertificate: &tls.Certificate{
CertFile: tls.FileOrContent(cert),
KeyFile: tls.FileOrContent(key),
},
}
}
if len(nsDefault) > 1 {
delete(tlsStores, "default")
log.FromContext(ctx).Errorf("Default TLS Stores defined in multiple namespaces: %v", nsDefault)
}
return tlsStores
}
func makeServiceKey(rule, ingressName string) (string, error) {
h := sha256.New()
if _, err := h.Write([]byte(rule)); err != nil {
return "", err
}
key := fmt.Sprintf("%s-%.10x", ingressName, h.Sum(nil))
return key, nil
}
func makeID(namespace, name string) string {
if namespace == "" {
return name
}
return namespace + "-" + name
}
func shouldProcessIngress(ingressClass, ingressClassAnnotation string) bool {
return ingressClass == ingressClassAnnotation ||
(len(ingressClass) == 0 && ingressClassAnnotation == traefikDefaultIngressClass)
}
func getTLS(k8sClient Client, secretName, namespace string) (*tls.CertAndStores, error) {
secret, exists, err := k8sClient.GetSecret(namespace, secretName)
if err != nil {
return nil, fmt.Errorf("failed to fetch secret %s/%s: %w", namespace, secretName, err)
}
if !exists {
return nil, fmt.Errorf("secret %s/%s does not exist", namespace, secretName)
}
cert, key, err := getCertificateBlocks(secret, namespace, secretName)
if err != nil {
return nil, err
}
return &tls.CertAndStores{
Certificate: tls.Certificate{
CertFile: tls.FileOrContent(cert),
KeyFile: tls.FileOrContent(key),
},
}, nil
}
func getTLSConfig(tlsConfigs map[string]*tls.CertAndStores) []*tls.CertAndStores {
var secretNames []string
for secretName := range tlsConfigs {
secretNames = append(secretNames, secretName)
}
sort.Strings(secretNames)
var configs []*tls.CertAndStores
for _, secretName := range secretNames {
configs = append(configs, tlsConfigs[secretName])
}
return configs
}
func getCertificateBlocks(secret *corev1.Secret, namespace, secretName string) (string, string, error) {
var missingEntries []string
tlsCrtData, tlsCrtExists := secret.Data["tls.crt"]
if !tlsCrtExists {
missingEntries = append(missingEntries, "tls.crt")
}
tlsKeyData, tlsKeyExists := secret.Data["tls.key"]
if !tlsKeyExists {
missingEntries = append(missingEntries, "tls.key")
}
if len(missingEntries) > 0 {
return "", "", fmt.Errorf("secret %s/%s is missing the following TLS data entries: %s",
namespace, secretName, strings.Join(missingEntries, ", "))
}
cert := string(tlsCrtData)
if cert == "" {
missingEntries = append(missingEntries, "tls.crt")
}
key := string(tlsKeyData)
if key == "" {
missingEntries = append(missingEntries, "tls.key")
}
if len(missingEntries) > 0 {
return "", "", fmt.Errorf("secret %s/%s contains the following empty TLS data entries: %s",
namespace, secretName, strings.Join(missingEntries, ", "))
}
return cert, key, nil
}
func getCABlocks(secret *corev1.Secret, namespace, secretName string) (string, error) {
tlsCrtData, tlsCrtExists := secret.Data["tls.ca"]
if !tlsCrtExists {
return "", fmt.Errorf("the tls.ca entry is missing from secret %s/%s", namespace, secretName)
}
cert := string(tlsCrtData)
if cert == "" {
return "", fmt.Errorf("the tls.ca entry in secret %s/%s is empty", namespace, secretName)
}
return cert, nil
}
func throttleEvents(ctx context.Context, throttleDuration time.Duration, pool *safe.Pool, eventsChan <-chan interface{}) chan interface{} {
if throttleDuration == 0 {
return nil
}
// Create a buffered channel to hold the pending event (if we're delaying processing the event due to throttling)
eventsChanBuffered := make(chan interface{}, 1)
// Run a goroutine that reads events from eventChan and does a non-blocking write to pendingEvent.
// This guarantees that writing to eventChan will never block,
// and that pendingEvent will have something in it if there's been an event since we read from that channel.
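// For reference, the non-blocking hand-off implemented below is the standard
// "buffered channel of size 1 + select with default" idiom; a minimal,
// illustrative sketch only (names are placeholders, not part of the provider):
//
//	buf := make(chan interface{}, 1)
//	select {
//	case buf <- event:
//	default: // buf already holds a pending event; drop this one
//	}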
pool.GoCtx(func(ctxPool context.Context) {
for {
select {
case <-ctxPool.Done():
return
case nextEvent := <-eventsChan:
select {
case eventsChanBuffered <- nextEvent:
default:
// We already have an event in eventsChanBuffered, so we'll do a refresh as soon as our throttle allows us to.
// It's fine to drop the event and keep whatever's in the buffer -- we don't do different things for different events
log.FromContext(ctx).Debugf("Dropping event kind %T due to throttling", nextEvent)
}
}
}
})
return eventsChanBuffered
}
| [
"\"KUBERNETES_SERVICE_HOST\"",
"\"KUBERNETES_SERVICE_PORT\"",
"\"KUBECONFIG\"",
"\"KUBECONFIG\"",
"\"KUBECONFIG\""
]
| []
| [
"KUBERNETES_SERVICE_HOST",
"KUBERNETES_SERVICE_PORT",
"KUBECONFIG"
]
| [] | ["KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT", "KUBECONFIG"] | go | 3 | 0 | |
scripts/virtualenv/virtualenv.py | #!/usr/bin/env python
"""Create a "virtual" Python installation
"""
# If you change the version here, change it in setup.py
# and docs/conf.py as well.
__version__ = "1.9" # following best practices
virtualenv_version = __version__ # legacy, again
import base64
import sys
import os
import codecs
import optparse
import re
import shutil
import logging
import tempfile
import zlib
import errno
import glob
import distutils.sysconfig
from distutils.util import strtobool
import struct
import subprocess
if sys.version_info < (2, 5):
print('ERROR: %s' % sys.exc_info()[1])
print('ERROR: this script requires Python 2.5 or greater.')
sys.exit(101)
try:
set
except NameError:
from sets import Set as set
try:
basestring
except NameError:
basestring = str
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
join = os.path.join
py_version = 'python%s.%s' % (sys.version_info[0], sys.version_info[1])
is_jython = sys.platform.startswith('java')
is_pypy = hasattr(sys, 'pypy_version_info')
is_win = (sys.platform == 'win32')
is_cygwin = (sys.platform == 'cygwin')
is_darwin = (sys.platform == 'darwin')
abiflags = getattr(sys, 'abiflags', '')
user_dir = os.path.expanduser('~')
if is_win:
default_storage_dir = os.path.join(user_dir, 'virtualenv')
else:
default_storage_dir = os.path.join(user_dir, '.virtualenv')
default_config_file = os.path.join(default_storage_dir, 'virtualenv.ini')
if is_pypy:
expected_exe = 'pypy'
elif is_jython:
expected_exe = 'jython'
else:
expected_exe = 'python'
REQUIRED_MODULES = ['os', 'posix', 'posixpath', 'nt', 'ntpath', 'genericpath',
'fnmatch', 'locale', 'encodings', 'codecs',
'stat', 'UserDict', 'readline', 'copy_reg', 'types',
're', 'sre', 'sre_parse', 'sre_constants', 'sre_compile',
'zlib']
REQUIRED_FILES = ['lib-dynload', 'config']
majver, minver = sys.version_info[:2]
if majver == 2:
if minver >= 6:
REQUIRED_MODULES.extend(['warnings', 'linecache', '_abcoll', 'abc'])
if minver >= 7:
REQUIRED_MODULES.extend(['_weakrefset'])
if minver <= 3:
REQUIRED_MODULES.extend(['sets', '__future__'])
elif majver == 3:
# Some extra modules are needed for Python 3, but different ones
# for different versions.
print "You must use python 2.7"
sys.exit(1)
REQUIRED_MODULES.extend(['_abcoll', 'warnings', 'linecache', 'abc', 'io',
'_weakrefset', 'copyreg', 'tempfile', 'random',
'__future__', 'collections', 'keyword', 'tarfile',
'shutil', 'struct', 'copy', 'tokenize', 'token',
'functools', 'heapq', 'bisect', 'weakref',
'reprlib'])
if minver >= 2:
REQUIRED_FILES[-1] = 'config-%s' % majver
if minver == 3:
import sysconfig
platdir = sysconfig.get_config_var('PLATDIR')
REQUIRED_FILES.append(platdir)
# The whole list of 3.3 modules is reproduced below - the current
# uncommented ones are required for 3.3 as of now, but more may be
# added as 3.3 development continues.
REQUIRED_MODULES.extend([
#"aifc",
#"antigravity",
#"argparse",
#"ast",
#"asynchat",
#"asyncore",
"base64",
#"bdb",
#"binhex",
#"bisect",
#"calendar",
#"cgi",
#"cgitb",
#"chunk",
#"cmd",
#"codeop",
#"code",
#"colorsys",
#"_compat_pickle",
#"compileall",
#"concurrent",
#"configparser",
#"contextlib",
#"cProfile",
#"crypt",
#"csv",
#"ctypes",
#"curses",
#"datetime",
#"dbm",
#"decimal",
#"difflib",
#"dis",
#"doctest",
#"dummy_threading",
"_dummy_thread",
#"email",
#"filecmp",
#"fileinput",
#"formatter",
#"fractions",
#"ftplib",
#"functools",
#"getopt",
#"getpass",
#"gettext",
#"glob",
#"gzip",
"hashlib",
#"heapq",
"hmac",
#"html",
#"http",
#"idlelib",
#"imaplib",
#"imghdr",
"imp",
"importlib",
#"inspect",
#"json",
#"lib2to3",
#"logging",
#"macpath",
#"macurl2path",
#"mailbox",
#"mailcap",
#"_markupbase",
#"mimetypes",
#"modulefinder",
#"multiprocessing",
#"netrc",
#"nntplib",
#"nturl2path",
#"numbers",
#"opcode",
#"optparse",
#"os2emxpath",
#"pdb",
#"pickle",
#"pickletools",
#"pipes",
#"pkgutil",
#"platform",
#"plat-linux2",
#"plistlib",
#"poplib",
#"pprint",
#"profile",
#"pstats",
#"pty",
#"pyclbr",
#"py_compile",
#"pydoc_data",
#"pydoc",
#"_pyio",
#"queue",
#"quopri",
#"reprlib",
"rlcompleter",
#"runpy",
#"sched",
#"shelve",
#"shlex",
#"smtpd",
#"smtplib",
#"sndhdr",
#"socket",
#"socketserver",
#"sqlite3",
#"ssl",
#"stringprep",
#"string",
#"_strptime",
#"subprocess",
#"sunau",
#"symbol",
#"symtable",
#"sysconfig",
#"tabnanny",
#"telnetlib",
#"test",
#"textwrap",
#"this",
#"_threading_local",
#"threading",
#"timeit",
#"tkinter",
#"tokenize",
#"token",
#"traceback",
#"trace",
#"tty",
#"turtledemo",
#"turtle",
#"unittest",
#"urllib",
#"uuid",
#"uu",
#"wave",
#"weakref",
#"webbrowser",
#"wsgiref",
#"xdrlib",
#"xml",
#"xmlrpc",
#"zipfile",
])
if is_pypy:
# these are needed to correctly display the exceptions that may happen
# during the bootstrap
REQUIRED_MODULES.extend(['traceback', 'linecache'])
class Logger(object):
"""
Logging object for use in command-line script. Allows ranges of
levels, to avoid some redundancy of displayed information.
"""
DEBUG = logging.DEBUG
INFO = logging.INFO
NOTIFY = (logging.INFO+logging.WARN)/2
WARN = WARNING = logging.WARN
ERROR = logging.ERROR
FATAL = logging.FATAL
LEVELS = [DEBUG, INFO, NOTIFY, WARN, ERROR, FATAL]
def __init__(self, consumers):
self.consumers = consumers
self.indent = 0
self.in_progress = None
self.in_progress_hanging = False
def debug(self, msg, *args, **kw):
self.log(self.DEBUG, msg, *args, **kw)
def info(self, msg, *args, **kw):
self.log(self.INFO, msg, *args, **kw)
def notify(self, msg, *args, **kw):
self.log(self.NOTIFY, msg, *args, **kw)
def warn(self, msg, *args, **kw):
self.log(self.WARN, msg, *args, **kw)
def error(self, msg, *args, **kw):
self.log(self.ERROR, msg, *args, **kw)
def fatal(self, msg, *args, **kw):
self.log(self.FATAL, msg, *args, **kw)
def log(self, level, msg, *args, **kw):
if args:
if kw:
raise TypeError(
"You may give positional or keyword arguments, not both")
args = args or kw
rendered = None
for consumer_level, consumer in self.consumers:
if self.level_matches(level, consumer_level):
if (self.in_progress_hanging
and consumer in (sys.stdout, sys.stderr)):
self.in_progress_hanging = False
sys.stdout.write('\n')
sys.stdout.flush()
if rendered is None:
if args:
rendered = msg % args
else:
rendered = msg
rendered = ' '*self.indent + rendered
if hasattr(consumer, 'write'):
consumer.write(rendered+'\n')
else:
consumer(rendered)
def start_progress(self, msg):
assert not self.in_progress, (
"Tried to start_progress(%r) while in_progress %r"
% (msg, self.in_progress))
if self.level_matches(self.NOTIFY, self._stdout_level()):
sys.stdout.write(msg)
sys.stdout.flush()
self.in_progress_hanging = True
else:
self.in_progress_hanging = False
self.in_progress = msg
def end_progress(self, msg='done.'):
assert self.in_progress, (
"Tried to end_progress without start_progress")
if self.stdout_level_matches(self.NOTIFY):
if not self.in_progress_hanging:
# Some message has been printed out since start_progress
sys.stdout.write('...' + self.in_progress + msg + '\n')
sys.stdout.flush()
else:
sys.stdout.write(msg + '\n')
sys.stdout.flush()
self.in_progress = None
self.in_progress_hanging = False
def show_progress(self):
"""If we are in a progress scope, and no log messages have been
shown, write out another '.'"""
if self.in_progress_hanging:
sys.stdout.write('.')
sys.stdout.flush()
def stdout_level_matches(self, level):
"""Returns true if a message at this level will go to stdout"""
return self.level_matches(level, self._stdout_level())
def _stdout_level(self):
"""Returns the level that stdout runs at"""
for level, consumer in self.consumers:
if consumer is sys.stdout:
return level
return self.FATAL
def level_matches(self, level, consumer_level):
"""
>>> l = Logger([])
>>> l.level_matches(3, 4)
False
>>> l.level_matches(3, 2)
True
>>> l.level_matches(slice(None, 3), 3)
False
>>> l.level_matches(slice(None, 3), 2)
True
>>> l.level_matches(slice(1, 3), 1)
True
>>> l.level_matches(slice(2, 3), 1)
False
"""
if isinstance(level, slice):
start, stop = level.start, level.stop
if start is not None and start > consumer_level:
return False
if stop is not None and stop <= consumer_level:
return False
return True
else:
return level >= consumer_level
#@classmethod
def level_for_integer(cls, level):
levels = cls.LEVELS
if level < 0:
return levels[0]
if level >= len(levels):
return levels[-1]
return levels[level]
level_for_integer = classmethod(level_for_integer)
# create a silent logger just to prevent this from being undefined
# will be overridden with the requested verbosity when main() is called.
logger = Logger([(Logger.LEVELS[-1], sys.stdout)])
def mkdir(path):
if not os.path.exists(path):
logger.info('Creating %s', path)
os.makedirs(path)
else:
logger.info('Directory %s already exists', path)
def copyfileordir(src, dest):
if os.path.isdir(src):
shutil.copytree(src, dest, True)
else:
shutil.copy2(src, dest)
def copyfile(src, dest, symlink=True):
if not os.path.exists(src):
# Some bad symlink in the src
logger.warn('Cannot find file %s (bad symlink)', src)
return
if os.path.exists(dest):
logger.debug('File %s already exists', dest)
return
if not os.path.exists(os.path.dirname(dest)):
logger.info('Creating parent directories for %s' % os.path.dirname(dest))
os.makedirs(os.path.dirname(dest))
if not os.path.islink(src):
srcpath = os.path.abspath(src)
else:
srcpath = os.readlink(src)
if symlink and hasattr(os, 'symlink') and not is_win:
logger.info('Symlinking %s', dest)
try:
os.symlink(srcpath, dest)
except (OSError, NotImplementedError):
logger.info('Symlinking failed, copying to %s', dest)
copyfileordir(src, dest)
else:
logger.info('Copying to %s', dest)
copyfileordir(src, dest)
def writefile(dest, content, overwrite=True):
if not os.path.exists(dest):
logger.info('Writing %s', dest)
f = open(dest, 'wb')
f.write(content.encode('utf-8'))
f.close()
return
else:
f = open(dest, 'rb')
c = f.read()
f.close()
if c != content.encode("utf-8"):
if not overwrite:
logger.notify('File %s exists with different content; not overwriting', dest)
return
logger.notify('Overwriting %s with new content', dest)
f = open(dest, 'wb')
f.write(content.encode('utf-8'))
f.close()
else:
logger.info('Content %s already in place', dest)
def rmtree(dir):
if os.path.exists(dir):
logger.notify('Deleting tree %s', dir)
shutil.rmtree(dir)
else:
logger.info('Do not need to delete %s; already gone', dir)
def make_exe(fn):
if hasattr(os, 'chmod'):
oldmode = os.stat(fn).st_mode & 0xFFF # 0o7777
newmode = (oldmode | 0x16D) & 0xFFF # 0o555, 0o7777
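# Illustrative example (assumed values): a file with mode 0o644 ends up as
# 0o755 here, since 0o644 | 0o555 (0x16D) == 0o755, i.e. read/execute for all.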
os.chmod(fn, newmode)
logger.info('Changed mode of %s to %s', fn, oct(newmode))
def _find_file(filename, dirs):
for dir in reversed(dirs):
files = glob.glob(os.path.join(dir, filename))
if files and os.path.isfile(files[0]):
return True, files[0]
return False, filename
def _install_req(py_executable, unzip=False, distribute=False,
search_dirs=None, never_download=False):
if search_dirs is None:
search_dirs = file_search_dirs()
if not distribute:
egg_path = 'setuptools-*-py%s.egg' % sys.version[:3]
found, egg_path = _find_file(egg_path, search_dirs)
project_name = 'setuptools'
bootstrap_script = EZ_SETUP_PY
tgz_path = None
else:
# Look for a distribute egg (these are not distributed by default,
# but can be made available by the user)
egg_path = 'distribute-*-py%s.egg' % sys.version[:3]
found, egg_path = _find_file(egg_path, search_dirs)
project_name = 'distribute'
if found:
tgz_path = None
bootstrap_script = DISTRIBUTE_FROM_EGG_PY
else:
# Fall back to sdist
# NB: egg_path is not None iff tgz_path is None
# iff bootstrap_script is a generic setup script accepting
# the standard arguments.
egg_path = None
tgz_path = 'distribute-*.tar.gz'
found, tgz_path = _find_file(tgz_path, search_dirs)
bootstrap_script = DISTRIBUTE_SETUP_PY
if is_jython and os._name == 'nt':
# Jython's .bat sys.executable can't handle a command line
# argument with newlines
fd, ez_setup = tempfile.mkstemp('.py')
os.write(fd, bootstrap_script)
os.close(fd)
cmd = [py_executable, ez_setup]
else:
cmd = [py_executable, '-c', bootstrap_script]
if unzip and egg_path:
cmd.append('--always-unzip')
env = {}
remove_from_env = ['__PYVENV_LAUNCHER__']
if logger.stdout_level_matches(logger.DEBUG) and egg_path:
cmd.append('-v')
old_chdir = os.getcwd()
if egg_path is not None and os.path.exists(egg_path):
logger.info('Using existing %s egg: %s' % (project_name, egg_path))
cmd.append(egg_path)
if os.environ.get('PYTHONPATH'):
env['PYTHONPATH'] = egg_path + os.path.pathsep + os.environ['PYTHONPATH']
else:
env['PYTHONPATH'] = egg_path
elif tgz_path is not None and os.path.exists(tgz_path):
# Found a tgz source dist, let's chdir
logger.info('Using existing %s egg: %s' % (project_name, tgz_path))
os.chdir(os.path.dirname(tgz_path))
# in this case, we want to be sure that PYTHONPATH is unset (not
# just empty, really unset), else CPython tries to import the
# site.py that is in virtualenv_support
remove_from_env.append('PYTHONPATH')
elif never_download:
logger.fatal("Can't find any local distributions of %s to install "
"and --never-download is set. Either re-run virtualenv "
"without the --never-download option, or place a %s "
"distribution (%s) in one of these "
"locations: %r" % (project_name, project_name,
egg_path or tgz_path,
search_dirs))
sys.exit(1)
elif egg_path:
logger.info('No %s egg found; downloading' % project_name)
cmd.extend(['--always-copy', '-U', project_name])
else:
logger.info('No %s tgz found; downloading' % project_name)
logger.start_progress('Installing %s...' % project_name)
logger.indent += 2
cwd = None
if project_name == 'distribute':
env['DONT_PATCH_SETUPTOOLS'] = 'true'
def _filter_ez_setup(line):
return filter_ez_setup(line, project_name)
if not os.access(os.getcwd(), os.W_OK):
cwd = tempfile.mkdtemp()
if tgz_path is not None and os.path.exists(tgz_path):
# the current working dir is hostile, let's copy the
# tarball to a temp dir
target = os.path.join(cwd, os.path.split(tgz_path)[-1])
shutil.copy(tgz_path, target)
try:
call_subprocess(cmd, show_stdout=False,
filter_stdout=_filter_ez_setup,
extra_env=env,
remove_from_env=remove_from_env,
cwd=cwd)
finally:
logger.indent -= 2
logger.end_progress()
if cwd is not None:
shutil.rmtree(cwd)
if os.getcwd() != old_chdir:
os.chdir(old_chdir)
if is_jython and os._name == 'nt':
os.remove(ez_setup)
def file_search_dirs():
here = os.path.dirname(os.path.abspath(__file__))
dirs = ['.', here,
join(here, 'virtualenv_support')]
if os.path.splitext(os.path.dirname(__file__))[0] != 'virtualenv':
# Probably some boot script; just in case virtualenv is installed...
try:
import virtualenv
except ImportError:
pass
else:
dirs.append(os.path.join(os.path.dirname(virtualenv.__file__), 'virtualenv_support'))
return [d for d in dirs if os.path.isdir(d)]
def install_setuptools(py_executable, unzip=False,
search_dirs=None, never_download=False):
_install_req(py_executable, unzip,
search_dirs=search_dirs, never_download=never_download)
def install_distribute(py_executable, unzip=False,
search_dirs=None, never_download=False):
_install_req(py_executable, unzip, distribute=True,
search_dirs=search_dirs, never_download=never_download)
_pip_re = re.compile(r'^pip-.*(zip|tar.gz|tar.bz2|tgz|tbz)$', re.I)
def install_pip(py_executable, search_dirs=None, never_download=False):
if search_dirs is None:
search_dirs = file_search_dirs()
filenames = []
for dir in search_dirs:
filenames.extend([join(dir, fn) for fn in os.listdir(dir)
if _pip_re.search(fn)])
filenames = [(os.path.basename(filename).lower(), i, filename) for i, filename in enumerate(filenames)]
filenames.sort()
filenames = [filename for basename, i, filename in filenames]
if not filenames:
filename = 'pip'
else:
filename = filenames[-1]
easy_install_script = 'easy_install'
if is_win:
easy_install_script = 'easy_install-script.py'
# There are two subtle issues here when invoking easy_install.
# 1. On unix-like systems the easy_install script can *only* be executed
# directly if its full filesystem path is no longer than 78 characters.
# 2. A work around to [1] is to use the `python path/to/easy_install foo`
# pattern, but that breaks if the path contains non-ASCII characters, as
# you can't put the file encoding declaration before the shebang line.
# The solution is to use Python's -x flag to skip the first line of the
# script (and any ASCII decoding errors that may have occurred in that line)
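# As an illustrative (hypothetical) example, the command built below ends up
# looking roughly like:
#   /path/to/env/bin/python -x /path/to/env/bin/easy_install pip-1.3.tar.gz
# where -x tells the interpreter to skip the script's first (shebang) line.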
cmd = [py_executable, '-x', join(os.path.dirname(py_executable), easy_install_script), filename]
# jython and pypy don't yet support -x
if is_jython or is_pypy:
cmd.remove('-x')
if filename == 'pip':
if never_download:
logger.fatal("Can't find any local distributions of pip to install "
"and --never-download is set. Either re-run virtualenv "
"without the --never-download option, or place a pip "
"source distribution (zip/tar.gz/tar.bz2) in one of these "
"locations: %r" % search_dirs)
sys.exit(1)
logger.info('Installing pip from network...')
else:
logger.info('Installing existing %s distribution: %s' % (
os.path.basename(filename), filename))
logger.start_progress('Installing pip...')
logger.indent += 2
def _filter_setup(line):
return filter_ez_setup(line, 'pip')
try:
call_subprocess(cmd, show_stdout=False,
filter_stdout=_filter_setup)
finally:
logger.indent -= 2
logger.end_progress()
def filter_ez_setup(line, project_name='setuptools'):
if not line.strip():
return Logger.DEBUG
if project_name == 'distribute':
for prefix in ('Extracting', 'Now working', 'Installing', 'Before',
'Scanning', 'Setuptools', 'Egg', 'Already',
'running', 'writing', 'reading', 'installing',
'creating', 'copying', 'byte-compiling', 'removing',
'Processing'):
if line.startswith(prefix):
return Logger.DEBUG
return Logger.DEBUG
for prefix in ['Reading ', 'Best match', 'Processing setuptools',
'Copying setuptools', 'Adding setuptools',
'Installing ', 'Installed ']:
if line.startswith(prefix):
return Logger.DEBUG
return Logger.INFO
class UpdatingDefaultsHelpFormatter(optparse.IndentedHelpFormatter):
"""
Custom help formatter for use in ConfigOptionParser that updates
the defaults before expanding them, allowing them to show up correctly
in the help listing
"""
def expand_default(self, option):
if self.parser is not None:
self.parser.update_defaults(self.parser.defaults)
return optparse.IndentedHelpFormatter.expand_default(self, option)
class ConfigOptionParser(optparse.OptionParser):
"""
Custom option parser which updates its defaults by checking the
configuration files and environmental variables
"""
def __init__(self, *args, **kwargs):
self.config = ConfigParser.RawConfigParser()
self.files = self.get_config_files()
self.config.read(self.files)
optparse.OptionParser.__init__(self, *args, **kwargs)
def get_config_files(self):
config_file = os.environ.get('VIRTUALENV_CONFIG_FILE', False)
if config_file and os.path.exists(config_file):
return [config_file]
return [default_config_file]
def update_defaults(self, defaults):
"""
Updates the given defaults with values from the config files and
the environ. Does a little special handling for certain types of
options (lists).
"""
# Then go and look for the other sources of configuration:
config = {}
# 1. config files
config.update(dict(self.get_config_section('virtualenv')))
# 2. environmental variables
config.update(dict(self.get_environ_vars()))
# Then set the options with those values
for key, val in config.items():
key = key.replace('_', '-')
if not key.startswith('--'):
key = '--%s' % key # only prefer long opts
option = self.get_option(key)
if option is not None:
# ignore empty values
if not val:
continue
# handle multiline configs
if option.action == 'append':
val = val.split()
else:
option.nargs = 1
if option.action == 'store_false':
val = not strtobool(val)
elif option.action in ('store_true', 'count'):
val = strtobool(val)
try:
val = option.convert_value(key, val)
except optparse.OptionValueError:
e = sys.exc_info()[1]
print("An error occured during configuration: %s" % e)
sys.exit(3)
defaults[option.dest] = val
return defaults
def get_config_section(self, name):
"""
Get a section of a configuration
"""
if self.config.has_section(name):
return self.config.items(name)
return []
def get_environ_vars(self, prefix='VIRTUALENV_'):
"""
Returns a generator of all environment variables with the prefix VIRTUALENV_
"""
for key, val in os.environ.items():
if key.startswith(prefix):
yield (key.replace(prefix, '').lower(), val)
def get_default_values(self):
"""
Overriding to make updating the defaults after instantiation of
the option parser possible; update_defaults() does the dirty work.
"""
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return optparse.Values(self.defaults)
defaults = self.update_defaults(self.defaults.copy()) # ours
for option in self._get_all_options():
default = defaults.get(option.dest)
if isinstance(default, basestring):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return optparse.Values(defaults)
def main(argv):
parser = ConfigOptionParser(
version=virtualenv_version,
usage="%prog [OPTIONS] DEST_DIR",
formatter=UpdatingDefaultsHelpFormatter())
parser.add_option(
'-v', '--verbose',
action='count',
dest='verbose',
default=0,
help="Increase verbosity")
parser.add_option(
'-q', '--quiet',
action='count',
dest='quiet',
default=0,
help='Decrease verbosity')
parser.add_option(
'-p', '--python',
dest='python',
metavar='PYTHON_EXE',
help='The Python interpreter to use, e.g., --python=python2.5 will use the python2.5 '
'interpreter to create the new environment. The default is the interpreter that '
'virtualenv was installed with (%s)' % sys.executable)
parser.add_option(
'--clear',
dest='clear',
action='store_true',
help="Clear out the non-root install and start from scratch")
parser.set_defaults(system_site_packages=False)
parser.add_option(
'--no-site-packages',
dest='system_site_packages',
action='store_false',
help="Don't give access to the global site-packages dir to the "
"virtual environment (default)")
parser.add_option(
'--system-site-packages',
dest='system_site_packages',
action='store_true',
help="Give access to the global site-packages dir to the "
"virtual environment")
parser.add_option(
'--unzip-setuptools',
dest='unzip_setuptools',
action='store_true',
help="Unzip Setuptools or Distribute when installing it")
parser.add_option(
'--relocatable',
dest='relocatable',
action='store_true',
help='Make an EXISTING virtualenv environment relocatable. '
'This fixes up scripts and makes all .pth files relative')
parser.add_option(
'--distribute', '--use-distribute', # the second option is for legacy reasons here. Hi Kenneth!
dest='use_distribute',
action='store_true',
help='Use Distribute instead of Setuptools. Set the environment variable '
'VIRTUALENV_DISTRIBUTE to make it the default.')
parser.add_option(
'--no-setuptools',
dest='no_setuptools',
action='store_true',
help='Do not install distribute/setuptools (or pip) '
'in the new virtualenv.')
parser.add_option(
'--no-pip',
dest='no_pip',
action='store_true',
help='Do not install pip in the new virtualenv.')
parser.add_option(
'--setuptools',
dest='use_distribute',
action='store_false',
help='Use Setuptools instead of Distribute. Set the environment variable '
'VIRTUALENV_SETUPTOOLS to make it the default.')
# Set this to True to use distribute by default, even in Python 2.
parser.set_defaults(use_distribute=False)
default_search_dirs = file_search_dirs()
parser.add_option(
'--extra-search-dir',
dest="search_dirs",
action="append",
default=default_search_dirs,
help="Directory to look for setuptools/distribute/pip distributions in. "
"You can add any number of additional --extra-search-dir paths.")
parser.add_option(
'--never-download',
dest="never_download",
action="store_true",
help="Never download anything from the network. Instead, virtualenv will fail "
"if local distributions of setuptools/distribute/pip are not present.")
parser.add_option(
'--prompt',
dest='prompt',
help='Provides an alternative prompt prefix for this environment')
if 'extend_parser' in globals():
extend_parser(parser)
options, args = parser.parse_args(argv)
global logger
if 'adjust_options' in globals():
adjust_options(options, args)
verbosity = options.verbose - options.quiet
logger = Logger([(Logger.level_for_integer(2 - verbosity), sys.stdout)])
if options.python and not os.environ.get('VIRTUALENV_INTERPRETER_RUNNING'):
env = os.environ.copy()
interpreter = resolve_interpreter(options.python)
if interpreter == sys.executable:
logger.warn('Already using interpreter %s' % interpreter)
else:
logger.notify('Running virtualenv with interpreter %s' % interpreter)
env['VIRTUALENV_INTERPRETER_RUNNING'] = 'true'
file = __file__
if file.endswith('.pyc'):
file = file[:-1]
popen = subprocess.Popen([interpreter, file] + args[1:], env=env)
raise SystemExit(popen.wait())
# Force --distribute on Python 3, since setuptools is not available.
if majver > 2:
options.use_distribute = True
if os.environ.get('PYTHONDONTWRITEBYTECODE') and not options.use_distribute:
print(
"The PYTHONDONTWRITEBYTECODE environment variable is "
"not compatible with setuptools. Either use --distribute "
"or unset PYTHONDONTWRITEBYTECODE.")
sys.exit(2)
if not args:
print('You must provide a DEST_DIR')
parser.print_help()
sys.exit(2)
if len(args) > 1:
print('There must be only one argument: DEST_DIR (you gave %s)' % (
' '.join(args)))
parser.print_help()
sys.exit(2)
home_dir = args[0]
if os.environ.get('WORKING_ENV'):
logger.fatal('ERROR: you cannot run virtualenv while in a workingenv')
logger.fatal('Please deactivate your workingenv, then re-run this script')
sys.exit(3)
if 'PYTHONHOME' in os.environ:
logger.warn('PYTHONHOME is set. You *must* activate the virtualenv before using it')
del os.environ['PYTHONHOME']
if options.relocatable:
make_environment_relocatable(home_dir)
return
create_environment(home_dir,
site_packages=options.system_site_packages,
clear=options.clear,
unzip_setuptools=options.unzip_setuptools,
use_distribute=options.use_distribute,
prompt=options.prompt,
search_dirs=options.search_dirs,
never_download=options.never_download,
no_setuptools=options.no_setuptools,
no_pip=options.no_pip)
if 'after_install' in globals():
after_install(options, home_dir)
def call_subprocess(cmd, show_stdout=True,
filter_stdout=None, cwd=None,
raise_on_returncode=True, extra_env=None,
remove_from_env=None):
cmd_parts = []
for part in cmd:
if len(part) > 45:
part = part[:20]+"..."+part[-20:]
if ' ' in part or '\n' in part or '"' in part or "'" in part:
part = '"%s"' % part.replace('"', '\\"')
if hasattr(part, 'decode'):
try:
part = part.decode(sys.getdefaultencoding())
except UnicodeDecodeError:
part = part.decode(sys.getfilesystemencoding())
cmd_parts.append(part)
cmd_desc = ' '.join(cmd_parts)
if show_stdout:
stdout = None
else:
stdout = subprocess.PIPE
logger.debug("Running command %s" % cmd_desc)
if extra_env or remove_from_env:
env = os.environ.copy()
if extra_env:
env.update(extra_env)
if remove_from_env:
for varname in remove_from_env:
env.pop(varname, None)
else:
env = None
try:
proc = subprocess.Popen(
cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
cwd=cwd, env=env)
except Exception:
e = sys.exc_info()[1]
logger.fatal(
"Error %s while executing command %s" % (e, cmd_desc))
raise
all_output = []
if stdout is not None:
stdout = proc.stdout
encoding = sys.getdefaultencoding()
fs_encoding = sys.getfilesystemencoding()
while 1:
line = stdout.readline()
try:
line = line.decode(encoding)
except UnicodeDecodeError:
line = line.decode(fs_encoding)
if not line:
break
line = line.rstrip()
all_output.append(line)
if filter_stdout:
level = filter_stdout(line)
if isinstance(level, tuple):
level, line = level
logger.log(level, line)
if not logger.stdout_level_matches(level):
logger.show_progress()
else:
logger.info(line)
else:
proc.communicate()
proc.wait()
if proc.returncode:
if raise_on_returncode:
if all_output:
logger.notify('Complete output from command %s:' % cmd_desc)
logger.notify('\n'.join(all_output) + '\n----------------------------------------')
raise OSError(
"Command %s failed with error code %s"
% (cmd_desc, proc.returncode))
else:
logger.warn(
"Command %s had error code %s"
% (cmd_desc, proc.returncode))
def create_environment(home_dir, site_packages=False, clear=False,
unzip_setuptools=False, use_distribute=False,
prompt=None, search_dirs=None, never_download=False,
no_setuptools=False, no_pip=False):
"""
Creates a new environment in ``home_dir``.
If ``site_packages`` is true, then the global ``site-packages/``
directory will be on the path.
If ``clear`` is true (default False) then the environment will
first be cleared.
"""
home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
py_executable = os.path.abspath(install_python(
home_dir, lib_dir, inc_dir, bin_dir,
site_packages=site_packages, clear=clear))
install_distutils(home_dir)
if not no_setuptools:
if use_distribute:
install_distribute(py_executable, unzip=unzip_setuptools,
search_dirs=search_dirs, never_download=never_download)
else:
install_setuptools(py_executable, unzip=unzip_setuptools,
search_dirs=search_dirs, never_download=never_download)
if not no_pip:
install_pip(py_executable, search_dirs=search_dirs, never_download=never_download)
install_activate(home_dir, bin_dir, prompt)
def is_executable_file(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def path_locations(home_dir):
"""Return the path locations for the environment (where libraries are,
where scripts go, etc)"""
# XXX: We'd use distutils.sysconfig.get_python_inc/lib but its
# prefix arg is broken: http://bugs.python.org/issue3386
if is_win:
# Windows has lots of problems with executables with spaces in
# the name; this function will remove them (using the ~1
# format):
mkdir(home_dir)
if ' ' in home_dir:
import ctypes
GetShortPathName = ctypes.windll.kernel32.GetShortPathNameW
size = max(len(home_dir)+1, 256)
buf = ctypes.create_unicode_buffer(size)
try:
u = unicode
except NameError:
u = str
ret = GetShortPathName(u(home_dir), buf, size)
if not ret:
print('Error: the path "%s" has a space in it' % home_dir)
print('We could not determine the short pathname for it.')
print('Exiting.')
sys.exit(3)
home_dir = str(buf.value)
lib_dir = join(home_dir, 'Lib')
inc_dir = join(home_dir, 'Include')
bin_dir = join(home_dir, 'Scripts')
if is_jython:
lib_dir = join(home_dir, 'Lib')
inc_dir = join(home_dir, 'Include')
bin_dir = join(home_dir, 'bin')
elif is_pypy:
lib_dir = home_dir
inc_dir = join(home_dir, 'include')
bin_dir = join(home_dir, 'bin')
elif not is_win:
lib_dir = join(home_dir, 'lib', py_version)
multiarch_exec = '/usr/bin/multiarch-platform'
if is_executable_file(multiarch_exec):
# In Mageia (2) and Mandriva distros the include dir must be like:
# virtualenv/include/multiarch-x86_64-linux/python2.7
# instead of being virtualenv/include/python2.7
p = subprocess.Popen(multiarch_exec, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
# stdout.strip() is needed to remove the trailing newline character
inc_dir = join(home_dir, 'include', stdout.strip(), py_version + abiflags)
else:
inc_dir = join(home_dir, 'include', py_version + abiflags)
bin_dir = join(home_dir, 'bin')
return home_dir, lib_dir, inc_dir, bin_dir
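# For reference, on a typical POSIX layout the tuple returned above looks
# roughly like this (values are illustrative, not exact):
#
#     ('/tmp/env', '/tmp/env/lib/python2.7',
#      '/tmp/env/include/python2.7', '/tmp/env/bin')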
def change_prefix(filename, dst_prefix):
prefixes = [sys.prefix]
if is_darwin:
prefixes.extend((
os.path.join("/Library/Python", sys.version[:3], "site-packages"),
os.path.join(sys.prefix, "Extras", "lib", "python"),
os.path.join("~", "Library", "Python", sys.version[:3], "site-packages"),
# Python 2.6 no-frameworks
os.path.join("~", ".local", "lib","python", sys.version[:3], "site-packages"),
# System Python 2.7 on OSX Mountain Lion
os.path.join("~", "Library", "Python", sys.version[:3], "lib", "python", "site-packages")))
if hasattr(sys, 'real_prefix'):
prefixes.append(sys.real_prefix)
if hasattr(sys, 'base_prefix'):
prefixes.append(sys.base_prefix)
prefixes = list(map(os.path.expanduser, prefixes))
prefixes = list(map(os.path.abspath, prefixes))
# Check longer prefixes first so we don't split in the middle of a filename
prefixes = sorted(prefixes, key=len, reverse=True)
filename = os.path.abspath(filename)
for src_prefix in prefixes:
if filename.startswith(src_prefix):
_, relpath = filename.split(src_prefix, 1)
if src_prefix != os.sep: # sys.prefix == "/"
assert relpath[0] == os.sep
relpath = relpath[1:]
return join(dst_prefix, relpath)
assert False, "Filename %s does not start with any of these prefixes: %s" % \
(filename, prefixes)
def copy_required_modules(dst_prefix):
import imp
# If we are running under -p, we need to remove the current
# directory from sys.path temporarily here, so that we
# definitely get the modules from the site directory of
# the interpreter we are running under, not the one
# virtualenv.py is installed under (which might lead to py2/py3
# incompatibility issues)
_prev_sys_path = sys.path
if os.environ.get('VIRTUALENV_INTERPRETER_RUNNING'):
sys.path = sys.path[1:]
try:
for modname in REQUIRED_MODULES:
if modname in sys.builtin_module_names:
logger.info("Ignoring built-in bootstrap module: %s" % modname)
continue
try:
f, filename, _ = imp.find_module(modname)
except ImportError:
logger.info("Cannot import bootstrap module: %s" % modname)
else:
if f is not None:
f.close()
# special-case custom readline.so on OS X, but not for pypy:
if modname == 'readline' and sys.platform == 'darwin' and not (
is_pypy or filename.endswith(join('lib-dynload', 'readline.so'))):
dst_filename = join(dst_prefix, 'lib', 'python%s' % sys.version[:3], 'readline.so')
else:
dst_filename = change_prefix(filename, dst_prefix)
copyfile(filename, dst_filename)
if filename.endswith('.pyc'):
pyfile = filename[:-1]
if os.path.exists(pyfile):
copyfile(pyfile, dst_filename[:-1])
finally:
sys.path = _prev_sys_path
def subst_path(prefix_path, prefix, home_dir):
prefix_path = os.path.normpath(prefix_path)
prefix = os.path.normpath(prefix)
home_dir = os.path.normpath(home_dir)
if not prefix_path.startswith(prefix):
logger.warn('Path not in prefix %r %r', prefix_path, prefix)
return
return prefix_path.replace(prefix, home_dir, 1)
def install_python(home_dir, lib_dir, inc_dir, bin_dir, site_packages, clear):
"""Install just the base environment, no distutils patches etc"""
if sys.executable.startswith(bin_dir):
print('Please use the *system* python to run this script')
return
if clear:
rmtree(lib_dir)
## FIXME: why not delete it?
## Maybe it should delete everything with #!/path/to/venv/python in it
logger.notify('Not deleting %s', bin_dir)
if hasattr(sys, 'real_prefix'):
logger.notify('Using real prefix %r' % sys.real_prefix)
prefix = sys.real_prefix
elif hasattr(sys, 'base_prefix'):
logger.notify('Using base prefix %r' % sys.base_prefix)
prefix = sys.base_prefix
else:
prefix = sys.prefix
mkdir(lib_dir)
fix_lib64(lib_dir)
stdlib_dirs = [os.path.dirname(os.__file__)]
if is_win:
stdlib_dirs.append(join(os.path.dirname(stdlib_dirs[0]), 'DLLs'))
elif is_darwin:
stdlib_dirs.append(join(stdlib_dirs[0], 'site-packages'))
if hasattr(os, 'symlink'):
logger.info('Symlinking Python bootstrap modules')
else:
logger.info('Copying Python bootstrap modules')
logger.indent += 2
try:
# copy required files...
for stdlib_dir in stdlib_dirs:
if not os.path.isdir(stdlib_dir):
continue
for fn in os.listdir(stdlib_dir):
bn = os.path.splitext(fn)[0]
if fn != 'site-packages' and bn in REQUIRED_FILES:
copyfile(join(stdlib_dir, fn), join(lib_dir, fn))
# ...and modules
copy_required_modules(home_dir)
finally:
logger.indent -= 2
mkdir(join(lib_dir, 'site-packages'))
import site
site_filename = site.__file__
if site_filename.endswith('.pyc'):
site_filename = site_filename[:-1]
elif site_filename.endswith('$py.class'):
site_filename = site_filename.replace('$py.class', '.py')
site_filename_dst = change_prefix(site_filename, home_dir)
site_dir = os.path.dirname(site_filename_dst)
writefile(site_filename_dst, SITE_PY)
writefile(join(site_dir, 'orig-prefix.txt'), prefix)
site_packages_filename = join(site_dir, 'no-global-site-packages.txt')
if not site_packages:
writefile(site_packages_filename, '')
if is_pypy or is_win:
stdinc_dir = join(prefix, 'include')
else:
stdinc_dir = join(prefix, 'include', py_version + abiflags)
if os.path.exists(stdinc_dir):
copyfile(stdinc_dir, inc_dir)
else:
logger.debug('No include dir %s' % stdinc_dir)
platinc_dir = distutils.sysconfig.get_python_inc(plat_specific=1)
if platinc_dir != stdinc_dir:
platinc_dest = distutils.sysconfig.get_python_inc(
plat_specific=1, prefix=home_dir)
if platinc_dir == platinc_dest:
# Do platinc_dest manually due to a CPython bug;
# not http://bugs.python.org/issue3386 but a close cousin
platinc_dest = subst_path(platinc_dir, prefix, home_dir)
if platinc_dest:
# PyPy's stdinc_dir and prefix are relative to the original binary
# (traversing virtualenvs), whereas the platinc_dir is relative to
# the inner virtualenv and ignores the prefix argument.
# This seems more evolved than designed.
copyfile(platinc_dir, platinc_dest)
# pypy never uses exec_prefix, just ignore it
if sys.exec_prefix != prefix and not is_pypy:
if is_win:
exec_dir = join(sys.exec_prefix, 'lib')
elif is_jython:
exec_dir = join(sys.exec_prefix, 'Lib')
else:
exec_dir = join(sys.exec_prefix, 'lib', py_version)
for fn in os.listdir(exec_dir):
copyfile(join(exec_dir, fn), join(lib_dir, fn))
if is_jython:
# Jython has either jython-dev.jar and javalib/ dir, or just
# jython.jar
for name in 'jython-dev.jar', 'javalib', 'jython.jar':
src = join(prefix, name)
if os.path.exists(src):
copyfile(src, join(home_dir, name))
# XXX: registry should always exist after Jython 2.5rc1
src = join(prefix, 'registry')
if os.path.exists(src):
copyfile(src, join(home_dir, 'registry'), symlink=False)
copyfile(join(prefix, 'cachedir'), join(home_dir, 'cachedir'),
symlink=False)
mkdir(bin_dir)
py_executable = join(bin_dir, os.path.basename(sys.executable))
if 'Python.framework' in prefix:
# OS X framework builds cause validation to break
# https://github.com/pypa/virtualenv/issues/322
if os.environ.get('__PYVENV_LAUNCHER__'):
os.unsetenv('__PYVENV_LAUNCHER__')
if re.search(r'/Python(?:-32|-64)*$', py_executable):
# The name of the python executable is not quite what
# we want; rename it.
py_executable = os.path.join(
os.path.dirname(py_executable), 'python')
logger.notify('New %s executable in %s', expected_exe, py_executable)
pcbuild_dir = os.path.dirname(sys.executable)
pyd_pth = os.path.join(lib_dir, 'site-packages', 'virtualenv_builddir_pyd.pth')
if is_win and os.path.exists(os.path.join(pcbuild_dir, 'build.bat')):
logger.notify('Detected python running from build directory %s', pcbuild_dir)
logger.notify('Writing .pth file linking to build directory for *.pyd files')
writefile(pyd_pth, pcbuild_dir)
else:
pcbuild_dir = None
if os.path.exists(pyd_pth):
logger.info('Deleting %s (not Windows env or not build directory python)' % pyd_pth)
os.unlink(pyd_pth)
if sys.executable != py_executable:
## FIXME: could I just hard link?
executable = sys.executable
shutil.copyfile(executable, py_executable)
make_exe(py_executable)
if is_win or is_cygwin:
pythonw = os.path.join(os.path.dirname(sys.executable), 'pythonw.exe')
if os.path.exists(pythonw):
logger.info('Also created pythonw.exe')
shutil.copyfile(pythonw, os.path.join(os.path.dirname(py_executable), 'pythonw.exe'))
python_d = os.path.join(os.path.dirname(sys.executable), 'python_d.exe')
python_d_dest = os.path.join(os.path.dirname(py_executable), 'python_d.exe')
if os.path.exists(python_d):
logger.info('Also created python_d.exe')
shutil.copyfile(python_d, python_d_dest)
elif os.path.exists(python_d_dest):
logger.info('Removed python_d.exe as it is no longer at the source')
os.unlink(python_d_dest)
# we need to copy the DLL to ensure that Windows will load the correct one.
# it may not exist if we are running under Cygwin.
py_executable_dll = 'python%s%s.dll' % (
sys.version_info[0], sys.version_info[1])
py_executable_dll_d = 'python%s%s_d.dll' % (
sys.version_info[0], sys.version_info[1])
pythondll = os.path.join(os.path.dirname(sys.executable), py_executable_dll)
pythondll_d = os.path.join(os.path.dirname(sys.executable), py_executable_dll_d)
pythondll_d_dest = os.path.join(os.path.dirname(py_executable), py_executable_dll_d)
if os.path.exists(pythondll):
logger.info('Also created %s' % py_executable_dll)
shutil.copyfile(pythondll, os.path.join(os.path.dirname(py_executable), py_executable_dll))
if os.path.exists(pythondll_d):
logger.info('Also created %s' % py_executable_dll_d)
shutil.copyfile(pythondll_d, pythondll_d_dest)
elif os.path.exists(pythondll_d_dest):
logger.info('Removed %s as the source does not exist' % pythondll_d_dest)
os.unlink(pythondll_d_dest)
if is_pypy:
# make a symlink python --> pypy-c
python_executable = os.path.join(os.path.dirname(py_executable), 'python')
if sys.platform in ('win32', 'cygwin'):
python_executable += '.exe'
logger.info('Also created executable %s' % python_executable)
copyfile(py_executable, python_executable)
if is_win:
for name in 'libexpat.dll', 'libpypy.dll', 'libpypy-c.dll', 'libeay32.dll', 'ssleay32.dll', 'sqlite.dll':
src = join(prefix, name)
if os.path.exists(src):
copyfile(src, join(bin_dir, name))
if os.path.splitext(os.path.basename(py_executable))[0] != expected_exe:
secondary_exe = os.path.join(os.path.dirname(py_executable),
expected_exe)
py_executable_ext = os.path.splitext(py_executable)[1]
if py_executable_ext == '.exe':
# python2.4 gives an extension of '.4' :P
secondary_exe += py_executable_ext
if os.path.exists(secondary_exe):
logger.warn('Not overwriting existing %s script %s (you must use %s)'
% (expected_exe, secondary_exe, py_executable))
else:
logger.notify('Also creating executable in %s' % secondary_exe)
shutil.copyfile(sys.executable, secondary_exe)
make_exe(secondary_exe)
if '.framework' in prefix:
if 'Python.framework' in prefix:
logger.debug('MacOSX Python framework detected')
# Make sure we use the embedded interpreter inside
# the framework, even if sys.executable points to
# the stub executable in ${sys.prefix}/bin
# See http://groups.google.com/group/python-virtualenv/
# browse_thread/thread/17cab2f85da75951
original_python = os.path.join(
prefix, 'Resources/Python.app/Contents/MacOS/Python')
if 'EPD' in prefix:
logger.debug('EPD framework detected')
original_python = os.path.join(prefix, 'bin/python')
shutil.copy(original_python, py_executable)
# Copy the framework's dylib into the virtual
# environment
virtual_lib = os.path.join(home_dir, '.Python')
if os.path.exists(virtual_lib):
os.unlink(virtual_lib)
copyfile(
os.path.join(prefix, 'Python'),
virtual_lib)
# And then change the install_name of the copied python executable
try:
mach_o_change(py_executable,
os.path.join(prefix, 'Python'),
'@executable_path/../.Python')
except:
e = sys.exc_info()[1]
logger.warn("Could not call mach_o_change: %s. "
"Trying to call install_name_tool instead." % e)
try:
call_subprocess(
["install_name_tool", "-change",
os.path.join(prefix, 'Python'),
'@executable_path/../.Python',
py_executable])
except:
logger.fatal("Could not call install_name_tool -- you must "
"have Apple's development tools installed")
raise
if not is_win:
# Ensure that 'python', 'pythonX' and 'pythonX.Y' all exist
py_exe_version_major = 'python%s' % sys.version_info[0]
py_exe_version_major_minor = 'python%s.%s' % (
sys.version_info[0], sys.version_info[1])
py_exe_no_version = 'python'
required_symlinks = [ py_exe_no_version, py_exe_version_major,
py_exe_version_major_minor ]
py_executable_base = os.path.basename(py_executable)
if py_executable_base in required_symlinks:
# Don't try to symlink to yourself.
required_symlinks.remove(py_executable_base)
for pth in required_symlinks:
full_pth = join(bin_dir, pth)
if os.path.exists(full_pth):
os.unlink(full_pth)
os.symlink(py_executable_base, full_pth)
if is_win and ' ' in py_executable:
# There's a bug with subprocess on Windows when using a first
# argument that has a space in it. Instead we have to quote
# the value:
py_executable = '"%s"' % py_executable
# NOTE: keep this check as one line, cmd.exe doesn't cope with line breaks
cmd = [py_executable, '-c', 'import sys;out=sys.stdout;'
'getattr(out, "buffer", out).write(sys.prefix.encode("utf-8"))']
logger.info('Testing executable with %s %s "%s"' % tuple(cmd))
try:
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
proc_stdout, proc_stderr = proc.communicate()
except OSError:
e = sys.exc_info()[1]
if e.errno == errno.EACCES:
logger.fatal('ERROR: The executable %s could not be run: %s' % (py_executable, e))
sys.exit(100)
else:
raise e
proc_stdout = proc_stdout.strip().decode("utf-8")
proc_stdout = os.path.normcase(os.path.abspath(proc_stdout))
norm_home_dir = os.path.normcase(os.path.abspath(home_dir))
if hasattr(norm_home_dir, 'decode'):
norm_home_dir = norm_home_dir.decode(sys.getfilesystemencoding())
if proc_stdout != norm_home_dir:
logger.fatal(
'ERROR: The executable %s is not functioning' % py_executable)
logger.fatal(
'ERROR: It thinks sys.prefix is %r (should be %r)'
% (proc_stdout, norm_home_dir))
logger.fatal(
'ERROR: virtualenv is not compatible with this system or executable')
if is_win:
logger.fatal(
'Note: some Windows users have reported this error when they '
'installed Python for "Only this user" or have multiple '
'versions of Python installed. Copying the appropriate '
'PythonXX.dll to the virtualenv Scripts/ directory may fix '
'this problem.')
sys.exit(100)
else:
logger.info('Got sys.prefix result: %r' % proc_stdout)
pydistutils = os.path.expanduser('~/.pydistutils.cfg')
if os.path.exists(pydistutils):
logger.notify('Please make sure you remove any previous custom paths from '
'your %s file.' % pydistutils)
## FIXME: really this should be calculated earlier
fix_local_scheme(home_dir)
if site_packages:
if os.path.exists(site_packages_filename):
logger.info('Deleting %s' % site_packages_filename)
os.unlink(site_packages_filename)
return py_executable
def install_activate(home_dir, bin_dir, prompt=None):
home_dir = os.path.abspath(home_dir)
if is_win or is_jython and os._name == 'nt':
files = {
'activate.bat': ACTIVATE_BAT,
'deactivate.bat': DEACTIVATE_BAT,
'activate.ps1': ACTIVATE_PS,
}
# MSYS needs paths of the form /c/path/to/file
drive, tail = os.path.splitdrive(home_dir.replace(os.sep, '/'))
home_dir_msys = (drive and "/%s%s" or "%s%s") % (drive[:1], tail)
# Run-time conditional enables (basic) Cygwin compatibility
home_dir_sh = ("""$(if [ "$OSTYPE" "==" "cygwin" ]; then cygpath -u '%s'; else echo '%s'; fi;)""" %
(home_dir, home_dir_msys))
files['activate'] = ACTIVATE_SH.replace('__VIRTUAL_ENV__', home_dir_sh)
else:
files = {'activate': ACTIVATE_SH}
# supplying activate.fish in addition to, not instead of, the
# bash script support.
files['activate.fish'] = ACTIVATE_FISH
# same for csh/tcsh support...
files['activate.csh'] = ACTIVATE_CSH
files['activate_this.py'] = ACTIVATE_THIS
if hasattr(home_dir, 'decode'):
home_dir = home_dir.decode(sys.getfilesystemencoding())
vname = os.path.basename(home_dir)
for name, content in files.items():
content = content.replace('__VIRTUAL_PROMPT__', prompt or '')
content = content.replace('__VIRTUAL_WINPROMPT__', prompt or '(%s)' % vname)
content = content.replace('__VIRTUAL_ENV__', home_dir)
content = content.replace('__VIRTUAL_NAME__', vname)
content = content.replace('__BIN_NAME__', os.path.basename(bin_dir))
writefile(os.path.join(bin_dir, name), content)
def install_distutils(home_dir):
distutils_path = change_prefix(distutils.__path__[0], home_dir)
mkdir(distutils_path)
## FIXME: maybe this prefix setting should only be put in place if
## there's a local distutils.cfg with a prefix setting?
home_dir = os.path.abspath(home_dir)
## FIXME: this is breaking things, removing for now:
#distutils_cfg = DISTUTILS_CFG + "\n[install]\nprefix=%s\n" % home_dir
writefile(os.path.join(distutils_path, '__init__.py'), DISTUTILS_INIT)
writefile(os.path.join(distutils_path, 'distutils.cfg'), DISTUTILS_CFG, overwrite=False)
def fix_local_scheme(home_dir):
"""
Platforms that use the "posix_local" install scheme (like Ubuntu with
Python 2.7) need to be given an additional "local" location, sigh.
"""
try:
import sysconfig
except ImportError:
pass
else:
if sysconfig._get_default_scheme() == 'posix_local':
local_path = os.path.join(home_dir, 'local')
if not os.path.exists(local_path):
os.mkdir(local_path)
for subdir_name in os.listdir(home_dir):
if subdir_name == 'local':
continue
os.symlink(os.path.abspath(os.path.join(home_dir, subdir_name)), \
os.path.join(local_path, subdir_name))
def fix_lib64(lib_dir):
"""
Some platforms (particularly Gentoo on x64) put things in lib64/pythonX.Y
instead of lib/pythonX.Y. If this is such a platform we'll just create a
symlink so lib64 points to lib
"""
if [p for p in distutils.sysconfig.get_config_vars().values()
if isinstance(p, basestring) and 'lib64' in p]:
logger.debug('This system uses lib64; symlinking lib64 to lib')
assert os.path.basename(lib_dir) == 'python%s' % sys.version[:3], (
"Unexpected python lib dir: %r" % lib_dir)
lib_parent = os.path.dirname(lib_dir)
top_level = os.path.dirname(lib_parent)
lib_dir = os.path.join(top_level, 'lib')
lib64_link = os.path.join(top_level, 'lib64')
assert os.path.basename(lib_parent) == 'lib', (
"Unexpected parent dir: %r" % lib_parent)
if os.path.lexists(lib64_link):
return
os.symlink('lib', lib64_link)
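# On such a platform the end result is simply an extra symlink next to the
# real lib directory, e.g. (illustrative):
#
#     <env>/lib/pythonX.Y/...
#     <env>/lib64 -> lib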
def resolve_interpreter(exe):
"""
If the executable given isn't an absolute path, search $PATH for the interpreter
"""
if os.path.abspath(exe) != exe:
paths = os.environ.get('PATH', '').split(os.pathsep)
for path in paths:
if os.path.exists(os.path.join(path, exe)):
exe = os.path.join(path, exe)
break
if not os.path.exists(exe):
logger.fatal('The executable %s (from --python=%s) does not exist' % (exe, exe))
raise SystemExit(3)
if not is_executable(exe):
logger.fatal('The executable %s (from --python=%s) is not executable' % (exe, exe))
raise SystemExit(3)
return exe
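# A hedged usage sketch: given a bare interpreter name, resolve_interpreter
# searches $PATH for it and returns the resolved path, exiting if the file
# does not exist or is not executable (the name below is illustrative):
#
#     exe = resolve_interpreter('python2.7')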
def is_executable(exe):
"""Checks a file is executable"""
return os.access(exe, os.X_OK)
############################################################
## Relocating the environment:
def make_environment_relocatable(home_dir):
"""
Makes the already-existing environment use relative paths, and takes out
the #!-based environment selection in scripts.
"""
home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
activate_this = os.path.join(bin_dir, 'activate_this.py')
if not os.path.exists(activate_this):
logger.fatal(
'The environment doesn\'t have a file %s -- please re-run virtualenv '
'on this environment to update it' % activate_this)
fixup_scripts(home_dir)
fixup_pth_and_egg_link(home_dir)
## FIXME: need to fix up distutils.cfg
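# Illustrative use (typically reached via the --relocatable command line
# option; the path below is hypothetical):
#
#     make_environment_relocatable('/tmp/example-env')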
OK_ABS_SCRIPTS = ['python', 'python%s' % sys.version[:3],
'activate', 'activate.bat', 'activate_this.py']
def fixup_scripts(home_dir):
# This is what we expect at the top of scripts:
shebang = '#!%s/bin/python' % os.path.normcase(os.path.abspath(home_dir))
# This is what we'll put:
new_shebang = '#!/usr/bin/env python%s' % sys.version[:3]
if is_win:
bin_suffix = 'Scripts'
else:
bin_suffix = 'bin'
bin_dir = os.path.join(home_dir, bin_suffix)
home_dir, lib_dir, inc_dir, bin_dir = path_locations(home_dir)
for filename in os.listdir(bin_dir):
filename = os.path.join(bin_dir, filename)
if not os.path.isfile(filename):
# ignore subdirs, e.g. .svn ones.
continue
f = open(filename, 'rb')
try:
try:
lines = f.read().decode('utf-8').splitlines()
except UnicodeDecodeError:
# This is probably a binary program instead
# of a script, so just ignore it.
continue
finally:
f.close()
if not lines:
logger.warn('Script %s is an empty file' % filename)
continue
if not lines[0].strip().startswith(shebang):
if os.path.basename(filename) in OK_ABS_SCRIPTS:
logger.debug('Cannot make script %s relative' % filename)
elif lines[0].strip() == new_shebang:
logger.info('Script %s has already been made relative' % filename)
else:
logger.warn('Script %s cannot be made relative (it\'s not a normal script that starts with %s)'
% (filename, shebang))
continue
logger.notify('Making script %s relative' % filename)
script = relative_script([new_shebang] + lines[1:])
f = open(filename, 'wb')
f.write('\n'.join(script).encode('utf-8'))
f.close()
def relative_script(lines):
"Return a script that'll work in a relocatable environment."
activate = "import os; activate_this=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'activate_this.py'); execfile(activate_this, dict(__file__=activate_this)); del os, activate_this"
# Find the last future statement in the script. If we insert the activation
# line before a future statement, Python will raise a SyntaxError.
activate_at = None
for idx, line in reversed(list(enumerate(lines))):
if line.split()[:3] == ['from', '__future__', 'import']:
activate_at = idx + 1
break
if activate_at is None:
# Activate after the shebang.
activate_at = 1
return lines[:activate_at] + ['', activate, ''] + lines[activate_at:]
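# Roughly, for a two-line input ['#!/usr/bin/env pythonX.Y', 'import example']
# the result is (illustrative; the activation line is abbreviated here):
#
#     #!/usr/bin/env pythonX.Y
#
#     import os; activate_this=...; execfile(activate_this, dict(__file__=activate_this)); del os, activate_this
#
#     import example
#
# i.e. the activation snippet is spliced in right after the shebang, or after
# the last "from __future__ import" line, since it must not precede future
# statements.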
def fixup_pth_and_egg_link(home_dir, sys_path=None):
"""Makes .pth and .egg-link files use relative paths"""
home_dir = os.path.normcase(os.path.abspath(home_dir))
if sys_path is None:
sys_path = sys.path
for path in sys_path:
if not path:
path = '.'
if not os.path.isdir(path):
continue
path = os.path.normcase(os.path.abspath(path))
if not path.startswith(home_dir):
logger.debug('Skipping system (non-environment) directory %s' % path)
continue
for filename in os.listdir(path):
filename = os.path.join(path, filename)
if filename.endswith('.pth'):
if not os.access(filename, os.W_OK):
logger.warn('Cannot write .pth file %s, skipping' % filename)
else:
fixup_pth_file(filename)
if filename.endswith('.egg-link'):
if not os.access(filename, os.W_OK):
logger.warn('Cannot write .egg-link file %s, skipping' % filename)
else:
fixup_egg_link(filename)
def fixup_pth_file(filename):
lines = []
prev_lines = []
f = open(filename)
prev_lines = f.readlines()
f.close()
for line in prev_lines:
line = line.strip()
if (not line or line.startswith('#') or line.startswith('import ')
or os.path.abspath(line) != line):
lines.append(line)
else:
new_value = make_relative_path(filename, line)
if line != new_value:
logger.debug('Rewriting path %s as %s (in %s)' % (line, new_value, filename))
lines.append(new_value)
if lines == prev_lines:
logger.info('No changes to .pth file %s' % filename)
return
logger.notify('Making paths in .pth file %s relative' % filename)
f = open(filename, 'w')
f.write('\n'.join(lines) + '\n')
f.close()
def fixup_egg_link(filename):
f = open(filename)
link = f.readline().strip()
f.close()
if os.path.abspath(link) != link:
logger.debug('Link in %s already relative' % filename)
return
new_link = make_relative_path(filename, link)
logger.notify('Rewriting link %s in %s as %s' % (link, filename, new_link))
f = open(filename, 'w')
f.write(new_link)
f.close()
def make_relative_path(source, dest, dest_is_directory=True):
"""
Return ``dest`` rewritten as a path relative to the directory containing ``source``.
>>> make_relative_path('/usr/share/something/a-file.pth',
... '/usr/share/another-place/src/Directory')
'../another-place/src/Directory'
>>> make_relative_path('/usr/share/something/a-file.pth',
... '/home/user/src/Directory')
'../../../home/user/src/Directory'
>>> make_relative_path('/usr/share/a-file.pth', '/usr/share/')
'./'
"""
source = os.path.dirname(source)
if not dest_is_directory:
dest_filename = os.path.basename(dest)
dest = os.path.dirname(dest)
dest = os.path.normpath(os.path.abspath(dest))
source = os.path.normpath(os.path.abspath(source))
dest_parts = dest.strip(os.path.sep).split(os.path.sep)
source_parts = source.strip(os.path.sep).split(os.path.sep)
while dest_parts and source_parts and dest_parts[0] == source_parts[0]:
dest_parts.pop(0)
source_parts.pop(0)
full_parts = ['..']*len(source_parts) + dest_parts
if not dest_is_directory:
full_parts.append(dest_filename)
if not full_parts:
# Special case for the current directory (otherwise it'd be '')
return './'
return os.path.sep.join(full_parts)
############################################################
## Bootstrap script creation:
def create_bootstrap_script(extra_text, python_version=''):
"""
Creates a bootstrap script, which is like this script but with
extend_parser, adjust_options, and after_install hooks.
This returns a string that (written to disk of course) can be used
as a bootstrap script with your own customizations. The script
will be the standard virtualenv.py script, with your extra text
added (your extra text should be Python code).
If you include these functions, they will be called:
``extend_parser(optparse_parser)``:
You can add or remove options from the parser here.
``adjust_options(options, args)``:
You can change options here, or change the args (if you accept
different kinds of arguments, be sure you modify ``args`` so it is
only ``[DEST_DIR]``).
``after_install(options, home_dir)``:
After everything is installed, this function is called. This
is probably the function you are most likely to use. An
example would be::
def after_install(options, home_dir):
subprocess.call([join(home_dir, 'bin', 'easy_install'),
'MyPackage'])
subprocess.call([join(home_dir, 'bin', 'my-package-script'),
'setup', home_dir])
This example immediately installs a package, and runs a setup
script from that package.
If you provide something like ``python_version='2.5'`` then the
script will start with ``#!/usr/bin/env python2.5`` instead of
``#!/usr/bin/env python``. You can use this when the script must
be run with a particular Python version.
"""
filename = __file__
if filename.endswith('.pyc'):
filename = filename[:-1]
f = codecs.open(filename, 'r', encoding='utf-8')
content = f.read()
f.close()
py_exe = 'python%s' % python_version
content = (('#!/usr/bin/env %s\n' % py_exe)
+ '## WARNING: This file is generated\n'
+ content)
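# The marker string is split in two below so that this line of source, which
# is itself part of ``content``, does not contain the marker being replaced.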
return content.replace('##EXT' 'END##', extra_text)
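# An illustrative way to use create_bootstrap_script (the output file name and
# the extra text are hypothetical):
#
#     extra = "def after_install(options, home_dir):\n    print(home_dir)\n"
#     open('my-bootstrap.py', 'w').write(create_bootstrap_script(extra))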
##EXTEND##
def convert(s):
b = base64.b64decode(s.encode('ascii'))
return zlib.decompress(b).decode('utf-8')
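# The blobs below are zlib-compressed, base64-encoded copies of the named
# support files; convert() above reverses that encoding at runtime.  They were
# presumably produced with the inverse pipeline, roughly:
#
#     base64.b64encode(zlib.compress(text.encode('utf-8'))).decode('ascii')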
##file site.py
SITE_PY = convert("""
eJzFPf1z2zaWv/OvwMqToZTIdOK0vR2nzo2TOK3v3MTbpLO5dT1aSoIs1hTJEqRl7c3d337vAwAB
kpLtTXdO04klEnh4eHhfeHgPHQwGJ0Uhs7lY5fM6lULJuJwtRRFXSyUWeSmqZVLO94u4rDbwdHYT
X0slqlyojYqwVRQET7/yEzwVn5eJMijAt7iu8lVcJbM4TTciWRV5Wcm5mNdlkl2LJEuqJE6Tf0CL
PIvE06/HIDjLBMw8TWQpbmWpAK4S+UJcbKplnolhXeCcX0Tfxi9HY6FmZVJU0KDUOANFlnEVZFLO
AU1oWSsgZVLJfVXIWbJIZrbhOq/TuSjSeCbF3//OU6OmYRiofCXXS1lKkQEyAFMCrALxgK9JKWb5
XEZCvJGzGAfg5w2xAoY2xjVTSMYsF2meXcOcMjmTSsXlRgyndUWACGUxzwGnBDCokjQN1nl5o0aw
pLQea3gkYmYPfzLMHjBPHL/LOYDjxyz4JUvuxgwbuAfBVUtmm1IukjsRI1j4Ke/kbKKfDZOFmCeL
BdAgq0bYJGAElEiT6UFBy/G9XqHXB4SV5coYxpCIMjfml9QjCs4qEacK2LYukEaKMH8np0mcATWy
WxgOIAJJg75x5omq7Dg0O5EDgBLXsQIpWSkxXMVJBsz6UzwjtP+aZPN8rUZEAVgtJX6rVeXOf9hD
AGjtEGAc4GKZ1ayzNLmR6WYECHwG7Eup6rRCgZgnpZxVeZlIRQAAtY2Qd4D0WMSl1CRkzjRyOyb6
E02SDBcWBQwFHl8iSRbJdV2ShIlFApwLXPH+48/i3embs5MPmscMMJbZ6xXgDFBooR2cYABxUKvy
IM1BoKPgHP+IeD5HIbvG8QGvpsHBvSsdDGHuRdTu4yw4kF0vrh4G5liBMqGxAur339BlrJZAn/+5
Z72D4GQbVWji/G29zEEms3glxTJm/kLOCL7XcF5HRbV8BdygEE4FpFK4OIhggvCAJC7NhnkmRQEs
liaZHAVAoSm19VcRWOFDnu3TWrc4ASCUQQYvnWcjGjGTMNEurFeoL0zjDc1MNwnsOq/ykhQH8H82
I12UxtkN4aiIofjbVF4nWYYIIS8E4V5IA6ubBDhxHolzakV6wTQSIWsvbokiUQMvIdMBT8q7eFWk
cszii7p1txqhwWQlzFqnzHHQsiL1SqvWTLWX9w6jLy2uIzSrZSkBeD31hG6R52MxBZ1N2BTxisWr
WufEOUGPPFEn5AlqCX3xO1D0RKl6Je1L5BXQLMRQwSJP03wNJDsKAiH2sJExyj5zwlt4B/8CXPw3
ldVsGQTOSBawBoXIbwOFQMAkyExztUbC4zbNym0lk2SsKfJyLksa6mHEPmDEH9gY5xp8yCtt1Hi6
uMr5KqlQJU21yUzY4mVhxfrxFc8bpgGWWxHNTNOGTiucXlos46k0LslULlAS9CK9sssOYwY9Y5It
rsSKrQy8A7LIhC1Iv2JBpbOoJDkBAIOFL86Sok6pkUIGEzEMtCoI/ipGk55rZwnYm81ygAqJzfcM
7A/g9g8Qo/UyAfrMAAJoGNRSsHzTpCrRQWj0UeAbfdOfxwdOPVto28RDLuIk1VY+zoIzenhaliS+
M1lgr7EmhoIZZhW6dtcZ0BHFfDAYBIFxhzbKfM1VUJWbI2AFYcaZTKZ1goZvMkFTr3+ogEcRzsBe
N9vOwgMNYTp9ACo5XRZlvsLXdm6fQJnAWNgj2BMXpGUkO8geJ75C8rkqvTBN0XY77CxQDwUXP5++
P/ty+kkci8tGpY3b+uwKxjzNYmBrsgjAVK1hG10GLVHxJaj7xHsw78QUYM+oN4mvjKsaeBdQ/1zW
9BqmMfNeBqcfTt6cn05++XT68+TT2edTQBDsjAz2aMpoHmtwGFUEwgFcOVeRtq9Bpwc9eHPyyT4I
JomafPcNsBs8GV7LCpi4HMKMxyJcxXcKGDQcU9MR4thpABY8HI3Ea3H49OnLQ4JWbIoNAAOz6zTF
hxNt0SdJtsjDETX+jV36Y1ZS2n+7PPrmShwfi/C3+DYOA/ChmqbMEj+ROH3eFBK6VvBnmKtREMzl
AkTvRqKADp+SXzziDrAk0DLXdvq3PMnMe+ZKdwjSH0PqAThMJrM0VgobTyYhEIE69HygQ8TONUrd
EDoWG7frSKOCn1LCwmbYZYz/9KAYT6kfosEoul1MIxDX1SxWklvR9KHfZII6azIZ6gFBmEliwOFi
NRQK0wR1VpmAX0uchzpsqvIUfyJ81AIkgLi1Qi2Ji6S3TtFtnNZSDZ1JARGHwxYZUdEmivgRXJQh
WOJm6UajNjUNz0AzIF+agxYtW5TDzx74O6CuzCYON3q892KaIab/wTsNwgFczhDVvVItKKwdxcXp
hXj5/HAf3RnYc84tdbzmaKGTrJb24QJWy8gDI8y9jLy4dFmgnsWnR7thriK7Ml1WWOglLuUqv5Vz
wBYZ2Fll8TO9gZ05zGMWwyqCXid/gFWo8Rtj3Ify7EFa0HcA6q0Iill/s/R7HAyQmQJFxBtrIrXe
9bMpLMr8NkFnY7rRL8FWgrJEi2kcm8BZOI/J0CSChgAvOENKrWUI6rCs2WElvBEk2ot5o1gjAneO
mvqKvt5k+Tqb8E74GJXucGRZFwVLMy82aJZgT7wHKwRI5rCxa4jGUMDlFyhb+4A8TB+mC5SlvQUA
AkOvaLvmwDJbPZoi7xpxWIQxeiVIeEuJ/sKtGYK2WoYYDiR6G9kHRksgJJicVXBWNWgmQ1kzzWBg
hyQ+151HvAX1AbSoGIHZHGpo3MjQ7/IIlLM4d5WS0w8t8pcvX5ht1JLiK4jYFCeNLsSCjGVUbMCw
JqATjEfG0RpigzU4twCmVpo1xf4nkRfsjcF6XmjZBj8AdndVVRwdHKzX60hHF/Ly+kAtDr7983ff
/fk568T5nPgHpuNIiw61RQf0Dj3a6HtjgV6blWvxY5L53EiwhpK8MnJFEb8f6mSei6P9kdWfyMWN
mcZ/jSsDCmRiBmUqA20HDUZP1P6T6KUaiCdknW3b4Yj9Em1SrRXzrS70qHLwBMBvmeU1muqGE5R4
BtYNduhzOa2vQzu4ZyPND5gqyunQ8sD+iyvEwOcMw1fGFE9QSxBboMV3SP8zs01M3pHWEEheNFGd
3fOmX4sZ4s4fLu/W13SExswwUcgdKBF+kwcLoG3clRz8aNcW7Z7j2pqPZwiMpQ8M82rHcoiCQ7jg
WoxdqXO4Gj1ekKY1q2ZQMK5qBAUNTuKUqa3BkY0MESR6N2azzwurWwCdWpFDEx8wqwAt3HE61q7N
Co4nhDxwLF7QEwku8lHn3XNe2jpNKaDT4lGPKgzYW2i00znw5dAAGItB+cuAW5ptysfWovAa9ADL
OQaEDLboMBO+cX3Awd6gh506Vn9bb6ZxHwhcpCHHoh4EnVA+5hFKBdJUDP2e21jcErc72E6LQ0xl
lolEWm0Rrrby6BWqnYZpkWSoe51FimZpDl6x1YrESM1731mgfRA+7jNmWgI1GRpyOI2OydvzBDDU
7TB8dl1joMGNwyBGq0SRdUMyLeEfcCsovkHBKKAlQbNgHipl/sT+AJmz89VftrCHJTQyhNt0mxvS
sRgajnm/J5CMOhoDUpABCbvCSK4jq4MUOMxZIE+44bXcKt0EI1IgZ44FITUDuNNLb4ODTyI8ASEJ
Rch3lZKFeCYGsHxtUX2Y7v5DudQEIYZOA3IVdPTi2I1sOFGN41aUw2doP75BZyVFDhw8BZfHDfS7
bG6Y1gZdwFn3FbdFCjQyxWEGIxfVK0MYN5j8p2OnRUMsM4hhKG8g70jHjDQK7HJr0LDgBoy35u2x
9GM3YoF9h2GuDuXqDvZ/YZmoWa5Cipm0YxfuR3NFlzYW2/NkOoA/3gIMRlceJJnq+AVGWf6JQUIP
etgH3ZsshkXmcblOspAUmKbfsb80HTwsKT0jd/CJtlMHMFGMeB68L0FA6OjzAMQJNQHsymWotNvf
BbtzigMLl7sPPLf58ujlVZe4420RHvvpX6rTu6qMFa5WyovGQoGr1TXgqHRhcnG20YeX+nAbtwll
rmAXKT5++iKQEBzXXcebx029YXjE5t45eR+DOui1e8nVmh2xCyCCWhEZ5SB8PEc+HNnHTm7HxB4B
5FEMs2NRDCTNJ/8MnF0LBWPszzcZxtHaKgM/8Pq7byY9kVEXye++GdwzSosYfWI/bHmCdmROKtg1
21LGKbkaTh8KKmYN69g2xYj1OW3/NI9d9ficGi0b++5vgR8DBUPqEnyE5+OGbN2p4sd3p7bC03Zq
B7DObtV89mgRYG+fT3+DHbLSQbXbOEnpXAEmv7+PytVs7jle0a89PEg7FYxDgr79l7p8DtwQcjRh
1J2OdsZOTMC5ZxdsPkWsuqjs6RyC5gjMywtwjz+7ULUFM4z7nI8XDntUkzfjPmfia9Qqfv4QDWSB
eTQY9JF9Kzv+f8zy+b9mkg+cijm5/gOt4SMB/VEzYePB0LTx8GH1L7trdw2wB5inLW7nDrewOzSf
VS6Mc8cqSYmnqLueijWlK1BsFU+KAMqc/b4eOLiM+tD7bV2WfHRNKrCQ5T4ex44FZmoZz6/XxOyJ
gw+yQkxssxnFqp28nrxPjYQ6+mxnEjb7hn45W+YmZiWz26SEvqBwh+GPH386DftNCMZxodPDrcjD
/QaE+wimDTVxwsf0YQo9pss/L1XtrYtPUJMRYCLCmmy99sEPBJs4Qv8a3BMR8g5s+Zgdd+izpZzd
TCSlDiCbYlcnKP4WXyMmNqPAz/9S8YKS2GAms7RGWrHjjdmHizqb0flIJcG/0qnCmDpECQEc/luk
8bUYUuc5hp40N1J06jYutfdZlDkmp4o6mR9cJ3Mhf6/jFLf1crEAXPDwSr+KeHiKQIl3nNPASYtK
zuoyqTZAgljl+uyP0h+chtMNT3ToIcnHPExATIg4Ep9w2vieCTc35DLBAf/EAyeJ+27s4CQrRPQc
3mf5BEedUI7vmJHqnsvT46A9Qg4ABgAU5j8Y6cid/0bSK/eAkdbcJSpqSY+UbqQhJ2cMoQxHGOng
3/TTZ0SXt7Zgeb0dy+vdWF63sbzuxfLax/J6N5auSODC2qCVkYS+wFX7WKM338aNOfEwp/Fsye0w
9xNzPAGiKMwG28gUp0B7kS0+3yMgpLadA2d62OTPJJxUWuYcAtcgkfvxEEtv5k3yutOZsnF0Z56K
cWe35RD5fQ+iiFLFptSd5W0eV3HkycV1mk9BbC264wbAWLTTiThWmt1OphzdbVmqwcV/ff7x4wds
jqAGJr2BuuEiomHBqQyfxuW16kpTs/krgB2ppZ+IQ900wL0HRtZ4lD3+5x1leCDjiDVlKOSiAA+A
srpsMzf3KQxbz3WSlH7OTM6HTcdikFWDZlJbiHRycfHu5PPJgEJ+g/8duAJjaOtLh4uPaWEbdP03
t7mlOPYBodaxrcb4uXPyaN1wxP021oDt+PCtB4cPMdi9YQJ/lv9SSsGSAKEiHfx9DKEevAf6qm1C
hz6GETvJf+7JGjsr9p0je46L4oh+37FDewD/sBP3GBMggHahhmZn0GymWkrfmtcdFHWAPtDX++ot
WHvr1d7J+BS1k+hxAB3K2mbb3T/vnIaNnpLVm9Mfzj6cn725OPn8o+MCoiv38dPBoTj96Yug/BA0
YOwTxZgaUWEmEhgWt9BJzHP4r8bIz7yuOEgMvd6dn+uTmhWWumDuM9qcCJ5zGpOFxkEzjkLbhzr/
CDFK9QbJqSmidB2qOcL90orrWVSu86OpVGmKzmqtt166VszUlNG5dgTSB41dUjAITjGDV5TFXpld
YckngLrOqgcpbaNtYkhKQcFOuoBz/mVOV7xAKXWGJ01nregvQxfX8CpSRZrATu5VaGVJd8P0mIZx
9EN7wM149WlApzuMrBvyrLdigVbrVchz0/1HDaP9XgOGDYO9g3lnktJDKAMbk9tEiI34JCeUd/DV
Lr1eAwULhgd9FS6iYboEZh/D5losE9hAAE8uwfriPgEgtFbCPxA4cqIDMsfsjPDtar7/l1ATxG/9
6689zasy3f+bKGAXJDiVKOwhptv4HWx8IhmJ04/vRyEjR6m54i81lgeAQ0IBUEfaKX+JT9AnQyXT
hc4v8fUBvtB+Ar1udS9lUeru/a5xiBLwRA3Ja3iiDP1CTPeysMc4lVELNFY+WMywgtBNQzCfPfFp
KdNU57ufvTs/Bd8RizFQgvjc7RSG43gJHqHr5DuucGyBwgN2eF0iG5fowlKSxTzymvUGrVHkqLeX
l2HXiQLD3V6dKHAZJ8pFe4jTZlimnCBCVoa1MMvKrN1qgxR22xDFUWaYJSYXJSWw+jwBvExPY94S
wV4JSz1MBJ5PkZOsMhmLaTIDPQoqFxTqGIQEiYv1jMR5ecYx8LxUpgwKHhabMrleVni6AZ0jKsHA
5j+dfDk/+0BlCYcvG6+7hznHtBMYcxLJMaYIYrQDvrhpf8hVk0kfz+pXCAO1D/xpv+LslGMeoNOP
A4v4p/2K69COnZ0gzwAUVF20xQM3AE63PrlpZIFxtftg/LgpgA1mPhiKRWLZi070cOfX5UTbsmVK
KO5jXj7iAGdR2JQ03dlNSWt/9BwXBZ5zzYf9jeBtn2yZzxS63nTebEt+cz8dKcSSWMCo29ofw2SH
dZrq6TjMto1baFurbeyvmRMrddrNMhRlIOLQ7TxymaxfCevmzIFeGnUHmPheo2sksVeVD37NBtrD
8DCxxO7sU0xHKmMhI4CRDKlrf2rwodAigAKh7N+hI7nj0dNDb46ONbh/jlp3gW38ERShzsWlGo+8
BE6EL7+z48ivCC3Uo0cidDyVTGa5zRPDz3qJXuULf469MkBBTBS7Ms6u5ZBhjQ3MZz6xt4RgSdt6
pL5MrvoMizgD5/RuC4d35aL/4MSg1mKETrsbuWmrI5882KC3FGQnwXzwZbwG3V/U1ZBXcss5dG8t
3Xao90PE7ENoqk/fhyGGY34Pt6xPA7iXGhoWeni/bzmF5bUxjqy1j62qptC+0B7srIStWaXoWMYp
TjS+qPUCGoN73Jj8gX2qE4Xs7546MScmZIHy4C5Ib24D3aAVThhwuRJXjiaUDt9U0+h3c3krUzAa
YGSHWO3wm612GEU2nNKbB/bV2F1sLjb9uNGbBrMjU46BnpkqYP2iTFYHiE5vxGcXZg0yuNS/6i1J
nN2Ql/z2r2dj8fbDz/DvG/kRTCkWP47F3wAN8TYvYX/J1bt0rQJWclS8ccxrhRWSBI2OKvgGCnTb
Ljw647GILjHxa0usphSYVVuu+NoTQJEnSBXtjZ9gCifgt6nsanmjxlPsW5SBfok02F7sggUiB7pl
tKxWKdoLJ0rSrObl4Pzs7emHT6dRdYccbn4OnCiKn5CF09FnxCWeh42FfTKr8cmV4zj/KNOix2/W
m05TOIObThHCvqSwG02+UiO2m4u4xMiBKDbzfBZhS2B5rtWr1uBIj5z95b2G3rOyCGs40qdojTeP
j4Ea4te2IhpAQ+qj50Q9CaF4ikVj/Dga9JvisaDQNvx5erOeu5FxXf1DE2xj2sx66He3unDJdNbw
LCcRXsd2GUxBaJrEajWduYWCHzOhb0QBLUfnHHIR12klZAaSS5t8upoCNL1b28cSwqzC5owK3ihM
k67jjXKSkGIlBjjqgKrr8UCGIoawB/8pvmF7gEWHouZaaIBOiNL+KXe6qnq2ZAnmLRFRryfxYJ1k
L918Hk1hHpR3yLPGkYV5otvIGF3LSs+fHwxHly+aTAeKSs+8yt5ZAVbPZZM9UJ3F06dPB+Lf7/d+
GJUozfMbcMsAdq/Xck6vt1huPTm7Wl3P3ryJgB9nS3kJD64oem6f1xmFJnd0pQWR9q+BEeLahJYZ
TfuWXeagXckHzdyCD6y05fglS+jeIwwtSVS2+vooDDsZaSKWBMUQxmqWJCGHKWA9NnmNRXkYZtT8
Iu+A4xMEM8a3eELGW+0lepiUQGu5x6JzLAYEeEC5ZTwaVTVTWRrgObnYaDQnZ1lSNfUkz93DU30X
QGWvM9J8JeI1SoaZR4sYTn2nx6qNh53vZFFvx5LPLt2AY2uW/Po+3IG1QdLyxcJgCg/NIs1yWc6M
OcUVS2ZJ5YAx7RAOd6ZbnMj6REEPSgNQ72QV5lai7ds/2XVxMf1I58j7ZiSdPlTZm7E4OBRnrQTD
KGrGpzCUJaTlW/NlBKN8oLC29gS8scSfdFAViwm8CzzcusY60xdzcP5Gc1sHwKHLoKyCtOzo6Qjn
BjILn5l2y3Ua+KEtOuF2m5RVHacTff/DBB22iT1Y13jaeridlZ7WWwEnPwcPeF+n7oPjYLJskJ6Y
emtKM47FQocoIrfEzK/GKnL08g7ZVwKfAikzn5jCaBNEurTsaitOdc6mo+IR1DNTxbTFMzflM53K
ExfzMeU5mbqHLV60waV9kYV4fSyGL8bi29ZGaFZs8GInQPnJPHoyD32fjLpeHh02dqa78WxB2Ark
5dWjp5smU5pe2Jdzfn9fnXSIG8AVyM4ikfP9JwqxY5y/FqqG0sxrO6fQjLEkfc9mPelq7KZGhUrR
puDVrxuF4qgW43/aQUyZt9YDXBGLQssWyFbxm8STVvKfvbcNEwM1ev7Koucy6Tucwm94Wwq81wR1
HZ2th5Y6rd6C7dmT69pJPoJqGjYcf69H9ShRaueId1rh8WQjcS7rP4KHQ7pZhpjmWetY+F/JPJy0
v+1wsYPld9/swtNVML1lEj0Lurt2gZe6XbDQLLf59Ie6PEbp6/pVAuNAaUQHvD5z+SP5a0eYD8y3
uuQ2L3iF1yvSWS/allS6/gfvSfkeLXQIaBNO6VmwFuCS1As8mr2l2yJPFKWR4aUv3xy+GJtaWwak
J/AyevlMX6pI3cx1Ar6zOtabIHip+x1G/+YASyq/t33V2RbQtI5btyv5g4UUjxpFE0uHxnLcX1nR
rFks8BbChpjspNorNd6D2zAFh8FcJ5qD5wM7u6gPXVdjNNK7TbVtEeCtwUP72SY5D+raKFJEepew
bVOeuxTno0VB9+q3ILgXR85fxvwGfaq6OLKxKmNT8Cxx6OZH4qe66a3kYnuCxrW6CXdNn/vvmrtu
EdiZm/SAztz9ik2XBrrvdivaRwOOE2hCPKjooNH4/cbEtQNjnZXSH/PWHyS/2wlnusWs3AfG5MBg
BJ3YU2NvzP4qnrnfMcVqn684dgt0e52N1rQ7NqPN8Q/xFDidBJ/bmn3KEZprDuSNB91ZN+Gs04m8
vlaTGO9LnNBulTKkOtsQs/95T9fdyVhtzLYFrwECEIabdC6rm64OjAG6ku9t5gQj574XQUNTGq6T
16uSOZsEvUcCcBGHHqm/CW1zYu4glRgxVnVZlLCtHOjbfTnzpS9ZuAFqImGrWN0Y1E2Psb7slRQr
pVuZol4OeLbSZoAIbMQ7pmEyse+AV543FxckY8sMMqtXsoyr5tIe/4w9Ea+dEaiMGxfXiXM1Utni
EhexxPKGgxRGmuz3Z7BD83anO24qGFlt93B2oh46dvqYSxAcY2S4OLmzF/a5F0XN6bJo1zu0zRqu
s5cUwTKY2+dIR+qgE7/VN2Lxra0cEkf/0uEfkHe3ltHP67bqjL1bi4bzzFUI3SuQsAafjHPfzYYd
DujeYdjaodrxfX1hGaXjYW5pbKmoffJehdOMNmpCMZiCeU8oxk+zf2QoxoP/wFCMvocSDI3GR+uB
3sT7e2I2rB7cSx0bRoA+EyASHgm3rgQ0pnLoprEXuUruBvaKZtaVTm2cMQ/Ikd3bvggEX96o3Jxf
73K1XaEYX7ro8Q/nH9+cnBMtJhcnb//z5AdKc8Jzh5atenCsKsv3mdr7XkK1G7fSqSl9gzfY9ty5
ylVBGkLnfedUvwdCfwVY34K2FZn7eluHTiVNtxMgvnvaLajbVHYv5I5fpqs23ISUVuZzoJ9ymqr5
5Zz1m0fmyIvFoTnSMu+bUwgto50g7baFcxJGu+pE+6v6Xs0tAeSRTVumFcDDB+Qve/ZgalBshJsd
lPb/OINyrbF+z9xJA1I4k87diHQtIoOq/P9DRwnKLsa9HTuKY3vbNbXjcxZlr3HHQ9SZjAxBvAK6
QXd+rrDPZbqFCkHACk/f/MeIGP2nTybtOf4TJS73qVR3H5XNlf2Fa6ad278meFpf2Ru0FKf88Hkl
NF7UqXsCb/t0OpDTR8c6+cKpDQHNdwB0bsRTAXujv8QKcboRIWwctUuG6aZER339nYM82k0He0Or
52J/WyGnW8goxIvtDeetWknd45B7qHt6qNqUyzkWGPMet1VoitcEmc8FBV2Z5TkfeBitt/3w9fby
xZGN0iO/42tHkVB+1sAx7JdOfuPOaxqd7sQs5ZgS4HCv5tT36hZXDlT2CbbtbTpFHlv2PyZhgCEN
vPf9ITPTw7vMftDG1LLeEUxJDJ+oEU3LKYvRuNsno+50G7XVBcIlPg8A0lGBAAvBdHSjk3K54bzp
4XO9G5zWdMGte1QTOlJB6Vc+R3AP4/s1+LW7U2nug7oziqY/N2hzoF5yEG72HbjVyAuFbDcJ7ak3
fLDFBeAq5/7+Lx7Qv5sYaLsf7vKrbauXvZV17MtiLimm2LRIZB5HYGRAbw5JW2MBghF0vNiloaPL
UM3ckC/Q8aP8VLy+mjYY5MxOtAdgjULwf2RtvCc=
""")
##file ez_setup.py
EZ_SETUP_PY = convert("""
eJzNWmmP20YS/a5fwSgYSIJlDu9DhrzIJg5gIMgGuYCFPavpc8SYIhWS8li7yH/f181DJDWcJIt8
WAbOzJDN6qpXVa+qWvr8s+O52ufZbD6f/z3Pq7IqyNEoRXU6VnmelkaSlRVJU1IlWDR7K41zfjIe
SVYZVW6cSjFcq54WxpGwD+RBLMr6oXk8r41fTmWFBSw9cWFU+6ScySQV6pVqDyHkIAyeFIJVeXE2
HpNqbyTV2iAZNwjn+gW1oVpb5Ucjl/VOrfzNZjYzcMkiPxji3zt930gOx7yolJa7i5Z63fDWcnVl
WSF+PUEdgxjlUbBEJsz4KIoSIKi9L6+u1e9YxfPHLM0Jnx2SosiLtZEXGh2SGSStRJGRSnSLLpau
9aYMq3hulLlBz0Z5Oh7Tc5I9zJSx5Hgs8mORqNfzo3KCxuH+fmzB/b05m/2oYNK4Mr2xkiiM4oTf
S2UKK5KjNq/xqtby+FAQ3vejqYJh1oBXnsvZV2++/uKnb37c/fzm+x/e/uNbY2vMLTNgtj3vHv30
/TcKV/VoX1XHze3t8XxMzDq4zLx4uG2Cory9KW/xX7fb7dy4UbuYDb7vNu7dbHbg/o6TikDgf7TH
Fpc3XmJzar88nh3TNcXDw2JjLKLIcRiRsWU7vsUjL6JxHNBQOj4LRMDIYv2MFK+VQsOYRMSzXOH5
liMpjXwhXGnHnh26PqMTUpyhLn7gh6Ef84gEPJLM86zQIjG3Qid0eBw/L6XTxYMBJOJ2EHOHiiCw
JXEdEgjfEZ6MnCmL3KEulLo2syQL3TgmgeuHcRz6jPBY+sQK7OhZKZ0ubkQihrs8EIw7juOF0g5j
GXISBLEkbEKKN9QlcCzPJ44nuCdsQVkYSmG5MSGeCGQo/GelXHBh1CF25EOPiBMmJXW4DX0sl7rU
Zt7TUtgoXqgrHer7bswD+DWUoUd4GNsOBJHYiiYsYuN4gT1ccCAZhNzhjpTC9iwrdgNPOsSb8DSz
raEyDHA4hPrcJZbjB54fwD/MdiPLIqEVW8+L6bTxQ44X4aOYRlYYOsyPie+SyHNd4nM+iUwtxm/F
cOEFhEXAMg5ZFPt+6AhfRD7CUdCIhc+LCTptIoFMIkJaAQBymAg824M0B0YC8Alvg1SG2DiUCIIc
tl2O95FGTiRCSnzqE2jExfNiLp7igRvLmFoQ5jHP8eLQcj0umCOYxZxJT9lDbAKPxZ50qQxJiCh0
BYtcYVEH7g69mDrPi+mwoZLEjm1ZlMNNHDkBSYJzF44PPCsKJsSMeEZaVuBRGRDi0JBbUAvIeghs
K7JD5kw5asQzgR3YsSMEc33phQJeswPGA2I7kOqEU1JGPCPtCAQF8uUSoUIcP2YxpEibhzSM5ARb
sRHPCEvw0Asih8VxRCUNgXRkIXot+Dy0p5ztDp1EqJB2IDmHYb7v217k2SwEf/E4igN/SsqIrahF
Y9u1CSPUdSyAAZ4LpecxH0QR2vJZKZ1FCBKJPQPuSSpdZBSVsRcwC1CB9cRUwHhDiyLF1iB+12Gc
xix0KJMe6MsJpBMROcVW/tAiIWLJIwvqICERsdIV4HQ/BGHwyA6mPO0PLSISXMUlqoodWrYQADdE
cfIpQ8EjwRTL+CMfRdyVAQjBY4yQKLQ9BA53Q8oYd7nPJ6QEQ4uQMBGqfGTbASpRFHmhAxGomL4X
I7WniDMYVTfmB0T6IQW+6B6QDYEFQzzPRYL5ZIobgqFF1JERCX0HxR60S10UaQuu5sKXaCV8d0JK
OKI7Cz6SMeHMJYHtC9+2faQhWooIFDgZL+GoEpBIxr6HKsDB5ZakQcikLR24AY+cqQwIhxZ5qLEE
fCvRMiABPdezbVtyEbk2/oVTukSjbshSvZATA5GYo36oEASBR66lGivreSmdRYwSNwI3oOfwIpdZ
KmYRbQCbobJMloFoaJEdOnYIkoOjY85s3/Jji/gRdQXyPPanPB0PLYLuzLPQzNgKYerFgfCYpMKK
YCuzpjwdj5gBQYbGDrXVjSIegJ2IEFYA8mKB6031d42UziIp4FpX+MQOqe0wuIn5nk1D1F5UfjFV
SeJhPWIEaWNLxZrEERzEZMcuKltI/dhBjwMpv816EwHGm3JWFedNPXDtSblPE9rOW+jdZ+ITExg1
3uo7b9RI1KzFw/66GRfS2H0kaYJuX+xwawmddhnmwbWhBoDVRhuQSKO9r2bGdjyoH6qLJ5gtKowL
SoR+0dyLT/VdzHftMshpVn627aS8a0XfXeSpC3MXpsHXr9V0UlZcFJjrloMV6porkxoLmvnwBlMY
wRjGPzOM5Xd5WSY07Y1/GOnw9+Fvq/mVsJvOzMGj1eAvpY/4lFRLp75fwLlFpuGqAR0Nh3pRM15t
R8PculNrR0kptr2Bbo1JcYdRdZuXJjsV+K0Opu4FLlJy3tr+rHESxsYvTlV+AA4M0+UZo2jGbzuz
eycFaq4/kA/wJYbnj4CKKIAAnjLtSKp9Pc7fN0rfG+U+P6VcTbOkxrovrZ3Ms9OBisKo9qQyMAh3
grUsNQFnCl1DYurtlDplXL8ijPsBEPeGGmmXj/uE7dvdBbRWRxO1PGNxu1iZULJG6V5tqeT0jjH2
ohgckDwmmLnpJRIEXyMi6wDXKmc58EgLQfj5oj72eCt76mnY9XbN2YQWUzVaamlUaFUaQPSJBcsz
XtbYtGocCQJFgQpEVFolVQLXZQ+984za4439eSb0eUJ9NsJrvQBqnioMnzwfUVo2hw2iEabPcor8
hJ1ErUqdZ8Q4iLIkD6I+4Lgk3f29jpeCJKUwfjiXlTi8+aTwympHZAapcK8+2SBUUYsyXoWgMqY+
9TDbCNU/H0m5q1kI9m+NxfHDw64QZX4qmCgXimHU9oecn1JRqlOSHoGOH9c5gazjiIMGtuXqwiQq
5LaXpOnlZYPYKAXbtFuPEu3CAW2SmEBWFNXSWqtNeiTXEHW306v+6Q5tj/l2jWN2mpi3SkbtIBD7
WNYAIP3wCYbvXmoJqQ9I8+h6h4Foswmu5fyi8evt/EUD1epVI7uvwlDAz/XKL/NMpgmrAM2mz/59
z/9Ztp//uL9E/0S8L19vb8pVl8ttDuujzPfZkPDnjGSLSqVUlyLgDHV8p3OkOa5T2XLKMoSyaXyX
CkRIu/xKnsohlcogIAFbWg1lUpQA4lSqdFhAwrl1vfHyp57yC3Mk7332Plt+eSoKSAOd1wJuilHd
WqFqXWJZmKR4KN9Zd8/XrCd991WCwEzoSdXRb/Pq6xzs3AsUUpazJtvS4ZvrfkK+G6XznXrlc4Ci
CT//MKiZ/RCti+dTmfpXV1CVz8i4Qen86ok6qTOTXHjeSHNWdxmaEWsbkqo+9NVdw/9p3axZVx3r
t3Xz98qmuqd2va6ZNZXfX8rgRKnL6wLX1jdVJ1h1IunFiKZuDGtD+6lBgfJBHUTWHvGY1kHbtqBb
o8dPL29KtNM3peqm5/1cGJ1q14EPuf1yoDAzXgy7vpJ8FNB+iy675vlf8iRbtlWhXVqLKwumxOnW
91sU6LZbVuzTvo68K6tyWYtdbVQyfPExT1QAHQVRJbBVp+ySbUDR6tKhyCFIoVG2KKX5w2CV6q+V
X4bvqgsrzUdSZEuF88u/7qo/9Gi4siHn8qkov9EhoT4MWYqPIlN/wJwjlJ3tRXpUrdzbOtp67UQX
Kug3VPyrj2uWCooZWH5tgKpm6tYB6ZwJAIlXkIeqmQXpikdFsQQTalnqt/u0rknZnDVbgo2btuWy
I1TmbTSbs9kSjCg2CmEt5kDYXnVQPBd1rdnDvVCiesyLD82ma+NYF4ycVqT5qE0xhWaJG5CpYhEg
wHQjrhdA8iUTm8wpRFOA+gaYq7/SiwiK9VXI9Ej3qkfSUbZW2XT1GpoEHaxVoobFphdKhTi+qn8s
R+3UMDpbGtalrpzrLUalTKdcww8mfuZHkS2vln1ufI8+/vaxSCqQD3wMfHUHDQ7/sFaf9j0q76kO
gBUqDUGNLC+Kkw6OVIyEab/3w0M11pXQ61tObK/mk7OpuRoGmGrGWK6GGtcsoq2puWI9f6RzwIkH
prajnqy7lzDfqTlvM6YAbLDRu7A0L8VydUURZbXRQvvPm2rWkhYUTNUvLW3N/sil6vcBkb5ED/Jx
PVWxLzX37XOfg+oa+wbdUrOqLRBP9cejz5efa47reaDj6iuJlzXPzwx6+Lauu6zhZDAYDLTPVGr0
xgGWHw4w1By0he0JDWlmrPZqfKQhTlELNM6rF+oA5W6lw/RRLAod1sJQZfx3Q0VZqnAe1Sql9nUN
waJThqHuw7IzS6TlsMHvmbbbNWjtdsYWU55lWqa9+NNd/z9B8Jpc1ahLyzwVyNWJabft41FM6l79
qkcvxCH/qPlWe6L+GoMealE5KlBv+ju8O2q+J7vsJql+HTYrvWGq3+1cz3d/YEbDz2ea+dEgtpmO
9v85JJ9Ls07w70q5iuan8q5Nt7vhGK7BtlYIfFilqj8cx3SkqCdPR6ja5S8CoFNfa37BZbCldqAO
8/kPV23RfN0yyhwk+KALUaFOdBGEaJIuAT1/Qt5i+T3aqXn7hRvzeB4OlPP6qzTX3zYxV4vmpPLY
1ad2hCkv9PyTfmqoFKGnJK1e1ke/EPmgJsWzYuR+FBfN/KN6rfaouBN7AUT33JfuWv2pViwvXbUW
0tZCXTQXBV1cnnUnx+rdu+bUWbZF9cmTZ9kVu3oErEv0u7n646bY4N8aXIHxoek064as3chE8T2U
y9Vd97JZwuKudB7VUDGf15NCXaT7wMADGCGrdmLQXxHatnfNB1HVSavuL/uT9E53DLtdE/UdJI2M
taFhedW0RC0Ar8bGHkiFaXALPc1SkILtl/P3Wf8rPu+z5bt//Xb3YvXbXLcnq/4Yo9/ucdETjI1C
rr9klRpCscBn8+skbRmxVhX/f7fRgk3dei/t1R3GMA3kC/20fojRFY82d0+bv3hsYkI27VGneg+A
GcxocdxuF7udStjdbtF9sJEqiVBT5/BrR5fD9u939h3eefkSYNWp0itfvdzpljubu6fqouaIi0y1
qL7+C1AkCcw=
""")
##file distribute_from_egg.py
DISTRIBUTE_FROM_EGG_PY = convert("""
eJw9j8tqAzEMRfcG/4MgmxQyptkGusonZBmGoGTUGYFfWPKE6dfXTkM3gqt7rh47OKP3NMF3SQFW
LlrRU1zhybpAxoKBlIqcrNnBdRjQP3GTocYfzmNrrCPQPN9iwzpxSQfQhWBi0cL3qtRtYIG/4Mv0
KApY5hooqrOGQ05FQTaxptF9Fnx16Rq0XofjaE1XGXVxHIWK7j8P8EY/rHndLqQ1a0pe3COFgHFy
hLLdWkDbi/DeEpCjNb3u/zccT2Ob8gtnwVyI
""")
##file distribute_setup.py
DISTRIBUTE_SETUP_PY = convert("""
eJztPGtz2ziS3/UrcHK5SOUkxs7MzV25TlOVmTizrs0mKdvZ/ZC4aIiEJI75GpC0ov311403SEp2
LrMfruq8O7ZENBqNfncDzMm/1ft2W5WT6XT6S1W1TctpTdIM/marrmUkK5uW5jltMwCaXK3JvurI
jpYtaSvSNYw0rO3qtqryBmBxlJOaJg90w4JGDkb1fk5+75oWAJK8Sxlpt1kzWWc5oocvgIQWDFbl
LGkrvie7rN2SrJ0TWqaEpqmYgAsibFvVpFrLlTT+i4vJhMDPmleFQ30sxklW1BVvkdrYUivg/Ufh
bLBDzv7ogCxCSVOzJFtnCXlkvAFmIA126hw/A1Ra7cq8oumkyDiv+JxUXHCJloTmLeMlBZ5qILvj
uVg0Aai0Ik1FVnvSdHWd77NyM8FN07rmVc0znF7VKAzBj/v7/g7u76PJ5BbZJfibiIURIyO8g88N
biXhWS22p6QrqKw3nKauPCNUioliXtXoT822a7PcfNubgTYrmP68LgvaJlszxIoa6THfKXe/wo5q
yhs2mRgB4hqNllxebSaTlu8vrJCbDJVTDn+6ubyOb65uLyfsa8JgZ1fi+SVKQE4xEGRJ3lclc7Dp
fXQr4HDCmkZqUsrWJJa2ESdFGr6gfNPM5BT8wa+ALIT9R+wrS7qWrnI2n5F/F0MGjgM7eemgjxJg
eCiwkeWSnE0OEn0CdgCyAcmBkFOyBiFJgsir6Ic/lcgT8kdXtaBr+LgrWNkC69ewfAmqasHgEWKq
wRsAMQWSHwDMD68Cu6QmCxEy3ObMH1N4Avgf2D6MD4cdtgXT02YakFMEHMApmP6Q2vRnS4FgHXxQ
KzZ3felUTdTUFIwyhE8f43+8vrqdkx7TyAtXZm8u377+9O42/vvl9c3Vh/ew3vQs+in64cepGfp0
/Q4fb9u2vnj5st7XWSRFFVV881L5yOZlA34sYS/Tl9ZtvZxObi5vP328/fDh3U389vVfL9/0FkrO
z6cTF+jjX3+Lr96//YDj0+mXyd9YS1Pa0sXfpbe6IOfR2eQ9uNkLx8InZvS0mdx0RUHBKshX+Jn8
pSrYogYKxffJ6w4o5+7nBStolssn77KElY0CfcOkfxF48QEQBBI8tKPJZCLUWLmiEFzDCv7OtW+K
ke3LcDbTRsG+QoxKhLaKcCDhxWBb1OBSgQfa30TFQ4qfwbPjOPiRaEd5GQaXFgkoxWkTzNVkCVjl
abxLARHow4a1yS5VGIzbEFBgzFuYE7pTBRQVREgnF1U1K/W2LEys9qH27E2OkrxqGIYja6GbShGL
mzaBwwCAg5FbB6Jq2m6j3wFeETbHhzmol0Pr57O72XAjEosdsAx7X+3IruIPLsc0tEOlEhqGrSGO
KzNI3hhlD2aufymr1vNogY7wsFygkMPHF65y9DyMXe8GdBgyB1huBy6N7HgFH9OOa9Vxc5vIoaOH
hTEBzdAzkwJcOFgFoavqkfUnoXJmbVJBGNWu+5UHoPyNfLjOSlh9TJ+k+lncMuRGvGg5Y0bblOGs
ugzA2WYTwn9zYuynrWIE+3+z+T9gNkKGIv6WBKQ4gugXA+HYDsJaQUh5W04dMqPFH/h7hfEG1UY8
WuA3+MUdRH+Kksr9Sb3XusdZ0+Wtr1pAiARWTkDLAwyqaRsxbGngNIOc+uqDSJbC4Neqy1MxS/BR
Wutmg9apbCSFLamkO1T5+9yk4fGKNkxv23mcspzu1arI6L6SKPjABu7FabOo96dpBP9Hzo6mNvBz
SiwVmGaoLxAD1xVo2MjD87vZ89mjjAYINntxSoQD+z9Ea+/nAJes1j3hjgSgyCKRfPDAjLfh2ZxY
+at83C/UnKpkpctUnTLEoiBYCsOR8u4VRWrHy17S1uPA0kncRrkhd7BEA+j4CBOW5/8xB+HEa/rA
lre8Y8b3FlQ4gKaDSnIn0nmho3TVVDmaMfJiYpdwNA1A8G/ocm9Hm1hyiaGvDeqHTQwmJfLIRqTV
yN+iSrucNVjafTG7CSxX+oBDP+19cUTjrecDSOXc0oa2LQ89QDCUOHWi/mhZgLMVB8frAjHkl+x9
EOUcbDVlIA4VWmamjM7f4y0OM89jRqT6CuHUsuTn5RTqMrXebISw/j58jCqV/7Uq13mWtP7iDPRE
1jOJ8CfhDDxKX3SuXg25j9MhFEIWFO04FN/hAGJ6K3y72FjqtkmcdlL48/IUiqisEaKmj1BCiOrq
Szkd4sPuT0LLoMVEShk7YN5tsbMhWkKqkwGfeFdifInIx5yBgEbx6W4HJUXFkdQE00JN6DrjTTsH
4wQ0o9MDQLzXTocsPjn7CqIR+C/llzL8teMcVsn3EjE55TNA7kUAFmEWi5nFUJml0LI2fOWPsbwZ
sRDQQdIzOsfCP/c8xR1OwdgselHVw6EC+1vs4VlR5JDNjOq1yXZg1fdV+7bqyvS7zfZJMsdIHKRC
xxxWnHBGW9b3VzFuTligybJExDoSqL83bImfkdilQpZyxFCkv7FtSWOvIrSa5icYX14lol4SrVnF
+ayV3caSFkxmjfeK9nvICkVytsIW6iPNMw+7Nr2yK1aMg0lTYcvGLQhc2LIUWbFo45jeKaiBmMLI
vcePe4KNlxCcRLLVq7MylZET+8qUBC+DWUTuJU/ucUWvOAAHwzjTWaSp5PQqLI3kHgUHzXS1B9EV
TqoyFf3ZmmKsX7E1+htsxSZtR3PbJRb7a7HUaiMthn9JzuCFIyHUjkMlvhKBiGFrXvXIeY5118Qx
x9Fw6aB4NTa33fwzRnXAfpSXH0dYp23+iR5QSV824rmXrqIgIRhqLDIFpI8MWHogC9egKsHkCaKD
fal+r2OuvdRZop1dIM9fP1YZanWNppsacmySM4jqpn4x1iOcfDOd45Z8ny2JUlwKB8Mn5JrR9KUI
rgQjDORnQDpZgck9zPFUYIdKiOFQ+hbQ5KTiHNyFsL4eMtit0GptLxmez7RMwGsV1j/YKcQMgSeg
DzTtJVWSjYJoyaw5me5W0wGQygsQmR0bOE0lCVhrJMcAAnQN34MH/CPxDhZ14W07V0gY9pILS1Ay
1tUgOOwG3Neq+hquuzJBd6a8oBh2x0XTd05evHjYzY5kxvJIwtYoarq2jDfatdzI58eS5j4s5s1Q
ao8lzEjtY1bJBtag+e/+1LRpBgP9lSJcByQ9fG4WeQYOAwuYDs+r8XRIlC9YKD0jtbET3lIAeHZO
3593WIZKebRGeKJ/Up3VMkO6jzNoVASjad04pKv1rt5qTRdkxegdQjSEOTgM8AFla4P+P0R0o8lD
Vwt/sZa5NSvlliC265C01k4AMc1UhAAXCg4vVmgBYu16kLVnncCm4YSlJsmy7gS8HyLZa66OtMNe
+xBuI1axw6qJnfURobFKiPQESDQxasTCTdiNeXsFC9wFY2FUOTzN0/EkcT3moYTSTxzxwHqu23FG
jNfCM3LNt1FpfreAFHFHhKRpGXBNUlCynY76+BQieBB9ePcmOm3wDA/PhyP8NWgrXyM6GTgxaxLt
TLlDjVH1l7Fwxq/h2KgiXz+0tBbVIyTiYHSx2/EP65wmbAtmxHSXvJchZA32OYdgPvGfygeIsd5h
AuR0ahPO3MMKusaaxvNsmOnq+xFOE3qcFKBaHbdH6m+Ic+dut+cF9iMXWHj0A4lefOCHV6AnDy5b
1n7pZTlg+6+iOnDvELjr9hgw6SnB36pHVAGWM3kAXXUtZtPolHZ0b01WV1D9TNBhzpxIy1HE9+Sp
5jt8sEFCGR4QHXuw0pq8yDSYJN2smjEnI6ezqqeu+DmIGZYXYAe07+HmxKdmVJVOAPOO5KwNGoJq
b3x6n59GzRS/UdNCtz047zUW1eEB3rvAjw73NIZj8lAw3llfv4etQHp1tOtqBliGucKYVoJPlocC
wFZNrOLEgRZ9cGNvNaVOAyLo7cR354c8Td+5H4Izrp6uIVE3J+JIgOKKEwARxNzfMT1xYySW+VgI
AQY8kAOPXhRARVytfg/Nceos0o30GopNqOhkZHyqgeH5NkX4t8zxXK5LLyjlSJ32lBseEbfmju5Z
DF2QYNX+UTAJjE4FqvDZZzKy2LQbVaHcsSN1JNRYPwgLfPG0Ljx0NWIuafsGt9cjZeABNS+HLnDU
90jwI56n78N/RfnLQD6Y5edOJlcx/tIkWSqlvywfM16VaGy9vN4turEc3kJ5R2rGi6xp9M04WUaf
Ygf0IatroGl6ZBtD+lRuN+rEBcDhPE+KqzWJ3WFxOXoSwYSgnxf12NluHalaDqrHT6WpHhlOI7Cv
M0/v7ykz7/m7Z7mTycyvWUwEttnliYprEA6TB9TqDL+N1QoHbUVm85e//bZASWI8A6nKz99gK9kg
Gz8a9A8FqOcGeaunTqA/ULgA8cWD4Zv/6CgrZk94mSc5d8yi/zTTcljhlVBKW8arKDVoL8yIdqwJ
r4PQ+ots1x6MrSNnkAqz6EnHNWfr7Guoo44NdCbiijCljl8p3zxe9PyRTcbVZUYN+Fl/gJCdsq9O
DIda6/zizmR1YniuLz2ysisYp/I6pNsjQlB5nVjmf4sFh93KGyFyG/1yAbYBOCJYlbcN9tNRj5cY
1CSekQZUW9VKOGJmnWdtGOA6y2D2edE7h3SYoBnoLqZw9Q/DJFVYqEoqRg+Xc1BOeYfzZ8mf8V6Z
R27zWUAid4d0fiutlkpgb9cwHohTFHs5WR2LYsd6tDc1toqZPWIdUisH6tpX+JuEisNT54xVX08d
M+CD1wCO9eJOyI4FYFUJkDCSdDj5Nqikc8MprZhkSsNYgYHdPQoetn3E1x2ajF+8qDtYyIbhhpxw
hJkyTN41EWaR/hm3j/FaHnRjehKJy+u96okzEepxfCnctq+zXqpzu6/ZgF/YjHXOyl5/vPpXEmyp
s0VqfxlQT1813Xtu7osgbskk2wbjgjohKWuZuk+I8RzvIJigiHqb9jNsc/647JMX6aG+drsvqDhF
mVwadF03a0ZWUbwQpynSN6J6Ct+YfRXE1rx6zFKWyndVsrWCd9+KaZzWSKquIhZze5qjG61uPeSH
kjHKxqWgsAFD532CAZE8BBq7hDv0bfJ+PtCyherocAXlZWZgo1KOjXuRUW1pZBMRK1MVRMR9uQOb
KhfynqMVnkcHWvvhLt+oVPVkRRrgGPO3I00f5yrsYZIOJVEjpBzPqRSJ4aGUFHXO75Z8Q1p6MC89
0lvv8cafN+yuu7phzizRrMXBuvSQ4pDb8f4l64vWLwi+V55DeiEmFTUQyZxDgZx2ZbK1mZ190g+e
12rE2zhGO1mWinfIJIToSeiXjCRUndWkoPwBbzJUhIrjZ2onrLqNKp6K9BzfaQkWiX8RHhIJvFaU
s4VqTSzYV/GaGSTQi4KWEMPT4M4geXUICWdJxTWkes9HJJwXP9xhwiIpAFcyNvDKCaV6+OzO9EGw
Xegms5/9N2vuILnS0yYah7jzNPrSlBGJcxG8YflanhgspxHU+QXDuxjNEqOVPepSl9fF2bqCkAe3
4l4FBxFKeeHXRF7b0ne39f7sHRH09vjKX7UrsZIvqhRfDpSRBc84BIDbk7CHoBpJBuotOn2gSGkT
kXvcQGDu2uCbeoB0zQQhg6vrQKjiAHyEyWpHAfp4mQTTXBBR4JuX4v4N8FOQLFqfGg+eLSj7gOi0
2pMNaxWucOZfSlGJX1LVe/c7VH1QW6h7lpKh8gq/BlCMt5cxXQ6APtyZjEOLZZBp6AGM+vl6Yuoc
WEl4WohVCsQr09Ww6vz3PN6JJsyjR90RauiaoVRZ76aEhYxoDeVuGqo1fCep6VoKbkX46ygg3tHD
XtGPP/6XTIuSrAD5ifoMCDz7z7MzJ/vL15GSvUYqtd+kK9cM3QEjDbLfpdm1b7eZSf6bhK/m5EeH
RWhkOJ/xEDCczxHPq9loXZIUtYCJsCUhASN7LtfnGyINJeZxAC6pD8dOXQaIHth+qTUwwhsUoL9I
c4AEBDNMxAU2eSNbMwiSQnF5BnAZEzZmi7or5IFZYp95Pa1zxj0ixfnnaBNFS9xn0OA6gpBysgXi
rIwV3tkQsBPnqs8ATLawsyOAuvnqmOz/4iqxVFGcnAP3cyi4z4fFtrio3Svkx65+CGRxutqEoIRT
5VvwlUW8RMZ670G5L4aF6k1pGwLE31/MSyL2bVfwpoF6uVbHLGK6NZV+e8gUY6o89r2js7L0aooZ
iooIK35Nn+elDhjjT4cytKnsHui71g35qF8L/glDNOSjjPeuZ8lL8Tf7pmXFJcbWcydpcgjXTk03
KLymggtomrVgWpLZPS5/xBEZS+WhE0Sakjkdp8YDF4jELUb1Lnj0QUAJNFy5AgkU0TSNJQ5b72qC
8WJr0y4Dl9nwkIo7PcugabH114IrEJBr2uWqPLd3Z7csr5c6PUIbF8wWL5wruZPwGOtnwXOo1Rfz
FnjX0ZDt3YAMMJNp6SPly+mn63dTS6KmfPTur6Rf/3MDmNTgjVgRmNXN1speCxxXbLUDJai5ztzU
jlyh60S2Av6onMMYFcUu6qYEjqeuGmnxCw0qKDjGAzedrUZdHft3CoTPvqTNXkFpldL/TsLSV1PZ
/zn6ipR/wVrbr/fUM4zhy8vHvBF4rExcM8RaLRbtwDhGPsSxepHeZMCCOzDhfwBqDMd7
""")
##file activate.sh
ACTIVATE_SH = convert("""
eJytVVFvokAQfudXTLEPtTlLeo9tvMSmJpq02hSvl7u2wRUG2QR2DSxSe7n/frOACEVNLlceRHa+
nfl25pvZDswCnoDPQ4QoTRQsENIEPci4CsBMZBq7CAsuLOYqvmYKTTj3YxnBgiXBudGBjUzBZUJI
BXEqgCvweIyuCjeG4eF2F5x14bcB9KQiQQWrjSddI1/oQIx6SYYeoFjzWIoIhYI1izlbhJjkKO7D
M/QEmKfO9O7WeRo/zr4P7pyHwWxkwitcgwpQ5Ej96OX+PmiFwLeVjFUOrNYKaq1Nud3nR2n8nI2m
k9H0friPTGVsUdptaxGrTEfpNVFEskxpXtUkkCkl1UNF9cgLBkx48J4EXyALuBtAwNYIjF5kcmUU
abMKmMq1ULoiRbgsDEkTSsKSGFCJ6Z8vY/2xYiSacmtyAfCDdCNTVZoVF8vSTQOoEwSnOrngBkws
MYGMBMg8/bMBLSYKS7pYEXP0PqT+ZmBT0Xuy+Pplj5yn4aM9nk72JD8/Wi+Gr98sD9eWSMOwkapD
BbUv91XSvmyVkICt2tmXR4tWmrcUCsjWOpw87YidEC8i0gdTSOFhouJUNxR+4NYBG0MftoCTD9F7
2rTtxG3oPwY1b2HncYwhrlmj6Wq924xtGDWqfdNxap+OYxplEurnMVo9RWks+rH8qKEtx7kZT5zJ
4H7oOFclrN6uFe+d+nW2aIUsSgs/42EIPuOhXq+jEo3S6tX6w2ilNkDnIpHCWdEQhFgwj9pkk7FN
l/y5eQvRSIQ5+TrL05lewxWpt/Lbhes5cJF3mLET1MGhcKCF+40tNWnUulxrpojwDo2sObdje3Bz
N3QeHqf3D7OjEXMVV8LN3ZlvuzoWHqiUcNKHtwNd0IbvPGKYYM31nPKCgkUILw3KL+Y8l7aO1ArS
Ad37nIU0fCj5NE5gQCuC5sOSu+UdI2NeXg/lFkQIlFpdWVaWZRfvqGiirC9o6liJ9FXGYrSY9mI1
D/Ncozgn13vJvsznr7DnkJWXsyMH7e42ljdJ+aqNDF1bFnKWFLdj31xtaJYK6EXFgqmV/ymD/ROG
+n8O9H8f5vsGOWXsL1+1k3g=
""")
##file activate.fish
ACTIVATE_FISH = convert("""
eJyVVWFv2jAQ/c6vuBoqQVWC9nVSNVGVCaS2VC2rNLWVZZILWAs2s52wVvvxsyEJDrjbmgpK7PP5
3bt3d22YLbmGlGcIq1wbmCPkGhPYcLMEEsGciwGLDS+YwSjlekngLFVyBe73GXSXxqw/DwbuTS8x
yyKpFr1WG15lDjETQhpQuQBuIOEKY5O9tlppLqxHKSDByjVAPwEy+mXtCq5MzjIUBTCRgEKTKwFG
gpBqxTLYXgN2myspVigMaYF92tZSowGZJf4mFExxNs9Qb614CgZtmH0BpEOn11f0cXI/+za8pnfD
2ZjA1sg9zlV/8QvcMhxbNu0QwgYokn/d+n02nt6Opzcjcnx1vXcIoN74O4ymWQXmHURfJw9jenc/
vbmb0enj6P5+cuVhqlKm3S0u2XRtRbA2QQAhV7VhBF0rsgUX9Ur1rBUXJgVSy8O751k8mzY5OrKH
RW3eaQhYGTr8hrXO59ALhxQ83mCsDLAid3T72CCSdJhaFE+fXgicXAARUiR2WeVO37gH3oYHzFKo
9k7CaPZ1UeNwH1tWuXA4uFKYYcEa8vaKqXl7q1UpygMPhFLvlVKyNzsSM3S2km7UBOl4xweUXk5u
6e3wZmQ9leY1XE/Ili670tr9g/5POBBpGIJXCCF79L1siarl/dbESa8mD8PL61GpzqpzuMS7tqeB
1YkALrRBloBMbR9yLcVx7frQAgUqR7NZIuzkEu110gbNit1enNs82Rx5utq7Z3prU78HFRgulqNC
OTwbqJa9vkJFclQgZSjbKeBgSsUtCtt9D8OwAbIVJuewQdfvQRaoFE9wd1TmCuRG7OgJ1bVXGHc7
z5WDL/WW36v2oi37CyVBak61+yPBA9C1qqGxzKQqZ0oPuocU9hpud0PIp8sDHkXR1HKkNlzjuUWA
a0enFUyzOWZA4yXGP+ZMI3Tdt2OuqU/SO4q64526cPE0A7ZyW2PMbWZiZ5HamIZ2RcCKLXhcDl2b
vXL+eccQoRzem80mekPDEiyiWK4GWqZmwxQOmPM0eIfgp1P9cqrBsewR2p/DPMtt+pfcYM+Ls2uh
hALufTAdmGl8B1H3VPd2af8fQAc4PgqjlIBL9cGQqNpXaAwe3LrtVn8AkZTUxg==
""")
##file activate.csh
ACTIVATE_CSH = convert("""
eJx9VG1P2zAQ/u5fcYQKNgTNPtN1WxlIQ4KCUEGaxuQ6yYVYSuzKdhqVX7+zk3bpy5YPUXL3PPfc
ne98DLNCWshliVDV1kGCUFvMoJGugMjq2qQIiVSxSJ1cCofD1BYRnOVGV0CfZ0N2DD91DalQSjsw
tQLpIJMGU1euvPe7QeJlkKzgWixlhnAt4aoUVsLnLBiy5NtbJWQ5THX1ZciYKKWwkOFaE04dUm6D
r/zh7pq/3D7Nnid3/HEy+wFHY/gEJydg0aFaQrBFgz1c5DG1IhTs+UZgsBC2GMFBlaeH+8dZXwcW
VPvCjXdlAvCfQsE7al0+07XjZvrSCUevR5dnkVeKlFYZmUztG4BdzL2u9KyLVabTU0bdfg7a0hgs
cSmUg6UwUiQl2iHrcbcVGNvPCiLOe7+cRwG13z9qRGgx2z6DHjfm/Op2yqeT+xvOLzs0PTKHDz2V
tkckFHoQfQRXoGJAj9el0FyJCmEMhzgMS4sB7KPOE2ExoLcSieYwDvR+cP8cg11gKkVJc2wRcm1g
QhYFlXiTaTfO2ki0fQoiFM4tLuO4aZrhOzqR4dIPcWx17hphMBY+Srwh7RTyN83XOWkcSPh1Pg/k
TXX/jbJTbMtUmcxZ+/bbqOsy82suFQg/BhdSOTRhMNBHlUarCpU7JzBhmkKmRejKOQzayQe6MWoa
n1wqWmuh6LZAaHxcdeqIlVLhIBJdO9/kbl0It2oEXQj+eGjJOuvOIR/YGRqvFhttUB2XTvLXYN2H
37CBdbW2W7j2r2+VsCn0doVWcFG1/4y1VwBjfwAyoZhD
""")
##file activate.bat
ACTIVATE_BAT = convert("""
eJx9UdEKgjAUfW6wfxjiIH+hEDKUFHSKLCMI7kNOEkIf9P9pTJ3OLJ/03HPPPed4Es9XS9qqwqgT
PbGKKOdXL4aAFS7A4gvAwgijuiKlqOpGlATS2NeMLE+TjJM9RkQ+SmqAXLrBo1LLIeLdiWlD6jZt
r7VNubWkndkXaxg5GO3UaOOKS6drO3luDDiO5my3iA0YAKGzPRV1ack8cOdhysI0CYzIPzjSiH5X
0QcvC8Lfaj0emsVKYF2rhL5L3fCkVjV76kShi59NHwDniAHzkgDgqBcwOgTMx+gDQQqXCw==
""")
##file deactivate.bat
DEACTIVATE_BAT = convert("""
eJxzSE3OyFfIT0vj4ipOLVEI8wwKCXX0iXf1C7Pl4spMU0hJTcvMS01RiPf3cYmHyQYE+fsGhCho
cCkAAUibEkTEVhWLMlUlLk6QGixStlyaeCyJDPHw9/Pw93VFsQguim4ZXAJoIUw5DhX47XUM8UCx
EchHtwsohN1bILUgw61c/Vy4AJYPYm4=
""")
##file activate.ps1
ACTIVATE_PS = convert("""
eJylWdmS40Z2fVeE/oHT6rCloNUEAXDThB6wAyQAEjsB29GBjdgXYiWgmC/zgz/Jv+AEWNVd3S2N
xuOKYEUxM+/Jmzfvcm7W//zXf/+wUMOoXtyi1F9kbd0sHH/hFc2iLtrK9b3FrSqyxaVQwr8uhqJd
uHaeg9mqzRdR8/13Pyy8qPLdJh0+LMhi0QCoXxYfFh9WtttEnd34H8p6/f1300KauwrULws39e18
0ZaLNm9rgN/ZVf3h++/e124Vlc0vKsspHy+Yyi5+XbzPhijvCtduoiL/kA1ukWV27n0o7Sb8LIFj
CvWR5GQgUJdp1Pw8TS9+rPy6SDv/+e3d+0+4qw8f3v20+PliV37efEYBAB9FTKC+RHn/Cfxn3rdv
00Fube5O+iyCtHDs9BfPfz3q4sfFv9d91Ljhfy7ei0VO+nVTtdOkv/jpt0l2AX6iG1jXgKnnDuD4
ke2k/i8fzzz5UedkVcP4pwF+Wvz2FJl+3vt598urXf5Y6LNA5WcFOP7r0sW7b9a+W/xcu0Xpv5zk
Kfq3P9Dz9di/fCxS72MXVU1rpx9L4Bxl85Wmn5a+zP76Zuh3pL9ROWr87PN+//GHIl+oOtvn9XSU
qH+p0gQBFnx1uV+JLH5O5zv+PXW+WepXVVHZT0+oQezkIATcIm+ivPV/z5J/+cYj3ir4w0Lx09vC
e5n/y5/Y5LPPfdrqb88ga/PabxZRVfmp39l588m/6u+/e+OpP+dF7n1WZpJ9//Z4v372fDDz9eHB
7Juvs/BLMHzrxL9+9twXpJfhd1/DrpQ5Euu/vlss3wp9HXC/54C/Ld69m6zwdx3tC0d8daSv0V8B
n4b9YYF53sJelJV/ix6LZspw/sJtqyl5LJ5r/23htA1Imfm/gt9R7dqVB1LjhydAX4Gb+zksQF59
9+P7H//U+376afFuvh2/T6P85Xr/5c8C6OXyFY4BGuN+EE0+GeR201b+wkkLN5mmBY5TfMw8ngqL
CztXxCSXKMCYrRIElWkEJlEPYsSOeKBVZCAQTKBhApMwRFQzmCThE0YQu2CdEhgjbgmk9GluHpfR
/hhwJCZhGI5jt5FsAkOrObVyE6g2y1snyhMGFlDY1x+BoHpCMulTj5JYWNAYJmnKpvLxXgmQ8az1
4fUGxxcitMbbhDFcsiAItg04E+OSBIHTUYD1HI4FHH4kMREPknuYRMyhh3AARWMkfhCketqD1CWJ
mTCo/nhUScoQcInB1hpFhIKoIXLo5jLpwFCgsnLCx1QlEMlz/iFEGqzH3vWYcpRcThgWnEKm0QcS
rA8ek2a2IYYeowUanOZOlrbWSJUC4c7y2EMI3uJPMnMF/SSXdk6E495VLhzkWHps0rOhKwqk+xBI
DhJirhdUCTamMfXz2Hy303hM4DFJ8QL21BcPBULR+gcdYxoeiDqOFSqpi5B5PUISfGg46gFZBPo4
jdh8lueaWuVSMTURfbAUnLINr/QYuuYoMQV6l1aWxuZVTjlaLC14UzqZ+ziTGDzJzhiYoPLrt3uI
tXkVR47kAo09lo5BD76CH51cTt1snVpMOttLhY93yxChCQPI4OBecS7++h4p4Bdn4H97bJongtPk
s9gQnXku1vzsjjmX4/o4YUDkXkjHwDg5FXozU0fW4y5kyeYW0uJWlh536BKr0kMGjtzTkng6Ep62
uTWnQtiIqKnEsx7e1hLtzlXs7Upw9TwEnp0t9yzCGgUJIZConx9OHJArLkRYW0dW42G9OeR5Nzwk
yk1mX7du5RGHT7dka7N3AznmSif7y6tuKe2N1Al/1TUPRqH6E2GLVc27h9IptMLkCKQYRqPQJgzV
2m6WLsSipS3v3b1/WmXEYY1meLEVIU/arOGVkyie7ZsH05ZKpjFW4cpY0YkjySpSExNG2TS8nnJx
nrQmWh2WY3cP1eISP9wbaVK35ZXc60yC3VN/j9n7UFoK6zvjSTE2+Pvz6Mx322rnftfP8Y0XKIdv
Qd7AfK0nexBTMqRiErvCMa3Hegpfjdh58glW2oNMsKeAX8x6YJLZs9K8/ozjJkWL+JmECMvhQ54x
9rsTHwcoGrDi6Y4I+H7yY4/rJVPAbYymUH7C2D3uiUS3KQ1nrCAUkE1dJMneDQIJMQQx5SONxoEO
OEn1/Ig1eBBUeEDRuOT2WGGGE4bNypBLFh2PeIg3bEbg44PHiqNDbGIQm50LW6MJU62JHCGBrmc9
2F7WBJrrj1ssnTAK4sxwRgh5LLblhwNAclv3Gd+jC/etCfyfR8TMhcWQz8TBIbG8IIyAQ81w2n/C
mHWAwRzxd3WoBY7BZnsqGOWrOCKwGkMMNfO0Kci/joZgEocLjNnzgcmdehPHJY0FudXgsr+v44TB
I3jnMGnsK5veAhgi9iXGifkHMOC09Rh9cAw9sQ0asl6wKMk8mpzFYaaDSgG4F0wisQDDBRpjCINg
FIxhlhQ31xdSkkk6odXZFpTYOQpOOgw9ugM2cDQ+2MYa7JsEirGBrOuxsQy5nPMRdYjsTJ/j1iNw
FeSt1jY2+dd5yx1/pzZMOQXUIDcXeAzR7QlDRM8AMkUldXOmGmvYXPABjxqkYKO7VAY6JRU7kpXr
+Epu2BU3qFFXClFi27784LrDZsJwbNlDw0JzhZ6M0SMXE4iBHehCpHVkrQhpTFn2dsvsZYkiPEEB
GSEAwdiur9LS1U6P2U9JhGp4hnFpJo4FfkdJHcwV6Q5dV1Q9uNeeu7rV8PAjwdFg9RLtroifOr0k
uOiRTo/obNPhQIf42Fr4mtThWoSjitEdAmFW66UCe8WFjPk1YVNpL9srFbond7jrLg8tqAasIMpy
zkH0SY/6zVAwJrEc14zt14YRXdY+fcJ4qOd2XKB0/Kghw1ovd11t2o+zjt+txndo1ZDZ2T+uMVHT
VSXhedBAHoJIID9xm6wPQI3cXY+HR7vxtrJuCKh6kbXaW5KkVeJsdsjqsYsOwYSh0w5sMbu7LF8J
5T7U6LJdiTx+ca7RKlulGgS5Z1JSU2Llt32cHFipkaurtBrvNX5UtvNZjkufZ/r1/XyLl6yOpytL
Km8Fn+y4wkhlqZP5db0rooqy7xdL4wxzFVTX+6HaxuQJK5E5B1neSSovZ9ALB8091dDbbjVxhWNY
Ve5hn1VnI9OF0wpvaRm7SZuC1IRczwC7GnkhPt3muHV1YxUJfo+uh1sYnJy+vI0ZwuPV2uqWJYUH
bmBsi1zmFSxHrqwA+WIzLrHkwW4r+bad7xbOzJCnKIa3S3YvrzEBK1Dc0emzJW+SqysQfdEDorQG
9ZJlbQzEHQV8naPaF440YXzJk/7vHGK2xwuP+Gc5xITxyiP+WQ4x18oXHjFzCBy9kir1EFTAm0Zq
LYwS8MpiGhtfxiBRDXpxDWxk9g9Q2fzPPAhS6VFDAc/aiNGatUkPtZIStZFQ1qD0IlJa/5ZPAi5J
ySp1ETDomZMnvgiysZSBfMikrSDte/K5lqV6iwC5q7YN9I1dBZXUytDJNqU74MJsUyNNLAPopWK3
tzmLkCiDyl7WQnj9sm7Kd5kzgpoccdNeMw/6zPVB3pUwMgi4C7hj4AMFAf4G27oXH8NNT9zll/sK
S6wVlQwazjxWKWy20ZzXb9ne8ngGalPBWSUSj9xkc1drsXkZ8oOyvYT3e0rnYsGwx85xZB9wKeKg
cJKZnamYwiaMymZvzk6wtDUkxmdUg0mPad0YHtvzpjEfp2iMxvORhnx0kCVLf5Qa43WJsVoyfEyI
pzmf8ruM6xBr7dnBgzyxpqXuUPYaKahOaz1LrxNkS/Q3Ae5AC+xl6NbxAqXXlzghZBZHmOrM6Y6Y
ctAkltwlF7SKEsShjVh7QHuxMU0a08/eiu3x3M+07OijMcKFFltByXrpk8w+JNnZpnp3CfgjV1Ax
gUYCnWwYow42I5wHCcTzLXK0hMZN2DrPM/zCSqe9jRSlJnr70BPE4+zrwbk/xVIDHy2FAQyHoomT
Tt5jiM68nBQut35Y0qLclLiQrutxt/c0OlSqXAC8VrxW97lGoRWzhOnifE2zbF05W4xuyhg7JTUL
aqJ7SWDywhjlal0b+NLTpERBgnPW0+Nw99X2Ws72gOL27iER9jgzj7Uu09JaZ3n+hmCjjvZpjNst
vOWWTbuLrg+/1ltX8WpPauEDEvcunIgTxuMEHweWKCx2KQ9DU/UKdO/3za4Szm2iHYL+ss9AAttm
gZHq2pkUXFbV+FiJCKrpBms18zH75vax5jSo7FNunrVWY3Chvd8KKnHdaTt/6ealwaA1x17yTlft
8VBle3nAE+7R0MScC3MJofNCCkA9PGKBgGMYEwfB2QO5j8zUqa8F/EkWKCzGQJ5EZ05HTly1B01E
z813G5BY++RZ2sxbQS8ZveGPJNabp5kXAeoign6Tlt5+L8i5ZquY9+S+KEUHkmYMRFBxRrHnbl2X
rVemKnG+oB1yd9+zT+4c43jQ0wWmQRR6mTCkY1q3VG05Y120ZzKOMBe6Vy7I5Vz4ygPB3yY4G0FP
8RxiMx985YJPXsgRU58EuHj75gygTzejP+W/zKGe78UQN3yOJ1aMQV9hFH+GAfLRsza84WlPLAI/
9G/5JdcHftEfH+Y3/fHUG7/o8bv98dzzy3e8S+XCvgqB+VUf7sH0yDHpONdbRE8tAg9NWOzcTJ7q
TuAxe/AJ07c1Rs9okJvl1/0G60qvbdDzz5zO0FuPFQIHNp9y9Bd1CufYVx7dB26mAxwa8GMNrN/U
oGbNZ3EQ7inLzHy5tRg9AXJrN8cB59cCUBeCiVO7zKM0jU0MamhnRThkg/NMmBOGb6StNeD9tDfA
7czsAWopDdnGoXUHtA+s/k0vNPkBcxEI13jVd/axp85va3LpwGggXXWw12Gwr/JGAH0b8CPboiZd
QO1l0mk/UHukud4C+w5uRoNzpCmoW6GbgbMyaQNkga2pQINB18lOXOCJzSWPFOhZcwzdgrsQnne7
nvjBi+7cP2BbtBeDOW5uOLGf3z94FasKIguOqJl+8ss/6Kumns4cuWbqq5592TN/RNIbn5Qo6qbi
O4F0P9txxPAwagqPlftztO8cWBzdN/jz3b7GD6JHYP/Zp4ToAMaA74M+EGSft3hEGMuf8EwjnTk/
nz/P7SLipB/ogQ6xNX0fDqNncMCfHqGLCMM0ZzFa+6lPJYQ5p81vW4HkCvidYf6kb+P/oB965g8K
C6uR0rdjX1DNKc5pOSTquI8uQ6KXxYaKBn+30/09tK4kMpJPgUIQkbENEPbuezNPPje2Um83SgyX
GTCJb6MnGVIpgncdQg1qz2bvPfxYD9fewCXDomx9S+HQJuX6W3VAL+v5WZMudRQZk9ZdOk6GIUtC
PqEb/uwSIrtR7/edzqgEdtpEwq7p2J5OQV+RLrmtTvFwFpf03M/VrRyTZ73qVod7v7Jh2Dwe5J25
JqFOU2qEu1sP+CRotklediycKfLjeIZzjJQsvKmiGSNQhxuJpKa+hoWUizaE1PuIRGzJqropwgVB
oo1hr870MZLgnXF5ZIpr6mF0L8aSy2gVnTAuoB4WEd4d5NPVC9TMotYXERKlTcwQ2KiB/C48AEfH
Qbyq4CN8xTFnTvf/ebOc3isnjD95s0QF0nx9s+y+zMmz782xL0SgEmRpA3x1w1Ff9/74xcxKEPdS
IEFTz6GgU0+BK/UZ5Gwbl4gZwycxEw+Kqa5QmMkh4OzgzEVPnDAiAOGBFaBW4wkDmj1G4RyElKgj
NlLCq8zsp085MNh/+R4t1Q8yxoSv8PUpTt7izZwf2BTHZZ3pIZpUIpuLkL1nNL6sYcHqcKm237wp
T2+RCjgXweXd2Zp7ZM8W6dG5bZsqo0nrJBTx8EC0+CQQdzEGnabTnkzofu1pYkWl4E7XSniECdxy
vLYavPMcL9LW5SToJFNnos+uqweOHriUZ1ntIYZUonc7ltEQ6oTRtwOHNwez2sVREskHN+bqG3ua
eaEbJ8XpyO8CeD9QJc8nbLP2C2R3A437ISUNyt5Yd0TbDNcl11/DSsOzdbi/VhCC0KE6v1vqVNkq
45ZnG6fiV2NwzInxCNth3BwL0+8814jE6+1W1EeWtpWbSZJOJNYXmWRXa7vLnAljE692eHjZ4y5u
y1u63De0IzKca7As48Z3XshVF+3XiLNz0JIMh/JOpbiNLlMi672uO0wYzOCZjRxcxj3D+gVenGIE
MvFUGGXuRps2RzMcgWIRolHXpGUP6sMsQt1hspUBnVKUn/WQj2u6j3SXd9Xz0QtEzoM7qTu5y7gR
q9gNNsrlEMLdikBt9bFvBnfbUIh6voTw7eDsyTmPKUvF0bHqWLbHe3VRHyRZnNeSGKsB73q66Vsk
taxWYmwz1tYVFG/vOQhlM0gUkyvIab3nv2caJ1udU1F3pDMty7stubTE4OJqm0i0ECfrJIkLtraC
HwRWKzlqpfhEIqYH09eT9WrOhQyt8YEoyBlnXtAT37WHIQ03TIuEHbnRxZDdLun0iok9PUC79prU
m5beZzfQUelEXnhzb/pIROKx3F7qCttYIFGh5dXNzFzID7u8vKykA8Uejf7XXz//S4nKvW//ofS/
QastYw==
""")
##file distutils-init.py
DISTUTILS_INIT = convert("""
eJytV1uL4zYUfvevOE0ottuMW9q3gVDa3aUMXXbLMlDKMBiNrSTqOJKRlMxkf33PkXyRbGe7Dw2E
UXTu37lpxLFV2oIyifAncxmOL0xLIfcG+gv80x9VW6maw7o/CANSWWBwFtqeWMPlGY6qPjV8A0bB
C4eKSTgZ5LRgFeyErMEeOBhbN+Ipgeizhjtnhkn7DdyjuNLPoCS0l/ayQTG0djwZC08cLXozeMss
aG5EzQ0IScpnWtHSTXuxByV/QCmxE7y+eS0uxWeoheaVVfqSJHiU7Mhhi6gULbOHorshkrEnKxpT
0n3A8Y8SMpuwZx6aoix3ouFlmW8gHRSkeSJ2g7hU+kiHLDaQw3bmRDaTGfTnty7gPm0FHbIBg9U9
oh1kZzAFLaue2R6htPCtAda2nGlDSUJ4PZBgCJBGVcwKTAMz/vJiLD+Oin5Z5QlvDPdulC6EsiyE
NFzb7McNTKJzbJqzphx92VKRFY1idenzmq3K0emRcbWBD0ryqc4NZGmKOOOX9Pz5x+/l27tP797c
f/z0d+4NruGNai8uAM0bfsYaw8itFk8ny41jsfpyO+BWlpqfhcG4yxLdi/0tQqoT4a8Vby382mt8
p7XSo7aWGdPBc+b6utaBmCQ7rQKQoWtAuthQCiold2KfJIPTT8xwg9blPumc+YDZC/wYGdAyHpJk
vUbHbHWAp5No6pK/WhhLEWrFjUwtPEv1Agf8YmnsuXUQYkeZoHm8ogP16gt2uHoxcEMdf2C6pmbw
hUMsWGhanboh4IzzmsIpWs134jVPqD/c74bZHdY69UKKSn/+KfVhxLgUlToemayLMYQOqfEC61bh
cbhwaqoGUzIyZRFHPmau5juaWqwRn3mpWmoEA5nhzS5gog/5jbcFQqOZvmBasZtwYlG93k5GEiyw
buHhMWLjDarEGpMGB2LFs5nIJkhp/nUmZneFaRth++lieJtHepIvKgx6PJqIlD9X2j6pG1i9x3pZ
5bHuCPFiirGHeO7McvoXkz786GaKVzC9DSpnOxJdc4xm6NSVq7lNEnKdVlnpu9BNYoKX2Iq3wvgh
gGEUM66kK6j4NiyoneuPLSwaCWDxczgaolEWpiMyDVDb7dNuLAbriL8ig8mmeju31oNvQdpnvEPC
1vAXbWacGRVrGt/uXN/gU0CDDwgooKRrHfTBb1/s9lYZ8ZqOBU0yLvpuP6+K9hLFsvIjeNhBi0KL
MlOuWRn3FRwx5oHXjl0YImUx0+gLzjGchrgzca026ETmYJzPD+IpuKzNi8AFn048Thd63OdD86M6
84zE8yQm0VqXdbbgvub2pKVnS76icBGdeTHHXTKspUmr4NYo/furFLKiMdQzFjHJNcdAnMhltBJK
0/IKX3DVFqvPJ2dLE7bDBkH0l/PJ29074+F0CsGYOxsb7U3myTUncYfXqnLLfa6sJybX4g+hmcjO
kMRBfA1JellfRRKJcyRpxdS4rIl6FdmQCWjo/o9Qz7yKffoP4JHjOvABcRn4CZIT2RH4jnxmfpVG
qgLaAvQBNfuO6X0/Ux02nb4FKx3vgP+XnkX0QW9pLy/NsXgdN24dD3LxO2Nwil7Zlc1dqtP3d7/h
kzp1/+7hGBuY4pk0XD/0Ao/oTe/XGrfyM773aB7iUhgkpy+dwAMalxMP0DrBcsVw/6p25+/hobP9
GBknrWExDhLJ1bwt1NcCNblaFbMKCyvmX0PeRaQ=
""")
##file distutils.cfg
DISTUTILS_CFG = convert("""
eJxNj00KwkAMhfc9xYNuxe4Ft57AjYiUtDO1wXSmNJnK3N5pdSEEAu8nH6lxHVlRhtDHMPATA4uH
xJ4EFmGbvfJiicSHFRzUSISMY6hq3GLCRLnIvSTnEefN0FIjw5tF0Hkk9Q5dRunBsVoyFi24aaLg
9FDOlL0FPGluf4QjcInLlxd6f6rqkgPu/5nHLg0cXCscXoozRrP51DRT3j9QNl99AP53T2Q=
""")
##file activate_this.py
ACTIVATE_THIS = convert("""
eJyNU01v2zAMvetXEB4K21jmDOstQA4dMGCHbeihlyEIDMWmG62yJEiKE//7kXKdpN2KzYBt8euR
fKSyLPs8wiEo8wh4wqZTGou4V6Hm0wJa1cSiTkJdr8+GsoTRHuCotBayiWqQEYGtMCgfD1KjGYBe
5a3p0cRKiAe2NtLADikftnDco0ko/SFEVgEZ8aRC5GLux7i3BpSJ6J1H+i7A2CjiHq9z7JRZuuQq
siwTIvpxJYCeuWaBpwZdhB+yxy/eWz+ZvVSU8C4E9FFZkyxFsvCT/ZzL8gcz9aXVE14Yyp2M+2W0
y7n5mp0qN+avKXvbsyyzUqjeWR8hjGE+2iCE1W1tQ82hsCZN9UzlJr+/e/iab8WfqsmPI6pWeUPd
FrMsd4H/55poeO9n54COhUs+sZNEzNtg/wanpjpuqHJaxs76HtZryI/K3H7KJ/KDIhqcbJ7kI4ar
XL+sMgXnX0D+Te2Iy5xdP8yueSlQB/x/ED2BTAtyE3K4SYUN6AMNfbO63f4lBW3bUJPbTL+mjSxS
PyRfJkZRgj+VbFv+EzHFi5pKwUEepa4JslMnwkowSRCXI+m5XvEOvtuBrxHdhLalG0JofYBok6qj
YdN2dEngUlbC4PG60M1WEN0piu7Nq7on0mgyyUw3iV1etLo6r/81biWdQ9MWHFaePWZYaq+nmp+t
s3az+sj7eA0jfgPfeoN1
""")
MH_MAGIC = 0xfeedface
MH_CIGAM = 0xcefaedfe
MH_MAGIC_64 = 0xfeedfacf
MH_CIGAM_64 = 0xcffaedfe
FAT_MAGIC = 0xcafebabe
BIG_ENDIAN = '>'
LITTLE_ENDIAN = '<'
LC_LOAD_DYLIB = 0xc
maxint = majver == 3 and getattr(sys, 'maxsize') or getattr(sys, 'maxint')
class fileview(object):
"""
A proxy for file-like objects that exposes a given view of a file.
Modified from macholib.
"""
def __init__(self, fileobj, start=0, size=maxint):
if isinstance(fileobj, fileview):
self._fileobj = fileobj._fileobj
else:
self._fileobj = fileobj
self._start = start
self._end = start + size
self._pos = 0
def __repr__(self):
return '<fileview [%d, %d] %r>' % (
self._start, self._end, self._fileobj)
def tell(self):
return self._pos
def _checkwindow(self, seekto, op):
if not (self._start <= seekto <= self._end):
raise IOError("%s to offset %d is outside window [%d, %d]" % (
op, seekto, self._start, self._end))
def seek(self, offset, whence=0):
seekto = offset
if whence == os.SEEK_SET:
seekto += self._start
elif whence == os.SEEK_CUR:
seekto += self._start + self._pos
elif whence == os.SEEK_END:
seekto += self._end
else:
raise IOError("Invalid whence argument to seek: %r" % (whence,))
self._checkwindow(seekto, 'seek')
self._fileobj.seek(seekto)
self._pos = seekto - self._start
def write(self, bytes):
here = self._start + self._pos
self._checkwindow(here, 'write')
self._checkwindow(here + len(bytes), 'write')
self._fileobj.seek(here, os.SEEK_SET)
self._fileobj.write(bytes)
self._pos += len(bytes)
def read(self, size=maxint):
assert size >= 0
here = self._start + self._pos
self._checkwindow(here, 'read')
size = min(size, self._end - here)
self._fileobj.seek(here, os.SEEK_SET)
bytes = self._fileobj.read(size)
self._pos += len(bytes)
return bytes
def read_data(file, endian, num=1):
"""
Read a given number of 32-bits unsigned integers from the given file
with the given endianness.
"""
res = struct.unpack(endian + 'L' * num, file.read(num * 4))
if len(res) == 1:
return res[0]
return res
def mach_o_change(path, what, value):
"""
Replace a given name (what) in any LC_LOAD_DYLIB command found in
the given binary with a new name (value), provided it's shorter.
"""
def do_macho(file, bits, endian):
# Read Mach-O header (the magic number is assumed read by the caller)
cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags = read_data(file, endian, 6)
# 64-bits header has one more field.
if bits == 64:
read_data(file, endian)
# The header is followed by ncmds commands
for n in range(ncmds):
where = file.tell()
# Read command header
cmd, cmdsize = read_data(file, endian, 2)
if cmd == LC_LOAD_DYLIB:
# The first data field in LC_LOAD_DYLIB commands is the
# offset of the name, starting from the beginning of the
# command.
name_offset = read_data(file, endian)
file.seek(where + name_offset, os.SEEK_SET)
# Read the NUL terminated string
load = file.read(cmdsize - name_offset).decode()
load = load[:load.index('\0')]
# If the string is what is being replaced, overwrite it.
if load == what:
file.seek(where + name_offset, os.SEEK_SET)
file.write(value.encode() + '\0'.encode())
# Seek to the next command
file.seek(where + cmdsize, os.SEEK_SET)
def do_file(file, offset=0, size=maxint):
file = fileview(file, offset, size)
# Read magic number
magic = read_data(file, BIG_ENDIAN)
if magic == FAT_MAGIC:
# Fat binaries contain nfat_arch Mach-O binaries
nfat_arch = read_data(file, BIG_ENDIAN)
for n in range(nfat_arch):
# Read arch header
cputype, cpusubtype, offset, size, align = read_data(file, BIG_ENDIAN, 5)
do_file(file, offset, size)
elif magic == MH_MAGIC:
do_macho(file, 32, BIG_ENDIAN)
elif magic == MH_CIGAM:
do_macho(file, 32, LITTLE_ENDIAN)
elif magic == MH_MAGIC_64:
do_macho(file, 64, BIG_ENDIAN)
elif magic == MH_CIGAM_64:
do_macho(file, 64, LITTLE_ENDIAN)
assert(len(what) >= len(value))
do_file(open(path, 'r+b'))
if __name__ == '__main__':
main(sys.argv)
## TODO:
## Copy python.exe.manifest
## Monkeypatch distutils.sysconfig
| [] | [] | ["__PYVENV_LAUNCHER__", "VIRTUALENV_INTERPRETER_RUNNING", "PYTHONHOME", "PYTHONDONTWRITEBYTECODE", "WORKING_ENV", "VIRTUALENV_CONFIG_FILE", "PATH", "PYTHONPATH"] | [] | ["__PYVENV_LAUNCHER__", "VIRTUALENV_INTERPRETER_RUNNING", "PYTHONHOME", "PYTHONDONTWRITEBYTECODE", "WORKING_ENV", "VIRTUALENV_CONFIG_FILE", "PATH", "PYTHONPATH"] | python | 8 | 0 |
tools/flakiness_checker.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Checks a given test for flakiness
Takes the file name and function name of a test, as well as, optionally,
the number of trials to run and the random seed to use
"""
import subprocess
import sys
import os
import random
import argparse
import re
import logging
logging.basicConfig(level=logging.INFO)
DEFAULT_NUM_TRIALS = 10000
DEFAULT_VERBOSITY = 2
def run_test_trials(args):
test_path = args.test_path + ":" + args.test_name
logging.info("Testing: %s", test_path)
new_env = os.environ.copy()
new_env["MXNET_TEST_COUNT"] = str(args.num_trials)
if args.seed is None:
logging.info("No test seed provided, using random seed")
else:
new_env["MXNET_TEST_SEED"] = str(args.seed)
verbosity = "--verbosity=" + str(args.verbosity)
code = subprocess.call(["nosetests", verbosity, test_path],
env = new_env)
logging.info("Nosetests terminated with exit code %d", code)
def find_test_path(test_file):
"""Searches for the test file and returns the path if found
By default, the current working directory is the top of the search.
If a directory was provided as part of the argument, the directory will be
joined with cwd unless it was an absolute path, in which case, the
absolute path will be used instead.
"""
test_file += ".py"
test_path = os.path.split(test_file)
top = os.path.join(os.getcwd(), test_path[0])
for (path, dirs, files) in os.walk(top):
if test_path[1] in files:
return os.path.join(path, test_path[1])
raise FileNotFoundError("Could not find " + test_path[1] +
"in directory: " + top)
class NameAction(argparse.Action):
"""Parses command line argument to get test file and test name"""
def __call__(self, parser, namespace, values, option_string=None):
name = re.split("\.py:|\.", values)
if len(name) != 2:
raise ValueError("Invalid argument format for test. Format: "
"<file-name>.<test-name> or"
" <directory>/<file>:<test-name>")
setattr(namespace, "test_path", find_test_path(name[0]))
setattr(namespace, "test_name", name[1])
def parse_args():
parser = argparse.ArgumentParser(description="Check test for flakiness")
parser.add_argument("test", action=NameAction,
help="file name and and function name of test, "
"provided in the format: <file-name>.<test-name> "
"or <directory>/<file>:<test-name>")
parser.add_argument("-n", "--num-trials", metavar="N",
default=DEFAULT_NUM_TRIALS, type=int,
help="number of test trials, passed as "
"MXNET_TEST_COUNT, defaults to 500")
parser.add_argument("-s", "--seed", type=int,
help="test seed, passed as MXNET_TEST_SEED, "
"defaults to random seed")
parser.add_argument("-v", "--verbosity",
default=DEFAULT_VERBOSITY, type=int,
help="logging level, passed to nosetests")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
run_test_trials(args)
| [] | [] | [] | [] | [] | python | 0 | 0 |
contrib/spendfrom/spendfrom.py | #!/usr/bin/env python
#
# Use the raw transactions API to spend REEXs received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a weycashd or weycash-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the weycash data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/weycash/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "weycash")
return os.path.expanduser("~/.weycash")
def read_bitcoin_config(dbdir):
"""Read the weycash.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "weycash.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a weycash JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 53211 if testnet else 43211
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the weycashd we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(weycashd):
info = weycashd.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
weycashd.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = weycashd.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(weycashd):
address_summary = dict()
address_to_account = dict()
for info in weycashd.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = weycashd.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = weycashd.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-weycash-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(weycashd, fromaddresses, toaddress, amount, fee):
all_coins = list_available(weycashd)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to weycashd.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = weycashd.createrawtransaction(inputs, outputs)
signed_rawtx = weycashd.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(weycashd, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = weycashd.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(weycashd, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = weycashd.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(weycashd, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
fee = total_in - total_out # implied fee: inputs minus outputs
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get REEXs from")
parser.add_option("--to", dest="to", default=None,
help="address to get send REEXs to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of weycash.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
weycashd = connect_JSON(config)
if options.amount is None:
address_summary = list_available(weycashd)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(weycashd) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(weycashd, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(weycashd, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = weycashd.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| [] | [] | ["APPDATA"] | [] | ["APPDATA"] | python | 1 | 0 |
provider/resource_keycloak_user_test.go | package provider
import (
"fmt"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
"github.com/mrparkers/terraform-provider-keycloak/keycloak"
"io/ioutil"
"net/http"
"net/url"
"os"
"regexp"
"strings"
"testing"
)
func TestAccKeycloakUser_basic(t *testing.T) {
realmName := "terraform-" + acctest.RandString(10)
username := "terraform-user-" + acctest.RandString(10)
attributeName := "terraform-attribute-" + acctest.RandString(10)
attributeValue := acctest.RandString(250)
resourceName := "keycloak_user.user"
resource.Test(t, resource.TestCase{
ProviderFactories: testAccProviderFactories,
PreCheck: func() { testAccPreCheck(t) },
CheckDestroy: testAccCheckKeycloakUserDestroy(),
Steps: []resource.TestStep{
{
Config: testKeycloakUser_basic(realmName, username, attributeName, attributeValue),
Check: testAccCheckKeycloakUserExists(resourceName),
},
{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
ImportStateIdPrefix: realmName + "/",
},
},
})
}
func TestAccKeycloakUser_withInitialPassword(t *testing.T) {
realmName := "terraform-" + acctest.RandString(10)
username := "terraform-user-" + acctest.RandString(10)
password := "terraform-password-" + acctest.RandString(10)
clientId := "terraform-client-" + acctest.RandString(10)
resourceName := "keycloak_user.user"
resource.Test(t, resource.TestCase{
ProviderFactories: testAccProviderFactories,
PreCheck: func() { testAccPreCheck(t) },
CheckDestroy: testAccCheckKeycloakUserDestroy(),
Steps: []resource.TestStep{
{
Config: testKeycloakUser_initialPassword(realmName, username, password, clientId),
Check: resource.ComposeTestCheckFunc(
testAccCheckKeycloakUserExists(resourceName),
testAccCheckKeycloakUserInitialPasswordLogin(realmName, username, password, clientId),
),
},
},
})
}
func TestAccKeycloakUser_createAfterManualDestroy(t *testing.T) {
var user = &keycloak.User{}
realmName := "terraform-" + acctest.RandString(10)
username := "terraform-user-" + acctest.RandString(10)
attributeName := "terraform-attribute-" + acctest.RandString(10)
attributeValue := acctest.RandString(250)
resourceName := "keycloak_user.user"
resource.Test(t, resource.TestCase{
ProviderFactories: testAccProviderFactories,
PreCheck: func() { testAccPreCheck(t) },
CheckDestroy: testAccCheckKeycloakUserDestroy(),
Steps: []resource.TestStep{
{
Config: testKeycloakUser_basic(realmName, username, attributeName, attributeValue),
Check: resource.ComposeTestCheckFunc(
testAccCheckKeycloakUserExists(resourceName),
testAccCheckKeycloakUserFetch(resourceName, user),
),
},
{
PreConfig: func() {
keycloakClient := testAccProvider.Meta().(*keycloak.KeycloakClient)
err := keycloakClient.DeleteUser(user.RealmId, user.Id)
if err != nil {
t.Fatal(err)
}
},
Config: testKeycloakUser_basic(realmName, username, attributeName, attributeValue),
Check: testAccCheckKeycloakUserExists(resourceName),
},
},
})
}
func TestAccKeycloakUser_updateRealm(t *testing.T) {
realmOne := "terraform-" + acctest.RandString(10)
realmTwo := "terraform-" + acctest.RandString(10)
username := "terraform-user-" + acctest.RandString(10)
resourceName := "keycloak_user.user"
resource.Test(t, resource.TestCase{
ProviderFactories: testAccProviderFactories,
PreCheck: func() { testAccPreCheck(t) },
CheckDestroy: testAccCheckKeycloakUserDestroy(),
Steps: []resource.TestStep{
{
Config: testKeycloakUser_updateRealmBefore(realmOne, realmTwo, username),
Check: resource.ComposeTestCheckFunc(
testAccCheckKeycloakUserExists(resourceName),
resource.TestCheckResourceAttr(resourceName, "realm_id", realmOne),
),
},
{
Config: testKeycloakUser_updateRealmAfter(realmOne, realmTwo, username),
Check: resource.ComposeTestCheckFunc(
testAccCheckKeycloakUserExists(resourceName),
resource.TestCheckResourceAttr(resourceName, "realm_id", realmTwo),
),
},
},
})
}
func TestAccKeycloakUser_updateUsername(t *testing.T) {
realmName := "terraform-" + acctest.RandString(10)
usernameOne := "terraform-user-" + acctest.RandString(10)
usernameTwo := "terraform-user-" + acctest.RandString(10)
attributeName := "terraform-attribute-" + acctest.RandString(10)
attributeValue := acctest.RandString(250)
resourceName := "keycloak_user.user"
resource.Test(t, resource.TestCase{
ProviderFactories: testAccProviderFactories,
PreCheck: func() { testAccPreCheck(t) },
CheckDestroy: testAccCheckKeycloakUserDestroy(),
Steps: []resource.TestStep{
{
Config: testKeycloakUser_basic(realmName, usernameOne, attributeName, attributeValue),
Check: resource.ComposeTestCheckFunc(
testAccCheckKeycloakUserExists(resourceName),
resource.TestCheckResourceAttr(resourceName, "username", usernameOne),
),
},
{
Config: testKeycloakUser_basic(realmName, usernameTwo, attributeName, attributeValue),
Check: resource.ComposeTestCheckFunc(
testAccCheckKeycloakUserExists(resourceName),
resource.TestCheckResourceAttr(resourceName, "username", usernameTwo),
),
},
},
})
}
func TestAccKeycloakUser_updateWithInitialPasswordChangeDoesNotReset(t *testing.T) {
realmName := "terraform-" + acctest.RandString(10)
username := "terraform-user-" + acctest.RandString(10)
passwordOne := "terraform-password1-" + acctest.RandString(10)
passwordTwo := "terraform-password2-" + acctest.RandString(10)
clientId := "terraform-client-" + acctest.RandString(10)
resource.Test(t, resource.TestCase{
ProviderFactories: testAccProviderFactories,
PreCheck: func() { testAccPreCheck(t) },
CheckDestroy: testAccCheckKeycloakUserDestroy(),
Steps: []resource.TestStep{
{
Config: testKeycloakUser_initialPassword(realmName, username, passwordOne, clientId),
Check: resource.ComposeTestCheckFunc(
testAccCheckKeycloakUserInitialPasswordLogin(realmName, username, passwordOne, clientId),
),
},
{
Config: testKeycloakUser_initialPassword(realmName, username, passwordTwo, clientId),
Check: resource.ComposeTestCheckFunc(
testAccCheckKeycloakUserInitialPasswordLogin(realmName, username, passwordOne, clientId),
),
},
},
})
}
func TestAccKeycloakUser_updateInPlace(t *testing.T) {
userOne := &keycloak.User{
RealmId: "terraform-" + acctest.RandString(10),
Username: "terraform-user-" + acctest.RandString(10),
Email: fmt.Sprintf("%[email protected]", acctest.RandString(10)),
FirstName: acctest.RandString(10),
LastName: acctest.RandString(10),
Enabled: randomBool(),
EmailVerified: randomBool(),
}
userTwo := &keycloak.User{
RealmId: userOne.RealmId,
Username: userOne.Username,
Email: fmt.Sprintf("%[email protected]", acctest.RandString(10)),
FirstName: acctest.RandString(10),
LastName: acctest.RandString(10),
Enabled: randomBool(),
EmailVerified: !userOne.EmailVerified,
}
resourceName := "keycloak_user.user"
resource.Test(t, resource.TestCase{
ProviderFactories: testAccProviderFactories,
PreCheck: func() { testAccPreCheck(t) },
CheckDestroy: testAccCheckKeycloakUserDestroy(),
Steps: []resource.TestStep{
{
Config: testKeycloakUser_fromInterface(userOne),
Check: testAccCheckKeycloakUserExists(resourceName),
},
{
Config: testKeycloakUser_fromInterface(userTwo),
Check: testAccCheckKeycloakUserExists(resourceName),
},
},
})
}
func TestAccKeycloakUser_unsetOptionalAttributes(t *testing.T) {
attributeName := "terraform-attribute-" + acctest.RandString(10)
userWithOptionalAttributes := &keycloak.User{
RealmId: "terraform-" + acctest.RandString(10),
Username: "terraform-user-" + acctest.RandString(10),
Email: fmt.Sprintf("%[email protected]", acctest.RandString(10)),
FirstName: acctest.RandString(10),
LastName: acctest.RandString(10),
Enabled: randomBool(),
Attributes: map[string][]string{
attributeName: {
acctest.RandString(230),
acctest.RandString(12),
},
},
}
resourceName := "keycloak_user.user"
resource.Test(t, resource.TestCase{
ProviderFactories: testAccProviderFactories,
PreCheck: func() { testAccPreCheck(t) },
CheckDestroy: testAccCheckKeycloakUserDestroy(),
Steps: []resource.TestStep{
{
Config: testKeycloakUser_fromInterface(userWithOptionalAttributes),
Check: testAccCheckKeycloakUserExists(resourceName),
},
{
Config: testKeycloakUser_basic(userWithOptionalAttributes.RealmId, userWithOptionalAttributes.Username, attributeName, strings.Join(userWithOptionalAttributes.Attributes[attributeName], "")),
Check: resource.ComposeTestCheckFunc(
testAccCheckKeycloakUserExists(resourceName),
resource.TestCheckResourceAttr(resourceName, "email", ""),
resource.TestCheckResourceAttr(resourceName, "first_name", ""),
resource.TestCheckResourceAttr(resourceName, "last_name", ""),
),
},
},
})
}
func TestAccKeycloakUser_validateLowercaseUsernames(t *testing.T) {
realmName := "terraform-" + acctest.RandString(10)
username := "terraform-user-" + strings.ToUpper(acctest.RandString(10))
attributeName := "terraform-attribute-" + acctest.RandString(10)
attributeValue := acctest.RandString(250)
resource.Test(t, resource.TestCase{
ProviderFactories: testAccProviderFactories,
PreCheck: func() { testAccPreCheck(t) },
CheckDestroy: testAccCheckKeycloakUserDestroy(),
Steps: []resource.TestStep{
{
Config: testKeycloakUser_basic(realmName, username, attributeName, attributeValue),
ExpectError: regexp.MustCompile("expected username .+ to be all lowercase"),
},
},
})
}
func TestAccKeycloakUser_federatedLink(t *testing.T) {
sourceUserName := "terraform-source-user-" + acctest.RandString(10)
sourceUserName2 := "terraform-source-user2-" + acctest.RandString(10)
destinationRealmName := "terraform-dest-" + acctest.RandString(10)
resourceName := "keycloak_user.destination_user"
resource.Test(t, resource.TestCase{
ProviderFactories: testAccProviderFactories,
PreCheck: func() { testAccPreCheck(t) },
CheckDestroy: testAccCheckKeycloakUserDestroy(),
Steps: []resource.TestStep{
{
Config: testKeycloakUser_FederationLink(sourceUserName, destinationRealmName),
Check: testAccCheckKeycloakUserHasFederationLinkWithSourceUserName(resourceName, sourceUserName),
},
{
Config: testKeycloakUser_FederationLink(sourceUserName2, destinationRealmName),
Check: testAccCheckKeycloakUserHasFederationLinkWithSourceUserName(resourceName, sourceUserName2),
},
},
})
}
func testAccCheckKeycloakUserHasFederationLinkWithSourceUserName(resourceName, sourceUserName string) resource.TestCheckFunc {
return func(s *terraform.State) error {
fetchedUser, err := getUserFromState(s, resourceName)
if err != nil {
return err
}
found := false
for _, federatedIdentity := range fetchedUser.FederatedIdentities {
if federatedIdentity.UserName == sourceUserName {
found = true
}
if !found {
return fmt.Errorf("user had unexpected federatedLink %s or unexpected username %s", federatedIdentity.IdentityProvider, federatedIdentity.UserName)
}
}
if !found {
return fmt.Errorf("user had no federatedLink, but one was expected")
}
return nil
}
}
func testAccCheckKeycloakUserExists(resourceName string) resource.TestCheckFunc {
return func(s *terraform.State) error {
_, err := getUserFromState(s, resourceName)
if err != nil {
return err
}
return nil
}
}
func testAccCheckKeycloakUserFetch(resourceName string, user *keycloak.User) resource.TestCheckFunc {
return func(s *terraform.State) error {
fetchedUser, err := getUserFromState(s, resourceName)
if err != nil {
return err
}
user.Id = fetchedUser.Id
user.RealmId = fetchedUser.RealmId
return nil
}
}
func testAccCheckKeycloakUserInitialPasswordLogin(realmName string, username string, password string, clientId string) resource.TestCheckFunc {
return func(s *terraform.State) error {
httpClient := &http.Client{}
resourceUrl := fmt.Sprintf("%s/auth/realms/%s/protocol/openid-connect/token", os.Getenv("KEYCLOAK_URL"), realmName)
form := url.Values{}
form.Add("username", username)
form.Add("password", password)
form.Add("client_id", clientId)
form.Add("grant_type", "password")
request, err := http.NewRequest(http.MethodPost, resourceUrl, strings.NewReader(form.Encode()))
if err != nil {
return err
}
request.Header.Add("Content-Type", "application/x-www-form-urlencoded")
response, err := httpClient.Do(request)
if err != nil {
return err
}
defer response.Body.Close()
if response.StatusCode != http.StatusOK {
body, _ := ioutil.ReadAll(response.Body)
return fmt.Errorf("user with username %s cannot login with password %s\n body: %s", username, password, string(body))
}
return nil
}
}
func testAccCheckKeycloakUserDestroy() resource.TestCheckFunc {
return func(s *terraform.State) error {
for _, rs := range s.RootModule().Resources {
if rs.Type != "keycloak_user" {
continue
}
id := rs.Primary.ID
realm := rs.Primary.Attributes["realm_id"]
keycloakClient := testAccProvider.Meta().(*keycloak.KeycloakClient)
user, _ := keycloakClient.GetUser(realm, id)
if user != nil {
return fmt.Errorf("user with id %s still exists", id)
}
}
return nil
}
}
func getUserFromState(s *terraform.State, resourceName string) (*keycloak.User, error) {
keycloakClient := testAccProvider.Meta().(*keycloak.KeycloakClient)
rs, ok := s.RootModule().Resources[resourceName]
if !ok {
return nil, fmt.Errorf("resource not found: %s", resourceName)
}
id := rs.Primary.ID
realm := rs.Primary.Attributes["realm_id"]
user, err := keycloakClient.GetUser(realm, id)
if err != nil {
return nil, fmt.Errorf("error getting user with id %s: %s", id, err)
}
return user, nil
}
func testKeycloakUser_basic(realm, username, attributeName, attributeValue string) string {
return fmt.Sprintf(`
resource "keycloak_realm" "realm" {
realm = "%s"
}
resource "keycloak_user" "user" {
realm_id = "${keycloak_realm.realm.id}"
username = "%s"
attributes = {
"%s" = "%s"
}
}
`, realm, username, attributeName, attributeValue)
}
func testKeycloakUser_initialPassword(realm, username string, password string, clientId string) string {
return fmt.Sprintf(`
resource "keycloak_realm" "realm" {
realm = "%s"
}
resource "keycloak_openid_client" "client" {
realm_id = "${keycloak_realm.realm.id}"
client_id = "%s"
name = "test client"
enabled = true
access_type = "PUBLIC"
direct_access_grants_enabled = true
}
resource "keycloak_user" "user" {
realm_id = "${keycloak_realm.realm.id}"
username = "%s"
initial_password {
value = "%s"
temporary = false
}
}
`, realm, clientId, username, password)
}
func testKeycloakUser_updateRealmBefore(realmOne, realmTwo, username string) string {
return fmt.Sprintf(`
resource "keycloak_realm" "realm_1" {
realm = "%s"
}
resource "keycloak_realm" "realm_2" {
realm = "%s"
}
resource "keycloak_user" "user" {
realm_id = "${keycloak_realm.realm_1.id}"
username = "%s"
}
`, realmOne, realmTwo, username)
}
func testKeycloakUser_updateRealmAfter(realmOne, realmTwo, username string) string {
return fmt.Sprintf(`
resource "keycloak_realm" "realm_1" {
realm = "%s"
}
resource "keycloak_realm" "realm_2" {
realm = "%s"
}
resource "keycloak_user" "user" {
realm_id = "${keycloak_realm.realm_2.id}"
username = "%s"
}
`, realmOne, realmTwo, username)
}
func testKeycloakUser_fromInterface(user *keycloak.User) string {
return fmt.Sprintf(`
resource "keycloak_realm" "realm" {
realm = "%s"
}
resource "keycloak_user" "user" {
realm_id = "${keycloak_realm.realm.id}"
username = "%s"
email = "%s"
first_name = "%s"
last_name = "%s"
enabled = %t
email_verified = "%t"
}
`, user.RealmId, user.Username, user.Email, user.FirstName, user.LastName, user.Enabled, user.EmailVerified)
}
func testKeycloakUser_FederationLink(sourceRealmUserName, destinationRealmId string) string {
return fmt.Sprintf(`
resource "keycloak_realm" "source_realm" {
realm = "source_test_realm"
enabled = true
}
resource "keycloak_openid_client" "destination_client" {
realm_id = "${keycloak_realm.source_realm.id}"
client_id = "destination_client"
client_secret = "secret"
access_type = "CONFIDENTIAL"
standard_flow_enabled = true
valid_redirect_uris = [
"http://localhost:8080/*",
]
}
resource "keycloak_user" "source_user" {
realm_id = "${keycloak_realm.source_realm.id}"
username = "%s"
initial_password {
value = "source"
temporary = false
}
}
resource "keycloak_realm" "destination_realm" {
realm = "%s"
enabled = true
}
resource keycloak_oidc_identity_provider source_oidc_idp {
realm = "${keycloak_realm.destination_realm.id}"
alias = "source"
authorization_url = "http://localhost:8080/auth/realms/${keycloak_realm.source_realm.id}/protocol/openid-connect/auth"
token_url = "http://localhost:8080/auth/realms/${keycloak_realm.source_realm.id}/protocol/openid-connect/token"
client_id = "${keycloak_openid_client.destination_client.client_id}"
client_secret = "${keycloak_openid_client.destination_client.client_secret}"
default_scopes = "openid"
}
resource "keycloak_user" "destination_user" {
realm_id = "${keycloak_realm.destination_realm.id}"
username = "my_destination_username"
federated_identity {
identity_provider = "${keycloak_oidc_identity_provider.source_oidc_idp.alias}"
user_id = "${keycloak_user.source_user.id}"
user_name = "${keycloak_user.source_user.username}"
}
}
`, sourceRealmUserName, destinationRealmId)
}
| ["\"KEYCLOAK_URL\""] | [] | ["KEYCLOAK_URL"] | [] | ["KEYCLOAK_URL"] | go | 1 | 0 |
latency-logger/main.py | #!/usr/bin/env python
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import logging
import os
import time
import random
from collections.abc import Coroutine, Mapping
import asyncpg
logger = logging.getLogger()
# configuration for the database
db_user: str = os.environ["DB_USER"] # e.g. "my-db-user"
db_pass: str = os.environ["DB_PASS"] # e.g. "my-db-password"
db_name: str = os.environ["DB_NAME"] # e.g. "my-database"
db_host: str = os.environ.get("DB_HOST") # e.g. 127.0.0.1:5432
db_socket: str = os.environ.get("DB_SOCKET") # e.g.
# initialize the connection pool to the database
def init_pool():
db_opts: Mapping = {
"user": db_user,
"password": db_pass,
"database": db_name,
"min_size": 0,
"max_size": 10,
}
if db_host:
# Extract host and port from db_host
host_args = db_host.split(":")
db_opts["host"] = host_args[0]
db_opts["port"] = int(host_args[1])
elif db_socket:
db_opts["host"] = db_socket
else:
logger.fatal("db_host or db_dir not set")
raise Exception("db_host or db_dir must be set")
return asyncpg.create_pool(**db_opts)
# schedules a function to be called at a fixed rate
async def schedule_fixed_rate(sec: int, func: Coroutine):
while True:
await asyncio.sleep(sec)
asyncio.create_task(func())
# tests connection to the server and logs the time to complete
async def connect_with_pool(pool: asyncpg.pool.Pool):
now = time.monotonic()
connStart, connEnd = now, now
tranStart, tranEnd = 0, 0
try:
async with pool.acquire() as conn:
now = time.monotonic()
tranStart, tranEnd = now, now
try:
await conn.execute("SELECT 1;", timeout=10)
finally:
tranEnd = time.monotonic()
except Exception:
logger.exception(f"Connection failed!")
finally:
connEnd = time.monotonic()
connDiff, tranDiff = connEnd - connStart, tranEnd - tranStart
logger.info(
f" connect complete: conn={connDiff*100:.2f}ms, trans={tranDiff*100:.2f}ms, diff={(connDiff-tranDiff)*100:.2f}ms"
)
async def main():
async with init_pool() as pool:
await schedule_fixed_rate(.5, lambda: connect_with_pool(pool))
logger.info("Hello world!")
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
asyncio.get_event_loop().run_until_complete(main())
| [] | [] | ["DB_HOST", "DB_NAME", "DB_PASS", "DB_SOCKET", "DB_USER"] | [] | ["DB_HOST", "DB_NAME", "DB_PASS", "DB_SOCKET", "DB_USER"] | python | 5 | 0 |
tests/unit/test__http.py | # Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest
import warnings
import mock
import requests
from six.moves import http_client
class TestConnection(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud._http import Connection
return Connection
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_constructor_defaults(self):
from google.api_core.client_info import ClientInfo
client = object()
conn = self._make_one(client)
self.assertIs(conn._client, client)
self.assertIsInstance(conn._client_info, ClientInfo)
def test_constructor_explicit(self):
client = object()
client_info = object()
conn = self._make_one(client, client_info=client_info)
self.assertIs(conn._client, client)
def test_user_agent_all_caps_getter_deprecated(self):
client = object()
conn = self._make_one(client)
with mock.patch.object(warnings, "warn", autospec=True) as warn:
self.assertEqual(conn.USER_AGENT, conn._client_info.to_user_agent())
warn.assert_called_once_with(mock.ANY, DeprecationWarning, stacklevel=2)
def test_user_agent_all_caps_setter_deprecated(self):
conn = self._make_one(object())
user_agent = "testing"
with mock.patch.object(warnings, "warn", autospec=True) as warn:
conn.USER_AGENT = user_agent
self.assertEqual(conn._client_info.user_agent, user_agent)
warn.assert_called_once_with(mock.ANY, DeprecationWarning, stacklevel=2)
def test_user_agent_getter(self):
conn = self._make_one(object())
self.assertEqual(conn.user_agent, conn._client_info.to_user_agent())
def test_user_agent_setter(self):
conn = self._make_one(object())
user_agent = "testing"
conn.user_agent = user_agent
self.assertEqual(conn._client_info.user_agent, user_agent)
def test_extra_headers_all_caps_getter_deprecated(self):
client = object()
conn = self._make_one(client)
expected = conn._extra_headers = {"foo": "bar"}
with mock.patch.object(warnings, "warn", autospec=True) as warn:
self.assertEqual(conn._EXTRA_HEADERS, expected)
warn.assert_called_once_with(mock.ANY, DeprecationWarning, stacklevel=2)
def test_extra_headers_all_caps_setter_deprecated(self):
conn = self._make_one(object())
extra_headers = {"foo": "bar"}
with mock.patch.object(warnings, "warn", autospec=True) as warn:
conn._EXTRA_HEADERS = extra_headers
self.assertEqual(conn._extra_headers, extra_headers)
warn.assert_called_once_with(mock.ANY, DeprecationWarning, stacklevel=2)
def test_extra_headers_getter_default(self):
conn = self._make_one(object())
expected = {}
self.assertEqual(conn.extra_headers, expected)
def test_extra_headers_getter_overridden(self):
conn = self._make_one(object())
expected = conn._extra_headers = {"foo": "bar"}
self.assertEqual(conn.extra_headers, expected)
def test_extra_headers_item_assignment(self):
conn = self._make_one(object())
expected = {"foo": "bar"}
conn.extra_headers["foo"] = "bar"
self.assertEqual(conn._extra_headers, expected)
def test_extra_headers_setter(self):
conn = self._make_one(object())
expected = {"foo": "bar"}
conn.extra_headers = expected
self.assertEqual(conn._extra_headers, expected)
def test_credentials_property(self):
client = mock.Mock(spec=["_credentials"])
conn = self._make_one(client)
self.assertIs(conn.credentials, client._credentials)
def test_http_property(self):
client = mock.Mock(spec=["_http"])
conn = self._make_one(client)
self.assertIs(conn.http, client._http)
def make_response(status=http_client.OK, content=b"", headers={}):
response = requests.Response()
response.status_code = status
response._content = content
response.headers = headers
response.request = requests.Request()
return response
def make_requests_session(responses):
session = mock.create_autospec(requests.Session, instance=True)
session.request.side_effect = responses
return session
class TestJSONConnection(unittest.TestCase):
JSON_HEADERS = {"content-type": "application/json"}
EMPTY_JSON_RESPONSE = make_response(content=b"{}", headers=JSON_HEADERS)
@staticmethod
def _get_default_timeout():
from google.cloud._http import _DEFAULT_TIMEOUT
return _DEFAULT_TIMEOUT
@staticmethod
def _get_target_class():
from google.cloud._http import JSONConnection
return JSONConnection
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def _make_mock_one(self, *args, **kw):
class MockConnection(self._get_target_class()):
API_URL_TEMPLATE = "{api_base_url}/mock/{api_version}{path}"
API_BASE_URL = "http://mock"
API_BASE_MTLS_URL = "https://mock.mtls"
API_VERSION = "vMOCK"
return MockConnection(*args, **kw)
def test_class_defaults(self):
klass = self._get_target_class()
self.assertIsNone(klass.API_URL_TEMPLATE)
self.assertIsNone(klass.API_BASE_URL)
self.assertIsNone(klass.API_VERSION)
def test_constructor(self):
client = object()
conn = self._make_one(client)
self.assertIs(conn._client, client)
def test_build_api_url_no_extra_query_params(self):
client = object()
conn = self._make_mock_one(client)
# Intended to emulate self.mock_template
URI = "/".join(
[conn.API_BASE_URL, "mock", conn.API_VERSION, "foo?prettyPrint=false"]
)
self.assertEqual(conn.build_api_url("/foo"), URI)
def test_build_api_url_w_pretty_print_query_params(self):
client = object()
conn = self._make_mock_one(client)
uri = conn.build_api_url("/foo", {"prettyPrint": "true"})
URI = "/".join(
[conn.API_BASE_URL, "mock", conn.API_VERSION, "foo?prettyPrint=true"]
)
self.assertEqual(uri, URI)
def test_build_api_url_w_extra_query_params(self):
from six.moves.urllib.parse import parse_qs
from six.moves.urllib.parse import urlsplit
client = object()
conn = self._make_mock_one(client)
uri = conn.build_api_url("/foo", {"bar": "baz", "qux": ["quux", "corge"]})
scheme, netloc, path, qs, _ = urlsplit(uri)
self.assertEqual("%s://%s" % (scheme, netloc), conn.API_BASE_URL)
# Intended to emulate mock_template
PATH = "/".join(["", "mock", conn.API_VERSION, "foo"])
self.assertEqual(path, PATH)
parms = dict(parse_qs(qs))
self.assertEqual(parms["bar"], ["baz"])
self.assertEqual(parms["qux"], ["quux", "corge"])
self.assertEqual(parms["prettyPrint"], ["false"])
def test_build_api_url_w_extra_query_params_tuples(self):
from six.moves.urllib.parse import parse_qs
from six.moves.urllib.parse import urlsplit
client = object()
conn = self._make_mock_one(client)
uri = conn.build_api_url(
"/foo", [("bar", "baz"), ("qux", "quux"), ("qux", "corge")]
)
scheme, netloc, path, qs, _ = urlsplit(uri)
self.assertEqual("%s://%s" % (scheme, netloc), conn.API_BASE_URL)
# Intended to emulate mock_template
PATH = "/".join(["", "mock", conn.API_VERSION, "foo"])
self.assertEqual(path, PATH)
parms = dict(parse_qs(qs))
self.assertEqual(parms["bar"], ["baz"])
self.assertEqual(parms["qux"], ["quux", "corge"])
self.assertEqual(parms["prettyPrint"], ["false"])
def test_get_api_base_url_for_mtls_w_api_base_url(self):
client = object()
conn = self._make_mock_one(client)
uri = conn.get_api_base_url_for_mtls(api_base_url="http://foo")
self.assertEqual(uri, "http://foo")
def test_get_api_base_url_for_mtls_env_always(self):
client = object()
conn = self._make_mock_one(client)
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
uri = conn.get_api_base_url_for_mtls()
self.assertEqual(uri, "https://mock.mtls")
def test_get_api_base_url_for_mtls_env_never(self):
client = object()
conn = self._make_mock_one(client)
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
uri = conn.get_api_base_url_for_mtls()
self.assertEqual(uri, "http://mock")
def test_get_api_base_url_for_mtls_env_auto(self):
client = mock.Mock()
client._http = mock.Mock()
client._http.is_mtls = False
conn = self._make_mock_one(client)
# ALLOW_AUTO_SWITCH_TO_MTLS_URL is False, so use regular endpoint.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}):
uri = conn.get_api_base_url_for_mtls()
self.assertEqual(uri, "http://mock")
# ALLOW_AUTO_SWITCH_TO_MTLS_URL is True, so now the endpoint depends
# on client._http.is_mtls
conn.ALLOW_AUTO_SWITCH_TO_MTLS_URL = True
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}):
uri = conn.get_api_base_url_for_mtls()
self.assertEqual(uri, "http://mock")
client._http.is_mtls = True
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}):
uri = conn.get_api_base_url_for_mtls()
self.assertEqual(uri, "https://mock.mtls")
def test__make_request_no_data_no_content_type_no_headers(self):
from google.cloud._http import CLIENT_INFO_HEADER
http = make_requests_session([make_response()])
client = mock.Mock(_http=http, spec=["_http"])
conn = self._make_one(client)
url = "http://example.com/test"
response = conn._make_request("GET", url)
self.assertEqual(response.status_code, http_client.OK)
self.assertEqual(response.content, b"")
expected_headers = {
"Accept-Encoding": "gzip",
"User-Agent": conn.user_agent,
CLIENT_INFO_HEADER: conn.user_agent,
}
http.request.assert_called_once_with(
method="GET",
url=url,
headers=expected_headers,
data=None,
timeout=self._get_default_timeout(),
)
def test__make_request_w_data_no_extra_headers(self):
from google.cloud._http import CLIENT_INFO_HEADER
http = make_requests_session([make_response()])
client = mock.Mock(_http=http, spec=["_http"])
conn = self._make_one(client)
url = "http://example.com/test"
data = b"data"
conn._make_request("GET", url, data, "application/json")
expected_headers = {
"Accept-Encoding": "gzip",
"Content-Type": "application/json",
"User-Agent": conn.user_agent,
CLIENT_INFO_HEADER: conn.user_agent,
}
http.request.assert_called_once_with(
method="GET",
url=url,
headers=expected_headers,
data=data,
timeout=self._get_default_timeout(),
)
def test__make_request_w_extra_headers(self):
from google.cloud._http import CLIENT_INFO_HEADER
http = make_requests_session([make_response()])
client = mock.Mock(_http=http, spec=["_http"])
conn = self._make_one(client)
url = "http://example.com/test"
conn._make_request("GET", url, headers={"X-Foo": "foo"})
expected_headers = {
"Accept-Encoding": "gzip",
"X-Foo": "foo",
"User-Agent": conn.user_agent,
CLIENT_INFO_HEADER: conn.user_agent,
}
http.request.assert_called_once_with(
method="GET",
url=url,
headers=expected_headers,
data=None,
timeout=self._get_default_timeout(),
)
def test__make_request_w_timeout(self):
from google.cloud._http import CLIENT_INFO_HEADER
http = make_requests_session([make_response()])
client = mock.Mock(_http=http, spec=["_http"])
conn = self._make_one(client)
url = "http://example.com/test"
conn._make_request("GET", url, timeout=(5.5, 2.8))
expected_headers = {
"Accept-Encoding": "gzip",
"User-Agent": conn.user_agent,
CLIENT_INFO_HEADER: conn.user_agent,
}
http.request.assert_called_once_with(
method="GET",
url=url,
headers=expected_headers,
data=None,
timeout=(5.5, 2.8),
)
def test_api_request_defaults(self):
from google.cloud._http import CLIENT_INFO_HEADER
http = make_requests_session(
[make_response(content=b"{}", headers=self.JSON_HEADERS)]
)
client = mock.Mock(_http=http, spec=["_http"])
conn = self._make_mock_one(client)
path = "/path/required"
self.assertEqual(conn.api_request("GET", path), {})
expected_headers = {
"Accept-Encoding": "gzip",
"User-Agent": conn.user_agent,
CLIENT_INFO_HEADER: conn.user_agent,
}
expected_url = "{base}/mock/{version}{path}?prettyPrint=false".format(
base=conn.API_BASE_URL, version=conn.API_VERSION, path=path
)
http.request.assert_called_once_with(
method="GET",
url=expected_url,
headers=expected_headers,
data=None,
timeout=self._get_default_timeout(),
)
def test_api_request_w_non_json_response(self):
http = make_requests_session([make_response(content=b"content")])
client = mock.Mock(_http=http, spec=["_http"])
conn = self._make_mock_one(client)
with self.assertRaises(ValueError):
conn.api_request("GET", "/")
def test_api_request_wo_json_expected(self):
http = make_requests_session([make_response(content=b"content")])
client = mock.Mock(_http=http, spec=["_http"])
conn = self._make_mock_one(client)
result = conn.api_request("GET", "/", expect_json=False)
self.assertEqual(result, b"content")
def test_api_request_w_query_params(self):
from six.moves.urllib.parse import parse_qs
from six.moves.urllib.parse import urlsplit
from google.cloud._http import CLIENT_INFO_HEADER
http = make_requests_session([self.EMPTY_JSON_RESPONSE])
client = mock.Mock(_http=http, spec=["_http"])
conn = self._make_mock_one(client)
result = conn.api_request("GET", "/", {"foo": "bar", "baz": ["qux", "quux"]})
self.assertEqual(result, {})
expected_headers = {
"Accept-Encoding": "gzip",
"User-Agent": conn.user_agent,
CLIENT_INFO_HEADER: conn.user_agent,
}
http.request.assert_called_once_with(
method="GET",
url=mock.ANY,
headers=expected_headers,
data=None,
timeout=self._get_default_timeout(),
)
url = http.request.call_args[1]["url"]
scheme, netloc, path, qs, _ = urlsplit(url)
self.assertEqual("%s://%s" % (scheme, netloc), conn.API_BASE_URL)
# Intended to emulate self.mock_template
PATH = "/".join(["", "mock", conn.API_VERSION, ""])
self.assertEqual(path, PATH)
parms = dict(parse_qs(qs))
self.assertEqual(parms["foo"], ["bar"])
self.assertEqual(parms["baz"], ["qux", "quux"])
def test_api_request_w_headers(self):
from google.cloud._http import CLIENT_INFO_HEADER
http = make_requests_session([self.EMPTY_JSON_RESPONSE])
client = mock.Mock(_http=http, spec=["_http"])
conn = self._make_mock_one(client)
result = conn.api_request("GET", "/", headers={"X-Foo": "bar"})
self.assertEqual(result, {})
expected_headers = {
"Accept-Encoding": "gzip",
"User-Agent": conn.user_agent,
"X-Foo": "bar",
CLIENT_INFO_HEADER: conn.user_agent,
}
http.request.assert_called_once_with(
method="GET",
url=mock.ANY,
headers=expected_headers,
data=None,
timeout=self._get_default_timeout(),
)
def test_api_request_w_extra_headers(self):
from google.cloud._http import CLIENT_INFO_HEADER
http = make_requests_session([self.EMPTY_JSON_RESPONSE])
client = mock.Mock(_http=http, spec=["_http"])
conn = self._make_mock_one(client)
conn.extra_headers = {
"X-Baz": "dax-quux",
"X-Foo": "not-bar", # Collision with ``headers``.
}
result = conn.api_request("GET", "/", headers={"X-Foo": "bar"})
self.assertEqual(result, {})
expected_headers = {
"Accept-Encoding": "gzip",
"User-Agent": conn.user_agent,
"X-Foo": "not-bar", # The one passed-in is overridden.
"X-Baz": "dax-quux",
CLIENT_INFO_HEADER: conn.user_agent,
}
http.request.assert_called_once_with(
method="GET",
url=mock.ANY,
headers=expected_headers,
data=None,
timeout=self._get_default_timeout(),
)
def test_api_request_w_data(self):
from google.cloud._http import CLIENT_INFO_HEADER
http = make_requests_session([self.EMPTY_JSON_RESPONSE])
client = mock.Mock(_http=http, spec=["_http"])
conn = self._make_mock_one(client)
data = {"foo": "bar"}
self.assertEqual(conn.api_request("POST", "/", data=data), {})
expected_data = json.dumps(data)
expected_headers = {
"Accept-Encoding": "gzip",
"Content-Type": "application/json",
"User-Agent": conn.user_agent,
CLIENT_INFO_HEADER: conn.user_agent,
}
http.request.assert_called_once_with(
method="POST",
url=mock.ANY,
headers=expected_headers,
data=expected_data,
timeout=self._get_default_timeout(),
)
def test_api_request_w_timeout(self):
from google.cloud._http import CLIENT_INFO_HEADER
http = make_requests_session(
[make_response(content=b"{}", headers=self.JSON_HEADERS)]
)
client = mock.Mock(_http=http, spec=["_http"])
conn = self._make_mock_one(client)
path = "/path/required"
self.assertEqual(conn.api_request("GET", path, timeout=(2.2, 3.3)), {})
expected_headers = {
"Accept-Encoding": "gzip",
"User-Agent": conn.user_agent,
CLIENT_INFO_HEADER: conn.user_agent,
}
expected_url = "{base}/mock/{version}{path}?prettyPrint=false".format(
base=conn.API_BASE_URL, version=conn.API_VERSION, path=path
)
http.request.assert_called_once_with(
method="GET",
url=expected_url,
headers=expected_headers,
data=None,
timeout=(2.2, 3.3),
)
def test_api_request_w_404(self):
from google.cloud import exceptions
http = make_requests_session([make_response(http_client.NOT_FOUND)])
client = mock.Mock(_http=http, spec=["_http"])
conn = self._make_mock_one(client)
with self.assertRaises(exceptions.NotFound):
conn.api_request("GET", "/")
def test_api_request_w_500(self):
from google.cloud import exceptions
http = make_requests_session([make_response(http_client.INTERNAL_SERVER_ERROR)])
client = mock.Mock(_http=http, spec=["_http"])
conn = self._make_mock_one(client)
with self.assertRaises(exceptions.InternalServerError):
conn.api_request("GET", "/")
| []
| []
| []
| [] | [] | python | 0 | 0 | |
cmd/ssh-proxy-agent/commands/ssh-proxy-agent_command.go | package commands
import (
"os"
"strings"
"github.com/spf13/cobra"
"github.com/miquella/ssh-proxy-agent/operations"
)
// SSHProxyAgentCommand is the root command for the `ssh-proxy-agent` entrypoint
var SSHProxyAgentCommand = &cobra.Command{
Use: "ssh-proxy-agent",
Short: "SSH-Proxy-Agent creates an ssh-agent proxy",
RunE: shellRunE,
SilenceUsage: true,
Version: "0.2.unstable",
}
var interactive bool
var shell = operations.Spawn{}
var validPrincipals []string
func init() {
SSHProxyAgentCommand.Flags().BoolVarP(&interactive, "shell", "l", false, "spawn an interactive shell")
SSHProxyAgentCommand.Flags().BoolVar(&shell.NoProxy, "no-proxy", false, "disable forwarding to an upstream agent (default: false)")
SSHProxyAgentCommand.Flags().BoolVar(&shell.GenerateKey, "generate-key", false, "generate RSA key pair (default: false)")
SSHProxyAgentCommand.Flags().StringSliceVar(&validPrincipals, "valid-principals", []string{os.Getenv("USER")}, "valid principals for Vault key signing")
SSHProxyAgentCommand.Flags().StringVar(&shell.VaultSigningUrl, "vault-signing-url", "", "HashiCorp Vault url to sign SSH keys")
}
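// Hedged usage sketch, not part of the original file: one way the flags defined in
// init above could be combined on the command line. The Vault URL and principal are
// placeholders, not values taken from this repository.
//
//	ssh-proxy-agent --shell --generate-key \
//	    --vault-signing-url=https://vault.example.com/v1/ssh/sign/my-role \
//	    --valid-principals=alice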
func shellRunE(cmd *cobra.Command, args []string) error {
if interactive {
shell.Command = loginShellCommand()
shell.ValidPrincipals = strings.Join(validPrincipals, ",")
return shell.Run()
} else {
return cmd.Usage()
}
}
func loginShellCommand() []string {
shell := os.Getenv("SHELL")
if shell == "" {
shell = "/bin/sh"
}
return []string{shell, "--login"}
}
| [
"\"USER\"",
"\"SHELL\""
]
| []
| [
"SHELL",
"USER"
]
| [] | ["SHELL", "USER"] | go | 2 | 0 | |
crypt/sops/pgp.go | package sops
import (
"os"
sops "go.mozilla.org/sops/v3"
"go.mozilla.org/sops/v3/pgp"
)
func init() {
Configs["pgp"] = &PGPConfig{}
}
type PGPConfig struct{}
func (c *PGPConfig) IsActivated() bool {
_, ok := os.LookupEnv("TF_BACKEND_HTTP_SOPS_PGP_FP")
return ok
}
func (c *PGPConfig) KeyGroup() (sops.KeyGroup, error) {
fp := os.Getenv("TF_BACKEND_HTTP_SOPS_PGP_FP")
var keyGroup sops.KeyGroup
for _, k := range pgp.MasterKeysFromFingerprintString(fp) {
keyGroup = append(keyGroup, k)
}
return keyGroup, nil
}
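// exampleKeyGroupForIllustration is a hedged sketch, not part of the original file:
// it shows the intended flow, with the fingerprint taken from the environment and
// expanded into a SOPS key group. The fingerprint value below is a dummy placeholder.
func exampleKeyGroupForIllustration() (sops.KeyGroup, error) {
	_ = os.Setenv("TF_BACKEND_HTTP_SOPS_PGP_FP", "0123456789ABCDEF0123456789ABCDEF01234567")
	c := &PGPConfig{}
	if !c.IsActivated() {
		// only reached when TF_BACKEND_HTTP_SOPS_PGP_FP is unset
		return nil, nil
	}
	return c.KeyGroup()
}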
| [
"\"TF_BACKEND_HTTP_SOPS_PGP_FP\""
]
| []
| [
"TF_BACKEND_HTTP_SOPS_PGP_FP"
]
| [] | ["TF_BACKEND_HTTP_SOPS_PGP_FP"] | go | 1 | 0 | |
cmd/podman/build.go | package main
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/containers/buildah"
"github.com/containers/buildah/imagebuildah"
buildahcli "github.com/containers/buildah/pkg/cli"
"github.com/containers/image/v5/types"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/pkg/adapter"
"github.com/docker/go-units"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
var (
buildCommand cliconfig.BuildValues
buildDescription = "Builds an OCI or Docker image using instructions from one or more Containerfiles and a specified build context directory."
layerValues buildahcli.LayerResults
budFlagsValues buildahcli.BudResults
fromAndBudValues buildahcli.FromAndBudResults
userNSValues buildahcli.UserNSResults
namespaceValues buildahcli.NameSpaceResults
podBuildValues cliconfig.PodmanBuildResults
_buildCommand = &cobra.Command{
Use: "build [flags] CONTEXT",
Short: "Build an image using instructions from Containerfiles",
Long: buildDescription,
RunE: func(cmd *cobra.Command, args []string) error {
buildCommand.InputArgs = args
buildCommand.GlobalFlags = MainGlobalOpts
buildCommand.BudResults = &budFlagsValues
buildCommand.UserNSResults = &userNSValues
buildCommand.FromAndBudResults = &fromAndBudValues
buildCommand.LayerResults = &layerValues
buildCommand.NameSpaceResults = &namespaceValues
buildCommand.PodmanBuildResults = &podBuildValues
buildCommand.Remote = remoteclient
return buildCmd(&buildCommand)
},
Example: `podman build .
podman build --creds=username:password -t imageName -f Containerfile.simple .
podman build --layers --force-rm --tag imageName .`,
}
)
func initBuild() {
buildCommand.Command = _buildCommand
buildCommand.SetHelpTemplate(HelpTemplate())
buildCommand.SetUsageTemplate(UsageTemplate())
flags := buildCommand.Flags()
flags.SetInterspersed(true)
budFlags := buildahcli.GetBudFlags(&budFlagsValues)
flag := budFlags.Lookup("pull")
if err := flag.Value.Set("true"); err != nil {
logrus.Error("unable to set pull flag to true")
}
flag.DefValue = "true"
layerFlags := buildahcli.GetLayerFlags(&layerValues)
flag = layerFlags.Lookup("layers")
if err := flag.Value.Set(useLayers()); err != nil {
logrus.Error("unable to set uselayers")
}
flag.DefValue = useLayers()
flag = layerFlags.Lookup("force-rm")
if err := flag.Value.Set("true"); err != nil {
logrus.Error("unable to set force-rm flag to true")
}
flag.DefValue = "true"
podmanBuildFlags := GetPodmanBuildFlags(&podBuildValues)
flag = podmanBuildFlags.Lookup("squash-all")
if err := flag.Value.Set("false"); err != nil {
logrus.Error("unable to set squash-all flag to false")
}
flag.DefValue = "true"
fromAndBugFlags, err := buildahcli.GetFromAndBudFlags(&fromAndBudValues, &userNSValues, &namespaceValues)
if err != nil {
logrus.Errorf("failed to setup podman build flags: %v", err)
}
flags.AddFlagSet(&budFlags)
flags.AddFlagSet(&fromAndBugFlags)
flags.AddFlagSet(&layerFlags)
flags.AddFlagSet(&podmanBuildFlags)
markFlagHidden(flags, "signature-policy")
}
// GetPodmanBuildFlags flags used only by `podman build` and not by
// `buildah bud`.
func GetPodmanBuildFlags(flags *cliconfig.PodmanBuildResults) pflag.FlagSet {
fs := pflag.FlagSet{}
fs.BoolVar(&flags.SquashAll, "squash-all", false, "Squash all layers into a single layer.")
return fs
}
func getContainerfiles(files []string) []string {
var containerfiles []string
for _, f := range files {
if f == "-" {
containerfiles = append(containerfiles, "/dev/stdin")
} else {
containerfiles = append(containerfiles, f)
}
}
return containerfiles
}
func getNsValues(c *cliconfig.BuildValues) ([]buildah.NamespaceOption, error) {
var ret []buildah.NamespaceOption
if c.Network != "" {
switch {
case c.Network == "host":
ret = append(ret, buildah.NamespaceOption{
Name: string(specs.NetworkNamespace),
Host: true,
})
case c.Network == "container":
ret = append(ret, buildah.NamespaceOption{
Name: string(specs.NetworkNamespace),
})
case c.Network[0] == '/':
ret = append(ret, buildah.NamespaceOption{
Name: string(specs.NetworkNamespace),
Path: c.Network,
})
default:
return nil, fmt.Errorf("unsupported configuration network=%s", c.Network)
}
}
return ret, nil
}
func buildCmd(c *cliconfig.BuildValues) error {
if (c.Flags().Changed("squash") && c.Flags().Changed("layers")) ||
(c.Flags().Changed("squash-all") && c.Flags().Changed("layers")) ||
(c.Flags().Changed("squash-all") && c.Flags().Changed("squash")) {
return fmt.Errorf("cannot specify squash, squash-all and layers options together")
}
// The following was taken directly from containers/buildah/cmd/bud.go
	// TODO Find a way to vendor more of this in rather than copying it from bud
output := ""
tags := []string{}
if c.Flag("tag").Changed {
tags = c.Tag
if len(tags) > 0 {
output = tags[0]
tags = tags[1:]
}
}
if c.BudResults.Authfile != "" {
if _, err := os.Stat(c.BudResults.Authfile); err != nil {
return errors.Wrapf(err, "error getting authfile %s", c.BudResults.Authfile)
}
}
pullPolicy := imagebuildah.PullNever
if c.Pull {
pullPolicy = imagebuildah.PullIfMissing
}
if c.PullAlways {
pullPolicy = imagebuildah.PullAlways
}
args := make(map[string]string)
if c.Flag("build-arg").Changed {
for _, arg := range c.BuildArg {
av := strings.SplitN(arg, "=", 2)
if len(av) > 1 {
args[av[0]] = av[1]
} else {
delete(args, av[0])
}
}
}
containerfiles := getContainerfiles(c.File)
format, err := getFormat(&c.PodmanCommand)
if err != nil {
return nil
}
contextDir := ""
cliArgs := c.InputArgs
layers := c.Layers // layers for podman defaults to true
// Check to see if the BUILDAH_LAYERS environment variable is set and override command-line
if _, ok := os.LookupEnv("BUILDAH_LAYERS"); ok {
layers = buildahcli.UseLayers()
}
if len(cliArgs) > 0 {
// The context directory could be a URL. Try to handle that.
tempDir, subDir, err := imagebuildah.TempDirForURL("", "buildah", cliArgs[0])
if err != nil {
return errors.Wrapf(err, "error prepping temporary context directory")
}
if tempDir != "" {
// We had to download it to a temporary directory.
// Delete it later.
defer func() {
if err = os.RemoveAll(tempDir); err != nil {
logrus.Errorf("error removing temporary directory %q: %v", contextDir, err)
}
}()
contextDir = filepath.Join(tempDir, subDir)
} else {
// Nope, it was local. Use it as is.
absDir, err := filepath.Abs(cliArgs[0])
if err != nil {
return errors.Wrapf(err, "error determining path to directory %q", cliArgs[0])
}
contextDir = absDir
}
} else {
// No context directory or URL was specified. Try to use the
// home of the first locally-available Containerfile.
for i := range containerfiles {
if strings.HasPrefix(containerfiles[i], "http://") ||
strings.HasPrefix(containerfiles[i], "https://") ||
strings.HasPrefix(containerfiles[i], "git://") ||
strings.HasPrefix(containerfiles[i], "github.com/") {
continue
}
absFile, err := filepath.Abs(containerfiles[i])
if err != nil {
return errors.Wrapf(err, "error determining path to file %q", containerfiles[i])
}
contextDir = filepath.Dir(absFile)
break
}
}
if contextDir == "" {
return errors.Errorf("no context directory specified, and no containerfile specified")
}
if !fileIsDir(contextDir) {
return errors.Errorf("context must be a directory: %v", contextDir)
}
if len(containerfiles) == 0 {
if checkIfFileExists(filepath.Join(contextDir, "Containerfile")) {
containerfiles = append(containerfiles, filepath.Join(contextDir, "Containerfile"))
} else {
containerfiles = append(containerfiles, filepath.Join(contextDir, "Dockerfile"))
}
}
runtime, err := adapter.GetRuntime(getContext(), &c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
runtimeFlags := []string{}
for _, arg := range c.RuntimeFlags {
runtimeFlags = append(runtimeFlags, "--"+arg)
}
conf, err := runtime.GetConfig()
if err != nil {
return err
}
if conf != nil && conf.CgroupManager == define.SystemdCgroupsManager {
runtimeFlags = append(runtimeFlags, "--systemd-cgroup")
}
// end from buildah
defer runtime.DeferredShutdown(false)
var stdout, stderr, reporter *os.File
stdout = os.Stdout
stderr = os.Stderr
reporter = os.Stderr
if c.Flag("logfile").Changed {
f, err := os.OpenFile(c.Logfile, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
if err != nil {
return errors.Errorf("error opening logfile %q: %v", c.Logfile, err)
}
defer f.Close()
logrus.SetOutput(f)
stdout = f
stderr = f
reporter = f
}
var memoryLimit, memorySwap int64
if c.Flags().Changed("memory") {
memoryLimit, err = units.RAMInBytes(c.Memory)
if err != nil {
return err
}
}
if c.Flags().Changed("memory-swap") {
memorySwap, err = units.RAMInBytes(c.MemorySwap)
if err != nil {
return err
}
}
nsValues, err := getNsValues(c)
if err != nil {
return err
}
buildOpts := buildah.CommonBuildOptions{
AddHost: c.AddHost,
CgroupParent: c.CgroupParent,
CPUPeriod: c.CPUPeriod,
CPUQuota: c.CPUQuota,
CPUShares: c.CPUShares,
CPUSetCPUs: c.CPUSetCPUs,
CPUSetMems: c.CPUSetMems,
Memory: memoryLimit,
MemorySwap: memorySwap,
ShmSize: c.ShmSize,
Ulimit: c.Ulimit,
Volumes: c.Volumes,
}
// `buildah bud --layers=false` acts like `docker build --squash` does.
	// That is, all of the new layers created during the build process are
	// condensed into one; any layers present prior to this build are retained
	// without condensing. `buildah bud --squash` squashes both new and old
	// layers down into one. Translate Podman commands into Buildah.
// Squash invoked, retain old layers, squash new layers into one.
if c.Flags().Changed("squash") && c.Squash {
c.Squash = false
layers = false
}
// Squash-all invoked, squash both new and old layers into one.
if c.Flags().Changed("squash-all") {
c.Squash = true
layers = false
}
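	// Illustrative summary, not from the original source, of the translation above:
	//   podman build --squash     -> buildah squash=false, layers=false (new layers collapsed into one, old layers kept)
	//   podman build --squash-all -> buildah squash=true,  layers=false (new and old layers squashed into one)
	//   podman build --layers     -> buildah layers=true (podman's default here)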
options := imagebuildah.BuildOptions{
Architecture: c.Arch,
CommonBuildOpts: &buildOpts,
AdditionalTags: tags,
Annotations: c.Annotation,
Args: args,
CNIConfigDir: c.CNIConfigDir,
CNIPluginPath: c.CNIPlugInPath,
Compression: imagebuildah.Gzip,
ContextDirectory: contextDir,
DefaultMountsFilePath: c.GlobalFlags.DefaultMountsFile,
Err: stderr,
In: os.Stdin,
ForceRmIntermediateCtrs: c.ForceRm,
IIDFile: c.Iidfile,
Labels: c.Label,
Layers: layers,
NamespaceOptions: nsValues,
NoCache: c.NoCache,
OS: c.OS,
Out: stdout,
Output: output,
OutputFormat: format,
PullPolicy: pullPolicy,
Quiet: c.Quiet,
RemoveIntermediateCtrs: c.Rm,
ReportWriter: reporter,
RuntimeArgs: runtimeFlags,
SignaturePolicyPath: c.SignaturePolicy,
Squash: c.Squash,
SystemContext: &types.SystemContext{
OSChoice: c.OverrideOS,
ArchitectureChoice: c.OverrideArch,
},
Target: c.Target,
}
_, _, err = runtime.Build(getContext(), c, options, containerfiles)
return err
}
// useLayers returns "false" if the BUILDAH_LAYERS environment variable is set to "0" or "false";
// otherwise it returns "true".
func useLayers() string {
layers := os.Getenv("BUILDAH_LAYERS")
if strings.ToLower(layers) == "false" || layers == "0" {
return "false"
}
return "true"
}
| [
"\"BUILDAH_LAYERS\""
]
| []
| [
"BUILDAH_LAYERS"
]
| [] | ["BUILDAH_LAYERS"] | go | 1 | 0 | |
pkg/secretmgr/vault/client/factory_test.go | package client_test
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/jenkins-x-labs/helmboot/pkg/secretmgr/vault/client"
"github.com/jenkins-x-labs/helmboot/pkg/secretmgr/vault/client/fake"
"github.com/jenkins-x/jx/pkg/jxfactory"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
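// Hedged note, not part of the original file: TestClient below only talks to a real
// Vault when TEST_VAULT=true is exported, e.g.
//
//	TEST_VAULT=true go test ./pkg/secretmgr/vault/client/...
//
// The package path above is inferred from this file's location and may differ.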
func TestClient(t *testing.T) {
// lets only test if we have a local vault
if os.Getenv("TEST_VAULT") != "true" {
t.SkipNow()
return
}
jxf := jxfactory.NewFactory()
f, err := client.NewFactoryFromJX(jxf)
require.NoError(t, err, "could not create vault factory")
AssertVaultClientOperations(t, f)
}
func TestClientWithFakeServer(t *testing.T) {
f, _ := fake.NewVaultClientWithFakeKubernetes(t)
// lets create a fake test vault server...
server := fake.NewFakeVaultServer(t)
defer server.Close()
AssertVaultClientOperations(t, f)
}
// AssertVaultClientOperations performs tests on the vault client to check it works
func AssertVaultClientOperations(t *testing.T, f *client.Factory) {
	// let's create a temp dir to hold the CA cert file
	tempDir, err := ioutil.TempDir("", "vault-cert-")
	require.NoError(t, err, "failed to create a temporary directory")
f.CertFile = filepath.Join(tempDir, "vault-ca.crt")
defer os.RemoveAll(tempDir)
vaultClient, err := client.NewVaultClient(f)
require.NoError(t, err, "could not create vault client")
t.Logf("Created Vault client")
path := "thingy"
expectedData := map[string]interface{}{
"hmacToken": "TODO",
"another": "thing",
"adminUser": map[string]interface{}{
"username": "admin",
"password": "dummypwd",
},
"pipelineUser": map[string]interface{}{
"username": "somegithyser",
"token": "sometoken",
},
}
err = vaultClient.Write(path, expectedData)
require.NoError(t, err, "failed to write data %v to vault", expectedData)
actual, err := vaultClient.Read(path)
require.NoError(t, err, "could not read from vault")
require.NotNil(t, actual, "no data found in vault")
for k, v := range actual {
t.Logf(" %s -> %+v", k, v)
}
t.Logf("Finished reading Vault for path: %s\n", path)
assert.Equal(t, expectedData, actual, "data read from vault")
}
| [
"\"TEST_VAULT\""
]
| []
| [
"TEST_VAULT"
]
| [] | ["TEST_VAULT"] | go | 1 | 0 | |
internal/publication/aws/snsPublication.go | package aws
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sns"
"log"
"os"
)
type SnsPublication struct{}
var snsSvc *sns.SNS
func init() {
region := os.Getenv("AWS_REGION")
if ses, err := session.NewSession(&aws.Config{
Region: ®ion,
}); err != nil {
fmt.Println(fmt.Sprintf("Failed to connect to AWS: %s", err.Error()))
} else {
snsSvc = sns.New(ses)
}
}
func (SnsPublication) PublishMetadataCreated(metadataUrl string) (err error) {
topic := os.Getenv("SNS_METADATA_CREATED")
params := &sns.PublishInput{
Message: aws.String(metadataUrl),
TopicArn: aws.String(topic),
}
resp, err := snsSvc.Publish(params)
if err != nil {
		log.Print(err.Error())
}
log.Println(resp.MessageId)
return err
}
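// exampleUsageForIllustration is a hedged sketch, not part of the original file: it
// assumes AWS_REGION and SNS_METADATA_CREATED (an SNS topic ARN) were exported before
// package init ran, and the metadata URL below is a placeholder.
func exampleUsageForIllustration() error {
	pub := SnsPublication{}
	return pub.PublishMetadataCreated("https://example.com/metadata/1234.json")
}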
| [
"\"AWS_REGION\"",
"\"SNS_METADATA_CREATED\""
]
| []
| [
"SNS_METADATA_CREATED",
"AWS_REGION"
]
| [] | ["SNS_METADATA_CREATED", "AWS_REGION"] | go | 2 | 0 | |
examples/mysql/examples_mysql.go | /*
MySQL Dao example.
$ go run examples_mysql.go
MySQL Dao implementation guideline:
- Must implement method godal.IGenericDao.GdaoCreateFilter(storageId string, bo godal.IGenericBo) godal.FilterOpt
(already implemented by common.DaoAppSql)
- If application uses its own BOs instead of godal.IGenericBo, it is recommended to implement a utility method
to transform godal.IGenericBo to application's BO and vice versa.
*/
package main
import (
"fmt"
"math/rand"
"os"
"strconv"
"strings"
"time"
"github.com/btnguyen2k/godal/examples/common"
"github.com/btnguyen2k/prom"
_ "github.com/go-sql-driver/mysql"
"github.com/btnguyen2k/godal"
"github.com/btnguyen2k/godal/sql"
)
// DaoAppMysql is MySQL-implementation of IDaoApp.
type DaoAppMysql struct {
*common.DaoAppSql
}
// NewDaoAppMysql is helper function to create MySQL-implementation of IDaoApp.
func NewDaoAppMysql(sqlC *prom.SqlConnect, tableName string) common.IDaoApp {
dao := &DaoAppMysql{}
dao.DaoAppSql = &common.DaoAppSql{TableName: tableName}
dao.IGenericDaoSql = sql.NewGenericDaoSql(sqlC, godal.NewAbstractGenericDao(dao))
dao.SetSqlFlavor(prom.FlavorMySql)
dao.SetRowMapper(&sql.GenericRowMapperSql{
NameTransformation: sql.NameTransfLowerCase,
ColumnsListMap: map[string][]string{tableName: common.ColsSql}})
return dao
}
/*----------------------------------------------------------------------*/
func createSqlConnectForMysql() *prom.SqlConnect {
driver := strings.ReplaceAll(os.Getenv("MYSQL_DRIVER"), `"`, "")
dsn := strings.ReplaceAll(os.Getenv("MYSQL_URL"), `"`, "")
if driver == "" || dsn == "" {
panic("Please define env MYSQL_DRIVER, MYSQL_DRIVER and optionally TIMEZONE")
}
timeZone := strings.ReplaceAll(os.Getenv("TIMEZONE"), `"`, "")
if timeZone == "" {
timeZone = "UTC"
}
urlTimezone := strings.ReplaceAll(timeZone, "/", "%2f")
dsn = strings.ReplaceAll(dsn, "${loc}", urlTimezone)
dsn = strings.ReplaceAll(dsn, "${tz}", urlTimezone)
dsn = strings.ReplaceAll(dsn, "${timezone}", urlTimezone)
sqlConnect, err := prom.NewSqlConnect(driver, dsn, 10000, nil)
if sqlConnect == nil || err != nil {
if err != nil {
fmt.Println("Error:", err)
}
if sqlConnect == nil {
panic("error creating [prom.SqlConnect] instance")
}
}
loc, _ := time.LoadLocation(timeZone)
sqlConnect.SetLocation(loc)
return sqlConnect
}
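// exampleMysqlEnvForIllustration is a hedged sketch, not part of the original example:
// it shows the shape of the environment variables read by createSqlConnectForMysql.
// Host, credentials and database name are placeholders; the ${loc} token is replaced
// with the URL-escaped TIMEZONE value before the DSN reaches prom.NewSqlConnect.
func exampleMysqlEnvForIllustration() {
	_ = os.Setenv("MYSQL_DRIVER", "mysql")
	_ = os.Setenv("MYSQL_URL", "test:test@tcp(localhost:3306)/test?charset=utf8mb4&parseTime=true&loc=${loc}")
	_ = os.Setenv("TIMEZONE", "UTC")
}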
func initDataMysql(sqlC *prom.SqlConnect, table string) {
sql := fmt.Sprintf("DROP TABLE IF EXISTS %s", table)
_, err := sqlC.GetDB().Exec(sql)
if err != nil {
fmt.Printf("Error while executing query [%s]: %s\n", sql, err)
}
types := []string{"VARCHAR(16)", "VARCHAR(255)", "CHAR(1)", "BIGINT", "DOUBLE", "VARCHAR(256)",
"TIME", "TIME", "DATE", "DATE", "DATETIME", "DATETIME", "TIMESTAMP DEFAULT CURRENT_TIMESTAMP", "TIMESTAMP DEFAULT CURRENT_TIMESTAMP",
"JSON", "JSON"}
sql = fmt.Sprintf("CREATE TABLE %s (", table)
for i := range common.ColsSql {
sql += common.ColsSql[i] + " " + types[i] + ","
}
sql += "PRIMARY KEY(id))"
fmt.Println("Query:", sql)
_, err = sqlC.GetDB().Exec(sql)
if err != nil {
panic(err)
}
}
func demoMysqlInsertRows(loc *time.Location, table string, txMode bool) {
sqlC := createSqlConnectForMysql()
defer sqlC.Close()
initDataMysql(sqlC, table)
dao := NewDaoAppMysql(sqlC, table)
dao.EnableTxMode(txMode)
fmt.Printf("-== Insert rows to table (TxMode=%v) ==-\n", txMode)
// insert a row
t := time.Unix(int64(rand.Int31()), rand.Int63()%1000000000).In(loc)
bo := common.BoApp{
Id: "log",
Description: t.String(),
ValBool: rand.Int31()%2 == 0,
ValInt: rand.Int(),
ValFloat: rand.Float64(),
ValString: fmt.Sprintf("Logging application (TxMode=%v)", txMode),
ValTime: t,
ValTimeZ: t,
ValDate: t,
ValDateZ: t,
ValDatetime: t,
ValDatetimeZ: t,
ValTimestamp: t,
ValTimestampZ: t,
ValList: []interface{}{true, 0, "1", 2.3, "system", "utility"},
ValMap: map[string]interface{}{"tags": []string{"system", "utility"}, "age": 103, "active": true},
}
fmt.Println("\tCreating bo:", string(bo.ToJson()))
result, err := dao.Create(&bo)
if err != nil {
fmt.Printf("\t\tError: %s\n", err)
} else {
fmt.Printf("\t\tResult: %v\n", result)
}
// insert another row
t = time.Unix(int64(rand.Int31()), rand.Int63()%1000000000).In(loc)
bo = common.BoApp{
Id: "login",
Description: t.String(),
ValBool: rand.Int31()%2 == 0,
ValInt: rand.Int(),
ValFloat: rand.Float64(),
ValString: fmt.Sprintf("Authentication application (TxMode=%v)", txMode),
ValTime: t,
ValTimeZ: t,
ValDate: t,
ValDateZ: t,
ValDatetime: t,
ValDatetimeZ: t,
ValTimestamp: t,
ValTimestampZ: t,
ValList: []interface{}{false, 9.8, "7", 6, "system", "security"},
ValMap: map[string]interface{}{"tags": []string{"system", "security"}, "age": 81, "active": false},
}
fmt.Println("\tCreating bo:", string(bo.ToJson()))
result, err = dao.Create(&bo)
if err != nil {
fmt.Printf("\t\tError: %s\n", err)
} else {
fmt.Printf("\t\tResult: %v\n", result)
}
// insert another row with duplicated id
bo.Id = "login"
bo.ValString = "Authentication application (TxMode=true)(again)"
bo.ValList = []interface{}{"duplicated"}
fmt.Println("\tCreating bo:", string(bo.ToJson()))
result, err = dao.Create(&bo)
if err != nil {
fmt.Printf("\t\tError: %s\n", err)
} else {
fmt.Printf("\t\tResult: %v\n", result)
}
fmt.Println(common.SEP)
}
func demoMysqlFetchRowById(table string, ids ...string) {
sqlC := createSqlConnectForMysql()
defer sqlC.Close()
dao := NewDaoAppMysql(sqlC, table)
dao.EnableTxMode(false)
fmt.Printf("-== Fetch rows by id ==-\n")
for _, id := range ids {
bo, err := dao.Get(id)
if err != nil {
fmt.Printf("\tError while fetching app [%s]: %s\n", id, err)
} else if bo != nil {
common.PrintApp(bo)
} else {
fmt.Printf("\tApp [%s] does not exist\n", id)
}
}
fmt.Println(common.SEP)
}
func demoMysqlFetchAllRows(table string) {
sqlC := createSqlConnectForMysql()
defer sqlC.Close()
dao := NewDaoAppMysql(sqlC, table)
dao.EnableTxMode(false)
fmt.Println("-== Fetch all rows in table ==-")
boList, err := dao.GetAll()
if err != nil {
fmt.Printf("\tError while fetching apps: %s\n", err)
} else {
for _, bo := range boList {
common.PrintApp(bo)
}
}
fmt.Println(common.SEP)
}
func demoMysqlDeleteRow(table string, ids ...string) {
sqlC := createSqlConnectForMysql()
defer sqlC.Close()
dao := NewDaoAppMysql(sqlC, table)
dao.EnableTxMode(false)
fmt.Println("-== Delete rows from table ==-")
for _, id := range ids {
bo, err := dao.Get(id)
if err != nil {
fmt.Printf("\tError while fetching app [%s]: %s\n", id, err)
} else if bo == nil {
fmt.Printf("\tApp [%s] does not exist, no need to delete\n", id)
} else {
fmt.Println("\tDeleting bo:", string(bo.ToJson()))
result, err := dao.Delete(bo)
if err != nil {
fmt.Printf("\t\tError: %s\n", err)
} else {
fmt.Printf("\t\tResult: %v\n", result)
}
app, err := dao.Get(id)
if err != nil {
fmt.Printf("\t\tError while fetching app [%s]: %s\n", id, err)
} else if app != nil {
fmt.Printf("\t\tApp [%s] info: %v\n", app.Id, string(app.ToJson()))
} else {
fmt.Printf("\t\tApp [%s] no longer exist\n", id)
result, err = dao.Delete(bo)
fmt.Printf("\t\tDeleting app [%s] again: %v / %s\n", id, result, err)
}
}
}
fmt.Println(common.SEP)
}
func demoMysqlUpdateRows(loc *time.Location, table string, ids ...string) {
sqlC := createSqlConnectForMysql()
defer sqlC.Close()
dao := NewDaoAppMysql(sqlC, table)
dao.EnableTxMode(false)
fmt.Println("-== Update rows from table ==-")
for _, id := range ids {
t := time.Unix(int64(rand.Int31()), rand.Int63()%1000000000).In(loc)
bo, err := dao.Get(id)
if err != nil {
fmt.Printf("\tError while fetching app [%s]: %s\n", id, err)
} else if bo == nil {
fmt.Printf("\tApp [%s] does not exist\n", id)
bo = &common.BoApp{
Id: id,
Description: t.String(),
ValString: "(updated)",
ValTime: t,
ValTimeZ: t,
ValDate: t,
ValDateZ: t,
ValDatetime: t,
ValDatetimeZ: t,
ValTimestamp: t,
ValTimestampZ: t,
}
} else {
fmt.Println("\tExisting bo:", string(bo.ToJson()))
bo.Description = t.String()
bo.ValString += "(updated)"
bo.ValTime = t
bo.ValTimeZ = t
bo.ValDate = t
bo.ValDateZ = t
bo.ValDatetime = t
bo.ValDatetimeZ = t
bo.ValTimestamp = t
bo.ValTimestampZ = t
}
fmt.Println("\t\tUpdating bo:", string(bo.ToJson()))
result, err := dao.Update(bo)
if err != nil {
fmt.Printf("\t\tError while updating app [%s]: %s\n", id, err)
} else {
fmt.Printf("\t\tResult: %v\n", result)
bo, err = dao.Get(id)
if err != nil {
fmt.Printf("\t\tError while fetching app [%s]: %s\n", id, err)
} else if bo != nil {
fmt.Printf("\t\tApp [%s] info: %v\n", bo.Id, string(bo.ToJson()))
} else {
fmt.Printf("\t\tApp [%s] does not exist\n", id)
}
}
}
fmt.Println(common.SEP)
}
func demoMysqlUpsertRows(loc *time.Location, table string, txMode bool, ids ...string) {
sqlC := createSqlConnectForMysql()
defer sqlC.Close()
dao := NewDaoAppMysql(sqlC, table)
dao.EnableTxMode(txMode)
fmt.Printf("-== Upsert rows to table (TxMode=%v) ==-\n", txMode)
for _, id := range ids {
t := time.Unix(int64(rand.Int31()), rand.Int63()%1000000000).In(loc)
bo, err := dao.Get(id)
if err != nil {
fmt.Printf("\tError while fetching app [%s]: %s\n", id, err)
} else if bo == nil {
fmt.Printf("\tApp [%s] does not exist\n", id)
bo = &common.BoApp{
Id: id,
Description: t.String(),
ValString: fmt.Sprintf("(upsert,txmode=%v)", txMode),
ValTime: t,
ValTimeZ: t,
ValDate: t,
ValDateZ: t,
ValDatetime: t,
ValDatetimeZ: t,
ValTimestamp: t,
ValTimestampZ: t,
}
} else {
fmt.Println("\tExisting bo:", string(bo.ToJson()))
bo.Description = t.String()
bo.ValString += fmt.Sprintf("(upsert,txmode=%v)", txMode)
bo.ValTime = t
bo.ValTimeZ = t
bo.ValDate = t
bo.ValDateZ = t
bo.ValDatetime = t
bo.ValDatetimeZ = t
bo.ValTimestamp = t
bo.ValTimestampZ = t
}
fmt.Println("\t\tUpserting bo:", string(bo.ToJson()))
result, err := dao.Upsert(bo)
if err != nil {
fmt.Printf("\t\tError while upserting app [%s]: %s\n", id, err)
} else {
fmt.Printf("\t\tResult: %v\n", result)
bo, err = dao.Get(id)
if err != nil {
fmt.Printf("\t\tError while fetching app [%s]: %s\n", id, err)
} else if bo != nil {
fmt.Printf("\t\tApp [%s] info: %v\n", bo.Id, string(bo.ToJson()))
} else {
fmt.Printf("\t\tApp [%s] does not exist\n", id)
}
}
}
fmt.Println(common.SEP)
}
func demoMysqlSelectSortingAndLimit(loc *time.Location, table string) {
sqlC := createSqlConnectForMysql()
defer sqlC.Close()
initDataMysql(sqlC, table)
dao := NewDaoAppMysql(sqlC, table)
dao.EnableTxMode(false)
fmt.Println("-== Fetch rows from table with sorting and limit ==-")
n := 100
fmt.Printf("\tInserting %d rows...\n", n)
for i := 0; i < n; i++ {
id := strconv.Itoa(i)
for len(id) < 3 {
id = "0" + id
}
t := time.Unix(int64(rand.Int31()), rand.Int63()%1000000000).In(loc)
bo := common.BoApp{
Id: id,
Description: t.String(),
ValBool: rand.Int31()%2 == 0,
ValInt: rand.Int(),
ValFloat: rand.Float64(),
ValString: id + " (sorting and limit)",
ValTime: t,
ValTimeZ: t,
ValDate: t,
ValDateZ: t,
ValDatetime: t,
ValDatetimeZ: t,
ValTimestamp: t,
ValTimestampZ: t,
ValList: []interface{}{rand.Int31()%2 == 0, i, id},
ValMap: map[string]interface{}{"tags": []interface{}{id, i}},
}
_, err := dao.Create(&bo)
if err != nil {
panic(err)
}
}
startOffset := rand.Intn(n)
numRows := rand.Intn(10) + 1
fmt.Printf("\tFetching %d rows, starting from offset %d...\n", numRows, startOffset)
boList, err := dao.GetN(startOffset, numRows)
if err != nil {
fmt.Printf("\t\tError while fetching apps: %s\n", err)
} else {
for _, bo := range boList {
fmt.Printf("\t\tApp [%s] info: %v\n", bo.Id, string(bo.ToJson()))
}
}
fmt.Println(common.SEP)
}
func main() {
rand.Seed(time.Now().UnixNano())
timeZone := strings.ReplaceAll(os.Getenv("TIMEZONE"), `"`, "")
loc, _ := time.LoadLocation(timeZone)
table := "tbl_app"
demoMysqlInsertRows(loc, table, true)
demoMysqlInsertRows(loc, table, false)
demoMysqlFetchRowById(table, "login", "loggin")
demoMysqlFetchAllRows(table)
demoMysqlDeleteRow(table, "login", "loggin")
demoMysqlUpdateRows(loc, table, "log", "logging")
demoMysqlUpsertRows(loc, table, true, "log", "logging")
demoMysqlUpsertRows(loc, table, false, "log", "loggging")
demoMysqlSelectSortingAndLimit(loc, table)
}
| [
"\"MYSQL_DRIVER\"",
"\"MYSQL_URL\"",
"\"TIMEZONE\"",
"\"TIMEZONE\""
]
| []
| [
"MYSQL_URL",
"TIMEZONE",
"MYSQL_DRIVER"
]
| [] | ["MYSQL_URL", "TIMEZONE", "MYSQL_DRIVER"] | go | 3 | 0 | |
pysparkling/tests/test_textFile.py | from __future__ import print_function
import logging
import os
import pickle
import random
import sys
import tempfile
import unittest
import pytest
from pysparkling import Context
from pysparkling.fileio import File
try:
import py7zlib
except ImportError:
py7zlib = None
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
S3_TEST_PATH = os.getenv('S3_TEST_PATH')
OAUTH2_CLIENT_ID = os.getenv('OAUTH2_CLIENT_ID')
GS_TEST_PATH = os.getenv('GS_TEST_PATH')
HDFS_TEST_PATH = os.getenv('HDFS_TEST_PATH')
LOCAL_TEST_PATH = os.path.dirname(__file__)
def test_cache():
# this crashes in version 0.2.28
lines = Context().textFile('{}/*textFil*.py'.format(LOCAL_TEST_PATH))
lines = lines.map(lambda l: '-' + l).cache()
print(len(lines.collect()))
lines = lines.map(lambda l: '+' + l)
lines = lines.map(lambda l: '-' + l).cache()
lines = lines.collect()
print(lines)
assert '-+-from pysparkling import Context' in lines
def test_local_textFile_1():
lines = Context().textFile('{}/*textFil*.py'.format(LOCAL_TEST_PATH))
lines = lines.collect()
print(lines)
assert 'from pysparkling import Context' in lines
def test_local_textFile_2():
line_count = Context().textFile('{}/*.py'.format(LOCAL_TEST_PATH)).count()
print(line_count)
assert line_count > 90
def test_local_textFile_name():
name = Context().textFile('{}/*.py'.format(LOCAL_TEST_PATH)).name()
print(name)
assert name.startswith('{}/*.py'.format(LOCAL_TEST_PATH))
def test_wholeTextFiles():
all_files = Context().wholeTextFiles('{}/*.py'.format(LOCAL_TEST_PATH))
this_file = all_files.lookup(__file__)
print(this_file)
assert 'test_wholeTextFiles' in this_file[0]
@pytest.mark.skipif(not AWS_ACCESS_KEY_ID, reason='no AWS env')
def test_s3_textFile():
myrdd = Context().textFile(
's3n://aws-publicdatasets/common-crawl/crawl-data/'
'CC-MAIN-2015-11/warc.paths.*'
)
assert (
'common-crawl/crawl-data/CC-MAIN-2015-11/segments/1424937481488.49/'
'warc/CC-MAIN-20150226075801-00329-ip-10-28-5-156.ec2.'
'internal.warc.gz' in myrdd.collect()
)
@pytest.mark.skipif(not AWS_ACCESS_KEY_ID, reason='no AWS env')
def test_s3_textFile_loop():
random.seed()
fn = '{}/pysparkling_test_{:d}.txt'.format(
        S3_TEST_PATH, int(random.random() * 999999.0)
)
rdd = Context().parallelize('Line {0}'.format(n) for n in range(200))
rdd.saveAsTextFile(fn)
rdd_check = Context().textFile(fn)
assert (
rdd.count() == rdd_check.count() and
all(e1 == e2 for e1, e2 in zip(rdd.collect(), rdd_check.collect()))
)
@pytest.mark.skipif(not HDFS_TEST_PATH, reason='no HDFS env')
def test_hdfs_textFile_loop():
random.seed()
fn = '{}/pysparkling_test_{:d}.txt'.format(
        HDFS_TEST_PATH, int(random.random() * 999999.0))
print('HDFS test file: {0}'.format(fn))
rdd = Context().parallelize('Hello World {0}'.format(x) for x in range(10))
rdd.saveAsTextFile(fn)
read_rdd = Context().textFile(fn)
print(rdd.collect())
print(read_rdd.collect())
assert (
rdd.count() == read_rdd.count() and
all(r1 == r2 for r1, r2 in zip(rdd.collect(), read_rdd.collect()))
)
@pytest.mark.skipif(not HDFS_TEST_PATH, reason='no HDFS env')
def test_hdfs_file_exists():
random.seed()
fn1 = '{}/pysparkling_test_{:d}.txt'.format(
        HDFS_TEST_PATH, int(random.random() * 999999.0))
fn2 = '{}/pysparkling_test_{:d}.txt'.format(
        HDFS_TEST_PATH, int(random.random() * 999999.0))
rdd = Context().parallelize('Hello World {0}'.format(x) for x in range(10))
rdd.saveAsTextFile(fn1)
assert File(fn1).exists() and not File(fn2).exists()
@pytest.mark.skipif(not GS_TEST_PATH, reason='no GS env')
@pytest.mark.skipif(not OAUTH2_CLIENT_ID, reason='no OAUTH env')
def test_gs_textFile_loop():
random.seed()
fn = '{}/pysparkling_test_{:d}.txt'.format(
        GS_TEST_PATH, int(random.random() * 999999.0))
rdd = Context().parallelize('Line {0}'.format(n) for n in range(200))
rdd.saveAsTextFile(fn)
rdd_check = Context().textFile(fn)
assert (
rdd.count() == rdd_check.count() and
all(e1 == e2 for e1, e2 in zip(rdd.collect(), rdd_check.collect()))
)
@pytest.mark.skipif(not AWS_ACCESS_KEY_ID, reason='no AWS env')
@pytest.mark.skipif(not S3_TEST_PATH, reason='no S3 env')
def test_dumpToFile():
random.seed()
fn = '{}/pysparkling_test_{:d}.pickle'.format(
        S3_TEST_PATH, int(random.random() * 999999.0))
File(fn).dump(pickle.dumps({'hello': 'world'}))
def test_http_textFile():
myrdd = Context().textFile(
'https://s3-us-west-2.amazonaws.com/human-microbiome-project/DEMO/'
'HM16STR/46333/by_subject/1139.fsa'
)
assert u'TGCTGCGGTGAATGCGTTCCCGGGTCT' in myrdd.collect()
def test_saveAsTextFile():
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
Context().parallelize(range(10)).saveAsTextFile(tempFile.name)
with open(tempFile.name, 'r') as f:
r = f.readlines()
print(r)
assert '5\n' in r
def test_saveAsTextFile_tar():
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
Context().parallelize(range(10)).saveAsTextFile(tempFile.name + '.tar')
read_rdd = Context().textFile(tempFile.name + '.tar')
print(read_rdd.collect())
assert '5' in read_rdd.collect()
@unittest.skipIf(hasattr(sys, 'pypy_version_info'), 'skip on pypy')
def test_saveAsTextFile_targz():
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
Context().parallelize(range(10)).saveAsTextFile(tempFile.name + '.tar.gz')
read_rdd = Context().textFile(tempFile.name + '.tar.gz')
print(read_rdd.collect())
assert '5' in read_rdd.collect()
def test_saveAsTextFile_tarbz2():
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
Context().parallelize(range(10)).saveAsTextFile(tempFile.name + '.tar.bz2')
read_rdd = Context().textFile(tempFile.name + '.tar.bz2')
print(read_rdd.collect())
assert '5' in read_rdd.collect()
def test_saveAsTextFile_gz():
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
Context().parallelize(range(10)).saveAsTextFile(tempFile.name + '.gz')
read_rdd = Context().textFile(tempFile.name + '.gz')
assert '5' in read_rdd.collect()
def test_saveAsTextFile_zip():
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
Context().parallelize(range(10)).saveAsTextFile(tempFile.name + '.zip')
read_rdd = Context().textFile(tempFile.name + '.zip')
print(read_rdd.collect())
assert '5' in read_rdd.collect()
def test_saveAsTextFile_bz2():
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
Context().parallelize(range(10)).saveAsTextFile(tempFile.name + '.bz2')
read_rdd = Context().textFile(tempFile.name + '.bz2')
assert '5' in read_rdd.collect()
def test_saveAsTextFile_lzma():
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
Context().parallelize(range(10)).saveAsTextFile(tempFile.name + '.lzma')
read_rdd = Context().textFile(tempFile.name + '.lzma')
assert '5' in read_rdd.collect()
@unittest.skipIf(py7zlib is None,
'py7zlib import failed, is pylzma installed?')
def test_read_7z():
# file was created with:
# 7z a tests/data.7z tests/readme_example.py
# (brew install p7zip)
rdd = Context().textFile('{}/data.7z'.format(LOCAL_TEST_PATH))
print(rdd.collect())
assert 'from pysparkling import Context' in rdd.collect()
def test_read_tar_gz():
# file was created with:
# tar -cvzf data.tar.gz hello.txt
rdd = Context().textFile('{}/data.tar.gz'.format(LOCAL_TEST_PATH))
print(rdd.collect())
assert 'Hello pysparkling!' in rdd.collect()
@unittest.skipIf(os.getenv('TRAVIS') is not None,
'skip 20news test on Travis')
def test_read_tar_gz_20news():
# 20 news dataset has some '0xff' characters that lead to encoding
# errors before. Adding this as a test case.
src = 'http://qwone.com/~jason/20Newsgroups/20news-19997.tar.gz'
rdd = Context().textFile(src, use_unicode=False)
assert '}|> 1. Mechanical driven odometer:' in rdd.top(500)
def test_pyspark_compatibility_txt():
kv = Context().textFile(
'{}/pyspark/key_value.txt'.format(LOCAL_TEST_PATH)).collect()
print(kv)
assert u"('a', 1)" in kv and u"('b', 2)" in kv and len(kv) == 2
def test_pyspark_compatibility_bz2():
kv = Context().textFile(
'{}/pyspark/key_value.txt.bz2'.format(LOCAL_TEST_PATH)).collect()
print(kv)
assert u"a\t1" in kv and u"b\t2" in kv and len(kv) == 2
def test_pyspark_compatibility_gz():
kv = Context().textFile(
'{}/pyspark/key_value.txt.gz'.format(LOCAL_TEST_PATH)).collect()
print(kv)
assert u"a\t1" in kv and u"b\t2" in kv and len(kv) == 2
def test_local_regex_read():
# was not working before 0.3.19
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
Context().parallelize(range(30), 30).saveAsTextFile(tempFile.name)
d = Context().textFile(tempFile.name + '/part-0000*').collect()
print(d)
assert len(d) == 10
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
test_local_regex_read()
| []
| []
| [
"HDFS_TEST_PATH",
"OAUTH2_CLIENT_ID",
"TRAVIS",
"AWS_ACCESS_KEY_ID",
"GS_TEST_PATH",
"S3_TEST_PATH"
]
| [] | ["HDFS_TEST_PATH", "OAUTH2_CLIENT_ID", "TRAVIS", "AWS_ACCESS_KEY_ID", "GS_TEST_PATH", "S3_TEST_PATH"] | python | 6 | 0 | |
core/ledger/testutil/test_helper.go | /*
Copyright IBM Corp. 2016 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testutil
import (
"testing"
"github.com/golang/protobuf/proto"
"github.com/hyperledger/fabric/common/util"
"github.com/hyperledger/fabric/protos/common"
ptestutils "github.com/hyperledger/fabric/protos/testutils"
)
//BlockGenerator generates a series of blocks for testing
type BlockGenerator struct {
blockNum uint64
previousHash []byte
t *testing.T
}
// NewBlockGenerator instantiates new BlockGenerator for testing
func NewBlockGenerator(t *testing.T) *BlockGenerator {
return &BlockGenerator{1, []byte{}, t}
}
// NextBlock constructs next block in sequence that includes a number of transactions - one per simulationResults
func (bg *BlockGenerator) NextBlock(simulationResults [][]byte, sign bool) *common.Block {
envs := []*common.Envelope{}
for i := 0; i < len(simulationResults); i++ {
env, _, err := ConstructTransaction(bg.t, simulationResults[i], sign)
if err != nil {
bg.t.Fatalf("ConstructTestTransaction failed, err %s", err)
}
envs = append(envs, env)
}
block := newBlock(envs, bg.blockNum, bg.previousHash)
bg.blockNum++
bg.previousHash = block.Header.Hash()
return block
}
// NextTestBlock constructs next block in sequence block with 'numTx' number of transactions for testing
func (bg *BlockGenerator) NextTestBlock(numTx int, txSize int) *common.Block {
simulationResults := [][]byte{}
for i := 0; i < numTx; i++ {
simulationResults = append(simulationResults, ConstructRandomBytes(bg.t, txSize))
}
return bg.NextBlock(simulationResults, false)
}
// NextTestBlocks constructs 'numBlocks' number of blocks for testing
func (bg *BlockGenerator) NextTestBlocks(numBlocks int) []*common.Block {
blocks := []*common.Block{}
for i := 0; i < numBlocks; i++ {
blocks = append(blocks, bg.NextTestBlock(10, 100))
}
return blocks
}
// ConstructBlock constructs a single block with blockNum=1
func ConstructBlock(t *testing.T, simulationResults [][]byte, sign bool) *common.Block {
bg := NewBlockGenerator(t)
return bg.NextBlock(simulationResults, sign)
}
// ConstructTestBlock constructs a single block with blocknum=1
func ConstructTestBlock(t *testing.T, numTx int, txSize int) *common.Block {
bg := NewBlockGenerator(t)
return bg.NextTestBlock(numTx, txSize)
}
// ConstructTestBlocks returns a series of blocks starting with blockNum=1
func ConstructTestBlocks(t *testing.T, numBlocks int) []*common.Block {
bg := NewBlockGenerator(t)
return bg.NextTestBlocks(numBlocks)
}
// ConstructTransaction constructs a transaction for testing
func ConstructTransaction(t *testing.T, simulationResults []byte, sign bool) (*common.Envelope, string, error) {
ccName := "foo"
txID := util.GenerateUUID()
var txEnv *common.Envelope
var err error
if sign {
txEnv, err = ptestutils.ConstructSingedTxEnvWithDefaultSigner(txID, util.GetTestChainID(), ccName, simulationResults, nil, nil)
} else {
txEnv, err = ptestutils.ConstructUnsingedTxEnv(txID, util.GetTestChainID(), ccName, simulationResults, nil, nil)
}
return txEnv, txID, err
}
func newBlock(env []*common.Envelope, blockNum uint64, previousHash []byte) *common.Block {
block := common.NewBlock(blockNum, previousHash)
for i := 0; i < len(env); i++ {
txEnvBytes, _ := proto.Marshal(env[i])
block.Data.Data = append(block.Data.Data, txEnvBytes)
}
block.Header.DataHash = block.Data.Hash()
return block
}
| []
| []
| []
| [] | [] | go | null | null | null |
socialMessage/views.py | from django.views.generic import ListView, UpdateView, CreateView, DeleteView
from contact.models import AdressEntery
from django.core.mail import send_mail
import os
class SendMessages(ListView):
model = AdressEntery
template_name = "main/sendMessage.html"
fields = ['phoneNumber']
context_object_name = "obj"
    def get_queryset(self, *args, **kwargs):
        """
        :usr: the currently logged-in user, whose address is used as the sender email
        :message: message text taken from the HTML form input
        :to_email: target email address
        :subject: email subject/title
        :user_password: sender password read from the DJANGO_EMAIL_PASSWORD environment variable
        :connection: connection settings passed to send_mail
        - if the Send button is clicked, send_mail sends the email
        - *if sending fails, allow less secure apps on the email account and disable antivirus (e.g. Avast) interception
        """
queryset = super(SendMessages, self).get_queryset()
usr = self.request.user #get an email from the currently logged in user
message = self.request.GET.get('text') #html input message
to_email = [self.request.GET.get('email'),] # input target email
subject = self.request.GET.get('subject') #email subject/title
user_password = os.getenv("DJANGO_EMAIL_PASSWORD")#docker env get password
        connection = [usr.email, user_password, False,]  # connection settings for send_mail; if it fails, allow less secure apps on the email account and disable antivirus (e.g. Avast) interception
if self.request.GET.get('send') == "Send": #pressed button
            send_mail(subject, message, usr.email, to_email, connection)  # send the email
return queryset | []
| []
| [
"DJANGO_EMAIL_PASSWORD"
]
| [] | ["DJANGO_EMAIL_PASSWORD"] | python | 1 | 0 | |
api/utils/logger.py | import logging
import os
def get_logger(name='default', level='INFO', log_path=None, log_format='%(asctime)s - %(levelname)s - %(pathname)s - Line: %(lineno)d - ', prefix=""):
if log_path is None:
log_path = os.getenv('LOG_PATH', '/tmp')
logger = logging.getLogger(name)
formatter = logging.Formatter(fmt=log_format+str(prefix)+" %(message)s")
file_handler = logging.FileHandler(log_path + '/' + name + ".log")
file_handler.setFormatter(formatter)
logger.handlers = []
logger.addHandler(file_handler)
logger.setLevel(level)
logger.propagate = False
return logger | []
| []
| [
"LOG_PATH"
]
| [] | ["LOG_PATH"] | python | 1 | 0 | |
test/test_unicode.py | """This test needs to connect to an actual server.
Set the environment variable TEST_SA_CONNSTR to something like:
mssql+turbodbc://username:pwd@localhost:1433/test?driver=FreeTDS
"""
import os
import pytest
from sqlalchemy import create_engine
@pytest.mark.skipif(os.environ.get('TEST_SA_CONNSTR') is None,
reason="No environment variable for test db connection")
class TestUnicode:
@pytest.fixture
def connection(self):
"""Connection fixture to ensure rollback in case of failures."""
connstr = os.environ.get('TEST_SA_CONNSTR')
engine = create_engine(connstr)
cnxn = engine.connect()
tx = cnxn.begin()
yield cnxn
tx.rollback()
cnxn.close()
engine.dispose()
def test_sa_insert_unicode_emoji(self, connection):
"""Test SQLAlchemy connection insert emoji."""
text = u'test 1 2 3 \U0001F602 foo bar'
connection.execute("CREATE TABLE dbo.test_unicode (val NVARCHAR(100))")
connection.execute("INSERT INTO dbo.test_unicode (val) VALUES (?)", (text,))
result = connection.execute("SELECT * FROM dbo.test_unicode")
rows = result.fetchall()
assert rows[-1][0] == text
| []
| []
| [
"TEST_SA_CONNSTR"
]
| [] | ["TEST_SA_CONNSTR"] | python | 1 | 0 | |
src/autoscaler/operator/cmd/operator/operator_suite_test.go | package main_test
import (
"database/sql"
"io/ioutil"
"net/http"
"os"
"os/exec"
"regexp"
"strings"
"testing"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
"github.com/onsi/gomega/ghttp"
_ "github.com/go-sql-driver/mysql"
"gopkg.in/yaml.v2"
"autoscaler/cf"
"autoscaler/db"
"autoscaler/models"
"autoscaler/operator/config"
)
var (
prPath string
cfg config.Config
configFile *os.File
cfServer *ghttp.Server
healthHttpClient *http.Client
healthport int
appSummaryRegPath = regexp.MustCompile(`^/v2/apps/.*/summary$`)
)
func TestOperator(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Operator Main Suite")
}
var _ = SynchronizedBeforeSuite(func() []byte {
pr, err := gexec.Build("autoscaler/operator/cmd/operator", "-race")
Expect(err).NotTo(HaveOccurred())
return []byte(pr)
}, func(pathsByte []byte) {
prPath = string(pathsByte)
initConfig()
healthHttpClient = &http.Client{}
configFile = writeConfig(&cfg)
})
var _ = SynchronizedAfterSuite(func() {
os.Remove(configFile.Name())
}, func() {
gexec.CleanupBuildArtifacts()
})
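// Hedged note, not part of the original file: initConfig below fails fast unless the
// DBURL environment variable points at a reachable database, e.g. with placeholder
// credentials and a MySQL-style DSN (the MySQL driver is imported above):
//
//	export DBURL="root:password@tcp(localhost:3306)/autoscaler"
//	ginkgo -p src/autoscaler/operator/cmd/operator
//
// The DSN shape and package path are assumptions, not values taken from this repository.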
func initConfig() {
cfServer = ghttp.NewServer()
cfServer.RouteToHandler("GET", "/v2/info", ghttp.RespondWithJSONEncoded(http.StatusOK,
cf.Endpoints{
TokenEndpoint: cfServer.URL(),
DopplerEndpoint: strings.Replace(cfServer.URL(), "http", "ws", 1),
}))
cfServer.RouteToHandler("POST", "/oauth/token", ghttp.RespondWithJSONEncoded(http.StatusOK, cf.Tokens{}))
appState := models.AppStatusStarted
cfServer.RouteToHandler("GET", appSummaryRegPath, ghttp.RespondWithJSONEncoded(http.StatusOK,
models.AppEntity{Instances: 2, State: &appState}))
cfg.CF = cf.CFConfig{
API: cfServer.URL(),
ClientID: "client-id",
Secret: "secret",
}
healthport = 8000 + GinkgoParallelProcess()
cfg.Health.Port = healthport
cfg.Logging.Level = "debug"
dbURL := os.Getenv("DBURL")
if dbURL == "" {
Fail("environment variable $DBURL is not set")
}
cfg.InstanceMetricsDB.DB = db.DatabaseConfig{
URL: dbURL,
MaxOpenConnections: 10,
MaxIdleConnections: 5,
ConnectionMaxLifetime: 10 * time.Second,
}
cfg.InstanceMetricsDB.RefreshInterval = 12 * time.Hour
cfg.InstanceMetricsDB.CutoffDuration = 20 * 24 * time.Hour
cfg.AppMetricsDB.DB = db.DatabaseConfig{
URL: dbURL,
MaxOpenConnections: 10,
MaxIdleConnections: 5,
ConnectionMaxLifetime: 10 * time.Second,
}
cfg.AppMetricsDB.RefreshInterval = 12 * time.Hour
cfg.AppMetricsDB.CutoffDuration = 20 * 24 * time.Hour
cfg.ScalingEngineDB.DB = db.DatabaseConfig{
URL: dbURL,
MaxOpenConnections: 10,
MaxIdleConnections: 5,
ConnectionMaxLifetime: 10 * time.Second,
}
cfg.ScalingEngineDB.RefreshInterval = 12 * time.Hour
cfg.ScalingEngineDB.CutoffDuration = 20 * 24 * time.Hour
cfg.ScalingEngine = config.ScalingEngineConfig{
URL: "http://localhost:8082",
SyncInterval: 10 * time.Second,
}
cfg.Scheduler = config.SchedulerConfig{
URL: "http://localhost:8083",
SyncInterval: 10 * time.Second,
}
cfg.DBLock.DB = db.DatabaseConfig{
URL: os.Getenv("DBURL"),
MaxOpenConnections: 10,
MaxIdleConnections: 5,
ConnectionMaxLifetime: 10 * time.Second,
}
cfg.DBLock.LockTTL = 15 * time.Second
cfg.DBLock.LockRetryInterval = 5 * time.Second
cfg.AppSyncer.DB = db.DatabaseConfig{
URL: dbURL,
MaxOpenConnections: 10,
MaxIdleConnections: 5,
ConnectionMaxLifetime: 10 * time.Second,
}
cfg.AppSyncer.SyncInterval = 60 * time.Second
cfg.HttpClientTimeout = 10 * time.Second
cfg.Health.HealthCheckUsername = "operatorhealthcheckuser"
cfg.Health.HealthCheckPassword = "operatorhealthcheckuser"
}
func writeConfig(c *config.Config) *os.File {
cfg, err := ioutil.TempFile("", "pr")
Expect(err).NotTo(HaveOccurred())
defer cfg.Close()
var bytes []byte
bytes, err = yaml.Marshal(c)
Expect(err).NotTo(HaveOccurred())
_, err = cfg.Write(bytes)
Expect(err).NotTo(HaveOccurred())
return cfg
}
type OperatorRunner struct {
configPath string
startCheck string
acquiredLockCheck string
Session *gexec.Session
}
func NewOperatorRunner() *OperatorRunner {
return &OperatorRunner{
configPath: configFile.Name(),
startCheck: "operator.started",
acquiredLockCheck: "operator.lock.acquire-lock-succeeded",
}
}
func (pr *OperatorRunner) Start() {
prSession, err := gexec.Start(exec.Command(
prPath,
"-c",
pr.configPath,
),
gexec.NewPrefixedWriter("\x1b[32m[o]\x1b[32m[pr]\x1b[0m ", GinkgoWriter),
gexec.NewPrefixedWriter("\x1b[91m[e]\x1b[32m[pr]\x1b[0m ", GinkgoWriter),
)
Expect(err).NotTo(HaveOccurred())
pr.Session = prSession
}
func (pr *OperatorRunner) Interrupt() {
if pr.Session != nil {
pr.Session.Interrupt().Wait(5 * time.Second)
}
}
func (pr *OperatorRunner) KillWithFire() {
if pr.Session != nil {
pr.Session.Kill().Wait(5 * time.Second)
}
}
func (pr *OperatorRunner) ClearLockDatabase() {
database, err := db.GetConnection(os.Getenv("DBURL"))
Expect(err).NotTo(HaveOccurred())
lockDB, err := sql.Open(database.DriverName, database.DSN)
Expect(err).NotTo(HaveOccurred())
_, err = lockDB.Exec("DELETE FROM operator_lock")
Expect(err).NotTo(HaveOccurred())
}
| [
"\"DBURL\"",
"\"DBURL\"",
"\"DBURL\""
]
| []
| [
"DBURL"
]
| [] | ["DBURL"] | go | 1 | 0 | |
leancloud/client_test.go | package leancloud
import (
"errors"
"os"
"testing"
)
func TestNewClient(t *testing.T) {
appID, appKey, masterKey, serverURL := os.Getenv("LEANCLOUD_APP_ID"), os.Getenv("LEANCLOUD_APP_KEY"), os.Getenv("LEANCLOUD_APP_MASTER_KEY"), os.Getenv("LEANCLOUD_API_SERVER")
options := &ClientOptions{
AppID: appID,
AppKey: appKey,
MasterKey: masterKey,
ServerURL: serverURL,
}
t.Run("Production", func(t *testing.T) {
client := NewClient(options)
if client == nil {
t.Fatal(errors.New("unable to create a client"))
}
if client.appID != appID {
t.Fatal(errors.New("LEANCLOUD_APP_ID unmatch"))
}
if client.appKey != appKey {
t.Fatal(errors.New("LEANCLOUD_APP_KEY unmatch"))
}
if client.masterKey != masterKey {
t.Fatal(errors.New("LEANCLOUD_APP_MASTER_KEY unmatch"))
}
})
t.Run("Debug", func(t *testing.T) {
if err := os.Setenv("LEANCLOUD_DEBUG", "true"); err != nil {
t.Fatal("unable to set debugging flag")
}
client := NewClient(options)
if client == nil {
t.Fatal(errors.New("unable to create a client"))
}
if client.appID != appID {
t.Fatal(errors.New("LEANCLOUD_APP_ID unmatch"))
}
if client.appKey != appKey {
t.Fatal(errors.New("LEANCLOUD_APP_KEY unmatch"))
}
if client.masterKey != masterKey {
t.Fatal(errors.New("LEANCLOUD_APP_MASTER_KEY unmatch"))
}
if client.requestLogger == nil {
t.Fatal(errors.New("unable to set logger"))
}
})
}
func TestNewEnvClient(t *testing.T) {
appID, appKey, masterKey, serverURL := os.Getenv("LEANCLOUD_APP_ID"), os.Getenv("LEANCLOUD_APP_KEY"), os.Getenv("LEANCLOUD_APP_MASTER_KEY"), os.Getenv("LEANCLOUD_API_SERVER")
t.Run("Production", func(t *testing.T) {
client := NewEnvClient()
if client == nil {
t.Fatal(errors.New("unable to create a client"))
}
if client.appID != appID {
t.Fatal(errors.New("LEANCLOUD_APP_ID unmatch"))
}
if client.appKey != appKey {
t.Fatal(errors.New("LEANCLOUD_APP_KEY unmatch"))
}
if client.masterKey != masterKey {
t.Fatal(errors.New("LEANCLOUD_APP_MASTER_KEY unmatch"))
}
if client.serverURL != serverURL {
t.Fatal(errors.New("LEANCLOUD_API_SERVER unmatch"))
}
})
t.Run("Debug", func(t *testing.T) {
if err := os.Setenv("LEANCLOUD_DEBUG", "true"); err != nil {
t.Fatal("unable to set debugging flag")
}
client := NewEnvClient()
if client == nil {
t.Fatal(errors.New("unable to create a client"))
}
if client.appID != appID {
t.Fatal(errors.New("LEANCLOUD_APP_ID unmatch"))
}
if client.appKey != appKey {
t.Fatal(errors.New("LEANCLOUD_APP_KEY unmatch"))
}
if client.masterKey != masterKey {
t.Fatal(errors.New("LEANCLOUD_APP_MASTER_KEY unmatch"))
}
if client.requestLogger == nil {
t.Fatal(errors.New("unable to set logger"))
}
})
}
func TestClientClass(t *testing.T) {
client := &Client{}
class := client.Class("class")
if class.c != client {
t.Fatal(errors.New("client unmatch"))
}
if class.Name != "class" {
t.Fatal(errors.New("name of class unmatch"))
}
}
func TestClientObject(t *testing.T) {
client := &Client{}
ref := client.Class("class").ID("f47ac10b58cc4372a5670e02b2c3d479")
if ref.c != client {
t.Fatal(errors.New("client mismatch"))
}
if ref.class != "class" {
t.Fatal(errors.New("name of class mismatch"))
}
if ref.ID != "f47ac10b58cc4372a5670e02b2c3d479" {
t.Fatal(errors.New("ID mismatch"))
}
}
| [
"\"LEANCLOUD_APP_ID\"",
"\"LEANCLOUD_APP_KEY\"",
"\"LEANCLOUD_APP_MASTER_KEY\"",
"\"LEANCLOUD_API_SERVER\"",
"\"LEANCLOUD_APP_ID\"",
"\"LEANCLOUD_APP_KEY\"",
"\"LEANCLOUD_APP_MASTER_KEY\"",
"\"LEANCLOUD_API_SERVER\""
]
| []
| [
"LEANCLOUD_APP_ID",
"LEANCLOUD_API_SERVER",
"LEANCLOUD_APP_MASTER_KEY",
"LEANCLOUD_APP_KEY"
]
| [] | ["LEANCLOUD_APP_ID", "LEANCLOUD_API_SERVER", "LEANCLOUD_APP_MASTER_KEY", "LEANCLOUD_APP_KEY"] | go | 4 | 0 | |
api/user.go | /*
Copyright (c) 2016 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
"fmt"
"net/http"
"os"
"regexp"
"strconv"
"strings"
"github.com/vmware/harbor/dao"
"github.com/vmware/harbor/models"
"github.com/vmware/harbor/utils/log"
)
// UserAPI handles request to /api/users/{}
type UserAPI struct {
BaseAPI
currentUserID int
userID int
SelfRegistration bool
IsAdmin bool
AuthMode string
}
type passwordReq struct {
OldPassword string `json:"old_password"`
NewPassword string `json:"new_password"`
}
// Prepare validates the URL and params
func (ua *UserAPI) Prepare() {
authMode := strings.ToLower(os.Getenv("AUTH_MODE"))
if authMode == "" {
authMode = "db_auth"
}
ua.AuthMode = authMode
selfRegistration := strings.ToLower(os.Getenv("SELF_REGISTRATION"))
if selfRegistration == "on" {
ua.SelfRegistration = true
}
if ua.Ctx.Input.IsPost() {
sessionUserID := ua.GetSession("userId")
_, _, ok := ua.Ctx.Request.BasicAuth()
if sessionUserID == nil && !ok {
return
}
}
ua.currentUserID = ua.ValidateUser()
id := ua.Ctx.Input.Param(":id")
if id == "current" {
ua.userID = ua.currentUserID
} else if len(id) > 0 {
var err error
ua.userID, err = strconv.Atoi(id)
if err != nil {
log.Errorf("Invalid user id, error: %v", err)
ua.CustomAbort(http.StatusBadRequest, "Invalid user Id")
}
userQuery := models.User{UserID: ua.userID}
u, err := dao.GetUser(userQuery)
if err != nil {
log.Errorf("Error occurred in GetUser, error: %v", err)
ua.CustomAbort(http.StatusInternalServerError, "Internal error.")
}
if u == nil {
log.Errorf("User with Id: %d does not exist", ua.userID)
ua.CustomAbort(http.StatusNotFound, "")
}
}
var err error
ua.IsAdmin, err = dao.IsAdminRole(ua.currentUserID)
if err != nil {
log.Errorf("Error occurred in IsAdminRole:%v", err)
ua.CustomAbort(http.StatusInternalServerError, "Internal error.")
}
}
// Get ...
func (ua *UserAPI) Get() {
if ua.userID == 0 { //list users
if !ua.IsAdmin {
log.Errorf("Current user, id: %d does not have admin role, can not list users", ua.currentUserID)
ua.RenderError(http.StatusForbidden, "User does not have admin role")
return
}
username := ua.GetString("username")
userQuery := models.User{}
if len(username) > 0 {
userQuery.Username = "%" + username + "%"
}
userList, err := dao.ListUsers(userQuery)
if err != nil {
log.Errorf("Failed to get data from database, error: %v", err)
ua.RenderError(http.StatusInternalServerError, "Failed to query from database")
return
}
ua.Data["json"] = userList
} else if ua.userID == ua.currentUserID || ua.IsAdmin {
userQuery := models.User{UserID: ua.userID}
u, err := dao.GetUser(userQuery)
if err != nil {
log.Errorf("Error occurred in GetUser, error: %v", err)
ua.CustomAbort(http.StatusInternalServerError, "Internal error.")
}
ua.Data["json"] = u
} else {
log.Errorf("Current user, id: %d does not have admin role, can not view other user's detail", ua.currentUserID)
ua.RenderError(http.StatusForbidden, "User does not have admin role")
return
}
ua.ServeJSON()
}
// Put ...
func (ua *UserAPI) Put() {
ldapAdminUser := (ua.AuthMode == "ldap_auth" && ua.userID == 1 && ua.userID == ua.currentUserID)
if !(ua.AuthMode == "db_auth" || ldapAdminUser) {
ua.CustomAbort(http.StatusForbidden, "")
}
if !ua.IsAdmin {
if ua.userID != ua.currentUserID {
log.Warning("Guests can only change their own account.")
ua.CustomAbort(http.StatusForbidden, "Guests can only change their own account.")
}
}
user := models.User{UserID: ua.userID}
ua.DecodeJSONReq(&user)
err := commonValidate(user)
if err != nil {
log.Warning("Bad request in change user profile: %v", err)
ua.RenderError(http.StatusBadRequest, "change user profile error:"+err.Error())
return
}
userQuery := models.User{UserID: ua.userID}
u, err := dao.GetUser(userQuery)
if err != nil {
log.Errorf("Error occurred in GetUser, error: %v", err)
ua.CustomAbort(http.StatusInternalServerError, "Internal error.")
}
if u == nil {
log.Errorf("User with Id: %d does not exist", ua.userID)
ua.CustomAbort(http.StatusNotFound, "")
}
if u.Email != user.Email {
emailExist, err := dao.UserExists(user, "email")
if err != nil {
log.Errorf("Error occurred in change user profile: %v", err)
ua.CustomAbort(http.StatusInternalServerError, "Internal error.")
}
if emailExist {
log.Warning("email has already been used!")
ua.RenderError(http.StatusConflict, "email has already been used!")
return
}
}
if err := dao.ChangeUserProfile(user); err != nil {
log.Errorf("Failed to update user profile, error: %v", err)
ua.CustomAbort(http.StatusInternalServerError, err.Error())
}
}
// Post ...
func (ua *UserAPI) Post() {
if !(ua.AuthMode == "db_auth") {
ua.CustomAbort(http.StatusForbidden, "")
}
if !(ua.SelfRegistration || ua.IsAdmin) {
log.Warning("Registration can only be used by admin role user when self-registration is off.")
ua.CustomAbort(http.StatusForbidden, "")
}
user := models.User{}
ua.DecodeJSONReq(&user)
err := validate(user)
if err != nil {
log.Warning("Bad request in Register: %v", err)
ua.RenderError(http.StatusBadRequest, "register error:"+err.Error())
return
}
userExist, err := dao.UserExists(user, "username")
if err != nil {
log.Errorf("Error occurred in Register: %v", err)
ua.CustomAbort(http.StatusInternalServerError, "Internal error.")
}
if userExist {
log.Warning("username has already been used!")
ua.RenderError(http.StatusConflict, "username has already been used!")
return
}
emailExist, err := dao.UserExists(user, "email")
if err != nil {
log.Errorf("Error occurred in change user profile: %v", err)
ua.CustomAbort(http.StatusInternalServerError, "Internal error.")
}
if emailExist {
log.Warning("email has already been used!")
ua.RenderError(http.StatusConflict, "email has already been used!")
return
}
userID, err := dao.Register(user)
if err != nil {
log.Errorf("Error occurred in Register: %v", err)
ua.CustomAbort(http.StatusInternalServerError, "Internal error.")
}
ua.Redirect(http.StatusCreated, strconv.FormatInt(userID, 10))
}
// Delete ...
func (ua *UserAPI) Delete() {
if !ua.IsAdmin {
log.Warningf("current user, id: %d does not have admin role, can not remove user", ua.currentUserID)
ua.RenderError(http.StatusForbidden, "User does not have admin role")
return
}
if ua.currentUserID == ua.userID {
ua.CustomAbort(http.StatusForbidden, "can not delete yourself")
}
var err error
err = dao.DeleteUser(ua.userID)
if err != nil {
log.Errorf("Failed to delete data from database, error: %v", err)
ua.RenderError(http.StatusInternalServerError, "Failed to delete User")
return
}
}
// ChangePassword handles PUT to /api/users/{}/password
func (ua *UserAPI) ChangePassword() {
ldapAdminUser := (ua.AuthMode == "ldap_auth" && ua.userID == 1 && ua.userID == ua.currentUserID)
if !(ua.AuthMode == "db_auth" || ldapAdminUser) {
ua.CustomAbort(http.StatusForbidden, "")
}
if !ua.IsAdmin {
if ua.userID != ua.currentUserID {
log.Error("Guests can only change their own account.")
ua.CustomAbort(http.StatusForbidden, "Guests can only change their own account.")
}
}
var req passwordReq
ua.DecodeJSONReq(&req)
if req.OldPassword == "" {
log.Error("Old password is blank")
ua.CustomAbort(http.StatusBadRequest, "Old password is blank")
}
queryUser := models.User{UserID: ua.userID, Password: req.OldPassword}
user, err := dao.CheckUserPassword(queryUser)
if err != nil {
log.Errorf("Error occurred in CheckUserPassword: %v", err)
ua.CustomAbort(http.StatusInternalServerError, "Internal error.")
}
if user == nil {
log.Warning("Password input is not correct")
ua.CustomAbort(http.StatusForbidden, "old_password_is_not_correct")
}
if req.NewPassword == "" {
ua.CustomAbort(http.StatusBadRequest, "please_input_new_password")
}
updateUser := models.User{UserID: ua.userID, Password: req.NewPassword, Salt: user.Salt}
err = dao.ChangeUserPassword(updateUser, req.OldPassword)
if err != nil {
log.Errorf("Error occurred in ChangeUserPassword: %v", err)
ua.CustomAbort(http.StatusInternalServerError, "Internal error.")
}
}
// ToggleUserAdminRole handles PUT api/users/{}/sysadmin
func (ua *UserAPI) ToggleUserAdminRole() {
if !ua.IsAdmin {
log.Warningf("current user, id: %d does not have admin role, can not update other user's role", ua.currentUserID)
ua.RenderError(http.StatusForbidden, "User does not have admin role")
return
}
userQuery := models.User{UserID: ua.userID}
ua.DecodeJSONReq(&userQuery)
if err := dao.ToggleUserAdminRole(userQuery.UserID, userQuery.HasAdminRole); err != nil {
log.Errorf("Error occurred in ToggleUserAdminRole: %v", err)
ua.CustomAbort(http.StatusInternalServerError, "Internal error.")
}
}
// validate is only used when a user registers; it checks the username and password in addition to the common fields
func validate(user models.User) error {
if isIllegalLength(user.Username, 0, 20) {
return fmt.Errorf("Username with illegal length.")
}
if isContainIllegalChar(user.Username, []string{",", "~", "#", "$", "%"}) {
return fmt.Errorf("Username contains illegal characters.")
}
if isIllegalLength(user.Password, 0, 20) {
return fmt.Errorf("Password with illegal length.")
}
if err := commonValidate(user); err != nil {
return err
}
return nil
}
// commonValidate validates the email, realname and comment fields when a user registers or changes their profile
func commonValidate(user models.User) error {
if len(user.Email) > 0 {
if m, _ := regexp.MatchString(`^(([^<>()[\]\\.,;:\s@\"]+(\.[^<>()[\]\\.,;:\s@\"]+)*)|(\".+\"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$`, user.Email); !m {
return fmt.Errorf("Email with illegal format.")
}
} else {
return fmt.Errorf("Email can't be empty")
}
if isIllegalLength(user.Realname, 0, 20) {
return fmt.Errorf("Realname with illegal length.")
}
if isContainIllegalChar(user.Realname, []string{",", "~", "#", "$", "%"}) {
return fmt.Errorf("Realname contains illegal characters.")
}
if isIllegalLength(user.Comment, -1, 30) {
return fmt.Errorf("Comment with illegal length.")
}
return nil
}
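// isIllegalLength reports whether the length of s falls outside the allowed range; min == -1 checks only the upper bound and max == -1 checks only the lower bound.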
func isIllegalLength(s string, min int, max int) bool {
if min == -1 {
return (len(s) > max)
}
if max == -1 {
return (len(s) <= min)
}
return (len(s) < min || len(s) > max)
}
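// isContainIllegalChar reports whether s contains any of the strings in illegalChar.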
func isContainIllegalChar(s string, illegalChar []string) bool {
for _, c := range illegalChar {
if strings.Index(s, c) >= 0 {
return true
}
}
return false
}
| [
"\"AUTH_MODE\"",
"\"SELF_REGISTRATION\""
]
| []
| [
"AUTH_MODE",
"SELF_REGISTRATION"
]
| [] | ["AUTH_MODE", "SELF_REGISTRATION"] | go | 2 | 0 | |
python/arcpy_startup.py | # -*- coding: UTF-8 -*-
import os
import sys
import re
import subprocess
# Import fmeobjects
if os.path.exists(r'C:\Program Files\FME\fmeobjects\python27'):
sys.path.append(r'C:\Program Files\FME\fmeobjects\python27')
try:
import arc_utils
except Exception:
pass
try:
import arcpy
from arcpy.sa import * # noqa: F403,F401
arcpy.env.overwriteOutput = True
try:
arcpy.CheckOutExtension('Spatial')
except Exception:
pass
arcpy.ImportToolbox(
os.path.join(
os.environ.get('USERPROFILE'),
'aml/python/Nordpil.tbx'
)
)
arcpy.env.workspace = r'c:\data\ws'
arcpy.env.scratchworkspace = r'f:\temp\scratch.gdb'
if os.environ.get('COMPUTERNAME', '').lower() == '5cd7232n5d80f9':
arcpy.env.workspace = r'c:\project\work'
arcpy.env.scratchworkspace = r'c:\project\work\scratch.gdb'
except Exception:
print 'arcpy not installed'
def curCenterCoord():
e = curExtent()
return [int(e.XMin + e.width / 2), int(e.YMin + e.height / 2)]
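# Open the LNS span map in Firefox, centred on the current extent, with a zoom level derived from the extent height.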
def lnsspan():
xy = curCenterCoord()
xy = [float(c) / 100 for c in xy]
height = curExtent().YMax - curExtent().YMin
if height < 250:
zoom = 5
elif height < 320:
zoom = 4
elif height < 577:
zoom = 3
elif height < 1800:
zoom = 2
else:
zoom = 1
# Build the URL as a single string; subprocess.call rejects a tuple argument
url = (
'https://span.skanova.se/span/api/content/span-web/lns.html'
'#template=undefined&zoom={}&lon={}&lat={}'
'&layers=BTTTTTTTTTTTTTTTTTTTTFFFFFFFFFFTTTTTT'
).format(zoom, xy[0], xy[1])
subprocess.call([
r'C:\Program Files\Mozilla Firefox\firefox.exe',
url
])
def curCenter():
xy = curCenterCoord()
print 'N = %s -- E = %s' % (xy[1], xy[0])
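# Recentre the active data frame on a coordinate pair given as an [x, y] list or as a tab/comma/space separated string.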
def gotoXY(s):
if type(s) is list and len(s) == 2:
if not(type(s) is float):
s = [float(c) for c in s]
else:
s = re.sub(r'[^\x00-\x7f]+', ' ', s)
if s.find(' ') > -1:
s = s.replace('Meters', '')
s = s.replace('meters', '')
s = s.split(' ')
s = [c.replace(',', '.') for c in s]
s = [c.replace(' ', '') for c in s]
s = [float(c) for c in s]
else:
s = re.sub(r'[^\d|^,|^.|^ |^\t]', '', s) # trim any non number chars
if re.search('\t', s):
s = s.split('\t')
elif re.subn(',', '', s)[1] == 1:
s = s.split(',')
else:
s = s.split(' ')
s = [c.strip() for c in s] # just plain strip
s = [c.replace(',', '.') for c in s] # normalize to dot decimal separator
s = [c.replace(' ', '') for c in s] # remove spaces
s = [float(c) for c in s]
print s
df = curFrame()
newExtent = df.extent
newExtent.XMin, newExtent.YMin = s[0] - df.extent.width / 2, s[1] - df.extent.height / 2
newExtent.XMax, newExtent.YMax = s[0] + df.extent.width / 2, s[1] + df.extent.height / 2
df.extent = newExtent
arcpy.RefreshActiveView()
def updateNames():
for l in arcpy.mapping.ListLayers(curDoc()):
if hasattr(l, 'dataSource') and l.dataSource != '' and l.workspacePath[-4:] == '.sde':
stage = [f for f in l.dataSource.split('\\') if f.lower().find('stage') > -1][0].split('@')[0].lower()
l.name = re.sub('(?i)..stage', '', l.name).strip()
if l.name.lower().find(l.datasetName.split('.')[0].lower()) == -1:
l.name = '{} {}'.format(l.datasetName.split('.')[0].upper(), l.name)
if l.name.lower().find(stage) == -1:
l.name = '{} {}'.format(stage, l.name)
arcpy.RefreshTOC()
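# Repoint all SDE layers in the current document to another stage workspace ('ow'/'dl', or toggle when None) and rename the layers to match.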
def switchSDE(toStage):
toStage = toStage.lower()
if toStage == 'ow':
toStage = 'owstage'
elif toStage == 'dl':
toStage = 'dlstage'
for l in arcpy.mapping.ListLayers(curDoc()):
if hasattr(l, 'dataSource') and l.dataSource != '' and l.workspacePath[-4:] == '.sde':
try:
if toStage is None:
if l.workspacePath.find('owstage') != -1:
newWs = l.workspacePath.lower().replace('owstage', 'dlstage')
else:
newWs = l.workspacePath.lower().replace('dlstage', 'owstage')
else:
newWs = l.workspacePath.lower().replace('owstage', toStage)
newWs = newWs.lower().replace('dlstage', toStage)
print('Updating %s to %s' % (l.name, newWs))
l.replaceDataSource(
newWs,
'SDE_WORKSPACE',
l.datasetName
)
if re.search('\w\wstage', l.name, re.IGNORECASE):
l.name = re.sub('\w\wstage', toStage, l.name, flags=re.IGNORECASE)
else:
l.name = toStage + ' ' + l.name
except Exception as exc:
print('FAILED UPDATING %s to %s' % (l.name, newWs))
print exc
print sys.exc_info()
updateNames()
def fixSde():
for l in arcpy.mapping.ListLayers(curDoc()):
if hasattr(l, 'dataSource') and l.dataSource != '' and l.workspacePath[-4:] == '.sde':
oldStage = l.dataSource.split(u'\\')[1][11:12]
newStage = int(oldStage) + 1
if newStage == 7:
newStage = 1
newWs = l.workspacePath.replace('sde%s' % str(oldStage), 'sde%s' % str(newStage))
print('Updating %s to %s' % (l.name, newWs))
l.replaceDataSource(
newWs,
'SDE_WORKSPACE',
l.datasetName
)
arcpy.RefreshTOC()
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def wcd(newDir):
# Uses wcd to change directory
subprocess.call([
'c:/winbin/wcd/wcdwin64.exe', '-i', '-w', newDir
])
thisDir = ''
with open(os.path.join(os.environ['WCDHOME'], 'wcdgo.bat'), 'r') as wcdFile:
try:
thisDir = [x for x in wcdFile.readlines() if x[0:3] == 'cd '][0].split('"')[1]
os.chdir(thisDir)
except Exception:
return False
def gPing(msg, isArc=0):
try:
if (isArc == 1):
subprocess.call([
'cmd.exe', '/c', 'start', 'growlnotify.exe', '/p:2',
'/t:"ArcPy processing on ' + os.environ.get('COMPUTERNAME') + '"',
r'/ai:"%s\bin\icon_arcgis.png"' % os.environ['HOME'], msg
])
else:
if 'fmeobjects' in sys.modules or 'fme' in sys.modules:
subprocess.call([
'cmd.exe', '/c', 'start', 'growlnotify.exe', '/p:2',
'/t:"FME processing on ' + os.environ.get('COMPUTERNAME') + '"',
r'/ai:"%s\bin\icon_fme.png"' % os.environ['HOME'], msg
])
else:
subprocess.call([
'cmd.exe', '/c', 'start', 'growlnotify.exe', '/p:2',
'/t:"Python processing on ' + os.environ.get('COMPUTERNAME') + '"',
r'/ai:"%s\bin\icon_python.png"' % os.environ['HOME'], msg
])
except RuntimeError:
subprocess.call([
'msg.exe', os.environ['USERNAME'], msg
])
def curExtent():
return arcpy.mapping.ListDataFrames(arcpy.mapping.MapDocument("CURRENT"))[0].extent
def curExtentWkt():
e = curExtent()
return "sde.st_polygon ('polygon (({}))', {})".format(
'{} {}, {} {}, {} {}, {} {}, {} {}'.format(
e.XMax, e.YMin,
e.XMin, e.YMin,
e.XMin, e.YMax,
e.XMax, e.YMax,
e.XMax, e.YMin,
),
curExtent().spatialReference.PCSCode
)
def curExtentWktSql():
return "sde.st_within(SHAPE, {}) = 1".format(curExtentWkt())
def curDoc():
return arcpy.mapping.MapDocument("CURRENT")
def isArcMap():
try:
arcpy.mapping.MapDocument("CURRENT")
return True
except Exception:
return False
def curFrame():
return arcpy.mapping.ListDataFrames(arcpy.mapping.MapDocument("CURRENT"))[0]
def curProjection():
return arcpy.mapping.ListDataFrames(arcpy.mapping.MapDocument("CURRENT"))[0].spatialReference.exportToString()
def ExtentToFeatureclass(fcName):
pointArray = arcpy.Array()
node = arcpy.Point()
node.X = curExtent().XMin
node.Y = curExtent().YMax
pointArray.add(node)
node.X = curExtent().XMax
node.Y = curExtent().YMax
pointArray.add(node)
node.X = curExtent().XMax
node.Y = curExtent().YMin
pointArray.add(node)
node.X = curExtent().XMin
node.Y = curExtent().YMin
pointArray.add(node)
node.X = curExtent().XMin
node.Y = curExtent().YMax
pointArray.add(node)
box = arcpy.Polyline(pointArray, curProjection())
arcpy.CopyFeatures_management(box, fcName)
return 'Prepared extent to fc: ' + fcName
try:
import pyreadline.rlmain
pyreadline.rlmain.config_path = r"~\pyreadlineconfig.ini"
import readline
import atexit
import pyreadline.unicode_helper
# Normally the codepage for pyreadline is set to be sys.stdout.encoding
# if you need to change this uncomment the following line
# pyreadline.unicode_helper.pyreadline_codepage="utf8"
except ImportError:
print "Module readline not available."
else:
# import tab completion functionality
import rlcompleter
# activate tab completion
readline.parse_and_bind("tab: complete")
readline.read_history_file()
atexit.register(readline.write_history_file)
del readline, rlcompleter, atexit
# VIM: let g:flake8_ignore=g:flake8_ignore . ",F401" | []
| []
| [
"USERNAME",
"WCDHOME",
"USERPROFILE",
"COMPUTERNAME",
"HOME"
]
| [] | ["USERNAME", "WCDHOME", "USERPROFILE", "COMPUTERNAME", "HOME"] | python | 5 | 0 | |
provision/kubernetes/provisioner.go | // Copyright 2016 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package kubernetes
import (
"context"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"net/url"
"os"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/tsuru/config"
"github.com/tsuru/tsuru/action"
"github.com/tsuru/tsuru/api/shutdown"
"github.com/tsuru/tsuru/app"
"github.com/tsuru/tsuru/app/bind"
"github.com/tsuru/tsuru/app/image"
tsuruErrors "github.com/tsuru/tsuru/errors"
"github.com/tsuru/tsuru/log"
tsuruNet "github.com/tsuru/tsuru/net"
"github.com/tsuru/tsuru/provision"
"github.com/tsuru/tsuru/provision/cluster"
"github.com/tsuru/tsuru/provision/dockercommon"
_ "github.com/tsuru/tsuru/provision/kubernetes/authplugin/gcpwithproxy" // import custom authplugin that have proxy support
tsuruv1 "github.com/tsuru/tsuru/provision/kubernetes/pkg/apis/tsuru/v1"
"github.com/tsuru/tsuru/provision/node"
"github.com/tsuru/tsuru/provision/pool"
"github.com/tsuru/tsuru/provision/servicecommon"
"github.com/tsuru/tsuru/servicemanager"
"github.com/tsuru/tsuru/set"
appTypes "github.com/tsuru/tsuru/types/app"
provTypes "github.com/tsuru/tsuru/types/provision"
volumeTypes "github.com/tsuru/tsuru/types/volume"
appsv1 "k8s.io/api/apps/v1"
apiv1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
k8sErrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" // gcp default auth plugin
"k8s.io/client-go/tools/remotecommand"
)
const (
provisionerName = "kubernetes"
defaultKubeAPITimeout = time.Minute
defaultPodReadyTimeout = time.Minute
defaultPodRunningTimeout = 10 * time.Minute
defaultDeploymentProgressTimeout = 10 * time.Minute
defaultAttachTimeoutAfterContainerFinished = time.Minute
defaultSidecarImageName = "tsuru/deploy-agent:0.8.4"
defaultPreStopSleepSeconds = 10
)
var defaultEphemeralStorageLimit = resource.MustParse("100Mi")
type kubernetesProvisioner struct {
mu sync.Mutex
clusterControllers map[string]*clusterController
}
var (
_ provision.Provisioner = &kubernetesProvisioner{}
_ provision.NodeProvisioner = &kubernetesProvisioner{}
_ provision.NodeContainerProvisioner = &kubernetesProvisioner{}
_ provision.MessageProvisioner = &kubernetesProvisioner{}
_ provision.SleepableProvisioner = &kubernetesProvisioner{}
_ provision.VolumeProvisioner = &kubernetesProvisioner{}
_ provision.BuilderDeploy = &kubernetesProvisioner{}
_ provision.BuilderDeployKubeClient = &kubernetesProvisioner{}
_ provision.InitializableProvisioner = &kubernetesProvisioner{}
_ provision.InterAppProvisioner = &kubernetesProvisioner{}
_ provision.HCProvisioner = &kubernetesProvisioner{}
_ provision.VersionsProvisioner = &kubernetesProvisioner{}
_ provision.LogsProvisioner = &kubernetesProvisioner{}
_ provision.MetricsProvisioner = &kubernetesProvisioner{}
_ provision.AutoScaleProvisioner = &kubernetesProvisioner{}
_ cluster.ClusteredProvisioner = &kubernetesProvisioner{}
_ provision.UpdatableProvisioner = &kubernetesProvisioner{}
mainKubernetesProvisioner *kubernetesProvisioner
)
func init() {
mainKubernetesProvisioner = &kubernetesProvisioner{
clusterControllers: map[string]*clusterController{},
}
provision.Register(provisionerName, func() (provision.Provisioner, error) {
return mainKubernetesProvisioner, nil
})
shutdown.Register(mainKubernetesProvisioner)
}
func GetProvisioner() *kubernetesProvisioner {
return mainKubernetesProvisioner
}
type kubernetesConfig struct {
LogLevel int
DeploySidecarImage string
DeployInspectImage string
APITimeout time.Duration
// PodReadyTimeout is the timeout for a pod to become ready after already
// running.
PodReadyTimeout time.Duration
// PodRunningTimeout is the timeout for a pod to become running, should
// include time necessary to pull remote image.
PodRunningTimeout time.Duration
// DeploymentProgressTimeout is the timeout for a deployment to
// successfully complete.
DeploymentProgressTimeout time.Duration
// AttachTimeoutAfterContainerFinished is the time tsuru will wait for an
// attach call to finish after the attached container has finished.
AttachTimeoutAfterContainerFinished time.Duration
// HeadlessServicePort is the port used in headless service, by default the
// same port number used for container is used.
HeadlessServicePort int
// RegisterNode if set will make tsuru add a node object to the kubernetes
// API. Otherwise tsuru will expect the node to be already registered.
RegisterNode bool
}
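// getKubeConfig loads the kubernetes:* settings from the tsuru configuration, applying the package defaults for any unset key.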
func getKubeConfig() kubernetesConfig {
conf := kubernetesConfig{}
conf.LogLevel, _ = config.GetInt("kubernetes:log-level")
conf.DeploySidecarImage, _ = config.GetString("kubernetes:deploy-sidecar-image")
if conf.DeploySidecarImage == "" {
conf.DeploySidecarImage = defaultSidecarImageName
}
conf.DeployInspectImage, _ = config.GetString("kubernetes:deploy-inspect-image")
if conf.DeployInspectImage == "" {
conf.DeployInspectImage = defaultSidecarImageName
}
apiTimeout, _ := config.GetFloat("kubernetes:api-timeout")
if apiTimeout != 0 {
conf.APITimeout = time.Duration(apiTimeout * float64(time.Second))
} else {
conf.APITimeout = defaultKubeAPITimeout
}
podReadyTimeout, _ := config.GetFloat("kubernetes:pod-ready-timeout")
if podReadyTimeout != 0 {
conf.PodReadyTimeout = time.Duration(podReadyTimeout * float64(time.Second))
} else {
conf.PodReadyTimeout = defaultPodReadyTimeout
}
podRunningTimeout, _ := config.GetFloat("kubernetes:pod-running-timeout")
if podRunningTimeout != 0 {
conf.PodRunningTimeout = time.Duration(podRunningTimeout * float64(time.Second))
} else {
conf.PodRunningTimeout = defaultPodRunningTimeout
}
deploymentTimeout, _ := config.GetFloat("kubernetes:deployment-progress-timeout")
if deploymentTimeout != 0 {
conf.DeploymentProgressTimeout = time.Duration(deploymentTimeout * float64(time.Second))
} else {
conf.DeploymentProgressTimeout = defaultDeploymentProgressTimeout
}
attachTimeout, _ := config.GetFloat("kubernetes:attach-after-finish-timeout")
if attachTimeout != 0 {
conf.AttachTimeoutAfterContainerFinished = time.Duration(attachTimeout * float64(time.Second))
} else {
conf.AttachTimeoutAfterContainerFinished = defaultAttachTimeoutAfterContainerFinished
}
conf.HeadlessServicePort, _ = config.GetInt("kubernetes:headless-service-port")
if conf.HeadlessServicePort == 0 {
conf.HeadlessServicePort, _ = strconv.Atoi(provision.WebProcessDefaultPort())
}
conf.RegisterNode, _ = config.GetBool("kubernetes:register-node")
return conf
}
func (p *kubernetesProvisioner) Initialize() error {
conf := getKubeConfig()
if conf.LogLevel > 0 {
// These flags are used by golang/glog package which in turn is used by
// kubernetes to control logging. Unfortunately it doesn't seem like
// there's a better way to control glog.
flag.CommandLine.Parse([]string{"-v", strconv.Itoa(conf.LogLevel), "-logtostderr"})
}
initLocalCluster()
err := initAllControllers(p)
if err == provTypes.ErrNoCluster {
return nil
}
return err
}
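// initLocalCluster registers a default "local" cluster and pool when tsuru is running inside a kubernetes cluster and neither has been configured yet.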
func initLocalCluster() {
ctx := context.Background()
if os.Getenv("KUBERNETES_SERVICE_HOST") == "" || os.Getenv("KUBERNETES_SERVICE_PORT") == "" {
return // not running inside a kubernetes cluster
}
log.Debugf("[kubernetes-provisioner] tsuru is running inside a kubernetes cluster")
clusters, err := servicemanager.Cluster.List(ctx)
if err != nil && err != provTypes.ErrNoCluster {
log.Errorf("[kubernetes-provisioner] could not list clusters: %s", err.Error())
return
}
if len(clusters) > 0 {
return
}
log.Debugf("[kubernetes-provisioner] no kubernetes clusters found, adding default")
err = servicemanager.Cluster.Create(ctx, provTypes.Cluster{
Name: "local",
Default: true,
Local: true,
Provisioner: provisionerName,
CustomData: map[string]string{
enableLogsFromAPIServerKey: "true",
disableDefaultNodeSelectorKey: "true",
disableUnitRegisterCmdKey: "true",
disableNodeContainers: "true",
},
})
if err != nil {
log.Errorf("[kubernetes-provisioner] could not create default cluster: %v", err)
}
pools, err := servicemanager.Pool.List(ctx)
if err != nil {
log.Errorf("[kubernetes-provisioner] could not list pools: %v", err)
}
if len(pools) > 0 {
return
}
log.Debugf("[kubernetes-provisioner] no pool found, adding default")
err = pool.AddPool(ctx, pool.AddPoolOptions{
Name: "local",
Provisioner: provisionerName,
Default: true,
})
if err != nil {
log.Errorf("[kubernetes-provisioner] could not create default pool: %v", err)
}
}
func (p *kubernetesProvisioner) InitializeCluster(c *provTypes.Cluster) error {
clusterClient, err := NewClusterClient(c)
if err != nil {
return err
}
stopClusterController(p, clusterClient)
_, err = getClusterController(p, clusterClient)
return err
}
func (p *kubernetesProvisioner) ValidateCluster(c *provTypes.Cluster) error {
multiErrors := tsuruErrors.NewMultiError()
if _, ok := c.CustomData[singlePoolKey]; ok && len(c.Pools) != 1 {
multiErrors.Add(errors.Errorf("only one pool is allowed to use entire cluster as single-pool. %d pools found", len(c.Pools)))
}
if c.KubeConfig != nil {
if len(c.Addresses) > 1 {
multiErrors.Add(errors.New("when kubeConfig is set, the addresses field must not be used"))
}
if c.CaCert != nil {
multiErrors.Add(errors.New("when kubeConfig is set, the cacert field must not be used"))
}
if c.ClientCert != nil {
multiErrors.Add(errors.New("when kubeConfig is set, the clientcert field must not be used"))
}
if c.ClientKey != nil {
multiErrors.Add(errors.New("when kubeConfig is set, the clientkey field must not be used"))
}
if c.KubeConfig.Cluster.Server == "" {
multiErrors.Add(errors.New("kubeConfig.cluster.server field is required"))
}
}
return multiErrors.ToError()
}
func (p *kubernetesProvisioner) ClusterHelp() provTypes.ClusterHelpInfo {
return provTypes.ClusterHelpInfo{
CustomDataHelp: clusterHelp,
ProvisionerHelp: "Represents a kubernetes cluster, the address parameter must point to a valid kubernetes apiserver endpoint.",
}
}
func (p *kubernetesProvisioner) DeleteCluster(ctx context.Context, c *provTypes.Cluster) error {
stopClusterControllerByName(p, c.Name)
return nil
}
func (p *kubernetesProvisioner) GetName() string {
return provisionerName
}
func (p *kubernetesProvisioner) Provision(ctx context.Context, a provision.App) error {
client, err := clusterForPool(ctx, a.GetPool())
if err != nil {
return err
}
return ensureAppCustomResourceSynced(ctx, client, a)
}
func (p *kubernetesProvisioner) Destroy(ctx context.Context, a provision.App) error {
client, err := clusterForPool(ctx, a.GetPool())
if err != nil {
return err
}
tclient, err := TsuruClientForConfig(client.restConfig)
if err != nil {
return err
}
app, err := tclient.TsuruV1().Apps(client.Namespace()).Get(ctx, a.GetName(), metav1.GetOptions{})
if err != nil {
return err
}
if err := p.removeResources(ctx, client, app, a); err != nil {
return err
}
return tclient.TsuruV1().Apps(client.Namespace()).Delete(ctx, a.GetName(), metav1.DeleteOptions{})
}
func (p *kubernetesProvisioner) removeResources(ctx context.Context, client *ClusterClient, tsuruApp *tsuruv1.App, app provision.App) error {
deps, err := allDeploymentsForAppNS(ctx, client, tsuruApp.Spec.NamespaceName, app)
if err != nil {
return err
}
svcs, err := allServicesForAppNS(ctx, client, tsuruApp.Spec.NamespaceName, app)
if err != nil {
return err
}
multiErrors := tsuruErrors.NewMultiError()
for _, dd := range deps {
err = cleanupSingleDeployment(ctx, client, &dd)
if err != nil {
multiErrors.Add(err)
}
}
for _, ss := range svcs {
err = client.CoreV1().Services(tsuruApp.Spec.NamespaceName).Delete(ctx, ss.Name, metav1.DeleteOptions{
PropagationPolicy: propagationPtr(metav1.DeletePropagationForeground),
})
if err != nil && !k8sErrors.IsNotFound(err) {
multiErrors.Add(errors.WithStack(err))
}
}
vols, err := servicemanager.Volume.ListByApp(ctx, app.GetName())
if err != nil {
multiErrors.Add(errors.WithStack(err))
} else {
for _, vol := range vols {
vol.Binds, err = servicemanager.Volume.Binds(ctx, &vol)
if err != nil {
continue
}
bindedToOtherApps := false
for _, b := range vol.Binds {
if b.ID.App != app.GetName() {
bindedToOtherApps = true
break
}
}
if !bindedToOtherApps {
err = deleteVolume(ctx, client, vol.Name)
if err != nil {
multiErrors.Add(errors.WithStack(err))
}
}
}
}
err = client.CoreV1().ServiceAccounts(tsuruApp.Spec.NamespaceName).Delete(ctx, tsuruApp.Spec.ServiceAccountName, metav1.DeleteOptions{})
if err != nil && !k8sErrors.IsNotFound(err) {
multiErrors.Add(errors.WithStack(err))
}
return multiErrors.ToError()
}
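// versionsForAppProcess returns the app versions that currently have deployments for the given process; an empty process matches all processes.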
func versionsForAppProcess(ctx context.Context, client *ClusterClient, a provision.App, process string) ([]appTypes.AppVersion, error) {
grouped, err := deploymentsDataForApp(ctx, client, a)
if err != nil {
return nil, err
}
versionSet := map[int]struct{}{}
for v, deps := range grouped.versioned {
for _, depData := range deps {
if process == "" || process == depData.process {
versionSet[v] = struct{}{}
}
}
}
var versions []appTypes.AppVersion
for v := range versionSet {
version, err := servicemanager.AppVersion.VersionByImageOrVersion(ctx, a, strconv.Itoa(v))
if err != nil {
return nil, err
}
versions = append(versions, version)
}
return versions, nil
}
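// changeState applies the given process state (start, stop, restart or sleep) to the provided version, or to every deployed version of the process when no version is given.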
func changeState(ctx context.Context, a provision.App, process string, version appTypes.AppVersion, state servicecommon.ProcessState, w io.Writer) error {
client, err := clusterForPool(ctx, a.GetPool())
if err != nil {
return err
}
err = ensureAppCustomResourceSynced(ctx, client, a)
if err != nil {
return err
}
var versions []appTypes.AppVersion
if version == nil {
versions, err = versionsForAppProcess(ctx, client, a, process)
if err != nil {
return err
}
} else {
versions = append(versions, version)
}
var multiErr tsuruErrors.MultiError
for _, v := range versions {
err = servicecommon.ChangeAppState(ctx, &serviceManager{
client: client,
writer: w,
}, a, process, state, v)
if err != nil {
multiErr.Add(errors.Wrapf(err, "unable to update version v%d", v.Version()))
}
}
return multiErr.ToError()
}
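// changeUnits adds or removes units for a process by patching the replica count of the matching deployment; when the version has no deployment yet it falls back to the service pipeline.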
func changeUnits(ctx context.Context, a provision.App, units int, processName string, version appTypes.AppVersion, w io.Writer) error {
if units == 0 {
return errors.New("cannot change 0 units")
}
client, err := clusterForPool(ctx, a.GetPool())
if err != nil {
return err
}
err = ensureAppCustomResourceSynced(ctx, client, a)
if err != nil {
return err
}
if processName == "" {
var cmdData dockercommon.ContainerCmdsData
cmdData, err = dockercommon.ContainerCmdsDataFromVersion(version)
if err != nil {
return err
}
_, processName, err = dockercommon.ProcessCmdForVersion(processName, cmdData)
if err != nil {
return errors.WithStack(err)
}
}
dep, err := deploymentForVersion(ctx, client, a, processName, version.Version())
if k8sErrors.IsNotFound(err) {
return servicecommon.ChangeUnits(ctx, &serviceManager{
client: client,
writer: w,
}, a, units, processName, version)
}
if err != nil {
return err
}
zero := int32(0)
if dep.Spec.Replicas == nil {
dep.Spec.Replicas = &zero
}
newReplicas := int(*dep.Spec.Replicas) + units
if w == nil {
w = ioutil.Discard
}
fmt.Fprintf(w, "---- Patching from %d to %d units ----\n", *dep.Spec.Replicas, newReplicas)
patchType, patch, err := replicasPatch(newReplicas)
if err != nil {
return err
}
ns, err := client.AppNamespace(ctx, a)
if err != nil {
return err
}
newDep, err := client.AppsV1().Deployments(ns).Patch(ctx, dep.Name, patchType, patch, metav1.PatchOptions{})
if err != nil {
return errors.WithStack(err)
}
events, err := client.CoreV1().Events(ns).List(ctx, listOptsForResourceEvent("Pod", ""))
if err != nil {
return errors.WithStack(err)
}
_, err = monitorDeployment(ctx, client, newDep, a, processName, w, events.ResourceVersion, version)
if err != nil {
if _, ok := err.(provision.ErrUnitStartup); ok {
return err
}
return provision.ErrUnitStartup{Err: err}
}
return nil
}
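// replicasPatch builds a JSON patch that replaces /spec/replicas with the given value.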
func replicasPatch(replicas int) (types.PatchType, []byte, error) {
patch, err := json.Marshal([]interface{}{
map[string]interface{}{
"op": "replace",
"path": "/spec/replicas",
"value": replicas,
},
})
if err != nil {
return "", nil, errors.WithStack(err)
}
return types.JSONPatchType, patch, nil
}
func (p *kubernetesProvisioner) AddUnits(ctx context.Context, a provision.App, units uint, processName string, version appTypes.AppVersion, w io.Writer) error {
return changeUnits(ctx, a, int(units), processName, version, w)
}
func (p *kubernetesProvisioner) RemoveUnits(ctx context.Context, a provision.App, units uint, processName string, version appTypes.AppVersion, w io.Writer) error {
return changeUnits(ctx, a, -int(units), processName, version, w)
}
func (p *kubernetesProvisioner) Restart(ctx context.Context, a provision.App, process string, version appTypes.AppVersion, w io.Writer) error {
return changeState(ctx, a, process, version, servicecommon.ProcessState{Start: true, Restart: true}, w)
}
func (p *kubernetesProvisioner) Start(ctx context.Context, a provision.App, process string, version appTypes.AppVersion) error {
return changeState(ctx, a, process, version, servicecommon.ProcessState{Start: true}, nil)
}
func (p *kubernetesProvisioner) Stop(ctx context.Context, a provision.App, process string, version appTypes.AppVersion) error {
return changeState(ctx, a, process, version, servicecommon.ProcessState{Stop: true}, nil)
}
func (p *kubernetesProvisioner) Sleep(ctx context.Context, a provision.App, process string, version appTypes.AppVersion) error {
return changeState(ctx, a, process, version, servicecommon.ProcessState{Stop: true, Sleep: true}, nil)
}
var stateMap = map[apiv1.PodPhase]provision.Status{
apiv1.PodPending: provision.StatusCreated,
apiv1.PodRunning: provision.StatusStarted,
apiv1.PodSucceeded: provision.StatusStopped,
apiv1.PodFailed: provision.StatusError,
apiv1.PodUnknown: provision.StatusError,
}
func (p *kubernetesProvisioner) podsToUnits(ctx context.Context, client *ClusterClient, pods []apiv1.Pod, baseApp provision.App) ([]provision.Unit, error) {
var apps []provision.App
if baseApp != nil {
apps = append(apps, baseApp)
}
return p.podsToUnitsMultiple(ctx, client, pods, apps)
}
func (p *kubernetesProvisioner) podsToUnitsMultiple(ctx context.Context, client *ClusterClient, pods []apiv1.Pod, baseApps []provision.App) ([]provision.Unit, error) {
var err error
if len(pods) == 0 {
return nil, nil
}
appMap := map[string]provision.App{}
portsMap := map[string][]int32{}
for _, baseApp := range baseApps {
appMap[baseApp.GetName()] = baseApp
}
controller, err := getClusterController(p, client)
if err != nil {
return nil, err
}
svcInformer, err := controller.getServiceInformer()
if err != nil {
return nil, err
}
var units []provision.Unit
for _, pod := range pods {
if isTerminating(pod) || isEvicted(pod) || isFailedByNodeAffinity(pod) {
continue
}
l := labelSetFromMeta(&pod.ObjectMeta)
podApp, ok := appMap[l.AppName()]
if !ok {
podApp, err = app.GetByName(ctx, l.AppName())
if err != nil {
return nil, errors.WithStack(err)
}
appMap[podApp.GetName()] = podApp
}
u := &url.URL{
Scheme: "http",
Host: pod.Status.HostIP,
}
urls := []url.URL{}
appProcess := l.AppProcess()
appVersion := l.AppVersion()
isRoutable := l.IsRoutable()
if appVersion == 0 {
isRoutable = true
if len(pod.Spec.Containers) > 0 {
_, tag := image.SplitImageName(pod.Spec.Containers[0].Image)
appVersion, _ = strconv.Atoi(strings.TrimPrefix(tag, "v"))
}
}
if appProcess != "" {
var srvName string
if isRoutable {
srvName = serviceNameForAppBase(podApp, appProcess)
} else {
srvName = serviceNameForApp(podApp, appProcess, appVersion)
}
ports, ok := portsMap[srvName]
if !ok {
ports, err = getServicePorts(svcInformer, srvName, pod.ObjectMeta.Namespace)
if err != nil {
return nil, err
}
portsMap[srvName] = ports
}
if len(ports) > 0 {
u.Host = fmt.Sprintf("%s:%d", u.Host, ports[0])
for _, p := range ports {
urls = append(urls, url.URL{Scheme: "http", Host: fmt.Sprintf("%s:%d", pod.Status.HostIP, p)})
}
}
}
var status provision.Status
if pod.Status.Phase == apiv1.PodRunning {
status = extractStatusFromContainerStatuses(pod.Status.ContainerStatuses)
} else {
status = stateMap[pod.Status.Phase]
}
createdAt := pod.CreationTimestamp.Time.In(time.UTC)
units = append(units, provision.Unit{
ID: pod.Name,
Name: pod.Name,
AppName: l.AppName(),
ProcessName: appProcess,
Type: l.AppPlatform(),
IP: pod.Status.HostIP,
Status: status,
Address: u,
Addresses: urls,
Version: appVersion,
Routable: isRoutable,
Restarts: containersRestarts(pod.Status.ContainerStatuses),
CreatedAt: &createdAt,
Ready: containersReady(pod.Status.ContainerStatuses),
})
}
return units, nil
}
func containersRestarts(containersStatus []apiv1.ContainerStatus) *int32 {
restarts := int32(0)
for _, containerStatus := range containersStatus {
restarts += containerStatus.RestartCount
}
return &restarts
}
func containersReady(containersStatus []apiv1.ContainerStatus) *bool {
ready := len(containersStatus) > 0
for _, containerStatus := range containersStatus {
if !containerStatus.Ready {
ready = false
break
}
}
return &ready
}
func extractStatusFromContainerStatuses(statuses []apiv1.ContainerStatus) provision.Status {
for _, containerStatus := range statuses {
if containerStatus.Ready {
continue
}
if containerStatus.LastTerminationState.Terminated != nil {
return provision.StatusError
}
return provision.StatusStarting
}
return provision.StatusStarted
}
// merged from https://github.com/kubernetes/kubernetes/blob/1f69c34478800e150acd022f6313a15e1cb7a97c/pkg/quota/evaluator/core/pods.go#L333
// and https://github.com/kubernetes/kubernetes/blob/560e15fb9acee4b8391afbc21fc3aea7b771e2c4/pkg/printers/internalversion/printers.go#L606
func isTerminating(pod apiv1.Pod) bool {
return pod.Spec.ActiveDeadlineSeconds != nil && *pod.Spec.ActiveDeadlineSeconds >= int64(0) || pod.DeletionTimestamp != nil
}
func isEvicted(pod apiv1.Pod) bool {
return pod.Status.Phase == apiv1.PodFailed && strings.ToLower(pod.Status.Reason) == "evicted"
}
func isFailedByNodeAffinity(pod apiv1.Pod) bool {
return pod.Status.Phase == apiv1.PodFailed && strings.ToLower(pod.Status.Reason) == "nodeaffinity"
}
func (p *kubernetesProvisioner) Units(ctx context.Context, apps ...provision.App) ([]provision.Unit, error) {
cApps, err := clustersForApps(ctx, apps)
if err != nil {
return nil, err
}
var units []provision.Unit
for _, cApp := range cApps {
pods, err := p.podsForApps(ctx, cApp.client, cApp.apps)
if err != nil {
return nil, err
}
clusterUnits, err := p.podsToUnitsMultiple(ctx, cApp.client, pods, cApp.apps)
if err != nil {
return nil, err
}
units = append(units, clusterUnits...)
}
return units, nil
}
func (p *kubernetesProvisioner) podsForApps(ctx context.Context, client *ClusterClient, apps []provision.App) ([]apiv1.Pod, error) {
inSelectorMap := map[string][]string{}
for _, a := range apps {
l, err := provision.ServiceLabels(ctx, provision.ServiceLabelsOpts{
App: a,
ServiceLabelExtendedOpts: provision.ServiceLabelExtendedOpts{
Prefix: tsuruLabelPrefix,
Provisioner: provisionerName,
},
})
if err != nil {
return nil, err
}
appSel := l.ToAppSelector()
for k, v := range appSel {
inSelectorMap[k] = append(inSelectorMap[k], v)
}
}
sel := labels.NewSelector()
for k, v := range inSelectorMap {
if len(v) == 0 {
continue
}
req, err := labels.NewRequirement(k, selection.In, v)
if err != nil {
return nil, err
}
sel = sel.Add(*req)
}
controller, err := getClusterController(p, client)
if err != nil {
return nil, err
}
informer, err := controller.getPodInformer()
if err != nil {
return nil, err
}
pods, err := informer.Lister().List(sel)
if err != nil {
return nil, err
}
podCopies := make([]apiv1.Pod, len(pods))
for i, p := range pods {
podCopies[i] = *p.DeepCopy()
}
return podCopies, nil
}
func (p *kubernetesProvisioner) RoutableAddresses(ctx context.Context, a provision.App) ([]appTypes.RoutableAddresses, error) {
client, err := clusterForPool(ctx, a.GetPool())
if err != nil {
return nil, err
}
version, err := servicemanager.AppVersion.LatestSuccessfulVersion(ctx, a)
if err != nil {
if err != appTypes.ErrNoVersionsAvailable {
return nil, err
}
return nil, nil
}
webProcessName, err := version.WebProcess()
if err != nil {
return nil, err
}
controller, err := getClusterController(p, client)
if err != nil {
return nil, err
}
svcInformer, err := controller.getServiceInformer()
if err != nil {
return nil, err
}
ns, err := client.AppNamespace(ctx, a)
if err != nil {
return nil, err
}
svcs, err := allServicesForAppInformer(ctx, svcInformer, ns, a)
if err != nil {
return nil, err
}
var allAddrs []appTypes.RoutableAddresses
for _, svc := range svcs {
ls := labelOnlySetFromMeta(&svc.ObjectMeta)
if ls.IsHeadlessService() {
continue
}
processName := ls.AppProcess()
version := ls.AppVersion()
var rAddr appTypes.RoutableAddresses
if processName == webProcessName {
var prefix string
if version != 0 {
prefix = fmt.Sprintf("v%d.version", version)
}
rAddr, err = p.routableAddrForProcess(ctx, client, a, processName, prefix, version, svc)
if err != nil {
return nil, err
}
allAddrs = append(allAddrs, rAddr)
}
var prefix string
if version == 0 {
prefix = fmt.Sprintf("%s.process", processName)
} else {
prefix = fmt.Sprintf("v%d.version.%s.process", version, processName)
}
rAddr, err = p.routableAddrForProcess(ctx, client, a, processName, prefix, version, svc)
if err != nil {
return nil, err
}
allAddrs = append(allAddrs, rAddr)
}
return allAddrs, nil
}
func (p *kubernetesProvisioner) routableAddrForProcess(ctx context.Context, client *ClusterClient, a provision.App, processName, prefix string, version int, svc apiv1.Service) (appTypes.RoutableAddresses, error) {
var routableAddrs appTypes.RoutableAddresses
var pubPort int32
if len(svc.Spec.Ports) > 0 {
pubPort = svc.Spec.Ports[0].NodePort
}
if pubPort == 0 {
return routableAddrs, nil
}
addrs, err := p.addressesForApp(ctx, client, a, processName, pubPort, version)
if err != nil || addrs == nil {
return routableAddrs, err
}
return appTypes.RoutableAddresses{
Prefix: prefix,
Addresses: addrs,
ExtraData: map[string]string{
"service": svc.Name,
"namespace": svc.Namespace,
},
}, nil
}
func (p *kubernetesProvisioner) addressesForApp(ctx context.Context, client *ClusterClient, a provision.App, processName string, pubPort int32, version int) ([]*url.URL, error) {
pods, err := p.podsForApps(ctx, client, []provision.App{a})
if err != nil {
return nil, err
}
addrs := make([]*url.URL, 0)
for _, pod := range pods {
labelSet := labelSetFromMeta(&pod.ObjectMeta)
if labelSet.IsIsolatedRun() {
continue
}
if labelSet.AppProcess() != processName {
continue
}
if version != 0 && labelSet.AppVersion() != version {
continue
}
if isPodReady(&pod) {
addrs = append(addrs, &url.URL{
Scheme: "http",
Host: fmt.Sprintf("%s:%d", pod.Status.HostIP, pubPort),
})
}
}
return addrs, nil
}
func (p *kubernetesProvisioner) RegisterUnit(ctx context.Context, a provision.App, unitID string, customData map[string]interface{}) error {
client, err := clusterForPool(ctx, a.GetPool())
if err != nil {
return err
}
ns, err := client.AppNamespace(ctx, a)
if err != nil {
return err
}
pod, err := client.CoreV1().Pods(ns).Get(ctx, unitID, metav1.GetOptions{})
if err != nil {
if k8sErrors.IsNotFound(err) {
return &provision.UnitNotFoundError{ID: unitID}
}
return errors.WithStack(err)
}
units, err := p.podsToUnits(ctx, client, []apiv1.Pod{*pod}, a)
if err != nil {
return err
}
if len(units) == 0 {
return errors.Errorf("unable to convert pod to unit: %#v", pod)
}
if customData == nil {
return nil
}
l := labelSetFromMeta(&pod.ObjectMeta)
buildingImage := l.BuildImage()
if buildingImage == "" {
return nil
}
version, err := servicemanager.AppVersion.VersionByPendingImage(ctx, a, buildingImage)
if err != nil {
return errors.WithStack(err)
}
err = version.AddData(appTypes.AddVersionDataArgs{
CustomData: customData,
})
return errors.WithStack(err)
}
func (p *kubernetesProvisioner) ListNodes(ctx context.Context, addressFilter []string) ([]provision.Node, error) {
var nodes []provision.Node
err := forEachCluster(ctx, func(c *ClusterClient) error {
clusterNodes, err := p.listNodesForCluster(c, nodeFilter{addresses: addressFilter})
if err != nil {
return err
}
nodes = append(nodes, clusterNodes...)
return nil
})
if err == provTypes.ErrNoCluster {
return nil, nil
}
if err != nil {
return nil, err
}
return nodes, nil
}
func (p *kubernetesProvisioner) InternalAddresses(ctx context.Context, a provision.App) ([]provision.AppInternalAddress, error) {
client, err := clusterForPool(ctx, a.GetPool())
if err != nil {
return nil, err
}
ns, err := client.AppNamespace(ctx, a)
if err != nil {
return nil, err
}
controller, err := getClusterController(p, client)
if err != nil {
return nil, err
}
svcInformer, err := controller.getServiceInformer()
if err != nil {
return nil, err
}
svcs, err := allServicesForAppInformer(ctx, svcInformer, ns, a)
if err != nil {
return nil, err
}
sort.Slice(svcs, func(i, j int) bool {
iVersion := svcs[i].ObjectMeta.Labels[tsuruLabelAppVersion]
jVersion := svcs[j].ObjectMeta.Labels[tsuruLabelAppVersion]
iProcess := svcs[i].ObjectMeta.Labels[tsuruLabelAppProcess]
jProcess := svcs[j].ObjectMeta.Labels[tsuruLabelAppProcess]
// we prioritize the web process without a version suffix;
// in most cases this is the address used to bind related services.
// The address list is sent to tsuru services, which use the first address for automatic binds.
if iProcess == "web" && iVersion == "" {
return true
} else if jProcess == "web" && jVersion == "" {
return false
}
if iVersion != jVersion {
return iVersion < jVersion
}
return iProcess < jProcess
})
addresses := []provision.AppInternalAddress{}
for _, service := range svcs {
// we can't show headless services
if service.Spec.ClusterIP == "None" {
continue
}
for _, port := range service.Spec.Ports {
addresses = append(addresses, provision.AppInternalAddress{
Domain: fmt.Sprintf("%s.%s.svc.cluster.local", service.Name, ns),
Protocol: string(port.Protocol),
Port: port.Port,
Version: service.ObjectMeta.Labels[tsuruLabelAppVersion],
Process: service.ObjectMeta.Labels[tsuruLabelAppProcess],
})
}
}
return addresses, nil
}
type nodeFilter struct {
addresses []string
metadata map[string]string
}
func (p *kubernetesProvisioner) listNodesForCluster(cluster *ClusterClient, filter nodeFilter) ([]provision.Node, error) {
var addressSet set.Set
if len(filter.addresses) > 0 {
addressSet = set.FromSlice(filter.addresses)
}
controller, err := getClusterController(p, cluster)
if err != nil {
return nil, err
}
nodeInformer, err := controller.getNodeInformer()
if err != nil {
return nil, err
}
nodeList, err := nodeInformer.Lister().List(labels.Everything())
if err != nil {
return nil, errors.WithStack(err)
}
var nodes []provision.Node
for i := range nodeList {
n := &kubernetesNodeWrapper{
node: nodeList[i].DeepCopy(),
prov: p,
cluster: cluster,
}
matchesAddresses := len(addressSet) == 0 || addressSet.Includes(n.Address())
matchesMetadata := len(filter.metadata) == 0 || node.HasAllMetadata(n.MetadataNoPrefix(), filter.metadata)
if matchesAddresses && matchesMetadata {
nodes = append(nodes, n)
}
}
return nodes, nil
}
func (p *kubernetesProvisioner) ListNodesByFilter(ctx context.Context, filter *provTypes.NodeFilter) ([]provision.Node, error) {
var nodes []provision.Node
err := forEachCluster(ctx, func(c *ClusterClient) error {
clusterNodes, err := p.listNodesForCluster(c, nodeFilter{metadata: filter.Metadata})
if err != nil {
return err
}
nodes = append(nodes, clusterNodes...)
return nil
})
if err == provTypes.ErrNoCluster {
return nil, nil
}
if err != nil {
return nil, err
}
return nodes, nil
}
func (p *kubernetesProvisioner) GetNode(ctx context.Context, address string) (provision.Node, error) {
_, node, err := p.findNodeByAddress(ctx, address)
if err != nil {
return nil, err
}
return node, nil
}
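// setNodeMetadata stores tsuru-prefixed metadata on the node: the extra-labels/extra-annotations keys are expanded, other entries become annotations (an empty value deletes the key), and the pool and IaaS ID are always written as labels.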
func setNodeMetadata(node *apiv1.Node, pool, iaasID string, meta map[string]string) {
if node.Labels == nil {
node.Labels = map[string]string{}
}
if node.Annotations == nil {
node.Annotations = map[string]string{}
}
for k, v := range meta {
k = tsuruLabelPrefix + strings.TrimPrefix(k, tsuruLabelPrefix)
switch k {
case tsuruExtraAnnotationsMeta:
appendKV(v, ",", "=", node.Annotations)
case tsuruExtraLabelsMeta:
appendKV(v, ",", "=", node.Labels)
}
if v == "" {
delete(node.Annotations, k)
continue
}
node.Annotations[k] = v
}
baseNodeLabels := provision.NodeLabels(provision.NodeLabelsOpts{
IaaSID: iaasID,
Pool: pool,
Prefix: tsuruLabelPrefix,
})
for k, v := range baseNodeLabels.ToLabels() {
if v == "" {
continue
}
delete(node.Annotations, k)
node.Labels[k] = v
}
}
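// appendKV splits s on outSep, splits each piece on innSep, and merges the resulting key/value pairs into m; a pair with an empty value removes that key from m.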
func appendKV(s, outSep, innSep string, m map[string]string) {
kvs := strings.Split(s, outSep)
for _, kv := range kvs {
parts := strings.SplitN(kv, innSep, 2)
if len(parts) != 2 {
continue
}
if parts[1] == "" {
delete(m, parts[0])
continue
}
m[parts[0]] = parts[1]
}
}
func (p *kubernetesProvisioner) AddNode(ctx context.Context, opts provision.AddNodeOptions) (err error) {
client, err := clusterForPool(ctx, opts.Pool)
if err != nil {
return err
}
defer func() {
if err == nil {
servicecommon.RebuildRoutesPoolApps(opts.Pool)
}
}()
hostAddr := tsuruNet.URLToHost(opts.Address)
conf := getKubeConfig()
if conf.RegisterNode {
node := &apiv1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: hostAddr,
},
}
setNodeMetadata(node, opts.Pool, opts.IaaSID, opts.Metadata)
_, err = client.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{})
if err == nil {
return nil
}
if !k8sErrors.IsAlreadyExists(err) {
return errors.WithStack(err)
}
}
return p.internalNodeUpdate(ctx, provision.UpdateNodeOptions{
Address: hostAddr,
Metadata: opts.Metadata,
Pool: opts.Pool,
}, opts.IaaSID)
}
func (p *kubernetesProvisioner) RemoveNode(ctx context.Context, opts provision.RemoveNodeOptions) error {
client, nodeWrapper, err := p.findNodeByAddress(ctx, opts.Address)
if err != nil {
return err
}
node := nodeWrapper.node
if opts.Rebalance {
node.Spec.Unschedulable = true
_, err = client.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{})
if err != nil {
return errors.WithStack(err)
}
var pods []apiv1.Pod
pods, err = podsFromNode(ctx, client, node.Name, tsuruLabelPrefix+provision.LabelAppPool)
if err != nil {
return err
}
for _, pod := range pods {
err = client.CoreV1().Pods(pod.Namespace).Evict(ctx, &policy.Eviction{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
},
})
if err != nil {
return errors.WithStack(err)
}
}
}
err = client.CoreV1().Nodes().Delete(ctx, node.Name, metav1.DeleteOptions{})
if err != nil {
return errors.WithStack(err)
}
servicecommon.RebuildRoutesPoolApps(nodeWrapper.Pool())
return nil
}
func (p *kubernetesProvisioner) NodeForNodeData(ctx context.Context, nodeData provision.NodeStatusData) (provision.Node, error) {
return node.FindNodeByAddrs(ctx, p, nodeData.Addrs)
}
func (p *kubernetesProvisioner) findNodeByAddress(ctx context.Context, address string) (*ClusterClient, *kubernetesNodeWrapper, error) {
var (
foundNode *kubernetesNodeWrapper
foundCluster *ClusterClient
)
err := forEachCluster(ctx, func(c *ClusterClient) error {
if foundNode != nil {
return nil
}
node, err := p.getNodeByAddr(ctx, c, address)
if err == nil {
foundNode = &kubernetesNodeWrapper{
node: node,
prov: p,
cluster: c,
}
foundCluster = c
return nil
}
if err != provision.ErrNodeNotFound {
return err
}
return nil
})
if err != nil {
if err == provTypes.ErrNoCluster {
return nil, nil, provision.ErrNodeNotFound
}
return nil, nil, err
}
if foundNode == nil {
return nil, nil, provision.ErrNodeNotFound
}
return foundCluster, foundNode, nil
}
func (p *kubernetesProvisioner) UpdateNode(ctx context.Context, opts provision.UpdateNodeOptions) error {
return p.internalNodeUpdate(ctx, opts, "")
}
func (p *kubernetesProvisioner) internalNodeUpdate(ctx context.Context, opts provision.UpdateNodeOptions, iaasID string) error {
client, nodeWrapper, err := p.findNodeByAddress(ctx, opts.Address)
if err != nil {
return err
}
if nodeWrapper.IaaSID() != "" {
iaasID = ""
}
node := nodeWrapper.node
shouldRemove := map[string]bool{
tsuruInProgressTaint: true,
tsuruNodeDisabledTaint: opts.Enable,
}
taints := node.Spec.Taints
var isDisabled bool
for i := 0; i < len(taints); i++ {
if taints[i].Key == tsuruNodeDisabledTaint {
isDisabled = true
}
if remove := shouldRemove[taints[i].Key]; remove {
taints[i] = taints[len(taints)-1]
taints = taints[:len(taints)-1]
i--
}
}
if !isDisabled && opts.Disable {
taints = append(taints, apiv1.Taint{
Key: tsuruNodeDisabledTaint,
Effect: apiv1.TaintEffectNoSchedule,
})
}
node.Spec.Taints = taints
setNodeMetadata(node, opts.Pool, iaasID, opts.Metadata)
_, err = client.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{})
return errors.WithStack(err)
}
func (p *kubernetesProvisioner) Deploy(ctx context.Context, args provision.DeployArgs) (string, error) {
client, err := clusterForPool(ctx, args.App.GetPool())
if err != nil {
return "", err
}
if err = ensureAppCustomResourceSynced(ctx, client, args.App); err != nil {
return "", err
}
if args.Version.VersionInfo().DeployImage == "" {
deployPodName := deployPodNameForApp(args.App, args.Version)
ns, nsErr := client.AppNamespace(ctx, args.App)
if nsErr != nil {
return "", nsErr
}
defer cleanupPod(tsuruNet.WithoutCancel(ctx), client, deployPodName, ns)
params := createPodParams{
app: args.App,
client: client,
podName: deployPodName,
sourceImage: args.Version.VersionInfo().BuildImage,
destinationImages: []string{args.Version.BaseImageName()},
attachOutput: args.Event,
attachInput: strings.NewReader("."),
inputFile: "/dev/null",
}
err = createDeployPod(ctx, params)
if err != nil {
return "", err
}
err = args.Version.CommitBaseImage()
if err != nil {
return "", err
}
}
manager := &serviceManager{
client: client,
writer: args.Event,
}
var oldVersionNumber int
if !args.PreserveVersions {
oldVersionNumber, err = baseVersionForApp(ctx, client, args.App)
if err != nil {
return "", err
}
}
err = servicecommon.RunServicePipeline(ctx, manager, oldVersionNumber, args, nil)
if err != nil {
return "", errors.WithStack(err)
}
err = ensureAppCustomResourceSynced(ctx, client, args.App)
if err != nil {
return "", err
}
return args.Version.VersionInfo().DeployImage, nil
}
func (p *kubernetesProvisioner) UpgradeNodeContainer(ctx context.Context, name string, pool string, writer io.Writer) error {
m := nodeContainerManager{}
return servicecommon.UpgradeNodeContainer(&m, name, pool, writer)
}
func (p *kubernetesProvisioner) RemoveNodeContainer(ctx context.Context, name string, pool string, writer io.Writer) error {
err := forEachCluster(ctx, func(cluster *ClusterClient) error {
return cleanupDaemonSet(ctx, cluster, name, pool)
})
if err == provTypes.ErrNoCluster {
return nil
}
return err
}
func (p *kubernetesProvisioner) ExecuteCommand(ctx context.Context, opts provision.ExecOptions) error {
client, err := clusterForPool(ctx, opts.App.GetPool())
if err != nil {
return err
}
var size *remotecommand.TerminalSize
if opts.Width != 0 && opts.Height != 0 {
size = &remotecommand.TerminalSize{
Width: uint16(opts.Width),
Height: uint16(opts.Height),
}
}
if opts.Term != "" {
opts.Cmds = append([]string{"/usr/bin/env", "TERM=" + opts.Term}, opts.Cmds...)
}
eOpts := execOpts{
client: client,
app: opts.App,
cmds: opts.Cmds,
stdout: opts.Stdout,
stderr: opts.Stderr,
stdin: opts.Stdin,
termSize: size,
tty: opts.Stdin != nil,
}
if len(opts.Units) == 0 {
return runIsolatedCmdPod(ctx, client, eOpts)
}
for _, u := range opts.Units {
eOpts.unit = u
err := execCommand(ctx, eOpts)
if err != nil {
return err
}
}
return nil
}
func runIsolatedCmdPod(ctx context.Context, client *ClusterClient, opts execOpts) error {
baseName := execCommandPodNameForApp(opts.app)
labels, err := provision.ServiceLabels(ctx, provision.ServiceLabelsOpts{
App: opts.app,
ServiceLabelExtendedOpts: provision.ServiceLabelExtendedOpts{
Prefix: tsuruLabelPrefix,
Provisioner: provisionerName,
IsIsolatedRun: true,
},
})
if err != nil {
return errors.WithStack(err)
}
var version appTypes.AppVersion
if opts.image == "" {
version, err = servicemanager.AppVersion.LatestSuccessfulVersion(ctx, opts.app)
if err != nil {
return errors.WithStack(err)
}
opts.image = version.VersionInfo().DeployImage
}
appEnvs := provision.EnvsForApp(opts.app, "", false, version)
var envs []apiv1.EnvVar
for _, envData := range appEnvs {
envs = append(envs, apiv1.EnvVar{Name: envData.Name, Value: envData.Value})
}
return runPod(ctx, runSinglePodArgs{
client: client,
eventsOutput: opts.eventsOutput,
stdout: opts.stdout,
stderr: opts.stderr,
stdin: opts.stdin,
termSize: opts.termSize,
image: opts.image,
labels: labels,
cmds: opts.cmds,
envs: envs,
name: baseName,
app: opts.app,
})
}
func (p *kubernetesProvisioner) StartupMessage() (string, error) {
clusters, err := allClusters(context.TODO())
if err != nil {
if err == provTypes.ErrNoCluster {
return "", nil
}
return "", err
}
var out string
for _, c := range clusters {
nodeList, err := p.listNodesForCluster(c, nodeFilter{})
if err != nil {
return "", err
}
out += fmt.Sprintf("Kubernetes provisioner on cluster %q - %s:\n", c.Name, c.restConfig.Host)
if len(nodeList) == 0 {
out += " No Kubernetes nodes available\n"
}
sort.Slice(nodeList, func(i, j int) bool {
return nodeList[i].Address() < nodeList[j].Address()
})
for _, node := range nodeList {
out += fmt.Sprintf(" Kubernetes node: %s\n", node.Address())
}
}
return out, nil
}
func (p *kubernetesProvisioner) DeleteVolume(ctx context.Context, volumeName, pool string) error {
client, err := clusterForPool(ctx, pool)
if err != nil {
return err
}
return deleteVolume(ctx, client, volumeName)
}
func (p *kubernetesProvisioner) IsVolumeProvisioned(ctx context.Context, volumeName, pool string) (bool, error) {
client, err := clusterForPool(ctx, pool)
if err != nil {
return false, err
}
return volumeExists(ctx, client, volumeName)
}
func (p *kubernetesProvisioner) UpdateApp(ctx context.Context, old, new provision.App, w io.Writer) error {
if old.GetPool() == new.GetPool() {
return nil
}
client, err := clusterForPool(ctx, old.GetPool())
if err != nil {
return err
}
newClient, err := clusterForPool(ctx, new.GetPool())
if err != nil {
return err
}
sameCluster := client.GetCluster().Name == newClient.GetCluster().Name
sameNamespace := client.PoolNamespace(old.GetPool()) == client.PoolNamespace(new.GetPool())
if sameCluster && !sameNamespace {
var volumes []volumeTypes.Volume
volumes, err = servicemanager.Volume.ListByApp(ctx, old.GetName())
if err != nil {
return err
}
if len(volumes) > 0 {
			return fmt.Errorf("can't change the pool of an app with bound volumes")
}
}
versions, err := versionsForAppProcess(ctx, client, old, "")
if err != nil {
return err
}
params := updatePipelineParams{
old: old,
new: new,
w: w,
p: p,
versions: versions,
}
if !sameCluster {
actions := []*action.Action{
&provisionNewApp,
&restartApp,
&rebuildAppRoutes,
&destroyOldApp,
}
return action.NewPipeline(actions...).Execute(ctx, params)
}
	// Same cluster and per-pool namespaces are not enabled, so there is nothing to do.
if sameNamespace {
return nil
}
actions := []*action.Action{
&updateAppCR,
&restartApp,
&rebuildAppRoutes,
&removeOldAppResources,
}
return action.NewPipeline(actions...).Execute(ctx, params)
}
func (p *kubernetesProvisioner) Shutdown(ctx context.Context) error {
err := forEachCluster(ctx, func(client *ClusterClient) error {
stopClusterController(p, client)
return nil
})
if err == provTypes.ErrNoCluster {
return nil
}
return err
}
func ensureAppCustomResourceSynced(ctx context.Context, client *ClusterClient, a provision.App) error {
_, err := loadAndEnsureAppCustomResourceSynced(ctx, client, a)
return err
}
func loadAndEnsureAppCustomResourceSynced(ctx context.Context, client *ClusterClient, a provision.App) (*tsuruv1.App, error) {
err := ensureNamespace(ctx, client, client.Namespace())
if err != nil {
return nil, err
}
err = ensureAppCustomResource(ctx, client, a)
if err != nil {
return nil, err
}
tclient, err := TsuruClientForConfig(client.restConfig)
if err != nil {
return nil, err
}
appCRD, err := tclient.TsuruV1().Apps(client.Namespace()).Get(ctx, a.GetName(), metav1.GetOptions{})
if err != nil {
return nil, err
}
appCRD.Spec.ServiceAccountName = serviceAccountNameForApp(a)
deploys, err := allDeploymentsForApp(ctx, client, a)
if err != nil {
return nil, err
}
sort.Slice(deploys, func(i, j int) bool {
return deploys[i].Name < deploys[j].Name
})
svcs, err := allServicesForApp(ctx, client, a)
if err != nil {
return nil, err
}
sort.Slice(svcs, func(i, j int) bool {
return svcs[i].Name < svcs[j].Name
})
deployments := make(map[string][]string)
services := make(map[string][]string)
for _, dep := range deploys {
l := labelSetFromMeta(&dep.ObjectMeta)
proc := l.AppProcess()
deployments[proc] = append(deployments[proc], dep.Name)
}
for _, svc := range svcs {
l := labelSetFromMeta(&svc.ObjectMeta)
proc := l.AppProcess()
services[proc] = append(services[proc], svc.Name)
}
appCRD.Spec.Services = services
appCRD.Spec.Deployments = deployments
version, err := servicemanager.AppVersion.LatestSuccessfulVersion(ctx, a)
if err != nil && err != appTypes.ErrNoVersionsAvailable {
return nil, err
}
if version != nil {
appCRD.Spec.Configs, err = normalizeConfigs(version)
if err != nil {
return nil, err
}
}
return tclient.TsuruV1().Apps(client.Namespace()).Update(ctx, appCRD, metav1.UpdateOptions{})
}
func ensureAppCustomResource(ctx context.Context, client *ClusterClient, a provision.App) error {
err := ensureCustomResourceDefinitions(ctx, client)
if err != nil {
return err
}
tclient, err := TsuruClientForConfig(client.restConfig)
if err != nil {
return err
}
_, err = tclient.TsuruV1().Apps(client.Namespace()).Get(ctx, a.GetName(), metav1.GetOptions{})
if err == nil {
return nil
}
if !k8sErrors.IsNotFound(err) {
return err
}
_, err = tclient.TsuruV1().Apps(client.Namespace()).Create(ctx, &tsuruv1.App{
ObjectMeta: metav1.ObjectMeta{Name: a.GetName()},
Spec: tsuruv1.AppSpec{NamespaceName: client.PoolNamespace(a.GetPool())},
}, metav1.CreateOptions{})
return err
}
func ensureCustomResourceDefinitions(ctx context.Context, client *ClusterClient) error {
extClient, err := ExtensionsClientForConfig(client.restConfig)
if err != nil {
return err
}
toCreate := appCustomResourceDefinition()
_, err = extClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(ctx, toCreate, metav1.CreateOptions{})
if err != nil && !k8sErrors.IsAlreadyExists(err) {
return err
}
timeout := time.After(time.Minute)
loop:
for {
crd, errGet := extClient.ApiextensionsV1beta1().CustomResourceDefinitions().Get(ctx, toCreate.GetName(), metav1.GetOptions{})
if errGet != nil {
return errGet
}
for _, c := range crd.Status.Conditions {
if c.Type == v1beta1.Established && c.Status == v1beta1.ConditionTrue {
break loop
}
}
select {
case <-timeout:
return fmt.Errorf("timeout waiting for custom resource definition creation")
case <-time.After(time.Second):
}
}
return nil
}
func appCustomResourceDefinition() *v1beta1.CustomResourceDefinition {
return &v1beta1.CustomResourceDefinition{
ObjectMeta: metav1.ObjectMeta{Name: "apps.tsuru.io"},
Spec: v1beta1.CustomResourceDefinitionSpec{
Group: "tsuru.io",
Version: "v1",
Names: v1beta1.CustomResourceDefinitionNames{
Plural: "apps",
Singular: "app",
Kind: "App",
ListKind: "AppList",
},
},
}
}
func normalizeConfigs(version appTypes.AppVersion) (*provTypes.TsuruYamlKubernetesConfig, error) {
yamlData, err := version.TsuruYamlData()
if err != nil {
return nil, err
}
config := yamlData.Kubernetes
if config == nil {
return nil, nil
}
for _, group := range yamlData.Kubernetes.Groups {
for procName, proc := range group {
ports, err := getProcessPortsForVersion(version, procName)
if err == nil {
proc.Ports = ports
group[procName] = proc
}
}
}
return config, nil
}
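// EnvsForApp returns the environment for an app process, adding a
// PORT_<process> variable built from the tsuru.yaml kubernetes port config.
// The default PORT envs are dropped when the process has no configured ports
// or when its configured ports differ from the default one.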
func EnvsForApp(a provision.App, process string, version appTypes.AppVersion, isDeploy bool) []bind.EnvVar {
envs := provision.EnvsForApp(a, process, isDeploy, version)
if isDeploy {
return envs
}
portsConfig, err := getProcessPortsForVersion(version, process)
if err != nil {
return envs
}
if len(portsConfig) == 0 {
return removeDefaultPortEnvs(envs)
}
portValue := make([]string, len(portsConfig))
for i, portConfig := range portsConfig {
targetPort := portConfig.TargetPort
if targetPort == 0 {
targetPort = portConfig.Port
}
portValue[i] = fmt.Sprintf("%d", targetPort)
}
portEnv := bind.EnvVar{Name: fmt.Sprintf("PORT_%s", process), Value: strings.Join(portValue, ",")}
if !isDefaultPort(portsConfig) {
envs = removeDefaultPortEnvs(envs)
}
return append(envs, portEnv)
}
func removeDefaultPortEnvs(envs []bind.EnvVar) []bind.EnvVar {
envsWithoutPort := []bind.EnvVar{}
defaultPortEnvs := provision.DefaultWebPortEnvs()
for _, env := range envs {
isDefaultPortEnv := false
for _, defaultEnv := range defaultPortEnvs {
if env.Name == defaultEnv.Name {
isDefaultPortEnv = true
break
}
}
if !isDefaultPortEnv {
envsWithoutPort = append(envsWithoutPort, env)
}
}
return envsWithoutPort
}
func isDefaultPort(portsConfig []provTypes.TsuruYamlKubernetesProcessPortConfig) bool {
if len(portsConfig) != 1 {
return false
}
defaultPort := defaultKubernetesPodPortConfig()
return portsConfig[0].Protocol == defaultPort.Protocol &&
portsConfig[0].Port == defaultPort.Port &&
portsConfig[0].TargetPort == defaultPort.TargetPort
}
func (p *kubernetesProvisioner) HandlesHC() bool {
return true
}
func (p *kubernetesProvisioner) ToggleRoutable(ctx context.Context, a provision.App, version appTypes.AppVersion, isRoutable bool) error {
client, err := clusterForPool(ctx, a.GetPool())
if err != nil {
return err
}
depsData, err := deploymentsDataForApp(ctx, client, a)
if err != nil {
return err
}
depsForVersion, ok := depsData.versioned[version.Version()]
if !ok {
return errors.Errorf("no deployment found for version %v", version.Version())
}
for _, depData := range depsForVersion {
err = toggleRoutableDeployment(ctx, client, version.Version(), depData.dep, isRoutable)
if err != nil {
return err
}
}
return ensureAutoScale(ctx, client, a, "")
}
func toggleRoutableDeployment(ctx context.Context, client *ClusterClient, version int, dep *appsv1.Deployment, isRoutable bool) (err error) {
ls := labelOnlySetFromMetaPrefix(&dep.ObjectMeta, false)
ls.ToggleIsRoutable(isRoutable)
ls.SetVersion(version)
dep.Spec.Paused = true
dep.ObjectMeta.Labels = ls.WithoutVersion().ToLabels()
dep.Spec.Template.ObjectMeta.Labels = ls.ToLabels()
_, err = client.AppsV1().Deployments(dep.Namespace).Update(ctx, dep, metav1.UpdateOptions{})
if err != nil {
return errors.WithStack(err)
}
defer func() {
if err != nil {
return
}
dep, err = client.AppsV1().Deployments(dep.Namespace).Get(ctx, dep.Name, metav1.GetOptions{})
if err != nil {
err = errors.WithStack(err)
return
}
dep.Spec.Paused = false
_, err = client.AppsV1().Deployments(dep.Namespace).Update(ctx, dep, metav1.UpdateOptions{})
if err != nil {
err = errors.WithStack(err)
}
}()
rs, err := activeReplicaSetForDeployment(ctx, client, dep)
if err != nil {
if k8sErrors.IsNotFound(errors.Cause(err)) {
return nil
}
return err
}
ls = labelOnlySetFromMetaPrefix(&rs.ObjectMeta, false)
ls.ToggleIsRoutable(isRoutable)
ls.SetVersion(version)
rs.ObjectMeta.Labels = ls.ToLabels()
rs.Spec.Template.ObjectMeta.Labels = ls.ToLabels()
_, err = client.AppsV1().ReplicaSets(rs.Namespace).Update(ctx, rs, metav1.UpdateOptions{})
if err != nil {
return errors.WithStack(err)
}
pods, err := podsForReplicaSet(ctx, client, rs)
if err != nil {
return err
}
for _, pod := range pods {
ls = labelOnlySetFromMetaPrefix(&pod.ObjectMeta, false)
ls.ToggleIsRoutable(isRoutable)
ls.SetVersion(version)
pod.ObjectMeta.Labels = ls.ToLabels()
_, err = client.CoreV1().Pods(pod.Namespace).Update(ctx, &pod, metav1.UpdateOptions{})
if err != nil {
return errors.WithStack(err)
}
}
return nil
}
func (p *kubernetesProvisioner) DeployedVersions(ctx context.Context, a provision.App) ([]int, error) {
client, err := clusterForPool(ctx, a.GetPool())
if err != nil {
return nil, err
}
deps, err := deploymentsDataForApp(ctx, client, a)
if err != nil {
return nil, err
}
var versions []int
for v := range deps.versioned {
versions = append(versions, v)
}
return versions, nil
}
| [
"\"KUBERNETES_SERVICE_HOST\"",
"\"KUBERNETES_SERVICE_PORT\""
]
| []
| [
"KUBERNETES_SERVICE_HOST",
"KUBERNETES_SERVICE_PORT"
]
| [] | ["KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT"] | go | 2 | 0 | |
test/conftest.py | import pytest
import pystatuspage
import os
SECRETS_FILE = 'secrets.json'
@pytest.fixture(scope="module")
def secrets():
if os.path.exists(SECRETS_FILE):
import json
with open(SECRETS_FILE) as json_file:
secrets = json.load(json_file)
else:
secrets = {}
secrets['organization_id'] = os.environ.get('STATUSPAGE_ORG_ID', '12345678')
secrets['key'] = os.environ.get('STATUSPAGE_API_KEY', 'No Key Found')
return secrets
@pytest.fixture(scope="module")
def init_statuspage_with_key(secrets):
organization_id = secrets['organization_id']
key = secrets['key']
api = pystatuspage.StatusPageApi(organization_id, key)
return api
@pytest.fixture(scope="module")
def init_statuspage_without_key(secrets):
organization_id = secrets['organization_id']
api = pystatuspage.StatusPageApi(organization_id)
return api
| []
| []
| [
"STATUSPAGE_ORG_ID",
"STATUSPAGE_API_KEY"
]
| [] | ["STATUSPAGE_ORG_ID", "STATUSPAGE_API_KEY"] | python | 2 | 0 | |
roles/modeling/fast/model.py | # https://www.tensorflow.org/tutorials/load_data/images
# https://www.tensorflow.org/tutorials/keras/overfit_and_underfit
# https://www.tensorflow.org/tutorials/keras/save_and_load
import tensorflow as tf
import logging
import os
import os.path
from tensorflow.keras import layers
from google.cloud import storage
import tarfile
import time
import pandas as pd
import shutil
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)
LOGGER.info(f'TensorFlow version: {tf.version.VERSION}\n')
version = 1
export_path = os.path.join('saved_model/model', str(version))
LOGGER.info(f'Export path: {export_path}\n')
#import pathlib
#dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
#data_dir = tf.keras.utils.get_file(origin=dataset_url,
# fname='trainig',
# untar=True)
#data_dir = pathlib.Path(data_dir)
if not os.path.exists("./data/"):
os.mkdir("./data/")
if not os.path.exists("./data/negative/"):
os.mkdir("./data/negative/")
if not os.path.exists("./data/positive/"):
os.mkdir("./data/positive/")
if not os.path.exists("./training_1/"):
os.mkdir("./training_1/")
if not os.path.exists("./saved_model/"):
os.mkdir("./saved_model/")
if not os.path.exists(f'./saved_model/{version}'):
os.mkdir(f'./saved_model/{version}')
checkpoint_path = "training_1/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
bucket_name = os.getenv('GCS_BUCKET')
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
BATCH_SIZE = 16
IMG_HEIGHT = 518
IMG_WIDTH = 518
NUM_CLASSES = 2
EPOCHS = 50
VALIDATION_SPLIT = 0.2
def download_blobs(userid):
"""Downloads a blob from the bucket."""
LOGGER.info(f'Beginning to download images from bucket {bucket_name} with url users/{userid}/preferences.csv')
blob = bucket.blob(f'users/{userid}/preferences.csv')
blob.download_to_filename(f'./data/preferences.csv')
LOGGER.info(f'Blobs downloaded\n')
def my_list_bucket(bucket_name):
    resource_list = []
    userid_list = []
    l_list = []
a_bucket = storage_client.lookup_bucket(bucket_name)
bucket_iterator = a_bucket.list_blobs()
for resource in bucket_iterator:
if 'users' in resource.name:
resource_list.append(resource.name)
userid = resource.name.split('/')[1]
if userid not in userid_list:
userid_list.append(userid)
for userid in userid_list:
if sum(1 for s in resource_list if userid in s) == 1:
l_list.append(userid)
return(l_list)
def watcher():
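    # Poll the bucket for users that have exactly one object under
    # users/<userid>/ (their uploaded preferences.csv, i.e. no trained model
    # yet); for each of them, download the preferences, build the datasets,
    # train, upload the model archive and clean up.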
userid_list = []
trainer_list = []
while True:
if len(trainer_list) > 0:
LOGGER.info(f'userid(s) {trainer_list} found!')
for userid in trainer_list:
download_blobs(userid)
create_datasets()
train(userid)
upload_blob(userid)
cleanup()
LOGGER.info(f'Process complete for userid: {userid}')
watcher()
else:
LOGGER.info(f'Watching {bucket_name}/users/')
#time.sleep(60*30)
time.sleep(10)
        trainer_list = my_list_bucket(bucket_name)
def create_datasets():
source_image_folder = '../images/unclassified/female'
images = [f for f in os.listdir(f'{source_image_folder}') if os.path.isfile(os.path.join(f'{source_image_folder}', f))]
user_preferences = pd.read_csv(f'./data/preferences.csv')
likes = user_preferences['likes'].tolist()
dislikes = user_preferences['dislikes'].tolist()
for image in images:
if image in likes:
LOGGER.info(f'copying {image} to positive')
shutil.copyfile(f'../images/unclassified/female/{image}', f'./data/positive/{image}')
if image in dislikes:
LOGGER.info(f'copying {image} to negative')
shutil.copyfile(f'../images/unclassified/female/{image}', f'./data/negative/{image}')
def train(userid):
base_source_image = './data/'
positive_source_image = "./data/positive/"
negative_source_image = "./data/negative/"
positive_samples = [f for f in os.listdir(f'{positive_source_image}') if os.path.isfile(os.path.join(f'{positive_source_image}', f))]
negative_samples = [f for f in os.listdir(f'{negative_source_image}') if os.path.isfile(os.path.join(f'{negative_source_image}', f))]
LOGGER.info(f'positive samples: {len(positive_samples)}')
LOGGER.info(f'positive samples: {len(negative_samples)}')
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
base_source_image,
validation_split=VALIDATION_SPLIT,
subset="training",
seed=123,
image_size=(IMG_HEIGHT, IMG_WIDTH),
batch_size=BATCH_SIZE)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
base_source_image,
validation_split=VALIDATION_SPLIT,
subset="validation",
seed=123,
image_size=(IMG_HEIGHT, IMG_WIDTH),
batch_size=BATCH_SIZE)
class_names = train_ds.class_names
LOGGER.info(f'class names: {class_names}')
for image_batch, labels_batch in train_ds:
LOGGER.info(f'image batch shape: {image_batch.shape}')
LOGGER.info(f'labels batch shape:{labels_batch.shape}')
break
AUTOTUNE = tf.data.experimental.AUTOTUNE
train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
data_augmentation = tf.keras.Sequential([
layers.experimental.preprocessing.RandomRotation(0.1),
])
model = make_model(input_shape=(IMG_HEIGHT, IMG_WIDTH) + (3,), num_classes=NUM_CLASSES)
#model = create_model()
# Create a callback that saves the model's weights
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
save_weights_only=True,
verbose=1)
model.compile(
optimizer=tf.keras.optimizers.Adam(1e-3),
loss="binary_crossentropy",
metrics=["accuracy"],
)
model.fit(
train_ds,
validation_data=val_ds,
epochs=EPOCHS,
callbacks=[cp_callback]
)
#model.save('saved_model/model')
tf.keras.models.save_model(
model,
export_path,
overwrite=True,
include_optimizer=True,
save_format=None,
signatures=None,
options=None
)
def make_tarfile(output_filename, source_dir):
with tarfile.open(output_filename, "w:gz") as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir))
#LOGGER.info(f'\nCreating archive of model weights\n')
#make_tarfile('model-weights.archive.tar.gz', './training_1')
LOGGER.info(f'\nCreating tar.gz of saved_model\n')
make_tarfile('model.tar.gz', './saved_model')
def upload_blob(userid):
LOGGER.info(f'\nUploading files to storage\n')
client = storage.Client()
bucket_name = os.getenv('GCS_BUCKET')
bucket = client.get_bucket(bucket_name)
blob = bucket.blob(f'users/{userid}/saved_model/model.tar.gz')
blob.upload_from_filename('model.tar.gz')
LOGGER.info(f'File model.tar.gz uploaded to {bucket_name}/users/{userid}/saved_model/model.tar.gz')
def cleanup():
if os.path.exists("model-weights.archive.tar.gz"):
LOGGER.info("Clean up: removing archive file")
os.remove('model-weights.archive.tar.gz')
def _cleanup(name):
time.sleep(5)
if os.path.exists(name):
LOGGER.info("Clean up: removing model checkpoints")
folder = name
for filename in os.listdir(folder):
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
LOGGER.info('Failed to delete %s. Reason: %s' % (file_path, e))
_cleanup("./training_1")
_cleanup("./saved_model")
# Define the model: a small functional CNN with separable convolutions and residual connections
def make_model(input_shape, num_classes):
inputs = tf.keras.Input(shape=input_shape)
    # Image augmentation block (built here so that make_model does not depend
    # on the data_augmentation pipeline that train() defines locally)
    data_augmentation = tf.keras.Sequential([
        layers.experimental.preprocessing.RandomRotation(0.1),
    ])
    x = data_augmentation(inputs)
# Entry block
x = layers.experimental.preprocessing.Rescaling(1.0 / 255)(x)
x = layers.Conv2D(32, 3, strides=2, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.Conv2D(64, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
previous_block_activation = x # Set aside residual
for size in [128, 256, 512, 728]:
x = layers.Activation("relu")(x)
x = layers.SeparableConv2D(size, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.SeparableConv2D(size, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.MaxPooling2D(3, strides=2, padding="same")(x)
# Project residual
residual = layers.Conv2D(size, 1, strides=2, padding="same")(
previous_block_activation
)
x = layers.add([x, residual]) # Add back residual
previous_block_activation = x # Set aside next residual
x = layers.SeparableConv2D(1024, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.GlobalAveragePooling2D()(x)
if num_classes == 2:
activation = "sigmoid"
units = 1
else:
activation = "softmax"
units = num_classes
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(units, activation=activation)(x)
return tf.keras.Model(inputs, outputs)
watcher() | []
| []
| [
"GCS_BUCKET"
]
| [] | ["GCS_BUCKET"] | python | 1 | 0 | |
egs/voxceleb/v1/nnet/lib/extract.py | import argparse
import numpy as np
import os
import sys
import numpy, scipy, sklearn
from model.trainer import Trainer
from misc.utils import Params
from dataset.kaldi_io import FeatureReader, open_or_fd, read_mat_ark, write_vec_flt
from six.moves import range
parser = argparse.ArgumentParser()
parser.add_argument("-g", "--gpu", type=int, default=-1, help="The GPU id. GPU disabled if -1.")
parser.add_argument("-m", "--min-chunk-size", type=int, default=25, help="The minimum length of the segments. Any segment shorted than this value will be ignored.")
parser.add_argument("-s", "--chunk-size", type=int, default=10000, help="The length of the segments used to extract the embeddings. "
"Segments longer than this value will be splited before extraction. "
"Then the splited embeddings will be averaged to get the final embedding. "
"L2 normalizaion will be applied before the averaging if specified.")
parser.add_argument("-n", "--normalize", action="store_true", help="Normalize the embedding before averaging and output.")
parser.add_argument("--node", type=str, default="", help="The node to output the embeddings.")
parser.add_argument("model_dir", type=str, help="The model directory.")
parser.add_argument("rspecifier", type=str, help="Kaldi feature rspecifier (or ark file).")
parser.add_argument("wspecifier", type=str, help="Kaldi output wspecifier (or ark file).")
args = parser.parse_args()
if args.gpu == -1:
# Disable GPU
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# In the GPU situation, it is difficult to know how to specify the GPU id.
# If the program is launched locally, you can set CUDA_VISIBLE_DEVICES to the id.
# However, if SGE is used, we cannot simply set CUDA_VISIBLE_DEVICES.
# So it is better to specify the GPU id outside the program.
# Give an arbitrary number (except for -1) to --gpu can enable it. Leave it blank if you want to disable gpu.
import tensorflow as tf
if __name__ == '__main__':
tf.reset_default_graph()
tf.logging.set_verbosity(tf.logging.INFO)
nnet_dir = os.path.join(args.model_dir, "nnet")
config_json = os.path.join(args.model_dir, "nnet/config.json")
if not os.path.isfile(config_json):
sys.exit("Cannot find params.json in %s" % config_json)
params = Params(config_json)
# Change the output node if necessary
if len(args.node) != 0:
params.embedding_node = args.node
tf.logging.info("Extract embedding from %s" % params.embedding_node)
with open(os.path.join(nnet_dir, "feature_dim"), "r") as f:
dim = int(f.readline().strip())
#trainer = Trainer(params, args.model_dir, dim, single_cpu=True)
trainer = Trainer(params, args.model_dir, dim)
trainer.build("predict")
if args.rspecifier.rsplit(".", 1)[1] == "scp":
# The rspecifier cannot be scp
sys.exit("The rspecifier must be ark or input pipe")
fp_out = open_or_fd(args.wspecifier, "wb")
# import pdb;pdb.set_trace()
# args.rspecifier=args.rspecifier.replace('JOB', '1')
for index, (key, feature) in enumerate(read_mat_ark(args.rspecifier)):
if feature.shape[0] < args.min_chunk_size:
tf.logging.info("[INFO] Key %s length too short, %d < %d, skip." % (key, feature.shape[0], args.min_chunk_size))
continue
if feature.shape[0] > args.chunk_size:
feature_array = []
feature_length = []
num_chunks = int(np.ceil(float(feature.shape[0] - args.chunk_size) / (args.chunk_size / 2))) + 1
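            # Chunks start every chunk_size/2 frames (50% overlap); their
            # embeddings are combined below with an average weighted by each
            # chunk's length.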
tf.logging.info("[INFO] Key %s length %d > %d, split to %d segments." % (key, feature.shape[0], args.chunk_size, num_chunks))
for i in range(num_chunks):
start = int(i * (args.chunk_size / 2))
this_chunk_size = args.chunk_size if feature.shape[0] - start > args.chunk_size else feature.shape[0] - start
feature_length.append(this_chunk_size)
feature_array.append(feature[start:start+this_chunk_size])
feature_length = np.expand_dims(np.array(feature_length), axis=1)
# Except for the last feature, the length of other features should be the same (=chunk_size)
embeddings = trainer.predict(np.array(feature_array[:-1], dtype=np.float32))
embedding_last = trainer.predict(feature_array[-1])
embeddings = np.concatenate([embeddings, np.expand_dims(embedding_last, axis=0)], axis=0)
if args.normalize:
embeddings /= np.sqrt(np.sum(np.square(embeddings), axis=1, keepdims=True))
embedding = np.sum(embeddings * feature_length, axis=0) / np.sum(feature_length)
else:
tf.logging.info("[INFO] Key %s length %d." % (key, feature.shape[0]))
embedding = trainer.predict(feature)
tf.logging.info("[INFO] Key %s finished predicting" % (key))
if args.normalize:
embedding /= np.sqrt(np.sum(np.square(embedding)))
write_vec_flt(fp_out, embedding, key=key)
tf.logging.info("[INFO] Key %s finished writing" % (key))
fp_out.close()
trainer.close()
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
src/client/client.go | package client
import (
"crypto/x509"
"io/ioutil"
"net/http"
"os"
"github.com/gauravgahlot/tink-wizard/src/pkg/redis"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/tinkerbell/tink/protos/hardware"
"github.com/tinkerbell/tink/protos/template"
"github.com/tinkerbell/tink/protos/workflow"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
// gRPC clients
var (
cache *redis.Cache
templateClient template.TemplateClient
hardwareClient hardware.HardwareServiceClient
workflowClient workflow.WorkflowSvcClient
)
// Init initializes a gRPC connection with server
func Init() {
conn, err := getConnection()
if err != nil {
log.Fatal(err)
}
templateClient = template.NewTemplateClient(conn)
hardwareClient = hardware.NewHardwareServiceClient(conn)
workflowClient = workflow.NewWorkflowSvcClient(conn)
}
// getConnection returns a gRPC client connection
func getConnection() (*grpc.ClientConn, error) {
certURL := os.Getenv("TINKERBELL_CERT_URL")
if certURL == "" {
return nil, errors.New("undefined TINKERBELL_CERT_URL")
}
resp, err := http.Get(certURL)
if err != nil {
return nil, errors.Wrap(err, "fetch cert")
}
defer resp.Body.Close()
certs, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, errors.Wrap(err, "read cert")
}
cp := x509.NewCertPool()
ok := cp.AppendCertsFromPEM(certs)
if !ok {
return nil, errors.Wrap(err, "parse cert")
}
grpcAuthority := os.Getenv("TINKERBELL_GRPC_AUTHORITY")
if grpcAuthority == "" {
return nil, errors.New("undefined TINKERBELL_GRPC_AUTHORITY")
}
creds := credentials.NewClientTLSFromCert(cp, "")
conn, err := grpc.Dial(grpcAuthority, grpc.WithTransportCredentials(creds))
if err != nil {
return nil, errors.Wrap(err, "connect to tinkerbell server")
}
return conn, nil
}
func init() {
cache = redis.Instance()
}
| [
"\"TINKERBELL_CERT_URL\"",
"\"TINKERBELL_GRPC_AUTHORITY\""
]
| []
| [
"TINKERBELL_GRPC_AUTHORITY",
"TINKERBELL_CERT_URL"
]
| [] | ["TINKERBELL_GRPC_AUTHORITY", "TINKERBELL_CERT_URL"] | go | 2 | 0 | |
SW_Deberta/make_oofs.py | from sklearn.model_selection import StratifiedKFold
import os, sys
# DECLARE HOW MANY GPUS YOU WISH TO USE.
# KAGGLE ONLY HAS 1, BUT OFFLINE, YOU CAN USE MORE
import argparse
def get_args():
parser = argparse.ArgumentParser()
#parser.add_argument('--disc_type', type=int, default=0, help='disc_type')
parser.add_argument('--fold', type=int, default=0, help='fold')
parser.add_argument('--gpu_id', type=str, default='0', help='gpu_id')
opts = parser.parse_args()
return opts
args=get_args()
os.environ["CUDA_VISIBLE_DEVICES"]= str(args.gpu_id) #0,1,2,3 for four gpu
out_dir='seqclassifiers_v3'
os.system(f'mkdir {out_dir}')
# VERSION FOR SAVING MODEL WEIGHTS
VER=26
# IF VARIABLE IS NONE, THEN NOTEBOOK COMPUTES TOKENS
# OTHERWISE NOTEBOOK LOADS TOKENS FROM PATH
LOAD_TOKENS_FROM = '../../input/py-bigbird-v26'
# IF VARIABLE IS NONE, THEN NOTEBOOK TRAINS A NEW MODEL
# OTHERWISE IT LOADS YOUR PREVIOUSLY TRAINED MODEL
LOAD_MODEL_FROM = 'models'
# Use the entire ensemble.
ENSEMBLE_IDS = [args.fold]
# Setting Fold = None leaves out an arbitrary 10% of the dataset for sequence classifier training.
# Setting Fold to one of [0,1,2,3,4] leaves out the portion of the dataset not trained on by the corresponding ensemble model.
# 'half' leaves out an arbitrary 50%.
FOLD = args.fold
# print(FOLD)
# exit()
# IF FOLLOWING IS NONE, THEN NOTEBOOK
# USES INTERNET AND DOWNLOADS HUGGINGFACE
# CONFIG, TOKENIZER, AND MODEL
DOWNLOADED_MODEL_PATH = '../../input/deberta-xlarge/'
if DOWNLOADED_MODEL_PATH is None:
DOWNLOADED_MODEL_PATH = 'model'
MODEL_NAME = 'allenai/longformer-large-4096'
# Tune the probability threshold for sequence classifiers to maximize F1
TRAIN_SEQ_CLASSIFIERS = False
KAGGLE_CACHE = 'cache' #location of valid_pred files
cache = 'cache' #save location of valid_seqds files
cacheExists = os.path.exists(cache)
if not cacheExists:
os.makedirs(cache)
print(ENSEMBLE_IDS)
# In[90]:
# skopt optimizer has a bug when scipy is installed with its default version
if TRAIN_SEQ_CLASSIFIERS:
os.system('pip install --no-dependencies scipy==1.5.2 ')
# In[91]:
from torch import cuda
config = {'model_name': MODEL_NAME,
'max_length': 2048,
'train_batch_size':4,
'valid_batch_size':1,
'epochs':5,
'learning_rates': [2.5e-5, 2.5e-5, 2.5e-6, 2.5e-6, 2.5e-7],
'max_grad_norm':10,
'device': 'cuda' if cuda.is_available() else 'cpu'}
# # How To Submit PyTorch Without Internet
# Many people ask me, how do I submit PyTorch models without internet? With HuggingFace Transformers, it's easy. Just download the following 3 things: (1) model weights, (2) tokenizer files, (3) config file, and upload them to a Kaggle dataset. The code below shows how to get the files from HuggingFace for Google's BigBird-base, but the same code can download any transformer, for example roberta-base.
# In[92]:
from transformers import *
if DOWNLOADED_MODEL_PATH == 'model':
os.mkdir('model')
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, add_prefix_space=True)
tokenizer.save_pretrained('model')
config_model = AutoConfig.from_pretrained(MODEL_NAME)
config_model.num_labels = 15
config_model.save_pretrained('model')
backbone = AutoModelForTokenClassification.from_pretrained(MODEL_NAME,
config=config_model)
backbone.save_pretrained('model')
# # Load Data and Libraries
# In addition to loading the train dataframe, we will load all the train and text files and save them in a dataframe.
# In[93]:
import numpy as np, os
from scipy import stats
import pandas as pd, gc
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModelForTokenClassification, AdamW
from torch.utils.data import Dataset, DataLoader
import torch
from sklearn.metrics import accuracy_score
from torch.cuda import amp
# In[94]:
train_df = pd.read_csv('../../input/feedback-prize-2021/train.csv')
print( train_df.shape )
train_df.head()
# In[95]:
# https://www.kaggle.com/raghavendrakotala/fine-tunned-on-roberta-base-as-ner-problem-0-533
test_names, test_texts = [], []
for f in list(os.listdir('../../input/feedback-prize-2021/test')):
test_names.append(f.replace('.txt', ''))
test_texts.append(open('../../input/feedback-prize-2021/test/' + f, 'r').read())
test_texts = pd.DataFrame({'id': test_names, 'text': test_texts})
test_texts['len']=test_texts['text'].apply(lambda x:len(x.split()))
test_texts=test_texts.sort_values(by=['len']).reset_index()
test_texts
SUBMISSION = False
if len(test_names) > 5:
SUBMISSION = True
test_texts.head()
# In[96]:
# https://www.kaggle.com/raghavendrakotala/fine-tunned-on-roberta-base-as-ner-problem-0-533
test_names, train_texts = [], []
for f in tqdm(list(os.listdir('../../input/feedback-prize-2021/train'))):
test_names.append(f.replace('.txt', ''))
train_texts.append(open('../../input/feedback-prize-2021/train/' + f, 'r').read())
train_text_df = pd.DataFrame({'id': test_names, 'text': train_texts})
train_text_df.head()
# # Convert Train Text to NER Labels
# We will now convert all text words into NER labels and save in a dataframe.
# In[97]:
if not LOAD_TOKENS_FROM:
all_entities = []
for ii,i in enumerate(train_text_df.iterrows()):
if ii%100==0: print(ii,', ',end='')
total = i[1]['text'].split().__len__()
entities = ["O"]*total
for j in train_df[train_df['id'] == i[1]['id']].iterrows():
discourse = j[1]['discourse_type']
list_ix = [int(x) for x in j[1]['predictionstring'].split(' ')]
entities[list_ix[0]] = f"B-{discourse}"
for k in list_ix[1:]: entities[k] = f"I-{discourse}"
all_entities.append(entities)
train_text_df['entities'] = all_entities
train_text_df.to_csv('train_NER.csv',index=False)
else:
from ast import literal_eval
train_text_df = pd.read_csv(f'{LOAD_TOKENS_FROM}/train_NER.csv')
# pandas saves lists as string, we must convert back
train_text_df.entities = train_text_df.entities.apply(lambda x: literal_eval(x) )
print( train_text_df.shape )
train_text_df.head()
# In[98]:
# CREATE DICTIONARIES THAT WE CAN USE DURING TRAIN AND INFER
output_labels = ['O', 'B-Lead', 'I-Lead', 'B-Position', 'I-Position', 'B-Claim', 'I-Claim', 'B-Counterclaim', 'I-Counterclaim',
'B-Rebuttal', 'I-Rebuttal', 'B-Evidence', 'I-Evidence', 'B-Concluding Statement', 'I-Concluding Statement']
labels_to_ids = {v:k for k,v in enumerate(output_labels)}
ids_to_labels = {k:v for k,v in enumerate(output_labels)}
disc_type_to_ids = {'Evidence':(11,12),'Claim':(5,6),'Lead':(1,2),'Position':(3,4),'Counterclaim':(7,8),'Rebuttal':(9,10),'Concluding Statement':(13,14)}
# In[99]:
labels_to_ids
# # Define the dataset function
# Below is our PyTorch dataset function. It always outputs tokens and attention. During training it also provides labels. And during inference it also provides word ids to help convert token predictions into word predictions.
#
# Note that we use `text.split()` and `is_split_into_words=True` when we convert train text to labeled train tokens. This is how the HuggingFace tutorial does it. However, this removes characters like `\n` (new paragraph). If we want the model to see new paragraphs, we need to map words to tokens ourselves using `return_offsets_mapping=True`. See my TensorFlow notebook [here][1] for an example.
#
# Some of the following code comes from the example at HuggingFace [here][2]. However, I think the code at that link is wrong; the original HuggingFace code is [here][3]. With the flag `LABEL_ALL` we can either label just the first subword token (when one word has more than one subword token) or label all the subword tokens (with the word's label). In this notebook version, we label all the tokens. There is a Kaggle discussion [here][4].
#
# [1]: https://www.kaggle.com/cdeotte/tensorflow-longformer-ner-cv-0-617
# [2]: https://huggingface.co/docs/transformers/custom_datasets#tok_ner
# [3]: https://github.com/huggingface/transformers/blob/86b40073e9aee6959c8c85fcba89e47b432c4f4d/examples/pytorch/token-classification/run_ner.py#L371
# [4]: https://www.kaggle.com/c/feedback-prize-2021/discussion/296713
# In[100]:
# Return an array that maps character index to index of word in list of split() words
def split_mapping(unsplit):
splt = unsplit.split()
offset_to_wordidx = np.full(len(unsplit),-1)
txt_ptr = 0
for split_index, full_word in enumerate(splt):
while unsplit[txt_ptr:txt_ptr + len(full_word)] != full_word:
txt_ptr += 1
offset_to_wordidx[txt_ptr:txt_ptr + len(full_word)] = split_index
txt_ptr += len(full_word)
return offset_to_wordidx
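# Illustrative example (not part of the original notebook): for the text
# "Hi  there", split() gives ["Hi", "there"] and split_mapping returns
# [0, 0, -1, -1, 1, 1, 1, 1, 1]: the characters of "Hi" map to word 0, the two
# spaces map to -1, and the characters of "there" map to word 1.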
def iter_split(data,labels,fold,nfolds=5,seed=2020):
splits = StratifiedKFold(n_splits=nfolds, random_state=seed, shuffle=True)
splits = list(splits.split(data,labels))
# splits = np.zeros(len(data)).astype(np.int)
# for i in range(nfolds): splits[splits[i][1]] = i
# indices=np.arange(len(data))
train_indices=splits[fold][0]
val_indices=splits[fold][1]
return train_indices, val_indices
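# Illustrative example (not part of the original script):
# iter_split(np.arange(10), np.ones(10), fold=0, nfolds=5) returns the train
# and validation index arrays (8 and 2 elements here) for the first of five
# stratified folds.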
# In[101]:
class dataset(Dataset):
def __init__(self, dataframe, tokenizer, max_len, get_wids):
self.len = len(dataframe)
self.data = dataframe
self.tokenizer = tokenizer
self.max_len = max_len
self.get_wids = get_wids # for validation
def __getitem__(self, index):
# GET TEXT AND WORD LABELS
text = self.data.text[index]
word_labels = self.data.entities[index] if not self.get_wids else None
# TOKENIZE TEXT
encoding = self.tokenizer(text,
return_offsets_mapping=True,
padding=False,
truncation=True,
max_length=self.max_len)
word_ids = encoding.word_ids()
split_word_ids = np.full(len(word_ids),-1)
offset_to_wordidx = split_mapping(text)
offsets = encoding['offset_mapping']
# CREATE TARGETS AND MAPPING OF TOKENS TO SPLIT() WORDS
label_ids = []
# Iterate in reverse to label whitespace tokens until a Begin token is encountered
for token_idx, word_idx in reversed(list(enumerate(word_ids))):
if word_idx is None:
if not self.get_wids: label_ids.append(-100)
else:
if offsets[token_idx][0] != offsets[token_idx][1]:
#Choose the split word that shares the most characters with the token if any
split_idxs = offset_to_wordidx[offsets[token_idx][0]:offsets[token_idx][1]]
split_index = stats.mode(split_idxs[split_idxs != -1]).mode[0] if len(np.unique(split_idxs)) > 1 else split_idxs[0]
if split_index != -1:
if not self.get_wids: label_ids.append( labels_to_ids[word_labels[split_index]] )
split_word_ids[token_idx] = split_index
else:
# Even if we don't find a word, continue labeling 'I' tokens until a 'B' token is found
if label_ids and label_ids[-1] != -100 and ids_to_labels[label_ids[-1]][0] == 'I':
split_word_ids[token_idx] = split_word_ids[token_idx + 1]
if not self.get_wids: label_ids.append(label_ids[-1])
else:
if not self.get_wids: label_ids.append(-100)
else:
if not self.get_wids: label_ids.append(-100)
encoding['labels'] = list(reversed(label_ids))
# CONVERT TO TORCH TENSORS
item = {key: torch.as_tensor(val) for key, val in encoding.items()}
if self.get_wids:
item['wids'] = torch.as_tensor(split_word_ids)
return item
def __len__(self):
return self.len
class CustomCollate:
def __init__(self,tokenizer,sliding_window=None):
self.tokenizer=tokenizer
self.sliding_window=sliding_window
def __call__(self,data):
"""
need to collate: input_ids, attention_mask, labels
input_ids is padded with 1, attention_mask 0, labels -100
"""
bs=len(data)
lengths=[]
for i in range(bs):
lengths.append(len(data[i]['input_ids']))
# print(data[i]['input_ids'].shape)
# print(data[i]['attention_mask'].shape)
# print(data[i]['labels'].shape)
max_len=max(lengths)
if self.sliding_window is not None and max_len > self.sliding_window:
max_len= int((np.floor(max_len/self.sliding_window-1e-6)+1)*self.sliding_window)
#always pad the right side
input_ids, attention_mask, labels, BIO_labels, discourse_labels=[],[],[],[],[]
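        # NOTE: labels/BIO_labels/discourse_labels stay empty here (their
        # padding code is commented out); this collator is only used for
        # inference, where just input_ids, attention_mask and wids matter.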
#if np.random.uniform()>0.5:
#print(data[0].keys())
# print(data[0].keys())
# exit()
#print(max_len)
if 'wids' in data[0]:
get_wids=True
else:
get_wids=False
#print(get_wids)
wids = []
#wids.append(torch.nn.functional.pad(data[i]['wids'],(0,max_len-lengths[i]),value=-1))
for i in range(bs):
input_ids.append(torch.nn.functional.pad(data[i]['input_ids'],(0,max_len-lengths[i]),value=self.tokenizer.pad_token_id))
attention_mask.append(torch.nn.functional.pad(data[i]['attention_mask'],(0,max_len-lengths[i]),value=0))
#labels.append(torch.nn.functional.pad(data[i]['labels'],(0,max_len-lengths[i]),value=-100))
#BIO_labels.append(torch.nn.functional.pad(data[i]['BIO_labels'],(0,max_len-lengths[i]),value=-100))
#discourse_labels.append(torch.nn.functional.pad(data[i]['discourse_labels'],(0,max_len-lengths[i]),value=-100))
if get_wids:
wids.append(torch.nn.functional.pad(data[i]['wids'],(0,max_len-lengths[i]),value=-1))
# else:
# for i in range(bs):
# input_ids.append(torch.nn.functional.pad(data[i]['input_ids'],(max_len-lengths[i],0),value=1))
# attention_mask.append(torch.nn.functional.pad(data[i]['attention_mask'],(max_len-lengths[i],0),value=0))
# labels.append(torch.nn.functional.pad(data[i]['labels'],(max_len-lengths[i],0),value=-100))
input_ids=torch.stack(input_ids)
attention_mask=torch.stack(attention_mask)
#labels=torch.stack(labels)
#BIO_labels=torch.stack(BIO_labels)
#discourse_labels=torch.stack(discourse_labels)
if get_wids:
wids=torch.stack(wids)
#exit()
if get_wids:
return {"input_ids":input_ids,"attention_mask":attention_mask,
"labels":labels,"BIO_labels":BIO_labels,"discourse_labels":discourse_labels,
"wids":wids}
else:
return {"input_ids":input_ids,"attention_mask":attention_mask,
"labels":labels,"BIO_labels":BIO_labels,"discourse_labels":discourse_labels}
# # Create Train and Validation Dataloaders
# We will use the same train and validation subsets as my TensorFlow notebook [here][1]. Then we can compare results. And/or experiment with ensembling the validation fold predictions.
#
# [1]: https://www.kaggle.com/cdeotte/tensorflow-longformer-ner-cv-0-617
# In[102]:
# CHOOSE VALIDATION INDEXES (that match my TF notebook)
IDS = train_df.id.unique()
np.random.seed(42)
if FOLD == 'half':
train_idx = np.random.choice(np.arange(len(IDS)),int(0.5*len(IDS)),replace=False)
valid_idx = np.setdiff1d(np.arange(len(IDS)),train_idx)
elif FOLD == 'full':
train_idx = np.random.choice(np.arange(len(IDS)),int(0.5*len(IDS)),replace=False)
valid_idx = np.arange(len(IDS))
# elif FOLD is not None:
# print('There are',len(IDS),'train texts. We will split 93% 7% for ensemble training.')
# shuffled_ids = np.arange(len(IDS))
# np.random.shuffle(shuffled_ids)
#
# valid_len = int(.07 * len(IDS))
# valid_idx = shuffled_ids[FOLD*valid_len:(FOLD+1)*valid_len]
# train_idx = np.setdiff1d(np.arange(len(IDS)),valid_idx)
else:
    print('There are',len(IDS),'train texts. We will use an 8-fold split for ensemble training.')
#train_idx = np.random.choice(np.arange(len(IDS)),int(0.9*len(IDS)),replace=False)
#valid_idx = np.setdiff1d(np.arange(len(IDS)),train_idx)
train_idx, valid_idx= iter_split(np.arange(len(IDS)),np.ones(len(IDS)),args.fold,nfolds=8)
TRAIN_IDS=IDS[train_idx]
VAL_IDS=IDS[valid_idx]
# print(len(valid_idx))
# exit()
# print(VAL_IDS)
# exit()
np.random.seed(None)
# In[103]:
# CREATE TRAIN SUBSET AND VALID SUBSET
data = train_text_df[['id','text', 'entities']]
train_dataset = data.loc[data['id'].isin(IDS[train_idx]),['text', 'entities']].reset_index(drop=True)
test_dataset = data.loc[data['id'].isin(IDS[valid_idx])].reset_index(drop=True)
print(test_dataset.id)
# # print(VAL_IDS)
#exit()
print("FULL Dataset: {}".format(data.shape))
print("TRAIN Dataset: {}".format(train_dataset.shape))
print("TEST Dataset: {}".format(test_dataset.shape))
tokenizer = AutoTokenizer.from_pretrained(DOWNLOADED_MODEL_PATH)
training_set = dataset(train_dataset, tokenizer, config['max_length'], False)
testing_set = dataset(test_dataset, tokenizer, config['max_length'], True)
# In[111]:
# TRAIN DATASET AND VALID DATASET
train_params = {'batch_size': config['train_batch_size'],
'shuffle': True,
'num_workers': 2,
'pin_memory':True
}
test_params = {'batch_size': config['valid_batch_size'],
'shuffle': False,
'num_workers': 2,
'pin_memory':True
}
training_loader = DataLoader(training_set, **train_params)
testing_loader = DataLoader(testing_set, **test_params,collate_fn=CustomCollate(tokenizer))
# TEST DATASET
test_texts_set = dataset(test_texts, tokenizer, config['max_length'], True)
test_texts_loader = DataLoader(test_texts_set, **test_params,collate_fn=CustomCollate(tokenizer))
#exit()
# In[112]:
from transformers import *
import torch.nn as nn
import torch.nn.functional as F
rearrange_indices=[14, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
class ResidualLSTM(nn.Module):
def __init__(self, d_model, rnn='GRU'):
super(ResidualLSTM, self).__init__()
self.downsample=nn.Linear(d_model,d_model//2)
if rnn=='GRU':
self.LSTM=nn.GRU(d_model//2, d_model//2, num_layers=2, bidirectional=False, dropout=0.2)
else:
self.LSTM=nn.LSTM(d_model//2, d_model//2, num_layers=2, bidirectional=False, dropout=0.2)
self.dropout1=nn.Dropout(0.2)
self.norm1= nn.LayerNorm(d_model//2)
self.linear1=nn.Linear(d_model//2, d_model*4)
self.linear2=nn.Linear(d_model*4, d_model)
self.dropout2=nn.Dropout(0.2)
self.norm2= nn.LayerNorm(d_model)
def forward(self, x):
x=x.permute(1,0,2)
res=x
x=self.downsample(x)
x, _ = self.LSTM(x)
x=self.dropout1(x)
x=self.norm1(x)
x=F.relu(self.linear1(x))
x=self.linear2(x)
x=self.dropout2(x)
x=res+x
x=x.permute(1,0,2)
return self.norm2(x)
def noop(x): return x
class ResBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding="same", use_bn=False):
super().__init__()
self.idconv = noop if in_channels == out_channels \
else nn.Conv1d(in_channels, out_channels, 1, stride=1)
if padding == "same":
padding = kernel_size // 2 * dilation
if use_bn:
self.conv = nn.Sequential(
nn.Conv1d(in_channels, out_channels, kernel_size, padding=padding, stride=stride, dilation=dilation),
nn.BatchNorm1d(out_channels),
)
else:
self.conv = nn.Sequential(
nn.Conv1d(in_channels, out_channels, kernel_size, padding=padding, stride=stride, dilation=dilation),
)
def forward(self, x):
return F.relu(self.conv(x) + self.idconv(x))
class ResNet(nn.Module):
def __init__(self, use_msd=False,
cnn_dim=512, input_dim=1024, kernel_sizes=[3,5,7,9], use_bn=False):
super().__init__()
self.use_msd = use_msd
self.cnn = nn.Sequential(
ResBlock(input_dim, cnn_dim, kernel_size=kernel_sizes[0], use_bn=use_bn),
ResBlock(cnn_dim, cnn_dim, kernel_size=kernel_sizes[1], use_bn=use_bn),
# ResBlock(cnn_dim, cnn_dim, kernel_size=kernel_sizes[2], use_bn=use_bn),
# ResBlock(cnn_dim, cnn_dim, kernel_size=kernel_sizes[3], use_bn=use_bn),
)
self.logits = nn.Linear(cnn_dim, 1024)
self.high_dropout = nn.Dropout(p=0.5)
self.dropout1 = nn.Dropout(p=0.1)
self.dropout2 = nn.Dropout(p=0.1)
def forward(self, x):
x = x.permute(0,2,1)
features = self.cnn(self.dropout1(x)).permute(0, 2, 1) # [Bs x T x nb_ft]
# print(f'features: {features.shape}')
#if self.use_msd and self.training:
features = torch.mean(
torch.stack(
[self.high_dropout(features) for _ in range(5)],
dim=0,
),
dim=0,
)
features=self.logits(features)
# else:
# logits = self.logits(self.dropout2(features))
# print(f'logits: {logits.shape}')
return features
class SlidingWindowTransformerModel(nn.Module):
def __init__(self,DOWNLOADED_MODEL_PATH, rnn='GRU', window_size=512, edge_len=64, no_backbone=False):
super(SlidingWindowTransformerModel, self).__init__()
config_model = AutoConfig.from_pretrained(DOWNLOADED_MODEL_PATH+'/config.json')
self.no_backbone=no_backbone
if no_backbone:
pass
else:
self.backbone=AutoModel.from_pretrained(
DOWNLOADED_MODEL_PATH+'/pytorch_model.bin',config=config_model)
if rnn=="GRU" or rnn=='LSTM':
self.lstm=ResidualLSTM(1024,rnn)
else:
self.lstm=ResNet()
self.classification_head=nn.Linear(1024,15)
self.window_size=window_size
self.edge_len=edge_len
self.inner_len=window_size-edge_len*2
def forward(self,input_ids,attention_mask,return_transformer_hidden_states=False):
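        # Sliding-window trick for long inputs: sequences longer than
        # window_size are run through the backbone in overlapping windows.
        # The first window keeps all of its hidden states; every following
        # window keeps only its middle inner_len states, using the edge_len
        # tokens on each side purely as context. The kept pieces are
        # concatenated back into a full-length sequence before the RNN/CNN
        # head and the classification layer.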
# print(L)
# exit()
#x=self.backbone(input_ids=input_ids,attention_mask=attention_mask,return_dict=False)[0]
if self.no_backbone==False:
B,L=input_ids.shape
if L<=self.window_size:
x=self.backbone(input_ids=input_ids,attention_mask=attention_mask,return_dict=False)[0]
#pass
else:
#print("####")
#print(input_ids.shape)
segments=(L-self.window_size)//self.inner_len
if (L-self.window_size)%self.inner_len>self.edge_len:
segments+=1
elif segments==0:
segments+=1
x=self.backbone(input_ids=input_ids[:,:self.window_size],attention_mask=attention_mask[:,:self.window_size],return_dict=False)[0]
for i in range(1,segments+1):
start=self.window_size-self.edge_len+(i-1)*self.inner_len
end=self.window_size-self.edge_len+(i-1)*self.inner_len+self.window_size
end=min(end,L)
x_next=input_ids[:,start:end]
mask_next=attention_mask[:,start:end]
x_next=self.backbone(input_ids=x_next,attention_mask=mask_next,return_dict=False)[0]
#L_next=x_next.shape[1]-self.edge_len,
if i==segments:
x_next=x_next[:,self.edge_len:]
else:
x_next=x_next[:,self.edge_len:self.edge_len+self.inner_len]
#print(x_next.shape)
x=torch.cat([x,x_next],1)
#print(start,end)
#print(x.shape)
if return_transformer_hidden_states:
transformer_hidden_states=x
x=self.lstm(x)
x=self.classification_head(x)
else:
transformer_hidden_states=input_ids
x=self.lstm(transformer_hidden_states)
x=self.classification_head(x)
if return_transformer_hidden_states:
return [x[:,:,rearrange_indices]], transformer_hidden_states
else:
return [x[:,:,rearrange_indices]]#, BIO_output
model = SlidingWindowTransformerModel(DOWNLOADED_MODEL_PATH).to(config['device'])
import warnings
warnings.filterwarnings('ignore', '.*__floordiv__ is deprecated.*',)
# LOOP TO TRAIN MODEL (or load model)
if not LOAD_MODEL_FROM:
for epoch in range(config['epochs']):
print(f"### Training epoch: {epoch + 1}")
for g in optimizer.param_groups:
g['lr'] = config['learning_rates'][epoch]
lr = optimizer.param_groups[0]['lr']
print(f'### LR = {lr}\n')
train(epoch)
torch.cuda.empty_cache()
gc.collect()
torch.save(model.state_dict(), f'bigbird_v{VER}.pt')
# # Inference and Validation Code
# We will infer in batches using our data loader which is faster than inferring one text at a time with a for-loop. The metric code is taken from Rob Mulla's great notebook [here][2]. Our model achieves validation F1 score 0.615!
#
# During inference our model will make predictions for each subword token. Some single words consist of multiple subword tokens. In the code below, we average the predicted probabilities of all subword tokens belonging to a word to get that word's prediction. We could try other approaches, like using only the first subword token or taking `B` labels before `I` labels etc.
#
# [1]: https://www.kaggle.com/raghavendrakotala/fine-tunned-on-roberta-base-as-ner-problem-0-533
# [2]: https://www.kaggle.com/robikscube/student-writing-competition-twitch
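#
# Illustrative example (not part of the original notebook): if the word
# "unhappiness" were split into the tokens "un", "happi" and "ness" with class
# probability vectors p1, p2 and p3, the word-level prediction used below is
# simply (p1 + p2 + p3) / 3.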
# In[115]:
# Returns per-word, mean class prediction probability over all tokens corresponding to each word
def inference(data_loader, model_ids):
gc.collect()
torch.cuda.empty_cache()
ensemble_preds = np.zeros((len(data_loader.dataset), config['max_length'], len(labels_to_ids)), dtype=np.float32)
wids = np.full((len(data_loader.dataset), config['max_length']), -100)
for model_i, model_id in enumerate(model_ids):
model.load_state_dict(torch.load(f'{LOAD_MODEL_FROM}/fold{model_id}.pt', map_location=config['device']))
# put model in training mode
model.eval()
for batch_i, batch in tqdm(enumerate(data_loader),total=len(data_loader)):
if model_i == 0: wids[batch_i*config['valid_batch_size']:(batch_i+1)*config['valid_batch_size'],:batch['wids'].shape[1]] = batch['wids'].numpy()
# MOVE BATCH TO GPU AND INFER
ids = batch["input_ids"].to(config['device'])
mask = batch["attention_mask"].to(config['device'])
with torch.no_grad():
with amp.autocast():
outputs, hidden_states = model(ids, attention_mask=mask,return_transformer_hidden_states=True)
all_preds = torch.nn.functional.softmax(outputs[0], dim=2).cpu().detach().numpy()
#all_preds/=2
ensemble_preds[batch_i*config['valid_batch_size']:(batch_i+1)*config['valid_batch_size'],:all_preds.shape[1]] += all_preds
del ids
del mask
del outputs
del all_preds
gc.collect()
torch.cuda.empty_cache()
ensemble_preds /= len(model_ids)
predictions = []
    # ITERATE THROUGH EACH TEXT AND GET PRED
for text_i in range(ensemble_preds.shape[0]):
token_preds = ensemble_preds[text_i]
prediction = []
previous_word_idx = -1
prob_buffer = []
word_ids = wids[text_i][wids[text_i] != -100]
for idx,word_idx in enumerate(word_ids):
if word_idx == -1:
pass
elif word_idx != previous_word_idx:
if prob_buffer:
prediction.append(np.mean(prob_buffer, dtype=np.float32, axis=0))
prob_buffer = []
prob_buffer.append(token_preds[idx])
previous_word_idx = word_idx
else:
prob_buffer.append(token_preds[idx])
prediction.append(np.mean(prob_buffer, dtype=np.float32, axis=0))
predictions.append(prediction)
gc.collect()
torch.cuda.empty_cache()
return predictions
# In[117]:
import pickle
valid = train_df.loc[train_df['id'].isin(IDS[valid_idx])]
print('Predicting with BigBird...')
if not SUBMISSION:
try:
with open( KAGGLE_CACHE + f"/valid_preds_fold{args.fold}.p", "rb" ) as validFile:
valid_word_preds = pickle.load( validFile )
print("preds loaded")
except:
valid_word_preds = inference(testing_loader, ENSEMBLE_IDS)
with open( cache + f"/valid_preds_fold{args.fold}.p", "wb+" ) as validFile:
pickle.dump( valid_word_preds, validFile )
else: valid_word_preds = []
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
setup.py | import os
import re
import sys
import platform
import shutil
import subprocess
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from distutils.command.build import build
from distutils.version import LooseVersion
class CopyPreBuild(build):
def initialize_options(self):
build.initialize_options(self)
# We just overwrite this because the default "build/lib" clashes with
# directories many developers have in their source trees;
# this can create confusing results with "pip install .", which clones
# the whole source tree by default
self.build_lib = '_tmppythonbuild'
def run(self):
build.run(self)
# matches: libwarpx.(2d|3d|rz).(so|pyd)
re_libprefix = re.compile(r"libwarpx\...\.(?:so|pyd)")
libs_found = []
for lib_name in os.listdir(PYWARPX_LIB_DIR):
if re_libprefix.match(lib_name):
lib_path = os.path.join(PYWARPX_LIB_DIR, lib_name)
libs_found.append(lib_path)
if len(libs_found) == 0:
raise RuntimeError("Error: no pre-build WarpX libraries found in "
"PYWARPX_LIB_DIR='{}'".format(PYWARPX_LIB_DIR))
# copy external libs into collection of files in a temporary build dir
dst_path = os.path.join(self.build_lib, "pywarpx")
for lib_path in libs_found:
shutil.copy(lib_path, dst_path)
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError(
"CMake 3.15.0+ must be installed to build the following " +
"extensions: " +
", ".join(e.name for e in self.extensions))
cmake_version = LooseVersion(re.search(
r'version\s*([\d.]+)',
out.decode()
).group(1))
if cmake_version < '3.15.0':
raise RuntimeError("CMake >= 3.15.0 is required")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(
self.get_ext_fullpath(ext.name)
))
# required for auto-detection of auxiliary "native" libs
if not extdir.endswith(os.path.sep):
extdir += os.path.sep
r_dim = re.search(r'warpx_(2|3|rz)(?:d*)', ext.name)
dims = r_dim.group(1).upper()
cmake_args = [
'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' +
os.path.join(extdir, "pywarpx"),
'-DCMAKE_RUNTIME_OUTPUT_DIRECTORY=' + extdir,
'-DWarpX_DIMS=' + dims,
'-DWarpX_APP:BOOL=OFF',
'-DWarpX_LIB:BOOL=ON',
## variants
'-DWarpX_COMPUTE=' + WarpX_COMPUTE,
'-DWarpX_MPI:BOOL=' + WarpX_MPI,
'-DWarpX_OPENPMD:BOOL=' + WarpX_OPENPMD,
'-DWarpX_PRECISION=' + WarpX_PRECISION,
'-DWarpX_PSATD:BOOL=' + WarpX_PSATD,
'-DWarpX_QED:BOOL=' + WarpX_QED,
'-DWarpX_QED_TABLE_GEN:BOOL=' + WarpX_QED_TABLE_GEN,
## dependency control (developers & package managers)
'-DWarpX_amrex_internal=' + WarpX_amrex_internal,
# see PICSAR and openPMD below
## static/shared libs
'-DBUILD_SHARED_LIBS:BOOL=' + BUILD_SHARED_LIBS,
## Unix: rpath to current dir when packaged
## needed for shared (here non-default) builds and ADIOS1
## wrapper libraries
'-DCMAKE_BUILD_WITH_INSTALL_RPATH:BOOL=ON',
'-DCMAKE_INSTALL_RPATH_USE_LINK_PATH:BOOL=OFF',
# Windows: has no RPath concept, all `.dll`s must be in %PATH%
# or same dir as calling executable
]
if WarpX_QED.upper() in ['1', 'ON', 'TRUE', 'YES']:
cmake_args.append('-DWarpX_picsar_internal=' + WarpX_picsar_internal)
if WarpX_OPENPMD.upper() in ['1', 'ON', 'TRUE', 'YES']:
cmake_args += [
'-DHDF5_USE_STATIC_LIBRARIES:BOOL=' + HDF5_USE_STATIC_LIBRARIES,
'-DADIOS_USE_STATIC_LIBS:BOOL=' + ADIOS_USE_STATIC_LIBS,
'-DWarpX_openpmd_internal=' + WarpX_openpmd_internal,
]
# further dependency control (developers & package managers)
if WarpX_amrex_src:
cmake_args.append('-DWarpX_amrex_src=' + WarpX_amrex_src)
if WarpX_openpmd_src:
cmake_args.append('-DWarpX_openpmd_src=' + WarpX_openpmd_src)
if WarpX_picsar_src:
cmake_args.append('-DWarpX_picsar_src=' + WarpX_picsar_src)
if sys.platform == "darwin":
cmake_args.append('-DCMAKE_INSTALL_RPATH=@loader_path')
else:
# values: linux*, aix, freebsd, ...
# just as well win32 & cygwin (although Windows has no RPaths)
cmake_args.append('-DCMAKE_INSTALL_RPATH=$ORIGIN')
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += [
'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(
cfg.upper(),
os.path.join(extdir, "pywarpx")
)
]
if sys.maxsize > 2**32:
cmake_args += ['-A', 'x64']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--parallel', BUILD_PARALLEL]
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(
env.get('CXXFLAGS', ''),
self.distribution.get_version()
)
build_dir = os.path.join(self.build_temp, dims)
os.makedirs(build_dir, exist_ok=True)
subprocess.check_call(
['cmake', ext.sourcedir] + cmake_args,
cwd=build_dir,
env=env
)
subprocess.check_call(
['cmake', '--build', '.'] + build_args,
cwd=build_dir
)
# note that this does not call install;
# we pick up artifacts directly from the build output dirs
with open('./README.md', encoding='utf-8') as f:
long_description = f.read()
# Allow controlling options via environment variables.
# Work-around for https://github.com/pypa/setuptools/issues/1712
# Pick up existing WarpX libraries or...
PYWARPX_LIB_DIR = os.environ.get('PYWARPX_LIB_DIR')
# ... build WarpX libraries with CMake
# note: changed default for SHARED, MPI, TESTING and EXAMPLES
WarpX_COMPUTE = os.environ.get('WarpX_COMPUTE', 'OMP')
WarpX_MPI = os.environ.get('WarpX_MPI', 'OFF')
WarpX_OPENPMD = os.environ.get('WarpX_OPENPMD', 'OFF')
WarpX_PRECISION = os.environ.get('WarpX_PRECISION', 'DOUBLE')
WarpX_PSATD = os.environ.get('WarpX_PSATD', 'OFF')
WarpX_QED = os.environ.get('WarpX_QED', 'ON')
WarpX_QED_TABLE_GEN = os.environ.get('WarpX_QED_TABLE_GEN', 'OFF')
WarpX_DIMS = os.environ.get('WarpX_DIMS', '2;3;RZ')
BUILD_PARALLEL = os.environ.get('BUILD_PARALLEL', '2')
BUILD_SHARED_LIBS = os.environ.get('WarpX_BUILD_SHARED_LIBS',
'OFF')
#BUILD_TESTING = os.environ.get('WarpX_BUILD_TESTING',
# 'OFF')
#BUILD_EXAMPLES = os.environ.get('WarpX_BUILD_EXAMPLES',
# 'OFF')
# openPMD-api sub-control
HDF5_USE_STATIC_LIBRARIES = os.environ.get('HDF5_USE_STATIC_LIBRARIES', 'OFF')
ADIOS_USE_STATIC_LIBS = os.environ.get('ADIOS_USE_STATIC_LIBS', 'OFF')
# CMake dependency control (developers & package managers)
WarpX_amrex_src = os.environ.get('WarpX_amrex_src')
WarpX_amrex_internal = os.environ.get('WarpX_amrex_internal', 'ON')
WarpX_openpmd_src = os.environ.get('WarpX_openpmd_src')
WarpX_openpmd_internal = os.environ.get('WarpX_openpmd_internal', 'ON')
WarpX_picsar_src = os.environ.get('WarpX_picsar_src')
WarpX_picsar_internal = os.environ.get('WarpX_picsar_internal', 'ON')
# https://cmake.org/cmake/help/v3.0/command/if.html
if WarpX_MPI.upper() in ['1', 'ON', 'TRUE', 'YES']:
WarpX_MPI = "ON"
else:
WarpX_MPI = "OFF"
# for CMake
cxx_modules = [] # values: warpx_2d, warpx_3d, warpx_rz
cmdclass = {} # build extensions
# externally pre-built: pick up pre-built WarpX libraries
if PYWARPX_LIB_DIR:
cmdclass=dict(build=CopyPreBuild)
# CMake: build WarpX libraries ourselves
else:
cmdclass = dict(build_ext=CMakeBuild)
for dim in [x.lower() for x in WarpX_DIMS.split(';')]:
name = dim if dim == "rz" else dim + "d"
cxx_modules.append(CMakeExtension("warpx_" + name))
# Get the package requirements from the requirements.txt file
install_requires = []
with open('./requirements.txt') as f:
install_requires = [line.strip('\n') for line in f.readlines()]
if WarpX_MPI == "ON":
install_requires.append('mpi4py>=2.1.0')
# keyword reference:
# https://packaging.python.org/guides/distributing-packages-using-setuptools
setup(
name='pywarpx',
# note PEP-440 syntax: x.y.zaN but x.y.z.devN
version = '21.02',
packages = ['pywarpx'],
package_dir = {'pywarpx': 'Python/pywarpx'},
author='Jean-Luc Vay, David P. Grote, Maxence Thévenet, Rémi Lehe, Andrew Myers, Weiqun Zhang, Axel Huebl, et al.',
author_email='[email protected], [email protected], [email protected], [email protected], [email protected], [email protected], [email protected]',
maintainer='Axel Huebl, David P. Grote, Rémi Lehe', # wheel/pypi packages
maintainer_email='[email protected], [email protected], [email protected]',
description='WarpX is an advanced electromagnetic Particle-In-Cell code.',
long_description=long_description,
long_description_content_type='text/markdown',
keywords=('WarpX openscience mpi hpc research pic particle-in-cell '
'plasma laser-plasma accelerator modeling simulation'),
url='https://ecp-warpx.github.io',
project_urls={
'Documentation': 'https://warpx.readthedocs.io',
'Doxygen': 'https://warpx.readthedocs.io/en/latest/_static/doxyhtml/index.html',
#'Reference': 'https://doi.org/...', (Paper and/or Zenodo)
'Source': 'https://github.com/ECP-WarpX/WarpX',
'Tracker': 'https://github.com/ECP-WarpX/WarpX/issues',
},
# CMake: self-built as extension module
ext_modules=cxx_modules,
cmdclass=cmdclass,
# scripts=['warpx_2d', 'warpx_3d', 'warpx_rz'],
zip_safe=False,
python_requires='>=3.6, <3.10',
# tests_require=['pytest'],
install_requires=install_requires,
# see: src/bindings/python/cli
#entry_points={
# 'console_scripts': [
# 'warpx_3d = warpx.3d.__main__:main'
# ]
#},
extras_require={
'all': ['openPMD-api~=0.13.0', 'openPMD-viewer~=1.1', 'yt~=3.6', 'matplotlib'],
},
# cmdclass={'test': PyTest},
# platforms='any',
classifiers=[
'Development Status :: 4 - Beta',
'Natural Language :: English',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Physics',
'Programming Language :: C++',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
('License :: OSI Approved :: '
'BSD License'), # TODO: use real SPDX: BSD-3-Clause-LBNL
],
# new PEP 639 format
license='BSD-3-Clause-LBNL',
license_files = ['LICENSE.txt', 'LEGAL.txt'],
)
| []
| []
| [
"WarpX_amrex_src",
"WarpX_PRECISION",
"BUILD_PARALLEL",
"WarpX_PSATD",
"WarpX_COMPUTE",
"WarpX_DIMS",
"WarpX_openpmd_src",
"WarpX_BUILD_EXAMPLES",
"WarpX_QED_TABLE_GEN",
"HDF5_USE_STATIC_LIBRARIES",
"WarpX_picsar_src",
"WarpX_BUILD_TESTING",
"WarpX_BUILD_SHARED_LIBS",
"WarpX_OPENPMD",
"WarpX_picsar_internal",
"WarpX_openpmd_internal",
"WarpX_amrex_internal",
"WarpX_MPI",
"ADIOS_USE_STATIC_LIBS",
"PYWARPX_LIB_DIR",
"WarpX_QED"
]
| [] | ["WarpX_amrex_src", "WarpX_PRECISION", "BUILD_PARALLEL", "WarpX_PSATD", "WarpX_COMPUTE", "WarpX_DIMS", "WarpX_openpmd_src", "WarpX_BUILD_EXAMPLES", "WarpX_QED_TABLE_GEN", "HDF5_USE_STATIC_LIBRARIES", "WarpX_picsar_src", "WarpX_BUILD_TESTING", "WarpX_BUILD_SHARED_LIBS", "WarpX_OPENPMD", "WarpX_picsar_internal", "WarpX_openpmd_internal", "WarpX_amrex_internal", "WarpX_MPI", "ADIOS_USE_STATIC_LIBS", "PYWARPX_LIB_DIR", "WarpX_QED"] | python | 21 | 0 | |
train/paths_lstm_classifier_tf.py | import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import math
import json
import tensorflow as tf
from lstm_common import *
from sklearn import metrics
from sklearn.base import BaseEstimator
NUM_LAYERS = 2
LSTM_HIDDEN_DIM = 60
LEMMA_DIM = 50
POS_DIM = 4
DEP_DIM = 5
DIR_DIM = 1
EMPTY_PATH = ((0, 0, 0, 0),)
MAX_PATH_LEN = 6
BATCH_SIZE = 10
UNK_INDEX = 0
LSTM_OUTPUT_DIM = LSTM_HIDDEN_DIM
LSTM_INPUT_DIM = LEMMA_DIM + POS_DIM + DEP_DIM + DIR_DIM
class PathLSTMClassifier(BaseEstimator):
def __init__(self, num_lemmas, num_pos, num_dep, num_directions=5, n_epochs=10, num_relations=2,
lemma_embeddings=None, dropout=0.0, num_hidden_layers=0):
"""'
Initialize the LSTM
:param num_lemmas Number of distinct lemmas in the paths + words in the (x, y) pairs
:param num_pos Number of distinct part of speech tags
:param num_dep Number of distinct dependency labels
:param num_directions Number of distinct path directions (e.g. >,<)
:param n_epochs Number of training epochs
:param num_relations Number of classes (e.g. binary = 2)
:param lemma_embeddings Pre-trained word embedding vectors for the path-based component
:param dropout Dropout rate
:param num_hidden_layers The number of hidden layers for the term-pair classification network
"""
self.n_epochs = n_epochs
self.num_lemmas = num_lemmas
self.num_pos = num_pos
self.num_dep = num_dep
self.num_directions = num_directions
self.num_relations = num_relations
self.dropout = dropout
self.num_hidden_layers = num_hidden_layers
self.lemma_vectors = None
if lemma_embeddings is not None:
self.lemma_vectors = lemma_embeddings
# Create the network
self.model_parameters = create_computation_graph(self.num_lemmas, self.num_pos, self.num_dep, self.num_directions,
self.num_relations, self.lemma_vectors, self.num_hidden_layers)
self.session = tf.Session()
@classmethod
def load_model(cls, model_file_prefix):
"""
Load the trained model from a file
:param model_file_prefix the path + file name (no extension) where the model files are saved
"""
# Load the parameters from the json file
with open(model_file_prefix + '.params') as f_in:
params = json.load(f_in)
classifier = PathLSTMClassifier(params['num_lemmas'], params['num_pos'], params['num_dep'],
params['num_directions'], num_relations=params['num_relations'],
num_hidden_layers=params['num_hidden_layers'])
# Initialize the session and start training
classifier.session.run(tf.global_variables_initializer())
# Load the model
tf.train.Saver().restore(classifier.session, model_file_prefix)
# Get the variables
variable_names = ['W1', 'b1', 'lemma_lookup', 'pos_lookup', 'dep_lookup', 'dir_lookup']
if classifier.num_hidden_layers == 1:
variable_names += ['W2', 'b2']
classifier.model_parameters.update({ v.name : v for v in tf.global_variables() })
# Load the dictionaries from the json file
with open(model_file_prefix + '.dict') as f_in:
dictionaries = json.load(f_in)
word_index, pos_index, dep_index, dir_index = dictionaries
return classifier, word_index, pos_index, dep_index, dir_index
def save_model(self, output_prefix, dictionaries):
"""
Save the trained model to a file
:param output_prefix Where to save the model
:param dictionaries hyper-parameters to save
"""
tf.train.Saver().save(self.session, output_prefix)
# Save the model hyper-parameters
params = { 'num_relations' : self.num_relations, 'num_hidden_layers' : self.num_hidden_layers,
'num_lemmas' : self.num_lemmas, 'num_pos' : self.num_pos, 'num_directions' : self.num_directions,
'num_dep' : self.num_dep }
with open(output_prefix + '.params', 'w') as f_out:
json.dump(params, f_out, indent=2)
# Save the dictionaries
with open(output_prefix + '.dict', 'w') as f_out:
json.dump(dictionaries, f_out, indent=2)
def close(self):
"""
Close the session
"""
self.session.close()
tf.reset_default_graph()
def fit(self, X_train, y_train, x_y_vectors=None):
"""
Train the model
:param X_train the train instances (paths)
:param y_train the train labels
:param x_y_vectors the train (x, y) vector indices
"""
print 'Training the model...'
train(self.session, self.model_parameters, X_train, y_train, self.n_epochs, self.num_relations, self.num_lemmas,
self.num_pos, self.num_dep, self.num_directions, x_y_vectors, self.dropout)
print 'Done!'
def predict(self, X_test, x_y_vectors=None):
"""
Predict the classification of the test set
"""
predictions, scores = zip(*self.predict_with_score(X_test, x_y_vectors))
return np.array(predictions)
def predict_with_score(self, X_test, x_y_vectors=None):
"""
Predict the classification of the test set
:param X_test the test instances (paths)
:param x_y_vectors the test (x, y) vector indices
"""
model_parameters = self.model_parameters
# Define the neural network model (predictions are computed BATCH_SIZE instances at a time)
batch_paths = model_parameters['batch_paths']
seq_lengths = model_parameters['seq_lengths']
num_batch_paths = model_parameters['num_batch_paths']
path_lists = model_parameters['path_lists']
path_counts = model_parameters['path_counts']
x_vector_inputs = model_parameters['x_vector_inputs']
y_vector_inputs = model_parameters['y_vector_inputs']
predictions = model_parameters['predictions']
# Sort the pairs by number of paths, and add the empty path to pairs with no paths
num_paths = np.array([len(instance) for instance in X_test])
sorted_indices = np.argsort(num_paths)
x_y_vectors = [x_y_vectors[i] for i in sorted_indices]
X_test = [X_test[i] if len(X_test[i]) > 0 else { EMPTY_PATH : 1 } for i in sorted_indices]
pad = lambda lst : lst if len(lst) == BATCH_SIZE else lst + [0] * (BATCH_SIZE - len(lst))
test_pred = [0] * (len(sorted_indices))
for chunk in xrange(0, len(X_test), BATCH_SIZE):
# Initialize the variables with the current batch data
batch_indices = list(range(chunk, min(chunk + BATCH_SIZE, len(X_test))))
actual_batch_size = len(batch_indices)
batch_indices = pad(batch_indices)
curr_batch_paths, curr_path_lists, curr_path_counts, curr_labels, x_vectors, y_vectors,\
curr_seq_lengths = prepare_batch(x_y_vectors, X_test, batch_indices, self.num_relations)
curr_predictions = self.session.run(predictions, feed_dict={ batch_paths : curr_batch_paths,
num_batch_paths : curr_batch_paths.shape[0],
seq_lengths : curr_seq_lengths,
path_lists : curr_path_lists,
path_counts : curr_path_counts,
x_vector_inputs : x_vectors,
y_vector_inputs : y_vectors })
for index_in_batch, index_in_dataset in enumerate(batch_indices[:actual_batch_size]):
vec = curr_predictions[index_in_batch]
test_pred[sorted_indices[index_in_dataset]] = (np.argmax(vec), vec[np.argmax(vec)])
return test_pred
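# Illustrative usage sketch (not part of the original module); the vocabulary sizes, data variables
# and output path below are hypothetical:
#   classifier = PathLSTMClassifier(num_lemmas=10000, num_pos=50, num_dep=40, num_directions=5,
#                                   n_epochs=5, num_relations=2, num_hidden_layers=1)
#   classifier.fit(X_train, y_train, x_y_vectors=train_pairs)
#   predictions = classifier.predict(X_test, x_y_vectors=test_pairs)
#   classifier.save_model('output/path_lstm', [word_index, pos_index, dep_index, dir_index])
#   classifier.close()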
def mlp_model(model_parameters, path_embeddings, num_relations, num_hidden_layers=0):
"""
Defines the MLP operations
:param model_parameters: the network parameters
:param path_embeddings: the matrix of paths variable computed by the LSTM
:param num_relations: the number of classes in the output layer
:param num_hidden_layers: the number of hidden layers (supports 0 and 1)
:return: the prediction object to be computed in a Session
"""
lemma_lookup = model_parameters['lemma_lookup']
W1 = model_parameters['W1']
b1 = model_parameters['b1']
W2 = None
b2 = None
if num_hidden_layers == 1:
W2 = model_parameters['W2']
b2 = model_parameters['b2']
# Define the place holders
path_lists = tf.placeholder(tf.int32, (BATCH_SIZE, None)) # list of paths for each item in the batch
path_counts = tf.placeholder(tf.int32, (BATCH_SIZE, None)) # list of path counts for each item in the batch
x_vector_inputs = tf.placeholder(tf.int32, shape=[BATCH_SIZE])
y_vector_inputs = tf.placeholder(tf.int32, shape=[BATCH_SIZE])
labels = tf.placeholder(tf.int32, shape=[BATCH_SIZE, num_relations])
# Define the operations
num_paths = tf.reduce_sum(tf.cast(path_counts, tf.float32), 1) # number of paths for each pair [BATCH_SIZE, 1]
curr_path_embeddings = [tf.squeeze(tf.gather(path_embeddings, path_list))
for path_list in tf.split(path_lists, BATCH_SIZE, axis=0)] # a list of [MAX_PATHS, 60]
path_counts_lst = tf.split(path_counts, BATCH_SIZE, axis=0) # a list of [MAX_PATHS, 1]
path_counts_tiled = [tf.transpose(tf.tile(tf.cast(path_count, tf.float32), tf.stack([LSTM_HIDDEN_DIM, 1])))
for path_count in path_counts_lst] # a list of [MAX_PATHS, 60]
weighted = [tf.multiply(curr_path_embedding, path_count) for (curr_path_embedding, path_count)
in zip(curr_path_embeddings, path_counts_tiled)]
weighted_sum = [tf.reduce_sum(weighted[i], 0) for i in range(BATCH_SIZE)]
pair_path_embeddings = tf.stack([tf.div(weighted_sum_item, num_paths_item)
for weighted_sum_item, num_paths_item in zip(weighted_sum, tf.unstack(num_paths))])
# Concatenate the path embedding to the word embeddings and feed it to the MLP
x_vectors = tf.nn.embedding_lookup(lemma_lookup, x_vector_inputs)
y_vectors = tf.nn.embedding_lookup(lemma_lookup, y_vector_inputs)
network_input = tf.concat([x_vectors, pair_path_embeddings, y_vectors], 1)
h = tf.add(tf.matmul(network_input, W1), b1)
output = h
if num_hidden_layers == 1:
output = tf.add(tf.matmul(tf.nn.tanh(h), W2), b2)
predictions = tf.nn.softmax(output)
return path_lists, path_counts, x_vector_inputs, y_vector_inputs, predictions, output, labels
def lstm_model(model_parameters):
"""
Defines the LSTM operations
:param model_parameters: the network parameters
:return: a matrix of path embeddings
"""
lemma_lookup = model_parameters['lemma_lookup']
pos_lookup = model_parameters['pos_lookup']
dep_lookup = model_parameters['dep_lookup']
dir_lookup = model_parameters['dir_lookup']
# Define the place holders
batch_paths = tf.placeholder(tf.int32, shape=[None, MAX_PATH_LEN, 4]) # the paths to compute in this batch
seq_lengths = tf.placeholder(tf.int32, shape=[None]) # the length of each path
num_batch_paths = tf.placeholder(tf.int32)
lookup_tables = [lemma_lookup, pos_lookup, dep_lookup, dir_lookup]
edges = tf.split(batch_paths, MAX_PATH_LEN, axis=1)
edge_components = [tf.split(edge, 4, axis=2) for edge in edges]
path_matrix = [tf.concat([tf.nn.embedding_lookup(lookup_table, component)
for lookup_table, component in zip(lookup_tables, edge)], -1)
for edge in edge_components]
path_matrix = [tf.concat(lst, -1) for lst in path_matrix]
path_matrix = tf.squeeze(tf.stack(path_matrix, 0))
path_matrix = tf.reshape(path_matrix, tf.stack([num_batch_paths, MAX_PATH_LEN, LSTM_INPUT_DIM]))
# Define the operations
lstm_cell = tf.contrib.rnn.BasicLSTMCell(LSTM_HIDDEN_DIM)
initial_state = lstm_cell.zero_state(num_batch_paths, tf.float32)
lstm_outputs, _ = tf.nn.dynamic_rnn(lstm_cell, path_matrix, initial_state=initial_state, sequence_length=seq_lengths)
# Get the last output from each item in the batch
path_embeddings = extract_last_relevant(lstm_outputs, num_batch_paths, seq_lengths)
return batch_paths, seq_lengths, path_embeddings, num_batch_paths
def extract_last_relevant(data, dim1, length):
"""
From: https://danijar.com/variable-sequence-lengths-in-tensorflow/
Get specified elements along the second axis of a tensor
:param data: tensor to be subsetted
:param dim1: the size of dimension 1
:param length: the sequence length of each element (the last relevant output is taken at index length-1)
:return: Subsetted tensor
"""
out_size = int(data.get_shape()[2])
index = tf.range(0, dim1) * MAX_PATH_LEN + (length - 1)
flat = tf.reshape(data, [-1, out_size])
relevant = tf.gather(flat, index)
return relevant
def train(session, model_parameters, X_train, y_train, nepochs, num_relations, num_lemmas, num_pos, num_dep, num_dir,
x_y_vectors=None, dropout=0.0):
"""
Train the LSTM
:param model_parameters: the model parameters
:param X_train: the train instances
:param y_train: the train labels
:param nepochs: number of epochs
:param num_relations: the number of possible output classes
:param num_lemmas Number of distinct lemmas in the paths + words in the (x, y) pairs
:param num_pos Number of distinct part of speech tags
:param num_dep Number of distinct dependency labels
:param num_dir Number of distinct path directions (e.g. >,<)
:param x_y_vectors: the word vectors of x and y
:param dropout The word dropout rate
"""
# Define the batches
n_batches = int(math.ceil(len(y_train) / BATCH_SIZE))  # Python 2 integer division: any final partial batch is skipped, matching the fixed-size placeholders
# Define the neural network model
batch_paths = model_parameters['batch_paths']
seq_lengths = model_parameters['seq_lengths']
num_batch_paths = model_parameters['num_batch_paths']
path_lists = model_parameters['path_lists']
path_counts = model_parameters['path_counts']
x_vector_inputs = model_parameters['x_vector_inputs']
y_vector_inputs = model_parameters['y_vector_inputs']
predictions = model_parameters['predictions']
labels = model_parameters['labels']
output = model_parameters['output']
# Define the loss function and the optimization algorithm
# (use the pre-softmax logits here: feeding the already-softmaxed predictions into
# softmax_cross_entropy_with_logits would apply softmax twice)
loss_fn = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=labels))
optimizer = tf.train.AdamOptimizer().minimize(loss_fn)
# Initialize the session and start training
session.run(tf.global_variables_initializer())
# Apply dropout on every component of every path
print 'Applying dropout...'
dropouts = []
for num in [num_lemmas, num_pos, num_dep, num_dir]:
mask = np.random.binomial(1, dropout, num)
dropouts.append(set([i for i, value in enumerate(mask) if value == 1]))
X_train = [instance if len(instance) > 0 else { EMPTY_PATH : 1 } for instance in X_train]
X_train = [{ tuple([tuple([component if component not in dropouts[comp_num] else UNK_INDEX
for comp_num, component in enumerate(edge)]) for edge in path]) : count
for path, count in instance.iteritems() } for instance in X_train]
print 'Training...'
# Sort the pairs by number of paths, and add the empty path to pairs with no paths
num_paths = np.array([len(instance) for instance in X_train])
sorted_indices = np.argsort(num_paths)
X_train = [X_train[i] for i in sorted_indices]
y_train = [y_train[i] for i in sorted_indices]
x_y_vectors = [x_y_vectors[i] for i in sorted_indices]
for epoch in range(nepochs):
epoch_loss = 0.0
epoch_indices = list(range(len(y_train)))
y_pred = np.zeros(len(y_train))
for minibatch in range(n_batches):
batch_indices = epoch_indices[minibatch * BATCH_SIZE:(minibatch + 1) * BATCH_SIZE]
# Compute each path in the batch once, create a matrix of path embeddings, and average for each word-pair
curr_batch_paths, curr_path_lists, curr_path_counts, curr_labels, x_vectors, y_vectors, curr_seq_lengths \
= prepare_batch(x_y_vectors, X_train, batch_indices, num_relations, labels=y_train)
_, curr_loss, curr_predictions = session.run([optimizer, loss_fn, predictions],
feed_dict={ batch_paths : curr_batch_paths, # distinct paths in the batch
num_batch_paths : curr_batch_paths.shape[0],
seq_lengths : curr_seq_lengths, # the length of each path
path_lists : curr_path_lists, # paths for each pair
path_counts : curr_path_counts, # count for each path
labels : curr_labels,
x_vector_inputs : x_vectors,
y_vector_inputs : y_vectors })
epoch_loss += curr_loss
curr_predictions = np.argmax(curr_predictions, 1)
for i in range(len(batch_indices)):
y_pred[batch_indices[i]] = curr_predictions[i]
epoch_loss /= len(y_train)
precision, recall, f1, support = metrics.precision_recall_fscore_support(y_train, y_pred, average='weighted')
print 'Epoch: %d/%d, Loss: %f, Precision: %.3f, Recall: %.3f, F1: %.3f' % \
(epoch + 1, nepochs, epoch_loss, precision, recall, f1)
return session
def prepare_batch(x_y_vectors, instances, batch_indices, num_relations, labels=None):
"""
Populate the variables for the current batch
:param x_y_vectors: the word vectors of x and y
:param instances: the train instances
:param batch_indices: the indices from the train set to use in the current batch
:param num_relations: the number of possible output classes
:param labels: the train labels
:return:
"""
batch_size = len(batch_indices)
# Get all the distinct paths in the batch
batch = [instances[batch_indices[i]] for i in range(batch_size)]
index_to_path = list(set([path for instance in batch for path in instance]))
path_to_index = { path : i for i, path in enumerate(index_to_path) }
batch_paths = np.stack([np.vstack(pad_path(path)) for path in index_to_path])
seq_lengths = np.array([len(path) for path in index_to_path])  # iterate the ordered list so lengths align with batch_paths
# Get the paths for each instance
max_path_per_ins = max([len(instances[batch_indices[i]]) for i in range(batch_size)])
pad = lambda lst : lst[:max_path_per_ins] if len(lst) >= max_path_per_ins \
else lst + [0] * (max_path_per_ins - len(lst))
curr_path_lists = np.vstack([pad([path_to_index[path] for path in instances[batch_indices[i]]])
for i in range(batch_size)])
curr_path_counts = np.vstack([pad(instances[batch_indices[i]].values()) for i in range(batch_size)])
curr_labels = np.zeros((batch_size, num_relations))
if labels is not None:
curr_labels_temp = np.array([labels[batch_indices[i]] for i in range(batch_size)])
curr_labels = np.eye(batch_size, num_relations)[curr_labels_temp]
x_vectors = np.array([x_y_vectors[batch_indices[i]][0] for i in range(batch_size)])
y_vectors = np.array([x_y_vectors[batch_indices[i]][1] for i in range(batch_size)])
return batch_paths, curr_path_lists, curr_path_counts, curr_labels, x_vectors, y_vectors, seq_lengths
def pad_path(path):
"""
Pad the path with empty edges to make it MAX_PATH_LEN long
:param path: the original path
:return: the padded path
"""
path = list(path)
if len(path) < MAX_PATH_LEN:
path += [(0, 0, 0, 0)] * (MAX_PATH_LEN - len(path))
return [np.array(list(edge)) for edge in path]
def create_computation_graph(num_lemmas, num_pos, num_dep, num_directions, num_relations, wv=None, num_hidden_layers=0):
"""
Initialize the model
:param num_lemmas Number of distinct lemmas
:param num_pos Number of distinct part of speech tags
:param num_dep Number of distinct dependency labels
:param num_directions Number of distinct path directions (e.g. >,<)
:param num_relations Number of classes (e.g. binary = 2)
:param wv Pre-trained word embeddings file
:param num_hidden_layers The number of hidden layers for the term-pair classification network
:return: the model parameters: LSTM, parameters and lookup tables
"""
model_parameters = {}
initializer = tf.contrib.layers.xavier_initializer()
# Define the MLP
network_input = LSTM_OUTPUT_DIM + 2 * LEMMA_DIM
# 'the optimal size of the hidden layer is usually between the size of the input and size of the output layers'
hidden_dim = int((network_input + num_relations) / 2)
if num_hidden_layers == 0:
model_parameters['W1'] = tf.get_variable('W1', shape=[network_input, num_relations], initializer=initializer)
model_parameters['b1'] = tf.get_variable('b1', shape=[num_relations], initializer=initializer)
elif num_hidden_layers == 1:
model_parameters['W1'] = tf.get_variable('W1', shape=[network_input, hidden_dim], initializer=initializer)
model_parameters['b1'] = tf.get_variable('b1', shape=[hidden_dim], initializer=initializer)
model_parameters['W2'] = tf.get_variable('W2', shape=[hidden_dim, num_relations], initializer=initializer)
model_parameters['b2'] = tf.get_variable('b2', shape=[num_relations], initializer=initializer)
else:
raise ValueError('Only 0 or 1 hidden layers are supported')
# Create the embeddings lookup
if wv is not None:
model_parameters['lemma_lookup'] = tf.Variable(wv, name='lemma_lookup', dtype=tf.float32)
else:
model_parameters['lemma_lookup'] = tf.get_variable('lemma_lookup', shape=[num_lemmas, LEMMA_DIM],
initializer=initializer)
model_parameters['pos_lookup'] = tf.get_variable('pos_lookup', shape=[num_pos, POS_DIM], initializer=initializer)
model_parameters['dep_lookup'] = tf.get_variable('dep_lookup', shape=[num_dep, DEP_DIM], initializer=initializer)
model_parameters['dir_lookup'] = tf.get_variable('dir_lookup', shape=[num_directions, DIR_DIM], initializer=initializer)
# Define the neural network model
batch_paths, seq_lengths, path_embeddings, num_batch_paths = lstm_model(model_parameters)
path_lists, path_counts, x_vector_inputs, y_vector_inputs, predictions, output, labels = \
mlp_model(model_parameters, path_embeddings, num_relations, num_hidden_layers)
model_parameters.update({ 'batch_paths' : batch_paths, 'seq_lengths' : seq_lengths,
'path_embeddings' : path_embeddings, 'num_batch_paths' : num_batch_paths,
'path_lists' : path_lists, 'path_counts' : path_counts,
'x_vector_inputs' : x_vector_inputs, 'y_vector_inputs' : y_vector_inputs,
'predictions' : predictions, 'output' : output, 'labels' : labels})
return model_parameters
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
go/teams/loader.go | package teams
import (
"errors"
"fmt"
"os"
"sort"
"sync"
"time"
"golang.org/x/net/context"
"github.com/keybase/client/go/gregor"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/keybase1"
hidden "github.com/keybase/client/go/teams/hidden"
storage "github.com/keybase/client/go/teams/storage"
)
// Show detailed team profiling
var teamEnv struct {
Profile bool
UserPreloadEnable bool
UserPreloadParallel bool
UserPreloadWait bool
ProofSetParallel bool
}
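// Illustrative note (not part of the original file): the toggles above are read from the
// environment once, in init below, so they are typically exported before the process that
// loads teams starts, e.g.
//
//	KEYBASE_TEAM_PROF=1 KEYBASE_TEAM_PE=1 <command that starts the service>
//
// where the command itself is a placeholder.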
func init() {
teamEnv.Profile = os.Getenv("KEYBASE_TEAM_PROF") == "1"
teamEnv.UserPreloadEnable = os.Getenv("KEYBASE_TEAM_PE") == "1"
teamEnv.UserPreloadParallel = os.Getenv("KEYBASE_TEAM_PP") == "1"
teamEnv.UserPreloadWait = os.Getenv("KEYBASE_TEAM_PW") == "1"
teamEnv.ProofSetParallel = os.Getenv("KEYBASE_TEAM_SP") == "0"
}
// How long until the tail of a team sigchain is considered non-fresh
const freshnessLimit = time.Duration(1) * time.Hour
// Load a Team from the TeamLoader.
// Can be called from inside the teams package.
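// An illustrative usage sketch (not from the original file); the team name and choice of fields
// are hypothetical, but the fields shown are used elsewhere in this file:
//
//	team, err := Load(ctx, g, keybase1.LoadTeamArg{
//		Name:        "acme.engineering",
//		ForceRepoll: true,
//	})
//	if err != nil {
//		return err
//	}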
func Load(ctx context.Context, g *libkb.GlobalContext, lArg keybase1.LoadTeamArg) (*Team, error) {
teamData, hidden, err := g.GetTeamLoader().Load(ctx, lArg)
if err != nil {
return nil, err
}
ret := NewTeam(ctx, g, teamData, hidden)
if lArg.RefreshUIDMapper {
// If we just loaded the team, then inform the UIDMapper of any UID->EldestSeqno
// mappings, so that we're guaranteed they aren't stale.
ret.refreshUIDMapper(ctx, g)
}
return ret, nil
}
// Loader of keybase1.TeamData objects. Handles caching.
// Because there is one of this global object and it is attached to G,
// its Load interface must return a keybase1.TeamData not a teams.Team.
// To load a teams.Team use the package-level function Load.
// Threadsafe.
type TeamLoader struct {
libkb.Contextified
world LoaderContext
storage *storage.Storage
merkleStorage *storage.Merkle
// Single-flight locks per team ID.
// (Private and public loads of the same ID will block each other, should be fine)
locktab *libkb.LockTable
// Cache lookups of team name -> ID for a few seconds, to absorb bursts of lookups
// from the frontend
nameLookupBurstCache *libkb.BurstCache
// We can get pushed by the server into "force repoll" mode, in which we're
// not getting cache invalidations. An example: when Coyne or Nojima revokes
// a device. We want to cut down on notification spam. So instead, all attempts
// to load a team result in a preliminary poll for freshness while this state is enabled.
forceRepollMutex sync.RWMutex
forceRepollUntil gregor.TimeOrOffset
}
var _ libkb.TeamLoader = (*TeamLoader)(nil)
func NewTeamLoader(g *libkb.GlobalContext, world LoaderContext, storage *storage.Storage, merkleStorage *storage.Merkle) *TeamLoader {
return &TeamLoader{
Contextified: libkb.NewContextified(g),
world: world,
storage: storage,
merkleStorage: merkleStorage,
nameLookupBurstCache: libkb.NewBurstCache(g, 100, 10*time.Second, "SubteamNameToID"),
locktab: libkb.NewLockTable(),
}
}
// NewTeamLoaderAndInstall creates a new loader and installs it into G.
func NewTeamLoaderAndInstall(g *libkb.GlobalContext) *TeamLoader {
world := NewLoaderContextFromG(g)
st := storage.NewStorage(g)
mst := storage.NewMerkle()
l := NewTeamLoader(g, world, st, mst)
g.SetTeamLoader(l)
g.AddLogoutHook(l, "teamLoader")
g.AddDbNukeHook(l, "teamLoader")
return l
}
func (l *TeamLoader) Load(ctx context.Context, lArg keybase1.LoadTeamArg) (res *keybase1.TeamData, hidden *keybase1.HiddenTeamChain, err error) {
me, err := l.world.getMe(ctx)
if err != nil {
return nil, nil, err
}
if me.IsNil() && !lArg.Public {
return nil, nil, libkb.NewLoginRequiredError("login required to load a private team")
}
return l.load1(ctx, me, lArg)
}
func newFrozenChain(chain *keybase1.TeamSigChainState) keybase1.TeamSigChainState {
return keybase1.TeamSigChainState{
Id: chain.Id,
Public: chain.Public,
LastSeqno: chain.LastSeqno,
LastLinkID: chain.LastLinkID,
}
}
func (l *TeamLoader) Freeze(ctx context.Context, teamID keybase1.TeamID) (err error) {
defer l.G().CTraceTimed(ctx, fmt.Sprintf("TeamLoader#Freeze(%s)", teamID), func() error { return err })()
lock := l.locktab.AcquireOnName(ctx, l.G(), teamID.String())
defer lock.Release(ctx)
mctx := libkb.NewMetaContext(ctx, l.G())
td, frozen, tombstoned := l.storage.Get(mctx, teamID, teamID.IsPublic())
if frozen || td == nil {
return nil
}
newTD := &keybase1.TeamData{
Frozen: true,
Tombstoned: tombstoned,
Chain: newFrozenChain(&td.Chain),
}
l.storage.Put(mctx, newTD)
return nil
}
func (l *TeamLoader) Tombstone(ctx context.Context, teamID keybase1.TeamID) (err error) {
defer l.G().CTraceTimed(ctx, fmt.Sprintf("TeamLoader#Tombstone(%s)", teamID), func() error { return err })()
lock := l.locktab.AcquireOnName(ctx, l.G(), teamID.String())
defer lock.Release(ctx)
mctx := libkb.NewMetaContext(ctx, l.G())
td, frozen, tombstoned := l.storage.Get(mctx, teamID, teamID.IsPublic())
if tombstoned || td == nil {
return nil
}
newTD := &keybase1.TeamData{
Frozen: frozen,
Tombstoned: true,
Chain: newFrozenChain(&td.Chain),
}
l.storage.Put(mctx, newTD)
return nil
}
func (l *TeamLoader) HintLatestSeqno(ctx context.Context, teamID keybase1.TeamID, seqno keybase1.Seqno) error {
// Single-flight lock by team ID.
lock := l.locktab.AcquireOnName(ctx, l.G(), teamID.String())
defer lock.Release(ctx)
mctx := libkb.NewMetaContext(ctx, l.G())
// Load from the cache
td, frozen, tombstoned := l.storage.Get(mctx, teamID, teamID.IsPublic())
if frozen || tombstoned || td == nil {
// Nothing to store the hint on.
return nil
}
if seqno < td.LatestSeqnoHint {
// The hint is behind the times, ignore.
return nil
}
td.LatestSeqnoHint = seqno
l.storage.Put(mctx, td)
return nil
}
type nameLookupBurstCacheKey struct {
teamName keybase1.TeamName
public bool
}
func (n nameLookupBurstCacheKey) String() string {
return fmt.Sprintf("%s:%v", n.teamName.String(), n.public)
}
// Resolve a team name to a team ID.
// Will always hit the server for subteams. The server can lie in this return value.
func (l *TeamLoader) ResolveNameToIDUntrusted(ctx context.Context, teamName keybase1.TeamName, public bool, allowCache bool) (id keybase1.TeamID, err error) {
defer l.G().CVTrace(ctx, libkb.VLog0, fmt.Sprintf("resolveNameToUIDUntrusted(%s,%v,%v)", teamName.String(), public, allowCache), func() error { return err })()
// For root team names, just hash.
if teamName.IsRootTeam() {
return teamName.ToTeamID(public), nil
}
if !allowCache {
return resolveNameToIDUntrustedAPICall(ctx, l.G(), teamName, public)
}
var idVoidPointer interface{}
key := nameLookupBurstCacheKey{teamName, public}
idVoidPointer, err = l.nameLookupBurstCache.Load(ctx, key, l.makeNameLookupBurstCacheLoader(ctx, l.G(), key))
if err != nil {
return keybase1.TeamID(""), err
}
if idPointer, ok := idVoidPointer.(*keybase1.TeamID); ok && idPointer != nil {
id = *idPointer
} else {
return keybase1.TeamID(""), errors.New("bad cast out of nameLookupBurstCache")
}
return id, nil
}
func resolveNameToIDUntrustedAPICall(ctx context.Context, g *libkb.GlobalContext, teamName keybase1.TeamName, public bool) (id keybase1.TeamID, err error) {
mctx := libkb.NewMetaContext(ctx, g)
arg := libkb.NewAPIArg("team/get")
arg.SessionType = libkb.APISessionTypeREQUIRED
arg.Args = libkb.HTTPArgs{
"name": libkb.S{Val: teamName.String()},
"lookup_only": libkb.B{Val: true},
"public": libkb.B{Val: public},
}
var rt rawTeam
if err := mctx.G().API.GetDecode(mctx, arg, &rt); err != nil {
return id, err
}
id = rt.ID
if !id.Exists() {
return id, fmt.Errorf("could not resolve team name: %v", teamName.String())
}
return id, nil
}
func (l *TeamLoader) makeNameLookupBurstCacheLoader(ctx context.Context, g *libkb.GlobalContext, key nameLookupBurstCacheKey) libkb.BurstCacheLoader {
return func() (obj interface{}, err error) {
id, err := resolveNameToIDUntrustedAPICall(ctx, g, key.teamName, key.public)
if err != nil {
return nil, err
}
return &id, nil
}
}
// Load1 unpacks the loadArg, calls load2, and does some final checks.
// The key difference between load1 and load2 is that load2 is recursive (for subteams).
func (l *TeamLoader) load1(ctx context.Context, me keybase1.UserVersion, lArg keybase1.LoadTeamArg) (*keybase1.TeamData, *keybase1.HiddenTeamChain, error) {
mctx := libkb.NewMetaContext(ctx, l.G())
err := l.checkArg(ctx, lArg)
if err != nil {
return nil, nil, err
}
var teamName *keybase1.TeamName
if len(lArg.Name) > 0 {
teamNameParsed, err := keybase1.TeamNameFromString(lArg.Name)
if err != nil {
return nil, nil, fmt.Errorf("invalid team name: %v", err)
}
teamName = &teamNameParsed
}
teamID := lArg.ID
// Resolve the name to team ID. Will always hit the server for subteams.
// It is safe for the answer to be wrong because the name is checked on the way out,
// and the merkle tree check guarantees one sigchain per team id.
if !teamID.Exists() {
teamID, err = l.ResolveNameToIDUntrusted(ctx, *teamName, lArg.Public, lArg.AllowNameLookupBurstCache)
if err != nil {
mctx.Debug("TeamLoader looking up team by name failed: %v -> %v", *teamName, err)
if code, ok := libkb.GetAppStatusCode(err); ok && code == keybase1.StatusCode_SCTeamNotFound {
mctx.Debug("replacing error: %v", err)
return nil, nil, NewTeamDoesNotExistError(lArg.Public, teamName.String())
}
return nil, nil, err
}
}
mungedForceRepoll := lArg.ForceRepoll
mungedWantMembers, err := l.mungeWantMembers(ctx, lArg.Refreshers.WantMembers)
if err != nil {
mctx.Debug("TeamLoader munge failed: %v", err)
// drop the error and just force a repoll.
mungedForceRepoll = true
mungedWantMembers = nil
}
ret, err := l.load2(ctx, load2ArgT{
teamID: teamID,
needAdmin: lArg.NeedAdmin,
needKeyGeneration: lArg.Refreshers.NeedKeyGeneration,
needApplicationsAtGenerations: lArg.Refreshers.NeedApplicationsAtGenerations,
needApplicationsAtGenerationsWithKBFS: lArg.Refreshers.NeedApplicationsAtGenerationsWithKBFS,
needKBFSKeyGeneration: lArg.Refreshers.NeedKBFSKeyGeneration,
wantMembers: mungedWantMembers,
wantMembersRole: lArg.Refreshers.WantMembersRole,
forceFullReload: lArg.ForceFullReload,
forceRepoll: mungedForceRepoll,
staleOK: lArg.StaleOK,
public: lArg.Public,
auditMode: lArg.AuditMode,
skipNeedHiddenRotateCheck: lArg.SkipNeedHiddenRotateCheck,
needSeqnos: nil,
readSubteamID: nil,
me: me,
})
switch err := err.(type) {
case TeamDoesNotExistError:
if teamName == nil {
return nil, nil, err
}
// Replace the not found error so that it has a name instead of team ID.
// If subteams are involved the name might not correspond to the ID
// but it's better to have this understandable error message that's accurate
// most of the time than one with an ID that's always accurate.
mctx.Debug("replacing error: %v", err)
return nil, nil, NewTeamDoesNotExistError(lArg.Public, teamName.String())
case nil:
default:
return nil, nil, err
}
if ret == nil {
return nil, nil, fmt.Errorf("team loader fault: got nil from load2")
}
// Public teams are allowed to be behind on secrets since you can load a
// public team you're not in. Restricted bot members don't have any secrets
// and are also exempt.
if !l.hasSyncedSecrets(mctx, ret.teamShim()) &&
!(ret.team.Chain.Public || ret.team.Chain.UserRole(me).IsRestrictedBot()) {
// this should not happen
return nil, nil, fmt.Errorf("missing secrets for team")
}
// Check team name on the way out
// The snapshot may have already been written to cache, but that should be ok,
// because the cache is keyed by ID.
if teamName != nil {
// (TODO: this won't work for renamed level 3 teams or above. There's work on this in miles/teamloader-names)
if !teamName.Eq(ret.team.Name) {
return nil, nil, fmt.Errorf("team name mismatch: %v != %v", ret.team.Name, teamName.String())
}
}
if ShouldRunBoxAudit(mctx) {
newMctx, shouldReload := VerifyBoxAudit(mctx, teamID)
if shouldReload {
return l.load1(newMctx.Ctx(), me, lArg)
}
} else {
mctx.Debug("Box auditor feature flagged off; not checking jail during team load...")
}
return &ret.team, ret.hidden, nil
}
func (l *TeamLoader) checkArg(ctx context.Context, lArg keybase1.LoadTeamArg) error {
hasID := lArg.ID.Exists()
hasName := len(lArg.Name) > 0
if hasID {
id, err := keybase1.TeamIDFromString(lArg.ID.String())
if err != nil {
return fmt.Errorf("team load arg has invalid ID: %v", lArg.ID)
}
if id.IsPublic() != lArg.Public {
return libkb.NewTeamVisibilityError(lArg.Public, id.IsPublic())
}
}
if !hasID && !hasName {
return fmt.Errorf("team load arg must have either ID or Name")
}
return nil
}
// Mostly the same as the public keybase.LoadTeamArg
// but only supports loading by ID, and has neededSeqnos.
type load2ArgT struct {
teamID keybase1.TeamID
reason string // optional tag for debugging why this load is happening
needAdmin bool
needKeyGeneration keybase1.PerTeamKeyGeneration
needApplicationsAtGenerations map[keybase1.PerTeamKeyGeneration][]keybase1.TeamApplication
needApplicationsAtGenerationsWithKBFS map[keybase1.PerTeamKeyGeneration][]keybase1.TeamApplication
needKBFSKeyGeneration keybase1.TeamKBFSKeyRefresher
// wantMembers here is different from wantMembers on LoadTeamArg:
// The EldestSeqno's should not be 0.
wantMembers []keybase1.UserVersion
wantMembersRole keybase1.TeamRole
forceFullReload bool
forceRepoll bool
staleOK bool
public bool
skipNeedHiddenRotateCheck bool
skipSeedCheck bool
auditMode keybase1.AuditMode
needSeqnos []keybase1.Seqno
// Non-nil if we are loading an ancestor for the greater purpose of
// loading a subteam. This parameter helps the server figure out whether
// to give us a subteam-reader version of the team.
// If and only if this is set, load2 is allowed to return a secret-less TeamData.
// Load1 can return secret-less TeamData if the team is public or the
// current user is a restricted bot member.
readSubteamID *keybase1.TeamID
// If the user is logged out, this will be a nil UserVersion, meaning
// me.IsNil() will be true.
me keybase1.UserVersion
}
type load2ResT struct {
team keybase1.TeamData
hidden *keybase1.HiddenTeamChain
didRepoll bool
}
func (l load2ResT) teamShim() *TeamShim {
return &TeamShim{Data: &l.team, Hidden: l.hidden}
}
// Load2 does the rest of the work loading a team.
// It is `playchain` described in the pseudocode in teamplayer.txt
func (l *TeamLoader) load2(ctx context.Context, arg load2ArgT) (ret *load2ResT, err error) {
ctx = libkb.WithLogTag(ctx, "LT") // Load team
if arg.reason != "" {
ctx = libkb.WithLogTag(ctx, "LT2") // Load team recursive
}
traceLabel := fmt.Sprintf("TeamLoader#load2(%v, public:%v)", arg.teamID, arg.public)
if len(arg.reason) > 0 {
traceLabel = traceLabel + " '" + arg.reason + "'"
}
defer l.G().CTraceTimed(ctx, traceLabel, func() error { return err })()
ret, err = l.load2Inner(ctx, arg)
return ret, err
}
func (l *TeamLoader) load2Inner(ctx context.Context, arg load2ArgT) (*load2ResT, error) {
// Single-flight lock by team ID.
lock := l.locktab.AcquireOnName(ctx, l.G(), arg.teamID.String())
defer lock.Release(ctx)
return l.load2InnerLocked(ctx, arg)
}
func (l *TeamLoader) load2InnerLocked(ctx context.Context, arg load2ArgT) (res *load2ResT, err error) {
const nRetries = 3
for i := 0; i < nRetries; i++ {
res, err = l.load2InnerLockedRetry(ctx, arg)
switch err.(type) {
case nil:
return res, nil
case ProofError:
if arg.forceRepoll {
return res, err
}
// Something went wrong, throw out the cache and try again.
l.G().Log.CDebugf(ctx, "Got proof error (%s); trying again with forceRepoll=true", err.Error())
arg.forceRepoll = true
arg.forceFullReload = true
origErr := err
res, err = l.load2InnerLockedRetry(ctx, arg)
if err == nil {
l.G().Log.CDebugf(ctx, "Found an unexpected TeamLoader case in which busting the cache saved the day (original error was: %s)", origErr.Error())
}
return res, err
case GreenLinkError:
// Try again
l.G().Log.CDebugf(ctx, "TeamLoader retrying after green link")
arg.forceRepoll = true
continue
}
return res, err
}
if err == nil {
// Should never happen
return res, fmt.Errorf("failed retryable team load")
}
// Return the last error
return res, err
}
func (l *TeamLoader) load2InnerLockedRetry(ctx context.Context, arg load2ArgT) (*load2ResT, error) {
ctx, tbs := l.G().CTimeBuckets(ctx)
mctx := libkb.NewMetaContext(ctx, l.G())
tracer := l.G().CTimeTracer(ctx, "TeamLoader.load2ILR", teamEnv.Profile)
defer tracer.Finish()
defer tbs.LogIfNonZero(ctx, "API.request")
var err error
var didRepoll bool
lkc := newLoadKeyCache()
// Fetch from cache
tracer.Stage("cache load")
tailCheckRet, frozen, tombstoned := l.storage.Get(mctx, arg.teamID, arg.public)
if tombstoned {
return nil, NewTeamTombstonedError()
}
// Fetch last polled time from merkle cache
merklePolledAt := l.merkleStorage.Get(mctx, arg.teamID, arg.public)
var ret *keybase1.TeamData
if !frozen && !arg.forceFullReload {
// Load from cache
ret = tailCheckRet
}
if ret != nil && !ret.Chain.Reader.Eq(arg.me) {
// Check that we are the same person as when this team was last loaded as a courtesy.
// This should never happen. We shouldn't be able to decrypt someone else's snapshot.
mctx.Warning("TeamLoader discarding snapshot for wrong user: (%v, %v) != (%v, %v)",
arg.me.Uid, arg.me.EldestSeqno, ret.Chain.Reader.Uid, ret.Chain.Reader.EldestSeqno)
ret = nil
}
var cachedName *keybase1.TeamName
if ret != nil && !ret.Name.IsNil() {
cachedName = &ret.Name
}
hiddenPackage, err := l.hiddenPackage(mctx, arg.teamID, ret, arg.me)
if err != nil {
return nil, err
}
teamShim := func() *TeamShim {
return &TeamShim{Data: ret, Hidden: hiddenPackage.ChainData()}
}
// Determine whether to repoll merkle.
discardCache, repoll := l.load2DecideRepoll(mctx, arg, teamShim(), merklePolledAt)
if discardCache {
ret = nil
repoll = true
}
tracer.Stage("deepcopy")
if ret != nil {
// If we're pulling from a previous snapshot (that, let's say, we got from a shared cache),
// then make sure to DeepCopy() data out of it before we start mutating it below. We used
// to do this every step through the new links, but that was very expensive in terms of CPU
// for big teams, since it was hidden quadratic behavior.
tmp := ret.DeepCopy()
ret = &tmp
} else {
mctx.Debug("TeamLoader not using snapshot")
}
tracer.Stage("merkle")
var lastSeqno keybase1.Seqno
var lastLinkID keybase1.LinkID
var hiddenIsFresh bool
if (ret == nil) || repoll {
mctx.Debug("TeamLoader looking up merkle leaf (force:%v)", arg.forceRepoll)
// Request also, without an additional RTT, freshness information about the hidden chain;
// we're going to send up information we know about the visible and hidden chains to both prove
// membership and show what we know about.
harg, err := hiddenPackage.MerkleLoadArg(mctx)
if err != nil {
return nil, err
}
// Reference the merkle tree to fetch the sigchain tail leaf for the team.
lastSeqno, lastLinkID, hiddenIsFresh, err = l.world.merkleLookupWithHidden(ctx, arg.teamID, arg.public, harg)
if err != nil {
return nil, err
}
didRepoll = true
} else {
lastSeqno = ret.Chain.LastSeqno
lastLinkID = ret.Chain.LastLinkID
hiddenIsFresh = true
}
// For child calls to load2, the subteam reader ID is carried up
// or if it doesn't exist, start at this team.
readSubteamID := arg.teamID
if arg.readSubteamID != nil {
readSubteamID = *arg.readSubteamID
}
proofSet := newProofSet(l.G())
var parentChildOperations []*parentChildOperation
// Backfill stubbed links that need to be filled now.
tracer.Stage("backfill")
var filledInStubbedLinks bool
if ret != nil && len(arg.needSeqnos) > 0 {
ret, proofSet, parentChildOperations, err = l.fillInStubbedLinks(
ctx, arg.me, arg.teamID, ret, arg.needSeqnos, readSubteamID, proofSet, parentChildOperations, lkc)
if err != nil {
return nil, err
}
filledInStubbedLinks = true
}
tracer.Stage("pre-fetch")
var fetchLinksAndOrSecrets bool
if ret == nil {
mctx.Debug("TeamLoader fetching: no cache")
// We have no cache
fetchLinksAndOrSecrets = true
} else if ret.Chain.LastSeqno < lastSeqno {
mctx.Debug("TeamLoader fetching: chain update")
// The cache is definitely behind
fetchLinksAndOrSecrets = true
} else if !hiddenIsFresh {
mctx.Debug("TeamLoader fetching: hidden chain wasn't fresh")
fetchLinksAndOrSecrets = true
} else if !l.hasSyncedSecrets(mctx, teamShim()) {
// The cached secrets are behind the cached chain.
// We may need to hit the server for secrets, even though there are no new links.
if arg.needAdmin {
mctx.Debug("TeamLoader fetching: NeedAdmin")
// Admins should always have up-to-date secrets. But not necessarily RKMs.
fetchLinksAndOrSecrets = true
}
if err := l.satisfiesNeedKeyGeneration(mctx, arg.needKeyGeneration, teamShim()); err != nil {
mctx.Debug("TeamLoader fetching: NeedKeyGeneration: %v", err)
fetchLinksAndOrSecrets = true
}
if err := l.satisfiesNeedsKBFSKeyGeneration(mctx, arg.needKBFSKeyGeneration, teamShim()); err != nil {
mctx.Debug("TeamLoader fetching: KBFSNeedKeyGeneration: %v", err)
fetchLinksAndOrSecrets = true
}
if arg.readSubteamID == nil {
// This is not a recursive load. We should have the keys.
// This may be an extra round trip for public teams you're not in.
mctx.Debug("TeamLoader fetching: primary load")
fetchLinksAndOrSecrets = true
}
}
// hasSyncedSecrets does not account for RKMs. So check RKM refreshers separately.
if err := l.satisfiesNeedApplicationsAtGenerations(mctx, arg.needApplicationsAtGenerations, teamShim()); err != nil {
mctx.Debug("TeamLoader fetching: NeedApplicationsAtGenerations: %v", err)
fetchLinksAndOrSecrets = true
}
if err := l.satisfiesNeedApplicationsAtGenerationsWithKBFS(mctx,
arg.needApplicationsAtGenerationsWithKBFS, teamShim()); err != nil {
mctx.Debug("TeamLoader fetching: NeedApplicationsAtGenerationsWithKBFS: %v", err)
fetchLinksAndOrSecrets = true
}
// Pull new links from the server
tracer.Stage("fetch")
var teamUpdate *rawTeam
if fetchLinksAndOrSecrets {
lows := l.lows(mctx, ret, hiddenPackage)
mctx.Debug("TeamLoader getting links from server (%+v)", lows)
teamUpdate, err = l.world.getNewLinksFromServer(ctx, arg.teamID, lows, arg.readSubteamID)
if err != nil {
return nil, err
}
mctx.Debug("TeamLoader got %v links", len(teamUpdate.Chain))
hiddenPackage.SetRatchetBlindingKeySet(teamUpdate.RatchetBlindingKeySet)
}
tracer.Stage("unpack")
links, err := teamUpdate.unpackLinks(ctx)
if err != nil {
return nil, err
}
var prev libkb.LinkID
if ret != nil {
prev, err = TeamSigChainState{inner: ret.Chain}.GetLatestLibkbLinkID()
if err != nil {
return nil, err
}
}
// A link which was signed by an admin. Sloppily the latest such link.
// Sloppy because this calculation misses out on e.g. a rotate_key signed by an admin.
// This value is used for skipping fullVerify on team.leave links, see `verifyLink`.
var fullVerifyCutoff keybase1.Seqno
for i := len(links) - 1; i >= 0; i-- {
if links[i].LinkType().RequiresAtLeastRole().IsAdminOrAbove() {
fullVerifyCutoff = links[i].Seqno()
break
}
}
if fullVerifyCutoff > 0 {
mctx.Debug("fullVerifyCutoff: %v", fullVerifyCutoff)
}
tracer.Stage("userPreload enable:%v parallel:%v wait:%v",
teamEnv.UserPreloadEnable, teamEnv.UserPreloadParallel, teamEnv.UserPreloadWait)
preloadCancel := l.userPreload(ctx, links, fullVerifyCutoff)
defer preloadCancel()
tracer.Stage("linkloop (%v)", len(links))
parentsCache := make(parentChainCache)
// Don't log in the middle links if there are a great many links.
suppressLoggingStart := 5
suppressLoggingUpto := len(links) - 5
for i, link := range links {
var err error
ret, prev, err = l.doOneLink(mctx, arg, ret, hiddenPackage, link, i, suppressLoggingStart, suppressLoggingUpto, lastSeqno, &parentChildOperations, prev, fullVerifyCutoff, readSubteamID, proofSet, lkc, &parentsCache)
if err != nil {
return nil, err
}
}
if ret == nil {
return nil, fmt.Errorf("team loader fault: got nil from load2")
}
encKID, gen, role, err := l.hiddenPackageGetter(mctx, arg.teamID, ret, arg.me)()
if err != nil {
return nil, err
}
// Update the hidden package with team metadata once we process all of the
// links. This is necessary since we need the role to be up to date to know
// if we should skip seed checks on the hidden chain if we are loading as a
// RESTRICTEDBOT.
hiddenPackage.UpdateTeamMetadata(encKID, gen, role)
// Be sure to update the hidden chain after the main chain, since the latter can "ratchet" the former
if teamUpdate != nil {
err = hiddenPackage.Update(mctx, teamUpdate.GetHiddenChain())
if err != nil {
return nil, err
}
err = hiddenPackage.CheckPTKsForDuplicates(mctx, func(g keybase1.PerTeamKeyGeneration) bool {
_, ok := ret.Chain.PerTeamKeys[g]
return ok
})
if err != nil {
return nil, err
}
}
// The hidden team has pointers from the hidden chain up to the visible chain; check that they
// match the loaded team. We should have a full load of the team, so all parent pointers
// better hit their mark.
err = hiddenPackage.CheckParentPointersOnFullLoad(mctx, ret)
if err != nil {
return nil, err
}
preloadCancel()
if len(links) > 0 {
tbs.Log(ctx, "TeamLoader.verifyLink")
tbs.Log(ctx, "TeamLoader.applyNewLink")
tbs.Log(ctx, "SigChain.LoadFromServer.ReadAll")
tbs.Log(ctx, "loadKeyCache.loadKeyV2")
if teamEnv.Profile {
tbs.Log(ctx, "LoaderContextG.loadKeyV2")
tbs.Log(ctx, "CachedUPAKLoader.LoadKeyV2") // note LoadKeyV2 calls Load2
tbs.Log(ctx, "CachedUPAKLoader.LoadV2")
tbs.Log(ctx, "CachedUPAKLoader.DeepCopy")
mctx.Debug("TeamLoader lkc cache hits: %v", lkc.cacheHits)
}
}
if !ret.Chain.LastLinkID.Eq(lastLinkID) {
return nil, fmt.Errorf("wrong sigchain link ID: %v != %v",
ret.Chain.LastLinkID, lastLinkID)
}
if tailCheckRet != nil {
// If we previously discarded cache due to forceFullReload, or left the
// team, froze it, and are rejoining, make sure the previous tail is
// still in the chain.
// The chain loader ensures it is part of a well-formed chain with correct prevs.
linkID := ret.Chain.LinkIDs[tailCheckRet.Chain.LastSeqno]
if !linkID.Eq(tailCheckRet.Chain.LastLinkID) {
return nil, fmt.Errorf("got wrong sigchain link ID for seqno %d: expected %v from previous cache entry (frozen=%t); got %v in new chain", tailCheckRet.Chain.LastSeqno,
tailCheckRet.Chain.LastLinkID, ret.Frozen, linkID)
}
}
tracer.Stage("pco")
err = l.checkParentChildOperations(ctx,
arg.me, arg.teamID, ret.Chain.ParentID, readSubteamID, parentChildOperations, proofSet)
if err != nil {
return nil, err
}
tracer.Stage("checkproofs")
err = l.checkProofs(ctx, ret, proofSet)
if err != nil {
return nil, err
}
tracer.Stage("secrets")
if teamUpdate != nil {
if teamUpdate.SubteamReader {
// Only allow subteam-reader results if we are in a recursive load.
if arg.readSubteamID == nil {
return nil, fmt.Errorf("unexpected subteam reader result")
}
} else {
stateWrapper := newTeamSigChainState(teamShim())
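// Figure out our own role; if it can't be determined (e.g. we're not in the team), fall back to NONE.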
role, err := stateWrapper.GetUserRole(arg.me)
if err != nil {
role = keybase1.TeamRole_NONE
}
// Add the secrets.
// If it's a public team, there might not be secrets (if we're not in the team).
// Restricted bots don't have any team secrets, so we also short circuit.
if !role.IsRestrictedBot() && (!ret.Chain.Public || (teamUpdate.Box != nil)) {
err = l.addSecrets(mctx, teamShim(), arg.me, teamUpdate.Box, teamUpdate.Prevs, teamUpdate.ReaderKeyMasks)
if err != nil {
return nil, fmt.Errorf("loading team secrets: %v", err)
}
err = l.computeSeedChecks(ctx, ret)
if err != nil {
return nil, err
}
if teamUpdate.LegacyTLFUpgrade != nil {
err = l.addKBFSCryptKeys(mctx, teamShim(), teamUpdate.LegacyTLFUpgrade)
if err != nil {
return nil, fmt.Errorf("loading KBFS crypt keys: %v", err)
}
}
}
if role.IsRestrictedBot() {
// Clear out any secrets we may have had in memory if we were a
// previous role that had PTK access.
state := teamShim().MainChain()
state.PerTeamKeySeedsUnverified = make(map[keybase1.PerTeamKeyGeneration]keybase1.PerTeamKeySeedItem)
state.ReaderKeyMasks = make(map[keybase1.TeamApplication]map[keybase1.PerTeamKeyGeneration]keybase1.MaskB64)
state.TlfCryptKeys = make(map[keybase1.TeamApplication][]keybase1.CryptKey)
}
}
}
// Note that we might have already computed seed checks just above, after adding secrets
// but before adding KBFS crypt keys. It's cheap to run this method twice in a row.
tracer.Stage("computeSeedChecks")
err = l.computeSeedChecks(ctx, ret)
if err != nil {
return nil, err
}
if !arg.skipSeedCheck {
err = hiddenPackage.CheckUpdatesAgainstSeedsWithMap(mctx, ret.PerTeamKeySeedsUnverified)
if err != nil {
return nil, err
}
}
// Make sure public works out
if ret.Chain.Public != arg.public {
return nil, fmt.Errorf("team public mismatch: chain:%v != arg:%v", ret.Chain.Public, arg.public)
}
if ret.Chain.Id.IsPublic() != ret.Chain.Public {
return nil, fmt.Errorf("team public mismatch: id:%v != chain:%v", ret.Chain.Id.IsPublic(), ret.Chain.Public)
}
// Sanity check the id
if !ret.Chain.Id.Eq(arg.teamID) {
return nil, fmt.Errorf("team id mismatch: %v != %v", ret.Chain.Id.String(), arg.teamID.String())
}
// Recalculate the team name.
// This must always run to pick up changes on chain and off-chain with ancestor renames.
// Also because without this a subteam could claim any parent in its name.
tracer.Stage("namecalc")
newName, err := l.calculateName(ctx, ret, arg.me, readSubteamID, arg.staleOK)
if err != nil {
return nil, fmt.Errorf("error recalculating name for %v: %v", ret.Name, err)
}
if !ret.Name.Eq(newName) {
// This deep copy is an absurd price to pay, but these mid-team renames should be quite rare.
copy := ret.DeepCopy()
ret = &copy
ret.Name = newName
}
var needHiddenRotate bool
if !arg.skipNeedHiddenRotateCheck {
needHiddenRotate, err = l.checkNeedRotate(mctx, ret, arg.me, hiddenPackage)
if err != nil {
return nil, err
}
}
err = hiddenPackage.Commit(mctx)
if err != nil {
return nil, err
}
l.logIfUnsyncedSecrets(ctx, ret)
// Mutating this field is safe because only TeamLoader, while holding the
// single-flight lock, reads or writes this field.
ret.CachedAt = keybase1.ToTime(l.G().Clock().Now())
// Clear the untrusted seqno hint.
// Mutating this field is safe because only TeamLoader, while holding the
// single-flight lock, reads or writes this field.
ret.LatestSeqnoHint = 0
tracer.Stage("audit")
err = l.audit(ctx, readSubteamID, &ret.Chain, arg.auditMode)
if err != nil {
return nil, err
}
// Cache the validated result if it was actually updated via the team/get endpoint. In many cases, we're not
// actually mutating the teams. Also, if we wound up filling in stubbed links, let's also restore the cache.
if teamUpdate != nil || filledInStubbedLinks {
tracer.Stage("put")
l.storage.Put(mctx, ret)
}
// If we wound up repolling the merkle tree for this team, say that we did.
if didRepoll {
l.merkleStorage.Put(mctx, arg.teamID, arg.public, keybase1.ToTime(mctx.G().Clock().Now()))
}
tracer.Stage("notify")
if cachedName != nil && !cachedName.Eq(newName) {
chain := TeamSigChainState{inner: ret.Chain, hidden: hiddenPackage.ChainData()}
// Send a notification if we used to have the name cached and it has changed at all.
changeSet := keybase1.TeamChangeSet{Renamed: true}
go l.G().NotifyRouter.HandleTeamChangedByID(context.Background(),
chain.GetID(), chain.GetLatestSeqno(), chain.IsImplicit(), changeSet, chain.GetLatestHiddenSeqno())
go l.G().NotifyRouter.HandleTeamChangedByName(context.Background(),
cachedName.String(), chain.GetLatestSeqno(), chain.IsImplicit(), changeSet, chain.GetLatestHiddenSeqno())
go l.G().NotifyRouter.HandleTeamChangedByName(context.Background(),
newName.String(), chain.GetLatestSeqno(), chain.IsImplicit(), changeSet, chain.GetLatestHiddenSeqno())
}
// Check request constraints
tracer.Stage("postcheck")
err = l.load2CheckReturn(mctx, arg, teamShim())
if err != nil {
return nil, err
}
load2res := load2ResT{
team: *ret,
didRepoll: didRepoll,
}
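// Attach the hidden chain data, along with the rotation flag, to the result.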
if hd := hiddenPackage.ChainData(); hd != nil {
hd.NeedRotate = needHiddenRotate
load2res.hidden = hd
}
if needHiddenRotate {
l.G().GetTeamBoxAuditor().MaybeScheduleDelayedBoxAuditTeam(mctx, arg.teamID)
}
return &load2res, nil
}
func (l *TeamLoader) hiddenPackageGetter(mctx libkb.MetaContext, id keybase1.TeamID, team *keybase1.TeamData, me keybase1.UserVersion) func() (encKID keybase1.KID, gen keybase1.PerTeamKeyGeneration, role keybase1.TeamRole, err error) {
return func() (encKID keybase1.KID, gen keybase1.PerTeamKeyGeneration,
role keybase1.TeamRole, err error) {
if team == nil {
return encKID, gen, keybase1.TeamRole_NONE, nil
}
state := TeamSigChainState{inner: team.Chain}
ptk, err := state.GetLatestPerTeamKey(mctx)
if err != nil {
return encKID, gen, keybase1.TeamRole_NONE, err
}
role, err = state.GetUserRole(me)
if err != nil {
return encKID, gen, keybase1.TeamRole_NONE, err
}
return ptk.EncKID, ptk.Gen, role, nil
}
}
func (l *TeamLoader) hiddenPackage(mctx libkb.MetaContext, id keybase1.TeamID, team *keybase1.TeamData, me keybase1.UserVersion) (ret *hidden.LoaderPackage, err error) {
getter := l.hiddenPackageGetter(mctx, id, team, me)
return hidden.NewLoaderPackage(mctx, id, getter)
}
func (l *TeamLoader) isAllowedKeyerOf(mctx libkb.MetaContext, chain *keybase1.TeamData, me keybase1.UserVersion, them keybase1.UserVersion) (ret bool, err error) {
state := TeamSigChainState{inner: chain.Chain}
mctx = mctx.WithLogTag("IAKO")
defer mctx.Trace(fmt.Sprintf("TeamLoader#isAllowedKeyerOf(%s, %s)", state.GetID(), them), func() error { return err })()
role, err := state.GetUserRole(them)
if err != nil {
return false, err
}
switch role {
case keybase1.TeamRole_WRITER, keybase1.TeamRole_ADMIN, keybase1.TeamRole_OWNER:
mctx.Debug("user fits explicit role (%s)", role)
return true, nil
}
if state.GetParentID() == nil {
mctx.Debug("user is not an allowed keyer of the team")
return false, nil
}
// Now check implicit adminship.
yes, err := l.isImplicitAdminOf(mctx.Ctx(), state.GetID(), state.GetParentID(), me, them)
if err != nil {
return false, err
}
if yes {
mctx.Debug("user is an implicit admin of the team")
return true, err
}
mctx.Debug("user is not an allowed keyer of the team")
return false, nil
}
func (l *TeamLoader) checkNeedRotate(mctx libkb.MetaContext, chain *keybase1.TeamData, me keybase1.UserVersion, hiddenPackage *hidden.LoaderPackage) (ret bool, err error) {
signer := hiddenPackage.LastReaderKeyRotator(mctx)
if signer == nil {
mctx.Debug("not checking need rotate, since last signer of hidden chain was nil")
return false, nil
}
return l.checkNeedRotateWithSigner(mctx, chain, me, *signer)
}
func (l *TeamLoader) checkNeedRotateWithSigner(mctx libkb.MetaContext, chain *keybase1.TeamData, me keybase1.UserVersion, signer keybase1.Signer) (ret bool, err error) {
defer mctx.Trace(fmt.Sprintf("TeamLoader::checkNeedRotateWithSigner(%+v)", signer), func() error { return err })()
uv := signer.UserVersion()
var isKeyer, amIKeyer bool
amIKeyer, err = l.isAllowedKeyerOf(mctx, chain, me, me)
if err != nil {
return false, err
}
if !amIKeyer {
mctx.Debug("I am not a keyer for this team, so I can't rotate it even if required")
return false, nil
}
isKeyer, err = l.isAllowedKeyerOf(mctx, chain, me, uv)
if err != nil {
return false, err
}
if !isKeyer {
mctx.Debug("need rotate since %+v isn't an allowed keyer of the team", uv)
return true, nil
}
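// Even if the signer is still an allowed keyer, rotate if the signing key was never found for that user or has since been revoked.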
var found bool
var revokedAt *keybase1.KeybaseTime
found, revokedAt, _, err = mctx.G().GetUPAKLoader().CheckKIDForUID(mctx.Ctx(), uv.Uid, signer.K)
if err != nil {
return false, err
}
if !found || revokedAt != nil {
var s string
if revokedAt != nil {
tm := revokedAt.Unix.Time()
s = fmt.Sprintf(" (revoked at %s [%s ago])", tm, mctx.G().Clock().Now().Sub(tm))
}
mctx.Debug("KID %s wasn't found for %+v%s", signer.K, uv, s)
return true, nil
}
return false, nil
}
func (l *TeamLoader) doOneLink(mctx libkb.MetaContext, arg load2ArgT, ret *keybase1.TeamData, hiddenPackage *hidden.LoaderPackage, link *ChainLinkUnpacked, i int, suppressLoggingStart int, suppressLoggingUpto int, lastSeqno keybase1.Seqno, parentChildOperations *[](*parentChildOperation), prev libkb.LinkID, fullVerifyCutoff keybase1.Seqno, readSubteamID keybase1.TeamID, proofSet *proofSetT, lkc *loadKeyCache, parentsCache *parentChainCache) (*keybase1.TeamData, libkb.LinkID, error) {
var nilPrev libkb.LinkID
ctx := mctx.Ctx()
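// Suppress per-link debug logging for the middle of long chains; only the first and last few links are logged at full verbosity.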
if suppressLoggingStart <= i && i < suppressLoggingUpto {
if i == suppressLoggingStart {
mctx.Debug("TeamLoader suppressing logs until %v", suppressLoggingUpto)
}
ctx = WithSuppressLogging(ctx, true)
mctx = mctx.WithContext(ctx)
}
if !ShouldSuppressLogging(ctx) {
mctx.Debug("TeamLoader processing link seqno:%v", link.Seqno())
}
if link.Seqno() > lastSeqno {
// This link came from a point in the chain after when we checked the merkle leaf.
// Processing it would require re-checking merkle.
// It would be tricky to ignore it because off-chain data is asserted to be in sync with the chain.
// So, return an error that the caller will retry.
mctx.Debug("TeamLoader found green link seqno:%v", link.Seqno())
return nil, nilPrev, NewGreenLinkError(link.Seqno())
}
if err := l.checkStubbed(ctx, arg, link); err != nil {
return nil, nilPrev, err
}
if !link.Prev().Eq(prev) {
return nil, nilPrev, NewPrevError("team replay failed: prev chain broken at link %d (%v != %v)",
i, link.Prev(), prev)
}
if err := consumeRatchets(mctx, hiddenPackage, link); err != nil {
return nil, nilPrev, err
}
if err := checkPTKGenerationNotOnHiddenChain(mctx, hiddenPackage, link); err != nil {
return nil, nilPrev, err
}
var signer *SignerX
var err error
signer, err = l.verifyLink(ctx, arg.teamID, ret, arg.me, link, fullVerifyCutoff,
readSubteamID, proofSet, lkc, *parentsCache)
if err != nil {
return nil, nilPrev, err
}
if l.isParentChildOperation(ctx, link) {
pco, err := l.toParentChildOperation(ctx, link)
if err != nil {
return nil, nilPrev, err
}
*parentChildOperations = append(*parentChildOperations, pco)
}
ret, err = l.applyNewLink(ctx, ret, hiddenPackage.ChainData(), link, signer, arg.me)
if err != nil {
return nil, nilPrev, err
}
return ret, link.LinkID(), nil
}
// userPreload warms the upak cache with users who will probably need to be loaded to verify the chain.
// Uses teamEnv and may be disabled.
func (l *TeamLoader) userPreload(ctx context.Context, links []*ChainLinkUnpacked, fullVerifyCutoff keybase1.Seqno) (cancel func()) {
ctx, cancel = context.WithCancel(ctx)
if teamEnv.UserPreloadEnable {
uidSet := make(map[keybase1.UID]struct{})
for _, link := range links {
// fullVerify definition copied from verifyLink
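// A link skips full verification only when it is a 'leave' link, sits below the cutoff, and carries a nonzero eldest seqno; signers of every other unstubbed link are preloaded.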
fullVerify := (link.LinkType() != libkb.SigchainV2TypeTeamLeave) ||
(link.Seqno() >= fullVerifyCutoff) ||
(link.source.EldestSeqno == 0)
if !link.isStubbed() && fullVerify {
uidSet[link.inner.Body.Key.UID] = struct{}{}
}
}
l.G().Log.CDebugf(ctx, "TeamLoader userPreload uids: %v", len(uidSet))
if teamEnv.UserPreloadParallel {
// Note this is full-parallel. Probably want pipelining if this is to be turned on by default.
var wg sync.WaitGroup
for uid := range uidSet {
wg.Add(1)
go func(uid keybase1.UID) {
_, _, err := l.G().GetUPAKLoader().LoadV2(
libkb.NewLoadUserArg(l.G()).WithUID(uid).WithPublicKeyOptional().WithNetContext(ctx))
if err != nil {
l.G().Log.CDebugf(ctx, "error preloading uid %v", uid)
}
wg.Done()
}(uid)
}
if teamEnv.UserPreloadWait {
wg.Wait()
}
} else {
for uid := range uidSet {
_, _, err := l.G().GetUPAKLoader().LoadV2(
libkb.NewLoadUserArg(l.G()).WithUID(uid).WithPublicKeyOptional().WithNetContext(ctx))
if err != nil {
l.G().Log.CDebugf(ctx, "error preloading uid %v", uid)
}
}
}
}
return cancel
}
// Decide whether to repoll merkle based on load arg.
// Returns (discardCache, repoll)
// discardCache - the caller should throw out their cached copy and repoll.
// repoll - hit up merkle for the latest tail
// Considers:
// - NeedAdmin
// - NeedKeyGeneration
// - NeedApplicationsAtGenerations
// - WantMembers
// - ForceRepoll
// - Cache freshness / StaleOK
// - NeedSeqnos
// - JustUpdated
// - If this user is in global "force repoll" mode: pushing out individual team changed
// notifications would be too spammy, so all team loads need a repoll.
func (l *TeamLoader) load2DecideRepoll(mctx libkb.MetaContext, arg load2ArgT, fromCache Teamer, cachedPolledAt *keybase1.Time) (discardCache bool, repoll bool) {
var reason string
defer func() {
if discardCache || repoll || reason != "" {
mctx.Debug("load2DecideRepoll -> (discardCache:%v, repoll:%v) %v", discardCache, repoll, reason)
}
}()
// NeedAdmin is a special constraint where we start from scratch.
// Because of admin-only invite links.
if arg.needAdmin {
if !l.satisfiesNeedAdmin(mctx, arg.me, fromCache) {
// Start from scratch if we are newly admin
reason = "!satisfiesNeedAdmin"
return true, true
}
}
if arg.forceRepoll {
reason = "forceRepoll"
return false, true
}
// Repoll if the server has previously hinted that the team has new links.
if fromCache != nil && fromCache.MainChain() != nil && fromCache.MainChain().Chain.LastSeqno < fromCache.MainChain().LatestSeqnoHint {
reason = "behind seqno hint"
return false, true
}
if fromCache != nil && fromCache.HiddenChain() != nil && fromCache.HiddenChain().IsStale() {
reason = "behind hidden seqno hint"
return false, true
}
// Repoll to get a new key generation
if arg.needKeyGeneration > 0 {
if err := l.satisfiesNeedKeyGeneration(mctx, arg.needKeyGeneration, fromCache); err != nil {
reason = fmt.Sprintf("satisfiesNeedKeyGeneration -> %v", err)
return false, true
}
}
// Repoll to get new applications at generations
if len(arg.needApplicationsAtGenerations) > 0 {
if err := l.satisfiesNeedApplicationsAtGenerations(mctx, arg.needApplicationsAtGenerations, fromCache); err != nil {
reason = fmt.Sprintf("satisfiesNeedApplicationsAtGenerations -> %v", err)
return false, true
}
}
if arg.needKBFSKeyGeneration.Generation > 0 {
if err := l.satisfiesNeedsKBFSKeyGeneration(mctx, arg.needKBFSKeyGeneration, fromCache); err != nil {
reason = fmt.Sprintf("satisfiesNeedsKBFSKeyGeneration -> %v", err)
return false, true
}
}
if len(arg.needApplicationsAtGenerationsWithKBFS) > 0 {
if err := l.satisfiesNeedApplicationsAtGenerationsWithKBFS(mctx,
arg.needApplicationsAtGenerationsWithKBFS, fromCache); err != nil {
reason = fmt.Sprintf("satisfiesNeedApplicationsAtGenerationsWithKBFS -> %v", err)
return false, true
}
}
// Repoll because it might help get the wanted members
if len(arg.wantMembers) > 0 {
if err := l.satisfiesWantMembers(mctx, arg.wantMembers, arg.wantMembersRole, fromCache); err != nil {
reason = fmt.Sprintf("satisfiesWantMembers -> %v", err)
return false, true
}
}
// Repoll if we need a seqno not in the cache.
// Does not force a repoll if we just need to fill in previous links
if len(arg.needSeqnos) > 0 {
if fromCache == nil || fromCache.MainChain() == nil {
reason = "need seqnos and no cache"
return false, true
}
if fromCache.MainChain().Chain.LastSeqno < l.seqnosMax(arg.needSeqnos) {
reason = "need seqnos"
return false, true
}
}
if fromCache == nil || fromCache.MainChain() == nil {
reason = "no cache"
// We need a merkle leaf when starting from scratch.
return false, true
}
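// Judge freshness against the later of the snapshot's CachedAt and the last time merkle was polled for this team.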
cachedAt := fromCache.MainChain().CachedAt
if cachedPolledAt != nil && *cachedPolledAt > cachedAt {
cachedAt = *cachedPolledAt
}
cacheIsOld := !l.isFresh(mctx, cachedAt)
if cacheIsOld && !arg.staleOK {
// We need a merkle leaf
reason = "cacheIsOld"
return false, true
}
// InForceRepollMode needs to acquire a lock, so avoid it by checking it last.
if l.InForceRepollMode(mctx) {
reason = "InForceRepollMode"
return false, true
}
return false, false
}
// Check whether the load produced a snapshot that can be returned to the caller.
// This should not check anything that is critical to the validity of the snapshot
// because the snapshot is put into the cache before this check.
// Considers:
// - NeedAdmin
// - NeedKeyGeneration
// - NeedSeqnos
func (l *TeamLoader) load2CheckReturn(mctx libkb.MetaContext, arg load2ArgT, shim Teamer) error {
if arg.needAdmin {
if !l.satisfiesNeedAdmin(mctx, arg.me, shim) {
mctx.Debug("user %v is not an admin of team %v at seqno:%v", arg.me, arg.teamID, shim.MainChain().Chain.LastSeqno)
return fmt.Errorf("user %v is not an admin of the team", arg.me)
}
}
// Repoll to get a new key generation
if arg.needKeyGeneration > 0 {
if err := l.satisfiesNeedKeyGeneration(mctx, arg.needKeyGeneration, shim); err != nil {
return err
}
}
if len(arg.needApplicationsAtGenerations) > 0 {
if err := l.satisfiesNeedApplicationsAtGenerations(mctx, arg.needApplicationsAtGenerations, shim); err != nil {
return err
}
}
if arg.needKBFSKeyGeneration.Generation > 0 {
if err := l.satisfiesNeedsKBFSKeyGeneration(mctx, arg.needKBFSKeyGeneration, shim); err != nil {
return err
}
}
if len(arg.needApplicationsAtGenerationsWithKBFS) > 0 {
if err := l.satisfiesNeedApplicationsAtGenerationsWithKBFS(mctx, arg.needApplicationsAtGenerationsWithKBFS, shim); err != nil {
return err
}
}
if len(arg.needSeqnos) > 0 {
if err := l.checkNeededSeqnos(mctx.Ctx(), shim.MainChain(), arg.needSeqnos); err != nil {
return err
}
}
return nil
}
// Whether the user is an admin at the snapshot, and there are no stubbed links, and keys are up to date.
func (l *TeamLoader) satisfiesNeedAdmin(mctx libkb.MetaContext, me keybase1.UserVersion, team Teamer) bool {
if team == nil || team.MainChain() == nil {
return false
}
state := newTeamSigChainState(team)
if state.HasAnyStubbedLinks() {
return false
}
if !l.hasSyncedSecrets(mctx, team) {
return false
}
role, err := state.GetUserRole(me)
if err != nil {
mctx.Debug("TeamLoader error getting my role: %v", err)
return false
}
if !role.IsAdminOrAbove() {
if !state.IsSubteam() {
return false
}
yes, err := l.isImplicitAdminOf(mctx.Ctx(), state.GetID(), state.GetParentID(), me, me)
if err != nil {
mctx.Debug("TeamLoader error checking implicit admin: %s", err)
return false
}
if !yes {
return false
}
}
return true
}
// Check whether a user is an implicit admin of a team.
func (l *TeamLoader) isImplicitAdminOf(ctx context.Context, teamID keybase1.TeamID, ancestorID *keybase1.TeamID,
me keybase1.UserVersion, uv keybase1.UserVersion) (bool, error) {
// IDs of ancestors that were not freshly polled.
// Check them again with forceRepoll if the affirmative is not found cached.
checkAgain := make(map[keybase1.TeamID]bool)
check1 := func(chain *TeamSigChainState) bool {
role, err := chain.GetUserRole(uv)
if err != nil {
return false
}
return role.IsAdminOrAbove()
}
i := 0
for {
i++
if i >= 100 {
// Break in case there's a bug in this loop.
return false, fmt.Errorf("stuck in a loop while checking for implicit admin: %v", ancestorID)
}
// Use load2 so that we can use subteam-reader and get secretless teams.
ancestor, err := l.load2(ctx, load2ArgT{
teamID: *ancestorID,
reason: "isImplicitAdminOf-1",
me: me,
readSubteamID: &teamID,
})
if err != nil {
return false, err
}
// Be wary, `ancestor` could be, and is likely, a secretless team.
// Do not let it out of sight.
ancestorChain := TeamSigChainState{inner: ancestor.team.Chain}
if !ancestor.didRepoll {
checkAgain[ancestorChain.GetID()] = true
}
if check1(&ancestorChain) {
return true, nil
}
if !ancestorChain.IsSubteam() {
break
}
// Get the next level up.
ancestorID = ancestorChain.GetParentID()
}
// The answer was not found to be yes in the cache.
// Try again with the teams that were not polled as they might have unseen updates.
for ancestorID := range checkAgain {
ancestor, err := l.load2(ctx, load2ArgT{
teamID: ancestorID,
reason: "isImplicitAdminOf-again",
me: me,
forceRepoll: true, // Get the latest info.
readSubteamID: &teamID,
})
if err != nil {
return false, err
}
// Be wary, `ancestor` could be, and is likely, a secretless team.
// Do not let it out of sight.
ancestorChain := TeamSigChainState{inner: ancestor.team.Chain}
if check1(&ancestorChain) {
return true, nil
}
}
return false, nil
}
func (l *TeamLoader) satisfiesNeedsKBFSKeyGeneration(mctx libkb.MetaContext,
kbfs keybase1.TeamKBFSKeyRefresher, state Teamer) error {
if kbfs.Generation == 0 {
return nil
}
if state == nil {
return fmt.Errorf("nil team does not contain KBFS key generation: %#v", kbfs)
}
gen, err := newTeamSigChainState(state).GetLatestKBFSGeneration(kbfs.AppType)
if err != nil {
return err
}
if kbfs.Generation > gen {
return NewKBFSKeyGenerationError(kbfs.Generation, gen)
}
return nil
}
// Whether the snapshot has loaded at least up to the key generation and has the secret.
func (l *TeamLoader) satisfiesNeedKeyGeneration(mctx libkb.MetaContext, needKeyGeneration keybase1.PerTeamKeyGeneration, state Teamer) error {
if needKeyGeneration == 0 {
return nil
}
if state == nil {
return fmt.Errorf("nil team does not contain key generation: %v", needKeyGeneration)
}
key, err := newTeamSigChainState(state).GetLatestPerTeamKey(mctx)
if err != nil {
return err
}
if needKeyGeneration > key.Gen {
return fmt.Errorf("team key generation too low: %v < %v", key.Gen, needKeyGeneration)
}
_, ok := state.MainChain().PerTeamKeySeedsUnverified[needKeyGeneration]
if !ok {
return fmt.Errorf("team key secret missing for generation: %v", needKeyGeneration)
}
return nil
}
// Whether the snapshot has loaded the reader key masks and key generations we
// need.
func (l *TeamLoader) satisfiesNeedApplicationsAtGenerations(mctx libkb.MetaContext,
needApplicationsAtGenerations map[keybase1.PerTeamKeyGeneration][]keybase1.TeamApplication, team Teamer) error {
if len(needApplicationsAtGenerations) == 0 {
return nil
}
if team == nil || team.MainChain() == nil {
return fmt.Errorf("nil team does not contain applications: %v", needApplicationsAtGenerations)
}
for ptkGen, apps := range needApplicationsAtGenerations {
for _, app := range apps {
if _, err := ApplicationKeyAtGeneration(mctx, team, app, ptkGen); err != nil {
return err
}
}
}
return nil
}
func (l *TeamLoader) satisfiesNeedApplicationsAtGenerationsWithKBFS(mctx libkb.MetaContext,
needApplicationsAtGenerations map[keybase1.PerTeamKeyGeneration][]keybase1.TeamApplication,
state Teamer) error {
if len(needApplicationsAtGenerations) == 0 {
return nil
}
if state == nil || state.MainChain() == nil {
return fmt.Errorf("nil team does not contain applications: %v", needApplicationsAtGenerations)
}
for ptkGen, apps := range needApplicationsAtGenerations {
for _, app := range apps {
if _, err := ApplicationKeyAtGenerationWithKBFS(mctx, state, app, ptkGen); err != nil {
return err
}
}
}
return nil
}
// Whether the snapshot has each of `wantMembers` as a member.
func (l *TeamLoader) satisfiesWantMembers(mctx libkb.MetaContext,
wantMembers []keybase1.UserVersion, wantMembersRole keybase1.TeamRole, state Teamer) error {
if wantMembersRole == keybase1.TeamRole_NONE {
// Default to writer.
wantMembersRole = keybase1.TeamRole_WRITER
}
if len(wantMembers) == 0 {
return nil
}
if state == nil {
return fmt.Errorf("nil team does not have wanted members")
}
for _, uv := range wantMembers {
role, err := newTeamSigChainState(state).GetUserRole(uv)
if err != nil {
return fmt.Errorf("could not get wanted user role: %v", err)
}
if !role.IsOrAbove(wantMembersRole) {
return fmt.Errorf("wanted user %v is a %v which is not at least %v", uv, role, wantMembersRole)
}
}
return nil
}
func (l *TeamLoader) mungeWantMembers(ctx context.Context, wantMembers []keybase1.UserVersion) (res []keybase1.UserVersion, err error) {
for _, uv1 := range wantMembers {
uv2 := uv1
if uv2.EldestSeqno == 0 {
// Lookup the latest eldest seqno for that uid.
// This value may come from a cache.
uv2.EldestSeqno, err = l.world.lookupEldestSeqno(ctx, uv2.Uid)
if err != nil {
return res, err
}
l.G().Log.CDebugf(ctx, "TeamLoader resolved wantMember %v -> %v", uv2.Uid, uv2.EldestSeqno)
}
res = append(res, uv2)
}
return res, err
}
// Whether y is in xs.
func (l *TeamLoader) seqnosContains(xs []keybase1.Seqno, y keybase1.Seqno) bool {
for _, x := range xs {
if x.Eq(y) {
return true
}
}
return false
}
// Return the max in a list of positive seqnos. Returns 0 if the list is empty
func (l *TeamLoader) seqnosMax(seqnos []keybase1.Seqno) (ret keybase1.Seqno) {
for _, x := range seqnos {
if x > ret {
ret = x
}
}
return ret
}
// Whether a TeamData from the cache is fresh.
func (l *TeamLoader) isFresh(mctx libkb.MetaContext, cachedAt keybase1.Time) bool {
if cachedAt.IsZero() {
// This should never happen.
mctx.Warning("TeamLoader encountered zero cached time")
return false
}
diff := mctx.G().Clock().Now().Sub(cachedAt.Time())
fresh := (diff <= freshnessLimit)
if !fresh {
mctx.Debug("TeamLoader cached snapshot is old: %v", diff)
}
return fresh
}
// Whether the team's secrets are synced to the same point as its sigchain.
// Does not check RKMs.
func (l *TeamLoader) hasSyncedSecrets(mctx libkb.MetaContext, team Teamer) bool {
state := team.MainChain()
n := len(team.MainChain().Chain.PerTeamKeys)
offChainGen := len(state.PerTeamKeySeedsUnverified)
mctx.Debug("TeamLoader#hasSyncedSecrets: found %d PTKs on the main chain (versus %d seeds)", n, offChainGen)
if team.HiddenChain() != nil {
m := len(team.HiddenChain().ReaderPerTeamKeys)
mctx.Debug("TeamLoader#hasSyncedSecrets: found another %d PTKs on the hidden chain", m)
n += m
}
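// Consider secrets synced when the number of seeds matches the total number of PTKs across both chains.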
return (n == offChainGen)
}
func (l *TeamLoader) logIfUnsyncedSecrets(ctx context.Context, state *keybase1.TeamData) {
onChainGen := keybase1.PerTeamKeyGeneration(len(state.Chain.PerTeamKeys))
offChainGen := keybase1.PerTeamKeyGeneration(len(state.PerTeamKeySeedsUnverified))
if onChainGen != offChainGen {
l.G().Log.CDebugf(ctx, "TeamLoader unsynced secrets local:%v != chain:%v ", offChainGen, onChainGen)
}
}
func (l *TeamLoader) lows(mctx libkb.MetaContext, state *keybase1.TeamData, hp *hidden.LoaderPackage) getLinksLows {
var lows getLinksLows
if state != nil {
chain := TeamSigChainState{inner: state.Chain}
lows.Seqno = chain.GetLatestSeqno()
lows.PerTeamKey = keybase1.PerTeamKeyGeneration(len(state.PerTeamKeySeedsUnverified))
// Use an arbitrary application to get the number of known RKMs.
// TODO: using an arbitrary RKM is wrong and could lead to stuck caches.
// See CORE-8445
rkms, ok := state.ReaderKeyMasks[keybase1.TeamApplication_CHAT]
if ok {
lows.ReaderKeyMask = keybase1.PerTeamKeyGeneration(len(rkms))
}
}
if hp != nil {
lows.HiddenChainSeqno = hp.LastFullSeqno()
}
return lows
}
func (l *TeamLoader) OnLogout(mctx libkb.MetaContext) error {
l.storage.ClearMem()
return nil
}
func (l *TeamLoader) OnDbNuke(mctx libkb.MetaContext) error {
l.storage.ClearMem()
return nil
}
// Clear the in-memory cache.
func (l *TeamLoader) ClearMem() {
l.storage.ClearMem()
}
func (l *TeamLoader) VerifyTeamName(ctx context.Context, id keybase1.TeamID, name keybase1.TeamName) error {
if name.IsRootTeam() {
if !name.ToTeamID(id.IsPublic()).Eq(id) {
return NewResolveError(name, id)
}
return nil
}
teamData, _, err := l.Load(ctx, keybase1.LoadTeamArg{
ID: id,
Public: id.IsPublic(),
})
if err != nil {
return err
}
gotName := teamData.Name
if !gotName.Eq(name) {
return NewResolveError(name, id)
}
return nil
}
// List all the admins of ancestor teams.
// Includes admins of the specified team only if they are also admins of ancestor teams.
// The specified team must be a subteam, or an error is returned.
// Always sends a flurry of RPCs to get the most up to date info.
func (l *TeamLoader) ImplicitAdmins(ctx context.Context, teamID keybase1.TeamID) (impAdmins []keybase1.UserVersion, err error) {
impAdminsMap := make(map[string]keybase1.UserVersion) // map to remove dups
err = l.MapTeamAncestors(ctx, func(t keybase1.TeamSigChainState) error {
ancestorChain := TeamSigChainState{inner: t}
// Gather the admins.
adminRoles := []keybase1.TeamRole{keybase1.TeamRole_OWNER, keybase1.TeamRole_ADMIN}
for _, role := range adminRoles {
uvs, err := ancestorChain.GetUsersWithRole(role)
if err != nil {
return err
}
for _, uv := range uvs {
impAdminsMap[uv.String()] = uv
}
}
return nil
}, teamID, "implicitAdminsAncestor", func(keybase1.TeamSigChainState) bool { return true })
if err != nil {
return nil, err
}
for _, uv := range impAdminsMap {
impAdmins = append(impAdmins, uv)
}
return impAdmins, nil
}
// MapTeamAncestors does NOT map over the team itself.
func (l *TeamLoader) MapTeamAncestors(ctx context.Context, f func(t keybase1.TeamSigChainState) error, teamID keybase1.TeamID, reason string, forceFullReloadOnceToAssert func(t keybase1.TeamSigChainState) bool) (err error) {
me, err := l.world.getMe(ctx)
if err != nil {
return err
}
// Load the argument team
team, _, err := l.load1(ctx, me, keybase1.LoadTeamArg{
ID: teamID,
Public: teamID.IsPublic(),
StaleOK: true, // We only use immutable fields.
})
if err != nil {
return err
}
teamChain := TeamSigChainState{inner: team.Chain}
if !teamChain.IsSubteam() {
return fmt.Errorf("cannot map over parents of a root team: %v", teamID)
}
return l.mapTeamAncestorsHelper(ctx, f, teamID, teamChain.GetParentID(), reason, forceFullReloadOnceToAssert)
}
func (l *TeamLoader) mapTeamAncestorsHelper(ctx context.Context, f func(t keybase1.TeamSigChainState) error, teamID keybase1.TeamID, ancestorID *keybase1.TeamID, reason string, forceFullReloadOnceToAssert func(t keybase1.TeamSigChainState) bool) (err error) {
me, err := l.world.getMe(ctx)
if err != nil {
return err
}
i := 0
for {
i++
if i >= 100 {
// Break in case there's a bug in this loop.
return fmt.Errorf("stuck in a loop while mapping over team parents: %v", ancestorID)
}
load2Arg := load2ArgT{
teamID: *ancestorID,
reason: reason,
me: me,
forceRepoll: true, // Get the latest info.
readSubteamID: &teamID,
}
var ancestor *load2ResT
for {
var err error
// Use load2 so that we can use subteam-reader and get secretless teams.
ancestor, err = l.load2(ctx, load2Arg)
if err != nil {
return err
}
if forceFullReloadOnceToAssert(ancestor.team.Chain) {
break
}
if load2Arg.forceFullReload {
return fmt.Errorf("failed to assert predicate in ancestor %v after full force reload", ancestor.team.ID())
}
load2Arg.forceFullReload = true
}
// Be wary, `ancestor` could be, and is likely, a secretless team.
// Do not let it out of sight.
ancestorChain := TeamSigChainState{inner: ancestor.team.Chain}
err = f(ancestor.team.Chain)
if err != nil {
return err
}
if !ancestorChain.IsSubteam() {
break
}
// Get the next level up.
ancestorID = ancestorChain.GetParentID()
}
return nil
}
func (l *TeamLoader) NotifyTeamRename(ctx context.Context, id keybase1.TeamID, newName string) error {
// ignore newName from the server
// Load up the ancestor chain with ForceRepoll.
// Then load down the ancestor chain without it (expect cache hits).
// Not the most elegant way, but it will get the job done.
// Each load on the way down will recalculate that team's name.
var ancestorIDs []keybase1.TeamID
me, err := l.world.getMe(ctx)
if err != nil {
return err
}
loopID := &id
for loopID != nil {
load2Res, err := l.load2(ctx, load2ArgT{
teamID: *loopID,
reason: "NotifyTeamRename-force",
forceRepoll: true,
readSubteamID: &id,
me: me,
})
if err != nil {
return err
}
ancestorIDs = append(ancestorIDs, *loopID)
chain := TeamSigChainState{inner: load2Res.team.Chain}
if chain.IsSubteam() {
loopID = chain.GetParentID()
} else {
loopID = nil
}
}
// reverse ancestorIDs so the root team appears first
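// (The index-based comparator is just a shorthand here for reversing the slice.)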
sort.SliceStable(ancestorIDs, func(i, j int) bool { return i > j })
for _, loopID := range ancestorIDs {
_, err := l.load2(ctx, load2ArgT{
teamID: loopID,
reason: "NotifyTeamRename-quick",
readSubteamID: &id,
me: me,
})
if err != nil {
return err
}
}
return nil
}
func (l *TeamLoader) getHeadMerkleSeqno(mctx libkb.MetaContext, readSubteamID keybase1.TeamID, state *keybase1.TeamSigChainState) (ret keybase1.Seqno, err error) {
defer mctx.Trace("TeamLoader#getHeadMerkleSeqno", func() error { return err })()
if state.HeadMerkle != nil {
return state.HeadMerkle.Seqno, nil
}
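// No cached HeadMerkle: fetch link 1 from the server, check it against the link ID we already hold for seqno 1, and record its merkle root.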
headSeqno := keybase1.Seqno(1)
expectedLinkRaw, ok := state.LinkIDs[headSeqno]
if !ok {
return ret, fmt.Errorf("couldn't find head link in team state during audit")
}
expectedLink, err := libkb.ImportLinkID(expectedLinkRaw)
if err != nil {
return ret, err
}
teamUpdate, err := l.world.getLinksFromServer(mctx.Ctx(), state.Id, []keybase1.Seqno{headSeqno}, &readSubteamID)
if err != nil {
return ret, err
}
newLinks, err := teamUpdate.unpackLinks(mctx.Ctx())
if err != nil {
return ret, err
}
if len(newLinks) != 1 {
return ret, fmt.Errorf("expected only one chainlink back; got %d", len(newLinks))
}
headLink := newLinks[0]
err = headLink.AssertInnerOuterMatch()
if err != nil {
return ret, err
}
if headLink.Seqno() != headSeqno {
return ret, NewInvalidLink(headLink, "wrong head seqno; wanted 1 but got something else")
}
if !headLink.LinkID().Eq(expectedLink) {
return ret, NewInvalidLink(headLink, "wrong head link hash: %s != %s", headLink.LinkID(), expectedLink)
}
if headLink.isStubbed() {
return ret, NewInvalidLink(headLink, "got a stubbed head link, but wasn't expecting that")
}
headMerkle := headLink.inner.Body.MerkleRoot.ToMerkleRootV2()
state.HeadMerkle = &headMerkle
return headMerkle.Seqno, nil
}
func (l *TeamLoader) audit(ctx context.Context, readSubteamID keybase1.TeamID, state *keybase1.TeamSigChainState, auditMode keybase1.AuditMode) (err error) {
mctx := libkb.NewMetaContext(ctx, l.G())
if l.G().Env.Test.TeamSkipAudit {
mctx.Debug("skipping audit in test due to flag")
return nil
}
headMerkleSeqno, err := l.getHeadMerkleSeqno(mctx, readSubteamID, state)
if err != nil {
return err
}
err = mctx.G().GetTeamAuditor().AuditTeam(mctx, state.Id, state.Public, headMerkleSeqno, state.LinkIDs, state.LastSeqno, auditMode)
return err
}
func (l *TeamLoader) ForceRepollUntil(ctx context.Context, dtime gregor.TimeOrOffset) error {
l.G().Log.CDebugf(ctx, "TeamLoader#ForceRepollUntil(%+v)", dtime)
l.forceRepollMutex.Lock()
defer l.forceRepollMutex.Unlock()
l.forceRepollUntil = dtime
return nil
}
func (l *TeamLoader) InForceRepollMode(mctx libkb.MetaContext) bool {
l.forceRepollMutex.Lock()
defer l.forceRepollMutex.Unlock()
if l.forceRepollUntil == nil {
return false
}
if !l.forceRepollUntil.Before(mctx.G().Clock().Now()) {
mctx.Debug("TeamLoader#InForceRepollMode: returning true")
return true
}
l.forceRepollUntil = nil
return false
}
| [
"\"KEYBASE_TEAM_PROF\"",
"\"KEYBASE_TEAM_PE\"",
"\"KEYBASE_TEAM_PP\"",
"\"KEYBASE_TEAM_PW\"",
"\"KEYBASE_TEAM_SP\""
]
| []
| [
"KEYBASE_TEAM_PP",
"KEYBASE_TEAM_PW",
"KEYBASE_TEAM_SP",
"KEYBASE_TEAM_PROF",
"KEYBASE_TEAM_PE"
]
| [] | ["KEYBASE_TEAM_PP", "KEYBASE_TEAM_PW", "KEYBASE_TEAM_SP", "KEYBASE_TEAM_PROF", "KEYBASE_TEAM_PE"] | go | 5 | 0 | |
tests/vmi_multus_test.go | /*
* This file is part of the kubevirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2018 Red Hat, Inc.
*
*/
package tests_test
import (
"flag"
"fmt"
"os"
"strings"
"time"
expect "github.com/google/goexpect"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v13 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
v1 "kubevirt.io/client-go/api/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/kubevirt/tests"
)
const (
postUrl = "/apis/k8s.cni.cncf.io/v1/namespaces/%s/network-attachment-definitions/%s"
linuxBridgeConfCRD = `{"apiVersion":"k8s.cni.cncf.io/v1","kind":"NetworkAttachmentDefinition","metadata":{"name":"%s","namespace":"%s"},"spec":{"config":"{ \"cniVersion\": \"0.3.1\", \"name\": \"mynet\", \"plugins\": [{\"type\": \"bridge\", \"bridge\": \"br10\", \"vlan\": 100, \"ipam\": {}},{\"type\": \"tuning\"}]}"}}`
ptpConfCRD = `{"apiVersion":"k8s.cni.cncf.io/v1","kind":"NetworkAttachmentDefinition","metadata":{"name":"%s","namespace":"%s"},"spec":{"config":"{ \"cniVersion\": \"0.3.1\", \"name\": \"mynet\", \"plugins\": [{\"type\": \"ptp\", \"ipam\": { \"type\": \"host-local\", \"subnet\": \"10.1.1.0/24\" }},{\"type\": \"tuning\"}]}"}}`
sriovConfCRD = `{"apiVersion":"k8s.cni.cncf.io/v1","kind":"NetworkAttachmentDefinition","metadata":{"name":"%s","namespace":"%s","annotations":{"k8s.v1.cni.cncf.io/resourceName":"%s"}},"spec":{"config":"{ \"name\": \"sriov\", \"type\": \"sriov\", \"ipam\": { \"type\": \"host-local\", \"subnet\": \"10.1.1.0/24\" } }"}}`
sriovLinkEnableConfCRD = `{"apiVersion":"k8s.cni.cncf.io/v1","kind":"NetworkAttachmentDefinition","metadata":{"name":"%s","namespace":"%s","annotations":{"k8s.v1.cni.cncf.io/resourceName":"%s"}},"spec":{"config":"{ \"name\": \"sriov\", \"type\": \"sriov\", \"link_state\": \"enable\", \"ipam\": { \"type\": \"host-local\", \"subnet\": \"10.1.1.0/24\" } }"}}`
)
var _ = Describe("Multus", func() {
tests.FlagParse()
virtClient, err := kubecli.GetKubevirtClient()
tests.PanicOnError(err)
var nodes *k8sv1.NodeList
defaultInterface := v1.Interface{
Name: "default",
InterfaceBindingMethod: v1.InterfaceBindingMethod{
Masquerade: &v1.InterfaceMasquerade{},
},
}
linuxBridgeInterface := v1.Interface{
Name: "linux-bridge",
InterfaceBindingMethod: v1.InterfaceBindingMethod{
Bridge: &v1.InterfaceBridge{},
},
}
defaultNetwork := v1.Network{
Name: "default",
NetworkSource: v1.NetworkSource{
Pod: &v1.PodNetwork{},
},
}
linuxBridgeNetwork := v1.Network{
Name: "linux-bridge",
NetworkSource: v1.NetworkSource{
Multus: &v1.MultusNetwork{
NetworkName: "linux-bridge-net-vlan100",
},
},
}
BeforeEach(func() {
// Multus tests need to ensure that old VMIs are gone
Expect(virtClient.RestClient().Delete().Namespace(tests.NamespaceTestDefault).Resource("virtualmachineinstances").Do().Error()).To(Succeed())
Expect(virtClient.RestClient().Delete().Namespace(tests.NamespaceTestAlternative).Resource("virtualmachineinstances").Do().Error()).To(Succeed())
Eventually(func() int {
list1, err := virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).List(&v13.ListOptions{})
Expect(err).ToNot(HaveOccurred())
list2, err := virtClient.VirtualMachineInstance(tests.NamespaceTestAlternative).List(&v13.ListOptions{})
Expect(err).ToNot(HaveOccurred())
return len(list1.Items) + len(list2.Items)
}, 6*time.Minute, 1*time.Second).Should(BeZero())
})
createVMIOnNode := func(interfaces []v1.Interface, networks []v1.Network) *v1.VirtualMachineInstance {
vmi := tests.NewRandomVMIWithEphemeralDiskAndUserdata(tests.ContainerDiskFor(tests.ContainerDiskAlpine), "#!/bin/bash\n")
vmi.Spec.Domain.Devices.Interfaces = interfaces
vmi.Spec.Networks = networks
// Arbitrarily select one compute node in the cluster, on which it is possible to create a VMI
// (i.e. a schedulable node).
nodeName := nodes.Items[0].Name
tests.StartVmOnNode(vmi, nodeName)
return vmi
}
tests.BeforeAll(func() {
tests.BeforeTestCleanup()
nodes = tests.GetAllSchedulableNodes(virtClient)
Expect(len(nodes.Items) > 0).To(BeTrue())
configureNodeNetwork(virtClient)
result := virtClient.RestClient().
Post().
RequestURI(fmt.Sprintf(postUrl, tests.NamespaceTestDefault, "linux-bridge-net-vlan100")).
Body([]byte(fmt.Sprintf(linuxBridgeConfCRD, "linux-bridge-net-vlan100", tests.NamespaceTestDefault))).
Do()
Expect(result.Error()).NotTo(HaveOccurred())
// Create ptp crds with tuning plugin enabled in two different namespaces
result = virtClient.RestClient().
Post().
RequestURI(fmt.Sprintf(postUrl, tests.NamespaceTestDefault, "ptp-conf-1")).
Body([]byte(fmt.Sprintf(ptpConfCRD, "ptp-conf-1", tests.NamespaceTestDefault))).
Do()
Expect(result.Error()).NotTo(HaveOccurred())
result = virtClient.RestClient().
Post().
RequestURI(fmt.Sprintf(postUrl, tests.NamespaceTestAlternative, "ptp-conf-2")).
Body([]byte(fmt.Sprintf(ptpConfCRD, "ptp-conf-2", tests.NamespaceTestAlternative))).
Do()
Expect(result.Error()).NotTo(HaveOccurred())
})
Describe("[rfe_id:694][crit:medium][vendor:[email protected]][level:component]VirtualMachineInstance using different types of interfaces.", func() {
Context("VirtualMachineInstance with cni ptp plugin interface", func() {
It("[test_id:1751]should create a virtual machine with one interface", func() {
By("checking virtual machine instance can ping 10.1.1.1 using ptp cni plugin")
detachedVMI := tests.NewRandomVMIWithEphemeralDiskAndUserdata(tests.ContainerDiskFor(tests.ContainerDiskCirros), "#!/bin/bash\necho 'hello'\n")
detachedVMI.Spec.Domain.Devices.Interfaces = []v1.Interface{{Name: "ptp", InterfaceBindingMethod: v1.InterfaceBindingMethod{Bridge: &v1.InterfaceBridge{}}}}
detachedVMI.Spec.Networks = []v1.Network{
{Name: "ptp", NetworkSource: v1.NetworkSource{
Multus: &v1.MultusNetwork{NetworkName: "ptp-conf-1"},
}},
}
_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(detachedVMI)
Expect(err).ToNot(HaveOccurred())
tests.WaitUntilVMIReady(detachedVMI, tests.LoggedInCirrosExpecter)
pingVirtualMachine(detachedVMI, "10.1.1.1", "\\$ ")
})
It("[test_id:1752]should create a virtual machine with one interface with network definition from different namespace", func() {
tests.SkipIfOpenShift4("OpenShift 4 does not support usage of the network definition from the different namespace")
By("checking virtual machine instance can ping 10.1.1.1 using ptp cni plugin")
detachedVMI := tests.NewRandomVMIWithEphemeralDiskAndUserdata(tests.ContainerDiskFor(tests.ContainerDiskCirros), "#!/bin/bash\necho 'hello'\n")
detachedVMI.Spec.Domain.Devices.Interfaces = []v1.Interface{{Name: "ptp", InterfaceBindingMethod: v1.InterfaceBindingMethod{Bridge: &v1.InterfaceBridge{}}}}
detachedVMI.Spec.Networks = []v1.Network{
{Name: "ptp", NetworkSource: v1.NetworkSource{
Multus: &v1.MultusNetwork{NetworkName: fmt.Sprintf("%s/%s", tests.NamespaceTestAlternative, "ptp-conf-2")},
}},
}
_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(detachedVMI)
Expect(err).ToNot(HaveOccurred())
tests.WaitUntilVMIReady(detachedVMI, tests.LoggedInCirrosExpecter)
pingVirtualMachine(detachedVMI, "10.1.1.1", "\\$ ")
})
It("[test_id:1753]should create a virtual machine with two interfaces", func() {
By("checking virtual machine instance can ping 10.1.1.1 using ptp cni plugin")
detachedVMI := tests.NewRandomVMIWithEphemeralDiskAndUserdata(tests.ContainerDiskFor(tests.ContainerDiskCirros), "#!/bin/bash\necho 'hello'\n")
detachedVMI.Spec.Domain.Devices.Interfaces = []v1.Interface{
defaultInterface,
{Name: "ptp", InterfaceBindingMethod: v1.InterfaceBindingMethod{Bridge: &v1.InterfaceBridge{}}}}
detachedVMI.Spec.Networks = []v1.Network{
defaultNetwork,
{Name: "ptp", NetworkSource: v1.NetworkSource{
Multus: &v1.MultusNetwork{NetworkName: "ptp-conf-1"},
}},
}
_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(detachedVMI)
Expect(err).ToNot(HaveOccurred())
tests.WaitUntilVMIReady(detachedVMI, tests.LoggedInCirrosExpecter)
cmdCheck := "sudo /sbin/cirros-dhcpc up eth1 > /dev/null\n"
err = tests.CheckForTextExpecter(detachedVMI, []expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: "\\$ "},
&expect.BSnd{S: cmdCheck},
&expect.BExp{R: "\\$ "},
&expect.BSnd{S: "ip addr show eth1 | grep 10.1.1 | wc -l"},
&expect.BExp{R: "1"},
}, 15)
Expect(err).ToNot(HaveOccurred())
By("checking virtual machine instance has two interfaces")
checkInterface(detachedVMI, "eth0", "\\$ ")
checkInterface(detachedVMI, "eth1", "\\$ ")
pingVirtualMachine(detachedVMI, "10.1.1.1", "\\$ ")
})
})
Context("VirtualMachineInstance with multus network as default network", func() {
It("[test_id:1751]should create a virtual machine with one interface with multus default network definition", func() {
detachedVMI := tests.NewRandomVMIWithEphemeralDiskAndUserdata(tests.ContainerDiskFor(tests.ContainerDiskCirros), "#!/bin/bash\necho 'hello'\n")
detachedVMI.Spec.Domain.Devices.Interfaces = []v1.Interface{{Name: "ptp", InterfaceBindingMethod: v1.InterfaceBindingMethod{Bridge: &v1.InterfaceBridge{}}}}
detachedVMI.Spec.Networks = []v1.Network{
{Name: "ptp", NetworkSource: v1.NetworkSource{
Multus: &v1.MultusNetwork{
NetworkName: fmt.Sprintf("%s/%s", tests.NamespaceTestDefault, "ptp-conf-1"),
Default: true,
}}},
}
_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(detachedVMI)
Expect(err).ToNot(HaveOccurred())
tests.WaitUntilVMIReady(detachedVMI, tests.LoggedInCirrosExpecter)
By("checking virtual machine instance can ping 10.1.1.1 using ptp cni plugin")
pingVirtualMachine(detachedVMI, "10.1.1.1", "\\$ ")
By("checking virtual machine instance only has one interface")
// lo0, eth0
err = tests.CheckForTextExpecter(detachedVMI, []expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: "\\$ "},
&expect.BSnd{S: "ip link show | grep -c UP\n"},
&expect.BExp{R: "2"},
}, 15)
Expect(err).ToNot(HaveOccurred())
By("checking pod has only one interface")
// lo0, eth0, k6t-eth0, vnet0
output := tests.RunCommandOnVmiPod(detachedVMI, []string{"/bin/bash", "-c", "/usr/sbin/ip link show|grep -c UP"})
ExpectWithOffset(1, strings.TrimSpace(output)).To(Equal("4"))
})
})
Context("VirtualMachineInstance with cni ptp plugin interface with custom MAC address", func() {
It("[test_id:1705]should configure valid custom MAC address on ptp interface when using tuning plugin", func() {
customMacAddress := "50:00:00:00:90:0d"
ptpInterface := v1.Interface{
Name: "ptp",
InterfaceBindingMethod: v1.InterfaceBindingMethod{
Bridge: &v1.InterfaceBridge{},
},
}
ptpNetwork := v1.Network{
Name: "ptp",
NetworkSource: v1.NetworkSource{
Multus: &v1.MultusNetwork{
NetworkName: "ptp-conf-1",
},
},
}
interfaces := []v1.Interface{ptpInterface}
networks := []v1.Network{ptpNetwork}
By("Creating a VM with custom MAC address on its ptp interface.")
interfaces[0].MacAddress = customMacAddress
vmiOne := createVMIOnNode(interfaces, networks)
tests.WaitUntilVMIReady(vmiOne, tests.LoggedInAlpineExpecter)
By("Configuring static IP address to ptp interface.")
configInterface(vmiOne, "eth0", "10.1.1.1/24", "localhost:~#")
By("Verifying the desired custom MAC is the one that was actually configured on the interface.")
ipLinkShow := fmt.Sprintf("ip link show eth0 | grep -i \"%s\" | wc -l\n", customMacAddress)
err = tests.CheckForTextExpecter(vmiOne, []expect.Batcher{
&expect.BSnd{S: ipLinkShow},
&expect.BExp{R: "1"},
}, 15)
Expect(err).ToNot(HaveOccurred())
By("Verifying the desired custom MAC is not configured inside the pod namespace.")
vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmiOne, tests.NamespaceTestDefault)
out, err := tests.ExecuteCommandOnPod(
virtClient,
vmiPod,
"compute",
[]string{"sh", "-c", "ip a"},
)
Expect(err).ToNot(HaveOccurred())
Expect(strings.Contains(out, customMacAddress)).To(BeFalse())
})
})
Context("VirtualMachineInstance with Linux bridge plugin interface", func() {
It("[test_id:1577]should create two virtual machines with one interface", func() {
By("checking virtual machine instance can ping the secondary virtual machine instance using Linux bridge CNI plugin")
interfaces := []v1.Interface{linuxBridgeInterface}
networks := []v1.Network{linuxBridgeNetwork}
vmiOne := createVMIOnNode(interfaces, networks)
vmiTwo := createVMIOnNode(interfaces, networks)
tests.WaitUntilVMIReady(vmiOne, tests.LoggedInAlpineExpecter)
tests.WaitUntilVMIReady(vmiTwo, tests.LoggedInAlpineExpecter)
configInterface(vmiOne, "eth0", "10.1.1.1/24", "localhost:~#")
By("checking virtual machine interface eth0 state")
checkInterface(vmiOne, "eth0", "localhost:~#")
configInterface(vmiTwo, "eth0", "10.1.1.2/24", "localhost:~#")
By("checking virtual machine interface eth0 state")
checkInterface(vmiTwo, "eth0", "localhost:~#")
By("ping between virtual machines")
pingVirtualMachine(vmiOne, "10.1.1.2", "localhost:~#")
})
It("[test_id:1578]should create two virtual machines with two interfaces", func() {
By("checking the first virtual machine instance can ping 10.1.1.2 using Linux bridge CNI plugin")
interfaces := []v1.Interface{
defaultInterface,
linuxBridgeInterface,
}
networks := []v1.Network{
defaultNetwork,
linuxBridgeNetwork,
}
vmiOne := createVMIOnNode(interfaces, networks)
vmiTwo := createVMIOnNode(interfaces, networks)
tests.WaitUntilVMIReady(vmiOne, tests.LoggedInAlpineExpecter)
tests.WaitUntilVMIReady(vmiTwo, tests.LoggedInAlpineExpecter)
configInterface(vmiOne, "eth1", "10.1.1.1/24", "localhost:~#")
By("checking virtual machine interface eth1 state")
checkInterface(vmiOne, "eth1", "localhost:~#")
configInterface(vmiTwo, "eth1", "10.1.1.2/24", "localhost:~#")
By("checking virtual machine interface eth1 state")
checkInterface(vmiTwo, "eth1", "localhost:~#")
By("ping between virtual machines")
pingVirtualMachine(vmiOne, "10.1.1.2", "localhost:~#")
})
})
Context("VirtualMachineInstance with Linux bridge CNI plugin interface and custom MAC address.", func() {
interfaces := []v1.Interface{linuxBridgeInterface}
networks := []v1.Network{linuxBridgeNetwork}
linuxBridgeIfIdx := 0
customMacAddress := "50:00:00:00:90:0d"
It("[test_id:676]should configure valid custom MAC address on Linux bridge CNI interface.", func() {
By("Creating a VM with Linux bridge CNI network interface and default MAC address.")
vmiTwo := createVMIOnNode(interfaces, networks)
tests.WaitUntilVMIReady(vmiTwo, tests.LoggedInAlpineExpecter)
By("Creating another VM with custom MAC address on its Linux bridge CNI interface.")
interfaces[linuxBridgeIfIdx].MacAddress = customMacAddress
vmiOne := createVMIOnNode(interfaces, networks)
tests.WaitUntilVMIReady(vmiOne, tests.LoggedInAlpineExpecter)
By("Configuring static IP address to the Linux bridge interface.")
configInterface(vmiOne, "eth0", "10.1.1.1/24", "localhost:~#")
configInterface(vmiTwo, "eth0", "10.1.1.2/24", "localhost:~#")
By("Verifying the desired custom MAC is the one that was actually configured on the interface.")
ipLinkShow := fmt.Sprintf("ip link show eth0 | grep -i \"%s\" | wc -l\n", customMacAddress)
err = tests.CheckForTextExpecter(vmiOne, []expect.Batcher{
&expect.BSnd{S: ipLinkShow},
&expect.BExp{R: "1"},
}, 15)
Expect(err).ToNot(HaveOccurred())
By("Verifying the desired custom MAC is not configured inside the pod namespace.")
vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmiOne, tests.NamespaceTestDefault)
out, err := tests.ExecuteCommandOnPod(
virtClient,
vmiPod,
"compute",
[]string{"sh", "-c", "ip a"},
)
Expect(err).ToNot(HaveOccurred())
Expect(strings.Contains(out, customMacAddress)).To(BeFalse())
By("Ping from the VM with the custom MAC to the other VM.")
pingVirtualMachine(vmiOne, "10.1.1.2", "localhost:~#")
})
})
Context("Single VirtualMachineInstance with Linux bridge CNI plugin interface", func() {
It("[test_id:1756]should report all interfaces in Status", func() {
interfaces := []v1.Interface{
defaultInterface,
linuxBridgeInterface,
}
networks := []v1.Network{
defaultNetwork,
linuxBridgeNetwork,
}
vmiOne := createVMIOnNode(interfaces, networks)
tests.WaitUntilVMIReady(vmiOne, tests.LoggedInAlpineExpecter)
updatedVmi, err := virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Get(vmiOne.ObjectMeta.Name, &metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(len(updatedVmi.Status.Interfaces)).To(Equal(2))
interfacesByName := make(map[string]v1.VirtualMachineInstanceNetworkInterface)
for _, ifc := range updatedVmi.Status.Interfaces {
interfacesByName[ifc.Name] = ifc
}
for _, network := range networks {
ifc, is_present := interfacesByName[network.Name]
Expect(is_present).To(BeTrue())
Expect(ifc.MAC).To(Not(BeZero()))
}
Expect(interfacesByName["default"].MAC).To(Not(Equal(interfacesByName["linux-bridge"].MAC)))
err = tests.CheckForTextExpecter(updatedVmi, []expect.Batcher{
&expect.BSnd{S: fmt.Sprintf("ip addr show eth0 | grep %s | wc -l", interfacesByName["default"].MAC)},
&expect.BExp{R: "1"},
}, 15)
err = tests.CheckForTextExpecter(updatedVmi, []expect.Batcher{
&expect.BSnd{S: fmt.Sprintf("ip addr show eth1 | grep %s | wc -l", interfacesByName["linux-bridge"].MAC)},
&expect.BExp{R: "1"},
}, 15)
})
})
Context("VirtualMachineInstance with invalid MAC address", func() {
BeforeEach(func() {
tests.BeforeTestCleanup()
})
It("[test_id:1713]should fail to start with invalid MAC address", func() {
By("Start VMI")
linuxBridgeIfIdx := 1
vmi := tests.NewRandomVMIWithEphemeralDiskAndUserdata(tests.ContainerDiskFor(tests.ContainerDiskAlpine), "#!/bin/bash\n")
vmi.Spec.Domain.Devices.Interfaces = []v1.Interface{
defaultInterface,
linuxBridgeInterface,
}
vmi.Spec.Domain.Devices.Interfaces[linuxBridgeIfIdx].MacAddress = "de:00c:00c:00:00:de:abc"
vmi.Spec.Networks = []v1.Network{
defaultNetwork,
linuxBridgeNetwork,
}
_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(vmi)
Expect(err).To(HaveOccurred())
testErr := err.(*errors.StatusError)
Expect(testErr.ErrStatus.Reason).To(BeEquivalentTo("Invalid"))
})
})
})
Describe("[rfe_id:1758][crit:medium][vendor:[email protected]][level:component]VirtualMachineInstance definition", func() {
Context("with qemu guest agent", func() {
It("[test_id:1757] should report guest interfaces in VMI status", func() {
interfaces := []v1.Interface{
defaultInterface,
linuxBridgeInterface,
}
networks := []v1.Network{
defaultNetwork,
linuxBridgeNetwork,
}
ep1Ip := "1.0.0.10/24"
ep2Ip := "1.0.0.11/24"
ep1IpV6 := "fe80::ce3d:82ff:fe52:24c0/64"
ep2IpV6 := "fe80::ce3d:82ff:fe52:24c1/64"
userdata := fmt.Sprintf(`#!/bin/bash
echo "fedora" |passwd fedora --stdin
setenforce 0
ip link add ep1 type veth peer name ep2
ip addr add %s dev ep1
ip addr add %s dev ep2
ip addr add %s dev ep1
ip addr add %s dev ep2
mkdir -p /usr/local/bin
curl %s > /usr/local/bin/qemu-ga
chmod +x /usr/local/bin/qemu-ga
systemd-run --unit=guestagent /usr/local/bin/qemu-ga
`, ep1Ip, ep2Ip, ep1IpV6, ep2IpV6, tests.GuestAgentHttpUrl)
agentVMI := tests.NewRandomVMIWithEphemeralDiskAndUserdata(tests.ContainerDiskFor(tests.ContainerDiskFedora), userdata)
agentVMI.Spec.Domain.Devices.Interfaces = interfaces
agentVMI.Spec.Networks = networks
agentVMI.Spec.Domain.Resources.Requests[k8sv1.ResourceMemory] = resource.MustParse("1024M")
By("Starting a VirtualMachineInstance")
agentVMI, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(agentVMI)
Expect(err).ToNot(HaveOccurred(), "Should create VMI successfully")
tests.WaitForSuccessfulVMIStart(agentVMI)
// Need to wait for cloud init to finish and start the agent inside the vmi.
tests.WaitAgentConnected(virtClient, agentVMI)
getOptions := &metav1.GetOptions{}
Eventually(func() bool {
updatedVmi, err := virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Get(agentVMI.Name, getOptions)
if err != nil {
return false
}
return len(updatedVmi.Status.Interfaces) == 4
}, 420*time.Second, 4).Should(BeTrue(), "Should have interfaces in vmi status")
updatedVmi, err := virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Get(agentVMI.Name, getOptions)
Expect(err).ToNot(HaveOccurred())
Expect(len(updatedVmi.Status.Interfaces)).To(Equal(4))
interfaceByIfcName := make(map[string]v1.VirtualMachineInstanceNetworkInterface)
for _, ifc := range updatedVmi.Status.Interfaces {
interfaceByIfcName[ifc.InterfaceName] = ifc
}
Expect(interfaceByIfcName["eth0"].Name).To(Equal("default"))
Expect(interfaceByIfcName["eth0"].InterfaceName).To(Equal("eth0"))
Expect(interfaceByIfcName["eth1"].Name).To(Equal("linux-bridge"))
Expect(interfaceByIfcName["eth1"].InterfaceName).To(Equal("eth1"))
Expect(interfaceByIfcName["ep1"].Name).To(Equal(""))
Expect(interfaceByIfcName["ep1"].InterfaceName).To(Equal("ep1"))
Expect(interfaceByIfcName["ep1"].IP).To(Equal(ep1Ip))
Expect(interfaceByIfcName["ep1"].IPs).To(Equal([]string{ep1Ip, ep1IpV6}))
Expect(interfaceByIfcName["ep2"].Name).To(Equal(""))
Expect(interfaceByIfcName["ep2"].InterfaceName).To(Equal("ep2"))
Expect(interfaceByIfcName["ep2"].IP).To(Equal(ep2Ip))
Expect(interfaceByIfcName["ep2"].IPs).To(Equal([]string{ep2Ip, ep2IpV6}))
})
})
})
})
var _ = Describe("SRIOV", func() {
flag.Parse()
virtClient, err := kubecli.GetKubevirtClient()
tests.PanicOnError(err)
sriovResourceName := os.Getenv("SRIOV_RESOURCE_NAME")
if sriovResourceName == "" {
sriovResourceName = "openshift.io/sriov_net"
}
tests.BeforeAll(func() {
tests.BeforeTestCleanup()
// Create three SR-IOV networks referring to the same resource name
result := virtClient.RestClient().
Post().
RequestURI(fmt.Sprintf(postUrl, tests.NamespaceTestDefault, "sriov")).
Body([]byte(fmt.Sprintf(sriovConfCRD, "sriov", tests.NamespaceTestDefault, sriovResourceName))).
Do()
Expect(result.Error()).NotTo(HaveOccurred())
result = virtClient.RestClient().
Post().
RequestURI(fmt.Sprintf(postUrl, tests.NamespaceTestDefault, "sriov2")).
Body([]byte(fmt.Sprintf(sriovConfCRD, "sriov2", tests.NamespaceTestDefault, sriovResourceName))).
Do()
Expect(result.Error()).NotTo(HaveOccurred())
result = virtClient.RestClient().
Post().
RequestURI(fmt.Sprintf(postUrl, tests.NamespaceTestDefault, "sriov-link-enabled")).
Body([]byte(fmt.Sprintf(sriovLinkEnableConfCRD, "sriov-link-enabled", tests.NamespaceTestDefault, sriovResourceName))).
Do()
Expect(result.Error()).NotTo(HaveOccurred())
})
BeforeEach(func() {
// Multus tests need to ensure that old VMIs are gone
Expect(virtClient.RestClient().Delete().Namespace(tests.NamespaceTestDefault).Resource("virtualmachineinstances").Do().Error()).To(Succeed())
Expect(virtClient.RestClient().Delete().Namespace(tests.NamespaceTestAlternative).Resource("virtualmachineinstances").Do().Error()).To(Succeed())
Eventually(func() int {
list1, err := virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).List(&v13.ListOptions{})
Expect(err).ToNot(HaveOccurred())
list2, err := virtClient.VirtualMachineInstance(tests.NamespaceTestAlternative).List(&v13.ListOptions{})
Expect(err).ToNot(HaveOccurred())
return len(list1.Items) + len(list2.Items)
}, 6*time.Minute, 1*time.Second).Should(BeZero())
})
Context("VirtualMachineInstance with sriov plugin interface", func() {
getSriovVmi := func(networks []string) (vmi *v1.VirtualMachineInstance) {
// If we run on a host with Mellanox SR-IOV cards then we'll need to load in corresponding kernel modules.
// Stop NetworkManager to not interfere with manual IP configuration for SR-IOV interfaces.
// Use agent to signal about cloud-init phase completion.
userData := fmt.Sprintf(`#!/bin/sh
echo "fedora" |passwd fedora --stdin
dnf install -y kernel-modules-$(uname -r)
modprobe mlx5_ib
systemctl stop NetworkManager
mkdir -p /usr/local/bin
curl %s > /usr/local/bin/qemu-ga
chmod +x /usr/local/bin/qemu-ga
systemd-run --unit=guestagent /usr/local/bin/qemu-ga`, tests.GuestAgentHttpUrl)
ports := []v1.Port{}
vmi = tests.NewRandomVMIWithMasqueradeInterfaceEphemeralDiskAndUserdata(tests.ContainerDiskFor(tests.ContainerDiskFedora), userData, ports)
for _, name := range networks {
iface := v1.Interface{Name: name, InterfaceBindingMethod: v1.InterfaceBindingMethod{SRIOV: &v1.InterfaceSRIOV{}}}
network := v1.Network{Name: name, NetworkSource: v1.NetworkSource{Multus: &v1.MultusNetwork{NetworkName: name}}}
vmi.Spec.Domain.Devices.Interfaces = append(vmi.Spec.Domain.Devices.Interfaces, iface)
vmi.Spec.Networks = append(vmi.Spec.Networks, network)
}
// fedora requires some more memory to boot without kernel panics
vmi.Spec.Domain.Resources.Requests[k8sv1.ResourceName("memory")] = resource.MustParse("1024M")
return
}
startVmi := func(vmi *v1.VirtualMachineInstance) {
_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(vmi)
Expect(err).ToNot(HaveOccurred())
return
}
waitVmi := func(vmi *v1.VirtualMachineInstance) {
// Need to wait for cloud init to finish and start the agent inside the vmi.
tests.WaitAgentConnected(virtClient, vmi)
tests.WaitUntilVMIReady(vmi, tests.LoggedInFedoraExpecter)
return
}
checkDefaultInterfaceInPod := func(vmi *v1.VirtualMachineInstance) {
vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmi, tests.NamespaceTestDefault)
By("checking default interface is present")
_, err = tests.ExecuteCommandOnPod(
virtClient,
vmiPod,
"compute",
[]string{"ip", "address", "show", "eth0"},
)
Expect(err).ToNot(HaveOccurred())
By("checking default interface is attached to VMI")
_, err = tests.ExecuteCommandOnPod(
virtClient,
vmiPod,
"compute",
[]string{"ip", "address", "show", "k6t-eth0"},
)
Expect(err).ToNot(HaveOccurred())
}
checkInterfacesInGuest := func(vmi *v1.VirtualMachineInstance, interfaces []string) {
for _, iface := range interfaces {
checkInterface(vmi, iface, "#")
}
}
It("[test_id:1754]should create a virtual machine with sriov interface", func() {
vmi := getSriovVmi([]string{"sriov"})
startVmi(vmi)
waitVmi(vmi)
By("checking KUBEVIRT_RESOURCE_NAME_<networkName> variable is defined in pod")
vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmi, tests.NamespaceTestDefault)
out, err := tests.ExecuteCommandOnPod(
virtClient,
vmiPod,
"compute",
[]string{"sh", "-c", "echo $KUBEVIRT_RESOURCE_NAME_sriov"},
)
Expect(err).ToNot(HaveOccurred())
expectedSriovResourceName := fmt.Sprintf("%s\n", sriovResourceName)
Expect(out).To(Equal(expectedSriovResourceName))
checkDefaultInterfaceInPod(vmi)
By("checking virtual machine instance has two interfaces")
checkInterfacesInGuest(vmi, []string{"eth0", "eth1"})
// there is little we can do beyond just checking two devices are present: PCI slots are different inside
// the guest, and DP doesn't pass information about vendor IDs of allocated devices into the pod, so
// it's hard to match them.
})
It("should create a virtual machine with sriov interface with custom MAC address", func() {
vmi := getSriovVmi([]string{"sriov"})
vmi.Spec.Domain.Devices.Interfaces[1].MacAddress = "de:ad:00:00:be:ef"
startVmi(vmi)
waitVmi(vmi)
By("checking virtual machine instance has an interface with the requested MAC address")
checkMacAddress(vmi, "eth1", "de:ad:00:00:be:ef")
})
It("[test_id:1755]should create a virtual machine with two sriov interfaces referring the same resource", func() {
vmi := getSriovVmi([]string{"sriov", "sriov2"})
startVmi(vmi)
waitVmi(vmi)
By("checking KUBEVIRT_RESOURCE_NAME_<networkName> variables are defined in pod")
vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmi, tests.NamespaceTestDefault)
for _, name := range []string{"sriov", "sriov"} {
out, err := tests.ExecuteCommandOnPod(
virtClient,
vmiPod,
"compute",
[]string{"sh", "-c", fmt.Sprintf("echo $KUBEVIRT_RESOURCE_NAME_%s", name)},
)
Expect(err).ToNot(HaveOccurred())
expectedSriovResourceName := fmt.Sprintf("%s\n", sriovResourceName)
Expect(out).To(Equal(expectedSriovResourceName))
}
checkDefaultInterfaceInPod(vmi)
By("checking virtual machine instance has three interfaces")
checkInterfacesInGuest(vmi, []string{"eth0", "eth1", "eth2"})
// there is little we can do beyond just checking three devices are present: PCI slots are different inside
// the guest, and DP doesn't pass information about vendor IDs of allocated devices into the pod, so
// it's hard to match them.
})
// Note: test case assumes interconnectivity between SR-IOV
// interfaces. It can be achieved either by configuring the external switch
// properly, or via in-PF switching for VFs (works for some NIC models).
It("should connect to another machine with sriov interface", func() {
// start peer machines with sriov interfaces from the same resource pool
vmi1 := getSriovVmi([]string{"sriov-link-enabled"})
vmi2 := getSriovVmi([]string{"sriov-link-enabled"})
// Explicitly choose different random mac addresses instead of relying on kubemacpool to do it:
// 1) we don't at the moment deploy kubemacpool in kind providers
// 2) even if we would do, it's probably a good idea to have the suite not depend on this fact
//
// This step is needed to guarantee that no VFs on the PF carry a duplicate MAC address that may affect
// ability of VMIs to send and receive ICMP packets on their ports.
mac1, err := tests.GenerateRandomMac()
Expect(err).ToNot(HaveOccurred())
mac2, err := tests.GenerateRandomMac()
Expect(err).ToNot(HaveOccurred())
vmi1.Spec.Domain.Devices.Interfaces[1].MacAddress = mac1.String()
vmi2.Spec.Domain.Devices.Interfaces[1].MacAddress = mac2.String()
startVmi(vmi1)
startVmi(vmi2)
waitVmi(vmi1)
waitVmi(vmi2)
// manually configure IP/link on sriov interfaces because there is
// no DHCP server to serve the address to the guest
configInterface(vmi1, "eth1", "192.168.1.1/24", "#")
configInterface(vmi2, "eth1", "192.168.1.2/24", "#")
// now check ICMP goes both ways
pingVirtualMachine(vmi1, "192.168.1.2", "#")
pingVirtualMachine(vmi2, "192.168.1.1", "#")
})
})
})
func configInterface(vmi *v1.VirtualMachineInstance, interfaceName, interfaceAddress, prompt string) {
cmdCheck := fmt.Sprintf("ip addr add %s dev %s\n", interfaceAddress, interfaceName)
err := tests.CheckForTextExpecter(vmi, []expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: prompt},
&expect.BSnd{S: cmdCheck},
&expect.BExp{R: prompt},
&expect.BSnd{S: "echo $?\n"},
&expect.BExp{R: "0"},
}, 15)
Expect(err).ToNot(HaveOccurred(), "Failed to configure address %s for interface %s on VMI %s", interfaceAddress, interfaceName, vmi.Name)
cmdCheck = fmt.Sprintf("ip link set %s up\n", interfaceName)
err = tests.CheckForTextExpecter(vmi, []expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: prompt},
&expect.BSnd{S: cmdCheck},
&expect.BExp{R: prompt},
&expect.BSnd{S: "echo $?\n"},
&expect.BExp{R: "0"},
}, 15)
Expect(err).ToNot(HaveOccurred(), "Failed to set interface %s up on VMI %s", interfaceName, vmi.Name)
}
func checkInterface(vmi *v1.VirtualMachineInstance, interfaceName, prompt string) {
cmdCheck := fmt.Sprintf("ip link show %s\n", interfaceName)
err := tests.CheckForTextExpecter(vmi, []expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: prompt},
&expect.BSnd{S: cmdCheck},
&expect.BExp{R: prompt},
&expect.BSnd{S: "echo $?\n"},
&expect.BExp{R: "0"},
}, 15)
Expect(err).ToNot(HaveOccurred(), "Interface %q was not found in the VMI %s within the given timeout", interfaceName, vmi.Name)
}
func checkMacAddress(vmi *v1.VirtualMachineInstance, interfaceName, macAddress string) {
cmdCheck := fmt.Sprintf("ip link show %s\n", interfaceName)
err := tests.CheckForTextExpecter(vmi, []expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: "#"},
&expect.BSnd{S: cmdCheck},
&expect.BExp{R: macAddress},
&expect.BExp{R: "#"},
&expect.BSnd{S: "echo $?\n"},
&expect.BExp{R: "0"},
}, 15)
Expect(err).ToNot(HaveOccurred(), "MAC %q was not found in the VMI %s within the given timeout", macAddress, vmi.Name)
}
func pingVirtualMachine(vmi *v1.VirtualMachineInstance, ipAddr, prompt string) {
cmdCheck := fmt.Sprintf("ping %s -c 1 -w 5\n", ipAddr)
err := tests.CheckForTextExpecter(vmi, []expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: prompt},
&expect.BSnd{S: cmdCheck},
&expect.BExp{R: prompt},
&expect.BSnd{S: "echo $?\n"},
&expect.BExp{R: "0"},
}, 30)
Expect(err).ToNot(HaveOccurred(), "Failed to ping VMI %s within the given timeout", vmi.Name)
}
// Tests in the Multus suite expect a Linux bridge to be available on each node, with iptables allowing
// traffic to go through. This function creates a DaemonSet on the cluster (if it does not exist yet); the
// DaemonSet creates a Linux bridge and configures the firewall. We use iptables-compat in order to work with
// both iptables and newer nftables.
// TODO: Once kubernetes-nmstate is ready, we should use it instead
func configureNodeNetwork(virtClient kubecli.KubevirtClient) {
// Fetching the kubevirt-operator image from the pod makes this independent from the installation method / image used
pods, err := virtClient.CoreV1().Pods(tests.KubeVirtInstallNamespace).List(metav1.ListOptions{LabelSelector: "kubevirt.io=virt-operator"})
Expect(err).ToNot(HaveOccurred())
Expect(pods.Items).ToNot(BeEmpty())
virtOperatorImage := pods.Items[0].Spec.Containers[0].Image
// Privileged DaemonSet configuring host networking as needed
networkConfigDaemonSet := appsv1.DaemonSet{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "DaemonSet",
},
ObjectMeta: metav1.ObjectMeta{
Name: "network-config",
Namespace: metav1.NamespaceSystem,
},
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"name": "network-config"},
},
Template: k8sv1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"name": "network-config"},
},
Spec: k8sv1.PodSpec{
Containers: []k8sv1.Container{
{
Name: "network-config",
// Reuse image which is already installed in the cluster. All we need is chroot.
// Local OKD cluster doesn't allow us to pull from the outside.
Image: virtOperatorImage,
Command: []string{
"sh",
"-c",
"set -x; chroot /host ip link add br10 type bridge; chroot /host iptables -I FORWARD 1 -i br10 -j ACCEPT; touch /tmp/ready; sleep INF",
},
SecurityContext: &k8sv1.SecurityContext{
Privileged: pointer.BoolPtr(true),
RunAsUser: pointer.Int64Ptr(0),
},
ReadinessProbe: &k8sv1.Probe{
Handler: k8sv1.Handler{
Exec: &k8sv1.ExecAction{
Command: []string{"cat", "/tmp/ready"},
},
},
},
VolumeMounts: []k8sv1.VolumeMount{
k8sv1.VolumeMount{
Name: "host",
MountPath: "/host",
},
},
},
},
Volumes: []k8sv1.Volume{
k8sv1.Volume{
Name: "host",
VolumeSource: k8sv1.VolumeSource{
HostPath: &k8sv1.HostPathVolumeSource{
Path: "/",
},
},
},
},
HostNetwork: true,
},
},
},
}
// Helper function returning the existing network-config DaemonSet, if it exists
getNetworkConfigDaemonSet := func() *appsv1.DaemonSet {
daemonSet, err := virtClient.AppsV1().DaemonSets(metav1.NamespaceSystem).Get(networkConfigDaemonSet.Name, metav1.GetOptions{})
if errors.IsNotFound(err) {
return nil
}
Expect(err).NotTo(HaveOccurred())
return daemonSet
}
// If the DaemonSet hasn't been created yet, create it
runningNetworkConfigDaemonSet := getNetworkConfigDaemonSet()
if runningNetworkConfigDaemonSet == nil {
_, err := virtClient.AppsV1().DaemonSets(metav1.NamespaceSystem).Create(&networkConfigDaemonSet)
Expect(err).NotTo(HaveOccurred())
}
// Make sure that all pods in the Daemon Set finished the configuration
nodes := tests.GetAllSchedulableNodes(virtClient)
Eventually(func() int {
daemonSet := getNetworkConfigDaemonSet()
return int(daemonSet.Status.NumberAvailable)
}, time.Minute, time.Second).Should(Equal(len(nodes.Items)))
}
| [
"\"SRIOV_RESOURCE_NAME\""
]
| []
| [
"SRIOV_RESOURCE_NAME"
]
| [] | ["SRIOV_RESOURCE_NAME"] | go | 1 | 0 | |
upload_tool/pgy_upload_android.py | # coding=utf-8
"""
* User: fraj
* Email: [email protected]
* Date: 18/2/1
* Time: 10:00
"""
import time
import urllib2
import json
import mimetypes
import os
import smtplib
from email.mime.base import MIMEBase
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email import encoders
# encoding=utf8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
# Pgyer app upload URL
url = 'https://www.pgyer.com/apiv2/app/upload'
# User Key provided by Pgyer
uKey = 'AAAAAAAAAAAAAAAAAAAAAA'
# API Key provided by Pgyer
_api_key = 'BBBBBBBBBBBBBBBBBBBBB'
# (Optional) App install type, one of (1, 2, 3). 1: public, 2: password install, 3: invite install. Defaults to 1 (public).
buildInstallType = '2'
# (Optional) App install password. Pass an empty string, or omit it, if you do not want a password.
buildPassword = '123456'
# Dictionary of runtime environment variables
environsDict = os.environ
#print environsDict
# Jenkins build tag for this build
jenkins_build_number = environsDict['BUILD_TAG']
print jenkins_build_number
# Jenkins build environment for this build: ZHDJ_COMMON is the production environment, ZHDJ_TEST is the test environment
sel_product_build = os.getenv('BUILD')
# Product flavor (channel)
sel_product_flavors = os.getenv('FLAVORS')
print sel_product_flavors
# Changelog for this Jenkins build
changelog = os.getenv('SCM_CHANGELOG')
print '*******changelog****'
print changelog
# Get the apk file path
def get_apk_file_path():
# Path of the apk package
apk_file_workspace_path = './your_app.apk'
if os.path.exists(apk_file_workspace_path):
return apk_file_workspace_path
# while get_apk_file_path() is None:
# time.sleep(5)
# apk file path
apk_file_path = get_apk_file_path()
print apk_file_path
# Encode the request params dict as a multipart body
def _encode_multipart(params_dict):
boundary = '----------%s' % hex(int(time.time() * 1000))
data = []
for k, v in params_dict.items():
data.append('--%s' % boundary)
if hasattr(v, 'read'):
filename = getattr(v, 'name', '')
content = v.read()
decoded_content = content.decode('ISO-8859-1')
data.append('Content-Disposition: form-data; name="%s"; filename="zhdj.apk"' % k)
data.append('Content-Type: application/octet-stream\r\n')
data.append(decoded_content)
else:
data.append('Content-Disposition: form-data; name="%s"\r\n' % k)
data.append(v if isinstance(v, str) else v.decode('utf-8'))
data.append('--%s--\r\n' % boundary)
return '\r\n'.join(data), boundary
# Handle the Pgyer upload result
def handle_result(result):
json_result = json.loads(result)
print '******* Pgyer upload response ****'
print json_result
if json_result['code'] is 0:
print '******* File uploaded successfully ****'
# print json_result
send_Email(json_result)
# Send notification email
def send_Email(json_result):
print '******* Start sending email ****'
buildName = json_result['data']['buildName']
buildKey = json_result['data']['buildKey']
buildVersion = json_result['data']['buildVersion']
buildBuildVersion = json_result['data']['buildBuildVersion']
buildShortcutUrl = json_result['data']['buildShortcutUrl']
buildQRCodeURL = json_result['data']['buildQRCodeURL']
buildUpdated = json_result['data']['buildUpdated']
# Email recipients
mail_receiver = ['[email protected]','[email protected]']
# Configure host, user, and pwd for your mail provider
mail_host = 'your mail host'
mail_port = 25
mail_user = 'your email'
mail_pwd = 'email password'
mail_to = ','.join(mail_receiver)
msg = MIMEMultipart()
environsString = '<h3>Information about this build</h3><p>'
environsString += '<p>App name: '+ str(buildName) +'</p>'
environsString += '<p>Version: '+ str(buildVersion) +'</p>'
environsString += '<p>Updated at: '+ str(buildUpdated) +'</p>'
environsString += '<p>Install password: '+ str(buildPassword) +'</p>'
if changelog:
print "changelog not empty"
environsString += '<p>Changelog:</p>'
environsString += '<p>'+ str(changelog) +'</p>'
else:
print "changelog empty"
# environsString += '<p>You can install online from the Pgyer website: ' + 'http://www.pgyer.com/' + str(buildShortcutUrl) + '<p>'
environsString += '<img src="'+ str(buildQRCodeURL) +'" alt="QR code" />'
environsString += '<p>Scan the QR code to install directly</p>'
message = environsString
body = MIMEText(message, _subtype='html', _charset='utf-8')
# Add the apk as an attachment
part = MIMEBase('application', 'octet-stream') # 'octet-stream': binary data; create the attachment object
source_path = get_apk_file_path()
part.set_payload(open(source_path, 'rb').read()) # Load the attachment source file into the attachment object
encoders.encode_base64(part)
nowTime = time.strftime("%Y-%m-%d", time.localtime())
part_name = 'your_app-' + nowTime + '_'+ sel_product_flavors +'.apk'
part_name = part_name.decode('utf-8').encode(sys.getfilesystemencoding())
print part_name
part.add_header('Content-Disposition', 'attachment; filename="' + part_name +'"') # Add headers to the attachment
msg.attach(body)
msg.attach(part) # Attach the file to the root container
msg['To'] = mail_to
msg['from'] = mail_user
msg['subject'] = 'Android build: ' + sel_product_build + '-' + sel_product_flavors + ' ' + buildName +' '+ buildVersion
try:
s = smtplib.SMTP()
# Enable debug mode so the SMTP session prints verbose output
s.set_debuglevel(1)
s.connect(mail_host)
s.login(mail_user, mail_pwd)
s.sendmail(mail_user, mail_receiver, msg.as_string())
s.close()
print '******* Email sent successfully ****'
except Exception, e:
print e
#############################################################
# Request parameter dict
params = {
'_api_key': _api_key,
'file': open(apk_file_path, 'rb'),
'buildInstallType': buildInstallType,
'buildPassword': buildPassword
}
coded_params, boundary = _encode_multipart(params)
req = urllib2.Request(url, coded_params.encode('ISO-8859-1'))
req.add_header('Content-Type', 'multipart/form-data; boundary=%s' % boundary)
try:
print '******* Start file upload ****'
resp = urllib2.urlopen(req)
body = resp.read().decode('utf-8')
handle_result(body)
except urllib2.HTTPError as e:
print(e.fp.read())
| []
| []
| [
"BUILD",
"FLAVORS",
"SCM_CHANGELOG"
]
| [] | ["BUILD", "FLAVORS", "SCM_CHANGELOG"] | python | 3 | 0 | |
app.py | #!/usr/bin/env python
import urllib
import urllib2
import json
import os
import re
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
intent_name="string"
QR=['0','1','2','3','4','5','6']
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = processRequest(req)
res = json.dumps(res, indent=4)
# print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def processRequest(req):
if req.get("result").get("action") != "yahooWeatherForecast":
return {}
global city_names
city_names=processlocation(req)
global QR
global intent_name
intent_name=processIntentName(req)
if "ChooseCity" in intent_name:
QR[0]="Sector in "+city_names
QR[1]="Other City?Specify"
QR[2]="Hot Property"
QR[3]="Price Range"
QR[4]="Land Area"
QR[5]="Property Type"
elif "ChooseSector" in intent_name:
QR[0]="(Y)"
QR[1]="Other Sector?Specify"
QR[2]="Hot Property"
QR[3]="Price Range"
QR[4]="Land Area"
QR[5]="Property Type"
elif "ChangeType" in intent_name:
QR[0]="(Y)"
QR[1]="Other Type?Specify"
QR[2]="Hot Property"
QR[3]="Price Range"
QR[4]="Land Area"
QR[5]="Change Location"
elif "ChooseHotProperties" in intent_name:
QR[0]="(Y)"
QR[1]="Change Location"
QR[2]="Hot Property"
QR[3]="Price Range"
QR[4]="Land Area"
QR[5]="Change City"
elif "ChoosePlotArea" in intent_name:
QR[0]="(Y)"
QR[1]="Other Area?Specify"
QR[2]="Hot Property"
QR[3]="Price Range"
QR[4]="Land Area"
QR[5]="Change Location"
elif "DefinePriceRange" in intent_name:
QR[0]="(Y)"
QR[1]="Other Range?Specify"
QR[2]="Hot Property"
QR[3]="Price Range"
QR[4]="Land Area"
QR[5]="Change Location"
city_names=processlocation(req)
sector_names=processSector(req)
property_type=processPropertyType(req)
unit_property=processUnit(req)
area_property=processArea(req)
NoOfDays=processDate(req)
DateUnit=processDateUnit(req)
school=processSchool(req)
malls=processMalls(req)
transport=processTransport(req)
security=processSecurity(req)
airport=processAirport(req)
fuel=processFuel(req)
#minimum_value=processMinimum(req)
maximum_value=processMaximum(req)
latest=processLatestProperties(req)
#if minimum_value > maximum_value:
# minimum_value,maximum_value=maximum_value,minimum_value
#else:
# minimum_value,maximum_value=minimum_value,maximum_value
baseurl = "https://aarz.pk/bot/index.php?city_name="+city_names+"§or_name="+sector_names+"&minPrice="+maximum_value+"&type="+property_type+"&LatestProperties="+latest+"&UnitArea="+area_property+"&Unit="+unit_property+"&school="+school+"&airport="+airport+"&transport="+transport+"&security="+security+"&shopping_mall="+malls+"&fuel="+fuel
result = urllib.urlopen(baseurl).read()
data = json.loads(result)
res = makeWebhookResult(data)
return res
def processIntentName(req):
result = req.get("result")
parameters = result.get("metadata")
intent = parameters.get("intentName")
return intent
def sendSparkPOST(url, data):
request = urllib2.Request(url, json.dumps(data),
headers={"Accept" : "application/json","Content-Type":"application/json"})
request.add_header("Authorization", "Bearer "+bearer)
contents = urllib2.urlopen(request).read()
return contents
def processlocation(req):
result = req.get("result")
parameters = result.get("parameters")
city = parameters.get("city")
return city
def processSector(req):
result = req.get("result")
parameters = result.get("parameters")
sector = parameters.get("Location")
return sector
def processMinimum(req):
result = req.get("result")
parameters = result.get("parameters")
minimum = parameters.get("number")
return minimum
def processMaximum(req):
result = req.get("result")
parameters = result.get("parameters")
maximum = parameters.get("number1")
return maximum
def processPropertyType(req):
result = req.get("result")
parameters = result.get("parameters")
propertyType = parameters.get("PropertyType")
return propertyType
def processLatestProperties(req):
result = req.get("result")
parameters = result.get("parameters")
latest = parameters.get("LatestProperties")
return latest
def processUnit(req):
result = req.get("result")
parameters = result.get("parameters")
unit = parameters.get("Unit")
return unit
def processArea(req):
result = req.get("result")
parameters = result.get("parameters")
area = parameters.get("AreaNumber")
return area
def processDate(req):
result = req.get("result")
parameters = result.get("parameters")
days = parameters.get("NoOfDays")
return days
def processDateUnit(req):
result = req.get("result")
parameters = result.get("parameters")
dayUnit = parameters.get("DayUnit")
return dayUnit
def processSchool(req):
result = req.get("result")
parameters = result.get("parameters")
school = parameters.get("school")
return school
def processMalls(req):
result = req.get("result")
parameters = result.get("parameters")
malls = parameters.get("malls")
return malls
def processTransport(req):
result = req.get("result")
parameters = result.get("parameters")
transport = parameters.get("transport")
return transport
def processSecurity(req):
result = req.get("result")
parameters = result.get("parameters")
security = parameters.get("security")
return security
def processAirport(req):
result = req.get("result")
parameters = result.get("parameters")
airport = parameters.get("airport")
return airport
def processFuel(req):
result = req.get("result")
parameters = result.get("parameters")
fuel = parameters.get("fuelstation")
return fuel
def makeWebhookResult(data):
i=0
length=len(data)
row_id=['test','test1','test2','test3','test4']
row_title=['test','test1','test2','test3','test4']
row_location=['test','test1','test2','test3','test4']
row_price=['test','test1','test2','test3','test4']
row_slug=['test','test1','test2','test3','test4']
row_number=['test','test1','test2','test3','test4']
while (i <length):
row_id[i]=data[i]['p_id']
row_title[i]=data[i]['title']
row_location[i]=data[i]['address']
row_price[i]=data[i]['price']
row_slug[i]=data[i]['slug']
row_number[i]=data[i]['mobile_number']
i+=1
speech = "Here are some properties with your choice: "+"\n"+row_title[0] +" in "+ row_location[0] + " with price "+ row_price[0] +"\n"+ row_title[1] +" in "+ row_location[1] + " with price "+ row_price[1]
if "unable" in row_title[0]:
message={
"text":row_title[0],
"quick_replies": [
{
"content_type":"text",
"title": "Purchase plot",
"payload": "YOUR_DEFINED_PAYLOAD_FOR_NEXT_IMAGE"
}
]
}
elif length==1:
message={
"attachment":{
"type":"template",
"payload":{
"template_type":"generic",
"elements":[
{
"title":row_title[0],
"item_url": "https://www.aarz.pk/property-detail/"+row_slug[0],
"image_url":"http://www.aarz.pk/assets/images/properties/"+row_id[0]+"/"+row_id[0]+".actual.0.jpg" ,
"subtitle":row_location[0],
"buttons":[
{
"type":"element_share"
},
{
"type":"phone_number",
"title":"Call Agent",
"payload":row_number[0]
}
]
}
]
}
},
"quick_replies": [
{
"content_type":"text",
"title": QR[0],
"payload": "YOUR_DEFINED_PAYLOAD_FOR_NEXT_IMAGE"
},
{
"content_type":"text",
"title": QR[1],
"payload": "YOUR_DEFINED_PAYLOAD_FOR_NEXT_IMAGE"
},
{
"content_type":"text",
"title": QR[2],
"payload": "YOUR_DEFINED_PAYLOAD_FOR_NEXT_IMAGE"
},
{
"content_type":"text",
"title": QR[3],
"payload": "YOUR_DEFINED_PAYLOAD_FOR_NEXT_IMAGE"
},
{
"content_type":"text",
"title": QR[4],
"payload": "YOUR_DEFINED_PAYLOAD_FOR_NEXT_IMAGE"
},
{
"content_type":"text",
"title": "Purchase Property",
"payload": "YOUR_DEFINED_PAYLOAD_FOR_NEXT_IMAGE"
}
]
}
elif length==2:
message= {
"attachment": {
"type": "template",
"payload": {
"template_type": "generic",
"elements": [{
"title": row_title[0],
"subtitle": row_location[0],
"item_url": "https://www.aarz.pk/property-detail/"+row_slug[0],
"image_url":"http://www.aarz.pk/assets/images/properties/"+row_id[0]+"/"+row_id[0]+".actual.0.jpg" ,
"buttons": [ {
"type":"element_share"
},
{
"type":"phone_number",
"title":"Call Agent",
"payload":row_number[0]
}
],
},
{
"title": row_title[1],
"subtitle": row_location[1],
"item_url": "https://www.aarz.pk/property-detail/"+row_slug[1],
"image_url":"http://www.aarz.pk/assets/images/properties/"+row_id[1]+"/"+row_id[1]+".actual.0.jpg" ,
"buttons": [ {
"type":"element_share"
},
{
"type":"phone_number",
"title":"Call Agent",
"payload":row_number[1]
}
]
}]
}
},
"quick_replies": [
{
"content_type":"text",
"title": QR[0],
"payload": "YOUR_DEFINED_PAYLOAD_FOR_NEXT_IMAGE"
},
{
"content_type":"text",
"title": QR[1],
"payload": "YOUR_DEFINED_PAYLOAD_FOR_NEXT_IMAGE"
},
{
"content_type":"text",
"title": QR[2],
"payload": "YOUR_DEFINED_PAYLOAD_FOR_NEXT_IMAGE"
},
{
"content_type":"text",
"title": QR[3],
"payload": "YOUR_DEFINED_PAYLOAD_FOR_NEXT_IMAGE"
},
{
"content_type":"text",
"title": QR[4],
"payload": "YOUR_DEFINED_PAYLOAD_FOR_NEXT_IMAGE"
},
{
"content_type":"text",
"title": "Purchase plot",
"payload": "YOUR_DEFINED_PAYLOAD_FOR_NEXT_IMAGE"
}
]
}
else:
message= {
"attachment": {
"type": "template",
"payload": {
"template_type": "generic",
"elements": [
{
"title": row_title[0],
"subtitle": row_location[0],
"item_url": "https://www.aarz.pk/property-detail/"+row_slug[0],
"image_url":"http://www.aarz.pk/assets/images/properties/"+row_id[0]+"/"+row_id[0]+".actual.0.jpg" ,
"buttons": [ {
"type":"element_share"
},
{
"type":"phone_number",
"title":"Call Agent",
"payload":row_number[0]
}
],
},
{
"title": row_title[1],
"subtitle": row_location[1],
"item_url": "https://www.aarz.pk/property-detail/"+row_slug[1],
"image_url":"http://www.aarz.pk/assets/images/properties/"+row_id[1]+"/"+row_id[1]+".actual.0.jpg" ,
"buttons": [ {
"type":"element_share"
},
{
"type":"phone_number",
"title":"Call Agent",
"payload":row_number[1]
}
],
},
{
"title": row_title[2],
"subtitle": row_location[2],
"item_url": "https://www.aarz.pk/property-detail/"+row_slug[2],
"image_url":"http://www.aarz.pk/assets/images/properties/"+row_id[2]+"/"+row_id[2]+".actual.0.jpg" ,
"buttons": [ {
"type":"element_share"
},
{
"type":"phone_number",
"title":"Call Agent",
"payload":row_number[2]
}
],
},
{
"title": row_title[3],
"subtitle": row_location[3],
"item_url": "https://www.aarz.pk/property-detail/"+row_slug[3],
"image_url":"http://www.aarz.pk/assets/images/properties/"+row_id[3]+"/"+row_id[3]+".actual.0.jpg" ,
"buttons": [ {
"type":"element_share"
},
{
"type":"phone_number",
"title":"Call Agent",
"payload":row_number[3]
}
]
}]
}
},
"quick_replies": [
{
"content_type":"text",
"title": QR[0],
"payload": "YOUR_DEFINED_PAYLOAD_FOR_NEXT_IMAGE"
},
{
"content_type":"text",
"title": QR[1],
"payload": "YOUR_DEFINED_PAYLOAD_FOR_NEXT_IMAGE"
},
{
"content_type":"text",
"title": QR[2],
"payload": "YOUR_DEFINED_PAYLOAD_FOR_NEXT_IMAGE"
},
{
"content_type":"text",
"title": QR[3],
"payload": "YOUR_DEFINED_PAYLOAD_FOR_NEXT_IMAGE"
},
{
"content_type":"text",
"title": QR[4],
"payload": "YOUR_DEFINED_PAYLOAD_FOR_NEXT_IMAGE"
},
{
"content_type":"text",
"title": "Purchase plot",
"payload": "YOUR_DEFINED_PAYLOAD_FOR_NEXT_IMAGE"
}
]
}
return {
"speech": speech,
"displayText": speech,
"data":{"facebook":message}
# "contextOut": [],
#"source": "apiai-weather-webhook-sample"
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print "Starting app on port %d" % port
app.run(debug=False, port=port, host='0.0.0.0')
| []
| []
| [
"PORT"
]
| [] | ["PORT"] | python | 1 | 0 | |
codes/projects/test_continuous_parameter/utils_project/training_routine_vae_full_model_aware.py | '''Training routine for the case where posterior model possesses a full
covariance and the parameter-to-observable map is learned
In preparation for prediction and plotting, this script will:
1) Specify which GPU to use for optimization
2) Form the batches for the training, validation and testing sets
3) Specify the input_dimensions and latent_dimensions
4) Specify the probability density for the initial guess of the weights and bias
5) Instantiate the neural network
6) Specify the optimizer
7) Call the optimization routine
Inputs:
- hyperp: dictionary storing set hyperparameter values
- options: dictionary storing the set options
- filepaths: instance of the FilePaths class storing the default strings for
importing and exporting required objects.
- data_dict: dictionary storing the dataset related objects
- prior_dict: dictionary storing the prior related objects
Author: Hwan Goh, Oden Institute, Austin, Texas 2020
'''
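# Illustrative call sequence only -- the driver objects below are assumed to be
# built elsewhere in the project, and these helper names are hypothetical:
#
#   hyperp, options, filepaths = setup_run()                  # hypothetical helper
#   data_dict, prior_dict = load_data_and_prior(options)      # hypothetical helper
#   training(hyperp, options, filepaths, data_dict, prior_dict)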
import os
import sys
import tensorflow as tf
import numpy as np
import pandas as pd
# Import src code
from utils_training.form_train_val_test import form_train_val_test_tf_batches
from neural_networks.nn_vae_full import VAE
from optimize.single.optimize_vae_full_model_aware import optimize
from optimize.distributed.optimize_distributed_vae_full_model_aware import optimize_distributed
from utils_misc.positivity_constraints import positivity_constraint_exp,\
positivity_constraint_log_exp
import pdb
###############################################################################
# Training #
###############################################################################
def training(hyperp, options, filepaths,
data_dict, prior_dict):
#=== GPU Settings ===#
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
if options.distributed_training == 0:
os.environ["CUDA_VISIBLE_DEVICES"] = options.which_gpu
if options.distributed_training == 1:
os.environ["CUDA_VISIBLE_DEVICES"] = options.dist_which_gpus
gpus = tf.config.experimental.list_physical_devices('GPU')
#=== Construct Validation Set and Batches ===#
input_and_latent_train, input_and_latent_val, input_and_latent_test,\
num_batches_train, num_batches_val, num_batches_test\
= form_train_val_test_tf_batches(
data_dict["state_obs_train"], data_dict["parameter_train"],
data_dict["state_obs_test"], data_dict["parameter_test"],
hyperp.batch_size, options.random_seed)
#=== Data and Latent Dimensions of Autoencoder ===#
input_dimensions = data_dict["obs_dimensions"]
latent_dimensions = options.parameter_dimensions
#=== Neural Network Regularizers ===#
kernel_initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.05)
bias_initializer = 'zeros'
#=== Non-distributed Training ===#
if options.distributed_training == 0:
#=== Neural Network ===#
nn = VAE(hyperp, options,
input_dimensions, latent_dimensions,
kernel_initializer, bias_initializer,
tf.identity)
#=== Optimizer ===#
optimizer = tf.keras.optimizers.Adam()
#=== Training ===#
optimize(hyperp, options, filepaths,
nn, optimizer,
input_and_latent_train, input_and_latent_val, input_and_latent_test,
input_dimensions, latent_dimensions, num_batches_train,
data_dict["noise_regularization_matrix"],
prior_dict["prior_mean"], prior_dict["prior_covariance_inverse"])
#=== Distributed Training ===#
if options.distributed_training == 1:
dist_strategy = tf.distribute.MirroredStrategy()
with dist_strategy.scope():
#=== Neural Network ===#
nn = VAE(hyperp, options,
input_dimensions, latent_dimensions,
kernel_initializer, bias_initializer,
tf.identity)
#=== Optimizer ===#
optimizer = tf.keras.optimizers.Adam()
#=== Training ===#
optimize_distributed(dist_strategy,
hyperp, options, filepaths,
nn, optimizer,
input_and_latent_train, input_and_latent_val, input_and_latent_test,
input_dimensions, latent_dimensions, num_batches_train,
data_dict["noise_regularization_matrix"],
prior_dict["prior_mean"], prior_dict["prior_covariance_inverse"])
| []
| []
| [
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 | |
fabfile.py | """
Fabfile for boostrapping and deploying a
Django/Nginx/Apache/Postgres/Postfix stack
on Ubuntu 10.4 LTS.
Deploy a new version:
fab -i ~/aws/flybychat-west.pem -H seddit.com stage_production simple_deploy
"""
from __future__ import with_statement
from functools import partial
import os, os.path, time
import functools
from fabric.api import *
from fabric.contrib.files import append, exists, comment, contains
from fabric.contrib.files import upload_template as orig_upload_template
# Stuff you're likely to change
PROJECT_NAME = 'redditchat' # If this changes, change ejabberd_extauth
DOMAIN = 'seddit.com'
GIT_CLONE_PATH = 'reverie/FlyByChat.git'
PRODUCTION_USERNAME = 'ubuntu'
PRODUCTION_HOST = DOMAIN # Change this to an IP if your DNS isn't resolving yet
ADMIN_EMAIL = '[email protected]'
# Probably don't change:
DJANGO_PORT = 81
BRANCH = 'master'
SERVER_GROUP = 'app'
ROLES = ['nginx', 'django', 'database', 'smtp']
PROJECT_DIR = '/project/%s' % PROJECT_NAME # Not templatized in config files
VIRTUALENV = '/envs/%s' % PROJECT_NAME # Not templatized in config files
DB_PASS = 'foo' # Should not contain quotes; coupled w/settings.py # IGNORED, postgres is now configured to trust local connections
GIT_CLONE_USERNAME = 'git'
GIT_CLONE_HOST = 'github.com'
GIT_CLONE_PSEUDOHOST = PROJECT_NAME # Used to specify site-specific behavior for SSH if multiple projects are hosted on e.g. github.com
PG_VERSION = (9, 1)
PYTHON_VERSION = (2,7)
#
# Fabric Hacks
#
# Fabric excludes `run` and `sudo` from being tasks, for no apparent reason
# This works around that:
run = functools.partial(run)
sudo = functools.partial(sudo)
def upload_template(src, dest, *args, **kwargs):
"""
Wrapper around Fabric's upload_template that sets +r.
upload_template does not preserve file permissions, http://code.fabfile.org/issues/show/117
"""
orig_upload_template(src, dest, *args, **kwargs)
sudo('chmod +r %s' % dest)
def upload_simple_template(src, dest, ctx, use_sudo=False):
"""
For files that can't use Python string interpolation (Fabric's default) or Jinja-style
templating. For example, Erlang files, which are full of percents and braces.
Does a simple search-and-replace on the ctx dictionary key-value pairs.
"""
from StringIO import StringIO
src = open(src).read()
for key, val in ctx.items():
src = src.replace(key, val)
src = StringIO(src)
put(src, dest, use_sudo=use_sudo)
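# Illustrative sketch only (the file paths and token values below are made up, not
# taken from this project). upload_simple_template does a literal search-and-replace,
# so the placeholder tokens can be any unique strings in the source file.
# The leading underscore keeps Fabric's implicit task loader from exposing it as a task.
def _example_upload_simple_template():
    upload_simple_template('./server/example/example.cfg', '/etc/example/example.cfg', {
        'HOSTNAME': 'chat.example.com',
        'LOG_DIR': '/var/log/example',
    }, use_sudo=True)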
def boxed_task(name):
"""
So you can use e.g. Pip.install_requirements as a task.
E.g.: 'boxed_task:Pip.install_requirements'
"""
# TODO: switch to fabric namespaces
box, task = name.split('.', 1)
box = globals()[box]
task = getattr(box, task)
task()
#
# Helpers
#
def home_dir(*args):
# For some reason ~/.ssh/... stopped working as `put` arg, need to expand ~
if env.user == 'root':
return os.path.join("/root", *args)
return os.path.join("/home/%s" % env.user, *args)
#
# Stage management
#
def stage_dev():
global DOMAIN
DOMAIN = 'sayyit.com'
env.user = 'root'
env.stage = {
'hostname': 'dev.' + DOMAIN
}
env.hosts = [env.stage['hostname']]
def stage_staging():
global DOMAIN
DOMAIN = 'sayyit.com'
env.user = PRODUCTION_USERNAME
env.stage = {
'hostname': 'staging.' + DOMAIN
}
env.hosts = [env.stage['hostname']]
def stage_production():
env.user = PRODUCTION_USERNAME
env.stage = {
'hostname': 'www.' + DOMAIN
}
env.hosts = [PRODUCTION_HOST]
#
# Tasks
#
class Apt(object):
@staticmethod
def install(*pkgs):
sudo('apt-get install -y %s' % ' '.join(pkgs))
@staticmethod
def upgrade():
sudo('apt-get update -y')
sudo('apt-get upgrade -y')
class Pip(object):
@staticmethod
def install_virtualenv():
sudo('pip install virtualenv')
@staticmethod
def install(*pkgs):
pip = os.path.join(VIRTUALENV, 'bin', 'pip')
for pkg in pkgs:
run('%s install -U %s' % (pip, pkg))
@staticmethod
def install_requirements():
REMOTE_FILENAME = './tmp_requirements.txt'
pip = os.path.join(VIRTUALENV, 'bin', 'pip')
put('./server/requirements.txt', REMOTE_FILENAME)
run('%s install -r %s' % (pip, REMOTE_FILENAME))
run('rm %s' % REMOTE_FILENAME)
def set_up_permissions(dirname):
sudo('chown -R %s:%s %s' % (env.user, SERVER_GROUP, dirname))
sudo('chmod -R g+w %s' % dirname)
def adduser(username):
# Idempotent (non-failing) version of adduser
base_cmd = 'useradd --user-group %s' % username
sudo(base_cmd + ' || [[ $? == 9 ]]') # 9 is failure code for already exists
# alt: getent passwd username || useradd, also thanks to \amethyst
def bootstrap_everything():
print "bootstrap everything"
install_common()
install_nginx()
install_database()
install_django()
install_xmpp()
install_smtp()
configure_nginx()
configure_django()
configure_database()
configure_smtp()
configure_xmpp()
restart_database() # Must be done before deploy so that syncdb works
simple_deploy()
restart_xmpp()
restart_database()
restart_django() # Must be done before nginx so that port 80 is free
restart_nginx()
restart_smtp()
def bootstrap_database():
install_common()
install_database()
configure_database()
restart_database()
def bootstrap_nginx():
install_common()
install_nginx()
configure_nginx()
deploy()
restart_nginx()
def bootstrap_django():
install_common()
install_django()
configure_django()
deploy()
restart_django()
def bootstrap_xmpp():
install_common()
install_xmpp()
configure_xmpp()
restart_xmpp()
def bootstrap_smtp():
install_common()
install_smtp()
def install_hosts():
# TODO: make it work after repeated applications
# TODO: decouple these with configure_django
XMPP_DOMAIN = 'chat.' + env.stage['hostname']
XMPP_MUC_DOMAIN = 'conference.' + XMPP_DOMAIN;
hostnames = [env.stage['hostname'], XMPP_DOMAIN, XMPP_MUC_DOMAIN]
text = "127.0.0.1 " + ' '.join(hostnames)
append('/etc/hosts', text, use_sudo=True)
def install_common():
print "install common"
# If you get prompted to configure grub, make a preseed file
# and use it next time.
#put('./server/grub_preseed.cfg', 'grub_preseed.cfg')
#sudo('debconf-set-selections grub_preseed.cfg')
Apt.upgrade()
sudo('echo LANG=\\"en_US.UTF-8\\" > /etc/default/locale')
locale_env = [
'LANGUAGE="en_US.utf8"',
'LANG="en_US.utf8"'
]
append('/etc/environment', locale_env, use_sudo=True)
Apt.install('python-setuptools', 'python-pycurl', 'vim', 'screen', 'language-pack-en', 'git-core',
'subversion', 'cron', 'curl', 'man', 'build-essential', 'python-dev', 'libpq-dev',
'python-psycopg2', 'libcurl4-gnutls-dev', 'debconf-utils', 'ntp',
'memcached', 'python-memcache', # TODO: use a different memcached client
'openjdk-7-jre-headless', # need Java to run Closure Compiler
'ack-grep', 'erlang', 'libxslt-dev' # Needed for lxml on Ubuntu
)
sudo('easy_install -U setuptools')
sudo('easy_install pip')
adduser(SERVER_GROUP)
for dirname in ['releases', 'packages', 'bin', 'log']:
sudo('mkdir -p %s' % os.path.join(PROJECT_DIR, dirname))
set_up_permissions('/project')
log_dir = os.path.join(PROJECT_DIR, 'log')
sudo('chmod g+s %s' % log_dir)
install_keys()
install_hosts()
def _key_destination(public=True):
key_name = PROJECT_NAME + '_rsa' + ('.pub' if public else '')
path = '.ssh/' + key_name
return home_dir(path)
def install_keys():
run('mkdir -p ~/.ssh')
# Failing here? You need to generate a keypair and put it in the 'server' directory.
# The point is to use this as a GitHub (or other) 'deploy key'. Delete this stuff
# if you don't want that.
put('./server/id_rsa', _key_destination(public=False))
put('./server/id_rsa.pub', _key_destination())
run('chmod 600 %s' % _key_destination(public=False))
# So we can git clone from [email protected] w/o manual confirmation:
put('./server/known_hosts', home_dir('.ssh/known_hosts'))
# Add SSH configuration...
config_file = home_dir('.ssh/config')
lines = [
'Host ' + GIT_CLONE_PSEUDOHOST,
' HostName ' + GIT_CLONE_HOST,
' IdentityFile ' + _key_destination(public=False)
]
# Fabric's `append` does not run if the line is already in there.
# Multiple projects will have the same HostName line.
# We can't make the lines unique with a comment because ssh chokes; wants comments on their own line.
# Therefore, just append for now until something needs to change. (SSH uses first match.)
for l in lines:
assert "'" not in l
run("echo '%s' >> %s" % (l, config_file))
run("echo '%s' >> %s" % ('', config_file))
def install_nginx():
Apt.install('nginx')
assert exists('/etc/nginx/sites-enabled') # Right package install format?
if exists('/etc/nginx/sites-enabled/default'):
sudo('rm /etc/nginx/sites-enabled/default')
install_processor()
def install_processor():
"""
Stuff to compile javascript
"""
put('./server/processor/compiler.jar', os.path.join(PROJECT_DIR, 'bin', 'compiler.jar'))
put('./server/processor/processor', os.path.join(PROJECT_DIR, 'bin', 'processor'))
sudo('chmod +x %s' % os.path.join(PROJECT_DIR, 'bin', 'processor'))
def install_django():
Pip.install_virtualenv()
if not exists(VIRTUALENV): # TODO: better test than `exists`?
sudo('mkdir -p %s' % VIRTUALENV)
sudo('virtualenv %s' % VIRTUALENV)
set_up_permissions(VIRTUALENV)
Pip.install_requirements()
Apt.install('apache2', 'postgresql-client', 'libapache2-mod-wsgi')
if exists('/etc/apache2/sites-enabled/000-default'):
sudo('rm /etc/apache2/sites-enabled/000-default')
sudo('usermod -G %s -a www-data' % SERVER_GROUP)
def install_smtp():
# this next line is really configuration, but it has to happen before installing
# the package or else it will prompt for configuration
put('./server/postfix_preseed.cfg', 'postfix_preseed.cfg')
sudo('debconf-set-selections postfix_preseed.cfg')
Apt.install('postfix')
run('rm postfix_preseed.cfg')
def install_database():
# This uses whatever the default encoding and locale get set to on your system.
# For me, this started being UTF-8 and and en_US.UTF8 by default, which is what
# I want. If you want something different, you might need to drop and recreate
# your cluster.
Apt.install('postgresql')
restart_database()
def install_xmpp():
# Install ejabberd
put('./server/xmpp/processone-ejabberd-v2.1.10-0-gbcdae19.tar.gz', '/tmp/')
with cd('/tmp/'):
run('tar vzxf processone-ejabberd-v2.1.10-0-gbcdae19.tar.gz')
with cd('/tmp/processone-ejabberd-1357dfb/src'):
run('./configure')
run('make')
sudo('make install')
# Start on boot
sudo('cp ejabberd.init /etc/init.d/ejabberd')
sudo('update-rc.d ejabberd defaults')
# mod_log_chat: logs PMs
put('./server/xmpp/mod_log_chat.beam', '/lib/ejabberd/ebin', use_sudo=True)
put('./server/xmpp/mod_muc_admin.beam', '/lib/ejabberd/ebin', use_sudo=True)
sudo('mkdir -p /var/log/ejabberd/p2p') # Coupled with ejabberd.cfg
# mod_muc_log: logs chatrooms
sudo('mkdir -p /var/log/ejabberd/muc') # Coupled with ejabberd.cfg
def configure_nginx():
put('./server/nginx/nginx.conf', '/etc/nginx/nginx.conf', use_sudo=True)
upload_template( './server/nginx/site', '/etc/nginx/sites-available/%s' % PROJECT_NAME,
use_sudo=True, use_jinja=True, context={
'hostname': env.stage['hostname'],
'django_host': '127.0.0.1', # Change this on switch to a multi-server setup
'XMPP_HOST': '127.0.0.1', #Roledefs.get_internal_ip('xmpp'),
'DJANGO_PORT': DJANGO_PORT,
'DOMAIN': DOMAIN,
'PROJECT_NAME': PROJECT_NAME
})
if not exists('/etc/nginx/sites-enabled/%s' % PROJECT_NAME):
sudo('ln -s /etc/nginx/sites-available/%s /etc/nginx/sites-enabled/%s' % (PROJECT_NAME, PROJECT_NAME))
def configure_django():
upload_template('./server/django/wsgi.py', os.path.join(PROJECT_DIR, 'wsgi.py'), use_jinja=True, context={
'PROJECT_NAME': PROJECT_NAME,
'PYTHON_VERSION_STR': "%d.%d" % PYTHON_VERSION,
})
upload_template('./server/django/vhost', '/etc/apache2/sites-available/%s' % PROJECT_NAME, use_sudo=True, use_jinja=True, context={
'DJANGO_PORT': DJANGO_PORT,
'PROJECT_NAME': PROJECT_NAME,
'DOMAIN': DOMAIN, # Should we use env.stage['hostname']?
'ADMIN_EMAIL': ADMIN_EMAIL
})
upload_template('./server/django/ports.conf', '/etc/apache2/ports.conf', use_sudo=True, use_jinja=True, context={
'DJANGO_PORT': DJANGO_PORT,
})
# XMPP_* coupled with our Strophe.js
XMPP_DOMAIN = 'chat.' + env.stage['hostname']
XMPP_MUC_DOMAIN = 'conference.' + XMPP_DOMAIN;
upload_template('./server/django/stagesettings.py', os.path.join(PROJECT_DIR, 'stagesettings.py'), use_sudo=True,
use_jinja=True, context={
'XMPP_DOMAIN': XMPP_DOMAIN,
'XMPP_MUC_DOMAIN': XMPP_MUC_DOMAIN,
})
if not exists('/etc/apache2/sites-enabled/%s' % PROJECT_NAME):
sudo('ln -s /etc/apache2/sites-available/%s /etc/apache2/sites-enabled/%s' % (PROJECT_NAME, PROJECT_NAME))
def configure_smtp():
main_cf = '/etc/postfix/main.cf'
comment(main_cf, "^inet_interfaces = all$", use_sudo=True)
append(main_cf, "inet_interfaces = loopback-only", use_sudo=True)
def configure_xmpp():
# Extauth
extauth_location = os.path.join(PROJECT_DIR, 'ejabberd_extauth')
put('./server/xmpp/ejabberd_extauth', extauth_location)
#sudo('chown ejabberd:ejabberd %s' % extauth_location)
sudo("chmod +x %s" % extauth_location)
# TODO: DRY Python version, project name, file paths (?) in ejabberd_extauth
# Config
cfg_destination = '/etc/ejabberd/ejabberd.cfg'
upload_simple_template('./server/xmpp/ejabberd.cfg', cfg_destination, {
'HOSTNAME': 'chat.' + env.stage['hostname'],
'EXTAUTH_LOC': extauth_location,
}, use_sudo=True)
put('./server/xmpp/ejabberdctl.cfg', '/etc/ejabberd/ejabberdctl.cfg', use_sudo=True)
#sudo('chown ejabberd:ejabberd %s' % cfg_destination)
def run_with_safe_error(cmd, safe_error, use_sudo=False, user=None):
# Todo: use _run_command in 1.0
if user:
assert use_sudo
if use_sudo:
runner = partial(sudo, user=user)
else:
runner = run
with settings(warn_only=True):
result = runner(cmd)
if not result.failed:
return result
if result == safe_error: # Will probably end up using 'in' instead of '==', but wait and see
return result
# FAIL: this can't work right now b/c we don't have access to stderr. Wait for Fabric 1.0
return result # Remove this.
abort("Command had unexpected error:\n" +
" Command: %s\n" % cmd +
" Expected error: %s\n" % safe_error +
" Actual error: %s" % result
)
def configure_database():
config_dir = '/etc/postgresql/%d.%d/main' % PG_VERSION
sudo('mkdir -p %s' % config_dir)
for filename in ['environment', 'pg_ctl.conf', 'pg_hba.conf', 'pg_ident.conf', 'postgresql.conf', 'start.conf']:
local_file = os.path.join('./server/database', filename)
remote_file = os.path.join(config_dir, filename)
upload_template( local_file, remote_file, use_sudo=True, use_jinja=True, context={
'PROJECT_NAME': PROJECT_NAME,
'PG_VERSION_STRING': "%d.%d" % PG_VERSION,
})
sudo('chown %s:%s %s' % ('postgres', 'postgres', remote_file))
run_with_safe_error("createdb %s" % PROJECT_NAME, 'some dumb error', use_sudo=True, user='postgres')
run_with_safe_error("""psql -c "create user %s with createdb encrypted password '%s'" """ % (PROJECT_NAME, DB_PASS), "some dumb error", use_sudo=True, user='postgres')
sudo("""psql -c "grant all privileges on database %s to %s" """ % (PROJECT_NAME, PROJECT_NAME), user='postgres')
def make_symlink_atomically(new_target, symlink_location, sudo=False):
# From http://blog.moertel.com/articles/2005/08/22/how-to-change-symlinks-atomically
runner = sudo if sudo else run
params = {
'new_target': new_target,
'symlink_location': symlink_location,
'tempname': 'current_tmp',
}
cmd = "ln -s %(new_target)s %(tempname)s && mv -Tf %(tempname)s %(symlink_location)s" % params
runner(cmd)
class Deploy(object):
run_time = time.time()
@staticmethod
def get_current_commit():
branch = os.environ.get('DEPLOY_BRANCH', BRANCH)
return local('git rev-parse --verify %s' % branch, capture=True).strip()
@staticmethod
def get_time_str():
return time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(Deploy.run_time))
@staticmethod
def get_release_name():
return Deploy.get_time_str() + '_' + Deploy.get_current_commit()
@staticmethod
def switch_symlink(name):
assert name
new_target = os.path.join(PROJECT_DIR, 'releases', name)
symlink_location = os.path.join(PROJECT_DIR, 'current')
make_symlink_atomically(new_target, symlink_location)
@staticmethod
def get_release_dir(name):
assert name
return os.path.join(PROJECT_DIR, 'releases', name)
@staticmethod
def get_git_repo():
return GIT_CLONE_USERNAME + '@' + GIT_CLONE_PSEUDOHOST + ':' + GIT_CLONE_PATH
@staticmethod
def upload_new_release():
name = Deploy.get_release_name()
release_dir = Deploy.get_release_dir(name)
if exists(release_dir):
assert release_dir.startswith(os.path.join(PROJECT_DIR, 'releases'))
run('rm -rf %s' % release_dir)
run('git clone %s %s' % (Deploy.get_git_repo(), release_dir))
set_up_permissions(release_dir)
current_commit = Deploy.get_current_commit()
with cd(release_dir):
run('git reset --hard %s' % current_commit)
return name
@staticmethod
def prep_release(name):
"""Prepares all the files in the release dir."""
assert name
release_dir = Deploy.get_release_dir(name)
django_dir = os.path.join(release_dir, PROJECT_NAME)
# Run static file processor
if int(os.environ.get('FBC_DEBUG', False)):
raw_input("You are deploying a debug release. Press any key to continue.")
else:
run(os.path.join(PROJECT_DIR, 'bin', 'processor') + ' ' + release_dir)
print 'Setting up Django settings symlinks'
with cd(django_dir):
run('ln -nfs %s .' % os.path.join(PROJECT_DIR, 'stagesettings.py'))
run('ln -nfs %s .' % os.path.join(PROJECT_DIR, 'localsettings.py'))
print 'Doing Django database updates'
with cd(django_dir):
with_ve = 'source ' + os.path.join(VIRTUALENV, 'bin', 'activate') + ' && '
run(with_ve + 'python manage.py syncdb --noinput')
run(with_ve + 'python manage.py migrate --noinput')
run(with_ve + 'python manage.py loaddata initial_data')
#run(with_ve + 'python manage.py collectstatic --noinput')
print 'Installing crontab'
crontab_path = os.path.join(release_dir, 'server/crontab')
# need to use the stdin formulation. For some reason the path in the normal form
# gets truncated.
run('sudo crontab - < %s' % crontab_path)
@staticmethod
def cleanup_release(name):
pkg_filename = "%s.tar.gz" % name
if os.path.exists(pkg_filename):
local('rm %s' % pkg_filename)
def list_releases():
with cd(os.path.join(PROJECT_DIR, 'releases')):
run('''ls -ltc | grep -v total | awk '{print $6 " " $7 " " $8}' | head -n 10''')
run('ls -l %s | cut -d " " -f "10"' % os.path.join(PROJECT_DIR, 'current'))
# Two-step Deploy; use this for HA multi-server setup:
# 1. deploy_prep_new_release
# 2. deploy_activate_release:<release_name>
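# Example invocation (illustrative; the release name is a placeholder of the
# <timestamp>_<commit> form printed by deploy_prep_new_release):
#   fab -H seddit.com stage_production deploy_prep_new_release
#   fab -H seddit.com stage_production deploy_activate_release:2014-01-01-00-00-00_abc1234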
def deploy_prep_new_release():
local('git push gh')
release_name = Deploy.upload_new_release()
Deploy.prep_release(release_name)
print '*'*20
print "Prepped new release", release_name
print 'You probably want to deploy_activate_release:%s' % release_name
print '*'*20
def deploy_activate_release(release_name):
assert release_name
Deploy.switch_symlink(release_name)
restart_after_deploy()
Deploy.cleanup_release(release_name)
# One-step Deploy; use this for one-server setup or if lazy
# 1. simple_deploy
def deploy():
release_name = Deploy.upload_new_release()
Deploy.prep_release(release_name)
Deploy.switch_symlink(release_name)
Deploy.cleanup_release(release_name)
def restart_after_deploy():
restart_django()
def simple_deploy():
local('git push')
deploy()
restart_after_deploy()
def dev_deploy():
# Simplified deploy for speedy develment. Overwrites things. Inconsistent. Fun!
current = os.path.join(PROJECT_DIR, 'current')
static_dir = os.path.join(current, 'static')
sudo('rm -rf %s' % static_dir)
put('static', current)
#
# Service control
#
def reload_nginx():
sudo('initctl reload nginx')
def reload_django():
sudo('apache2ctl graceful')
def reload_database():
if PG_VERSION < (9, 0):
sudo('/etc/init.d/postgresql-8.4 reload')
else:
sudo('/etc/init.d/postgresql reload')
def restart_nginx():
sudo('/etc/init.d/nginx restart')
def restart_django():
sudo('apache2ctl graceful || apache2ctl start')
def restart_database():
if PG_VERSION < (9, 0):
sudo('/etc/init.d/postgresql-8.4 restart || /etc/init.d/postgresql-8.4 start')
else:
sudo('/etc/init.d/postgresql restart || /etc/init.d/postgresql start')
def restart_smtp():
sudo('/etc/init.d/postfix restart')
def restart_xmpp():
sudo('/etc/init.d/ejabberd restart || /etc/init.d/ejabberd start')
def down_for_maintenance():
with cd(os.path.join(PROJECT_DIR, 'current', 'static')):
run('cp index.html index.html.bak')
run('cp down.html index.html')
def comingsoon():
with cd(os.path.join(PROJECT_DIR, 'current', 'static')):
run('cp index.html index.html.bak')
run('cp comingsoon.html index.html')
| []
| []
| [
"FBC_DEBUG",
"DEPLOY_BRANCH"
]
| [] | ["FBC_DEBUG", "DEPLOY_BRANCH"] | python | 2 | 0 | |
main.go | /*
* Bot Server API
*
* This is a bot API to let bots battle
*
* API version: 1.0.0
* Contact: [email protected]
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
// Package main sets up the dependencies and runs the program
package main
import (
"botServer/core"
"botServer/web"
"github.com/google/logger"
"log"
"net/http"
"os"
"strconv"
)
func main() {
logger.Init("Bot Server", false, false, os.Stdout)
logger.Infof("Server started")
port, err := strconv.Atoi(os.Getenv("PORT"))
if err != nil {
logger.Infof("Could not get port from env variables, falling back to 8080")
port = 8080
}
ConnectAPIService := web.NewConnectAPIService()
ConnectAPIController := web.NewConnectAPIController(ConnectAPIService)
PlayAPIService := web.NewPlayAPIService()
PlayAPIController := web.NewPlayAPIController(PlayAPIService)
router := web.NewRouter(ConnectAPIController, PlayAPIController)
core.StartCleaner()
log.Fatal(http.ListenAndServe(":"+strconv.Itoa(port), router))
}
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
src/specs/test_helpers/bosh.go | package test_helpers
import (
"errors"
"fmt"
"os"
"os/exec"
"strings"
"time"
"github.com/cloudfoundry-incubator/cf-test-helpers/commandreporter"
boshdir "github.com/cloudfoundry/bosh-cli/director"
boshuaa "github.com/cloudfoundry/bosh-cli/uaa"
boshlog "github.com/cloudfoundry/bosh-utils/logger"
"github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
)
var (
BoshDeployment boshdir.Deployment
BoshCredhubPrefix string
)
const boshPath = "/usr/local/bin/bosh"
func BuildBoshDirector() (boshdir.Director, error) {
logger := boshlog.NewLogger(boshlog.LevelError)
factory := boshdir.NewFactory(logger)
// Build a Director config from address-like string.
// HTTPS is required and certificates are always verified.
config, err := boshdir.NewConfigFromURL(BoshEnvironment())
if err != nil {
return nil, fmt.Errorf("building director config: %s", err)
}
// Configure custom trusted CA certificates.
// If nothing is provided default system certificates are used.
config.CACert = BoshCaCert()
// Allow Director to fetch UAA tokens when necessary.
uaa, err := buildUAA()
if err != nil {
return nil, fmt.Errorf("building uaa: %s", err)
}
config.TokenFunc = boshuaa.NewClientTokenSession(uaa).TokenFunc
return factory.New(config, boshdir.NewNoopTaskReporter(), boshdir.NewNoopFileReporter())
}
func BoshDeploymentName() string {
return os.Getenv("BOSH_DEPLOYMENT")
}
func BoshEnvironment() string {
return os.Getenv("BOSH_ENVIRONMENT")
}
func BoshClient() string {
return os.Getenv("BOSH_CLIENT")
}
func BoshClientSecret() string {
return os.Getenv("BOSH_CLIENT_SECRET")
}
func BoshCaCert() string {
return os.Getenv("BOSH_CA_CERT")
}
func ExecuteBosh(args []string, timeout time.Duration) *gexec.Session {
command := exec.Command(boshPath, args...)
reporter := commandreporter.NewCommandReporter(ginkgo.GinkgoWriter)
reporter.Report(time.Now(), command)
session, err := gexec.Start(command, ginkgo.GinkgoWriter, ginkgo.GinkgoWriter)
ExpectWithOffset(1, err).ToNot(HaveOccurred())
session.Wait(timeout)
return session
}
func ExecuteMysqlQueryAsAdmin(deploymentName, instanceIndex, sqlQuery string) string {
command := fmt.Sprintf(`sudo mysql --defaults-file=/var/vcap/jobs/pxc-mysql/config/mylogin.cnf --silent --silent --execute "%s"`,
sqlQuery)
session := MustSucceed(executeMysqlQuery(deploymentName, instanceIndex, command))
return strings.TrimSpace(string(session.Out.Contents()))
}
// ExecuteMysqlQuery executes sqlQuery against the MySQL deployment denoted by
// deploymentName and instance instanceIndex, using credentials in userName and
// password. It returns a pointer to a gexec.Session to be consumed.
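// An illustrative call (deployment name, index, and credentials are placeholders):
//
//	session := ExecuteMysqlQuery("pxc-deployment", "0", "admin", "secret", "SELECT 20")
//	out := string(MustSucceed(session).Out.Contents())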
func ExecuteMysqlQuery(deploymentName, instanceIndex, userName, password, sqlQuery string) *gexec.Session {
command := fmt.Sprintf(`MYSQL_PWD="%s" mysql -u %s --silent --silent --execute "%s"`,
password,
userName,
sqlQuery)
return executeMysqlQuery(deploymentName, instanceIndex, command)
}
func executeMysqlQuery(deploymentName, instanceIndex, command string) *gexec.Session {
args := []string{
"--deployment",
deploymentName,
"ssh",
"mysql/" + instanceIndex,
"--results",
"--column=Stdout",
"--command",
command,
}
return ExecuteBosh(args, 2*time.Minute)
}
func MustSucceed(session *gexec.Session) *gexec.Session {
stdout := string(session.Out.Contents())
stderr := string(session.Err.Contents())
ExpectWithOffset(1, session.ExitCode()).To(BeZero(), fmt.Sprintf("stdout:\n%s\nstderr:\n%s\n", stdout, stderr))
return session
}
func buildUAA() (boshuaa.UAA, error) {
logger := boshlog.NewLogger(boshlog.LevelError)
factory := boshuaa.NewFactory(logger)
// Build a UAA config from a URL.
// HTTPS is required and certificates are always verified.
config, err := boshuaa.NewConfigFromURL(fmt.Sprintf("https://%s:8443", BoshEnvironment()))
if err != nil {
return nil, fmt.Errorf("ERROR build uaa config: %s", err)
}
// Set client credentials for authentication.
// Machine level access should typically use a client instead of a particular user.
config.Client = BoshClient()
config.ClientSecret = BoshClientSecret()
// Configure trusted CA certificates.
// If nothing is provided default system certificates are used.
config.CACert = BoshCaCert()
return factory.New(config)
}
func HostsForInstanceGroup(deployment boshdir.Deployment, instanceGroupName string) ([]string, error) {
instances, err := deployment.Instances()
if err != nil {
return nil, err
}
var addresses []string
for _, instance := range instances {
if instance.Group == instanceGroupName {
addresses = append(addresses, instance.IPs[0])
}
}
return addresses, nil
}
func SetupBoshDeployment() {
var err error
director, err := BuildBoshDirector()
ExpectWithOffset(1, err).NotTo(HaveOccurred())
info, err := director.Info()
Expect(err).NotTo(HaveOccurred())
BoshCredhubPrefix = "/" + info.Name
BoshDeployment, err = director.FindDeployment(BoshDeploymentName())
ExpectWithOffset(1, err).NotTo(HaveOccurred())
}
func MySQLHosts(boshDeployment boshdir.Deployment) ([]string, error) {
return HostsForInstanceGroup(boshDeployment, "mysql")
}
func FirstProxyHost(boshDeployment boshdir.Deployment) (string, error) {
proxyHosts, err := HostsForInstanceGroup(boshDeployment, "proxy")
if err != nil {
return "", err
}
if len(proxyHosts) == 0 {
return "", errors.New("no proxies found")
}
return proxyHosts[0], nil
}
| [
"\"BOSH_DEPLOYMENT\"",
"\"BOSH_ENVIRONMENT\"",
"\"BOSH_CLIENT\"",
"\"BOSH_CLIENT_SECRET\"",
"\"BOSH_CA_CERT\""
]
| []
| [
"BOSH_ENVIRONMENT",
"BOSH_CLIENT_SECRET",
"BOSH_CLIENT",
"BOSH_CA_CERT",
"BOSH_DEPLOYMENT"
]
| [] | ["BOSH_ENVIRONMENT", "BOSH_CLIENT_SECRET", "BOSH_CLIENT", "BOSH_CA_CERT", "BOSH_DEPLOYMENT"] | go | 5 | 0 | |
qa/pull-tester/rpc-tests.py | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:UtabitTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
from tests_config import *
#If imported values are not defined then set to zero (or disabled)
if not vars().has_key('ENABLE_WALLET'):
ENABLE_WALLET=0
if not vars().has_key('ENABLE_UTABITD'):
ENABLE_UTABITD=0
if not vars().has_key('ENABLE_UTILS'):
ENABLE_UTILS=0
if not vars().has_key('ENABLE_ZMQ'):
ENABLE_ZMQ=0
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passOn string
opts = set()
passOn = ""
p = re.compile("^--")
bold = ("","")
if (os.name == 'posix'):
bold = ('\033[0m', '\033[1m')
for arg in sys.argv[1:]:
if arg == '--coverage':
ENABLE_COVERAGE = 1
elif (p.match(arg) or arg == "-h"):
passOn += " " + arg
else:
opts.add(arg)
#Set env vars
buildDir = BUILDDIR
if "UTABITD" not in os.environ:
os.environ["UTABITD"] = buildDir + '/src/utabitd' + EXEEXT
if "UTABITCLI" not in os.environ:
os.environ["UTABITCLI"] = buildDir + '/src/utabit-cli' + EXEEXT
#Disable Windows tests by default
if EXEEXT == ".exe" and "-win" not in opts:
print "Win tests currently disabled. Use -win option to enable"
sys.exit(0)
#Tests
testScripts = [
'bip68-112-113-p2p.py',
'wallet.py',
'listtransactions.py',
'receivedby.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rawtransactions.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'mempool_limit.py',
'httpbasics.py',
'multi_rpc.py',
'zapwallettxes.py',
'proxy_test.py',
'merkle_blocks.py',
'fundrawtransaction.py',
'signrawtransactions.py',
'walletbackup.py',
'nodehandling.py',
'reindex.py',
'addressindex.py',
'timestampindex.py',
'spentindex.py',
'decodescript.py',
'p2p-fullblocktest.py',
'blockchain.py',
'disablewallet.py',
'sendheaders.py',
'keypool.py',
'prioritise_transaction.py',
'invalidblockrequest.py',
'invalidtxrequest.py',
'abandonconflict.py',
'p2p-versionbits-warning.py',
]
testScriptsExt = [
'bip9-softforks.py',
'bip65-cltv.py',
'bip65-cltv-p2p.py',
'bip68-sequence.py',
'bipdersig-p2p.py',
'bipdersig.py',
'getblocktemplate_longpoll.py',
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'pruning.py',
'forknotify.py',
'invalidateblock.py',
# 'rpcbind_test.py', #temporary, bug in libevent, see #6655
'smartfees.py',
'maxblocksinflight.py',
'p2p-acceptblock.py',
'mempool_packages.py',
'maxuploadtarget.py',
'replace-by-fee.py',
]
#Enable ZMQ tests
if ENABLE_ZMQ == 1:
testScripts.append('zmq_test.py')
def runtests():
coverage = None
if ENABLE_COVERAGE:
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
if(ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_UTABITD == 1):
rpcTestDir = buildDir + '/qa/rpc-tests/'
run_extended = '-extended' in opts
cov_flag = coverage.flag if coverage else ''
flags = " --srcdir %s/src %s %s" % (buildDir, cov_flag, passOn)
#Run Tests
for i in range(len(testScripts)):
if (len(opts) == 0
or (len(opts) == 1 and "-win" in opts )
or run_extended
or testScripts[i] in opts
or re.sub(".py$", "", testScripts[i]) in opts ):
print("Running testscript %s%s%s ..." % (bold[1], testScripts[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScripts[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
# exit if help is called so we print just one set of
# instructions
p = re.compile(" -h| --help")
if p.match(passOn):
sys.exit(0)
# Run Extended Tests
for i in range(len(testScriptsExt)):
if (run_extended or testScriptsExt[i] in opts
or re.sub(".py$", "", testScriptsExt[i]) in opts):
print(
"Running 2nd level testscript "
+ "%s%s%s ..." % (bold[1], testScriptsExt[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScriptsExt[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
else:
print "No rpc tests to run. Wallet, utils, and utabitd must all be enabled"
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `utabit-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir %s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `qa/rpc-tests/test-framework/coverage.py`
REFERENCE_FILENAME = 'rpc_interface.txt'
COVERAGE_FILE_PREFIX = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(COVERAGE_FILE_PREFIX):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
runtests()
| []
| []
| [
"UTABITCLI",
"UTABITD"
]
| [] | ["UTABITCLI", "UTABITD"] | python | 2 | 0 | |
web/config.py | import os
class Config(object):
SECRET_KEY = os.environ['SECRET_KEY']
DEBUG = os.environ['DEBUG']
MONGODB_HOST = os.environ['MONGODB_HOST']
MONGODB_PORT = int(os.environ['MONGODB_PORT'])
MONGODB_DB = os.environ['MONGODB_DATABASE']
MONGODB_USERNAME = os.environ['MONGODB_USER']
MONGODB_PASSWORD = os.environ['MONGODB_PASS']
| []
| []
| [
"MONGODB_PORT",
"MONGODB_USER",
"MONGODB_HOST",
"MONGODB_PASS",
"SECRET_KEY",
"DEBUG",
"MONGODB_DATABASE"
]
| [] | ["MONGODB_PORT", "MONGODB_USER", "MONGODB_HOST", "MONGODB_PASS", "SECRET_KEY", "DEBUG", "MONGODB_DATABASE"] | python | 7 | 0 | |
appdata_test.go | // Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package partutil_test
import (
"os"
"os/user"
"path/filepath"
"runtime"
"testing"
"unicode"
partutil "github.com/particl/partsuite_partutil"
)
// TestAppDataDir tests the API for AppDataDir to ensure it gives expected
// results for various operating systems.
func TestAppDataDir(t *testing.T) {
// App name plus upper and lowercase variants.
appName := "myapp"
appNameUpper := string(unicode.ToUpper(rune(appName[0]))) + appName[1:]
appNameLower := string(unicode.ToLower(rune(appName[0]))) + appName[1:]
// When we're on Windows, set the expected local and roaming directories
// per the environment vars. When we aren't on Windows, the function
// should return the current directory when forced to provide the
// Windows path since the environment variables won't exist.
winLocal := "."
winRoaming := "."
if runtime.GOOS == "windows" {
localAppData := os.Getenv("LOCALAPPDATA")
roamingAppData := os.Getenv("APPDATA")
if localAppData == "" {
localAppData = roamingAppData
}
winLocal = filepath.Join(localAppData, appNameUpper)
winRoaming = filepath.Join(roamingAppData, appNameUpper)
}
// Get the home directory to use for testing expected results.
var homeDir string
usr, err := user.Current()
if err != nil {
t.Errorf("user.Current: %v", err)
return
}
homeDir = usr.HomeDir
// Mac app data directory.
macAppData := filepath.Join(homeDir, "Library", "Application Support")
tests := []struct {
goos string
appName string
roaming bool
want string
}{
// Various combinations of application name casing, leading
// period, operating system, and roaming flags.
{"windows", appNameLower, false, winLocal},
{"windows", appNameUpper, false, winLocal},
{"windows", "." + appNameLower, false, winLocal},
{"windows", "." + appNameUpper, false, winLocal},
{"windows", appNameLower, true, winRoaming},
{"windows", appNameUpper, true, winRoaming},
{"windows", "." + appNameLower, true, winRoaming},
{"windows", "." + appNameUpper, true, winRoaming},
{"linux", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"darwin", appNameLower, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", appNameUpper, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", "." + appNameLower, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", "." + appNameUpper, false, filepath.Join(macAppData, appNameUpper)},
{"openbsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"plan9", appNameLower, false, filepath.Join(homeDir, appNameLower)},
{"plan9", appNameUpper, false, filepath.Join(homeDir, appNameLower)},
{"plan9", "." + appNameLower, false, filepath.Join(homeDir, appNameLower)},
{"plan9", "." + appNameUpper, false, filepath.Join(homeDir, appNameLower)},
{"unrecognized", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
// No application name provided, so expect current directory.
{"windows", "", false, "."},
{"windows", "", true, "."},
{"linux", "", false, "."},
{"darwin", "", false, "."},
{"openbsd", "", false, "."},
{"freebsd", "", false, "."},
{"netbsd", "", false, "."},
{"plan9", "", false, "."},
{"unrecognized", "", false, "."},
// Single dot provided for application name, so expect current
// directory.
{"windows", ".", false, "."},
{"windows", ".", true, "."},
{"linux", ".", false, "."},
{"darwin", ".", false, "."},
{"openbsd", ".", false, "."},
{"freebsd", ".", false, "."},
{"netbsd", ".", false, "."},
{"plan9", ".", false, "."},
{"unrecognized", ".", false, "."},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
ret := partutil.TstAppDataDir(test.goos, test.appName, test.roaming)
if ret != test.want {
t.Errorf("appDataDir #%d (%s) does not match - "+
"expected got %s, want %s", i, test.goos, ret,
test.want)
continue
}
}
}
| [
"\"LOCALAPPDATA\"",
"\"APPDATA\""
]
| []
| [
"APPDATA",
"LOCALAPPDATA"
]
| [] | ["APPDATA", "LOCALAPPDATA"] | go | 2 | 0 | |
utils/utils.go | package utils
import (
"fmt"
"net/url"
"os"
"strings"
"github.com/pkg/errors"
)
// Fail prints out the error struct if STEPDEBUG is true otherwise it just
// prints out the error message. Finally, it exits with an error code of 1.
func Fail(err error) {
if err != nil {
if os.Getenv("STEPDEBUG") == "1" {
fmt.Fprintf(os.Stderr, "%+v\n", err)
} else {
fmt.Fprintln(os.Stderr, err)
}
os.Exit(1)
}
}
// CompleteURL parses and validates the given URL. It supports general
// URLs like https://ca.smallstep.com[:port][/path], and incomplete URLs like
// ca.smallstep.com[:port][/path].
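//
// Illustrative behavior:
//
//	CompleteURL("ca.smallstep.com")               // -> "https://ca.smallstep.com"
//	CompleteURL("ca.smallstep.com:9000/1.0/sign") // -> "https://ca.smallstep.com:9000/1.0/sign"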
func CompleteURL(rawurl string) (string, error) {
u, err := url.Parse(rawurl)
if err != nil {
return "", errors.Wrapf(err, "error parsing url '%s'", rawurl)
}
// URLs are generally parsed as:
// [scheme:][//[userinfo@]host][/]path[?query][#fragment]
// But URLs that do not start with a slash after the scheme are interpreted as
// scheme:opaque[?query][#fragment]
if u.Opaque == "" {
if u.Scheme == "" {
u.Scheme = "https"
}
if u.Host == "" {
// rawurl looks like ca.smallstep.com or ca.smallstep.com/1.0/sign
if u.Path != "" {
parts := strings.SplitN(u.Path, "/", 2)
u.Host = parts[0]
if len(parts) == 2 {
u.Path = parts[1]
} else {
u.Path = ""
}
return CompleteURL(u.String())
}
return "", errors.Errorf("error parsing url '%s'", rawurl)
}
return u.String(), nil
}
// scheme:opaque[?query][#fragment]
// rawurl looks like ca.smallstep.com:443 or ca.smallstep.com:443/1.0/sign
return CompleteURL("https://" + rawurl)
}
| [
"\"STEPDEBUG\""
]
| []
| [
"STEPDEBUG"
]
| [] | ["STEPDEBUG"] | go | 1 | 0 | |
main.go | package main
//go:generate errorgen
import (
"flag"
"fmt"
"os"
"os/signal"
"path/filepath"
"runtime"
"strings"
"syscall"
"v2ray.com/core"
"v2ray.com/core/common/platform"
_ "v2ray.com/core/infra/conf/command"
"v2ray.com/core/infra/control"
"v2ray.com/core/main/confloader"
_ "v2ray.com/core/main/distro/all"
)
var (
isctl = flag.Bool("ctl", false, "change to v2rayctl.")
configFile = flag.String("config", "", "Config file for V2Ray.")
version = flag.Bool("version", false, "Show current version of V2Ray.")
test = flag.Bool("test", false, "Test config file only, without launching V2Ray server.")
format = flag.String("format", "json", "Format of input file.")
)
func getCommandName() string {
if len(os.Args) > 2 {
return os.Args[2]
}
return ""
}
func ctlmain() {
name := getCommandName()
cmd := control.GetCommand(name)
if cmd == nil {
fmt.Fprintln(os.Stderr, "Unknown command:", name)
fmt.Fprintln(os.Stderr)
fmt.Println("v2ctl <command>")
fmt.Println("Available commands:")
control.PrintUsage()
return
}
if err := cmd.Execute(os.Args[2:]); err != nil {
hasError := false
if err != flag.ErrHelp {
fmt.Fprintln(os.Stderr, err.Error())
fmt.Fprintln(os.Stderr)
hasError = true
}
for _, line := range cmd.Description().Usage {
fmt.Println(line)
}
if hasError {
os.Exit(-1)
}
}
}
func fileExists(file string) bool {
info, err := os.Stat(file)
return err == nil && !info.IsDir()
}
func getConfigFilePath() string {
if len(*configFile) > 0 {
return *configFile
}
if workingDir, err := os.Getwd(); err == nil {
configFile := filepath.Join(workingDir, "config.json")
if fileExists(configFile) {
return configFile
}
}
if configFile := platform.GetConfigurationPath(); fileExists(configFile) {
return configFile
}
return ""
}
func GetConfigFormat() string {
switch strings.ToLower(*format) {
case "pb", "protobuf":
return "protobuf"
default:
return "json"
}
}
func startV2Ray() (core.Server, error) {
configFile := getConfigFilePath()
configInput, err := confloader.LoadConfig(configFile)
if err != nil {
return nil, newError("failed to load config: ", configFile).Base(err)
}
defer configInput.Close()
config, err := core.LoadConfig(GetConfigFormat(), configFile, configInput)
if err != nil {
return nil, newError("failed to read config file: ", configFile).Base(err)
}
server, err := core.New(config)
if err != nil {
return nil, newError("failed to create server").Base(err)
}
return server, nil
}
func printVersion() {
version := core.VersionStatement()
for _, s := range version {
fmt.Println(s)
}
}
func main() {
flag.Parse()
if *isctl {
ctlmain()
return
}
env_port := os.Getenv("PORT")
fmt.Println(env_port)
printVersion()
if *version {
return
}
server, err := startV2Ray()
if err != nil {
fmt.Println(err.Error())
// Configuration error. Exit with a special value to prevent systemd from restarting.
os.Exit(23)
}
if *test {
fmt.Println("Configuration OK.")
os.Exit(0)
}
if err := server.Start(); err != nil {
fmt.Println("Failed to start", err)
os.Exit(-1)
}
defer server.Close()
// Explicitly triggering GC to remove garbage from config loading.
runtime.GC()
{
osSignals := make(chan os.Signal, 1)
signal.Notify(osSignals, os.Interrupt, os.Kill, syscall.SIGTERM)
<-osSignals
}
}
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
peregrine/test_settings.py | import os
from collections import OrderedDict
INDEX_CLIENT = {
"host": "http://localhost:8000/", 'version': 'v0',
"auth": None}
AUTH = 'https://fake_auth_url'
INTERNAL_AUTH = 'https://fake_auth_url'
AUTH_ADMIN_CREDS = {
'domain_name': 'some_domain',
'username': 'iama_username',
'password': 'iama_password',
'auth_url': 'https://fake_auth_url',
'user_domain_name': 'some_domain',
}
SUBMISSION = {
"bucket": 'test_submission',
"host": 'host',
}
STORAGE = {"s3": {"keys": {}, "kwargs": {}}}
STORAGE["s3"]["keys"]["host"] = {"access_key": "fake",
"secret_key": "sooper_sekrit"}
STORAGE["s3"]["kwargs"]["host"] = {}
PSQLGRAPH = {
'host': "localhost",
'user': "test",
'password': "test",
'database': "automated_test",
}
GDC_PORTAL_ENDPOINT = 'http://fake_portal_endpoint_for_tests'
PEREGRINE_HOST = "localhost"
PEREGRINE_PORT = "443"
# Slicing settings
SLICING = {
'host': 'localhost',
'gencode': 'REPLACEME',
}
FLASK_SECRET_KEY = 'flask_test_key'
from cryptography.fernet import Fernet
HMAC_ENCRYPTION_KEY = Fernet.generate_key()
OAUTH2 = {
"client_id": "",
"client_secret": "",
"oauth_provider": "",
"redirect_uri": "",
}
DICTIONARY_URL = os.environ.get('DICTIONARY_URL','https://s3.amazonaws.com/dictionary-artifacts/datadictionary/develop/schema.json')
USER_API = "http://localhost"
VERIFY_PROJECT = False
AUTH_SUBMISSION_LIST = False
JWT_KEYPAIR_FILES = OrderedDict([
(
'key-test',
('resources/keys/test_public_key.pem', 'resources/keys/test_private_key.pem'),
)
])
| []
| []
| [
"DICTIONARY_URL"
]
| [] | ["DICTIONARY_URL"] | python | 1 | 0 | |
pkg/util/netsh/netsh_test.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package netsh
import (
"net"
"os"
"testing"
"k8s.io/utils/exec"
fakeexec "k8s.io/utils/exec/testing"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)
func fakeCommonRunner() *runner {
fakeCmd := fakeexec.FakeCmd{
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
// Success
func() ([]byte, error) {
return []byte{}, nil
},
// utilexec.ExitError exists, and status is not 0
func() ([]byte, error) {
return nil, &fakeexec.FakeExitError{Status: 1}
},
// utilexec.ExitError exists, and status is 0
func() ([]byte, error) {
return nil, &fakeexec.FakeExitError{Status: 0}
},
// other error exists
func() ([]byte, error) {
return nil, errors.New("not ExitError")
},
},
}
return &runner{
exec: &fakeexec.FakeExec{
CommandScript: []fakeexec.FakeCommandAction{
func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
},
func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
},
func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
},
func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
},
},
},
}
}
func TestEnsurePortProxyRule(t *testing.T) {
runner := fakeCommonRunner()
tests := []struct {
name string
arguments []string
expectedResult bool
expectedError bool
}{
{"Success", []string{"ensure-port-proxy-rule"}, true, false},
{"utilexec.ExitError exists, and status is not 0", []string{"ensure-port-proxy-rule"}, false, false},
{"utilexec.ExitError exists, and status is 0", []string{"ensure-port-proxy-rule"}, false, true},
{"other error exists", []string{"ensure-port-proxy-rule"}, false, true},
}
for _, test := range tests {
result, err := runner.EnsurePortProxyRule(test.arguments)
if test.expectedError {
assert.Errorf(t, err, "Failed to test: %s", test.name)
} else {
if err != nil {
assert.NoErrorf(t, err, "Failed to test: %s", test.name)
} else {
assert.EqualValuesf(t, test.expectedResult, result, "Failed to test: %s", test.name)
}
}
}
}
func TestDeletePortProxyRule(t *testing.T) {
runner := fakeCommonRunner()
tests := []struct {
name string
arguments []string
expectedError bool
}{
{"Success", []string{"delete-port-proxy-rule"}, false},
{"utilexec.ExitError exists, and status is not 0", []string{"delete-port-proxy-rule"}, true},
{"utilexec.ExitError exists, and status is 0", []string{"delete-port-proxy-rule"}, false},
{"other error exists", []string{"delete-port-proxy-rule"}, true},
}
for _, test := range tests {
err := runner.DeletePortProxyRule(test.arguments)
if test.expectedError {
assert.Errorf(t, err, "Failed to test: %s", test.name)
} else {
assert.NoErrorf(t, err, "Failed to test: %s", test.name)
}
}
}
func TestEnsureIPAddress(t *testing.T) {
tests := []struct {
name string
arguments []string
ip net.IP
fakeCmdAction []fakeexec.FakeCommandAction
expectedError bool
expectedResult bool
}{
{
"IP address exists",
[]string{"delete-port-proxy-rule"},
net.IPv4(10, 10, 10, 20),
[]fakeexec.FakeCommandAction{
func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
// IP address exists
func() ([]byte, error) {
return []byte("IP Address:10.10.10.10\nIP Address:10.10.10.20"), nil
},
},
}, cmd, args...)
},
},
false,
true,
},
{
"IP address not exists, but set successful(find it in the second time)",
[]string{"ensure-ip-address"},
net.IPv4(10, 10, 10, 20),
[]fakeexec.FakeCommandAction{
func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
// IP address not exists
func() ([]byte, error) {
return []byte("IP Address:10.10.10.10"), nil
},
},
}, cmd, args...)
},
func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
// Success to set ip
func() ([]byte, error) {
return []byte(""), nil
},
},
}, cmd, args...)
},
func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
// IP address still not exists
func() ([]byte, error) {
return []byte("IP Address:10.10.10.10"), nil
},
},
}, cmd, args...)
},
func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
// IP address exists
func() ([]byte, error) {
return []byte("IP Address:10.10.10.10\nIP Address:10.10.10.20"), nil
},
},
}, cmd, args...)
},
},
false,
true,
},
{
"IP address not exists, utilexec.ExitError exists, but status is not 0)",
[]string{"ensure-ip-address"},
net.IPv4(10, 10, 10, 20),
[]fakeexec.FakeCommandAction{
func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
// IP address not exists
func() ([]byte, error) {
return []byte("IP Address:10.10.10.10"), nil
},
},
}, cmd, args...)
},
func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
// Failed to set ip, utilexec.ExitError exists, and status is not 0
func() ([]byte, error) {
return nil, &fakeexec.FakeExitError{Status: 1}
},
},
}, cmd, args...)
},
},
false,
false,
},
{
"IP address not exists, utilexec.ExitError exists, and status is 0)",
[]string{"ensure-ip-address"},
net.IPv4(10, 10, 10, 20),
[]fakeexec.FakeCommandAction{
func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
// IP address not exists
func() ([]byte, error) {
return []byte("IP Address:10.10.10.10"), nil
},
},
}, cmd, args...)
},
func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
// Failed to set ip, utilexec.ExitError exists, and status is 0
func() ([]byte, error) {
return nil, &fakeexec.FakeExitError{Status: 0}
},
},
}, cmd, args...)
},
},
true,
false,
},
{
"IP address not exists, and error is not utilexec.ExitError)",
[]string{"ensure-ip-address"},
net.IPv4(10, 10, 10, 20),
[]fakeexec.FakeCommandAction{
func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
// IP address not exists
func() ([]byte, error) {
return []byte("IP Address:10.10.10.10"), nil
},
},
}, cmd, args...)
},
func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
// Failed to set ip, other error exists
func() ([]byte, error) {
return nil, errors.New("not ExitError")
},
},
}, cmd, args...)
},
},
true,
false,
},
}
for _, test := range tests {
runner := New(&fakeexec.FakeExec{CommandScript: test.fakeCmdAction})
result, err := runner.EnsureIPAddress(test.arguments, test.ip)
if test.expectedError {
assert.Errorf(t, err, "Failed to test: %s", test.name)
} else {
if err != nil {
assert.NoErrorf(t, err, "Failed to test: %s", test.name)
} else {
assert.EqualValuesf(t, test.expectedResult, result, "Failed to test: %s", test.name)
}
}
}
}
func TestDeleteIPAddress(t *testing.T) {
runner := fakeCommonRunner()
tests := []struct {
name string
arguments []string
expectedError bool
}{
{"Success", []string{"delete-ip-address"}, false},
{"utilexec.ExitError exists, and status is not 0", []string{"delete-ip-address"}, true},
{"utilexec.ExitError exists, and status is 0", []string{"delete-ip-address"}, false},
{"other error exists", []string{"delete-ip-address"}, true},
}
for _, test := range tests {
err := runner.DeleteIPAddress(test.arguments)
if test.expectedError {
assert.Errorf(t, err, "Failed to test: %s", test.name)
} else {
assert.NoErrorf(t, err, "Failed to test: %s", test.name)
}
}
}
func TestGetInterfaceToAddIP(t *testing.T) {
// backup env 'INTERFACE_TO_ADD_SERVICE_IP'
backupValue := os.Getenv("INTERFACE_TO_ADD_SERVICE_IP")
// recover env
defer os.Setenv("INTERFACE_TO_ADD_SERVICE_IP", backupValue)
tests := []struct {
name string
envToBeSet string
expectedResult string
}{
{"env_value_is_empty", "", "vEthernet (HNS Internal NIC)"},
{"env_value_is_not_empty", "eth0", "eth0"},
}
fakeExec := fakeexec.FakeExec{
CommandScript: []fakeexec.FakeCommandAction{},
}
netsh := New(&fakeExec)
for _, test := range tests {
os.Setenv("INTERFACE_TO_ADD_SERVICE_IP", test.envToBeSet)
result := netsh.GetInterfaceToAddIP()
assert.EqualValuesf(t, test.expectedResult, result, "Failed to test: %s", test.name)
}
}
func TestRestore(t *testing.T) {
runner := New(&fakeexec.FakeExec{
CommandScript: []fakeexec.FakeCommandAction{},
})
result := runner.Restore([]string{})
assert.NoErrorf(t, result, "The return value must be nil")
}
func TestCheckIPExists(t *testing.T) {
fakeCmd := fakeexec.FakeCmd{
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
// Error exists
func() ([]byte, error) {
return nil, &fakeexec.FakeExitError{Status: 1}
},
// IP address string is empty
func() ([]byte, error) {
return []byte(""), nil
},
// "IP Address:" field not exists
func() ([]byte, error) {
return []byte("10.10.10.10"), nil
},
// IP not exists
func() ([]byte, error) {
return []byte("IP Address:10.10.10.10"), nil
},
// IP exists
func() ([]byte, error) {
return []byte("IP Address:10.10.10.10\nIP Address:10.10.10.20"), nil
},
},
}
fakeExec := fakeexec.FakeExec{
CommandScript: []fakeexec.FakeCommandAction{
func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
},
func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
},
func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
},
func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
},
func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fakeCmd, cmd, args...)
},
},
}
fakeRunner := &runner{
exec: &fakeExec,
}
tests := []struct {
name string
ipToCheck string
arguments []string
expectedError bool
expectedResult bool
}{
{"Error exists", "10.10.10.20", []string{"check-IP-exists"}, true, false},
{"IP address string is empty", "10.10.10.20", []string{"check-IP-exists"}, false, false},
{"'IP Address:' field not exists", "10.10.10.20", []string{"check-IP-exists"}, false, false},
{"IP not exists", "10.10.10.20", []string{"check-IP-exists"}, false, false},
{"IP exists", "10.10.10.20", []string{"check-IP-exists"}, false, true},
}
for _, test := range tests {
result, err := checkIPExists(test.ipToCheck, test.arguments, fakeRunner)
if test.expectedError {
assert.Errorf(t, err, "Failed to test: %s", test.name)
} else {
assert.EqualValuesf(t, test.expectedResult, result, "Failed to test: %s", test.name)
}
}
}
func TestGetIP(t *testing.T) {
testcases := []struct {
showAddress string
expectAddress string
}{
{
showAddress: "IP 地址: 10.96.0.2",
expectAddress: "10.96.0.2",
},
{
showAddress: "IP Address: 10.96.0.3",
expectAddress: "10.96.0.3",
},
{
showAddress: "IP Address:10.96.0.4",
expectAddress: "10.96.0.4",
},
}
for _, tc := range testcases {
address := getIP(tc.showAddress)
if address != tc.expectAddress {
t.Errorf("expected address=%q, got %q", tc.expectAddress, address)
}
}
}
| [
"\"INTERFACE_TO_ADD_SERVICE_IP\""
]
| []
| [
"INTERFACE_TO_ADD_SERVICE_IP"
]
| [] | ["INTERFACE_TO_ADD_SERVICE_IP"] | go | 1 | 0 | |
database/ffldb/driver.go | // Copyright (c) 2017-2020 The Elastos Foundation
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.
//
// Copyright (c) 2013-2016 The btcsuite developers
package ffldb
import (
"fmt"
"github.com/elastos/Elastos.ELA/database"
"github.com/elastos/Elastos.ELA/utils/elalog"
"github.com/btcsuite/btcd/wire"
)
var log = elalog.Disabled
const (
dbType = "ffldb"
)
// parseArgs parses the arguments from the database Open/Create methods.
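// For example (illustrative), parseArgs("Open", "/path/to/db", wire.MainNet)
// yields ("/path/to/db", wire.MainNet, nil).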
func parseArgs(funcName string, args ...interface{}) (string, wire.BitcoinNet, error) {
if len(args) != 2 {
return "", 0, fmt.Errorf("invalid arguments to %s.%s -- "+
"expected database path and block network", dbType,
funcName)
}
dbPath, ok := args[0].(string)
if !ok {
return "", 0, fmt.Errorf("first argument to %s.%s is invalid -- "+
"expected database path string", dbType, funcName)
}
network, ok := args[1].(wire.BitcoinNet)
if !ok {
return "", 0, fmt.Errorf("second argument to %s.%s is invalid -- "+
"expected block network", dbType, funcName)
}
return dbPath, network, nil
}
// openDBDriver is the callback provided during driver registration that opens
// an existing database for use.
func openDBDriver(args ...interface{}) (database.DB, error) {
dbPath, network, err := parseArgs("Open", args...)
if err != nil {
return nil, err
}
return openDB(dbPath, network, false)
}
// createDBDriver is the callback provided during driver registration that
// creates, initializes, and opens a database for use.
func createDBDriver(args ...interface{}) (database.DB, error) {
dbPath, network, err := parseArgs("Create", args...)
if err != nil {
return nil, err
}
return openDB(dbPath, network, true)
}
// useLogger is the callback provided during driver registration that sets the
// current logger to the provided one.
func useLogger(logger elalog.Logger) {
log = logger
}
func init() {
// Register the driver.
driver := database.Driver{
DbType: dbType,
Create: createDBDriver,
Open: openDBDriver,
UseLogger: useLogger,
}
if err := database.RegisterDriver(driver); err != nil {
panic(fmt.Sprintf("Failed to regiser database driver '%s': %v",
dbType, err))
}
}
| []
| []
| []
| [] | [] | go | null | null | null |
server/sqlite_program.py | import sqlite3
conn = sqlite3.connect('smart_meter.db')
c=conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS consumption_data (meter_id TEXT, current REAL, usage REAL, time_date TEXT)''')
# insert a sample consumption reading
c.execute('''INSERT INTO consumption_data values ("sm01",4.68,2.37,"2021-01-30 13:41:35")''')
conn.commit()
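# Illustrative read-back of the stored rows:
#   for row in c.execute('SELECT * FROM consumption_data'):
#       print(row)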
conn.close() | []
| []
| []
| [] | [] | python | null | null | null |
alipay/aop/api/request/AlipayPcreditHuabeiAuthSettleApplyRequest.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayPcreditHuabeiAuthSettleApplyModel import AlipayPcreditHuabeiAuthSettleApplyModel
class AlipayPcreditHuabeiAuthSettleApplyRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayPcreditHuabeiAuthSettleApplyModel):
self._biz_content = value
else:
self._biz_content = AlipayPcreditHuabeiAuthSettleApplyModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.pcredit.huabei.auth.settle.apply'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| []
| []
| []
| [] | [] | python | null | null | null |
reverseproxy/reverseproxy_test.go | package reverseproxy
import (
"fmt"
"net/url"
"os"
"strconv"
"strings"
"testing"
"time"
"github.com/fatih/color"
"github.com/koding/kite"
"github.com/koding/kite/config"
"github.com/koding/kite/kontrol"
"github.com/koding/kite/protocol"
"github.com/koding/kite/testkeys"
"github.com/koding/kite/testutil"
)
func TestWebSocketProxy(t *testing.T) {
color.Blue("====> Starting WebSocket test")
conf := config.New()
conf.Username = "testuser"
conf.KontrolURL = "http://localhost:5555/kite"
conf.KontrolKey = testkeys.Public
conf.KontrolUser = "testuser"
conf.KiteKey = testutil.NewKiteKey().Raw
conf.ReadEnvironmentVariables()
// start kontrol
color.Green("Starting kontrol")
kontrol.DefaultPort = 5555
kon := kontrol.New(conf.Copy(), "0.1.0", testkeys.Public, testkeys.Private)
switch os.Getenv("KONTROL_STORAGE") {
case "etcd":
kon.SetStorage(kontrol.NewEtcd(nil, kon.Kite.Log))
case "postgres":
kon.SetStorage(kontrol.NewPostgres(nil, kon.Kite.Log))
default:
kon.SetStorage(kontrol.NewEtcd(nil, kon.Kite.Log))
}
go kon.Run()
<-kon.Kite.ServerReadyNotify()
// start proxy
color.Green("Starting Proxy and registering to Kontrol")
proxyConf := conf.Copy()
proxyConf.Port = 4999
proxy := New(proxyConf)
proxy.PublicHost = "localhost"
proxy.PublicPort = proxyConf.Port
proxy.Scheme = "http"
go proxy.Run()
<-proxy.ReadyNotify()
proxyRegisterURL := &url.URL{
Scheme: proxy.Scheme,
Host: proxy.PublicHost + ":" + strconv.Itoa(proxy.PublicPort),
Path: "/kite",
}
fmt.Printf("proxyRegisterURL %+v\n", proxyRegisterURL)
_, err := proxy.Kite.Register(proxyRegisterURL)
if err != nil {
t.Error(err)
}
// start now backend kite
color.Green("Starting BackendKite")
backendKite := kite.New("backendKite", "1.0.0")
backendKite.Config = conf.Copy()
backendKite.HandleFunc("foo", func(r *kite.Request) (interface{}, error) {
return "bar", nil
})
backendKite.Config.Port = 7777
kiteUrl := &url.URL{Scheme: "http", Host: "localhost:7777", Path: "/kite"}
go backendKite.Run()
<-backendKite.ServerReadyNotify()
// now search for a proxy from kontrol
color.Green("BackendKite is searching proxy from kontrol")
kites, err := backendKite.GetKites(&protocol.KontrolQuery{
Username: "testuser",
Environment: config.DefaultConfig.Environment,
Name: Name,
})
if err != nil {
t.Fatal(err)
}
proxyKite := kites[0]
err = proxyKite.Dial()
if err != nil {
t.Fatal(err)
}
// backendKite is connected to proxy, now let us register to proxy and get
// a proxy url. We send our url to proxy, it needs it in order to proxy us
color.Green("Backendkite found proxy, now registering to it")
result, err := proxyKite.TellWithTimeout("register", 4*time.Second, kiteUrl.String())
if err != nil {
t.Fatal(err)
}
proxyURL := result.MustString()
if !strings.Contains(proxyURL, "/proxy") {
t.Fatalf("Invalid proxy URL: %s", proxyURL)
}
registerURL, err := url.Parse(proxyURL)
if err != nil {
t.Fatal(err)
}
// register ourself to kontrol with this proxyUrl
color.Green("BackendKite is registering to Kontrol with the result from proxy")
go backendKite.RegisterForever(registerURL)
<-backendKite.KontrolReadyNotify()
// now another completely foreign kite and will search for our backend
// kite, connect to it and execute the "foo" method
color.Green("Foreign kite started")
foreignKite := kite.New("foreignKite", "1.0.0")
foreignKite.Config = conf.Copy()
color.Green("Querying backendKite now")
backendKites, err := foreignKite.GetKites(&protocol.KontrolQuery{
Username: "testuser",
Environment: config.DefaultConfig.Environment,
Name: "backendKite",
})
remoteBackendKite := backendKites[0]
color.Green("Dialing BackendKite")
err = remoteBackendKite.Dial()
if err != nil {
t.Fatal(err)
}
// foreignKite is connected to backendKite via proxy kite, fire our call...
color.Green("Calling BackendKite's foo method")
result, err = remoteBackendKite.TellWithTimeout("foo", 4*time.Second)
if err != nil {
t.Fatal(err)
}
s := result.MustString()
if s != "bar" {
t.Fatalf("Wrong reply: %s", s)
}
}
| [
"\"KONTROL_STORAGE\""
]
| []
| [
"KONTROL_STORAGE"
]
| [] | ["KONTROL_STORAGE"] | go | 1 | 0 | |
bcs-services/bcs-webconsole/route/middleware.go | /*
* Tencent is pleased to support the open source community by making Blueking Container Service available.
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package route
import (
"fmt"
"net/http"
"os"
"strings"
"github.com/Tencent/bk-bcs/bcs-services/bcs-webconsole/console/components/bcs"
"github.com/Tencent/bk-bcs/bcs-services/bcs-webconsole/console/config"
"github.com/Tencent/bk-bcs/bcs-services/bcs-webconsole/console/sessions"
"github.com/Tencent/bk-bcs/bcs-services/bcs-webconsole/console/types"
bcsJwt "github.com/Tencent/bk-bcs/bcs-common/pkg/auth/jwt"
"github.com/dgrijalva/jwt-go"
"github.com/gin-gonic/gin"
"github.com/google/uuid"
"github.com/pkg/errors"
)
var (
UnauthorizedError = errors.New("用户未登入")
)
func RequestIdGenerator() string {
uid := uuid.New().String()
requestId := strings.Replace(uid, "-", "", -1)
return requestId
}
// AuthContext :
type AuthContext struct {
RequestId string `json:"request_id"`
Operator string `json:"operator"`
Username string `json:"username"`
ProjectId string `json:"project_id"`
ProjectCode string `json:"project_code"`
ClusterId string `json:"cluster_id"`
BindEnv *EnvToken `json:"bind_env"`
BindBCS *bcsJwt.UserClaimsInfo `json:"bind_bcs"`
BindAPIGW *APIGWToken `json:"bind_apigw"`
BindCluster *bcs.Cluster `json:"bind_cluster"`
BindProject *bcs.Project `json:"bind_project"`
BindSession *types.PodContext `json:"bind_session"`
}
// WebAuthRequired is the middleware for web-type requests; no authentication is required.
func WebAuthRequired() gin.HandlerFunc {
return func(c *gin.Context) {
authCtx := &AuthContext{
RequestId: RequestIdGenerator(),
}
c.Set("auth_context", authCtx)
c.Next()
}
}
// APIAuthRequired is the middleware for API-type requests; it supports several authentication modes.
func APIAuthRequired() gin.HandlerFunc {
return func(c *gin.Context) {
authCtx := &AuthContext{
RequestId: RequestIdGenerator(),
}
c.Set("auth_context", authCtx)
if c.Request.Method == http.MethodOptions {
c.Next()
return
}
		// websocket connections are authenticated separately
if c.IsWebsocket() {
c.Next()
return
}
switch {
case initContextWithPortalSession(c, authCtx):
case initContextWithAPIGW(c, authCtx):
case initContextWithBCSJwt(c, authCtx):
case initContextWithDevEnv(c, authCtx):
default:
c.AbortWithStatusJSON(http.StatusUnauthorized, types.APIResponse{
Code: types.ApiErrorCode,
Message: UnauthorizedError.Error(),
RequestID: authCtx.RequestId,
})
return
}
c.Next()
}
}
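// Illustrative wiring of these middlewares onto a gin engine (route paths are assumptions):
//
//	r := gin.Default()
//	r.Group("/web", WebAuthRequired())
//	r.Group("/api/v1", APIAuthRequired())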
type EnvToken struct {
Username string
}
// initContextWithDevEnv supports authenticating via environment variables in the dev environment.
func initContextWithDevEnv(c *gin.Context, authCtx *AuthContext) bool {
if config.G.Base.RunEnv != config.DevEnv {
return false
}
	// local user authentication
username := os.Getenv("WEBCONSOLE_USERNAME")
if username != "" {
authCtx.BindEnv = &EnvToken{Username: username}
authCtx.Username = username
return true
}
	// AppCode authentication
appCode := c.GetHeader("X-BKAPI-JWT-APPCODE")
if appCode != "" {
authCtx.BindAPIGW = &APIGWToken{
App: &APIGWApp{AppCode: appCode, Verified: true},
}
return true
}
return false
}
func BCSJWTDecode(jwtToken string) (*bcsJwt.UserClaimsInfo, error) {
if config.G.BCS.JWTPubKeyObj == nil {
return nil, errors.New("jwt public key not set")
}
token, err := jwt.ParseWithClaims(jwtToken, &bcsJwt.UserClaimsInfo{}, func(token *jwt.Token) (interface{}, error) {
return config.G.BCS.JWTPubKeyObj, nil
})
if err != nil {
return nil, err
}
if !token.Valid {
return nil, errors.New("jwt token not valid")
}
claims, ok := token.Claims.(*bcsJwt.UserClaimsInfo)
if !ok {
return nil, errors.New("jwt token not bcs issuer")
}
return claims, nil
}
type APIGWApp struct {
AppCode string `json:"app_code"`
Verified bool `json:"verified"`
}
type APIGWUser struct {
Username string `json:"username"`
Verified bool `json:"verified"`
}
// APIGWToken is the token payload returned by the BK API gateway.
type APIGWToken struct {
App *APIGWApp `json:"app"`
User *APIGWUser `json:"user"`
*jwt.StandardClaims
}
func (a *APIGWToken) String() string {
return fmt.Sprintf("<%s, %v>", a.App.AppCode, a.App.Verified)
}
func BKAPIGWJWTDecode(jwtToken string) (*APIGWToken, error) {
if config.G.BKAPIGW.JWTPubKeyObj == nil {
return nil, errors.New("jwt public key not set")
}
token, err := jwt.ParseWithClaims(jwtToken, &APIGWToken{}, func(token *jwt.Token) (interface{}, error) {
return config.G.BKAPIGW.JWTPubKeyObj, nil
})
if err != nil {
return nil, err
}
if !token.Valid {
return nil, errors.New("jwt token not valid")
}
claims, ok := token.Claims.(*APIGWToken)
if !ok {
return nil, errors.New("jwt token not BKAPIGW issuer")
}
return claims, nil
}
// initContextWithBCSJwt authenticates requests using the BCS APISIX JWT.
func initContextWithBCSJwt(c *gin.Context, authCtx *AuthContext) bool {
tokenString := c.GetHeader("Authorization")
if len(tokenString) == 0 || !strings.HasPrefix(tokenString, "Bearer ") {
return false
}
tokenString = tokenString[7:]
claims, err := BCSJWTDecode(tokenString)
if err != nil {
return false
}
authCtx.BindBCS = claims
authCtx.Username = claims.UserName
return true
}
func initContextWithAPIGW(c *gin.Context, authCtx *AuthContext) bool {
// get jwt info from headers
tokenString := c.GetHeader("X-Bkapi-Jwt")
if tokenString == "" {
return false
}
token, err := BKAPIGWJWTDecode(tokenString)
if err != nil {
return false
}
authCtx.BindAPIGW = token
return true
}
func initContextWithPortalSession(c *gin.Context, authCtx *AuthContext) bool {
// get jwt info from headers
sessionId := GetSessionId(c)
if sessionId == "" {
return false
}
store := sessions.NewRedisStore("open-session", "open-session")
podCtx, err := store.Get(c.Request.Context(), sessionId)
if err != nil {
return false
}
authCtx.BindSession = podCtx
return true
}
// MustGetAuthContext returns the auth context stored on the gin context and panics if it is missing.
func MustGetAuthContext(c *gin.Context) *AuthContext {
authCtxObj := c.MustGet("auth_context")
authCtx, ok := authCtxObj.(*AuthContext)
if !ok {
panic("not valid auth_context")
}
return authCtx
}
func GetProjectIdOrCode(c *gin.Context) string {
if c.Param("projectId") != "" {
return c.Param("projectId")
}
return ""
}
func GetClusterId(c *gin.Context) string {
if c.Param("clusterId") != "" {
return c.Param("clusterId")
}
return ""
}
func GetSessionId(c *gin.Context) string {
if c.Param("sessionId") != "" {
return c.Param("sessionId")
}
return ""
}
| [
"\"WEBCONSOLE_USERNAME\""
]
| []
| [
"WEBCONSOLE_USERNAME"
]
| [] | ["WEBCONSOLE_USERNAME"] | go | 1 | 0 | |
molecule/old_repo/tests/test_default.py | import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_hosts_file(host):
f = host.file('/etc/hosts')
assert f.exists
assert f.user == 'root'
assert f.group == 'root'
def test_crystal_output(host):
crystal = host.ansible("shell",
"crystal --version | awk '{print $2}'",
check=False)["stdout"]
assert crystal >= "0.26.1"
| []
| []
| [
"MOLECULE_INVENTORY_FILE"
]
| [] | ["MOLECULE_INVENTORY_FILE"] | python | 1 | 0 | |
pandas/io/tests/test_pytables.py | import nose
import sys
import os
import warnings
import tempfile
from contextlib import contextmanager
import datetime
import numpy as np
import pandas
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, Index, DatetimeIndex, isnull)
from pandas.io.pytables import (HDFStore, get_store, Term, read_hdf,
IncompatibilityWarning, PerformanceWarning,
AttributeConflictWarning, DuplicateWarning,
PossibleDataLossError, ClosedFileError)
from pandas.io import pytables as pytables
import pandas.util.testing as tm
from pandas.util.testing import (assert_panel4d_equal,
assert_panel_equal,
assert_frame_equal,
assert_series_equal)
from pandas import concat, Timestamp
from pandas import compat, _np_version_under1p7
from pandas.compat import range, lrange, u
from pandas.util.testing import assert_produces_warning
try:
import tables
except ImportError:
raise nose.SkipTest('no pytables')
from distutils.version import LooseVersion
_default_compressor = LooseVersion(tables.__version__) >= '2.2' \
and 'blosc' or 'zlib'
_multiprocess_can_split_ = False
# contextmanager to ensure the file cleanup
def safe_remove(path):
if path is not None:
try:
os.remove(path)
except:
pass
def safe_close(store):
try:
if store is not None:
store.close()
except:
pass
def create_tempfile(path):
""" create an unopened named temporary file """
return os.path.join(tempfile.gettempdir(),path)
@contextmanager
def ensure_clean_store(path, mode='a', complevel=None, complib=None,
fletcher32=False):
try:
# put in the temporary path if we don't have one already
if not len(os.path.dirname(path)):
path = create_tempfile(path)
store = HDFStore(path, mode=mode, complevel=complevel,
complib=complib, fletcher32=False)
yield store
finally:
safe_close(store)
if mode == 'w' or mode == 'a':
safe_remove(path)
@contextmanager
def ensure_clean_path(path):
"""
return essentially a named temporary file that is not opened
    and deleted on exiting; if path is a list, then create and
return list of filenames
"""
try:
if isinstance(path, list):
filenames = [ create_tempfile(p) for p in path ]
yield filenames
else:
filenames = [ create_tempfile(path) ]
yield filenames[0]
finally:
for f in filenames:
safe_remove(f)
# set these parameters so we don't have file sharing
tables.parameters.MAX_NUMEXPR_THREADS = 1
tables.parameters.MAX_BLOSC_THREADS = 1
tables.parameters.MAX_THREADS = 1
def _maybe_remove(store, key):
"""For tests using tables, try removing the table to be sure there is
no content from previous tests using the same table name."""
try:
store.remove(key)
except:
pass
def compat_assert_produces_warning(w,f):
""" don't produce a warning under PY3 """
if compat.PY3:
f()
else:
with tm.assert_produces_warning(expected_warning=w):
f()
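# Illustrative sketch (not part of the original suite): the helper above takes a
# warning class and a zero-argument callable, e.g.
#
#   compat_assert_produces_warning(PerformanceWarning,
#                                  lambda: store.put('df_mixed', df))
#
# (store/df here are placeholders), so the same call site asserts the warning on
# PY2 but skips the check on PY3.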
class TestHDFStore(tm.TestCase):
def setUp(self):
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.path = 'tmp.__%s__.h5' % tm.rands(10)
def tearDown(self):
pass
def test_factory_fun(self):
try:
with get_store(self.path) as tbl:
raise ValueError('blah')
except ValueError:
pass
finally:
safe_remove(self.path)
try:
with get_store(self.path) as tbl:
tbl['a'] = tm.makeDataFrame()
with get_store(self.path) as tbl:
self.assertEquals(len(tbl), 1)
self.assertEquals(type(tbl['a']), DataFrame)
finally:
safe_remove(self.path)
def test_conv_read_write(self):
try:
def roundtrip(key, obj,**kwargs):
obj.to_hdf(self.path, key,**kwargs)
return read_hdf(self.path, key)
o = tm.makeTimeSeries()
assert_series_equal(o, roundtrip('series',o))
o = tm.makeStringSeries()
assert_series_equal(o, roundtrip('string_series',o))
o = tm.makeDataFrame()
assert_frame_equal(o, roundtrip('frame',o))
o = tm.makePanel()
assert_panel_equal(o, roundtrip('panel',o))
# table
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
df.to_hdf(self.path,'table',append=True)
result = read_hdf(self.path, 'table', where = ['index>2'])
assert_frame_equal(df[df.index>2],result)
finally:
safe_remove(self.path)
def test_api(self):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path,'df',append=True,format='table')
df.iloc[10:].to_hdf(path,'df',append=True,format='table')
assert_frame_equal(read_hdf(path,'df'),df)
# append to False
df.iloc[:10].to_hdf(path,'df',append=False,format='table')
df.iloc[10:].to_hdf(path,'df',append=True,format='table')
assert_frame_equal(read_hdf(path,'df'),df)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path,'df',append=True)
df.iloc[10:].to_hdf(path,'df',append=True,format='table')
assert_frame_equal(read_hdf(path,'df'),df)
# append to False
df.iloc[:10].to_hdf(path,'df',append=False,format='table')
df.iloc[10:].to_hdf(path,'df',append=True)
assert_frame_equal(read_hdf(path,'df'),df)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path,'df',append=False,format='fixed')
assert_frame_equal(read_hdf(path,'df'),df)
df.to_hdf(path,'df',append=False,format='f')
assert_frame_equal(read_hdf(path,'df'),df)
df.to_hdf(path,'df',append=False)
assert_frame_equal(read_hdf(path,'df'),df)
df.to_hdf(path,'df')
assert_frame_equal(read_hdf(path,'df'),df)
with ensure_clean_store(self.path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store,'df')
store.append('df',df.iloc[:10],append=True,format='table')
store.append('df',df.iloc[10:],append=True,format='table')
assert_frame_equal(store.select('df'),df)
# append to False
_maybe_remove(store,'df')
store.append('df',df.iloc[:10],append=False,format='table')
store.append('df',df.iloc[10:],append=True,format='table')
assert_frame_equal(store.select('df'),df)
# formats
_maybe_remove(store,'df')
store.append('df',df.iloc[:10],append=False,format='table')
store.append('df',df.iloc[10:],append=True,format='table')
assert_frame_equal(store.select('df'),df)
_maybe_remove(store,'df')
store.append('df',df.iloc[:10],append=False,format='table')
store.append('df',df.iloc[10:],append=True,format=None)
assert_frame_equal(store.select('df'),df)
with ensure_clean_path(self.path) as path:
# invalid
df = tm.makeDataFrame()
self.assertRaises(ValueError, df.to_hdf, path,'df',append=True,format='f')
self.assertRaises(ValueError, df.to_hdf, path,'df',append=True,format='fixed')
self.assertRaises(TypeError, df.to_hdf, path,'df',append=True,format='foo')
self.assertRaises(TypeError, df.to_hdf, path,'df',append=False,format='bar')
def test_api_default_format(self):
# default_format option
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
pandas.set_option('io.hdf.default_format','fixed')
_maybe_remove(store,'df')
store.put('df',df)
self.assert_(not store.get_storer('df').is_table)
self.assertRaises(ValueError, store.append, 'df2',df)
pandas.set_option('io.hdf.default_format','table')
_maybe_remove(store,'df')
store.put('df',df)
self.assert_(store.get_storer('df').is_table)
_maybe_remove(store,'df2')
store.append('df2',df)
self.assert_(store.get_storer('df').is_table)
pandas.set_option('io.hdf.default_format',None)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
pandas.set_option('io.hdf.default_format','fixed')
df.to_hdf(path,'df')
with get_store(path) as store:
self.assert_(not store.get_storer('df').is_table)
self.assertRaises(ValueError, df.to_hdf, path,'df2', append=True)
pandas.set_option('io.hdf.default_format','table')
df.to_hdf(path,'df3')
with get_store(path) as store:
self.assert_(store.get_storer('df3').is_table)
df.to_hdf(path,'df4',append=True)
with get_store(path) as store:
self.assert_(store.get_storer('df4').is_table)
pandas.set_option('io.hdf.default_format',None)
def test_keys(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeStringSeries()
store['c'] = tm.makeDataFrame()
store['d'] = tm.makePanel()
store['foo/bar'] = tm.makePanel()
self.assertEquals(len(store), 5)
self.assert_(set(
store.keys()) == set(['/a', '/b', '/c', '/d', '/foo/bar']))
def test_repr(self):
with ensure_clean_store(self.path) as store:
repr(store)
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeStringSeries()
store['c'] = tm.makeDataFrame()
store['d'] = tm.makePanel()
store['foo/bar'] = tm.makePanel()
store.append('e', tm.makePanel())
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001,1,2,0,0)
df['datetime2'] = datetime.datetime(2001,1,3,0,0)
df.ix[3:6,['obj1']] = np.nan
df = df.consolidate().convert_objects()
warnings.filterwarnings('ignore', category=PerformanceWarning)
store['df'] = df
warnings.filterwarnings('always', category=PerformanceWarning)
# make a random group in hdf space
store._handle.createGroup(store._handle.root,'bah')
repr(store)
str(store)
# storers
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
store.append('df',df)
s = store.get_storer('df')
repr(s)
str(s)
def test_contains(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeDataFrame()
store['foo/bar'] = tm.makeDataFrame()
self.assert_('a' in store)
self.assert_('b' in store)
self.assert_('c' not in store)
self.assert_('foo/bar' in store)
self.assert_('/foo/bar' in store)
self.assert_('/foo/b' not in store)
self.assert_('bar' not in store)
# GH 2694
warnings.filterwarnings('ignore', category=tables.NaturalNameWarning)
store['node())'] = tm.makeDataFrame()
self.assert_('node())' in store)
def test_versioning(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df[:10])
store.append('df1', df[10:])
self.assert_(store.root.a._v_attrs.pandas_version == '0.10.1')
self.assert_(store.root.b._v_attrs.pandas_version == '0.10.1')
self.assert_(store.root.df1._v_attrs.pandas_version == '0.10.1')
# write a file and wipe its versioning
_maybe_remove(store, 'df2')
store.append('df2', df)
# this is an error because its table_type is appendable, but no version
# info
store.get_node('df2')._v_attrs.pandas_version = None
self.assertRaises(Exception, store.select, 'df2')
def test_mode(self):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(self.path) as path:
# constructor
if mode in ['r','r+']:
self.assertRaises(IOError, HDFStore, path, mode=mode)
else:
store = HDFStore(path,mode=mode)
self.assert_(store._handle.mode == mode)
store.close()
with ensure_clean_path(self.path) as path:
# context
if mode in ['r','r+']:
def f():
with get_store(path,mode=mode) as store:
pass
self.assertRaises(IOError, f)
else:
with get_store(path,mode=mode) as store:
self.assert_(store._handle.mode == mode)
with ensure_clean_path(self.path) as path:
# conv write
if mode in ['r','r+']:
self.assertRaises(IOError, df.to_hdf, path, 'df', mode=mode)
df.to_hdf(path,'df',mode='w')
else:
df.to_hdf(path,'df',mode=mode)
# conv read
if mode in ['w']:
self.assertRaises(KeyError, read_hdf, path, 'df', mode=mode)
else:
result = read_hdf(path,'df',mode=mode)
assert_frame_equal(result,df)
check('r')
check('r+')
check('a')
check('w')
def test_reopen_handle(self):
with ensure_clean_path(self.path) as path:
store = HDFStore(path,mode='a')
store['a'] = tm.makeTimeSeries()
# invalid mode change
self.assertRaises(PossibleDataLossError, store.open, 'w')
store.close()
self.assert_(not store.is_open)
# truncation ok here
store.open('w')
self.assert_(store.is_open)
self.assertEquals(len(store), 0)
store.close()
self.assert_(not store.is_open)
store = HDFStore(path,mode='a')
store['a'] = tm.makeTimeSeries()
# reopen as read
store.open('r')
self.assert_(store.is_open)
self.assertEquals(len(store), 1)
self.assert_(store._mode == 'r')
store.close()
self.assert_(not store.is_open)
# reopen as append
store.open('a')
self.assert_(store.is_open)
self.assertEquals(len(store), 1)
self.assert_(store._mode == 'a')
store.close()
self.assert_(not store.is_open)
# reopen as append (again)
store.open('a')
self.assert_(store.is_open)
self.assertEquals(len(store), 1)
self.assert_(store._mode == 'a')
store.close()
self.assert_(not store.is_open)
def test_open_args(self):
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
# create an in memory store
store = HDFStore(path,mode='a',driver='H5FD_CORE',driver_core_backing_store=0)
store['df'] = df
store.append('df2',df)
tm.assert_frame_equal(store['df'],df)
tm.assert_frame_equal(store['df2'],df)
store.close()
# only supported on pytables >= 3.0.0
if LooseVersion(tables.__version__) >= '3.0.0':
# the file should not have actually been written
self.assert_(os.path.exists(path) is False)
def test_flush(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
left = store.get('a')
right = store['a']
tm.assert_series_equal(left, right)
left = store.get('/a')
right = store['/a']
tm.assert_series_equal(left, right)
self.assertRaises(KeyError, store.get, 'b')
def test_getattr(self):
with ensure_clean_store(self.path) as store:
s = tm.makeTimeSeries()
store['a'] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store,'a')
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store['df'] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
self.assertRaises(AttributeError, getattr, store, 'd')
for x in ['mode','path','handle','complib']:
self.assertRaises(AttributeError, getattr, store, x)
# not stores
for x in ['mode','path','handle','complib']:
getattr(store,"_%s" % x)
def test_put(self):
with ensure_clean_store(self.path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store['a'] = ts
store['b'] = df[:10]
store['foo/bar/bah'] = df[:10]
store['foo'] = df[:10]
store['/foo'] = df[:10]
store.put('c', df[:10], format='table')
# not OK, not a table
self.assertRaises(
ValueError, store.put, 'b', df[10:], append=True)
# node does not currently exist, test _is_table_type returns False in
# this case
# _maybe_remove(store, 'f')
# self.assertRaises(ValueError, store.put, 'f', df[10:], append=True)
# can't put to a table (use append instead)
self.assertRaises(ValueError, store.put, 'c', df[10:], append=True)
# overwrite table
store.put('c', df[:10], format='table', append=False)
tm.assert_frame_equal(df[:10], store['c'])
def test_put_string_index(self):
with ensure_clean_store(self.path) as store:
index = Index(
["I am a very long string index: %s" % i for i in range(20)])
s = Series(np.arange(20), index=index)
df = DataFrame({'A': s, 'B': s})
store['a'] = s
tm.assert_series_equal(store['a'], s)
store['b'] = df
tm.assert_frame_equal(store['b'], df)
# mixed length
index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] + ["I am a very long string index: %s" % i for i in range(20)])
s = Series(np.arange(21), index=index)
df = DataFrame({'A': s, 'B': s})
store['a'] = s
tm.assert_series_equal(store['a'], s)
store['b'] = df
tm.assert_frame_equal(store['b'], df)
def test_put_compression(self):
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
store.put('c', df, format='table', complib='zlib')
tm.assert_frame_equal(store['c'], df)
# can't compress if format='fixed'
self.assertRaises(ValueError, store.put, 'b', df,
format='fixed', complib='zlib')
def test_put_compression_blosc(self):
tm.skip_if_no_package('tables', '2.2', app='blosc support')
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
# can't compress if format='fixed'
self.assertRaises(ValueError, store.put, 'b', df,
format='fixed', complib='blosc')
store.put('c', df, format='table', complib='blosc')
tm.assert_frame_equal(store['c'], df)
def test_put_integer(self):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal)
def test_put_mixed_type(self):
df = tm.makeTimeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.ix[3:6, ['obj1']] = np.nan
df = df.consolidate().convert_objects()
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
# cannot use assert_produces_warning here for some reason
# a PendingDeprecationWarning is also raised?
warnings.filterwarnings('ignore', category=PerformanceWarning)
store.put('df',df)
warnings.filterwarnings('always', category=PerformanceWarning)
expected = store.get('df')
tm.assert_frame_equal(expected,df)
def test_append(self):
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df[:10])
store.append('df1', df[10:])
tm.assert_frame_equal(store['df1'], df)
_maybe_remove(store, 'df2')
store.put('df2', df[:10], format='table')
store.append('df2', df[10:])
tm.assert_frame_equal(store['df2'], df)
_maybe_remove(store, 'df3')
store.append('/df3', df[:10])
store.append('/df3', df[10:])
tm.assert_frame_equal(store['df3'], df)
# this is allowed but you almost always don't want to do it
with tm.assert_produces_warning(expected_warning=tables.NaturalNameWarning):
_maybe_remove(store, '/df3 foo')
store.append('/df3 foo', df[:10])
store.append('/df3 foo', df[10:])
tm.assert_frame_equal(store['df3 foo'], df)
# panel
wp = tm.makePanel()
_maybe_remove(store, 'wp1')
store.append('wp1', wp.ix[:, :10, :])
store.append('wp1', wp.ix[:, 10:, :])
assert_panel_equal(store['wp1'], wp)
# ndim
p4d = tm.makePanel4D()
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :])
store.append('p4d', p4d.ix[:, :, 10:, :])
assert_panel4d_equal(store['p4d'], p4d)
# test using axis labels
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :], axes=[
'items', 'major_axis', 'minor_axis'])
store.append('p4d', p4d.ix[:, :, 10:, :], axes=[
'items', 'major_axis', 'minor_axis'])
assert_panel4d_equal(store['p4d'], p4d)
# test using different number of items on each axis
p4d2 = p4d.copy()
p4d2['l4'] = p4d['l1']
p4d2['l5'] = p4d['l1']
_maybe_remove(store, 'p4d2')
store.append(
'p4d2', p4d2, axes=['items', 'major_axis', 'minor_axis'])
assert_panel4d_equal(store['p4d2'], p4d2)
# test using different order of items on the non-index axes
_maybe_remove(store, 'wp1')
wp_append1 = wp.ix[:, :10, :]
store.append('wp1', wp_append1)
wp_append2 = wp.ix[:, 10:, :].reindex(items=wp.items[::-1])
store.append('wp1', wp_append2)
assert_panel_equal(store['wp1'], wp)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df['mixed_column'] = 'testing'
df.ix[2, 'mixed_column'] = np.nan
_maybe_remove(store, 'df')
store.append('df', df)
tm.assert_frame_equal(store['df'], df)
# uints - test storage of uints
uint_data = DataFrame({'u08' : Series(np.random.random_integers(0, high=255, size=5), dtype=np.uint8),
'u16' : Series(np.random.random_integers(0, high=65535, size=5), dtype=np.uint16),
'u32' : Series(np.random.random_integers(0, high=2**30, size=5), dtype=np.uint32),
'u64' : Series([2**58, 2**59, 2**60, 2**61, 2**62], dtype=np.uint64)},
index=np.arange(5))
_maybe_remove(store, 'uints')
store.append('uints', uint_data)
tm.assert_frame_equal(store['uints'], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, 'uints')
store.append('uints', uint_data, data_columns=['u08','u16','u32']) # 64-bit indices not yet supported
tm.assert_frame_equal(store['uints'], uint_data)
def test_append_series(self):
with ensure_clean_store(self.path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append('ss', ss)
result = store['ss']
tm.assert_series_equal(result, ss)
self.assert_(result.name is None)
store.append('ts', ts)
result = store['ts']
tm.assert_series_equal(result, ts)
self.assert_(result.name is None)
ns.name = 'foo'
store.append('ns', ns)
result = store['ns']
tm.assert_series_equal(result, ns)
self.assert_(result.name == ns.name)
# select on the values
expected = ns[ns>60]
result = store.select('ns',Term('foo>60'))
tm.assert_series_equal(result,expected)
# select on the index and values
expected = ns[(ns>70) & (ns.index<90)]
result = store.select('ns',[Term('foo>70'), Term('index<90')])
tm.assert_series_equal(result,expected)
# multi-index
mi = DataFrame(np.random.randn(5,1),columns=['A'])
mi['B'] = np.arange(len(mi))
mi['C'] = 'foo'
mi.loc[3:5,'C'] = 'bar'
mi.set_index(['C','B'],inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append('mi', s)
tm.assert_series_equal(store['mi'], s)
def test_store_index_types(self):
# GH5386
# test storing various index types
with ensure_clean_store(self.path) as store:
def check(format,index):
df = DataFrame(np.random.randn(10,2),columns=list('AB'))
df.index = index(len(df))
_maybe_remove(store, 'df')
store.put('df',df,format=format)
assert_frame_equal(df,store['df'])
for index in [ tm.makeFloatIndex, tm.makeStringIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex ]:
check('table',index)
check('fixed',index)
# unicode
index = tm.makeUnicodeIndex
if compat.PY3:
check('table',index)
check('fixed',index)
else:
# only supported for the fixed format (and it raises a perf warning)
self.assertRaises(TypeError, check, 'table', index)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
check('fixed',index)
def test_encoding(self):
if LooseVersion(tables.__version__) < '3.0.0':
raise nose.SkipTest('tables version does not support proper encoding')
if sys.byteorder != 'little':
raise nose.SkipTest('system byteorder is not little')
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(A='foo',B='bar'),index=range(5))
df.loc[2,'A'] = np.nan
df.loc[3,'B'] = np.nan
_maybe_remove(store, 'df')
store.append('df', df, encoding='ascii')
tm.assert_frame_equal(store['df'], df)
expected = df.reindex(columns=['A'])
result = store.select('df',Term('columns=A',encoding='ascii'))
tm.assert_frame_equal(result,expected)
def test_append_some_nans(self):
with ensure_clean_store(self.path) as store:
df = DataFrame({'A' : Series(np.random.randn(20)).astype('int32'),
'A1' : np.random.randn(20),
'A2' : np.random.randn(20),
'B' : 'foo', 'C' : 'bar', 'D' : Timestamp("20010101"), 'E' : datetime.datetime(2001,1,2,0,0) },
index=np.arange(20))
# some nans
_maybe_remove(store, 'df1')
df.ix[0:15,['A1','B','D','E']] = np.nan
store.append('df1', df[:10])
store.append('df1', df[10:])
tm.assert_frame_equal(store['df1'], df)
# first column
df1 = df.copy()
df1.ix[:,'A1'] = np.nan
_maybe_remove(store, 'df1')
store.append('df1', df1[:10])
store.append('df1', df1[10:])
tm.assert_frame_equal(store['df1'], df1)
# 2nd column
df2 = df.copy()
df2.ix[:,'A2'] = np.nan
_maybe_remove(store, 'df2')
store.append('df2', df2[:10])
store.append('df2', df2[10:])
tm.assert_frame_equal(store['df2'], df2)
# datetimes
df3 = df.copy()
df3.ix[:,'E'] = np.nan
_maybe_remove(store, 'df3')
store.append('df3', df3[:10])
store.append('df3', df3[10:])
tm.assert_frame_equal(store['df3'], df3)
def test_append_all_nans(self):
with ensure_clean_store(self.path) as store:
df = DataFrame({'A1' : np.random.randn(20),
'A2' : np.random.randn(20)},
index=np.arange(20))
df.ix[0:15,:] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# tests the option io.hdf.dropna_table
pandas.set_option('io.hdf.dropna_table',False)
_maybe_remove(store, 'df3')
store.append('df3', df[:10])
store.append('df3', df[10:])
tm.assert_frame_equal(store['df3'], df)
pandas.set_option('io.hdf.dropna_table',True)
_maybe_remove(store, 'df4')
store.append('df4', df[:10])
store.append('df4', df[10:])
tm.assert_frame_equal(store['df4'], df[-4:])
# nan some entire rows (string are still written!)
df = DataFrame({'A1' : np.random.randn(20),
'A2' : np.random.randn(20),
'B' : 'foo', 'C' : 'bar'},
index=np.arange(20))
df.ix[0:15,:] = np.nan
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# nan some entire rows (but since we have dates they are still written!)
df = DataFrame({'A1' : np.random.randn(20),
'A2' : np.random.randn(20),
'B' : 'foo', 'C' : 'bar', 'D' : Timestamp("20010101"), 'E' : datetime.datetime(2001,1,2,0,0) },
index=np.arange(20))
df.ix[0:15,:] = np.nan
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
def test_append_frame_column_oriented(self):
with ensure_clean_store(self.path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df.ix[:, :2], axes=['columns'])
store.append('df1', df.ix[:, 2:])
tm.assert_frame_equal(store['df1'], df)
result = store.select('df1', 'columns=A')
expected = df.reindex(columns=['A'])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select(
'df1', ('columns=A', Term('index=df.index[0:4]')))
expected = df.reindex(columns=['A'], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
self.assertRaises(TypeError, store.select, 'df1', (
'columns=A', Term('index>df.index[4]')))
def test_append_with_different_block_ordering(self):
#GH 4096; using same frames, but different block orderings
with ensure_clean_store(self.path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10,2),columns=list('AB'))
df['index'] = range(10)
df['index'] += i*10
df['int64'] = Series([1]*len(df),dtype='int64')
df['int16'] = Series([1]*len(df),dtype='int16')
if i % 2 == 0:
del df['int64']
df['int64'] = Series([1]*len(df),dtype='int64')
if i % 3 == 0:
a = df.pop('A')
df['A'] = a
df.set_index('index',inplace=True)
store.append('df',df)
# test a different ordering but with more fields (like an invalid combination)
with ensure_clean_store(self.path) as store:
df = DataFrame(np.random.randn(10,2),columns=list('AB'), dtype='float64')
df['int64'] = Series([1]*len(df),dtype='int64')
df['int16'] = Series([1]*len(df),dtype='int16')
store.append('df',df)
# store additional fields in different blocks
df['int16_2'] = Series([1]*len(df),dtype='int16')
self.assertRaises(ValueError, store.append, 'df', df)
# store multiple additional fields in different blocks
df['float_3'] = Series([1.]*len(df),dtype='float64')
self.assertRaises(ValueError, store.append, 'df', df)
def test_ndim_indexables(self):
""" test using ndim tables in new ways"""
with ensure_clean_store(self.path) as store:
p4d = tm.makePanel4D()
def check_indexers(key, indexers):
for i, idx in enumerate(indexers):
self.assert_(getattr(getattr(
store.root, key).table.description, idx)._v_pos == i)
# append then change (will take existing schema)
indexers = ['items', 'major_axis', 'minor_axis']
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
store.append('p4d', p4d.ix[:, :, 10:, :])
assert_panel4d_equal(store.select('p4d'), p4d)
check_indexers('p4d', indexers)
# same as above, but try to append with different axes
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
store.append('p4d', p4d.ix[:, :, 10:, :], axes=[
'labels', 'items', 'major_axis'])
assert_panel4d_equal(store.select('p4d'), p4d)
check_indexers('p4d', indexers)
# pass incorrect number of axes
_maybe_remove(store, 'p4d')
self.assertRaises(ValueError, store.append, 'p4d', p4d.ix[
:, :, :10, :], axes=['major_axis', 'minor_axis'])
# different than default indexables #1
indexers = ['labels', 'major_axis', 'minor_axis']
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
store.append('p4d', p4d.ix[:, :, 10:, :])
assert_panel4d_equal(store['p4d'], p4d)
check_indexers('p4d', indexers)
# different than default indexables #2
indexers = ['major_axis', 'labels', 'minor_axis']
_maybe_remove(store, 'p4d')
store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers)
store.append('p4d', p4d.ix[:, :, 10:, :])
assert_panel4d_equal(store['p4d'], p4d)
check_indexers('p4d', indexers)
# partial selection
result = store.select('p4d', ['labels=l1'])
expected = p4d.reindex(labels=['l1'])
assert_panel4d_equal(result, expected)
# partial selection2
result = store.select('p4d', [Term(
'labels=l1'), Term('items=ItemA'), Term('minor_axis=B')])
expected = p4d.reindex(
labels=['l1'], items=['ItemA'], minor_axis=['B'])
assert_panel4d_equal(result, expected)
# non-existent partial selection
result = store.select('p4d', [Term(
'labels=l1'), Term('items=Item1'), Term('minor_axis=B')])
expected = p4d.reindex(labels=['l1'], items=[], minor_axis=['B'])
assert_panel4d_equal(result, expected)
def test_append_with_strings(self):
with ensure_clean_store(self.path) as store:
wp = tm.makePanel()
wp2 = wp.rename_axis(
dict([(x, "%s_extra" % x) for x in wp.minor_axis]), axis=2)
def check_col(key,name,size):
self.assert_(getattr(store.get_storer(key).table.description,name).itemsize == size)
store.append('s1', wp, min_itemsize=20)
store.append('s1', wp2)
expected = concat([wp, wp2], axis=2)
expected = expected.reindex(minor_axis=sorted(expected.minor_axis))
assert_panel_equal(store['s1'], expected)
check_col('s1', 'minor_axis', 20)
# test dict format
store.append('s2', wp, min_itemsize={'minor_axis': 20})
store.append('s2', wp2)
expected = concat([wp, wp2], axis=2)
expected = expected.reindex(minor_axis=sorted(expected.minor_axis))
assert_panel_equal(store['s2'], expected)
check_col('s2', 'minor_axis', 20)
# apply the wrong field (similar to #1)
store.append('s3', wp, min_itemsize={'major_axis': 20})
self.assertRaises(ValueError, store.append, 's3', wp2)
# test truncation of bigger strings
store.append('s4', wp)
self.assertRaises(ValueError, store.append, 's4', wp2)
# avoid truncation on elements
df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
store.append('df_big', df)
tm.assert_frame_equal(store.select('df_big'), df)
check_col('df_big', 'values_block_1', 15)
# appending smaller string ok
df2 = DataFrame([[124, 'asdqy'], [346, 'dggnhefbdfb']])
store.append('df_big', df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select('df_big'), expected)
check_col('df_big', 'values_block_1', 15)
# avoid truncation on elements
df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
store.append('df_big2', df, min_itemsize={'values': 50})
tm.assert_frame_equal(store.select('df_big2'), df)
check_col('df_big2', 'values_block_1', 50)
# bigger string on next append
store.append('df_new', df)
df_new = DataFrame(
[[124, 'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])
self.assertRaises(ValueError, store.append, 'df_new', df_new)
# with nans
_maybe_remove(store, 'df')
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.ix[1:4, 'string'] = np.nan
df['string2'] = 'bar'
df.ix[4:8, 'string2'] = np.nan
df['string3'] = 'bah'
df.ix[1:, 'string3'] = np.nan
store.append('df', df)
result = store.select('df')
tm.assert_frame_equal(result, df)
with ensure_clean_store(self.path) as store:
def check_col(key,name,size):
self.assert_(getattr(store.get_storer(key).table.description,name).itemsize == size)
df = DataFrame(dict(A = 'foo', B = 'bar'),index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, 'df')
store.append('df', df, min_itemsize={'A' : 200 })
check_col('df', 'A', 200)
self.assert_(store.get_storer('df').data_columns == ['A'])
# a min_itemsize that creates a data_column2
_maybe_remove(store, 'df')
store.append('df', df, data_columns = ['B'], min_itemsize={'A' : 200 })
check_col('df', 'A', 200)
self.assert_(store.get_storer('df').data_columns == ['B','A'])
# a min_itemsize that creates a data_column2
_maybe_remove(store, 'df')
store.append('df', df, data_columns = ['B'], min_itemsize={'values' : 200 })
check_col('df', 'B', 200)
check_col('df', 'values_block_0', 200)
self.assert_(store.get_storer('df').data_columns == ['B'])
# infer the .typ on subsequent appends
_maybe_remove(store, 'df')
store.append('df', df[:5], min_itemsize=200)
store.append('df', df[5:], min_itemsize=200)
tm.assert_frame_equal(store['df'], df)
# invalid min_itemsize keys
df = DataFrame(['foo','foo','foo','barh','barh','barh'],columns=['A'])
_maybe_remove(store, 'df')
self.assertRaises(ValueError, store.append, 'df', df, min_itemsize={'foo' : 20, 'foobar' : 20})
def test_append_with_data_columns(self):
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
df.loc[:,'B'].iloc[0] = 1.
_maybe_remove(store, 'df')
store.append('df', df[:2], data_columns=['B'])
store.append('df', df[2:])
tm.assert_frame_equal(store['df'], df)
# check that we have indices created
assert(store._handle.root.df.table.cols.index.is_indexed is True)
assert(store._handle.root.df.table.cols.B.is_indexed is True)
# data column searching
result = store.select('df', [Term('B>0')])
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select(
'df', [Term('B>0'), Term('index>df.index[3]')])
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new['string'] = 'foo'
df_new['string'][1:4] = np.nan
df_new['string'][5:6] = 'bar'
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'])
result = store.select('df', [Term('string=foo')])
expected = df_new[df_new.string == 'foo']
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key,name,size):
self.assert_(getattr(store.get_storer(key).table.description,name).itemsize == size)
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'],
min_itemsize={'string': 30})
check_col('df', 'string', 30)
_maybe_remove(store, 'df')
store.append(
'df', df_new, data_columns=['string'], min_itemsize=30)
check_col('df', 'string', 30)
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'],
min_itemsize={'values': 30})
check_col('df', 'string', 30)
with ensure_clean_store(self.path) as store:
df_new['string2'] = 'foobarbah'
df_new['string_block1'] = 'foobarbah1'
df_new['string_block2'] = 'foobarbah2'
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string', 'string2'], min_itemsize={'string': 30, 'string2': 40, 'values': 50})
check_col('df', 'string', 30)
check_col('df', 'string2', 40)
check_col('df', 'values_block_1', 50)
with ensure_clean_store(self.path) as store:
# multiple data columns
df_new = df.copy()
df_new.loc[:,'A'].iloc[0] = 1.
df_new.loc[:,'B'].iloc[0] = -1.
df_new['string'] = 'foo'
df_new['string'][1:4] = np.nan
df_new['string'][5:6] = 'bar'
df_new['string2'] = 'foo'
df_new['string2'][2:5] = np.nan
df_new['string2'][7:8] = 'bar'
_maybe_remove(store, 'df')
store.append(
'df', df_new, data_columns=['A', 'B', 'string', 'string2'])
result = store.select('df', [Term('string=foo'), Term(
'string2=foo'), Term('A>0'), Term('B<0')])
expected = df_new[(df_new.string == 'foo') & (
df_new.string2 == 'foo') & (df_new.A > 0) & (df_new.B < 0)]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select('df', [Term('string=foo'), Term(
'string2=cool')])
expected = df_new[(df_new.string == 'foo') & (
df_new.string2 == 'cool')]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(self.path) as store:
# doc example
df_dc = df.copy()
df_dc['string'] = 'foo'
df_dc.ix[4:6, 'string'] = np.nan
df_dc.ix[7:9, 'string'] = 'bar'
df_dc['string2'] = 'cool'
df_dc['datetime'] = Timestamp('20010102')
df_dc = df_dc.convert_objects()
df_dc.ix[3:5, ['A', 'B', 'datetime']] = np.nan
_maybe_remove(store, 'df_dc')
store.append('df_dc', df_dc, data_columns=['B', 'C',
'string', 'string2', 'datetime'])
result = store.select('df_dc', [Term('B>0')])
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select(
'df_dc', ['B > 0', 'C > 0', 'string == foo'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (
df_dc.string == 'foo')]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(self.path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range('1/1/2000', periods=8)
df_dc = DataFrame(np.random.randn(8, 3), index=index,
columns=['A', 'B', 'C'])
df_dc['string'] = 'foo'
df_dc.ix[4:6,'string'] = np.nan
df_dc.ix[7:9,'string'] = 'bar'
df_dc.ix[:,['B','C']] = df_dc.ix[:,['B','C']].abs()
df_dc['string2'] = 'cool'
# on-disk operations
store.append('df_dc', df_dc, data_columns = ['B', 'C', 'string', 'string2'])
result = store.select('df_dc', [ Term('B>0') ])
expected = df_dc[df_dc.B>0]
tm.assert_frame_equal(result,expected)
result = store.select('df_dc', ['B > 0', 'C > 0', 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == 'foo')]
tm.assert_frame_equal(result,expected)
with ensure_clean_store(self.path) as store:
# panel
# GH5717 not handling data_columns
np.random.seed(1234)
p = tm.makePanel()
store.append('p1',p)
tm.assert_panel_equal(store.select('p1'),p)
store.append('p2',p,data_columns=True)
tm.assert_panel_equal(store.select('p2'),p)
result = store.select('p2',where='ItemA>0')
expected = p.to_frame()
expected = expected[expected['ItemA']>0]
tm.assert_frame_equal(result.to_frame(),expected)
result = store.select('p2',where='ItemA>0 & minor_axis=["A","B"]')
expected = p.to_frame()
expected = expected[expected['ItemA']>0]
expected = expected[expected.reset_index(level=['major']).index.isin(['A','B'])]
tm.assert_frame_equal(result.to_frame(),expected)
def test_create_table_index(self):
with ensure_clean_store(self.path) as store:
def col(t,column):
return getattr(store.get_storer(t).table.cols,column)
# index=False
wp = tm.makePanel()
store.append('p5', wp, index=False)
store.create_table_index('p5', columns=['major_axis'])
assert(col('p5', 'major_axis').is_indexed is True)
assert(col('p5', 'minor_axis').is_indexed is False)
# index=True
store.append('p5i', wp, index=True)
assert(col('p5i', 'major_axis').is_indexed is True)
assert(col('p5i', 'minor_axis').is_indexed is True)
# default optlevels
store.get_storer('p5').create_index()
assert(col('p5', 'major_axis').index.optlevel == 6)
assert(col('p5', 'minor_axis').index.kind == 'medium')
# let's change the indexing scheme
store.create_table_index('p5')
assert(col('p5', 'major_axis').index.optlevel == 6)
assert(col('p5', 'minor_axis').index.kind == 'medium')
store.create_table_index('p5', optlevel=9)
assert(col('p5', 'major_axis').index.optlevel == 9)
assert(col('p5', 'minor_axis').index.kind == 'medium')
store.create_table_index('p5', kind='full')
assert(col('p5', 'major_axis').index.optlevel == 9)
assert(col('p5', 'minor_axis').index.kind == 'full')
store.create_table_index('p5', optlevel=1, kind='light')
assert(col('p5', 'major_axis').index.optlevel == 1)
assert(col('p5', 'minor_axis').index.kind == 'light')
# data columns
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df['string2'] = 'bar'
store.append('f', df, data_columns=['string', 'string2'])
assert(col('f', 'index').is_indexed is True)
assert(col('f', 'string').is_indexed is True)
assert(col('f', 'string2').is_indexed is True)
# specify index=columns
store.append(
'f2', df, index=['string'], data_columns=['string', 'string2'])
assert(col('f2', 'index').is_indexed is False)
assert(col('f2', 'string').is_indexed is True)
assert(col('f2', 'string2').is_indexed is False)
# try to index a non-table
_maybe_remove(store, 'f2')
store.put('f2', df)
self.assertRaises(TypeError, store.create_table_index, 'f2')
# try to change the version supports flag
from pandas.io import pytables
pytables._table_supports_index = False
self.assertRaises(Exception, store.create_table_index, 'f')
# test out some versions
original = tables.__version__
for v in ['2.2', '2.2b']:
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = v
self.assertRaises(Exception, store.create_table_index, 'f')
for v in ['2.3.1', '2.3.1b', '2.4dev', '2.4', original]:
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = v
store.create_table_index('f')
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = original
def test_big_table_frame(self):
raise nose.SkipTest('no big table frame')
# create and write a big table
df = DataFrame(np.random.randn(2000 * 100, 100), index=range(
2000 * 100), columns=['E%03d' % i for i in range(100)])
for x in range(20):
df['String%03d' % x] = 'string%03d' % x
import time
x = time.time()
with ensure_clean_store(self.path,mode='w') as store:
store.append('df', df)
rows = store.root.df.table.nrows
recons = store.select('df')
assert isinstance(recons, DataFrame)
print("\nbig_table frame [%s] -> %5.2f" % (rows, time.time() - x))
def test_big_table2_frame(self):
# this is a really big table: 1m rows x 60 float columns, 20 string, 20 datetime
# columns
raise nose.SkipTest('no big table2 frame')
# create and write a big table
print("\nbig_table2 start")
import time
start_time = time.time()
df = DataFrame(np.random.randn(1000 * 1000, 60), index=range(int(
1000 * 1000)), columns=['E%03d' % i for i in range(60)])
for x in range(20):
df['String%03d' % x] = 'string%03d' % x
for x in range(20):
df['datetime%03d' % x] = datetime.datetime(2001, 1, 2, 0, 0)
print("\nbig_table2 frame (creation of df) [rows->%s] -> %5.2f"
% (len(df.index), time.time() - start_time))
def f(chunksize):
with ensure_clean_store(self.path,mode='w') as store:
store.append('df', df, chunksize=chunksize)
r = store.root.df.table.nrows
return r
for c in [10000, 50000, 250000]:
start_time = time.time()
print("big_table2 frame [chunk->%s]" % c)
rows = f(c)
print("big_table2 frame [rows->%s,chunk->%s] -> %5.2f"
% (rows, c, time.time() - start_time))
def test_big_put_frame(self):
raise nose.SkipTest('no big put frame')
print("\nbig_put start")
import time
start_time = time.time()
df = DataFrame(np.random.randn(1000 * 1000, 60), index=range(int(
1000 * 1000)), columns=['E%03d' % i for i in range(60)])
for x in range(20):
df['String%03d' % x] = 'string%03d' % x
for x in range(20):
df['datetime%03d' % x] = datetime.datetime(2001, 1, 2, 0, 0)
print("\nbig_put frame (creation of df) [rows->%s] -> %5.2f"
% (len(df.index), time.time() - start_time))
with ensure_clean_store(self.path, mode='w') as store:
start_time = time.time()
store = HDFStore(self.path, mode='w')
store.put('df', df)
print(df.get_dtype_counts())
print("big_put frame [shape->%s] -> %5.2f"
% (df.shape, time.time() - start_time))
def test_big_table_panel(self):
raise nose.SkipTest('no big table panel')
# create and write a big table
wp = Panel(
np.random.randn(20, 1000, 1000), items=['Item%03d' % i for i in range(20)],
major_axis=date_range('1/1/2000', periods=1000), minor_axis=['E%03d' % i for i in range(1000)])
wp.ix[:, 100:200, 300:400] = np.nan
for x in range(100):
wp['String%03d'] = 'string%03d' % x
import time
x = time.time()
with ensure_clean_store(self.path, mode='w') as store:
store.append('wp', wp)
rows = store.root.wp.table.nrows
recons = store.select('wp')
assert isinstance(recons, Panel)
print("\nbig_table panel [%s] -> %5.2f" % (rows, time.time() - x))
def test_append_diff_item_order(self):
wp = tm.makePanel()
wp1 = wp.ix[:, :10, :]
wp2 = wp.ix[['ItemC', 'ItemB', 'ItemA'], 10:, :]
with ensure_clean_store(self.path) as store:
store.put('panel', wp1, format='table')
self.assertRaises(ValueError, store.put, 'panel', wp2,
append=True)
def test_append_hierarchical(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
with ensure_clean_store(self.path) as store:
store.append('mi', df)
result = store.select('mi')
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select('mi',columns=['A','B'])
expected = df.reindex(columns=['A','B'])
tm.assert_frame_equal(result,expected)
with ensure_clean_path('test.hdf') as path:
df.to_hdf(path,'df',format='table')
result = read_hdf(path,'df',columns=['A','B'])
expected = df.reindex(columns=['A','B'])
tm.assert_frame_equal(result,expected)
def test_column_multiindex(self):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples([('A','a'), ('A','b'), ('B','a'), ('B','b')], names=['first','second'])
df = DataFrame(np.arange(12).reshape(3,4), columns=index)
with ensure_clean_store(self.path) as store:
store.put('df',df)
tm.assert_frame_equal(store['df'],df,check_index_type=True,check_column_type=True)
store.put('df1',df,format='table')
tm.assert_frame_equal(store['df1'],df,check_index_type=True,check_column_type=True)
self.assertRaises(ValueError, store.put, 'df2',df,format='table',data_columns=['A'])
self.assertRaises(ValueError, store.put, 'df3',df,format='table',data_columns=True)
# non_index_axes name
df = DataFrame(np.arange(12).reshape(3,4), columns=Index(list('ABCD'),name='foo'))
with ensure_clean_store(self.path) as store:
store.put('df1',df,format='table')
tm.assert_frame_equal(store['df1'],df,check_index_type=True,check_column_type=True)
def test_store_multiindex(self):
# validate multi-index names
# GH 5527
with ensure_clean_store(self.path) as store:
def make_index(names=None):
return MultiIndex.from_tuples([( datetime.datetime(2013,12,d), s, t) for d in range(1,3) for s in range(2) for t in range(3)],
names=names)
# no names
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index())
store.append('df',df)
tm.assert_frame_equal(store.select('df'),df)
# partial names
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date',None,None]))
store.append('df',df)
tm.assert_frame_equal(store.select('df'),df)
# series
_maybe_remove(store, 's')
s = Series(np.zeros(12), index=make_index(['date',None,None]))
store.append('s',s)
tm.assert_series_equal(store.select('s'),s)
# dup with column
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date','a','t']))
self.assertRaises(ValueError, store.append, 'df',df)
# dup within level
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date','date','date']))
self.assertRaises(ValueError, store.append, 'df',df)
# fully names
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12,2)), columns=['a','b'], index=make_index(['date','s','t']))
store.append('df',df)
tm.assert_frame_equal(store.select('df'),df)
def test_pass_spec_to_storer(self):
df = tm.makeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('df',df)
self.assertRaises(TypeError, store.select, 'df', columns=['A'])
self.assertRaises(TypeError, store.select, 'df',where=[('columns=A')])
def test_append_misc(self):
with ensure_clean_store(self.path) as store:
# unsupported data types for non-tables
p4d = tm.makePanel4D()
self.assertRaises(TypeError, store.put,'p4d',p4d)
# unsupported data types
self.assertRaises(TypeError, store.put,'abc',None)
self.assertRaises(TypeError, store.put,'abc','123')
self.assertRaises(TypeError, store.put,'abc',123)
self.assertRaises(TypeError, store.put,'abc',np.arange(5))
df = tm.makeDataFrame()
store.append('df', df, chunksize=1)
result = store.select('df')
tm.assert_frame_equal(result, df)
store.append('df1', df, expectedrows=10)
result = store.select('df1')
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(self.path,mode='w') as store:
store.append('obj', obj, chunksize=c)
result = store.select('obj')
comparator(result,obj)
df = tm.makeDataFrame()
df['string'] = 'foo'
df['float322'] = 1.
df['float322'] = df['float322'].astype('float32')
df['bool'] = df['float322'] > 0
df['time1'] = Timestamp('20130101')
df['time2'] = Timestamp('20130102')
check(df, tm.assert_frame_equal)
p = tm.makePanel()
check(p, assert_panel_equal)
p4d = tm.makePanel4D()
check(p4d, assert_panel4d_equal)
# empty frame, GH4273
with ensure_clean_store(self.path) as store:
# 0 len
df_empty = DataFrame(columns=list('ABC'))
store.append('df',df_empty)
self.assertRaises(KeyError,store.select, 'df')
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10,3),columns=list('ABC'))
store.append('df',df)
assert_frame_equal(store.select('df'),df)
store.append('df',df_empty)
assert_frame_equal(store.select('df'),df)
# store
df = DataFrame(columns=list('ABC'))
store.put('df2',df)
assert_frame_equal(store.select('df2'),df)
# 0 len
p_empty = Panel(items=list('ABC'))
store.append('p',p_empty)
self.assertRaises(KeyError,store.select, 'p')
# repeated append of 0/non-zero frames
p = Panel(np.random.randn(3,4,5),items=list('ABC'))
store.append('p',p)
assert_panel_equal(store.select('p'),p)
store.append('p',p_empty)
assert_panel_equal(store.select('p'),p)
# store
store.put('p2',p_empty)
assert_panel_equal(store.select('p2'),p_empty)
def test_append_raise(self):
with ensure_clean_store(self.path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df['invalid'] = [['a']] * len(df)
self.assert_(df.dtypes['invalid'] == np.object_)
self.assertRaises(TypeError, store.append,'df',df)
# multiple invalid columns
df['invalid2'] = [['a']] * len(df)
df['invalid3'] = [['a']] * len(df)
self.assertRaises(TypeError, store.append,'df',df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001,1,2),index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df['invalid'] = s
self.assert_(df.dtypes['invalid'] == np.object_)
self.assertRaises(TypeError, store.append,'df', df)
# directly passing an ndarray
self.assertRaises(TypeError, store.append,'df',np.arange(10))
# series directly
self.assertRaises(TypeError, store.append,'df',Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append('df',df)
df['foo'] = 'foo'
self.assertRaises(ValueError, store.append,'df',df)
def test_table_index_incompatible_dtypes(self):
df1 = DataFrame({'a': [1, 2, 3]})
df2 = DataFrame({'a': [4, 5, 6]},
index=date_range('1/1/2000', periods=3))
with ensure_clean_store(self.path) as store:
store.put('frame', df1, format='table')
self.assertRaises(TypeError, store.put, 'frame', df2,
format='table', append=True)
def test_table_values_dtypes_roundtrip(self):
with ensure_clean_store(self.path) as store:
df1 = DataFrame({'a': [1, 2, 3]}, dtype='f8')
store.append('df_f8', df1)
assert_series_equal(df1.dtypes,store['df_f8'].dtypes)
df2 = DataFrame({'a': [1, 2, 3]}, dtype='i8')
store.append('df_i8', df2)
assert_series_equal(df2.dtypes,store['df_i8'].dtypes)
# incompatible dtype
self.assertRaises(ValueError, store.append, 'df_i8', df1)
# check creation/storage/retrieval of float32 (a bit hacky to actually create them though)
df1 = DataFrame(np.array([[1],[2],[3]],dtype='f4'),columns = ['A'])
store.append('df_f4', df1)
assert_series_equal(df1.dtypes,store['df_f4'].dtypes)
assert df1.dtypes[0] == 'float32'
# check with mixed dtypes
df1 = DataFrame(dict([ (c,Series(np.random.randn(5),dtype=c)) for c in
['float32','float64','int32','int64','int16','int8'] ]))
df1['string'] = 'foo'
df1['float322'] = 1.
df1['float322'] = df1['float322'].astype('float32')
df1['bool'] = df1['float32'] > 0
df1['time1'] = Timestamp('20130101')
df1['time2'] = Timestamp('20130102')
store.append('df_mixed_dtypes1', df1)
result = store.select('df_mixed_dtypes1').get_dtype_counts()
expected = Series({ 'float32' : 2, 'float64' : 1,'int32' : 1, 'bool' : 1,
'int16' : 1, 'int8' : 1, 'int64' : 1, 'object' : 1,
'datetime64[ns]' : 2})
result.sort()
expected.sort()
tm.assert_series_equal(result,expected)
def test_table_mixed_dtypes(self):
# frame
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.ix[3:6, ['obj1']] = np.nan
df = df.consolidate().convert_objects()
with ensure_clean_store(self.path) as store:
store.append('df1_mixed', df)
tm.assert_frame_equal(store.select('df1_mixed'), df)
# panel
wp = tm.makePanel()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['ItemA'] > 0
wp['bool2'] = wp['ItemB'] > 0
wp['int1'] = 1
wp['int2'] = 2
wp = wp.consolidate()
with ensure_clean_store(self.path) as store:
store.append('p1_mixed', wp)
assert_panel_equal(store.select('p1_mixed'), wp)
# ndim
wp = tm.makePanel4D()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['l1'] > 0
wp['bool2'] = wp['l2'] > 0
wp['int1'] = 1
wp['int2'] = 2
wp = wp.consolidate()
with ensure_clean_store(self.path) as store:
store.append('p4d_mixed', wp)
assert_panel4d_equal(store.select('p4d_mixed'), wp)
def test_unimplemented_dtypes_table_columns(self):
with ensure_clean_store(self.path) as store:
l = [('date', datetime.date(2001, 1, 2))]
# py3 ok for unicode
if not compat.PY3:
l.append(('unicode', u('\\u03c3')))
### currently not supported dtypes ####
for n, f in l:
df = tm.makeDataFrame()
df[n] = f
self.assertRaises(
TypeError, store.append, 'df1_%s' % n, df)
# frame
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['datetime1'] = datetime.date(2001, 1, 2)
df = df.consolidate().convert_objects()
with ensure_clean_store(self.path) as store:
# this fails because we have a date in the object block......
self.assertRaises(TypeError, store.append, 'df_unimplemented', df)
def test_append_with_timezones(self):
from datetime import timedelta
def compare(a,b):
tm.assert_frame_equal(a,b)
# compare the zones on each element
for c in a.columns:
for i in a.index:
a_e = a[c][i]
b_e = b[c][i]
if not (a_e == b_e and a_e.tz == b_e.tz):
raise AssertionError("invalid tz comparsion [%s] [%s]" % (a_e,b_e))
# as columns
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A = [ Timestamp('20130102 2:00:00',tz='US/Eastern') + timedelta(hours=1)*i for i in range(5) ]))
store.append('df_tz',df,data_columns=['A'])
result = store['df_tz']
compare(result,df)
assert_frame_equal(result,df)
# select with tz aware
compare(store.select('df_tz',where=Term('A>=df.A[3]')),df[df.A>=df.A[3]])
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130103',tz='US/Eastern')),index=range(5))
store.append('df_tz',df)
result = store['df_tz']
compare(result,df)
assert_frame_equal(result,df)
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='EET')),index=range(5))
self.assertRaises(TypeError, store.append, 'df_tz', df)
# this is ok
_maybe_remove(store, 'df_tz')
store.append('df_tz',df,data_columns=['A','B'])
result = store['df_tz']
compare(result,df)
assert_frame_equal(result,df)
# can't append with diff timezone
df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='CET')),index=range(5))
self.assertRaises(ValueError, store.append, 'df_tz', df)
# as index
with ensure_clean_store(self.path) as store:
# GH 4098 example
df = DataFrame(dict(A = Series(lrange(3), index=date_range('2000-1-1',periods=3,freq='H', tz='US/Eastern'))))
_maybe_remove(store, 'df')
store.put('df',df)
result = store.select('df')
assert_frame_equal(result,df)
_maybe_remove(store, 'df')
store.append('df',df)
result = store.select('df')
assert_frame_equal(result,df)
def test_store_timezone(self):
# GH2852
# issue storing datetime.date with a timezone as it resets when read back in a new timezone
import platform
if platform.system() == "Windows":
raise nose.SkipTest("timezone setting not supported on windows")
import datetime
import time
import os
# original method
with ensure_clean_store(self.path) as store:
today = datetime.date(2013,9,10)
df = DataFrame([1,2,3], index = [today, today, today])
store['obj1'] = df
result = store['obj1']
assert_frame_equal(result, df)
# with tz setting
orig_tz = os.environ.get('TZ')
def setTZ(tz):
if tz is None:
try:
del os.environ['TZ']
except:
pass
else:
os.environ['TZ']=tz
time.tzset()
try:
with ensure_clean_store(self.path) as store:
setTZ('EST5EDT')
today = datetime.date(2013,9,10)
df = DataFrame([1,2,3], index = [today, today, today])
store['obj1'] = df
setTZ('CST6CDT')
result = store['obj1']
assert_frame_equal(result, df)
finally:
setTZ(orig_tz)
def test_append_with_timedelta(self):
if _np_version_under1p7:
raise nose.SkipTest("requires numpy >= 1.7")
# GH 3577
# append timedelta
from datetime import timedelta
df = DataFrame(dict(A = Timestamp('20130101'), B = [ Timestamp('20130101') + timedelta(days=i,seconds=10) for i in range(10) ]))
df['C'] = df['A']-df['B']
df.ix[3:5,'C'] = np.nan
with ensure_clean_store(self.path) as store:
# table
_maybe_remove(store, 'df')
store.append('df',df,data_columns=True)
result = store.select('df')
assert_frame_equal(result,df)
result = store.select('df',Term("C<100000"))
assert_frame_equal(result,df)
result = store.select('df',Term("C","<",-3*86400))
assert_frame_equal(result,df.iloc[3:])
result = store.select('df',"C<'-3D'")
assert_frame_equal(result,df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select('df',"C<'-500000s'")
result = result.dropna(subset=['C'])
assert_frame_equal(result,df.iloc[6:])
result = store.select('df',"C<'-3.5D'")
result = result.iloc[1:]
assert_frame_equal(result,df.iloc[4:])
# fixed
_maybe_remove(store, 'df2')
store.put('df2',df)
result = store.select('df2')
assert_frame_equal(result,df)
def test_remove(self):
with ensure_clean_store(self.path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store['a'] = ts
store['b'] = df
_maybe_remove(store, 'a')
self.assertEquals(len(store), 1)
tm.assert_frame_equal(df, store['b'])
_maybe_remove(store, 'b')
self.assertEquals(len(store), 0)
# nonexistence
self.assertRaises(KeyError, store.remove, 'a_nonexistent_store')
# pathing
store['a'] = ts
store['b/foo'] = df
_maybe_remove(store, 'foo')
_maybe_remove(store, 'b/foo')
self.assertEquals(len(store), 1)
store['a'] = ts
store['b/foo'] = df
_maybe_remove(store, 'b')
self.assertEquals(len(store), 1)
# __delitem__
store['a'] = ts
store['b'] = df
del store['a']
del store['b']
self.assertEquals(len(store), 0)
def test_remove_where(self):
with ensure_clean_store(self.path) as store:
# non-existence
crit1 = Term('index>foo')
self.assertRaises(KeyError, store.remove, 'a', [crit1])
# try to remove non-table (with crit)
# non-table ok (where = None)
wp = tm.makePanel()
store.put('wp', wp, format='table')
store.remove('wp', ["minor_axis=['A', 'D']"])
rs = store.select('wp')
expected = wp.reindex(minor_axis=['B', 'C'])
assert_panel_equal(rs, expected)
# empty where
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
# number of rows deleted (entire table)
n = store.remove('wp', [])
assert(n == 120)
# non-empty where
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
self.assertRaises(ValueError, store.remove,
'wp', ['foo'])
# selecting non-table with a where
# store.put('wp2', wp, format='f')
# self.assertRaises(ValueError, store.remove,
# 'wp2', [('column', ['A', 'D'])])
def test_remove_crit(self):
with ensure_clean_store(self.path) as store:
wp = tm.makePanel()
# group row removal
date4 = wp.major_axis.take([0, 1, 2, 4, 5, 6, 8, 9, 10])
crit4 = Term('major_axis=date4')
store.put('wp3', wp, format='t')
n = store.remove('wp3', where=[crit4])
assert(n == 36)
result = store.select('wp3')
expected = wp.reindex(major_axis=wp.major_axis - date4)
assert_panel_equal(result, expected)
# upper half
store.put('wp', wp, format='table')
date = wp.major_axis[len(wp.major_axis) // 2]
crit1 = Term('major_axis>date')
crit2 = Term("minor_axis=['A', 'D']")
n = store.remove('wp', where=[crit1])
assert(n == 56)
n = store.remove('wp', where=[crit2])
assert(n == 32)
result = store['wp']
expected = wp.truncate(after=date).reindex(minor=['B', 'C'])
assert_panel_equal(result, expected)
# individual row elements
store.put('wp2', wp, format='table')
date1 = wp.major_axis[1:3]
crit1 = Term('major_axis=date1')
store.remove('wp2', where=[crit1])
result = store.select('wp2')
expected = wp.reindex(major_axis=wp.major_axis - date1)
assert_panel_equal(result, expected)
date2 = wp.major_axis[5]
crit2 = Term('major_axis=date2')
store.remove('wp2', where=[crit2])
result = store['wp2']
expected = wp.reindex(
major_axis=wp.major_axis - date1 - Index([date2]))
assert_panel_equal(result, expected)
date3 = [wp.major_axis[7], wp.major_axis[9]]
crit3 = Term('major_axis=date3')
store.remove('wp2', where=[crit3])
result = store['wp2']
expected = wp.reindex(
major_axis=wp.major_axis - date1 - Index([date2]) - Index(date3))
assert_panel_equal(result, expected)
# corners
store.put('wp4', wp, format='table')
n = store.remove(
'wp4', where=[Term('major_axis>wp.major_axis[-1]')])
result = store.select('wp4')
assert_panel_equal(result, wp)
def test_invalid_terms(self):
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.ix[0:4,'string'] = 'bar'
wp = tm.makePanel()
p4d = tm.makePanel4D()
store.put('df', df, format='table')
store.put('wp', wp, format='table')
store.put('p4d', p4d, format='table')
# some invalid terms
self.assertRaises(ValueError, store.select, 'wp', "minor=['A', 'B']")
self.assertRaises(ValueError, store.select, 'wp', ["index=['20121114']"])
self.assertRaises(ValueError, store.select, 'wp', ["index=['20121114', '20121114']"])
self.assertRaises(TypeError, Term)
# more invalid
self.assertRaises(ValueError, store.select, 'df','df.index[3]')
self.assertRaises(SyntaxError, store.select, 'df','index>')
self.assertRaises(ValueError, store.select, 'wp', "major_axis<'20000108' & minor_axis['A', 'B']")
# from the docs
with ensure_clean_path(self.path) as path:
dfq = DataFrame(np.random.randn(10,4),columns=list('ABCD'),index=date_range('20130101',periods=10))
dfq.to_hdf(path,'dfq',format='table',data_columns=True)
# check ok
read_hdf(path,'dfq',where="index>Timestamp('20130104') & columns=['A', 'B']")
read_hdf(path,'dfq',where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(self.path) as path:
dfq = DataFrame(np.random.randn(10,4),columns=list('ABCD'),index=date_range('20130101',periods=10))
dfq.to_hdf(path,'dfq',format='table')
self.assertRaises(ValueError, read_hdf, path,'dfq',where="A>0 or C>0")
def test_terms(self):
with ensure_clean_store(self.path) as store:
wp = tm.makePanel()
p4d = tm.makePanel4D()
store.put('wp', wp, table=True)
store.put('p4d', p4d, table=True)
# panel
result = store.select('wp', [Term(
'major_axis<"20000108"'), Term("minor_axis=['A', 'B']")])
expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])
assert_panel_equal(result, expected)
# with deprecation
result = store.select('wp', [Term(
'major_axis','<',"20000108"), Term("minor_axis=['A', 'B']")])
expected = wp.truncate(after='20000108').reindex(minor=['A', 'B'])
tm.assert_panel_equal(result, expected)
# p4d
result = store.select('p4d', [Term('major_axis<"20000108"'),
Term("minor_axis=['A', 'B']"),
Term("items=['ItemA', 'ItemB']")])
expected = p4d.truncate(after='20000108').reindex(
minor=['A', 'B'], items=['ItemA', 'ItemB'])
assert_panel4d_equal(result, expected)
# back compat invalid terms
terms = [
dict(field='major_axis', op='>', value='20121114'),
[ dict(field='major_axis', op='>', value='20121114') ],
[ "minor_axis=['A','B']", dict(field='major_axis', op='>', value='20121114') ]
]
for t in terms:
with tm.assert_produces_warning(expected_warning=DeprecationWarning):
Term(t)
# valid terms
terms = [
('major_axis=20121114'),
('major_axis>20121114'),
(("major_axis=['20121114', '20121114']"),),
('major_axis=datetime.datetime(2012, 11, 14)'),
'major_axis> 20121114',
'major_axis >20121114',
'major_axis > 20121114',
(("minor_axis=['A', 'B']"),),
(("minor_axis=['A', 'B']"),),
((("minor_axis==['A', 'B']"),),),
(("items=['ItemA', 'ItemB']"),),
('items=ItemA'),
]
for t in terms:
store.select('wp', t)
store.select('p4d', t)
# valid for p4d only
terms = [
(("labels=['l1', 'l2']"),),
Term("labels=['l1', 'l2']"),
]
for t in terms:
store.select('p4d', t)
def test_term_compat(self):
with ensure_clean_store(self.path) as store:
wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
store.append('wp',wp)
result = store.select('wp', [Term('major_axis>20000102'),
Term('minor_axis', '=', ['A','B']) ])
expected = wp.loc[:,wp.major_axis>Timestamp('20000102'),['A','B']]
assert_panel_equal(result, expected)
store.remove('wp', Term('major_axis>20000103'))
result = store.select('wp')
expected = wp.loc[:,wp.major_axis<=Timestamp('20000103'),:]
assert_panel_equal(result, expected)
with ensure_clean_store(self.path) as store:
wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
store.append('wp',wp)
# stringified datetimes
result = store.select('wp', [Term('major_axis','>',datetime.datetime(2000,1,2))])
expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
assert_panel_equal(result, expected)
result = store.select('wp', [Term('major_axis','>',datetime.datetime(2000,1,2,0,0))])
expected = wp.loc[:,wp.major_axis>Timestamp('20000102')]
assert_panel_equal(result, expected)
result = store.select('wp', [Term('major_axis','=',[datetime.datetime(2000,1,2,0,0),datetime.datetime(2000,1,3,0,0)])])
expected = wp.loc[:,[Timestamp('20000102'),Timestamp('20000103')]]
assert_panel_equal(result, expected)
result = store.select('wp', [Term('minor_axis','=',['A','B'])])
expected = wp.loc[:,:,['A','B']]
assert_panel_equal(result, expected)
def test_same_name_scoping(self):
with ensure_clean_store(self.path) as store:
import pandas as pd
df = DataFrame(np.random.randn(20, 2),index=pd.date_range('20130101',periods=20))
store.put('df', df, table=True)
expected = df[df.index>pd.Timestamp('20130105')]
import datetime
result = store.select('df','index>datetime.datetime(2013,1,5)')
assert_frame_equal(result,expected)
from datetime import datetime
# technically an error, but allow it
result = store.select('df','index>datetime.datetime(2013,1,5)')
assert_frame_equal(result,expected)
result = store.select('df','index>datetime(2013,1,5)')
assert_frame_equal(result,expected)
def test_series(self):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object),
dtype=object))
self._check_roundtrip(ts3, tm.assert_series_equal)
def test_sparse_series(self):
s = tm.makeStringSeries()
s[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal,
check_series_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_series_equal,
check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.ix[3:5, 1:3] = np.nan
s.ix[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_double_roundtrip(ss, tm.assert_frame_equal,
check_frame_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_double_roundtrip(ss2, tm.assert_frame_equal,
check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_double_roundtrip(ss3, tm.assert_frame_equal,
check_frame_type=True)
def test_sparse_panel(self):
items = ['x', 'y', 'z']
p = Panel(dict((i, tm.makeDataFrame().ix[:2, :2]) for i in items))
sp = p.to_sparse()
self._check_double_roundtrip(sp, assert_panel_equal,
check_panel_type=True)
sp2 = p.to_sparse(kind='integer')
self._check_double_roundtrip(sp2, assert_panel_equal,
check_panel_type=True)
sp3 = p.to_sparse(fill_value=0)
self._check_double_roundtrip(sp3, assert_panel_equal,
check_panel_type=True)
def test_float_index(self):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal)
def test_tuple_index(self):
# GH #492
col = np.arange(10)
idx = [(0., 1.), (2., 3.), (4., 5.)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
self._check_roundtrip(DF, tm.assert_frame_equal)
def test_index_types(self):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(l, r,
check_dtype=True,
check_index_type=True,
check_series_type=True)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
ser = Series(values, [0, 'y'])
self._check_roundtrip(ser, func)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
ser = Series(values, ['y', 0])
self._check_roundtrip(ser, func)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
ser = Series(values, [datetime.date.today(), 'a'])
self._check_roundtrip(ser, func)
with tm.assert_produces_warning(expected_warning=PerformanceWarning):
ser = Series(values, [1.23, 'b'])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.datetime(
2012, 1, 1), datetime.datetime(2012, 1, 2)])
self._check_roundtrip(ser, func)
def test_timeseries_preepoch(self):
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
raise nose.SkipTest("won't work on Python < 2.7")
dr = bdate_range('1/1/1940', '1/1/1960')
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal)
except OverflowError:
raise nose.SkipTest('known failer on some windows platforms')
def test_frame(self):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(df, tm.assert_frame_equal)
self._check_roundtrip(df, tm.assert_frame_equal)
self._check_roundtrip_table(df, tm.assert_frame_equal,
compression=True)
self._check_roundtrip(df, tm.assert_frame_equal,
compression=True)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(tdf, tm.assert_frame_equal)
self._check_roundtrip(tdf, tm.assert_frame_equal,
compression=True)
with ensure_clean_store(self.path) as store:
# not consolidated
df['foo'] = np.random.randn(len(df))
store['df'] = df
recons = store['df']
self.assert_(recons._data.is_consolidated())
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal)
def test_empty_series_frame(self):
s0 = Series()
s1 = Series(name='myseries')
df0 = DataFrame()
df1 = DataFrame(index=['a', 'b', 'c'])
df2 = DataFrame(columns=['d', 'e', 'f'])
self._check_roundtrip(s0, tm.assert_series_equal)
self._check_roundtrip(s1, tm.assert_series_equal)
self._check_roundtrip(df0, tm.assert_frame_equal)
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
def test_empty_series(self):
for dtype in [np.int64, np.float64, np.object, 'm8[ns]', 'M8[ns]']:
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal)
def test_can_serialize_dates(self):
rng = [x.date() for x in bdate_range('1/1/2000', '1/30/2000')]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal)
def test_timezones(self):
rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
self.assert_(recons.index.equals(rng))
self.assertEquals(rng.tz, recons.index.tz)
def test_fixed_offset_tz(self):
rng = date_range('1/1/2000 00:00:00-07:00', '1/30/2000 00:00:00-07:00')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
self.assert_(recons.index.equals(rng))
self.assertEquals(rng.tz, recons.index.tz)
def test_store_hierarchical(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
frame = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self._check_roundtrip(frame, tm.assert_frame_equal)
self._check_roundtrip(frame.T, tm.assert_frame_equal)
self._check_roundtrip(frame['A'], tm.assert_series_equal)
# check that the names are stored
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
assert(recons.index.names == ('foo', 'bar'))
def test_store_index_name(self):
df = tm.makeDataFrame()
df.index.name = 'foo'
with ensure_clean_store(self.path) as store:
store['frame'] = df
recons = store['frame']
assert(recons.index.name == 'foo')
def test_store_series_name(self):
df = tm.makeDataFrame()
series = df['A']
with ensure_clean_store(self.path) as store:
store['series'] = series
recons = store['series']
assert(recons.name == 'A')
def test_store_mixed(self):
def _make_one():
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['int1'] = 1
df['int2'] = 2
return df.consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
with ensure_clean_store(self.path) as store:
store['obj'] = df1
tm.assert_frame_equal(store['obj'], df1)
store['obj'] = df2
tm.assert_frame_equal(store['obj'], df2)
# check that can store Series of all of these types
self._check_roundtrip(df1['obj1'], tm.assert_series_equal)
self._check_roundtrip(df1['bool1'], tm.assert_series_equal)
self._check_roundtrip(df1['int1'], tm.assert_series_equal)
# try with compression
self._check_roundtrip(df1['obj1'], tm.assert_series_equal,
compression=True)
self._check_roundtrip(df1['bool1'], tm.assert_series_equal,
compression=True)
self._check_roundtrip(df1['int1'], tm.assert_series_equal,
compression=True)
self._check_roundtrip(df1, tm.assert_frame_equal,
compression=True)
def test_wide(self):
wp = tm.makePanel()
self._check_roundtrip(wp, assert_panel_equal)
def test_wide_table(self):
wp = tm.makePanel()
self._check_roundtrip_table(wp, assert_panel_equal)
def test_select_with_dups(self):
# single dtypes
df = DataFrame(np.random.randn(10,4),columns=['A','A','B','B'])
df.index = date_range('20130101 9:30',periods=10,freq='T')
with ensure_clean_store(self.path) as store:
store.append('df',df)
result = store.select('df')
expected = df
assert_frame_equal(result,expected,by_blocks=True)
result = store.select('df',columns=df.columns)
expected = df
assert_frame_equal(result,expected,by_blocks=True)
result = store.select('df',columns=['A'])
expected = df.loc[:,['A']]
assert_frame_equal(result,expected)
# dups across dtypes
df = concat([DataFrame(np.random.randn(10,4),columns=['A','A','B','B']),
DataFrame(np.random.randint(0,10,size=20).reshape(10,2),columns=['A','C'])],
axis=1)
df.index = date_range('20130101 9:30',periods=10,freq='T')
with ensure_clean_store(self.path) as store:
store.append('df',df)
result = store.select('df')
expected = df
assert_frame_equal(result,expected,by_blocks=True)
result = store.select('df',columns=df.columns)
expected = df
assert_frame_equal(result,expected,by_blocks=True)
expected = df.loc[:,['A']]
result = store.select('df',columns=['A'])
assert_frame_equal(result,expected,by_blocks=True)
expected = df.loc[:,['B','A']]
result = store.select('df',columns=['B','A'])
assert_frame_equal(result,expected,by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(self.path) as store:
store.append('df',df)
store.append('df',df)
expected = df.loc[:,['B','A']]
expected = concat([expected, expected])
result = store.select('df',columns=['B','A'])
assert_frame_equal(result,expected,by_blocks=True)
def test_wide_table_dups(self):
wp = tm.makePanel()
with ensure_clean_store(self.path) as store:
store.put('panel', wp, format='table')
store.put('panel', wp, format='table', append=True)
with tm.assert_produces_warning(expected_warning=DuplicateWarning):
recons = store['panel']
assert_panel_equal(recons, wp)
def test_long(self):
def _check(left, right):
assert_panel_equal(left.to_panel(), right.to_panel())
wp = tm.makePanel()
self._check_roundtrip(wp.to_frame(), _check)
# empty
# self._check_roundtrip(wp.to_frame()[:0], _check)
def test_longpanel(self):
pass
def test_overwrite_node(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store['a'] = ts
tm.assert_series_equal(store['a'], ts)
def test_sparse_with_compression(self):
# GH 2931
# make sparse dataframe
df = DataFrame(np.random.binomial(n=1, p=.01, size=(1e3, 10))).to_sparse(fill_value=0)
# case 1: store uncompressed
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression = False,
check_frame_type=True)
# case 2: store compressed (works)
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression = 'zlib',
check_frame_type=True)
# set one series to be completely sparse
df[0] = np.zeros(1e3)
# case 3: store df with completely sparse series uncompressed
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression = False,
check_frame_type=True)
# case 4: try storing df with completely sparse series compressed (fails)
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression = 'zlib',
check_frame_type=True)
def test_select(self):
wp = tm.makePanel()
with ensure_clean_store(self.path) as store:
# put/select ok
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
store.select('wp')
# non-table ok (where = None)
_maybe_remove(store, 'wp')
store.put('wp2', wp)
store.select('wp2')
# selection on the non-indexable with a large number of columns
wp = Panel(
np.random.randn(100, 100, 100), items=['Item%03d' % i for i in range(100)],
major_axis=date_range('1/1/2000', periods=100), minor_axis=['E%03d' % i for i in range(100)])
_maybe_remove(store, 'wp')
store.append('wp', wp)
items = ['Item%03d' % i for i in range(80)]
result = store.select('wp', Term('items=items'))
expected = wp.reindex(items=items)
assert_panel_equal(expected, result)
# selecting non-table with a where
# self.assertRaises(ValueError, store.select,
# 'wp2', ('column', ['A', 'D']))
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df')
store.append('df', df)
result = store.select('df', columns=['A', 'B'])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select('df', [("columns=['A', 'B']")])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['A'])
result = store.select('df', ['A > 0'], columns=['A', 'B'])
expected = df[df.A > 0].reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# all columns as data columns
_maybe_remove(store, 'df')
store.append('df', df, data_columns=True)
result = store.select('df', ['A > 0'], columns=['A', 'B'])
expected = df[df.A > 0].reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['A'])
result = store.select('df', ['A > 0'], columns=['C', 'D'])
expected = df[df.A > 0].reindex(columns=['C', 'D'])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self):
with ensure_clean_store(self.path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300), A=np.random.randn(300)))
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['ts', 'A'])
result = store.select('df', [Term("ts>=Timestamp('2012-02-01')")])
expected = df[df.ts >= Timestamp('2012-02-01')]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5,2), columns =['A','B'])
df['object'] = 'foo'
df.ix[4:5,'object'] = 'bar'
df['boolv'] = df['A'] > 0
_maybe_remove(store, 'df')
store.append('df', df, data_columns = True)
expected = df[df.boolv == True].reindex(columns=['A','boolv'])
for v in [True,'true',1]:
result = store.select('df', Term('boolv == %s' % str(v)), columns = ['A','boolv'])
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False ].reindex(columns=['A','boolv'])
for v in [False,'false',0]:
result = store.select('df', Term('boolv == %s' % str(v)), columns = ['A','boolv'])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, 'df_int')
store.append('df_int', df)
result = store.select(
'df_int', [Term("index<10"), Term("columns=['A']")])
expected = df.reindex(index=list(df.index)[0:10],columns=['A'])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(dict(A=np.random.rand(
20), B=np.random.rand(20), index=np.arange(20, dtype='f8')))
_maybe_remove(store, 'df_float')
store.append('df_float', df)
result = store.select(
'df_float', [Term("index<10.0"), Term("columns=['A']")])
expected = df.reindex(index=list(df.index)[0:10],columns=['A'])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(self.path) as store:
# floats w/o NaN
df = DataFrame(dict(cols = range(11), values = range(11)),dtype='float64')
df['cols'] = (df['cols']+10).apply(str)
store.append('df1',df,data_columns=True)
result = store.select(
'df1', where='values>2.0')
expected = df[df['values']>2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df['values']>2.0]
store.append('df2',df,data_columns=True,index=False)
result = store.select(
'df2', where='values>2.0')
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
#store.append('df3',df,data_columns=True)
#result = store.select(
# 'df3', where='values>2.0')
#tm.assert_frame_equal(expected, result)
# a float column with a NaN not in the first position is ok too
df = DataFrame(dict(cols = range(11), values = range(11)),dtype='float64')
df['cols'] = (df['cols']+10).apply(str)
df.iloc[1] = np.nan
expected = df[df['values']>2.0]
store.append('df4',df,data_columns=True)
result = store.select(
'df4', where='values>2.0')
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self):
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300),
A=np.random.randn(300),
B=range(300),
users = ['a']*50 + ['b']*50 + ['c']*100 + ['a%03d' % i for i in range(100)]))
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['ts', 'A', 'B', 'users'])
# regular select
result = store.select('df', [Term("ts>=Timestamp('2012-02-01')")])
expected = df[df.ts >= Timestamp('2012-02-01')]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select('df', [Term("ts>=Timestamp('2012-02-01') & users=['a','b','c']")])
expected = df[ (df.ts >= Timestamp('2012-02-01')) & df.users.isin(['a','b','c']) ]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = [ 'a','b','c' ] + [ 'a%03d' % i for i in range(60) ]
result = store.select('df', [Term("ts>=Timestamp('2012-02-01')"),Term('users=selector')])
expected = df[ (df.ts >= Timestamp('2012-02-01')) & df.users.isin(selector) ]
tm.assert_frame_equal(expected, result)
selector = range(100,200)
result = store.select('df', [Term('B=selector')])
expected = df[ df.B.isin(selector) ]
tm.assert_frame_equal(expected, result)
self.assert_(len(result) == 100)
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select('df', [Term('ts=selector')])
expected = df[ df.ts.isin(selector.values) ]
tm.assert_frame_equal(expected, result)
self.assert_(len(result) == 100)
def test_select_iterator(self):
# single table
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, 'df')
store.append('df', df)
expected = store.select('df')
results = []
for s in store.select('df',iterator=True):
results.append(s)
result = concat(results)
tm.assert_frame_equal(expected, result)
results = []
for s in store.select('df',chunksize=100):
results.append(s)
self.assert_(len(results) == 5)
result = concat(results)
tm.assert_frame_equal(expected, result)
results = []
for s in store.select('df',chunksize=150):
results.append(s)
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(self.path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path,'df_non_table')
self.assertRaises(TypeError, read_hdf, path,'df_non_table',chunksize=100)
self.assertRaises(TypeError, read_hdf, path,'df_non_table',iterator=True)
with ensure_clean_path(self.path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path,'df',format='table')
results = []
for x in read_hdf(path,'df',chunksize=100):
results.append(x)
self.assert_(len(results) == 5)
result = concat(results)
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path,'df'))
# multiple
with ensure_clean_store(self.path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append('df1',df1,data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
store.append('df2',df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(
['df1', 'df2'], selector='df1')
results = []
for s in store.select_as_multiple(
['df1', 'df2'], selector='df1', chunksize=150):
results.append(s)
result = concat(results)
tm.assert_frame_equal(expected, result)
# where selection
#expected = store.select_as_multiple(
# ['df1', 'df2'], where= Term('A>0'), selector='df1')
#results = []
#for s in store.select_as_multiple(
# ['df1', 'df2'], where= Term('A>0'), selector='df1', chunksize=25):
# results.append(s)
#result = concat(results)
#tm.assert_frame_equal(expected, result)
def test_retain_index_attributes(self):
# GH 3499, losing frequency info on index recreation
df = DataFrame(dict(A = Series(lrange(3),
index=date_range('2000-1-1',periods=3,freq='H'))))
with ensure_clean_store(self.path) as store:
_maybe_remove(store,'data')
store.put('data', df, format='table')
result = store.get('data')
tm.assert_frame_equal(df,result)
for attr in ['freq','tz','name']:
for idx in ['index','columns']:
self.assert_(getattr(getattr(df,idx),attr,None) == getattr(getattr(result,idx),attr,None))
# try to append a table with a different frequency
with tm.assert_produces_warning(expected_warning=AttributeConflictWarning):
df2 = DataFrame(dict(A = Series(lrange(3),
index=date_range('2002-1-1',periods=3,freq='D'))))
store.append('data',df2)
self.assert_(store.get_storer('data').info['index']['freq'] is None)
# this is ok
_maybe_remove(store,'df2')
df2 = DataFrame(dict(A = Series(lrange(3),
index=[Timestamp('20010101'),Timestamp('20010102'),Timestamp('20020101')])))
store.append('df2',df2)
df3 = DataFrame(dict(A = Series(lrange(3),index=date_range('2002-1-1',periods=3,freq='D'))))
store.append('df2',df3)
def test_retain_index_attributes2(self):
with ensure_clean_path(self.path) as path:
with tm.assert_produces_warning(expected_warning=AttributeConflictWarning):
df = DataFrame(dict(A = Series(lrange(3), index=date_range('2000-1-1',periods=3,freq='H'))))
df.to_hdf(path,'data',mode='w',append=True)
df2 = DataFrame(dict(A = Series(lrange(3), index=date_range('2002-1-1',periods=3,freq='D'))))
df2.to_hdf(path,'data',append=True)
idx = date_range('2000-1-1',periods=3,freq='H')
idx.name = 'foo'
df = DataFrame(dict(A = Series(lrange(3), index=idx)))
df.to_hdf(path,'data',mode='w',append=True)
self.assert_(read_hdf(path,'data').index.name == 'foo')
with tm.assert_produces_warning(expected_warning=AttributeConflictWarning):
idx2 = date_range('2001-1-1',periods=3,freq='H')
idx2.name = 'bar'
df2 = DataFrame(dict(A = Series(lrange(3), index=idx2)))
df2.to_hdf(path,'data',append=True)
self.assert_(read_hdf(path,'data').index.name is None)
def test_panel_select(self):
wp = tm.makePanel()
with ensure_clean_store(self.path) as store:
store.put('wp', wp, format='table')
date = wp.major_axis[len(wp.major_axis) // 2]
crit1 = ('major_axis>=date')
crit2 = ("minor_axis=['A', 'D']")
result = store.select('wp', [crit1, crit2])
expected = wp.truncate(before=date).reindex(minor=['A', 'D'])
assert_panel_equal(result, expected)
result = store.select(
'wp', ['major_axis>="20000124"', ("minor_axis=['A', 'B']")])
expected = wp.truncate(before='20000124').reindex(minor=['A', 'B'])
assert_panel_equal(result, expected)
def test_frame_select(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('frame', df,format='table')
date = df.index[len(df) // 2]
crit1 = Term('index>=date')
crit2 = ("columns=['A', 'D']")
crit3 = ('columns=A')
result = store.select('frame', [crit1, crit2])
expected = df.ix[date:, ['A', 'D']]
tm.assert_frame_equal(result, expected)
result = store.select('frame', [crit3])
expected = df.ix[:, ['A']]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append('df_time', df)
self.assertRaises(
ValueError, store.select, 'df_time', [Term("index>0")])
# can't select if not written as table
# store['frame'] = df
# self.assertRaises(ValueError, store.select,
# 'frame', [crit1, crit2])
def test_frame_select_complex(self):
# select via complex criteria
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.loc[df.index[0:4],'string'] = 'bar'
with ensure_clean_store(self.path) as store:
store.put('df', df, table=True, data_columns=['string'])
# empty
result = store.select('df', 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index>df.index[3]) & (df.string=='bar')]
tm.assert_frame_equal(result, expected)
result = store.select('df', 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index>df.index[3]) & (df.string=='foo')]
tm.assert_frame_equal(result, expected)
# or
result = store.select('df', 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index>df.index[3]) | (df.string=='bar')]
tm.assert_frame_equal(result, expected)
result = store.select('df', '(index>df.index[3] & index<=df.index[6]) | string="bar"')
expected = df.loc[((df.index>df.index[3]) & (df.index<=df.index[6])) | (df.string=='bar')]
tm.assert_frame_equal(result, expected)
# invert
result = store.select('df', 'string!="bar"')
expected = df.loc[df.string!='bar']
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
self.assertRaises(NotImplementedError, store.select, 'df', '~(string="bar")')
# invert ok for filters
result = store.select('df', "~(columns=['A','B'])")
expected = df.loc[:,df.columns-['A','B']]
tm.assert_frame_equal(result, expected)
# in
result = store.select('df', "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index>df.index[3]].reindex(columns=['A','B'])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(self):
with ensure_clean_path(['parms.hdf','hist.hdf']) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({ 'A' : [1,1,2,2,3] })
parms.to_hdf(pp,'df',mode='w',format='table',data_columns=['A'])
selection = read_hdf(pp,'df',where='A=[2,3]')
hist = DataFrame(np.random.randn(25,1),columns=['data'],
index=MultiIndex.from_tuples([ (i,j) for i in range(5) for j in range(5) ],
names=['l1','l2']))
hist.to_hdf(hh,'df',mode='w',format='table')
expected = read_hdf(hh,'df',where=Term('l1','=',[2,3,4]))
# list like
result = read_hdf(hh,'df',where=Term('l1','=',selection.index.tolist()))
assert_frame_equal(result, expected)
l = selection.index.tolist()
# scope with list-like
store = HDFStore(hh)
result = store.select('df',where='l1=l')
assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh,'df',where='l1=l')
assert_frame_equal(result, expected)
# index
index = selection.index
result = read_hdf(hh,'df',where='l1=index')
assert_frame_equal(result, expected)
result = read_hdf(hh,'df',where='l1=selection.index')
assert_frame_equal(result, expected)
result = read_hdf(hh,'df',where='l1=selection.index.tolist()')
assert_frame_equal(result, expected)
result = read_hdf(hh,'df',where='l1=list(selection.index)')
assert_frame_equal(result, expected)
# scope with index
store = HDFStore(hh)
result = store.select('df',where='l1=index')
assert_frame_equal(result, expected)
result = store.select('df',where='l1=selection.index')
assert_frame_equal(result, expected)
result = store.select('df',where='l1=selection.index.tolist()')
assert_frame_equal(result, expected)
result = store.select('df',where='l1=list(selection.index)')
assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(self):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('df', df, table=True)
# not implemented
self.assertRaises(NotImplementedError, store.select, 'df', "columns=['A'] | columns=['B']")
# in theory we could deal with this
self.assertRaises(NotImplementedError, store.select, 'df', "columns=['A','B'] & columns=['C']")
def test_string_select(self):
# GH 2973
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df['x'] = 'none'
df.ix[2:7,'x'] = ''
store.append('df',df,data_columns=['x'])
result = store.select('df',Term('x=none'))
expected = df[df.x == 'none']
assert_frame_equal(result,expected)
try:
result = store.select('df',Term('x!=none'))
expected = df[df.x != 'none']
assert_frame_equal(result,expected)
except Exception as detail:
print("[{0}]".format(detail))
print(store)
print(expected)
df2 = df.copy()
df2.loc[df2.x=='','x'] = np.nan
store.append('df2',df2,data_columns=['x'])
result = store.select('df2',Term('x!=none'))
expected = df2[isnull(df2.x)]
assert_frame_equal(result,expected)
# int ==/!=
df['int'] = 1
df.ix[2:7,'int'] = 2
store.append('df3',df,data_columns=['int'])
result = store.select('df3',Term('int=2'))
expected = df[df.int==2]
assert_frame_equal(result,expected)
result = store.select('df3',Term('int!=2'))
expected = df[df.int!=2]
assert_frame_equal(result,expected)
def test_read_column(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df)
# error
self.assertRaises(KeyError, store.select_column, 'df', 'foo')
def f():
store.select_column('df', 'index', where = ['index>5'])
self.assertRaises(Exception, f)
# valid
result = store.select_column('df', 'index')
tm.assert_almost_equal(result.values, Series(df.index).values)
self.assert_(isinstance(result,Series))
# not a data indexable column
self.assertRaises(
ValueError, store.select_column, 'df', 'values_block_0')
# a data column
df2 = df.copy()
df2['string'] = 'foo'
store.append('df2', df2, data_columns=['string'])
result = store.select_column('df2', 'string')
tm.assert_almost_equal(result.values, df2['string'].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3['string'] = 'foo'
df3.ix[4:6, 'string'] = np.nan
store.append('df3', df3, data_columns=['string'])
result = store.select_column('df3', 'string')
tm.assert_almost_equal(result.values, df3['string'].values)
def test_coordinates(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df)
# all
c = store.select_as_coordinates('df')
assert((c.values == np.arange(len(df.index))).all() == True)
# get coordinates back & test vs frame
_maybe_remove(store, 'df')
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
store.append('df', df)
c = store.select_as_coordinates('df', ['index<3'])
assert((c.values == np.arange(3)).all() == True)
result = store.select('df', where=c)
expected = df.ix[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates('df', ['index>=3', 'index<=4'])
assert((c.values == np.arange(2) + 3).all() == True)
result = store.select('df', where=c)
expected = df.ix[3:4, :]
tm.assert_frame_equal(result, expected)
self.assert_(isinstance(c, Index))
# multiple tables
_maybe_remove(store, 'df1')
_maybe_remove(store, 'df2')
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
store.append('df1', df1, data_columns=['A', 'B'])
store.append('df2', df2)
c = store.select_as_coordinates('df1', ['A>0', 'B>0'])
df1_result = store.select('df1', c)
df2_result = store.select('df2', c)
result = concat([df1_result, df2_result], axis=1)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# pass array/mask as the coordinates
with ensure_clean_store(self.path) as store:
df = DataFrame(np.random.randn(1000,2),index=date_range('20000101',periods=1000))
store.append('df',df)
c = store.select_column('df','index')
where = c[DatetimeIndex(c).month==5].index
expected = df.iloc[where]
# locations
result = store.select('df',where=where)
tm.assert_frame_equal(result,expected)
# boolean
result = store.select('df',where=where)
tm.assert_frame_equal(result,expected)
# invalid
self.assertRaises(ValueError, store.select, 'df',where=np.arange(len(df),dtype='float64'))
self.assertRaises(ValueError, store.select, 'df',where=np.arange(len(df)+1))
self.assertRaises(ValueError, store.select, 'df',where=np.arange(len(df)),start=5)
self.assertRaises(ValueError, store.select, 'df',where=np.arange(len(df)),start=5,stop=10)
# list
df = DataFrame(np.random.randn(10,2))
store.append('df2',df)
result = store.select('df2',where=[0,3,5])
expected = df.iloc[[0,3,5]]
tm.assert_frame_equal(result,expected)
# boolean
where = [True] * 10
where[-2] = False
result = store.select('df2',where=where)
expected = df.loc[where]
tm.assert_frame_equal(result,expected)
def test_append_to_multiple(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
# exceptions
self.assertRaises(ValueError, store.append_to_multiple,
{'df1': ['A', 'B'], 'df2': None}, df, selector='df3')
self.assertRaises(ValueError, store.append_to_multiple,
{'df1': None, 'df2': None}, df, selector='df3')
self.assertRaises(
ValueError, store.append_to_multiple, 'df1', df, 'df1')
# regular operation
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1')
result = store.select_as_multiple(
['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')
expected = df[(df.A > 0) & (df.B > 0)]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple_dropna(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df1.ix[1, ['A', 'B']] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
# dropna=True should guarantee rows are synchronized
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1',
dropna=True)
result = store.select_as_multiple(['df1', 'df2'])
expected = df.dropna()
tm.assert_frame_equal(result, expected)
tm.assert_index_equal(store.select('df1').index,
store.select('df2').index)
# dropna=False shouldn't synchronize row indexes
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1',
dropna=False)
self.assertRaises(
ValueError, store.select_as_multiple, ['df1', 'df2'])
assert not store.select('df1').index.equals(
store.select('df2').index)
def test_select_as_multiple(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
with ensure_clean_store(self.path) as store:
# no tables stored
self.assertRaises(Exception, store.select_as_multiple,
None, where=['A>0', 'B>0'], selector='df1')
store.append('df1', df1, data_columns=['A', 'B'])
store.append('df2', df2)
# exceptions
self.assertRaises(Exception, store.select_as_multiple,
None, where=['A>0', 'B>0'], selector='df1')
self.assertRaises(Exception, store.select_as_multiple,
[None], where=['A>0', 'B>0'], selector='df1')
self.assertRaises(TypeError, store.select_as_multiple,
['df1','df3'], where=['A>0', 'B>0'], selector='df1')
self.assertRaises(KeyError, store.select_as_multiple,
['df3'], where=['A>0', 'B>0'], selector='df1')
self.assertRaises(ValueError, store.select_as_multiple,
['df1','df2'], where=['A>0', 'B>0'], selector='df4')
# default select
result = store.select('df1', ['A>0', 'B>0'])
expected = store.select_as_multiple(
['df1'], where=['A>0', 'B>0'], selector='df1')
tm.assert_frame_equal(result, expected)
expected = store.select_as_multiple(
'df1', where=['A>0', 'B>0'], selector='df1')
tm.assert_frame_equal(result, expected)
# multiple
result = store.select_as_multiple(
['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# multiple (diff selector)
result = store.select_as_multiple(['df1', 'df2'], where=[Term(
'index>df2.index[4]')], selector='df2')
expected = concat([df1, df2], axis=1)
expected = expected[5:]
tm.assert_frame_equal(result, expected)
# test exception for diff rows
store.append('df3', tm.makeTimeDataFrame(nper=50))
self.assertRaises(ValueError, store.select_as_multiple,
['df1','df3'], where=['A>0', 'B>0'], selector='df1')
def test_start_stop(self):
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
store.append('df', df)
result = store.select(
'df', [Term("columns=['A']")], start=0, stop=5)
expected = df.ix[0:4, ['A']]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select(
'df', [Term("columns=['A']")], start=30, stop=40)
assert(len(result) == 0)
assert(type(result) == DataFrame)
def test_select_filter_corner(self):
df = DataFrame(np.random.randn(50, 100))
df.index = ['%.3d' % c for c in df.index]
df.columns = ['%.3d' % c for c in df.columns]
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='table')
crit = Term('columns=df.columns[:75]')
result = store.select('frame', [crit])
tm.assert_frame_equal(result, df.ix[:, df.columns[:75]])
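# helper: store the object in a fresh store, read it back and compare with the given comparator (optionally using the default compressor)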
def _check_roundtrip(self, obj, comparator, compression=False, **kwargs):
options = {}
if compression:
options['complib'] = _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store['obj'] = obj
retrieved = store['obj']
comparator(retrieved, obj, **kwargs)
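# helper: round-trip the object twice, checking that the value read back can itself be stored and re-read unchanged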
def _check_double_roundtrip(self, obj, comparator, compression=False,
**kwargs):
options = {}
if compression:
options['complib'] = compression or _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store['obj'] = obj
retrieved = store['obj']
comparator(retrieved, obj, **kwargs)
store['obj'] = retrieved
again = store['obj']
comparator(again, obj, **kwargs)
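# helper: same round-trip check, but writing the object in table format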
def _check_roundtrip_table(self, obj, comparator, compression=False):
options = {}
if compression:
options['complib'] = _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store.put('obj', obj, format='table')
retrieved = store['obj']
# sorted_obj = _test_sort(obj)
comparator(retrieved, obj)
def test_multiple_open_close(self):
# GH 4409, open & close multiple times
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path,'df',mode='w',format='table')
# single
store = HDFStore(path)
self.assert_('CLOSED' not in str(store))
self.assert_(store.is_open)
store.close()
self.assert_('CLOSED' in str(store))
self.assert_(not store.is_open)
with ensure_clean_path(self.path) as path:
if pytables._table_file_open_policy_is_strict:
# multiples
store1 = HDFStore(path)
def f():
HDFStore(path)
self.assertRaises(ValueError, f)
store1.close()
else:
# multiples
store1 = HDFStore(path)
store2 = HDFStore(path)
self.assert_('CLOSED' not in str(store1))
self.assert_('CLOSED' not in str(store2))
self.assert_(store1.is_open)
self.assert_(store2.is_open)
store1.close()
self.assert_('CLOSED' in str(store1))
self.assert_(not store1.is_open)
self.assert_('CLOSED' not in str(store2))
self.assert_(store2.is_open)
store2.close()
self.assert_('CLOSED' in str(store1))
self.assert_('CLOSED' in str(store2))
self.assert_(not store1.is_open)
self.assert_(not store2.is_open)
# nested close
store = HDFStore(path,mode='w')
store.append('df',df)
store2 = HDFStore(path)
store2.append('df2',df)
store2.close()
self.assert_('CLOSED' in str(store2))
self.assert_(not store2.is_open)
store.close()
self.assert_('CLOSED' in str(store))
self.assert_(not store.is_open)
# double closing
store = HDFStore(path,mode='w')
store.append('df', df)
store2 = HDFStore(path)
store.close()
self.assert_('CLOSED' in str(store))
self.assert_(not store.is_open)
store2.close()
self.assert_('CLOSED' in str(store2))
self.assert_(not store2.is_open)
# ops on a closed store
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path,'df',mode='w',format='table')
store = HDFStore(path)
store.close()
self.assertRaises(ClosedFileError, store.keys)
self.assertRaises(ClosedFileError, lambda : 'df' in store)
self.assertRaises(ClosedFileError, lambda : len(store))
self.assertRaises(ClosedFileError, lambda : store['df'])
self.assertRaises(ClosedFileError, lambda : store.df)
self.assertRaises(ClosedFileError, store.select, 'df')
self.assertRaises(ClosedFileError, store.get, 'df')
self.assertRaises(ClosedFileError, store.append, 'df2', df)
self.assertRaises(ClosedFileError, store.put, 'df3', df)
self.assertRaises(ClosedFileError, store.get_storer, 'df2')
self.assertRaises(ClosedFileError, store.remove, 'df2')
def f():
store.select('df')
tm.assertRaisesRegexp(ClosedFileError, 'file is not open', f)
def test_pytables_native_read(self):
try:
store = HDFStore(tm.get_data_path('legacy_hdf/pytables_native.h5'), 'r')
d2 = store['detector/readout']
assert isinstance(d2, DataFrame)
finally:
safe_close(store)
try:
store = HDFStore(tm.get_data_path('legacy_hdf/pytables_native2.h5'), 'r')
str(store)
d1 = store['detector']
assert isinstance(d1, DataFrame)
finally:
safe_close(store)
def test_legacy_read(self):
try:
store = HDFStore(tm.get_data_path('legacy_hdf/legacy.h5'), 'r')
store['a']
store['b']
store['c']
store['d']
finally:
safe_close(store)
def test_legacy_table_read(self):
# legacy table types
try:
store = HDFStore(tm.get_data_path('legacy_hdf/legacy_table.h5'), 'r')
store.select('df1')
store.select('df2')
store.select('wp1')
# force the frame
store.select('df2', typ='legacy_frame')
# old version warning
with tm.assert_produces_warning(expected_warning=IncompatibilityWarning):
self.assertRaises(
Exception, store.select, 'wp1', Term('minor_axis=B'))
df2 = store.select('df2')
result = store.select('df2', Term('index>df2.index[2]'))
expected = df2[df2.index > df2.index[2]]
assert_frame_equal(expected, result)
finally:
safe_close(store)
def test_legacy_0_10_read(self):
# legacy from 0.10
try:
store = HDFStore(tm.get_data_path('legacy_hdf/legacy_0.10.h5'), 'r')
str(store)
for k in store.keys():
store.select(k)
finally:
safe_close(store)
def test_legacy_0_11_read(self):
# legacy from 0.11
try:
path = os.path.join('legacy_hdf', 'legacy_table_0.11.h5')
store = HDFStore(tm.get_data_path(path), 'r')
str(store)
assert 'df' in store
assert 'df1' in store
assert 'mi' in store
df = store.select('df')
df1 = store.select('df1')
mi = store.select('mi')
assert isinstance(df, DataFrame)
assert isinstance(df1, DataFrame)
assert isinstance(mi, DataFrame)
finally:
safe_close(store)
def test_copy(self):
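# helper: copy a store (optionally a subset of keys) to a temporary file and verify keys, row counts and index propagation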
def do_copy(f = None, new_f = None, keys = None, propindexes = True, **kwargs):
try:
if f is None:
f = tm.get_data_path(os.path.join('legacy_hdf',
'legacy_0.10.h5'))
store = HDFStore(f, 'r')
if new_f is None:
import tempfile
fd, new_f = tempfile.mkstemp()
tstore = store.copy(new_f, keys = keys, propindexes = propindexes, **kwargs)
# check keys
if keys is None:
keys = store.keys()
self.assert_(set(keys) == set(tstore.keys()))
# check indices & nrows
for k in tstore.keys():
if tstore.get_storer(k).is_table:
new_t = tstore.get_storer(k)
orig_t = store.get_storer(k)
self.assert_(orig_t.nrows == new_t.nrows)
# check propindexes
if propindexes:
for a in orig_t.axes:
if a.is_indexed:
self.assert_(new_t[a.name].is_indexed == True)
finally:
safe_close(store)
safe_close(tstore)
try:
os.close(fd)
except:
pass
safe_remove(new_f)
do_copy()
do_copy(keys = ['/a','/b','/df1_mixed'])
do_copy(propindexes = False)
# new table
df = tm.makeDataFrame()
try:
st = HDFStore(self.path)
st.append('df', df, data_columns = ['A'])
st.close()
do_copy(f = self.path)
do_copy(f = self.path, propindexes = False)
finally:
safe_remove(self.path)
def test_legacy_table_write(self):
raise nose.SkipTest("skipping for now")
store = HDFStore(tm.get_data_path('legacy_hdf/legacy_table_%s.h5' % pandas.__version__), 'a')
df = tm.makeDataFrame()
wp = tm.makePanel()
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
store.append('mi', df)
df = DataFrame(dict(A = 'foo', B = 'bar'),index=lrange(10))
store.append('df', df, data_columns = ['B'], min_itemsize={'A' : 200 })
store.append('wp', wp)
store.close()
def test_store_datetime_fractional_secs(self):
with ensure_clean_store(self.path) as store:
dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
series = Series([0], [dt])
store['a'] = series
self.assertEquals(store['a'].index[0], dt)
def test_tseries_indices_series(self):
with ensure_clean_store(self.path) as store:
idx = tm.makeDateIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store['a'] = ser
result = store['a']
assert_series_equal(result, ser)
self.assertEquals(type(result.index), type(ser.index))
self.assertEquals(result.index.freq, ser.index.freq)
idx = tm.makePeriodIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store['a'] = ser
result = store['a']
assert_series_equal(result, ser)
self.assertEquals(type(result.index), type(ser.index))
self.assertEquals(result.index.freq, ser.index.freq)
def test_tseries_indices_frame(self):
with ensure_clean_store(self.path) as store:
idx = tm.makeDateIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
store['a'] = df
result = store['a']
assert_frame_equal(result, df)
self.assertEquals(type(result.index), type(df.index))
self.assertEquals(result.index.freq, df.index.freq)
idx = tm.makePeriodIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), idx)
store['a'] = df
result = store['a']
assert_frame_equal(result, df)
self.assertEquals(type(result.index), type(df.index))
self.assertEquals(result.index.freq, df.index.freq)
def test_unicode_index(self):
unicode_values = [u('\u03c3'), u('\u03c3\u03c3')]
def f():
s = Series(np.random.randn(len(unicode_values)), unicode_values)
self._check_roundtrip(s, tm.assert_series_equal)
compat_assert_produces_warning(PerformanceWarning,f)
def test_store_datetime_mixed(self):
df = DataFrame(
{'a': [1, 2, 3], 'b': [1., 2., 3.], 'c': ['a', 'b', 'c']})
ts = tm.makeTimeSeries()
df['d'] = ts.index[:3]
self._check_roundtrip(df, tm.assert_frame_equal)
# def test_cant_write_multiindex_table(self):
# # for now, #1848
# df = DataFrame(np.random.randn(10, 4),
# index=[np.arange(5).repeat(2),
# np.tile(np.arange(2), 5)])
# self.assertRaises(Exception, store.put, 'foo', df, format='table')
def test_append_with_diff_col_name_types_raises_value_error(self):
df = DataFrame(np.random.randn(10, 1))
df2 = DataFrame({'a': np.random.randn(10)})
df3 = DataFrame({(1, 2): np.random.randn(10)})
df4 = DataFrame({('1', 2): np.random.randn(10)})
df5 = DataFrame({('1', 2, object): np.random.randn(10)})
with ensure_clean_store(self.path) as store:
name = 'df_%s' % tm.rands(10)
store.append(name, df)
for d in (df2, df3, df4, df5):
with tm.assertRaises(ValueError):
store.append(name, d)
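# module-level helper: return the object reindexed with a sorted index (rows for a DataFrame, major_axis for a Panel)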
def _test_sort(obj):
if isinstance(obj, DataFrame):
return obj.reindex(sorted(obj.index))
elif isinstance(obj, Panel):
return obj.reindex(major=sorted(obj.major_axis))
else:
raise ValueError('type not supported here')
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| [] | [] | ["TZ"] | [] | ["TZ"] | python | 1 | 0 |
zeppelin-plugins/launcher/yarn/src/main/java/org/apache/zeppelin/interpreter/launcher/YarnRemoteInterpreterProcess.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.zeppelin.interpreter.launcher;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Records;
import org.apache.zeppelin.conf.ZeppelinConfiguration;
import org.apache.zeppelin.interpreter.remote.RemoteInterpreterProcess;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.MalformedURLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
/**
* Start interpreter in yarn container.
*/
public class YarnRemoteInterpreterProcess extends RemoteInterpreterProcess {
private static Logger LOGGER = LoggerFactory.getLogger(YarnRemoteInterpreterProcess.class);
private String host;
private int port = -1;
private ZeppelinConfiguration zConf;
private final InterpreterLaunchContext launchContext;
private final Properties properties;
private final Map<String, String> envs;
private AtomicBoolean isYarnAppRunning = new AtomicBoolean(false);
private String errorMessage;
/************** Hadoop related **************************/
private Configuration hadoopConf;
private FileSystem fs;
private FileSystem localFs;
private YarnClient yarnClient;
private ApplicationId appId;
private Path stagingDir;
// App files are world-readable and owner-writable -> rw-r--r--
private static final FsPermission APP_FILE_PERMISSION =
FsPermission.createImmutable(Short.parseShort("644", 8));
public YarnRemoteInterpreterProcess(
InterpreterLaunchContext launchContext,
Properties properties,
Map<String, String> envs,
int connectTimeout,
int connectionPoolSize) {
super(connectTimeout, connectionPoolSize, launchContext.getIntpEventServerHost(), launchContext.getIntpEventServerPort());
this.zConf = ZeppelinConfiguration.create();
this.launchContext = launchContext;
this.properties = properties;
this.envs = envs;
yarnClient = YarnClient.createYarnClient();
this.hadoopConf = new YarnConfiguration();
// Add core-site.xml and yarn-site.xml. This is for integration tests that use MiniHadoopCluster.
if (properties.containsKey("HADOOP_CONF_DIR") &&
!org.apache.commons.lang3.StringUtils.isBlank(properties.getProperty("HADOOP_CONF_DIR"))) {
File hadoopConfDir = new File(properties.getProperty("HADOOP_CONF_DIR"));
if (hadoopConfDir.exists() && hadoopConfDir.isDirectory()) {
File coreSite = new File(hadoopConfDir, "core-site.xml");
try {
this.hadoopConf.addResource(coreSite.toURI().toURL());
} catch (MalformedURLException e) {
LOGGER.warn("Fail to add core-site.xml: " + coreSite.getAbsolutePath(), e);
}
File yarnSite = new File(hadoopConfDir, "yarn-site.xml");
try {
this.hadoopConf.addResource(yarnSite.toURI().toURL());
} catch (MalformedURLException e) {
LOGGER.warn("Fail to add yarn-site.xml: " + yarnSite.getAbsolutePath(), e);
}
} else {
throw new RuntimeException("HADOOP_CONF_DIR: " + hadoopConfDir.getAbsolutePath() +
" doesn't exist or is not a directory");
}
}
yarnClient.init(this.hadoopConf);
yarnClient.start();
try {
this.fs = FileSystem.get(hadoopConf);
this.localFs = FileSystem.getLocal(hadoopConf);
} catch (IOException e) {
throw new RuntimeException("Fail to create FileSystem", e);
}
}
@Override
public void processStarted(int port, String host) {
this.port = port;
this.host = host;
}
@Override
public String getErrorMessage() {
return this.errorMessage;
}
@Override
public String getInterpreterGroupId() {
return launchContext.getInterpreterGroupId();
}
@Override
public String getInterpreterSettingName() {
return launchContext.getInterpreterSettingName();
}
@Override
public void start(String userName) throws IOException {
try {
LOGGER.info("Submitting zeppelin-interpreter app to yarn");
final YarnClientApplication yarnApplication = yarnClient.createApplication();
final GetNewApplicationResponse appResponse = yarnApplication.getNewApplicationResponse();
this.appId = appResponse.getApplicationId();
ApplicationSubmissionContext appContext = yarnApplication.getApplicationSubmissionContext();
appContext = createApplicationSubmissionContext(appContext);
yarnClient.submitApplication(appContext);
long start = System.currentTimeMillis();
ApplicationReport appReport = getApplicationReport(appId);
while (appReport.getYarnApplicationState() != YarnApplicationState.FAILED &&
appReport.getYarnApplicationState() != YarnApplicationState.FINISHED &&
appReport.getYarnApplicationState() != YarnApplicationState.KILLED &&
appReport.getYarnApplicationState() != YarnApplicationState.RUNNING) {
LOGGER.info("Wait for zeppelin interpreter yarn app to be started");
Thread.sleep(2000);
if ((System.currentTimeMillis() - start) > getConnectTimeout()) {
yarnClient.killApplication(this.appId);
throw new IOException("Launching zeppelin interpreter in yarn is time out, kill it now");
}
appReport = getApplicationReport(appId);
}
if (appReport.getYarnApplicationState() != YarnApplicationState.RUNNING) {
this.errorMessage = appReport.getDiagnostics();
throw new Exception("Failed to submit application to YARN"
+ ", applicationId=" + appId
+ ", diagnostics=" + appReport.getDiagnostics());
}
isYarnAppRunning.set(true);
} catch (Exception e) {
LOGGER.error("Fail to launch yarn interpreter process", e);
throw new IOException(e);
} finally {
if (stagingDir != null) {
this.fs.delete(stagingDir, true);
}
}
}
private ApplicationReport getApplicationReport(ApplicationId appId) throws YarnException, IOException {
ApplicationReport report = yarnClient.getApplicationReport(appId);
if (report.getYarnApplicationState() == null) {
// The state can be null when the ResourceManager does not know about the app but the YARN
// application history server has an incomplete entry for it. Treat this scenario as if the
// application does not exist, since the final app status cannot be determined. This also
// matches the behavior for this scenario if the history server was not configured.
throw new ApplicationNotFoundException("YARN reports no state for application "
+ appId);
}
return report;
}
private ApplicationSubmissionContext createApplicationSubmissionContext(
ApplicationSubmissionContext appContext) throws Exception {
setResources(appContext);
setPriority(appContext);
setQueue(appContext);
appContext.setApplicationId(appId);
setApplicationName(appContext);
appContext.setApplicationType("ZEPPELIN INTERPRETER");
appContext.setMaxAppAttempts(1);
ContainerLaunchContext amContainer = setUpAMLaunchContext();
appContext.setAMContainerSpec(amContainer);
appContext.setCancelTokensWhenComplete(true);
return appContext;
}
private ContainerLaunchContext setUpAMLaunchContext() throws IOException {
ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
// Set the resources to localize
this.stagingDir = new Path(fs.getHomeDirectory() + "/.zeppelinStaging", appId.toString());
Map<String, LocalResource> localResources = new HashMap<>();
File interpreterZip = createInterpreterZip();
Path srcPath = localFs.makeQualified(new Path(interpreterZip.toURI()));
Path destPath = copyFileToRemote(stagingDir, srcPath, (short) 1);
addResource(fs, destPath, localResources, LocalResourceType.ARCHIVE, "zeppelin");
FileUtils.forceDelete(interpreterZip);
// TODO(zjffdu) Should not add interpreter specific logic here.
if (launchContext.getInterpreterSettingGroup().equals("flink")) {
File flinkZip = createFlinkZip();
srcPath = localFs.makeQualified(new Path(flinkZip.toURI()));
destPath = copyFileToRemote(stagingDir, srcPath, (short) 1);
addResource(fs, destPath, localResources, LocalResourceType.ARCHIVE, "flink");
FileUtils.forceDelete(flinkZip);
String hiveConfDir = launchContext.getProperties().getProperty("HIVE_CONF_DIR");
if (!org.apache.commons.lang3.StringUtils.isBlank(hiveConfDir)) {
File hiveConfZipFile = createHiveConfZip(new File(hiveConfDir));
srcPath = localFs.makeQualified(new Path(hiveConfZipFile.toURI()));
destPath = copyFileToRemote(stagingDir, srcPath, (short) 1);
addResource(fs, destPath, localResources, LocalResourceType.ARCHIVE, "hive_conf");
}
}
amContainer.setLocalResources(localResources);
// Setup the command to run the AM
List<String> vargs = new ArrayList<>();
vargs.add(ApplicationConstants.Environment.PWD.$() + "/zeppelin/bin/interpreter.sh");
vargs.add("-d");
vargs.add(ApplicationConstants.Environment.PWD.$() + "/zeppelin/interpreter/"
+ launchContext.getInterpreterSettingGroup());
vargs.add("-c");
vargs.add(launchContext.getIntpEventServerHost());
vargs.add("-p");
vargs.add(launchContext.getIntpEventServerPort() + "");
vargs.add("-r");
vargs.add(zConf.getInterpreterPortRange() + "");
vargs.add("-i");
vargs.add(launchContext.getInterpreterGroupId());
vargs.add("-l");
vargs.add(ApplicationConstants.Environment.PWD.$() + "/zeppelin/" +
ZeppelinConfiguration.ConfVars.ZEPPELIN_INTERPRETER_LOCALREPO.getStringValue()
+ "/" + launchContext.getInterpreterSettingName());
vargs.add("-g");
vargs.add(launchContext.getInterpreterSettingName());
vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR +
File.separator + ApplicationConstants.STDOUT);
vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR +
File.separator + ApplicationConstants.STDERR);
// Setup ContainerLaunchContext for AM container
amContainer.setCommands(vargs);
// pass the interpreter ENV to yarn container and also add hadoop jars to CLASSPATH
populateHadoopClasspath(this.envs);
if (this.launchContext.getInterpreterSettingGroup().equals("flink")) {
// Update the Flink related env vars because all of these are different inside the yarn container
this.envs.put("FLINK_HOME", ApplicationConstants.Environment.PWD.$() + "/flink");
this.envs.put("FLINK_CONF_DIR", ApplicationConstants.Environment.PWD.$() + "/flink/conf");
this.envs.put("FLINK_LIB_DIR", ApplicationConstants.Environment.PWD.$() + "/flink/lib");
this.envs.put("FLINK_PLUGINS_DIR", ApplicationConstants.Environment.PWD.$() + "/flink/plugins");
this.envs.put("HIVE_CONF_DIR", ApplicationConstants.Environment.PWD.$() + "/hive_conf");
}
// set -Xmx
int memory = Integer.parseInt(
properties.getProperty("zeppelin.interpreter.yarn.resource.memory", "1024"));
this.envs.put("ZEPPELIN_INTP_MEM", "-Xmx" + memory + "m");
amContainer.setEnvironment(this.envs);
return amContainer;
}
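  // For illustration only (hypothetical host/port/id values): the vargs assembled above
  // roughly expand to the following AM command inside the container:
  //
  //   {{PWD}}/zeppelin/bin/interpreter.sh \
  //     -d {{PWD}}/zeppelin/interpreter/<interpreter-setting-group> \
  //     -c <intp-event-server-host> -p <intp-event-server-port> \
  //     -r <interpreter-port-range> -i <interpreter-group-id> \
  //     -l {{PWD}}/zeppelin/<local-repo>/<interpreter-setting-name> -g <interpreter-setting-name> \
  //     1><LOG_DIR>/stdout 2><LOG_DIR>/stderr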
/**
* Populate the classpath entry in the given environment map with any application
* classpath specified through the Hadoop and Yarn configurations.
*/
private void populateHadoopClasspath(Map<String, String> envs) {
List<String> yarnClassPath = Lists.newArrayList(getYarnAppClasspath());
List<String> mrClassPath = Lists.newArrayList(getMRAppClasspath());
yarnClassPath.addAll(mrClassPath);
LOGGER.info("Adding hadoop classpath: " + org.apache.commons.lang3.StringUtils.join(yarnClassPath, ":"));
for (String path : yarnClassPath) {
String newValue = path;
if (envs.containsKey(ApplicationConstants.Environment.CLASSPATH.name())) {
newValue = envs.get(ApplicationConstants.Environment.CLASSPATH.name()) +
ApplicationConstants.CLASS_PATH_SEPARATOR + newValue;
}
envs.put(ApplicationConstants.Environment.CLASSPATH.name(), newValue);
}
// set HADOOP_MAPRED_HOME explicitly, otherwise it won't work for hadoop3
// see https://stackoverflow.com/questions/50719585/unable-to-run-mapreduce-wordcount
this.envs.put("HADOOP_MAPRED_HOME", "${HADOOP_HOME}");
}
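  // Illustration (hypothetical values): if yarn.application.classpath yields
  // [$HADOOP_CONF_DIR, $HADOOP_COMMON_HOME/share/hadoop/common/*] and CLASSPATH is not yet
  // set in envs, the loop above leaves the container with
  //   CLASSPATH=$HADOOP_CONF_DIR<CPS>$HADOOP_COMMON_HOME/share/hadoop/common/*
  // where <CPS> is ApplicationConstants.CLASS_PATH_SEPARATOR; entries are appended in order.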
private String[] getYarnAppClasspath() {
String[] classpaths = hadoopConf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH);
if (classpaths == null || classpaths.length == 0) {
return getDefaultYarnApplicationClasspath();
} else {
return classpaths;
}
}
private String[] getMRAppClasspath() {
String[] classpaths = hadoopConf.getStrings("mapreduce.application.classpath");
if (classpaths == null || classpaths.length == 0) {
return getDefaultMRApplicationClasspath();
} else {
return classpaths;
}
}
private String[] getDefaultYarnApplicationClasspath() {
return YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH;
}
private String[] getDefaultMRApplicationClasspath() {
return StringUtils.getStrings(MRJobConfig.DEFAULT_MAPREDUCE_APPLICATION_CLASSPATH);
}
private void setResources(ApplicationSubmissionContext appContext) {
int memory = Integer.parseInt(
properties.getProperty("zeppelin.interpreter.yarn.resource.memory", "1024"));
int memoryOverHead = Integer.parseInt(
properties.getProperty("zeppelin.interpreter.yarn.resource.memoryOverhead", "384"));
if (memoryOverHead < memory * 0.1) {
memoryOverHead = 384;
}
int cores = Integer.parseInt(
properties.getProperty("zeppelin.interpreter.yarn.resource.cores", "1"));
final Resource resource = Resource.newInstance(memory + memoryOverHead, cores);
appContext.setResource(resource);
}
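  // Worked example (hypothetical property values): with
  //   zeppelin.interpreter.yarn.resource.memory=2048
  //   zeppelin.interpreter.yarn.resource.memoryOverhead=512
  //   zeppelin.interpreter.yarn.resource.cores=2
  // the overhead is kept (512 >= 2048 * 0.1), so the requested container resource is
  // Resource.newInstance(2048 + 512, 2), i.e. 2560 MB of memory and 2 vcores.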
private void setPriority(ApplicationSubmissionContext appContext) {
Priority pri = Records.newRecord(Priority.class);
pri.setPriority(1);
appContext.setPriority(pri);
}
private void setQueue(ApplicationSubmissionContext appContext) {
String queue = properties.getProperty("zeppelin.interpreter.yarn.queue", "default");
appContext.setQueue(queue);
}
private void setApplicationName(ApplicationSubmissionContext appContext) {
appContext.setApplicationName("Zeppelin Interpreter " + launchContext.getInterpreterGroupId());
}
/**
 * Recursively add the given file (or directory) to the zip stream.
 *
 * @param zos zip output stream to write to
 * @param srcFile file or directory to add; silently skipped if null or missing
 * @param parentDirectoryName parent entry name inside the zip, may be null or empty
 * @throws IOException if writing a zip entry fails
 */
private void addFileToZipStream(ZipOutputStream zos,
File srcFile,
String parentDirectoryName) throws IOException {
if (srcFile == null || !srcFile.exists()) {
return;
}
String zipEntryName = srcFile.getName();
if (parentDirectoryName != null && !parentDirectoryName.isEmpty()) {
zipEntryName = parentDirectoryName + "/" + srcFile.getName();
}
if (srcFile.isDirectory()) {
for (File file : srcFile.listFiles()) {
addFileToZipStream(zos, file, zipEntryName);
}
} else {
zos.putNextEntry(new ZipEntry(zipEntryName));
Files.copy(srcFile, zos);
zos.closeEntry();
}
}
/**
 * Create the zip archive for the interpreter.
 * The contents are everything under ZEPPELIN_HOME/interpreter/{interpreter_name},
 * plus ZEPPELIN_HOME/bin, ZEPPELIN_HOME/conf, the interpreter's local repo (if any)
 * and the zeppelin-interpreter-shaded jar.
 *
 * @return the created temporary zip file
 * @throws IOException if ZEPPELIN_HOME is not set or the archive cannot be written
 */
private File createInterpreterZip() throws IOException {
File interpreterArchive = File.createTempFile("zeppelin_interpreter_", ".zip", Files.createTempDir());
ZipOutputStream interpreterZipStream = new ZipOutputStream(new FileOutputStream(interpreterArchive));
interpreterZipStream.setLevel(0);
String zeppelinHomeEnv = System.getenv("ZEPPELIN_HOME");
if (org.apache.commons.lang3.StringUtils.isBlank(zeppelinHomeEnv)) {
throw new IOException("ZEPPELIN_HOME is not specified");
}
File zeppelinHome = new File(zeppelinHomeEnv);
File binDir = new File(zeppelinHome, "bin");
addFileToZipStream(interpreterZipStream, binDir, null);
File confDir = new File(zeppelinHome, "conf");
addFileToZipStream(interpreterZipStream, confDir, null);
File interpreterDir = new File(zeppelinHome, "interpreter/" + launchContext.getInterpreterSettingGroup());
addFileToZipStream(interpreterZipStream, interpreterDir, "interpreter");
File localRepoDir = new File(zConf.getInterpreterLocalRepoPath() + "/"
+ launchContext.getInterpreterSettingName());
if (localRepoDir.exists() && localRepoDir.isDirectory()) {
LOGGER.debug("Adding localRepoDir {} to interpreter zip: ", localRepoDir.getAbsolutePath());
addFileToZipStream(interpreterZipStream, localRepoDir, "local-repo");
}
// add zeppelin-interpreter-shaded jar
File[] interpreterShadedFiles = new File(zeppelinHome, "interpreter").listFiles(
file -> file.getName().startsWith("zeppelin-interpreter-shaded")
&& file.getName().endsWith(".jar"));
if (interpreterShadedFiles.length == 0) {
throw new IOException("No zeppelin-interpreter-shaded jar found under " +
zeppelinHome.getAbsolutePath() + "/interpreter");
}
if (interpreterShadedFiles.length > 1) {
throw new IOException("More than 1 zeppelin-interpreter-shaded jars found under "
+ zeppelinHome.getAbsolutePath() + "/interpreter");
}
addFileToZipStream(interpreterZipStream, interpreterShadedFiles[0], "interpreter");
interpreterZipStream.flush();
interpreterZipStream.close();
return interpreterArchive;
}
private File createFlinkZip() throws IOException {
File flinkArchive = File.createTempFile("flink_", ".zip", Files.createTempDir());
ZipOutputStream flinkZipStream = new ZipOutputStream(new FileOutputStream(flinkArchive));
flinkZipStream.setLevel(0);
String flinkHomeEnv = envs.get("FLINK_HOME");
File flinkHome = new File(flinkHomeEnv);
if (!flinkHome.exists() || !flinkHome.isDirectory()) {
throw new IOException("FLINK_HOME " + flinkHome.getAbsolutePath() +
" doesn't exist or is not a directory.");
}
for (File file : flinkHome.listFiles()) {
addFileToZipStream(flinkZipStream, file, null);
}
flinkZipStream.flush();
flinkZipStream.close();
return flinkArchive;
}
private File createHiveConfZip(File hiveConfDir) throws IOException {
File hiveConfArchive = File.createTempFile("hive_conf", ".zip", Files.createTempDir());
ZipOutputStream hiveConfZipStream = new ZipOutputStream(new FileOutputStream(hiveConfArchive));
hiveConfZipStream.setLevel(0);
if (!hiveConfDir.exists()) {
throw new IOException("HIVE_CONF_DIR " + hiveConfDir.getAbsolutePath() + " doesn't exist");
}
for (File file : hiveConfDir.listFiles()) {
addFileToZipStream(hiveConfZipStream, file, null);
}
hiveConfZipStream.flush();
hiveConfZipStream.close();
return hiveConfArchive;
}
private Path copyFileToRemote(
Path destDir,
Path srcPath,
Short replication) throws IOException {
FileSystem destFs = destDir.getFileSystem(hadoopConf);
FileSystem srcFs = srcPath.getFileSystem(hadoopConf);
Path destPath = new Path(destDir, srcPath.getName());
LOGGER.info("Uploading resource " + srcPath + " to " + destPath);
FileUtil.copy(srcFs, srcPath, destFs, destPath, false, hadoopConf);
destFs.setReplication(destPath, replication);
destFs.setPermission(destPath, APP_FILE_PERMISSION);
return destPath;
}
private void addResource(
FileSystem fs,
Path destPath,
Map<String, LocalResource> localResources,
LocalResourceType resourceType,
String link) throws IOException {
FileStatus destStatus = fs.getFileStatus(destPath);
LocalResource amJarRsrc = Records.newRecord(LocalResource.class);
amJarRsrc.setType(resourceType);
amJarRsrc.setVisibility(LocalResourceVisibility.PUBLIC);
amJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(destPath));
amJarRsrc.setTimestamp(destStatus.getModificationTime());
amJarRsrc.setSize(destStatus.getLen());
localResources.put(link, amJarRsrc);
}
@Override
public void stop() {
if (isRunning()) {
LOGGER.info("Kill interpreter process");
try {
callRemoteFunction(client -> {
client.shutdown();
return null;
});
} catch (Exception e) {
LOGGER.warn("ignore the exception when shutting down", e);
}
// Shutdown connection
shutdown();
}
yarnClient.stop();
LOGGER.info("Remote process terminated");
}
@Override
public String getHost() {
return this.host;
}
@Override
public int getPort() {
return this.port;
}
@Override
public boolean isRunning() {
return isYarnAppRunning.get();
}
}
| ["\"ZEPPELIN_HOME\""] | [] | ["ZEPPELIN_HOME"] | [] | ["ZEPPELIN_HOME"] | java | 1 | 0 |
test/functional/ios/helper/desired_capabilities.py | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Any, Dict, Optional
# Returns abs path relative to this file and not cwd
def PATH(p: str) -> str:
return os.path.abspath(os.path.join(os.path.dirname(__file__), p))
BUNDLE_ID = 'com.example.apple-samplecode.UICatalog'
def get_desired_capabilities(app: Optional[str] = None) -> Dict[str, Any]:
desired_caps: Dict[str, Any] = {
'deviceName': iphone_device_name(),
'platformName': 'iOS',
'platformVersion': '14.3',
'automationName': 'XCUITest',
'allowTouchIdEnroll': True,
'wdaLocalPort': wda_port(),
'simpleIsVisibleCheck': True,
}
if app is not None:
desired_caps['app'] = PATH(os.path.join('../../..', 'apps', app))
return desired_caps
class PytestXdistWorker:
NUMBER: Optional[str] = os.getenv('PYTEST_XDIST_WORKER')
COUNT: Optional[str] = os.getenv('PYTEST_XDIST_WORKER_COUNT') # Returns '2' if `-n 2` is passed
@staticmethod
def gw(number: int) -> str:
if PytestXdistWorker.COUNT is None:
return '0'
if number >= int(PytestXdistWorker.COUNT):
return 'gw0'
return f'gw{number}'
# If you run tests with pytest-xdist, you can run tests in parallel.
def wda_port() -> int:
if PytestXdistWorker.NUMBER == PytestXdistWorker.gw(1):
return 8101
return 8100
# Before running tests, you must have iOS simulators named 'iPhone 8 - 8100' and 'iPhone 8 - 8101'
def iphone_device_name() -> str:
if PytestXdistWorker.NUMBER == PytestXdistWorker.gw(0):
return 'iPhone 8 - 8100'
elif PytestXdistWorker.NUMBER == PytestXdistWorker.gw(1):
return 'iPhone 8 - 8101'
return 'iPhone 8'
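# Minimal usage sketch (not executed here). The server URL and the app archive name
# ('UICatalog.app.zip') are hypothetical placeholders; `appium.webdriver` refers to the
# Appium-Python-Client API:
#
#     from appium import webdriver
#
#     caps = get_desired_capabilities('UICatalog.app.zip')
#     driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', caps)
#     try:
#         pass  # run test steps against the simulator chosen by iphone_device_name()
#     finally:
#         driver.quit()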
| [] | [] | ["PYTEST_XDIST_WORKER_COUNT", "PYTEST_XDIST_WORKER"] | [] | ["PYTEST_XDIST_WORKER_COUNT", "PYTEST_XDIST_WORKER"] | python | 2 | 0 |
yt_dlp/YoutubeDL.py | #!/usr/bin/env python3
import collections
import contextlib
import datetime
import errno
import fileinput
import functools
import io
import itertools
import json
import locale
import operator
import os
import platform
import random
import re
import shutil
import subprocess
import sys
import tempfile
import time
import tokenize
import traceback
import unicodedata
import urllib.request
from string import ascii_letters
from .cache import Cache
from .compat import (
HAS_LEGACY as compat_has_legacy,
compat_get_terminal_size,
compat_os_name,
compat_shlex_quote,
compat_str,
compat_urllib_error,
compat_urllib_request,
)
from .cookies import load_cookies
from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
from .downloader.rtmp import rtmpdump_version
from .extractor import gen_extractor_classes, get_info_extractor
from .extractor.openload import PhantomJSwrapper
from .minicurses import format_text
from .postprocessor import _PLUGIN_CLASSES as plugin_postprocessors
from .postprocessor import (
EmbedThumbnailPP,
FFmpegFixupDuplicateMoovPP,
FFmpegFixupDurationPP,
FFmpegFixupM3u8PP,
FFmpegFixupM4aPP,
FFmpegFixupStretchedPP,
FFmpegFixupTimestampPP,
FFmpegMergerPP,
FFmpegPostProcessor,
MoveFilesAfterDownloadPP,
get_postprocessor,
)
from .update import detect_variant
from .utils import (
DEFAULT_OUTTMPL,
LINK_TEMPLATES,
NO_DEFAULT,
NUMBER_RE,
OUTTMPL_TYPES,
POSTPROCESS_WHEN,
STR_FORMAT_RE_TMPL,
STR_FORMAT_TYPES,
ContentTooShortError,
DateRange,
DownloadCancelled,
DownloadError,
EntryNotInPlaylist,
ExistingVideoReached,
ExtractorError,
GeoRestrictedError,
HEADRequest,
InAdvancePagedList,
ISO3166Utils,
LazyList,
MaxDownloadsReached,
Namespace,
PagedList,
PerRequestProxyHandler,
Popen,
PostProcessingError,
ReExtractInfo,
RejectedVideoReached,
SameFileError,
UnavailableVideoError,
YoutubeDLCookieProcessor,
YoutubeDLHandler,
YoutubeDLRedirectHandler,
age_restricted,
args_to_str,
date_from_str,
determine_ext,
determine_protocol,
encode_compat_str,
encodeFilename,
error_to_compat_str,
expand_path,
filter_dict,
float_or_none,
format_bytes,
format_decimal_suffix,
format_field,
formatSeconds,
get_domain,
int_or_none,
iri_to_uri,
join_nonempty,
locked_file,
make_dir,
make_HTTPS_handler,
merge_headers,
network_exceptions,
number_of_digits,
orderedSet,
parse_filesize,
platform_name,
preferredencoding,
prepend_extension,
register_socks_protocols,
remove_terminal_sequences,
render_table,
replace_extension,
sanitize_filename,
sanitize_path,
sanitize_url,
sanitized_Request,
std_headers,
str_or_none,
strftime_or_none,
subtitles_filename,
supports_terminal_sequences,
timetuple_from_msec,
to_high_limit_path,
traverse_obj,
try_get,
url_basename,
variadic,
version_tuple,
windows_enable_vt_mode,
write_json_file,
write_string,
)
from .version import RELEASE_GIT_HEAD, __version__
if compat_os_name == 'nt':
import ctypes
class YoutubeDL:
"""YoutubeDL class.
YoutubeDL objects are the ones responsible for downloading the
actual video file and writing it to disk if the user has requested
it, among some other tasks. In most cases there should be one per
program. As, given a video URL, the downloader doesn't know how to
extract all the needed information (a task that InfoExtractors do), it
has to pass the URL to one of them.
For this, YoutubeDL objects have a method that allows
InfoExtractors to be registered in a given order. When it is passed
a URL, the YoutubeDL object hands it to the first InfoExtractor it
finds that reports being able to handle it. The InfoExtractor extracts
all the information about the video or videos the URL refers to, and
YoutubeDL processes the extracted information, possibly using a File
Downloader to download the video.
YoutubeDL objects accept a lot of parameters. In order not to saturate
the object constructor with arguments, it receives a dictionary of
options instead. These options are available through the params
attribute for the InfoExtractors to use. The YoutubeDL also
registers itself as the downloader in charge of the InfoExtractors
that are added to it, so this is a "mutual registration".
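    A minimal usage sketch (the option names are the ones documented below;
    the URL is only a placeholder):

        ydl_opts = {'format': 'best', 'outtmpl': '%(title)s.%(ext)s'}
        with YoutubeDL(ydl_opts) as ydl:
            ydl.download(['https://www.example.com/watch?v=some_id'])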
Available options:
username: Username for authentication purposes.
password: Password for authentication purposes.
videopassword: Password for accessing a video.
ap_mso: Adobe Pass multiple-system operator identifier.
ap_username: Multiple-system operator account username.
ap_password: Multiple-system operator account password.
usenetrc: Use netrc for authentication instead.
verbose: Print additional info to stdout.
quiet: Do not print messages to stdout.
no_warnings: Do not print out anything for warnings.
forceprint: A dict with keys WHEN mapped to a list of templates to
print to stdout. The allowed keys are video or any of the
items in utils.POSTPROCESS_WHEN.
For compatibility, a single list is also accepted
print_to_file: A dict with keys WHEN (same as forceprint) mapped to
a list of tuples with (template, filename)
forcejson: Force printing info_dict as JSON.
dump_single_json: Force printing the info_dict of the whole playlist
(or video) as a single JSON line.
force_write_download_archive: Force writing download archive regardless
of 'skip_download' or 'simulate'.
simulate: Do not download the video files. If unset (or None),
simulate only if listsubtitles, listformats or list_thumbnails is used
format: Video format code. see "FORMAT SELECTION" for more details.
You can also pass a function. The function takes 'ctx' as
argument and returns the formats to download.
See "build_format_selector" for an implementation
allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
ignore_no_formats_error: Ignore "No video formats" error. Useful for
extracting metadata even if the video is not actually
available for download (experimental)
format_sort: A list of fields by which to sort the video formats.
See "Sorting Formats" for more details.
format_sort_force: Force the given format_sort. see "Sorting Formats"
for more details.
prefer_free_formats: Whether to prefer video formats with free containers
over non-free ones of same quality.
allow_multiple_video_streams: Allow multiple video streams to be merged
into a single file
allow_multiple_audio_streams: Allow multiple audio streams to be merged
into a single file
check_formats Whether to test if the formats are downloadable.
Can be True (check all), False (check none),
'selected' (check selected formats),
or None (check only if requested by extractor)
paths: Dictionary of output paths. The allowed keys are 'home'
'temp' and the keys of OUTTMPL_TYPES (in utils.py)
outtmpl: Dictionary of templates for output names. Allowed keys
are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
For compatibility with youtube-dl, a single string can also be used
outtmpl_na_placeholder: Placeholder for unavailable meta fields.
restrictfilenames: Do not allow "&" and spaces in file names
trim_file_name: Limit length of filename (extension excluded)
windowsfilenames: Force the filenames to be windows compatible
ignoreerrors: Do not stop on download/postprocessing errors.
Can be 'only_download' to ignore only download errors.
Default is 'only_download' for CLI, but False for API
skip_playlist_after_errors: Number of allowed failures until the rest of
the playlist is skipped
force_generic_extractor: Force downloader to use the generic extractor
overwrites: Overwrite all video and metadata files if True,
overwrite only non-video files if None
and don't overwrite any file if False
For compatibility with youtube-dl,
"nooverwrites" may also be used instead
playliststart: Playlist item to start at.
playlistend: Playlist item to end at.
playlist_items: Specific indices of playlist to download.
playlistreverse: Download playlist items in reverse order.
playlistrandom: Download playlist items in random order.
matchtitle: Download only matching titles.
rejecttitle: Reject downloads for matching titles.
logger: Log messages to a logging.Logger instance.
logtostderr: Log messages to stderr instead of stdout.
consoletitle: Display progress in console window's titlebar.
writedescription: Write the video description to a .description file
writeinfojson: Write the video description to a .info.json file
clean_infojson: Remove private fields from the infojson
getcomments: Extract video comments. This will not be written to disk
unless writeinfojson is also given
writeannotations: Write the video annotations to a .annotations.xml file
writethumbnail: Write the thumbnail image to a file
allow_playlist_files: Whether to write playlists' description, infojson etc
also to disk when using the 'write*' options
write_all_thumbnails: Write all thumbnail formats to files
writelink: Write an internet shortcut file, depending on the
current platform (.url/.webloc/.desktop)
writeurllink: Write a Windows internet shortcut file (.url)
writewebloclink: Write a macOS internet shortcut file (.webloc)
writedesktoplink: Write a Linux internet shortcut file (.desktop)
writesubtitles: Write the video subtitles to a file
writeautomaticsub: Write the automatically generated subtitles to a file
listsubtitles: Lists all available subtitles for the video
subtitlesformat: The format code for subtitles
subtitleslangs: List of languages of the subtitles to download (can be regex).
The list may contain "all" to refer to all the available
subtitles. The language can be prefixed with a "-" to
exclude it from the requested languages. Eg: ['all', '-live_chat']
keepvideo: Keep the video file after post-processing
daterange: A DateRange object, download only if the upload_date is in the range.
skip_download: Skip the actual download of the video file
cachedir: Location of the cache files in the filesystem.
False to disable filesystem cache.
noplaylist: Download single video instead of a playlist if in doubt.
age_limit: An integer representing the user's age in years.
Unsuitable videos for the given age are skipped.
min_views: An integer representing the minimum view count the video
must have in order to not be skipped.
Videos without view count information are always
downloaded. None for no limit.
max_views: An integer representing the maximum view count.
Videos that are more popular than that are not
downloaded.
Videos without view count information are always
downloaded. None for no limit.
download_archive: File name of a file where all downloads are recorded.
Videos already present in the file are not downloaded
again.
break_on_existing: Stop the download process after attempting to download a
file that is in the archive.
break_on_reject: Stop the download process when encountering a video that
has been filtered out.
break_per_url: Whether break_on_reject and break_on_existing
should act on each input URL as opposed to for the entire queue
cookiefile: File name or text stream from where cookies should be read and dumped to
cookiesfrombrowser: A tuple containing the name of the browser, the profile
name/path from where cookies are loaded, and the name of the
keyring. Eg: ('chrome', ) or ('vivaldi', 'default', 'BASICTEXT')
legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
support RFC 5746 secure renegotiation
nocheckcertificate: Do not verify SSL certificates
client_certificate: Path to client certificate file in PEM format. May include the private key
client_certificate_key: Path to private key file for client certificate
client_certificate_password: Password for client certificate private key, if encrypted.
If not provided and the key is encrypted, yt-dlp will ask interactively
prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
At the moment, this is only supported by YouTube.
http_headers: A dictionary of custom headers to be used for all requests
proxy: URL of the proxy server to use
geo_verification_proxy: URL of the proxy to use for IP address verification
on geo-restricted sites.
socket_timeout: Time to wait for unresponsive hosts, in seconds
bidi_workaround: Work around buggy terminals without bidirectional text
support, using fribidi
debug_printtraffic:Print out sent and received HTTP traffic
default_search: Prepend this string if an input url is not valid.
'auto' for elaborate guessing
encoding: Use this encoding instead of the system-specified.
extract_flat: Do not resolve URLs, return the immediate result.
Pass in 'in_playlist' to only show this behavior for
playlist items.
wait_for_video: If given, wait for scheduled streams to become available.
The value should be a tuple containing the range
(min_secs, max_secs) to wait between retries
postprocessors: A list of dictionaries, each with an entry
* key: The name of the postprocessor. See
yt_dlp/postprocessor/__init__.py for a list.
* when: When to run the postprocessor. Allowed values are
the entries of utils.POSTPROCESS_WHEN
Assumed to be 'post_process' if not given
progress_hooks: A list of functions that get called on download
progress, with a dictionary with the entries
* status: One of "downloading", "error", or "finished".
Check this first and ignore unknown values.
* info_dict: The extracted info_dict
If status is one of "downloading", or "finished", the
following properties may also be present:
* filename: The final filename (always present)
* tmpfilename: The filename we're currently writing to
* downloaded_bytes: Bytes on disk
* total_bytes: Size of the whole file, None if unknown
* total_bytes_estimate: Guess of the eventual file size,
None if unavailable.
* elapsed: The number of seconds since download started.
* eta: The estimated time in seconds, None if unknown
* speed: The download speed in bytes/second, None if
unknown
* fragment_index: The counter of the currently
downloaded video fragment.
* fragment_count: The number of fragments (= individual
files that will be merged)
Progress hooks are guaranteed to be called at least once
(with status "finished") if the download is successful.
postprocessor_hooks: A list of functions that get called on postprocessing
progress, with a dictionary with the entries
* status: One of "started", "processing", or "finished".
Check this first and ignore unknown values.
* postprocessor: Name of the postprocessor
* info_dict: The extracted info_dict
Progress hooks are guaranteed to be called at least twice
(with status "started" and "finished") if the processing is successful.
merge_output_format: Extension to use when merging formats.
final_ext: Expected final extension; used to detect when the file was
already downloaded and converted
fixup: Automatically correct known faults of the file.
One of:
- "never": do nothing
- "warn": only emit a warning
- "detect_or_warn": check whether we can do anything
about it, warn otherwise (default)
source_address: Client-side IP address to bind to.
sleep_interval_requests: Number of seconds to sleep between requests
during extraction
sleep_interval: Number of seconds to sleep before each download when
used alone or a lower bound of a range for randomized
sleep before each download (minimum possible number
of seconds to sleep) when used along with
max_sleep_interval.
max_sleep_interval:Upper bound of a range for randomized sleep before each
download (maximum possible number of seconds to sleep).
Must only be used along with sleep_interval.
Actual sleep time will be a random float from range
[sleep_interval; max_sleep_interval].
sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
listformats: Print an overview of available video formats and exit.
list_thumbnails: Print a table of all thumbnails and exit.
match_filter: A function that gets called for every video with the signature
(info_dict, *, incomplete: bool) -> Optional[str]
For backward compatibility with youtube-dl, the signature
(info_dict) -> Optional[str] is also allowed.
- If it returns a message, the video is ignored.
- If it returns None, the video is downloaded.
- If it returns utils.NO_DEFAULT, the user is interactively
asked whether to download the video.
match_filter_func in utils.py is one example for this.
no_color: Do not emit color codes in output.
geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
HTTP header
geo_bypass_country:
Two-letter ISO 3166-2 country code that will be used for
explicit geographic restriction bypassing via faking
X-Forwarded-For HTTP header
geo_bypass_ip_block:
IP range in CIDR notation that will be used similarly to
geo_bypass_country
external_downloader: A dictionary of protocol keys and the executable of the
external downloader to use for it. The allowed protocols
are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
Set the value to 'native' to use the native downloader
compat_opts: Compatibility options. See "Differences in default behavior".
The following options do not work when used through the API:
filename, abort-on-error, multistreams, no-live-chat, format-sort
no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
Refer __init__.py for their implementation
progress_template: Dictionary of templates for progress outputs.
Allowed keys are 'download', 'postprocess',
'download-title' (console title) and 'postprocess-title'.
The template is mapped on a dictionary with keys 'progress' and 'info'
retry_sleep_functions: Dictionary of functions that takes the number of attempts
as argument and returns the time to sleep in seconds.
Allowed keys are 'http', 'fragment', 'file_access'
download_ranges: A function that gets called for every video with the signature
(info_dict, *, ydl) -> Iterable[Section].
Only the returned sections will be downloaded. Each Section contains:
* start_time: Start time of the section in seconds
* end_time: End time of the section in seconds
* title: Section title (Optional)
* index: Section number (Optional)
The following parameters are not used by YoutubeDL itself, they are used by
the downloader (see yt_dlp/downloader/common.py):
nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
continuedl, noprogress, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
external_downloader_args, concurrent_fragment_downloads.
The following options are used by the post processors:
ffmpeg_location: Location of the ffmpeg/avconv binary; either the path
to the binary or its containing directory.
postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
and a list of additional command-line arguments for the
postprocessor/executable. The dict can also have "PP+EXE" keys
which are used when the given exe is used by the given PP.
Use 'default' as the name for arguments to passed to all PP
For compatibility with youtube-dl, a single list of args
can also be used
The following options are used by the extractors:
extractor_retries: Number of times to retry for known errors
dynamic_mpd: Whether to process dynamic DASH manifests (default: True)
hls_split_discontinuity: Split HLS playlists to different formats at
discontinuities such as ad breaks (default: False)
extractor_args: A dictionary of arguments to be passed to the extractors.
See "EXTRACTOR ARGUMENTS" for details.
Eg: {'youtube': {'skip': ['dash', 'hls']}}
mark_watched: Mark videos watched (even with --simulate). Only for YouTube
The following options are deprecated and may be removed in the future:
forceurl: - Use forceprint
Force printing final URL.
forcetitle: - Use forceprint
Force printing title.
forceid: - Use forceprint
Force printing ID.
forcethumbnail: - Use forceprint
Force printing thumbnail URL.
forcedescription: - Use forceprint
Force printing description.
forcefilename: - Use forceprint
Force printing final filename.
forceduration: - Use forceprint
Force printing duration.
allsubtitles: - Use subtitleslangs = ['all']
Downloads all the subtitles of the video
(requires writesubtitles or writeautomaticsub)
include_ads: - Doesn't work
Download ads as well
call_home: - Not implemented
Boolean, true iff we are allowed to contact the
yt-dlp servers for debugging.
post_hooks: - Register a custom postprocessor
A list of functions that get called as the final step
for each video file, after all postprocessors have been
called. The filename will be passed as the only argument.
hls_prefer_native: - Use external_downloader = {'m3u8': 'native'} or {'m3u8': 'ffmpeg'}.
Use the native HLS downloader instead of ffmpeg/avconv
if True, otherwise use ffmpeg/avconv if False, otherwise
use downloader suggested by extractor if None.
prefer_ffmpeg: - avconv support is deprecated
If False, use avconv instead of ffmpeg if both are available,
otherwise prefer ffmpeg.
youtube_include_dash_manifest: - Use extractor_args
If True (default), DASH manifests and related
data will be downloaded and processed by extractor.
You can reduce network I/O by disabling it if you don't
care about DASH. (only for youtube)
youtube_include_hls_manifest: - Use extractor_args
If True (default), HLS manifests and related
data will be downloaded and processed by extractor.
You can reduce network I/O by disabling it if you don't
care about HLS. (only for youtube)
"""
_NUMERIC_FIELDS = {
'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
'timestamp', 'release_timestamp',
'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
'average_rating', 'comment_count', 'age_limit',
'start_time', 'end_time',
'chapter_number', 'season_number', 'episode_number',
'track_number', 'disc_number', 'release_year',
}
_format_fields = {
# NB: Keep in sync with the docstring of extractor/common.py
'url', 'manifest_url', 'manifest_stream_number', 'ext', 'format', 'format_id', 'format_note',
'width', 'height', 'resolution', 'dynamic_range', 'tbr', 'abr', 'acodec', 'asr',
'vbr', 'fps', 'vcodec', 'container', 'filesize', 'filesize_approx',
'player_url', 'protocol', 'fragment_base_url', 'fragments', 'is_from_start',
'preference', 'language', 'language_preference', 'quality', 'source_preference',
'http_headers', 'stretched_ratio', 'no_resume', 'has_drm', 'downloader_options',
'page_url', 'app', 'play_path', 'tc_url', 'flash_version', 'rtmp_live', 'rtmp_conn', 'rtmp_protocol', 'rtmp_real_time'
}
_format_selection_exts = {
'audio': {'m4a', 'mp3', 'ogg', 'aac'},
'video': {'mp4', 'flv', 'webm', '3gp'},
'storyboards': {'mhtml'},
}
def __init__(self, params=None, auto_init=True):
"""Create a FileDownloader object with the given options.
@param auto_init Whether to load the default extractors and print header (if verbose).
Set to 'no_verbose_header' to not print the header
"""
if params is None:
params = {}
self.params = params
self._ies = {}
self._ies_instances = {}
self._pps = {k: [] for k in POSTPROCESS_WHEN}
self._printed_messages = set()
self._first_webpage_request = True
self._post_hooks = []
self._progress_hooks = []
self._postprocessor_hooks = []
self._download_retcode = 0
self._num_downloads = 0
self._num_videos = 0
self._playlist_level = 0
self._playlist_urls = set()
self.cache = Cache(self)
windows_enable_vt_mode()
stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
self._out_files = Namespace(
out=stdout,
error=sys.stderr,
screen=sys.stderr if self.params.get('quiet') else stdout,
console=None if compat_os_name == 'nt' else next(
filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)
)
self._allow_colors = Namespace(**{
type_: not self.params.get('no_color') and supports_terminal_sequences(stream)
for type_, stream in self._out_files.items_ if type_ != 'console'
})
if sys.version_info < (3, 6):
self.report_warning(
'Python version %d.%d is not supported! Please update to Python 3.6 or above' % sys.version_info[:2])
if self.params.get('allow_unplayable_formats'):
self.report_warning(
f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
'This is a developer option intended for debugging. \n'
' If you experience any issues while using this option, '
f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')
def check_deprecated(param, option, suggestion):
if self.params.get(param) is not None:
self.report_warning(f'{option} is deprecated. Use {suggestion} instead')
return True
return False
if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
if self.params.get('geo_verification_proxy') is None:
self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']
check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')
for msg in self.params.get('_warnings', []):
self.report_warning(msg)
for msg in self.params.get('_deprecation_warnings', []):
self.deprecation_warning(msg)
self.params['compat_opts'] = set(self.params.get('compat_opts', ()))
if not compat_has_legacy:
self.params['compat_opts'].add('no-compat-legacy')
if 'list-formats' in self.params['compat_opts']:
self.params['listformats_table'] = False
if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
# nooverwrites was unnecessarily changed to overwrites
# in 0c3d0f51778b153f65c21906031c2e091fcfb641
# This ensures compatibility with both keys
self.params['overwrites'] = not self.params['nooverwrites']
elif self.params.get('overwrites') is None:
self.params.pop('overwrites', None)
else:
self.params['nooverwrites'] = not self.params['overwrites']
self.params.setdefault('forceprint', {})
self.params.setdefault('print_to_file', {})
# Compatibility with older syntax
if not isinstance(params['forceprint'], dict):
self.params['forceprint'] = {'video': params['forceprint']}
if self.params.get('bidi_workaround', False):
try:
import pty
master, slave = pty.openpty()
width = compat_get_terminal_size().columns
width_args = [] if width is None else ['-w', str(width)]
sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
try:
self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
except OSError:
self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
self._output_channel = os.fdopen(master, 'rb')
except OSError as ose:
if ose.errno == errno.ENOENT:
self.report_warning(
'Could not find fribidi executable, ignoring --bidi-workaround. '
'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
else:
raise
if auto_init:
if auto_init != 'no_verbose_header':
self.print_debug_header()
self.add_default_info_extractors()
if (sys.platform != 'win32'
and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
and not self.params.get('restrictfilenames', False)):
# Unicode filesystem API will throw errors (#1474, #13027)
self.report_warning(
'Assuming --restrict-filenames since file system encoding '
'cannot encode all characters. '
'Set the LC_ALL environment variable to fix this.')
self.params['restrictfilenames'] = True
self.outtmpl_dict = self.parse_outtmpl()
# Creating format selector here allows us to catch syntax errors before the extraction
self.format_selector = (
self.params.get('format') if self.params.get('format') in (None, '-')
else self.params['format'] if callable(self.params['format'])
else self.build_format_selector(self.params['format']))
# Set http_headers defaults according to std_headers
self.params['http_headers'] = merge_headers(std_headers, self.params.get('http_headers', {}))
hooks = {
'post_hooks': self.add_post_hook,
'progress_hooks': self.add_progress_hook,
'postprocessor_hooks': self.add_postprocessor_hook,
}
for opt, fn in hooks.items():
for ph in self.params.get(opt, []):
fn(ph)
for pp_def_raw in self.params.get('postprocessors', []):
pp_def = dict(pp_def_raw)
when = pp_def.pop('when', 'post_process')
self.add_post_processor(
get_postprocessor(pp_def.pop('key'))(self, **pp_def),
when=when)
self._setup_opener()
register_socks_protocols()
def preload_download_archive(fn):
"""Preload the archive, if any is specified"""
if fn is None:
return False
self.write_debug(f'Loading archive file {fn!r}')
try:
with locked_file(fn, 'r', encoding='utf-8') as archive_file:
for line in archive_file:
self.archive.add(line.strip())
except OSError as ioe:
if ioe.errno != errno.ENOENT:
raise
return False
return True
self.archive = set()
preload_download_archive(self.params.get('download_archive'))
def warn_if_short_id(self, argv):
# short YouTube ID starting with dash?
idxs = [
i for i, a in enumerate(argv)
if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
if idxs:
correct_argv = (
['yt-dlp']
+ [a for i, a in enumerate(argv) if i not in idxs]
+ ['--'] + [argv[i] for i in idxs]
)
self.report_warning(
'Long argument string detected. '
'Use -- to separate parameters and URLs, like this:\n%s' %
args_to_str(correct_argv))
def add_info_extractor(self, ie):
"""Add an InfoExtractor object to the end of the list."""
ie_key = ie.ie_key()
self._ies[ie_key] = ie
if not isinstance(ie, type):
self._ies_instances[ie_key] = ie
ie.set_downloader(self)
def _get_info_extractor_class(self, ie_key):
ie = self._ies.get(ie_key)
if ie is None:
ie = get_info_extractor(ie_key)
self.add_info_extractor(ie)
return ie
def get_info_extractor(self, ie_key):
"""
Get an instance of an IE with name ie_key, it will try to get one from
the _ies list, if there's no instance it will create a new one and add
it to the extractor list.
"""
ie = self._ies_instances.get(ie_key)
if ie is None:
ie = get_info_extractor(ie_key)()
self.add_info_extractor(ie)
return ie
def add_default_info_extractors(self):
"""
Add the InfoExtractors returned by gen_extractors to the end of the list
"""
for ie in gen_extractor_classes():
self.add_info_extractor(ie)
def add_post_processor(self, pp, when='post_process'):
"""Add a PostProcessor object to the end of the chain."""
self._pps[when].append(pp)
pp.set_downloader(self)
def add_post_hook(self, ph):
"""Add the post hook"""
self._post_hooks.append(ph)
def add_progress_hook(self, ph):
"""Add the download progress hook"""
self._progress_hooks.append(ph)
def add_postprocessor_hook(self, ph):
"""Add the postprocessing progress hook"""
self._postprocessor_hooks.append(ph)
for pps in self._pps.values():
for pp in pps:
pp.add_progress_hook(ph)
def _bidi_workaround(self, message):
if not hasattr(self, '_output_channel'):
return message
assert hasattr(self, '_output_process')
assert isinstance(message, compat_str)
line_count = message.count('\n') + 1
self._output_process.stdin.write((message + '\n').encode())
self._output_process.stdin.flush()
res = ''.join(self._output_channel.readline().decode()
for _ in range(line_count))
return res[:-len('\n')]
def _write_string(self, message, out=None, only_once=False):
if only_once:
if message in self._printed_messages:
return
self._printed_messages.add(message)
write_string(message, out=out, encoding=self.params.get('encoding'))
def to_stdout(self, message, skip_eol=False, quiet=None):
"""Print message to stdout"""
if quiet is not None:
self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument quiet. Use "YoutubeDL.to_screen" instead')
if skip_eol is not False:
self.deprecation_warning('"YoutubeDL.to_stdout" no longer accepts the argument skip_eol. Use "YoutubeDL.to_screen" instead')
self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.out)
def to_screen(self, message, skip_eol=False, quiet=None):
"""Print message to screen if not in quiet mode"""
if self.params.get('logger'):
self.params['logger'].debug(message)
return
if (self.params.get('quiet') if quiet is None else quiet) and not self.params.get('verbose'):
return
self._write_string(
'%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
self._out_files.screen)
def to_stderr(self, message, only_once=False):
"""Print message to stderr"""
assert isinstance(message, compat_str)
if self.params.get('logger'):
self.params['logger'].error(message)
else:
self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once)
def _send_console_code(self, code):
if compat_os_name == 'nt' or not self._out_files.console:
return
self._write_string(code, self._out_files.console)
def to_console_title(self, message):
if not self.params.get('consoletitle', False):
return
message = remove_terminal_sequences(message)
if compat_os_name == 'nt':
if ctypes.windll.kernel32.GetConsoleWindow():
# c_wchar_p() might not be necessary if `message` is
# already of type unicode()
ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
else:
self._send_console_code(f'\033]0;{message}\007')
def save_console_title(self):
if not self.params.get('consoletitle') or self.params.get('simulate'):
return
self._send_console_code('\033[22;0t') # Save the title on stack
def restore_console_title(self):
if not self.params.get('consoletitle') or self.params.get('simulate'):
return
self._send_console_code('\033[23;0t') # Restore the title from stack
def __enter__(self):
self.save_console_title()
return self
def __exit__(self, *args):
self.restore_console_title()
if self.params.get('cookiefile') is not None:
self.cookiejar.save(ignore_discard=True, ignore_expires=True)
def trouble(self, message=None, tb=None, is_error=True):
"""Determine action to take when a download problem appears.
Depending on if the downloader has been configured to ignore
download errors or not, this method may throw an exception or
not when errors are found, after printing the message.
@param tb If given, is additional traceback information
@param is_error Whether to raise error according to ignoreerrors
"""
if message is not None:
self.to_stderr(message)
if self.params.get('verbose'):
if tb is None:
if sys.exc_info()[0]: # if .trouble has been called from an except block
tb = ''
if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
tb += encode_compat_str(traceback.format_exc())
else:
tb_data = traceback.format_list(traceback.extract_stack())
tb = ''.join(tb_data)
if tb:
self.to_stderr(tb)
if not is_error:
return
if not self.params.get('ignoreerrors'):
if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
exc_info = sys.exc_info()[1].exc_info
else:
exc_info = sys.exc_info()
raise DownloadError(message, exc_info)
self._download_retcode = 1
Styles = Namespace(
HEADERS='yellow',
EMPHASIS='light blue',
FILENAME='green',
ID='green',
DELIM='blue',
ERROR='red',
WARNING='yellow',
SUPPRESS='light black',
)
def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
text = str(text)
if test_encoding:
original_text = text
# handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
text = text.encode(encoding, 'ignore').decode(encoding)
if fallback is not None and text != original_text:
text = fallback
return format_text(text, f) if allow_colors else text if fallback is None else fallback
def _format_out(self, *args, **kwargs):
return self._format_text(self._out_files.out, self._allow_colors.out, *args, **kwargs)
def _format_screen(self, *args, **kwargs):
return self._format_text(self._out_files.screen, self._allow_colors.screen, *args, **kwargs)
def _format_err(self, *args, **kwargs):
return self._format_text(self._out_files.error, self._allow_colors.error, *args, **kwargs)
def report_warning(self, message, only_once=False):
'''
Print the message to stderr; it will be prefixed with 'WARNING:'.
If stderr is a tty file, the 'WARNING:' will be colored
'''
if self.params.get('logger') is not None:
self.params['logger'].warning(message)
else:
if self.params.get('no_warnings'):
return
self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)
def deprecation_warning(self, message):
if self.params.get('logger') is not None:
self.params['logger'].warning(f'DeprecationWarning: {message}')
else:
self.to_stderr(f'{self._format_err("DeprecationWarning:", self.Styles.ERROR)} {message}', True)
def report_error(self, message, *args, **kwargs):
'''
Do the same as trouble, but prefixes the message with 'ERROR:', colored
in red if stderr is a tty file.
'''
self.trouble(f'{self._format_err("ERROR:", self.Styles.ERROR)} {message}', *args, **kwargs)
def write_debug(self, message, only_once=False):
        '''Log a debug message to the logger, or print it to stderr if no logger is set'''
if not self.params.get('verbose', False):
return
message = f'[debug] {message}'
if self.params.get('logger'):
self.params['logger'].debug(message)
else:
self.to_stderr(message, only_once)
def report_file_already_downloaded(self, file_name):
"""Report file has already been fully downloaded."""
try:
self.to_screen('[download] %s has already been downloaded' % file_name)
except UnicodeEncodeError:
self.to_screen('[download] The file has already been downloaded')
def report_file_delete(self, file_name):
"""Report that existing file will be deleted."""
try:
self.to_screen('Deleting existing file %s' % file_name)
except UnicodeEncodeError:
self.to_screen('Deleting existing file')
def raise_no_formats(self, info, forced=False, *, msg=None):
has_drm = info.get('_has_drm')
ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
if forced or not ignored:
raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
expected=has_drm or ignored or expected)
else:
self.report_warning(msg)
def parse_outtmpl(self):
outtmpl_dict = self.params.get('outtmpl', {})
if not isinstance(outtmpl_dict, dict):
outtmpl_dict = {'default': outtmpl_dict}
# Remove spaces in the default template
if self.params.get('restrictfilenames'):
sanitize = lambda x: x.replace(' - ', ' ').replace(' ', '-')
else:
sanitize = lambda x: x
outtmpl_dict.update({
k: sanitize(v) for k, v in DEFAULT_OUTTMPL.items()
if outtmpl_dict.get(k) is None})
for _, val in outtmpl_dict.items():
if isinstance(val, bytes):
self.report_warning('Parameter outtmpl is bytes, but should be a unicode string')
return outtmpl_dict
def get_output_path(self, dir_type='', filename=None):
paths = self.params.get('paths', {})
assert isinstance(paths, dict)
path = os.path.join(
expand_path(paths.get('home', '').strip()),
expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
filename or '')
return sanitize_path(path, force=self.params.get('windowsfilenames'))
@staticmethod
def _outtmpl_expandpath(outtmpl):
        # expand_path translates '%%' into '%' and '$$' into '$'.
        # That is not what we want, since we need to keep '%%' intact for
        # the template dict substitution step. Work around it with a
        # boundary-like separator hack.
sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')
# outtmpl should be expand_path'ed before template dict substitution
# because meta fields may contain env variables we don't want to
# be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
# title "Hello $PATH", we don't want `$PATH` to be expanded.
return expand_path(outtmpl).replace(sep, '')
@staticmethod
def escape_outtmpl(outtmpl):
''' Escape any remaining strings like %s, %abc% etc. '''
return re.sub(
STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'),
lambda mobj: ('' if mobj.group('has_key') else '%') + mobj.group(0),
outtmpl)
@classmethod
def validate_outtmpl(cls, outtmpl):
''' @return None or Exception object '''
outtmpl = re.sub(
STR_FORMAT_RE_TMPL.format('[^)]*', '[ljqBUDS]'),
lambda mobj: f'{mobj.group(0)[:-1]}s',
cls._outtmpl_expandpath(outtmpl))
try:
cls.escape_outtmpl(outtmpl) % collections.defaultdict(int)
return None
except ValueError as err:
return err
@staticmethod
def _copy_infodict(info_dict):
info_dict = dict(info_dict)
info_dict.pop('__postprocessors', None)
info_dict.pop('__pending_error', None)
return info_dict
def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
""" Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict
@param sanitize Whether to sanitize the output as a filename.
For backward compatibility, a function can also be passed
"""
info_dict.setdefault('epoch', int(time.time())) # keep epoch consistent once set
info_dict = self._copy_infodict(info_dict)
info_dict['duration_string'] = ( # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
formatSeconds(info_dict['duration'], '-' if sanitize else ':')
if info_dict.get('duration', None) is not None
else None)
info_dict['autonumber'] = int(self.params.get('autonumber_start', 1) - 1 + self._num_downloads)
info_dict['video_autonumber'] = self._num_videos
if info_dict.get('resolution') is None:
info_dict['resolution'] = self.format_resolution(info_dict, default=None)
# For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
# of %(field)s to %(field)0Nd for backward compatibility
field_size_compat_map = {
'playlist_index': number_of_digits(info_dict.get('__last_playlist_index') or 0),
'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
'autonumber': self.params.get('autonumber_size') or 5,
}
TMPL_DICT = {}
EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljqBUDS]'))
MATH_FUNCTIONS = {
'+': float.__add__,
'-': float.__sub__,
}
# Field is of the form key1.key2...
# where keys (except first) can be string, int or slice
FIELD_RE = r'\w*(?:\.(?:\w+|{num}|{num}?(?::{num}?){{1,2}}))*'.format(num=r'(?:-?\d+)')
MATH_FIELD_RE = rf'(?:{FIELD_RE}|-?{NUMBER_RE})'
MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
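        # INTERNAL_FORMAT_RE parses the part inside %(...): an optionally negated,
        # possibly dotted field name, optional arithmetic (e.g. epoch-3600), an
        # optional strftime format after '>', comma-separated alternate fields,
        # a '&' replacement and a '|' default - e.g. %(title,id)s, %(id&yes|no)s,
        # %(timestamp>%Y-%m-%d)s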
INTERNAL_FORMAT_RE = re.compile(rf'''(?x)
(?P<negate>-)?
(?P<fields>{FIELD_RE})
(?P<maths>(?:{MATH_OPERATORS_RE}{MATH_FIELD_RE})*)
(?:>(?P<strf_format>.+?))?
(?P<remaining>
(?P<alternate>(?<!\\),[^|&)]+)?
(?:&(?P<replacement>.*?))?
(?:\|(?P<default>.*?))?
)$''')
def _traverse_infodict(k):
k = k.split('.')
if k[0] == '':
k.pop(0)
return traverse_obj(info_dict, k, is_user_input=True, traverse_string=True)
def get_value(mdict):
# Object traversal
value = _traverse_infodict(mdict['fields'])
# Negative
if mdict['negate']:
value = float_or_none(value)
if value is not None:
value *= -1
# Do maths
offset_key = mdict['maths']
if offset_key:
value = float_or_none(value)
operator = None
while offset_key:
item = re.match(
MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
offset_key).group(0)
offset_key = offset_key[len(item):]
if operator is None:
operator = MATH_FUNCTIONS[item]
continue
item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
offset = float_or_none(item)
if offset is None:
offset = float_or_none(_traverse_infodict(item))
try:
value = operator(value, multiplier * offset)
except (TypeError, ZeroDivisionError):
return None
operator = None
# Datetime formatting
if mdict['strf_format']:
value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))
return value
na = self.params.get('outtmpl_na_placeholder', 'NA')
def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
return sanitize_filename(str(value), restricted=restricted, is_id=(
bool(re.search(r'(^|[_.])id(\.|$)', key))
if 'filename-sanitization' in self.params['compat_opts']
else NO_DEFAULT))
sanitizer = sanitize if callable(sanitize) else filename_sanitizer
sanitize = bool(sanitize)
def _dumpjson_default(obj):
if isinstance(obj, (set, LazyList)):
return list(obj)
return repr(obj)
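        # create_key is the re.sub callback applied to every %(...)X field of the
        # template: it resolves the field via get_value (falling back to alternates
        # until one is non-None), applies the custom conversions (l/j/q/B/U/D/S etc.),
        # stores the final value in TMPL_DICT under an escaped key and rewrites the
        # template to reference that key.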
def create_key(outer_mobj):
if not outer_mobj.group('has_key'):
return outer_mobj.group(0)
key = outer_mobj.group('key')
mobj = re.match(INTERNAL_FORMAT_RE, key)
initial_field = mobj.group('fields') if mobj else ''
value, replacement, default = None, None, na
while mobj:
mobj = mobj.groupdict()
default = mobj['default'] if mobj['default'] is not None else default
value = get_value(mobj)
replacement = mobj['replacement']
if value is None and mobj['alternate']:
mobj = re.match(INTERNAL_FORMAT_RE, mobj['remaining'][1:])
else:
break
fmt = outer_mobj.group('format')
if fmt == 's' and value is not None and key in field_size_compat_map.keys():
fmt = f'0{field_size_compat_map[key]:d}d'
value = default if value is None else value if replacement is None else replacement
flags = outer_mobj.group('conversion') or ''
str_fmt = f'{fmt[:-1]}s'
if fmt[-1] == 'l': # list
delim = '\n' if '#' in flags else ', '
value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
elif fmt[-1] == 'j': # json
value, fmt = json.dumps(value, default=_dumpjson_default, indent=4 if '#' in flags else None), str_fmt
elif fmt[-1] == 'q': # quoted
value = map(str, variadic(value) if '#' in flags else [value])
value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
elif fmt[-1] == 'B': # bytes
value = f'%{str_fmt}'.encode() % str(value).encode()
value, fmt = value.decode('utf-8', 'ignore'), 's'
elif fmt[-1] == 'U': # unicode normalized
value, fmt = unicodedata.normalize(
# "+" = compatibility equivalence, "#" = NFD
'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
value), str_fmt
elif fmt[-1] == 'D': # decimal suffix
num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
factor=1024 if '#' in flags else 1000)
elif fmt[-1] == 'S': # filename sanitization
value, fmt = filename_sanitizer(initial_field, value, restricted='#' in flags), str_fmt
elif fmt[-1] == 'c':
if value:
value = str(value)[0]
else:
fmt = str_fmt
elif fmt[-1] not in 'rs': # numeric
value = float_or_none(value)
if value is None:
value, fmt = default, 's'
if sanitize:
if fmt[-1] == 'r':
# If value is an object, sanitize might convert it to a string
# So we convert it to repr first
value, fmt = repr(value), str_fmt
if fmt[-1] in 'csr':
value = sanitizer(initial_field, value)
key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
TMPL_DICT[key] = value
return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))
return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT
def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
return self.escape_outtmpl(outtmpl) % info_dict
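    # _prepare_filename evaluates the output template for the given template type
    # (or an explicit outtmpl), fixes up the extension for intermediate/forced-ext
    # templates and applies the trim_file_name limit; returns None if evaluation fails.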
def _prepare_filename(self, info_dict, *, outtmpl=None, tmpl_type=None):
assert None in (outtmpl, tmpl_type), 'outtmpl and tmpl_type are mutually exclusive'
if outtmpl is None:
outtmpl = self.outtmpl_dict.get(tmpl_type or 'default', self.outtmpl_dict['default'])
try:
outtmpl = self._outtmpl_expandpath(outtmpl)
filename = self.evaluate_outtmpl(outtmpl, info_dict, True)
if not filename:
return None
if tmpl_type in ('', 'temp'):
final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
filename = replace_extension(filename, ext, final_ext)
elif tmpl_type:
force_ext = OUTTMPL_TYPES[tmpl_type]
if force_ext:
filename = replace_extension(filename, force_ext, info_dict.get('ext'))
# https://github.com/blackjack4494/youtube-dlc/issues/85
trim_file_name = self.params.get('trim_file_name', False)
if trim_file_name:
no_ext, *ext = filename.rsplit('.', 2)
filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')
return filename
except ValueError as err:
self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
return None
def prepare_filename(self, info_dict, dir_type='', *, outtmpl=None, warn=False):
"""Generate the output filename"""
if outtmpl:
assert not dir_type, 'outtmpl and dir_type are mutually exclusive'
dir_type = None
filename = self._prepare_filename(info_dict, tmpl_type=dir_type, outtmpl=outtmpl)
if not filename and dir_type not in ('', 'temp'):
return ''
if warn:
if not self.params.get('paths'):
pass
elif filename == '-':
                self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
elif os.path.isabs(filename):
self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
if filename == '-' or not filename:
return filename
return self.get_output_path(dir_type, filename)
def _match_entry(self, info_dict, incomplete=False, silent=False):
""" Returns None if the file should be downloaded """
video_title = info_dict.get('title', info_dict.get('id', 'video'))
def check_filter():
if 'title' in info_dict:
# This can happen when we're just evaluating the playlist
title = info_dict['title']
matchtitle = self.params.get('matchtitle', False)
if matchtitle:
if not re.search(matchtitle, title, re.IGNORECASE):
return '"' + title + '" title did not match pattern "' + matchtitle + '"'
rejecttitle = self.params.get('rejecttitle', False)
if rejecttitle:
if re.search(rejecttitle, title, re.IGNORECASE):
return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
date = info_dict.get('upload_date')
if date is not None:
dateRange = self.params.get('daterange', DateRange())
if date not in dateRange:
return f'{date_from_str(date).isoformat()} upload date is not in range {dateRange}'
view_count = info_dict.get('view_count')
if view_count is not None:
min_views = self.params.get('min_views')
if min_views is not None and view_count < min_views:
return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
max_views = self.params.get('max_views')
if max_views is not None and view_count > max_views:
return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
return 'Skipping "%s" because it is age restricted' % video_title
match_filter = self.params.get('match_filter')
if match_filter is not None:
try:
ret = match_filter(info_dict, incomplete=incomplete)
except TypeError:
# For backward compatibility
ret = None if incomplete else match_filter(info_dict)
if ret is NO_DEFAULT:
while True:
filename = self._format_screen(self.prepare_filename(info_dict), self.Styles.FILENAME)
reply = input(self._format_screen(
f'Download "{filename}"? (Y/n): ', self.Styles.EMPHASIS)).lower().strip()
if reply in {'y', ''}:
return None
elif reply == 'n':
return f'Skipping {video_title}'
elif ret is not None:
return ret
return None
if self.in_download_archive(info_dict):
reason = '%s has already been recorded in the archive' % video_title
break_opt, break_err = 'break_on_existing', ExistingVideoReached
else:
reason = check_filter()
break_opt, break_err = 'break_on_reject', RejectedVideoReached
if reason is not None:
if not silent:
self.to_screen('[download] ' + reason)
if self.params.get(break_opt, False):
raise break_err()
return reason
@staticmethod
def add_extra_info(info_dict, extra_info):
'''Set the keys from extra_info in info dict if they are missing'''
for key, value in extra_info.items():
info_dict.setdefault(key, value)
def extract_info(self, url, download=True, ie_key=None, extra_info=None,
process=True, force_generic_extractor=False):
"""
Return a list with a dictionary for each video extracted.
Arguments:
url -- URL to extract
Keyword arguments:
download -- whether to download videos during extraction
ie_key -- extractor key hint
extra_info -- dictionary containing the extra values to add to each result
process -- whether to resolve all unresolved references (URLs, playlist items),
must be True for download to work.
force_generic_extractor -- force using the generic extractor
"""
if extra_info is None:
extra_info = {}
if not ie_key and force_generic_extractor:
ie_key = 'Generic'
if ie_key:
ies = {ie_key: self._get_info_extractor_class(ie_key)}
else:
ies = self._ies
for ie_key, ie in ies.items():
if not ie.suitable(url):
continue
if not ie.working():
self.report_warning('The program functionality for this site has been marked as broken, '
'and will probably not work.')
temp_id = ie.get_temp_id(url)
if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
self.to_screen(f'[{ie_key}] {temp_id}: has already been recorded in the archive')
if self.params.get('break_on_existing', False):
raise ExistingVideoReached()
break
return self.__extract_info(url, self.get_info_extractor(ie_key), download, extra_info, process)
else:
self.report_error('no suitable InfoExtractor for URL %s' % url)
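    # Decorator wrapping the extraction entry points: it retries the call on
    # ReExtractInfo, turns GeoRestrictedError/ExtractorError into report_error and
    # either reports or re-raises any other exception depending on --ignore-errors.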
def __handle_extraction_exceptions(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
while True:
try:
return func(self, *args, **kwargs)
except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
raise
except ReExtractInfo as e:
if e.expected:
self.to_screen(f'{e}; Re-extracting data')
else:
self.to_stderr('\r')
self.report_warning(f'{e}; Re-extracting data')
continue
except GeoRestrictedError as e:
msg = e.msg
if e.countries:
msg += '\nThis video is available in %s.' % ', '.join(
map(ISO3166Utils.short2full, e.countries))
msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
self.report_error(msg)
except ExtractorError as e: # An error we somewhat expected
self.report_error(str(e), e.format_traceback())
except Exception as e:
if self.params.get('ignoreerrors'):
self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
else:
raise
break
return wrapper
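    # When --wait-for-video is set and the extractor returned no formats/url for an
    # upcoming video, block with a countdown until the (estimated) release time and
    # then trigger a re-extraction via ReExtractInfo.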
def _wait_for_video(self, ie_result):
if (not self.params.get('wait_for_video')
or ie_result.get('_type', 'video') != 'video'
or ie_result.get('formats') or ie_result.get('url')):
return
format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
last_msg = ''
def progress(msg):
nonlocal last_msg
self.to_screen(msg + ' ' * (len(last_msg) - len(msg)) + '\r', skip_eol=True)
last_msg = msg
min_wait, max_wait = self.params.get('wait_for_video')
diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
if diff is None and ie_result.get('live_status') == 'is_upcoming':
diff = round(random.uniform(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait), 0)
self.report_warning('Release time of video is not known')
elif (diff or 0) <= 0:
self.report_warning('Video should already be available according to extracted info')
diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')
wait_till = time.time() + diff
try:
while True:
diff = wait_till - time.time()
if diff <= 0:
progress('')
raise ReExtractInfo('[wait] Wait period ended', expected=True)
progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
time.sleep(1)
except KeyboardInterrupt:
progress('')
raise ReExtractInfo('[wait] Interrupted by user', expected=True)
except BaseException as e:
if not isinstance(e, ReExtractInfo):
self.to_screen('')
raise
@__handle_extraction_exceptions
def __extract_info(self, url, ie, download, extra_info, process):
ie_result = ie.extract(url)
if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
return
if isinstance(ie_result, list):
# Backwards compatibility: old IE result format
ie_result = {
'_type': 'compat_list',
'entries': ie_result,
}
if extra_info.get('original_url'):
ie_result.setdefault('original_url', extra_info['original_url'])
self.add_default_extra_info(ie_result, ie, url)
if process:
self._wait_for_video(ie_result)
return self.process_ie_result(ie_result, download, extra_info)
else:
return ie_result
def add_default_extra_info(self, ie_result, ie, url):
if url is not None:
self.add_extra_info(ie_result, {
'webpage_url': url,
'original_url': url,
})
webpage_url = ie_result.get('webpage_url')
if webpage_url:
self.add_extra_info(ie_result, {
'webpage_url_basename': url_basename(webpage_url),
'webpage_url_domain': get_domain(webpage_url),
})
if ie is not None:
self.add_extra_info(ie_result, {
'extractor': ie.IE_NAME,
'extractor_key': ie.ie_key(),
})
def process_ie_result(self, ie_result, download=True, extra_info=None):
"""
        Take the result of the ie (may be modified) and resolve all unresolved
        references (URLs, playlist items).
        It will also download the videos if 'download' is set.
        Returns the resolved ie_result.
"""
if extra_info is None:
extra_info = {}
result_type = ie_result.get('_type', 'video')
if result_type in ('url', 'url_transparent'):
ie_result['url'] = sanitize_url(ie_result['url'])
if ie_result.get('original_url'):
extra_info.setdefault('original_url', ie_result['original_url'])
extract_flat = self.params.get('extract_flat', False)
if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
or extract_flat is True):
info_copy = ie_result.copy()
ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
if ie and not ie_result.get('id'):
info_copy['id'] = ie.get_temp_id(ie_result['url'])
self.add_default_extra_info(info_copy, ie, ie_result['url'])
self.add_extra_info(info_copy, extra_info)
info_copy, _ = self.pre_process(info_copy)
self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
self._raise_pending_errors(info_copy)
if self.params.get('force_write_download_archive', False):
self.record_download_archive(info_copy)
return ie_result
if result_type == 'video':
self.add_extra_info(ie_result, extra_info)
ie_result = self.process_video_result(ie_result, download=download)
self._raise_pending_errors(ie_result)
additional_urls = (ie_result or {}).get('additional_urls')
if additional_urls:
# TODO: Improve MetadataParserPP to allow setting a list
if isinstance(additional_urls, compat_str):
additional_urls = [additional_urls]
self.to_screen(
'[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
ie_result['additional_entries'] = [
self.extract_info(
url, download, extra_info=extra_info,
force_generic_extractor=self.params.get('force_generic_extractor'))
for url in additional_urls
]
return ie_result
elif result_type == 'url':
# We have to add extra_info to the results because it may be
# contained in a playlist
return self.extract_info(
ie_result['url'], download,
ie_key=ie_result.get('ie_key'),
extra_info=extra_info)
elif result_type == 'url_transparent':
# Use the information from the embedding page
info = self.extract_info(
ie_result['url'], ie_key=ie_result.get('ie_key'),
extra_info=extra_info, download=False, process=False)
# extract_info may return None when ignoreerrors is enabled and
# extraction failed with an error, don't crash and return early
# in this case
if not info:
return info
new_result = info.copy()
new_result.update(filter_dict(ie_result, lambda k, v: (
v is not None and k not in {'_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'})))
            # Extracted info may not be a video result (i.e.
            # info.get('_type', 'video') != 'video') but rather a url or
            # url_transparent. In such cases outer metadata (from ie_result)
            # should be propagated to the inner one (info). For this to happen
            # _type of info should be overridden with url_transparent. This
            # fixes the issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
if new_result.get('_type') == 'url':
new_result['_type'] = 'url_transparent'
return self.process_ie_result(
new_result, download=download, extra_info=extra_info)
elif result_type in ('playlist', 'multi_video'):
# Protect from infinite recursion due to recursively nested playlists
# (see https://github.com/ytdl-org/youtube-dl/issues/27833)
webpage_url = ie_result['webpage_url']
if webpage_url in self._playlist_urls:
self.to_screen(
                    '[download] Skipping already downloaded playlist: %s'
                    % (ie_result.get('title') or ie_result.get('id')))
return
self._playlist_level += 1
self._playlist_urls.add(webpage_url)
self._fill_common_fields(ie_result, False)
self._sanitize_thumbnails(ie_result)
try:
return self.__process_playlist(ie_result, download)
finally:
self._playlist_level -= 1
if not self._playlist_level:
self._playlist_urls.clear()
elif result_type == 'compat_list':
self.report_warning(
'Extractor %s returned a compat_list result. '
'It needs to be updated.' % ie_result.get('extractor'))
def _fixup(r):
self.add_extra_info(r, {
'extractor': ie_result['extractor'],
'webpage_url': ie_result['webpage_url'],
'webpage_url_basename': url_basename(ie_result['webpage_url']),
'webpage_url_domain': get_domain(ie_result['webpage_url']),
'extractor_key': ie_result['extractor_key'],
})
return r
ie_result['entries'] = [
self.process_ie_result(_fixup(r), download, extra_info)
for r in ie_result['entries']
]
return ie_result
else:
raise Exception('Invalid result type: %s' % result_type)
def _ensure_dir_exists(self, path):
return make_dir(path, self.report_error)
@staticmethod
def _playlist_infodict(ie_result, **kwargs):
return {
**ie_result,
'playlist': ie_result.get('title') or ie_result.get('id'),
'playlist_id': ie_result.get('id'),
'playlist_title': ie_result.get('title'),
'playlist_uploader': ie_result.get('uploader'),
'playlist_uploader_id': ie_result.get('uploader_id'),
'playlist_index': 0,
**kwargs,
}
def __process_playlist(self, ie_result, download):
# We process each entry in the playlist
playlist = ie_result.get('title') or ie_result.get('id')
self.to_screen('[download] Downloading playlist: %s' % playlist)
if 'entries' not in ie_result:
raise EntryNotInPlaylist('There are no entries')
MissingEntry = object()
incomplete_entries = bool(ie_result.get('requested_entries'))
if incomplete_entries:
def fill_missing_entries(entries, indices):
ret = [MissingEntry] * max(indices)
for i, entry in zip(indices, entries):
ret[i - 1] = entry
return ret
ie_result['entries'] = fill_missing_entries(ie_result['entries'], ie_result['requested_entries'])
playlist_results = []
playliststart = self.params.get('playliststart', 1)
playlistend = self.params.get('playlistend')
# For backwards compatibility, interpret -1 as whole list
if playlistend == -1:
playlistend = None
playlistitems_str = self.params.get('playlist_items')
playlistitems = None
if playlistitems_str is not None:
def iter_playlistitems(format):
for string_segment in format.split(','):
if '-' in string_segment:
start, end = string_segment.split('-')
for item in range(int(start), int(end) + 1):
yield int(item)
else:
yield int(string_segment)
playlistitems = orderedSet(iter_playlistitems(playlistitems_str))
ie_entries = ie_result['entries']
if isinstance(ie_entries, list):
playlist_count = len(ie_entries)
msg = f'Collected {playlist_count} videos; downloading %d of them'
ie_result['playlist_count'] = ie_result.get('playlist_count') or playlist_count
def get_entry(i):
return ie_entries[i - 1]
else:
msg = 'Downloading %d videos'
if not isinstance(ie_entries, (PagedList, LazyList)):
ie_entries = LazyList(ie_entries)
elif isinstance(ie_entries, InAdvancePagedList):
if ie_entries._pagesize == 1:
playlist_count = ie_entries._pagecount
def get_entry(i):
return YoutubeDL.__handle_extraction_exceptions(
lambda self, i: ie_entries[i - 1]
)(self, i)
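        # Materialize the requested entries, honoring --playlist-items,
        # --playlist-start and --playlist-end; entries missing from an incomplete
        # playlist raise EntryNotInPlaylist, and archive/filter hits may stop the
        # collection early.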
entries, broken = [], False
items = playlistitems if playlistitems is not None else itertools.count(playliststart)
for i in items:
if i == 0:
continue
if playlistitems is None and playlistend is not None and playlistend < i:
break
entry = None
try:
entry = get_entry(i)
if entry is MissingEntry:
raise EntryNotInPlaylist()
except (IndexError, EntryNotInPlaylist):
if incomplete_entries:
raise EntryNotInPlaylist(f'Entry {i} cannot be found')
elif not playlistitems:
break
entries.append(entry)
try:
if entry is not None:
# TODO: Add auto-generated fields
self._match_entry(entry, incomplete=True, silent=True)
except (ExistingVideoReached, RejectedVideoReached):
broken = True
break
ie_result['entries'] = entries
# Save playlist_index before re-ordering
entries = [
((playlistitems[i - 1] if playlistitems else i + playliststart - 1), entry)
for i, entry in enumerate(entries, 1)
if entry is not None]
n_entries = len(entries)
if not (ie_result.get('playlist_count') or broken or playlistitems or playlistend):
ie_result['playlist_count'] = n_entries
if not playlistitems and (playliststart != 1 or playlistend):
playlistitems = list(range(playliststart, playliststart + n_entries))
ie_result['requested_entries'] = playlistitems
_infojson_written = False
write_playlist_files = self.params.get('allow_playlist_files', True)
if write_playlist_files and self.params.get('list_thumbnails'):
self.list_thumbnails(ie_result)
if write_playlist_files and not self.params.get('simulate'):
ie_copy = self._playlist_infodict(ie_result, n_entries=n_entries)
_infojson_written = self._write_info_json(
'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
if _infojson_written is None:
return
if self._write_description('playlist', ie_result,
self.prepare_filename(ie_copy, 'pl_description')) is None:
return
# TODO: This should be passed to ThumbnailsConvertor if necessary
self._write_thumbnails('playlist', ie_copy, self.prepare_filename(ie_copy, 'pl_thumbnail'))
if self.params.get('playlistreverse', False):
entries = entries[::-1]
if self.params.get('playlistrandom', False):
random.shuffle(entries)
x_forwarded_for = ie_result.get('__x_forwarded_for_ip')
self.to_screen(f'[{ie_result["extractor"]}] playlist {playlist}: {msg % n_entries}')
failures = 0
max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
for i, entry_tuple in enumerate(entries, 1):
playlist_index, entry = entry_tuple
if 'playlist-index' in self.params['compat_opts']:
playlist_index = playlistitems[i - 1] if playlistitems else i + playliststart - 1
self.to_screen('[download] Downloading video %s of %s' % (
self._format_screen(i, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))
# This __x_forwarded_for_ip thing is a bit ugly but requires
# minimal changes
if x_forwarded_for:
entry['__x_forwarded_for_ip'] = x_forwarded_for
extra = {
'n_entries': n_entries,
'__last_playlist_index': max(playlistitems) if playlistitems else (playlistend or n_entries),
'playlist_count': ie_result.get('playlist_count'),
'playlist_index': playlist_index,
'playlist_autonumber': i,
'playlist': playlist,
'playlist_id': ie_result.get('id'),
'playlist_title': ie_result.get('title'),
'playlist_uploader': ie_result.get('uploader'),
'playlist_uploader_id': ie_result.get('uploader_id'),
'extractor': ie_result['extractor'],
'webpage_url': ie_result['webpage_url'],
'webpage_url_basename': url_basename(ie_result['webpage_url']),
'webpage_url_domain': get_domain(ie_result['webpage_url']),
'extractor_key': ie_result['extractor_key'],
}
if self._match_entry(entry, incomplete=True) is not None:
continue
entry_result = self.__process_iterable_entry(entry, download, extra)
if not entry_result:
failures += 1
if failures >= max_failures:
self.report_error(
'Skipping the remaining entries in playlist "%s" since %d items failed extraction' % (playlist, failures))
break
playlist_results.append(entry_result)
ie_result['entries'] = playlist_results
# Write the updated info to json
if _infojson_written is True and self._write_info_json(
'updated playlist', ie_result,
self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
return
ie_result = self.run_all_pps('playlist', ie_result)
self.to_screen(f'[download] Finished downloading playlist: {playlist}')
return ie_result
@__handle_extraction_exceptions
def __process_iterable_entry(self, entry, download, extra_info):
return self.process_ie_result(
entry, download=download, extra_info=extra_info)
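    # Build a predicate for a single format filter (the part inside "[...]" of a
    # format spec), e.g. "height<=720" for numeric fields or "ext=mp4" /
    # "format_id~=regex" for string fields; appending '?' to the operator makes the
    # filter pass when the field is missing.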
def _build_format_filter(self, filter_spec):
" Returns a function to filter the formats according to the filter_spec "
OPERATORS = {
'<': operator.lt,
'<=': operator.le,
'>': operator.gt,
'>=': operator.ge,
'=': operator.eq,
'!=': operator.ne,
}
operator_rex = re.compile(r'''(?x)\s*
(?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)\s*
(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
(?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
''' % '|'.join(map(re.escape, OPERATORS.keys())))
m = operator_rex.fullmatch(filter_spec)
if m:
try:
comparison_value = int(m.group('value'))
except ValueError:
comparison_value = parse_filesize(m.group('value'))
if comparison_value is None:
comparison_value = parse_filesize(m.group('value') + 'B')
if comparison_value is None:
raise ValueError(
'Invalid value %r in format specification %r' % (
m.group('value'), filter_spec))
op = OPERATORS[m.group('op')]
if not m:
STR_OPERATORS = {
'=': operator.eq,
'^=': lambda attr, value: attr.startswith(value),
'$=': lambda attr, value: attr.endswith(value),
'*=': lambda attr, value: value in attr,
'~=': lambda attr, value: value.search(attr) is not None
}
str_operator_rex = re.compile(r'''(?x)\s*
(?P<key>[a-zA-Z0-9._-]+)\s*
(?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
(?P<quote>["'])?
(?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
(?(quote)(?P=quote))\s*
''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
m = str_operator_rex.fullmatch(filter_spec)
if m:
if m.group('op') == '~=':
comparison_value = re.compile(m.group('value'))
else:
comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
str_op = STR_OPERATORS[m.group('op')]
if m.group('negation'):
op = lambda attr, value: not str_op(attr, value)
else:
op = str_op
if not m:
raise SyntaxError('Invalid filter specification %r' % filter_spec)
def _filter(f):
actual_value = f.get(m.group('key'))
if actual_value is None:
return m.group('none_inclusive')
return op(actual_value, comparison_value)
return _filter
def _check_formats(self, formats):
for f in formats:
self.to_screen('[info] Testing format %s' % f['format_id'])
path = self.get_output_path('temp')
if not self._ensure_dir_exists(f'{path}/'):
continue
temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
temp_file.close()
try:
success, _ = self.dl(temp_file.name, f, test=True)
except (DownloadError, OSError, ValueError) + network_exceptions:
success = False
finally:
if os.path.exists(temp_file.name):
try:
os.remove(temp_file.name)
except OSError:
self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
if success:
yield f
else:
self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
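    # Pick the default format spec: plain 'best/bestvideo+bestaudio' when merging is
    # not possible (no working merger, live stream without --live-from-start, or
    # output to stdout), otherwise 'bestvideo*+bestaudio/best', or the youtube-dl
    # compatible 'bestvideo+bestaudio/best' when multiple audio streams or compat
    # options request it.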
def _default_format_spec(self, info_dict, download=True):
def can_merge():
merger = FFmpegMergerPP(self)
return merger.available and merger.can_merge()
prefer_best = (
not self.params.get('simulate')
and download
and (
not can_merge()
or info_dict.get('is_live') and not self.params.get('live_from_start')
or self.outtmpl_dict['default'] == '-'))
compat = (
prefer_best
or self.params.get('allow_multiple_audio_streams', False)
or 'format-spec' in self.params['compat_opts'])
return (
'best/bestvideo+bestaudio' if prefer_best
else 'bestvideo*+bestaudio/best' if not compat
else 'bestvideo+bestaudio/best')
def build_format_selector(self, format_spec):
def syntax_error(note, start):
message = (
'Invalid format specification: '
'{}\n\t{}\n\t{}^'.format(note, format_spec, ' ' * start[1]))
return SyntaxError(message)
PICKFIRST = 'PICKFIRST'
MERGE = 'MERGE'
SINGLE = 'SINGLE'
GROUP = 'GROUP'
FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
'video': self.params.get('allow_multiple_video_streams', False)}
check_formats = self.params.get('check_formats') == 'selected'
def _parse_filter(tokens):
filter_parts = []
for type, string, start, _, _ in tokens:
if type == tokenize.OP and string == ']':
return ''.join(filter_parts)
else:
filter_parts.append(string)
def _remove_unused_ops(tokens):
# Remove operators that we don't use and join them with the surrounding strings
# for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
ALLOWED_OPS = ('/', '+', ',', '(', ')')
last_string, last_start, last_end, last_line = None, None, None, None
for type, string, start, end, line in tokens:
if type == tokenize.OP and string == '[':
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
last_string = None
yield type, string, start, end, line
# everything inside brackets will be handled by _parse_filter
for type, string, start, end, line in tokens:
yield type, string, start, end, line
if type == tokenize.OP and string == ']':
break
elif type == tokenize.OP and string in ALLOWED_OPS:
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
last_string = None
yield type, string, start, end, line
elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
if not last_string:
last_string = string
last_start = start
last_end = end
else:
last_string += string
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
selectors = []
current_selector = None
for type, string, start, _, _ in tokens:
# ENCODING is only defined in python 3.x
if type == getattr(tokenize, 'ENCODING', None):
continue
elif type in [tokenize.NAME, tokenize.NUMBER]:
current_selector = FormatSelector(SINGLE, string, [])
elif type == tokenize.OP:
if string == ')':
if not inside_group:
# ')' will be handled by the parentheses group
tokens.restore_last_token()
break
elif inside_merge and string in ['/', ',']:
tokens.restore_last_token()
break
elif inside_choice and string == ',':
tokens.restore_last_token()
break
elif string == ',':
if not current_selector:
raise syntax_error('"," must follow a format selector', start)
selectors.append(current_selector)
current_selector = None
elif string == '/':
if not current_selector:
raise syntax_error('"/" must follow a format selector', start)
first_choice = current_selector
second_choice = _parse_format_selection(tokens, inside_choice=True)
current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
elif string == '[':
if not current_selector:
current_selector = FormatSelector(SINGLE, 'best', [])
format_filter = _parse_filter(tokens)
current_selector.filters.append(format_filter)
elif string == '(':
if current_selector:
raise syntax_error('Unexpected "("', start)
group = _parse_format_selection(tokens, inside_group=True)
current_selector = FormatSelector(GROUP, group, [])
elif string == '+':
if not current_selector:
raise syntax_error('Unexpected "+"', start)
selector_1 = current_selector
selector_2 = _parse_format_selection(tokens, inside_merge=True)
if not selector_2:
raise syntax_error('Expected a selector', start)
current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
else:
raise syntax_error(f'Operator not recognized: "{string}"', start)
elif type == tokenize.ENDMARKER:
break
if current_selector:
selectors.append(current_selector)
return selectors
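        # _merge implements the '+' operator: it combines a (video, audio) pair into
        # one pseudo-format carrying 'requested_formats', dropping surplus streams
        # when multiple audio/video streams are not allowed and picking a suitable
        # output container (merge_output_format, the single video's ext, or mkv).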
def _merge(formats_pair):
format_1, format_2 = formats_pair
formats_info = []
formats_info.extend(format_1.get('requested_formats', (format_1,)))
formats_info.extend(format_2.get('requested_formats', (format_2,)))
if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
get_no_more = {'video': False, 'audio': False}
for (i, fmt_info) in enumerate(formats_info):
if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
formats_info.pop(i)
continue
for aud_vid in ['audio', 'video']:
if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
if get_no_more[aud_vid]:
formats_info.pop(i)
break
get_no_more[aud_vid] = True
if len(formats_info) == 1:
return formats_info[0]
video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
output_ext = self.params.get('merge_output_format')
if not output_ext:
if the_only_video:
output_ext = the_only_video['ext']
elif the_only_audio and not video_fmts:
output_ext = the_only_audio['ext']
else:
output_ext = 'mkv'
filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))
new_dict = {
'requested_formats': formats_info,
'format': '+'.join(filtered('format')),
'format_id': '+'.join(filtered('format_id')),
'ext': output_ext,
'protocol': '+'.join(map(determine_protocol, formats_info)),
'language': '+'.join(orderedSet(filtered('language'))) or None,
'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
'tbr': sum(filtered('tbr', 'vbr', 'abr')),
}
if the_only_video:
new_dict.update({
'width': the_only_video.get('width'),
'height': the_only_video.get('height'),
'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
'fps': the_only_video.get('fps'),
'dynamic_range': the_only_video.get('dynamic_range'),
'vcodec': the_only_video.get('vcodec'),
'vbr': the_only_video.get('vbr'),
'stretched_ratio': the_only_video.get('stretched_ratio'),
})
if the_only_audio:
new_dict.update({
'acodec': the_only_audio.get('acodec'),
'abr': the_only_audio.get('abr'),
'asr': the_only_audio.get('asr'),
})
return new_dict
def _check_formats(formats):
if not check_formats:
yield from formats
return
yield from self._check_formats(formats)
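        # Recursively turn the parsed selector tree (GROUP / PICKFIRST / MERGE /
        # SINGLE) into a generator over the candidate formats in ctx, applying any
        # attached "[...]" filters in final_selector.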
def _build_selector_function(selector):
if isinstance(selector, list): # ,
fs = [_build_selector_function(s) for s in selector]
def selector_function(ctx):
for f in fs:
yield from f(ctx)
return selector_function
elif selector.type == GROUP: # ()
selector_function = _build_selector_function(selector.selector)
elif selector.type == PICKFIRST: # /
fs = [_build_selector_function(s) for s in selector.selector]
def selector_function(ctx):
for f in fs:
picked_formats = list(f(ctx))
if picked_formats:
return picked_formats
return []
elif selector.type == MERGE: # +
selector_1, selector_2 = map(_build_selector_function, selector.selector)
def selector_function(ctx):
for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
yield _merge(pair)
elif selector.type == SINGLE: # atom
format_spec = selector.selector or 'best'
# TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
if format_spec == 'all':
def selector_function(ctx):
yield from _check_formats(ctx['formats'][::-1])
elif format_spec == 'mergeall':
def selector_function(ctx):
formats = list(_check_formats(
f for f in ctx['formats'] if f.get('vcodec') != 'none' or f.get('acodec') != 'none'))
if not formats:
return
merged_format = formats[-1]
for f in formats[-2::-1]:
merged_format = _merge((merged_format, f))
yield merged_format
else:
                    format_fallback, separate_fallback, format_reverse, format_idx = False, None, True, 1
mobj = re.match(
r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
format_spec)
if mobj is not None:
format_idx = int_or_none(mobj.group('n'), default=1)
format_reverse = mobj.group('bw')[0] == 'b'
format_type = (mobj.group('type') or [None])[0]
not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
format_modified = mobj.group('mod') is not None
format_fallback = not format_type and not format_modified # for b, w
_filter_f = (
(lambda f: f.get('%scodec' % format_type) != 'none')
if format_type and format_modified # bv*, ba*, wv*, wa*
else (lambda f: f.get('%scodec' % not_format_type) == 'none')
if format_type # bv, ba, wv, wa
else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
if not format_modified # b, w
else lambda f: True) # b*, w*
filter_f = lambda f: _filter_f(f) and (
f.get('vcodec') != 'none' or f.get('acodec') != 'none')
else:
if format_spec in self._format_selection_exts['audio']:
filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
elif format_spec in self._format_selection_exts['video']:
filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
                        separate_fallback = lambda f: f.get('ext') == format_spec and f.get('vcodec') != 'none'
elif format_spec in self._format_selection_exts['storyboards']:
filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
else:
filter_f = lambda f: f.get('format_id') == format_spec # id
def selector_function(ctx):
formats = list(ctx['formats'])
matches = list(filter(filter_f, formats)) if filter_f is not None else formats
if not matches:
if format_fallback and ctx['incomplete_formats']:
# for extractors with incomplete formats (audio only (soundcloud)
# or video only (imgur)) best/worst will fallback to
# best/worst {video,audio}-only format
matches = formats
                            elif separate_fallback and not ctx['has_merged_format']:
# for compatibility with youtube-dl when there is no pre-merged format
matches = list(filter(seperate_fallback, formats))
matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
try:
yield matches[format_idx - 1]
except LazyList.IndexError:
return
filters = [self._build_format_filter(f) for f in selector.filters]
def final_selector(ctx):
ctx_copy = dict(ctx)
for _filter in filters:
ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
return selector_function(ctx_copy)
return final_selector
stream = io.BytesIO(format_spec.encode())
try:
tokens = list(_remove_unused_ops(tokenize.tokenize(stream.readline)))
except tokenize.TokenError:
raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
class TokenIterator:
def __init__(self, tokens):
self.tokens = tokens
self.counter = 0
def __iter__(self):
return self
def __next__(self):
if self.counter >= len(self.tokens):
raise StopIteration()
value = self.tokens[self.counter]
self.counter += 1
return value
next = __next__
def restore_last_token(self):
self.counter -= 1
parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
return _build_selector_function(parsed_selector)
def _calc_headers(self, info_dict):
res = merge_headers(self.params['http_headers'], info_dict.get('http_headers') or {})
cookies = self._calc_cookies(info_dict['url'])
if cookies:
res['Cookie'] = cookies
if 'X-Forwarded-For' not in res:
x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
if x_forwarded_for_ip:
res['X-Forwarded-For'] = x_forwarded_for_ip
return res
def _calc_cookies(self, url):
pr = sanitized_Request(url)
self.cookiejar.add_cookie_header(pr)
return pr.get_header('Cookie')
def _sort_thumbnails(self, thumbnails):
thumbnails.sort(key=lambda t: (
t.get('preference') if t.get('preference') is not None else -1,
t.get('width') if t.get('width') is not None else -1,
t.get('height') if t.get('height') is not None else -1,
t.get('id') if t.get('id') is not None else '',
t.get('url')))
def _sanitize_thumbnails(self, info_dict):
thumbnails = info_dict.get('thumbnails')
if thumbnails is None:
thumbnail = info_dict.get('thumbnail')
if thumbnail:
info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
if not thumbnails:
return
def check_thumbnails(thumbnails):
for t in thumbnails:
self.to_screen(f'[info] Testing thumbnail {t["id"]}')
try:
self.urlopen(HEADRequest(t['url']))
except network_exceptions as err:
self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
continue
yield t
self._sort_thumbnails(thumbnails)
for i, t in enumerate(thumbnails):
if t.get('id') is None:
t['id'] = '%d' % i
if t.get('width') and t.get('height'):
t['resolution'] = '%dx%d' % (t['width'], t['height'])
t['url'] = sanitize_url(t['url'])
if self.params.get('check_formats') is True:
info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
else:
info_dict['thumbnails'] = thumbnails
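    # Fill in derived metadata shared by videos and playlists: a generated title when
    # the extractor gave none, duration_string, the *_date fields derived from their
    # *_timestamp counterparts, live_status/is_live/was_live consistency and
    # "Chapter/Season/Episode N" titles from the corresponding *_number fields.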
def _fill_common_fields(self, info_dict, is_video=True):
# TODO: move sanitization here
if is_video:
# playlists are allowed to lack "title"
title = info_dict.get('title', NO_DEFAULT)
if title is NO_DEFAULT:
raise ExtractorError('Missing "title" field in extractor result',
video_id=info_dict['id'], ie=info_dict['extractor'])
info_dict['fulltitle'] = title
if not title:
if title == '':
self.write_debug('Extractor gave empty title. Creating a generic title')
else:
self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
info_dict['title'] = f'{info_dict["extractor"].replace(":", "-")} video #{info_dict["id"]}'
if info_dict.get('duration') is not None:
info_dict['duration_string'] = formatSeconds(info_dict['duration'])
for ts_key, date_key in (
('timestamp', 'upload_date'),
('release_timestamp', 'release_date'),
('modified_timestamp', 'modified_date'),
):
if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
# Working around out-of-range timestamp values (e.g. negative ones on Windows,
# see http://bugs.python.org/issue1646728)
with contextlib.suppress(ValueError, OverflowError, OSError):
upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
info_dict[date_key] = upload_date.strftime('%Y%m%d')
live_keys = ('is_live', 'was_live')
live_status = info_dict.get('live_status')
if live_status is None:
for key in live_keys:
if info_dict.get(key) is False:
continue
if info_dict.get(key):
live_status = key
break
if all(info_dict.get(key) is False for key in live_keys):
live_status = 'not_live'
if live_status:
info_dict['live_status'] = live_status
for key in live_keys:
if info_dict.get(key) is None:
info_dict[key] = (live_status == key)
# Auto generate title fields corresponding to the *_number fields when missing
# in order to always have clean titles. This is very common for TV series.
for field in ('chapter', 'season', 'episode'):
if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
def _raise_pending_errors(self, info):
err = info.pop('__pending_error', None)
if err:
self.report_error(err, tb=False)
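    # Main per-video pipeline: sanitize the extractor output (ids, numeric fields,
    # thumbnails, subtitles), normalize the 'formats' list (format_id, ext, protocol,
    # http_headers), run the pre-processors and _match_entry, resolve the format
    # selector and finally hand each selected format (and requested download range)
    # to process_info for downloading.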
def process_video_result(self, info_dict, download=True):
assert info_dict.get('_type', 'video') == 'video'
self._num_videos += 1
if 'id' not in info_dict:
raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
elif not info_dict.get('id'):
raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])
def report_force_conversion(field, field_not, conversion):
self.report_warning(
'"%s" field is not %s - forcing %s conversion, there is an error in extractor'
% (field, field_not, conversion))
def sanitize_string_field(info, string_field):
field = info.get(string_field)
if field is None or isinstance(field, compat_str):
return
report_force_conversion(string_field, 'a string', 'string')
info[string_field] = compat_str(field)
def sanitize_numeric_fields(info):
for numeric_field in self._NUMERIC_FIELDS:
field = info.get(numeric_field)
if field is None or isinstance(field, (int, float)):
continue
report_force_conversion(numeric_field, 'numeric', 'int')
info[numeric_field] = int_or_none(field)
sanitize_string_field(info_dict, 'id')
sanitize_numeric_fields(info_dict)
if (info_dict.get('duration') or 0) <= 0 and info_dict.pop('duration', None):
self.report_warning('"duration" field is negative, there is an error in extractor')
if 'playlist' not in info_dict:
# It isn't part of a playlist
info_dict['playlist'] = None
info_dict['playlist_index'] = None
self._sanitize_thumbnails(info_dict)
thumbnail = info_dict.get('thumbnail')
thumbnails = info_dict.get('thumbnails')
if thumbnail:
info_dict['thumbnail'] = sanitize_url(thumbnail)
elif thumbnails:
info_dict['thumbnail'] = thumbnails[-1]['url']
if info_dict.get('display_id') is None and 'id' in info_dict:
info_dict['display_id'] = info_dict['id']
self._fill_common_fields(info_dict)
for cc_kind in ('subtitles', 'automatic_captions'):
cc = info_dict.get(cc_kind)
if cc:
for _, subtitle in cc.items():
for subtitle_format in subtitle:
if subtitle_format.get('url'):
subtitle_format['url'] = sanitize_url(subtitle_format['url'])
if subtitle_format.get('ext') is None:
subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
automatic_captions = info_dict.get('automatic_captions')
subtitles = info_dict.get('subtitles')
info_dict['requested_subtitles'] = self.process_subtitles(
info_dict['id'], subtitles, automatic_captions)
if info_dict.get('formats') is None:
# There's only one format available
formats = [info_dict]
else:
formats = info_dict['formats']
# or None ensures --clean-infojson removes it
info_dict['_has_drm'] = any(f.get('has_drm') for f in formats) or None
if not self.params.get('allow_unplayable_formats'):
formats = [f for f in formats if not f.get('has_drm')]
if info_dict['_has_drm'] and all(
f.get('acodec') == f.get('vcodec') == 'none' for f in formats):
self.report_warning(
'This video is DRM protected and only images are available for download. '
'Use --list-formats to see them')
get_from_start = not info_dict.get('is_live') or bool(self.params.get('live_from_start'))
if not get_from_start:
info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
if info_dict.get('is_live') and formats:
formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
if get_from_start and not formats:
self.raise_no_formats(info_dict, msg=(
'--live-from-start is passed, but there are no formats that can be downloaded from the start. '
'If you want to download from the current time, use --no-live-from-start'))
if not formats:
self.raise_no_formats(info_dict)
def is_wellformed(f):
url = f.get('url')
if not url:
self.report_warning(
'"url" field is missing or empty - skipping format, '
'there is an error in extractor')
return False
if isinstance(url, bytes):
sanitize_string_field(f, 'url')
return True
# Filter out malformed formats for better extraction robustness
formats = list(filter(is_wellformed, formats))
formats_dict = {}
# We check that all the formats have the format and format_id fields
for i, format in enumerate(formats):
sanitize_string_field(format, 'format_id')
sanitize_numeric_fields(format)
format['url'] = sanitize_url(format['url'])
if not format.get('format_id'):
format['format_id'] = compat_str(i)
else:
# Sanitize format_id from characters used in format selector expression
format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
format_id = format['format_id']
if format_id not in formats_dict:
formats_dict[format_id] = []
formats_dict[format_id].append(format)
# Make sure all formats have unique format_id
common_exts = set(itertools.chain(*self._format_selection_exts.values()))
for format_id, ambiguous_formats in formats_dict.items():
            ambiguous_id = len(ambiguous_formats) > 1
            for i, format in enumerate(ambiguous_formats):
                if ambiguous_id:
format['format_id'] = '%s-%d' % (format_id, i)
if format.get('ext') is None:
format['ext'] = determine_ext(format['url']).lower()
# Ensure there is no conflict between id and ext in format selection
# See https://github.com/yt-dlp/yt-dlp/issues/1282
if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
format['format_id'] = 'f%s' % format['format_id']
for i, format in enumerate(formats):
if format.get('format') is None:
format['format'] = '{id} - {res}{note}'.format(
id=format['format_id'],
res=self.format_resolution(format),
note=format_field(format, 'format_note', ' (%s)'),
)
if format.get('protocol') is None:
format['protocol'] = determine_protocol(format)
if format.get('resolution') is None:
format['resolution'] = self.format_resolution(format, default=None)
if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
format['dynamic_range'] = 'SDR'
if (info_dict.get('duration') and format.get('tbr')
and not format.get('filesize') and not format.get('filesize_approx')):
format['filesize_approx'] = int(info_dict['duration'] * format['tbr'] * (1024 / 8))
# Add HTTP headers, so that external programs can use them from the
# json output
full_format_info = info_dict.copy()
full_format_info.update(format)
format['http_headers'] = self._calc_headers(full_format_info)
# Remove private housekeeping stuff
if '__x_forwarded_for_ip' in info_dict:
del info_dict['__x_forwarded_for_ip']
if self.params.get('check_formats') is True:
formats = LazyList(self._check_formats(formats[::-1]), reverse=True)
if not formats or formats[0] is not info_dict:
            # only set the 'formats' field if the original info_dict lists them;
            # otherwise we end up with a circular reference: the first (and unique)
            # element in the 'formats' field in info_dict would be info_dict itself,
            # which can't be exported to json
info_dict['formats'] = formats
info_dict, _ = self.pre_process(info_dict)
if self._match_entry(info_dict, incomplete=self._format_fields) is not None:
return info_dict
self.post_extract(info_dict)
info_dict, _ = self.pre_process(info_dict, 'after_filter')
# The pre-processors may have modified the formats
formats = info_dict.get('formats', [info_dict])
list_only = self.params.get('simulate') is None and (
self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles'))
interactive_format_selection = not list_only and self.format_selector == '-'
if self.params.get('list_thumbnails'):
self.list_thumbnails(info_dict)
if self.params.get('listsubtitles'):
if 'automatic_captions' in info_dict:
self.list_subtitles(
info_dict['id'], automatic_captions, 'automatic captions')
self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
if self.params.get('listformats') or interactive_format_selection:
self.list_formats(info_dict)
if list_only:
# Without this printing, -F --print-json will not work
self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
return info_dict
format_selector = self.format_selector
if format_selector is None:
req_format = self._default_format_spec(info_dict, download=download)
self.write_debug('Default format spec: %s' % req_format)
format_selector = self.build_format_selector(req_format)
while True:
if interactive_format_selection:
req_format = input(
self._format_screen('\nEnter format selector: ', self.Styles.EMPHASIS))
try:
format_selector = self.build_format_selector(req_format)
except SyntaxError as err:
self.report_error(err, tb=False, is_error=False)
continue
formats_to_download = list(format_selector({
'formats': formats,
'has_merged_format': any('none' not in (f.get('acodec'), f.get('vcodec')) for f in formats),
'incomplete_formats': (
# All formats are video-only or
all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
# all formats are audio-only
or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats)),
}))
if interactive_format_selection and not formats_to_download:
self.report_error('Requested format is not available', tb=False, is_error=False)
continue
break
if not formats_to_download:
if not self.params.get('ignore_no_formats_error'):
raise ExtractorError(
'Requested format is not available. Use --list-formats for a list of available formats',
expected=True, video_id=info_dict['id'], ie=info_dict['extractor'])
self.report_warning('Requested format is not available')
# Process what we can, even without any available formats.
formats_to_download = [{}]
requested_ranges = self.params.get('download_ranges')
if requested_ranges:
requested_ranges = tuple(requested_ranges(info_dict, self))
best_format, downloaded_formats = formats_to_download[-1], []
if download:
if best_format:
def to_screen(*msg):
self.to_screen(f'[info] {info_dict["id"]}: {" ".join(", ".join(variadic(m)) for m in msg)}')
to_screen(f'Downloading {len(formats_to_download)} format(s):',
(f['format_id'] for f in formats_to_download))
if requested_ranges:
to_screen(f'Downloading {len(requested_ranges)} time ranges:',
(f'{int(c["start_time"])}-{int(c["end_time"])}' for c in requested_ranges))
max_downloads_reached = False
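        # Download each selected format once per requested time range (or just once when no ranges were given)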
for fmt, chapter in itertools.product(formats_to_download, requested_ranges or [{}]):
new_info = self._copy_infodict(info_dict)
new_info.update(fmt)
if chapter:
new_info.update({
'section_start': chapter.get('start_time'),
'section_end': chapter.get('end_time', 0),
'section_title': chapter.get('title'),
'section_number': chapter.get('index'),
})
downloaded_formats.append(new_info)
try:
self.process_info(new_info)
except MaxDownloadsReached:
max_downloads_reached = True
self._raise_pending_errors(new_info)
# Remove copied info
for key, val in tuple(new_info.items()):
if info_dict.get(key) == val:
new_info.pop(key)
if max_downloads_reached:
break
write_archive = {f.get('__write_download_archive', False) for f in downloaded_formats}
assert write_archive.issubset({True, False, 'ignore'})
if True in write_archive and False not in write_archive:
self.record_download_archive(info_dict)
info_dict['requested_downloads'] = downloaded_formats
info_dict = self.run_all_pps('after_video', info_dict)
if max_downloads_reached:
raise MaxDownloadsReached()
# We update the info dict with the selected best quality format (backwards compatibility)
info_dict.update(best_format)
return info_dict
def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
"""Select the requested subtitles and their format"""
available_subs, normal_sub_langs = {}, []
if normal_subtitles and self.params.get('writesubtitles'):
available_subs.update(normal_subtitles)
normal_sub_langs = tuple(normal_subtitles.keys())
if automatic_captions and self.params.get('writeautomaticsub'):
for lang, cap_info in automatic_captions.items():
if lang not in available_subs:
available_subs[lang] = cap_info
if (not self.params.get('writesubtitles') and not
self.params.get('writeautomaticsub') or not
available_subs):
return None
all_sub_langs = tuple(available_subs.keys())
if self.params.get('allsubtitles', False):
requested_langs = all_sub_langs
elif self.params.get('subtitleslangs', False):
# A list is used so that the order of languages will be the same as
# given in subtitleslangs. See https://github.com/yt-dlp/yt-dlp/issues/1041
requested_langs = []
for lang_re in self.params.get('subtitleslangs'):
discard = lang_re[0] == '-'
if discard:
lang_re = lang_re[1:]
if lang_re == 'all':
if discard:
requested_langs = []
else:
requested_langs.extend(all_sub_langs)
continue
current_langs = filter(re.compile(lang_re + '$').match, all_sub_langs)
if discard:
for lang in current_langs:
while lang in requested_langs:
requested_langs.remove(lang)
else:
requested_langs.extend(current_langs)
requested_langs = orderedSet(requested_langs)
elif normal_sub_langs:
requested_langs = ['en'] if 'en' in normal_sub_langs else normal_sub_langs[:1]
else:
requested_langs = ['en'] if 'en' in all_sub_langs else all_sub_langs[:1]
if requested_langs:
self.write_debug('Downloading subtitles: %s' % ', '.join(requested_langs))
formats_query = self.params.get('subtitlesformat', 'best')
formats_preference = formats_query.split('/') if formats_query else []
subs = {}
for lang in requested_langs:
formats = available_subs.get(lang)
if formats is None:
self.report_warning(f'{lang} subtitles not available for {video_id}')
continue
for ext in formats_preference:
if ext == 'best':
f = formats[-1]
break
matches = list(filter(lambda f: f['ext'] == ext, formats))
if matches:
f = matches[-1]
break
else:
f = formats[-1]
self.report_warning(
'No subtitle format found matching "%s" for language %s, '
'using %s' % (formats_query, lang, f['ext']))
subs[lang] = f
return subs
def _forceprint(self, key, info_dict):
if info_dict is None:
return
info_copy = info_dict.copy()
info_copy['formats_table'] = self.render_formats_table(info_dict)
info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))
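        # 'field=' is printed as "field = <repr>", a bare field name becomes '%(field)s',
        # and anything else is passed through as a full output template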
def format_tmpl(tmpl):
mobj = re.match(r'\w+(=?)$', tmpl)
if mobj and mobj.group(1):
return f'{tmpl[:-1]} = %({tmpl[:-1]})r'
elif mobj:
return f'%({tmpl})s'
return tmpl
for tmpl in self.params['forceprint'].get(key, []):
self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))
for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
filename = self.prepare_filename(info_dict, outtmpl=file_tmpl)
tmpl = format_tmpl(tmpl)
self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
if self._ensure_dir_exists(filename):
with open(filename, 'a', encoding='utf-8') as f:
f.write(self.evaluate_outtmpl(tmpl, info_copy) + '\n')
def __forced_printings(self, info_dict, filename, incomplete):
def print_mandatory(field, actual_field=None):
if actual_field is None:
actual_field = field
if (self.params.get('force%s' % field, False)
and (not incomplete or info_dict.get(actual_field) is not None)):
self.to_stdout(info_dict[actual_field])
def print_optional(field):
if (self.params.get('force%s' % field, False)
and info_dict.get(field) is not None):
self.to_stdout(info_dict[field])
info_dict = info_dict.copy()
if filename is not None:
info_dict['filename'] = filename
if info_dict.get('requested_formats') is not None:
# For RTMP URLs, also include the playpath
info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
elif info_dict.get('url'):
info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')
if (self.params.get('forcejson')
or self.params['forceprint'].get('video')
or self.params['print_to_file'].get('video')):
self.post_extract(info_dict)
self._forceprint('video', info_dict)
print_mandatory('title')
print_mandatory('id')
print_mandatory('url', 'urls')
print_optional('thumbnail')
print_optional('description')
print_optional('filename')
if self.params.get('forceduration') and info_dict.get('duration') is not None:
self.to_stdout(formatSeconds(info_dict['duration']))
print_mandatory('format')
if self.params.get('forcejson'):
self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
def dl(self, name, info, subtitle=False, test=False):
if not info.get('url'):
self.raise_no_formats(info, True)
if test:
verbose = self.params.get('verbose')
params = {
'test': True,
'quiet': self.params.get('quiet') or not verbose,
'verbose': verbose,
'noprogress': not verbose,
'nopart': True,
'skip_unavailable_fragments': False,
'keep_fragments': False,
'overwrites': True,
'_no_ytdl_file': True,
}
else:
params = self.params
fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
if not test:
for ph in self._progress_hooks:
fd.add_progress_hook(ph)
urls = '", "'.join(
(f['url'].split(',')[0] + ',<data>' if f['url'].startswith('data:') else f['url'])
for f in info.get('requested_formats', []) or [info])
self.write_debug(f'Invoking {fd.FD_NAME} downloader on "{urls}"')
        # Note: Ideally info should be deep-copied so that hooks cannot modify it.
# But it may contain objects that are not deep-copyable
new_info = self._copy_infodict(info)
if new_info.get('http_headers') is None:
new_info['http_headers'] = self._calc_headers(new_info)
return fd.download(name, new_info, subtitle)
def existing_file(self, filepaths, *, default_overwrite=True):
existing_files = list(filter(os.path.exists, orderedSet(filepaths)))
if existing_files and not self.params.get('overwrites', default_overwrite):
return existing_files[0]
for file in existing_files:
self.report_file_delete(file)
os.remove(file)
return None
def process_info(self, info_dict):
"""Process a single resolved IE result. (Modifies it in-place)"""
assert info_dict.get('_type', 'video') == 'video'
original_infodict = info_dict
if 'format' not in info_dict and 'ext' in info_dict:
info_dict['format'] = info_dict['ext']
# This is mostly just for backward compatibility of process_info
# As a side-effect, this allows for format-specific filters
if self._match_entry(info_dict) is not None:
info_dict['__write_download_archive'] = 'ignore'
return
# Does nothing under normal operation - for backward compatibility of process_info
self.post_extract(info_dict)
self._num_downloads += 1
# info_dict['_filename'] needs to be set for backward compatibility
info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
temp_filename = self.prepare_filename(info_dict, 'temp')
files_to_move = {}
# Forced printings
self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
def check_max_downloads():
if self._num_downloads >= float(self.params.get('max_downloads') or 'inf'):
raise MaxDownloadsReached()
if self.params.get('simulate'):
info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
check_max_downloads()
return
if full_filename is None:
return
if not self._ensure_dir_exists(encodeFilename(full_filename)):
return
if not self._ensure_dir_exists(encodeFilename(temp_filename)):
return
if self._write_description('video', info_dict,
self.prepare_filename(info_dict, 'description')) is None:
return
sub_files = self._write_subtitles(info_dict, temp_filename)
if sub_files is None:
return
files_to_move.update(dict(sub_files))
thumb_files = self._write_thumbnails(
'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
if thumb_files is None:
return
files_to_move.update(dict(thumb_files))
infofn = self.prepare_filename(info_dict, 'infojson')
_infojson_written = self._write_info_json('video', info_dict, infofn)
if _infojson_written:
info_dict['infojson_filename'] = infofn
# For backward compatibility, even though it was a private field
info_dict['__infojson_filename'] = infofn
elif _infojson_written is None:
return
# Note: Annotations are deprecated
annofn = None
if self.params.get('writeannotations', False):
annofn = self.prepare_filename(info_dict, 'annotation')
if annofn:
if not self._ensure_dir_exists(encodeFilename(annofn)):
return
if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
self.to_screen('[info] Video annotations are already present')
elif not info_dict.get('annotations'):
self.report_warning('There are no annotations to write.')
else:
try:
self.to_screen('[info] Writing video annotations to: ' + annofn)
with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
annofile.write(info_dict['annotations'])
except (KeyError, TypeError):
self.report_warning('There are no annotations to write.')
except OSError:
self.report_error('Cannot write annotations file: ' + annofn)
return
# Write internet shortcut files
def _write_link_file(link_type):
url = try_get(info_dict['webpage_url'], iri_to_uri)
if not url:
self.report_warning(
f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
return True
linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
if not self._ensure_dir_exists(encodeFilename(linkfn)):
return False
            if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
return True
try:
self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
newline='\r\n' if link_type == 'url' else '\n') as linkfile:
template_vars = {'url': url}
if link_type == 'desktop':
template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
except OSError:
self.report_error(f'Cannot write internet shortcut {linkfn}')
return False
return True
write_links = {
'url': self.params.get('writeurllink'),
'webloc': self.params.get('writewebloclink'),
'desktop': self.params.get('writedesktoplink'),
}
if self.params.get('writelink'):
link_type = ('webloc' if sys.platform == 'darwin'
else 'desktop' if sys.platform.startswith('linux')
else 'url')
write_links[link_type] = True
if any(should_write and not _write_link_file(link_type)
for link_type, should_write in write_links.items()):
return
def replace_info_dict(new_info):
nonlocal info_dict
if new_info == info_dict:
return
info_dict.clear()
info_dict.update(new_info)
new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
replace_info_dict(new_info)
if self.params.get('skip_download'):
info_dict['filepath'] = temp_filename
info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
info_dict['__files_to_move'] = files_to_move
replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
else:
# Download
info_dict.setdefault('__postprocessors', [])
try:
def existing_video_file(*filepaths):
ext = info_dict.get('ext')
converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
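                    # Check each file's post-conversion (final_ext) name first, interleaved with its original name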
file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
default_overwrite=False)
if file:
info_dict['ext'] = os.path.splitext(file)[1][1:]
return file
success = True
merger, fd = FFmpegMergerPP(self), None
if info_dict.get('protocol') or info_dict.get('url'):
fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
if fd is not FFmpegFD and (
info_dict.get('section_start') or info_dict.get('section_end')):
msg = ('This format cannot be partially downloaded' if merger.available
else 'You have requested downloading the video partially, but ffmpeg is not installed')
self.report_error(f'{msg}. Aborting')
return
if info_dict.get('requested_formats') is not None:
def compatible_formats(formats):
# TODO: some formats actually allow this (mkv, webm, ogg, mp4), but not all of them.
video_formats = [format for format in formats if format.get('vcodec') != 'none']
audio_formats = [format for format in formats if format.get('acodec') != 'none']
if len(video_formats) > 2 or len(audio_formats) > 2:
return False
# Check extension
exts = {format.get('ext') for format in formats}
COMPATIBLE_EXTS = (
{'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'},
{'webm'},
)
for ext_sets in COMPATIBLE_EXTS:
if ext_sets.issuperset(exts):
return True
# TODO: Check acodec/vcodec
return False
requested_formats = info_dict['requested_formats']
old_ext = info_dict['ext']
if self.params.get('merge_output_format') is None:
if not compatible_formats(requested_formats):
info_dict['ext'] = 'mkv'
self.report_warning(
'Requested formats are incompatible for merge and will be merged into mkv')
if (info_dict['ext'] == 'webm'
and info_dict.get('thumbnails')
# check with type instead of pp_key, __name__, or isinstance
                                # since we don't want any custom PPs to trigger this
and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])): # noqa: E721
info_dict['ext'] = 'mkv'
self.report_warning(
'webm doesn\'t support embedding a thumbnail, mkv will be used')
new_ext = info_dict['ext']
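                    # Swap in the merge target extension; the existing extension is stripped only when it is the old or new ext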
def correct_ext(filename, ext=new_ext):
if filename == '-':
return filename
filename_real_ext = os.path.splitext(filename)[1][1:]
filename_wo_ext = (
os.path.splitext(filename)[0]
if filename_real_ext in (old_ext, new_ext)
else filename)
return f'{filename_wo_ext}.{ext}'
# Ensure filename always has a correct extension for successful merge
full_filename = correct_ext(full_filename)
temp_filename = correct_ext(temp_filename)
dl_filename = existing_video_file(full_filename, temp_filename)
info_dict['__real_download'] = False
downloaded = []
if dl_filename is not None:
self.report_file_already_downloaded(dl_filename)
elif fd:
for f in requested_formats if fd != FFmpegFD else []:
f['filepath'] = fname = prepend_extension(
correct_ext(temp_filename, info_dict['ext']),
'f%s' % f['format_id'], info_dict['ext'])
downloaded.append(fname)
info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
success, real_download = self.dl(temp_filename, info_dict)
info_dict['__real_download'] = real_download
else:
if self.params.get('allow_unplayable_formats'):
self.report_warning(
'You have requested merging of multiple formats '
'while also allowing unplayable formats to be downloaded. '
'The formats won\'t be merged to prevent data corruption.')
elif not merger.available:
msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
if not self.params.get('ignoreerrors'):
self.report_error(f'{msg}. Aborting due to --abort-on-error')
return
self.report_warning(f'{msg}. The formats won\'t be merged')
if temp_filename == '-':
reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
else 'but the formats are incompatible for simultaneous download' if merger.available
else 'but ffmpeg is not installed')
self.report_warning(
f'You have requested downloading multiple formats to stdout {reason}. '
'The formats will be streamed one after the other')
fname = temp_filename
for f in requested_formats:
new_info = dict(info_dict)
del new_info['requested_formats']
new_info.update(f)
if temp_filename != '-':
fname = prepend_extension(
correct_ext(temp_filename, new_info['ext']),
'f%s' % f['format_id'], new_info['ext'])
if not self._ensure_dir_exists(fname):
return
f['filepath'] = fname
downloaded.append(fname)
partial_success, real_download = self.dl(fname, new_info)
info_dict['__real_download'] = info_dict['__real_download'] or real_download
success = success and partial_success
if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
info_dict['__postprocessors'].append(merger)
info_dict['__files_to_merge'] = downloaded
# Even if there were no downloads, it is being merged only now
info_dict['__real_download'] = True
else:
for file in downloaded:
files_to_move[file] = None
else:
# Just a single file
dl_filename = existing_video_file(full_filename, temp_filename)
if dl_filename is None or dl_filename == temp_filename:
# dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
# So we should try to resume the download
success, real_download = self.dl(temp_filename, info_dict)
info_dict['__real_download'] = real_download
else:
self.report_file_already_downloaded(dl_filename)
dl_filename = dl_filename or temp_filename
info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
except network_exceptions as err:
self.report_error('unable to download video data: %s' % error_to_compat_str(err))
return
except OSError as err:
raise UnavailableVideoError(err)
except (ContentTooShortError, ) as err:
self.report_error(f'content too short (expected {err.expected} bytes and served {err.downloaded})')
return
self._raise_pending_errors(info_dict)
if success and full_filename != '-':
def fixup():
do_fixup = True
fixup_policy = self.params.get('fixup')
vid = info_dict['id']
if fixup_policy in ('ignore', 'never'):
return
elif fixup_policy == 'warn':
do_fixup = 'warn'
elif fixup_policy != 'force':
assert fixup_policy in ('detect_or_warn', None)
if not info_dict.get('__real_download'):
do_fixup = False
def ffmpeg_fixup(cndn, msg, cls):
if not (do_fixup and cndn):
return
elif do_fixup == 'warn':
self.report_warning(f'{vid}: {msg}')
return
pp = cls(self)
if pp.available:
info_dict['__postprocessors'].append(pp)
else:
self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')
stretched_ratio = info_dict.get('stretched_ratio')
ffmpeg_fixup(
stretched_ratio not in (1, None),
f'Non-uniform pixel ratio {stretched_ratio}',
FFmpegFixupStretchedPP)
ffmpeg_fixup(
(info_dict.get('requested_formats') is None
and info_dict.get('container') == 'm4a_dash'
and info_dict.get('ext') == 'm4a'),
'writing DASH m4a. Only some players support this container',
FFmpegFixupM4aPP)
downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
downloader = downloader.FD_NAME if downloader else None
if info_dict.get('requested_formats') is None: # Not necessary if doing merger
ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
FFmpegFixupM3u8PP)
ffmpeg_fixup(info_dict.get('is_live') and downloader == 'DashSegmentsFD',
'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)
ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
ffmpeg_fixup(downloader == 'web_socket_fragment', 'Malformed duration detected', FFmpegFixupDurationPP)
fixup()
try:
replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
except PostProcessingError as err:
self.report_error('Postprocessing: %s' % str(err))
return
try:
for ph in self._post_hooks:
ph(info_dict['filepath'])
except Exception as err:
self.report_error('post hooks: %s' % str(err))
return
info_dict['__write_download_archive'] = True
assert info_dict is original_infodict # Make sure the info_dict was modified in-place
if self.params.get('force_write_download_archive'):
info_dict['__write_download_archive'] = True
check_max_downloads()
def __download_wrapper(self, func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
res = func(*args, **kwargs)
except UnavailableVideoError as e:
self.report_error(e)
except DownloadCancelled as e:
self.to_screen(f'[info] {e}')
if not self.params.get('break_per_url'):
raise
else:
if self.params.get('dump_single_json', False):
self.post_extract(res)
self.to_stdout(json.dumps(self.sanitize_info(res)))
return wrapper
def download(self, url_list):
"""Download a given list of URLs."""
url_list = variadic(url_list) # Passing a single URL is a common mistake
outtmpl = self.outtmpl_dict['default']
if (len(url_list) > 1
and outtmpl != '-'
and '%' not in outtmpl
and self.params.get('max_downloads') != 1):
raise SameFileError(outtmpl)
for url in url_list:
self.__download_wrapper(self.extract_info)(
url, force_generic_extractor=self.params.get('force_generic_extractor', False))
return self._download_retcode
def download_with_info_file(self, info_filename):
with contextlib.closing(fileinput.FileInput(
[info_filename], mode='r',
openhook=fileinput.hook_encoded('utf-8'))) as f:
# FileInput doesn't have a read method, we can't call json.load
info = self.sanitize_info(json.loads('\n'.join(f)), self.params.get('clean_infojson', True))
try:
self.__download_wrapper(self.process_ie_result)(info, download=True)
except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
if not isinstance(e, EntryNotInPlaylist):
self.to_stderr('\r')
webpage_url = info.get('webpage_url')
if webpage_url is not None:
self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
return self.download([webpage_url])
else:
raise
return self._download_retcode
@staticmethod
def sanitize_info(info_dict, remove_private_keys=False):
''' Sanitize the infodict for converting to json '''
if info_dict is None:
return info_dict
info_dict.setdefault('epoch', int(time.time()))
info_dict.setdefault('_type', 'video')
if remove_private_keys:
reject = lambda k, v: v is None or k.startswith('__') or k in {
'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
'entries', 'filepath', '_filename', 'infojson_filename', 'original_url', 'playlist_autonumber',
}
else:
reject = lambda k, v: False
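        # Recursively drop rejected keys and coerce non-JSON-serializable values to their repr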
def filter_fn(obj):
if isinstance(obj, dict):
return {k: filter_fn(v) for k, v in obj.items() if not reject(k, v)}
elif isinstance(obj, (list, tuple, set, LazyList)):
return list(map(filter_fn, obj))
elif obj is None or isinstance(obj, (str, int, float, bool)):
return obj
else:
return repr(obj)
return filter_fn(info_dict)
@staticmethod
def filter_requested_info(info_dict, actually_filter=True):
''' Alias of sanitize_info for backward compatibility '''
return YoutubeDL.sanitize_info(info_dict, actually_filter)
def _delete_downloaded_files(self, *files_to_delete, info={}, msg=None):
for filename in set(filter(None, files_to_delete)):
if msg:
self.to_screen(msg % filename)
try:
os.remove(filename)
except OSError:
self.report_warning(f'Unable to delete file {filename}')
if filename in info.get('__files_to_move', []): # NB: Delete even if None
del info['__files_to_move'][filename]
@staticmethod
def post_extract(info_dict):
def actual_post_extract(info_dict):
if info_dict.get('_type') in ('playlist', 'multi_video'):
for video_dict in info_dict.get('entries', {}):
actual_post_extract(video_dict or {})
return
post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
info_dict.update(post_extractor())
actual_post_extract(info_dict or {})
def run_pp(self, pp, infodict):
files_to_delete = []
if '__files_to_move' not in infodict:
infodict['__files_to_move'] = {}
try:
files_to_delete, infodict = pp.run(infodict)
except PostProcessingError as e:
# Must be True and not 'only_download'
if self.params.get('ignoreerrors') is True:
self.report_error(e)
return infodict
raise
if not files_to_delete:
return infodict
if self.params.get('keepvideo', False):
for f in files_to_delete:
infodict['__files_to_move'].setdefault(f, '')
else:
self._delete_downloaded_files(
*files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
return infodict
def run_all_pps(self, key, info, *, additional_pps=None):
self._forceprint(key, info)
for pp in (additional_pps or []) + self._pps[key]:
info = self.run_pp(pp, info)
return info
def pre_process(self, ie_info, key='pre_process', files_to_move=None):
info = dict(ie_info)
info['__files_to_move'] = files_to_move or {}
try:
info = self.run_all_pps(key, info)
except PostProcessingError as err:
msg = f'Preprocessing: {err}'
info.setdefault('__pending_error', msg)
self.report_error(msg, is_error=False)
return info, info.pop('__files_to_move', None)
def post_process(self, filename, info, files_to_move=None):
"""Run all the postprocessors on the given file."""
info['filepath'] = filename
info['__files_to_move'] = files_to_move or {}
info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
del info['__files_to_move']
return self.run_all_pps('after_move', info)
def _make_archive_id(self, info_dict):
video_id = info_dict.get('id')
if not video_id:
return
# Future-proof against any change in case
# and backwards compatibility with prior versions
extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
if extractor is None:
url = str_or_none(info_dict.get('url'))
if not url:
return
# Try to find matching extractor for the URL and take its ie_key
for ie_key, ie in self._ies.items():
if ie.suitable(url):
extractor = ie_key
break
else:
return
return f'{extractor.lower()} {video_id}'
def in_download_archive(self, info_dict):
fn = self.params.get('download_archive')
if fn is None:
return False
vid_id = self._make_archive_id(info_dict)
if not vid_id:
return False # Incomplete video information
return vid_id in self.archive
def record_download_archive(self, info_dict):
fn = self.params.get('download_archive')
if fn is None:
return
vid_id = self._make_archive_id(info_dict)
assert vid_id
self.write_debug(f'Adding to archive: {vid_id}')
with locked_file(fn, 'a', encoding='utf-8') as archive_file:
archive_file.write(vid_id + '\n')
self.archive.add(vid_id)
@staticmethod
def format_resolution(format, default='unknown'):
if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
return 'audio only'
if format.get('resolution') is not None:
return format['resolution']
if format.get('width') and format.get('height'):
return '%dx%d' % (format['width'], format['height'])
elif format.get('height'):
return '%sp' % format['height']
elif format.get('width'):
return '%dx?' % format['width']
return default
def _list_format_headers(self, *headers):
if self.params.get('listformats_table', True) is not False:
return [self._format_out(header, self.Styles.HEADERS) for header in headers]
return headers
def _format_note(self, fdict):
res = ''
if fdict.get('ext') in ['f4f', 'f4m']:
res += '(unsupported)'
if fdict.get('language'):
if res:
res += ' '
res += '[%s]' % fdict['language']
if fdict.get('format_note') is not None:
if res:
res += ' '
res += fdict['format_note']
if fdict.get('tbr') is not None:
if res:
res += ', '
res += '%4dk' % fdict['tbr']
if fdict.get('container') is not None:
if res:
res += ', '
res += '%s container' % fdict['container']
if (fdict.get('vcodec') is not None
and fdict.get('vcodec') != 'none'):
if res:
res += ', '
res += fdict['vcodec']
if fdict.get('vbr') is not None:
res += '@'
elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
res += 'video@'
if fdict.get('vbr') is not None:
res += '%4dk' % fdict['vbr']
if fdict.get('fps') is not None:
if res:
res += ', '
res += '%sfps' % fdict['fps']
if fdict.get('acodec') is not None:
if res:
res += ', '
if fdict['acodec'] == 'none':
res += 'video only'
else:
res += '%-5s' % fdict['acodec']
elif fdict.get('abr') is not None:
if res:
res += ', '
res += 'audio'
if fdict.get('abr') is not None:
res += '@%3dk' % fdict['abr']
if fdict.get('asr') is not None:
res += ' (%5dHz)' % fdict['asr']
if fdict.get('filesize') is not None:
if res:
res += ', '
res += format_bytes(fdict['filesize'])
elif fdict.get('filesize_approx') is not None:
if res:
res += ', '
res += '~' + format_bytes(fdict['filesize_approx'])
return res
def render_formats_table(self, info_dict):
if not info_dict.get('formats') and not info_dict.get('url'):
return None
formats = info_dict.get('formats', [info_dict])
if not self.params.get('listformats_table', True) is not False:
table = [
[
format_field(f, 'format_id'),
format_field(f, 'ext'),
self.format_resolution(f),
self._format_note(f)
] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)
delim = self._format_out('\u2502', self.Styles.DELIM, '|', test_encoding=True)
table = [
[
self._format_out(format_field(f, 'format_id'), self.Styles.ID),
format_field(f, 'ext'),
format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
format_field(f, 'fps', '\t%d'),
format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
delim,
format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
format_field(f, 'tbr', '\t%dk'),
shorten_protocol_name(f.get('protocol', '')),
delim,
format_field(f, 'vcodec', default='unknown').replace(
'none', 'images' if f.get('acodec') == 'none'
else self._format_out('audio only', self.Styles.SUPPRESS)),
format_field(f, 'vbr', '\t%dk'),
format_field(f, 'acodec', default='unknown').replace(
'none', '' if f.get('vcodec') == 'none'
else self._format_out('video only', self.Styles.SUPPRESS)),
format_field(f, 'abr', '\t%dk'),
format_field(f, 'asr', '\t%dHz'),
join_nonempty(
self._format_out('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None,
format_field(f, 'language', '[%s]'),
join_nonempty(format_field(f, 'format_note'),
format_field(f, 'container', ignore=(None, f.get('ext'))),
delim=', '),
delim=' '),
] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
header_line = self._list_format_headers(
'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', delim, '\tFILESIZE', '\tTBR', 'PROTO',
delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')
return render_table(
header_line, table, hide_empty=True,
delim=self._format_out('\u2500', self.Styles.DELIM, '-', test_encoding=True))
def render_thumbnails_table(self, info_dict):
thumbnails = list(info_dict.get('thumbnails') or [])
if not thumbnails:
return None
return render_table(
self._list_format_headers('ID', 'Width', 'Height', 'URL'),
[[t.get('id'), t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])
def render_subtitles_table(self, video_id, subtitles):
def _row(lang, formats):
exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
if len(set(names)) == 1:
names = [] if names[0] == 'unknown' else names[:1]
return [lang, ', '.join(names), ', '.join(exts)]
if not subtitles:
return None
return render_table(
self._list_format_headers('Language', 'Name', 'Formats'),
[_row(lang, formats) for lang, formats in subtitles.items()],
hide_empty=True)
def __list_table(self, video_id, name, func, *args):
table = func(*args)
if not table:
self.to_screen(f'{video_id} has no {name}')
return
self.to_screen(f'[info] Available {name} for {video_id}:')
self.to_stdout(table)
def list_formats(self, info_dict):
self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)
def list_thumbnails(self, info_dict):
self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)
def list_subtitles(self, video_id, subtitles, name='subtitles'):
self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)
def urlopen(self, req):
""" Start an HTTP download """
if isinstance(req, str):
req = sanitized_Request(req)
return self._opener.open(req, timeout=self._socket_timeout)
def print_debug_header(self):
if not self.params.get('verbose'):
return
# These imports can be slow. So import them only as needed
from .extractor.extractors import _LAZY_LOADER
from .extractor.extractors import _PLUGIN_CLASSES as plugin_extractors
def get_encoding(stream):
ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
if not supports_terminal_sequences(stream):
from .utils import WINDOWS_VT_MODE # Must be imported locally
ret += ' (No VT)' if WINDOWS_VT_MODE is False else ' (No ANSI)'
return ret
encoding_str = 'Encodings: locale %s, fs %s, pref %s, %s' % (
locale.getpreferredencoding(),
sys.getfilesystemencoding(),
self.get_encoding(),
', '.join(
f'{key} {get_encoding(stream)}' for key, stream in self._out_files.items_
if stream is not None and key != 'console')
)
logger = self.params.get('logger')
if logger:
write_debug = lambda msg: logger.debug(f'[debug] {msg}')
write_debug(encoding_str)
else:
write_string(f'[debug] {encoding_str}\n', encoding=None)
write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')
source = detect_variant()
write_debug(join_nonempty(
'yt-dlp version', __version__,
f'[{RELEASE_GIT_HEAD}]' if RELEASE_GIT_HEAD else '',
'' if source == 'unknown' else f'({source})',
delim=' '))
if not _LAZY_LOADER:
if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
write_debug('Lazy loading extractors is forcibly disabled')
else:
write_debug('Lazy loading extractors is disabled')
if plugin_extractors or plugin_postprocessors:
write_debug('Plugins: %s' % [
'%s%s' % (klass.__name__, '' if klass.__name__ == name else f' as {name}')
for name, klass in itertools.chain(plugin_extractors.items(), plugin_postprocessors.items())])
if self.params['compat_opts']:
write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))
if source == 'source':
try:
stdout, _, _ = Popen.run(
['git', 'rev-parse', '--short', 'HEAD'],
text=True, cwd=os.path.dirname(os.path.abspath(__file__)),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if re.fullmatch('[0-9a-f]+', stdout.strip()):
write_debug(f'Git HEAD: {stdout.strip()}')
except Exception:
with contextlib.suppress(Exception):
sys.exc_clear()
def python_implementation():
impl_name = platform.python_implementation()
if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
return impl_name
write_debug('Python version %s (%s %s) - %s' % (
platform.python_version(),
python_implementation(),
platform.architecture()[0],
platform_name()))
exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
if ffmpeg_features:
exe_versions['ffmpeg'] += ' (%s)' % ','.join(sorted(ffmpeg_features))
exe_versions['rtmpdump'] = rtmpdump_version()
exe_versions['phantomjs'] = PhantomJSwrapper._version()
exe_str = ', '.join(
f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
) or 'none'
write_debug('exe versions: %s' % exe_str)
from .compat.compat_utils import get_package_info
from .dependencies import available_dependencies
write_debug('Optional libraries: %s' % (', '.join(sorted({
join_nonempty(*get_package_info(m)) for m in available_dependencies.values()
})) or 'none'))
self._setup_opener()
proxy_map = {}
for handler in self._opener.handlers:
if hasattr(handler, 'proxies'):
proxy_map.update(handler.proxies)
write_debug(f'Proxy map: {proxy_map}')
# Not implemented
if False and self.params.get('call_home'):
ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
write_debug('Public IP address: %s' % ipaddr)
latest_version = self.urlopen(
'https://yt-dl.org/latest/version').read().decode()
if version_tuple(latest_version) > version_tuple(__version__):
self.report_warning(
'You are using an outdated version (newest version: %s)! '
'See https://yt-dl.org/update if you need help updating.' %
latest_version)
def _setup_opener(self):
if hasattr(self, '_opener'):
return
timeout_val = self.params.get('socket_timeout')
self._socket_timeout = 20 if timeout_val is None else float(timeout_val)
opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
opts_cookiefile = self.params.get('cookiefile')
opts_proxy = self.params.get('proxy')
self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)
cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
if opts_proxy is not None:
if opts_proxy == '':
proxies = {}
else:
proxies = {'http': opts_proxy, 'https': opts_proxy}
else:
proxies = compat_urllib_request.getproxies()
# Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
if 'http' in proxies and 'https' not in proxies:
proxies['https'] = proxies['http']
proxy_handler = PerRequestProxyHandler(proxies)
debuglevel = 1 if self.params.get('debug_printtraffic') else 0
https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
redirect_handler = YoutubeDLRedirectHandler()
data_handler = urllib.request.DataHandler()
# When passing our own FileHandler instance, build_opener won't add the
# default FileHandler and allows us to disable the file protocol, which
# can be used for malicious purposes (see
# https://github.com/ytdl-org/youtube-dl/issues/8227)
file_handler = compat_urllib_request.FileHandler()
def file_open(*args, **kwargs):
raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
file_handler.file_open = file_open
opener = compat_urllib_request.build_opener(
proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
# Delete the default user-agent header, which would otherwise apply in
# cases where our custom HTTP handler doesn't come into play
# (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
opener.addheaders = []
self._opener = opener
def encode(self, s):
if isinstance(s, bytes):
return s # Already encoded
try:
return s.encode(self.get_encoding())
except UnicodeEncodeError as err:
err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
raise
def get_encoding(self):
encoding = self.params.get('encoding')
if encoding is None:
encoding = preferredencoding()
return encoding
def _write_info_json(self, label, ie_result, infofn, overwrite=None):
''' Write infojson and returns True = written, 'exists' = Already exists, False = skip, None = error '''
if overwrite is None:
overwrite = self.params.get('overwrites', True)
if not self.params.get('writeinfojson'):
return False
elif not infofn:
self.write_debug(f'Skipping writing {label} infojson')
return False
elif not self._ensure_dir_exists(infofn):
return None
elif not overwrite and os.path.exists(infofn):
self.to_screen(f'[info] {label.title()} metadata is already present')
return 'exists'
self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
try:
write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
return True
except OSError:
self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
return None
def _write_description(self, label, ie_result, descfn):
''' Write description and returns True = written, False = skip, None = error '''
if not self.params.get('writedescription'):
return False
elif not descfn:
self.write_debug(f'Skipping writing {label} description')
return False
elif not self._ensure_dir_exists(descfn):
return None
elif not self.params.get('overwrites', True) and os.path.exists(descfn):
self.to_screen(f'[info] {label.title()} description is already present')
elif ie_result.get('description') is None:
self.report_warning(f'There\'s no {label} description to write')
return False
else:
try:
self.to_screen(f'[info] Writing {label} description to: {descfn}')
with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
descfile.write(ie_result['description'])
except OSError:
self.report_error(f'Cannot write {label} description file {descfn}')
return None
return True
def _write_subtitles(self, info_dict, filename):
''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
ret = []
subtitles = info_dict.get('requested_subtitles')
if not subtitles or not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
            # Subtitle download errors are already handled in the relevant IE,
            # so processing silently continues when an IE does not support subtitles
return ret
sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
if not sub_filename_base:
self.to_screen('[info] Skipping writing video subtitles')
return ret
for sub_lang, sub_info in subtitles.items():
sub_format = sub_info['ext']
sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
existing_sub = self.existing_file((sub_filename_final, sub_filename))
if existing_sub:
self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
sub_info['filepath'] = existing_sub
ret.append((existing_sub, sub_filename_final))
continue
self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
if sub_info.get('data') is not None:
try:
# Use newline='' to prevent conversion of newline characters
# See https://github.com/ytdl-org/youtube-dl/issues/10268
with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
subfile.write(sub_info['data'])
sub_info['filepath'] = sub_filename
ret.append((sub_filename, sub_filename_final))
continue
except OSError:
self.report_error(f'Cannot write video subtitles file {sub_filename}')
return None
try:
sub_copy = sub_info.copy()
sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
self.dl(sub_filename, sub_copy, subtitle=True)
sub_info['filepath'] = sub_filename
ret.append((sub_filename, sub_filename_final))
except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
if self.params.get('ignoreerrors') is not True: # False or 'only_download'
if not self.params.get('ignoreerrors'):
self.report_error(msg)
raise DownloadError(msg)
self.report_warning(msg)
return ret
def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
write_all = self.params.get('write_all_thumbnails', False)
thumbnails, ret = [], []
if write_all or self.params.get('writethumbnail', False):
thumbnails = info_dict.get('thumbnails') or []
multiple = write_all and len(thumbnails) > 1
if thumb_filename_base is None:
thumb_filename_base = filename
if thumbnails and not thumb_filename_base:
self.write_debug(f'Skipping writing {label} thumbnail')
return ret
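        # Iterate thumbnails in reverse (highest preference is last in the list); stop after the first successful write unless writing all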
for idx, t in list(enumerate(thumbnails))[::-1]:
thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
thumb_display_id = f'{label} thumbnail {t["id"]}'
thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))
existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
if existing_thumb:
self.to_screen('[info] %s is already present' % (
thumb_display_id if multiple else f'{label} thumbnail').capitalize())
t['filepath'] = existing_thumb
ret.append((existing_thumb, thumb_filename_final))
else:
self.to_screen(f'[info] Downloading {thumb_display_id} ...')
try:
uf = self.urlopen(sanitized_Request(t['url'], headers=t.get('http_headers', {})))
self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
with open(encodeFilename(thumb_filename), 'wb') as thumbf:
shutil.copyfileobj(uf, thumbf)
ret.append((thumb_filename, thumb_filename_final))
t['filepath'] = thumb_filename
except network_exceptions as err:
thumbnails.pop(idx)
self.report_warning(f'Unable to download {thumb_display_id}: {err}')
if ret and not write_all:
break
return ret
| []
| []
| [
"YTDLP_NO_LAZY_EXTRACTORS"
]
| [] | ["YTDLP_NO_LAZY_EXTRACTORS"] | python | 1 | 0 | |
eth/tools/fixtures/helpers.py | import os
import rlp
from typing import (
cast,
Any,
Dict,
Iterable,
Tuple,
Type,
)
from eth_utils.toolz import first
from eth_utils import (
to_normalized_address,
)
from eth import MainnetChain
from eth.abc import (
BlockAPI,
ChainAPI,
StateAPI,
VirtualMachineAPI,
)
from eth.db.atomic import AtomicDB
from eth.chains.mainnet import (
MainnetDAOValidatorVM,
)
from eth.tools.builder.chain import (
disable_pow_check,
)
from eth.typing import (
AccountState,
)
from eth._utils.state import (
diff_state,
)
from eth.vm.forks import (
PetersburgVM,
ConstantinopleVM,
ByzantiumVM,
TangerineWhistleVM,
FrontierVM,
HomesteadVM as BaseHomesteadVM,
SpuriousDragonVM,
IstanbulVM,
BerlinVM,
)
#
# State Setup
#
def setup_state(desired_state: AccountState, state: StateAPI) -> None:
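    # Apply the fixture's pre-state: set storage slots, nonce, code and balance per account, then persist once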
for account, account_data in desired_state.items():
for slot, value in account_data['storage'].items():
state.set_storage(account, slot, value)
nonce = account_data['nonce']
code = account_data['code']
balance = account_data['balance']
state.set_nonce(account, nonce)
state.set_code(account, code)
state.set_balance(account, balance)
state.persist()
def verify_state(expected_state: AccountState, state: StateAPI) -> None:
diff = diff_state(expected_state, state)
new_line = "\n"
if diff:
error_messages = []
for account, field, actual_value, expected_value in diff:
if field == 'balance':
error_messages.append(
f"{to_normalized_address(account)}(balance) | "
f"Actual: {actual_value!r} | Expected: {expected_value!r} | "
f"Delta: {cast(int, actual_value) - cast(int, expected_value)}"
)
else:
error_messages.append(
f"{to_normalized_address(account)}({field}) | "
f"Actual: {actual_value!r} | Expected: {expected_value!r}"
)
raise AssertionError(
f"State DB did not match expected state on {len(error_messages)} values:{new_line}"
f"{f'{new_line} - '.join(error_messages)}"
)
def chain_vm_configuration(fixture: Dict[str, Any]) -> Iterable[Tuple[int, Type[VirtualMachineAPI]]]: # noqa: E501
network = fixture['network']
if network == 'Frontier':
return (
(0, FrontierVM),
)
elif network == 'Homestead':
HomesteadVM = BaseHomesteadVM.configure(support_dao_fork=False)
return (
(0, HomesteadVM),
)
elif network == 'EIP150':
return (
(0, TangerineWhistleVM),
)
elif network == 'EIP158':
return (
(0, SpuriousDragonVM),
)
elif network == 'Byzantium':
return (
(0, ByzantiumVM),
)
elif network == 'Constantinople':
return (
(0, ConstantinopleVM),
)
elif network == 'ConstantinopleFix':
return (
(0, PetersburgVM),
)
elif network == 'Istanbul':
return (
(0, IstanbulVM),
)
elif network == 'Berlin':
return (
(0, BerlinVM),
)
elif network == 'FrontierToHomesteadAt5':
HomesteadVM = BaseHomesteadVM.configure(support_dao_fork=False)
return (
(0, FrontierVM),
(5, HomesteadVM),
)
elif network == 'HomesteadToEIP150At5':
HomesteadVM = BaseHomesteadVM.configure(support_dao_fork=False)
return (
(0, HomesteadVM),
(5, TangerineWhistleVM),
)
elif network == 'HomesteadToDaoAt5':
HomesteadVM = MainnetDAOValidatorVM.configure(
support_dao_fork=True,
_dao_fork_block_number=5,
)
return (
(0, HomesteadVM),
)
elif network == 'EIP158ToByzantiumAt5':
return (
(0, SpuriousDragonVM),
(5, ByzantiumVM),
)
elif network == 'ByzantiumToConstantinopleFixAt5':
return (
(0, ByzantiumVM),
(5, PetersburgVM),
)
else:
raise ValueError(f"Network {network} does not match any known VM rules")
def genesis_params_from_fixture(fixture: Dict[str, Any]) -> Dict[str, Any]:
return {
'parent_hash': fixture['genesisBlockHeader']['parentHash'],
'uncles_hash': fixture['genesisBlockHeader']['uncleHash'],
'coinbase': fixture['genesisBlockHeader']['coinbase'],
'state_root': fixture['genesisBlockHeader']['stateRoot'],
'transaction_root': fixture['genesisBlockHeader']['transactionsTrie'],
'receipt_root': fixture['genesisBlockHeader']['receiptTrie'],
'bloom': fixture['genesisBlockHeader']['bloom'],
'difficulty': fixture['genesisBlockHeader']['difficulty'],
'block_number': fixture['genesisBlockHeader']['number'],
'gas_limit': fixture['genesisBlockHeader']['gasLimit'],
'gas_used': fixture['genesisBlockHeader']['gasUsed'],
'timestamp': fixture['genesisBlockHeader']['timestamp'],
'extra_data': fixture['genesisBlockHeader']['extraData'],
'mix_hash': fixture['genesisBlockHeader']['mixHash'],
'nonce': fixture['genesisBlockHeader']['nonce'],
}
def new_chain_from_fixture(fixture: Dict[str, Any],
chain_cls: Type[ChainAPI] = MainnetChain) -> ChainAPI:
base_db = AtomicDB()
vm_config = chain_vm_configuration(fixture)
ChainFromFixture = chain_cls.configure(
'ChainFromFixture',
vm_configuration=vm_config,
)
if 'sealEngine' in fixture and fixture['sealEngine'] == 'NoProof':
ChainFromFixture = disable_pow_check(ChainFromFixture)
return ChainFromFixture.from_genesis(
base_db,
genesis_params=genesis_params_from_fixture(fixture),
genesis_state=fixture['pre'],
)
def apply_fixture_block_to_chain(
block_fixture: Dict[str, Any],
chain: ChainAPI,
perform_validation: bool = True) -> Tuple[BlockAPI, BlockAPI, bytes]:
"""
:return: (premined_block, mined_block, rlp_encoded_mined_block)
"""
# The block to import may be in a different block-class-range than the
# chain's current one, so we use the block number specified in the
# fixture to look up the correct block class.
if 'blockHeader' in block_fixture:
block_number = block_fixture['blockHeader']['number']
block_class = chain.get_vm_class_for_block_number(block_number).get_block_class()
else:
block_class = chain.get_vm().get_block_class()
block = rlp.decode(block_fixture['rlp'], sedes=block_class)
import_result = chain.import_block(block, perform_validation=perform_validation)
mined_block = import_result.imported_block
rlp_encoded_mined_block = rlp.encode(mined_block, sedes=block_class)
return (block, mined_block, rlp_encoded_mined_block)
def should_run_slow_tests() -> bool:
if os.environ.get('TRAVIS_EVENT_TYPE') == 'cron':
return True
return False
def get_test_name(filler: Dict[str, Any]) -> str:
assert len(filler) == 1
return first(filler)
| []
| []
| [
"TRAVIS_EVENT_TYPE"
]
| [] | ["TRAVIS_EVENT_TYPE"] | python | 1 | 0 | |
patent_server_prj/asgi.py | """
ASGI config for patent_server_prj project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'patent_server_prj.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
examples/pwr_run/checkpointing/nonpc_short/k80_only/job39.py | """
Trains a DenseNet on the CIFAR-10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 128
args_lr = 0.003
args_model = 'densenet121'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_k80_only/' + job_name + '*'
total_epochs = 65
starting_epoch = 0
# first step is to update the PID
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
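# the rename publishes the updated pid.json atomically, so readers never see a partially written file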
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '121' in args_model:
base_model = DenseNet121(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '169' in args_model:
base_model = DenseNet169(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '201' in args_model:
base_model = DenseNet201(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
model.add(base_model)
#model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
epoch_waste_dict = {}
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
epoch_waste_dict[job_name] += epoch_waste_time
json_file3 = json.dumps(epoch_waste_dict)
with open('epoch_waste.json', 'w') as fp:
fp.write(json_file3)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_k80_only/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
checkpoint_dict[job_name] = 1
json_file3 = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file3)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| []
| []
| [
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 | |
hello.py | import requests
import random
from bs4 import BeautifulSoup
import re
from langdetect import detect
import os
import json
from cloudant import Cloudant
from datetime import datetime,date
import time
import tweet
db_name = 'mydb'
client = None
db = None
if 'VCAP_SERVICES' in os.environ:
vcap = json.loads(os.getenv('VCAP_SERVICES'))
#print('Found VCAP_SERVICES')
if 'cloudantNoSQLDB' in vcap:
creds = vcap['cloudantNoSQLDB'][0]['credentials']
user = creds['username']
password = creds['password']
url = 'https://' + creds['host']
client = Cloudant(user, password, url=url, connect=True)
db = client.create_database(db_name, throw_on_exists=False)
elif "CLOUDANT_URL" in os.environ:
client = Cloudant(os.environ['CLOUDANT_USERNAME'], os.environ['CLOUDANT_PASSWORD'], url=os.environ['CLOUDANT_URL'], connect=True)
db = client.create_database(db_name, throw_on_exists=False)
elif os.path.isfile('vcap-local.json'):
with open('vcap-local.json') as f:
vcap = json.load(f)
#print('Found local VCAP_SERVICES')
creds = vcap['services']['cloudantNoSQLDB'][0]['credentials']
user = creds['username']
password = creds['password']
url = 'https://' + creds['host']
client = Cloudant(user, password, url=url, connect=True)
db = client.create_database(db_name, throw_on_exists=False)
def number_gen():
return random.randint(1,10**7)
def recheck(lst):
i=0
while i<len(lst):
if lst[i]=="" or re.match(".*verse \d: ",lst[i].lower()) or re.match(".+intro:.+",lst[i].lower()) or re.match(".+chorus.+",lst[i].lower()) or re.match(".+hook.+",lst[i].lower()) or re.match(".+solo.+",lst[i].lower())or re.match(".+bridge:.+",lst[i].lower()) or re.match(".*Lyrics for this song.+",lst[i].lower()):
lst.remove(lst[i])
else:
temp = list(lst[i])
if len(temp)>150 or len(temp)<10:
lst.remove(lst[i])
else:
i+=1
return lst
def song_title(ftitle):
title = re.match("(.+) Lyrics | Genius Lyrics",ftitle)
return title.groups()[0]
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36"}
if __name__ == '__main__':
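    # Main loop: pick a random Genius song id, scrape its lyrics page, choose a
    # random English lyric line, tweet it together with the song title, store a
    # record in Cloudant when a client is configured, then sleep so roughly one
    # tweet goes out every three hours (10800 seconds minus time spent scraping).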
var=0
while True:
if var==0:
start=time.time()
var=1
number = number_gen()
#url = "https://genius.com/songs/3524045"
url = "https://genius.com/songs/"+str(number)
#print(url)
page = requests.get(url, headers=headers)
soup = BeautifulSoup(page.content, 'html.parser')
lyrics = soup.find("div", {"class": "lyrics"})
if lyrics:
lyrics = lyrics.get_text()
lyrics = lyrics.splitlines()
lyrics = recheck(lyrics)
if len(lyrics)>0:
lyric = random.choice(lyrics)
try:
if detect(lyric) == 'en':
ftitle = soup.title.get_text()
#print(lyric)
title = song_title(ftitle)
#print(title)
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
#print("Current Time =", current_time)
today = date.today()
#print("Today's date:", today)
data = {'Song': title,
'Time': current_time,
'Date': str(today),
'Lyric': lyric}
tweet.tweetit(lyric+"\n"+title)
if client:
my_document = db.create_document(data)
data['_id'] = my_document['_id']
else:
pass
#print('No database')
var=0
elapsed = time.time() - start
time.sleep(10800-float(elapsed))
except:
pass
| []
| []
| [
"CLOUDANT_URL",
"CLOUDANT_USERNAME",
"VCAP_SERVICES",
"CLOUDANT_PASSWORD"
]
| [] | ["CLOUDANT_URL", "CLOUDANT_USERNAME", "VCAP_SERVICES", "CLOUDANT_PASSWORD"] | python | 4 | 0 | |
main.go | package main
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"regexp"
"strings"
"text/template"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/serializer/yaml"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/discovery"
"k8s.io/client-go/discovery/cached/memory"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
"k8s.io/client-go/tools/clientcmd"
)
func main() {
	// Look up the env variable `PLUGIN_KUBECONFIG`.
kubeconfig, exists := os.LookupEnv("PLUGIN_KUBECONFIG")
switch exists {
	// If it exists, the user intends out-of-cluster usage with the provided kubeconfig
case true:
data := []byte(kubeconfig)
// create a kubeconfig file
err := ioutil.WriteFile("./kubeconfig", data, 0644)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
outOfCluster, err := clientcmd.BuildConfigFromFlags("", "./kubeconfig")
if err != nil {
fmt.Println(err)
os.Exit(1)
}
		fmt.Println("Out-of-cluster SSA initializing")
err = ssa(context.Background(), outOfCluster)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
	// If the user didn't provide a kubeconfig, dron8s defaults to creating an in-cluster config
case false:
inCluster, err := rest.InClusterConfig()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
		fmt.Println("In-cluster SSA initializing")
err = ssa(context.Background(), inCluster)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
}
}
// https://ymmt2005.hatenablog.com/entry/2020/04/14/An_example_of_using_dynamic_client_of_k8s.io/client-go#Go-client-libraries
func ssa(ctx context.Context, cfg *rest.Config) error {
var decUnstructured = yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme)
// 1. Prepare a RESTMapper to find GVR
dc, err := discovery.NewDiscoveryClientForConfig(cfg)
if err != nil {
return err
}
mapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(dc))
// 2. Prepare the dynamic client
dyn, err := dynamic.NewForConfig(cfg)
if err != nil {
return err
}
// 2.1. Read user's yaml
yaml, err := ioutil.ReadFile(os.Getenv("PLUGIN_YAML"))
if err != nil {
return err
}
// convert it to string
text := string(yaml)
// Parse variables
t := template.Must(template.New("dron8s").Option("missingkey=error").Parse(text))
b := bytes.NewBuffer(make([]byte, 0, 512))
err = t.Execute(b, getVariablesFromDrone())
if err != nil {
return err
}
text = b.String()
// Parse each yaml from file
configs := strings.Split(text, "---")
// variable to hold and print how many yaml configs are present
var sum int
// Iterate over provided configs
for i, v := range configs {
// If a yaml starts with `---`
// the first slice of `configs` will be empty
// so we just skip (continue) to next iteration
if len(v) == 0 {
continue
}
// 3. Decode YAML manifest into unstructured.Unstructured
obj := &unstructured.Unstructured{}
_, gvk, err := decUnstructured.Decode([]byte(v), nil, obj)
if err != nil {
return err
}
// 4. Find GVR
mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
if err != nil {
return err
}
// 5. Obtain REST interface for the GVR
var dr dynamic.ResourceInterface
if mapping.Scope.Name() == meta.RESTScopeNameNamespace {
if obj.GetNamespace() == "" {
obj.SetNamespace("default")
}
dr = dyn.Resource(mapping.Resource).Namespace(obj.GetNamespace())
} else {
// for cluster-wide resources
dr = dyn.Resource(mapping.Resource)
}
// 6. Marshal object into JSON
data, err := json.Marshal(obj)
if err != nil {
return err
}
fmt.Println("Applying config #", i)
// 7. Create or Update the object with SSA
// types.ApplyPatchType indicates SSA.
// FieldManager specifies the field owner ID.
_, err = dr.Patch(ctx, obj.GetName(), types.ApplyPatchType, data, metav1.PatchOptions{
FieldManager: "dron8s-plugin",
})
if err != nil {
return err
}
sum = i
}
fmt.Println("Dron8s finished applying ", sum+1, " configs.")
return nil
}
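// getVariablesFromDrone collects every PLUGIN_* and DRONE_* environment
// variable into a map keyed by the lower-cased name without its prefix, so the
// values can be referenced from the user's YAML template.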
func getVariablesFromDrone() map[string]string {
ctx := make(map[string]string)
pluginEnv := os.Environ()
pluginReg := regexp.MustCompile(`^PLUGIN_(.*)=(.*)`)
droneReg := regexp.MustCompile(`^DRONE_(.*)=(.*)`)
for _, value := range pluginEnv {
if pluginReg.MatchString(value) {
matches := pluginReg.FindStringSubmatch(value)
key := strings.ToLower(matches[1])
ctx[key] = matches[2]
}
if droneReg.MatchString(value) {
matches := droneReg.FindStringSubmatch(value)
key := strings.ToLower(matches[1])
ctx[key] = matches[2]
}
}
return ctx
}
| [
"\"PLUGIN_YAML\""
]
| []
| [
"PLUGIN_YAML"
]
| [] | ["PLUGIN_YAML"] | go | 1 | 0 | |
upgrade.go | // Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"io"
"os"
"path/filepath"
)
// dirEmpty returns whether or not the specified directory path is empty.
func dirEmpty(dirPath string) (bool, error) {
f, err := os.Open(dirPath)
if err != nil {
return false, err
}
defer f.Close()
// Read the names of a max of one entry from the directory. When the
// directory is empty, an io.EOF error will be returned, so allow it.
names, err := f.Readdirnames(1)
if err != nil && err != io.EOF {
return false, err
}
return len(names) == 0, nil
}
// oldBtcdHomeDir returns the OS specific home directory used by monad prior to
// version 0.3.3. This has since been replaced with monautil.AppDataDir, but
// this function is still provided for the automatic upgrade path.
func oldBtcdHomeDir() string {
// Search for Windows APPDATA first. This won't exist on POSIX OSes.
appData := os.Getenv("APPDATA")
if appData != "" {
return filepath.Join(appData, "monad")
}
// Fall back to standard HOME directory that works for most POSIX OSes.
home := os.Getenv("HOME")
if home != "" {
return filepath.Join(home, ".monad")
}
// In the worst case, use the current directory.
return "."
}
// upgradeDBPathNet moves the database for a specific network from its
// location prior to monad version 0.2.0 and uses heuristics to ascertain the old
// database type to rename to the new format.
func upgradeDBPathNet(oldDbPath, netName string) error {
// Prior to version 0.2.0, the database was named the same thing for
// both sqlite and leveldb. Use heuristics to figure out the type
// of the database and move it to the new path and name introduced with
// version 0.2.0 accordingly.
fi, err := os.Stat(oldDbPath)
if err == nil {
oldDbType := "sqlite"
if fi.IsDir() {
oldDbType = "leveldb"
}
// The new database name is based on the database type and
// resides in a directory named after the network type.
newDbRoot := filepath.Join(filepath.Dir(cfg.DataDir), netName)
newDbName := blockDbNamePrefix + "_" + oldDbType
if oldDbType == "sqlite" {
newDbName = newDbName + ".db"
}
newDbPath := filepath.Join(newDbRoot, newDbName)
// Create the new path if needed.
err = os.MkdirAll(newDbRoot, 0700)
if err != nil {
return err
}
// Move and rename the old database.
err := os.Rename(oldDbPath, newDbPath)
if err != nil {
return err
}
}
return nil
}
// upgradeDBPaths moves the databases from their locations prior to monad
// version 0.2.0 to their new locations.
func upgradeDBPaths() error {
// Prior to version 0.2.0, the databases were in the "db" directory and
// their names were suffixed by "testnet" and "regtest" for their
// respective networks. Check for the old database and update it to the
// new path introduced with version 0.2.0 accordingly.
oldDbRoot := filepath.Join(oldBtcdHomeDir(), "db")
upgradeDBPathNet(filepath.Join(oldDbRoot, "monad.db"), "mainnet")
upgradeDBPathNet(filepath.Join(oldDbRoot, "monad_testnet.db"), "testnet")
upgradeDBPathNet(filepath.Join(oldDbRoot, "monad_regtest.db"), "regtest")
// Remove the old db directory.
return os.RemoveAll(oldDbRoot)
}
// upgradeDataPaths moves the application data from its location prior to monad
// version 0.3.3 to its new location.
func upgradeDataPaths() error {
// No need to migrate if the old and new home paths are the same.
oldHomePath := oldBtcdHomeDir()
newHomePath := defaultHomeDir
if oldHomePath == newHomePath {
return nil
}
// Only migrate if the old path exists and the new one doesn't.
if fileExists(oldHomePath) && !fileExists(newHomePath) {
// Create the new path.
monadLog.Infof("Migrating application home path from '%s' to '%s'",
oldHomePath, newHomePath)
err := os.MkdirAll(newHomePath, 0700)
if err != nil {
return err
}
// Move old monad.conf into new location if needed.
oldConfPath := filepath.Join(oldHomePath, defaultConfigFilename)
newConfPath := filepath.Join(newHomePath, defaultConfigFilename)
if fileExists(oldConfPath) && !fileExists(newConfPath) {
err := os.Rename(oldConfPath, newConfPath)
if err != nil {
return err
}
}
// Move old data directory into new location if needed.
oldDataPath := filepath.Join(oldHomePath, defaultDataDirname)
newDataPath := filepath.Join(newHomePath, defaultDataDirname)
if fileExists(oldDataPath) && !fileExists(newDataPath) {
err := os.Rename(oldDataPath, newDataPath)
if err != nil {
return err
}
}
// Remove the old home if it is empty or show a warning if not.
ohpEmpty, err := dirEmpty(oldHomePath)
if err != nil {
return err
}
if ohpEmpty {
err := os.Remove(oldHomePath)
if err != nil {
return err
}
} else {
monadLog.Warnf("Not removing '%s' since it contains files "+
"not created by this application. You may "+
"want to manually move them or delete them.",
oldHomePath)
}
}
return nil
}
// doUpgrades performs upgrades to monad as new versions require it.
func doUpgrades() error {
err := upgradeDBPaths()
if err != nil {
return err
}
return upgradeDataPaths()
}
| [
"\"APPDATA\"",
"\"HOME\""
]
| []
| [
"APPDATA",
"HOME"
]
| [] | ["APPDATA", "HOME"] | go | 2 | 0 | |
v3/integrations/nrhttprouter/nrhttprouter.go | // Copyright 2020 New Relic Corporation. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
// Package nrhttprouter instruments https://github.com/julienschmidt/httprouter
// applications.
//
// Use this package to instrument inbound requests handled by a
// httprouter.Router. Use an *nrhttprouter.Router in place of your
// *httprouter.Router. Example:
//
// package main
//
// import (
// "fmt"
// "net/http"
// "os"
//
// "github.com/julienschmidt/httprouter"
// newrelic "github.com/divyanshgaba/go-agent/v3/newrelic"
// "github.com/divyanshgaba/go-agent/v3/integrations/nrhttprouter"
// )
//
// func main() {
//	app, _ := newrelic.NewApplication(
//		newrelic.ConfigAppName("httprouter App"),
//		newrelic.ConfigLicense(os.Getenv("NEW_RELIC_LICENSE_KEY")),
//	)
//
// // Create the Router replacement:
// router := nrhttprouter.New(app)
//
// router.GET("/", func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
// w.Write([]byte("welcome\n"))
// })
//	router.GET("/hello/:name", func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
// w.Write([]byte(fmt.Sprintf("hello %s\n", ps.ByName("name"))))
// })
// http.ListenAndServe(":8000", router)
// }
//
// Runnable example: https://github.com/divyanshgaba/go-agent/tree/master/v3/integrations/nrhttprouter/example/main.go
package nrhttprouter
import (
"net/http"
"github.com/julienschmidt/httprouter"
"github.com/divyanshgaba/go-agent/v3/internal"
newrelic "github.com/divyanshgaba/go-agent/v3/newrelic"
)
func init() { internal.TrackUsage("integration", "framework", "httprouter") }
// Router should be used in place of httprouter.Router. Create it using
// New().
type Router struct {
*httprouter.Router
application *newrelic.Application
}
// New creates a new Router to be used in place of httprouter.Router.
func New(app *newrelic.Application) *Router {
return &Router{
Router: httprouter.New(),
application: app,
}
}
func txnName(method, path string) string {
return method + " " + path
}
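// handle wraps the given httprouter.Handle so that each matched request starts
// a transaction named "<METHOD> <route pattern>"; when no application is
// configured, the original handler is installed unchanged.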
func (r *Router) handle(method string, path string, original httprouter.Handle) {
handle := original
if nil != r.application {
handle = func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) {
txn := r.application.StartTransaction(txnName(method, path))
txn.SetWebRequestHTTP(req)
w = txn.SetWebResponse(w)
defer txn.End()
req = newrelic.RequestWithTransactionContext(req, txn)
original(w, req, ps)
}
}
r.Router.Handle(method, path, handle)
}
// DELETE replaces httprouter.Router.DELETE.
func (r *Router) DELETE(path string, h httprouter.Handle) {
r.handle(http.MethodDelete, path, h)
}
// GET replaces httprouter.Router.GET.
func (r *Router) GET(path string, h httprouter.Handle) {
r.handle(http.MethodGet, path, h)
}
// HEAD replaces httprouter.Router.HEAD.
func (r *Router) HEAD(path string, h httprouter.Handle) {
r.handle(http.MethodHead, path, h)
}
// OPTIONS replaces httprouter.Router.OPTIONS.
func (r *Router) OPTIONS(path string, h httprouter.Handle) {
r.handle(http.MethodOptions, path, h)
}
// PATCH replaces httprouter.Router.PATCH.
func (r *Router) PATCH(path string, h httprouter.Handle) {
r.handle(http.MethodPatch, path, h)
}
// POST replaces httprouter.Router.POST.
func (r *Router) POST(path string, h httprouter.Handle) {
r.handle(http.MethodPost, path, h)
}
// PUT replaces httprouter.Router.PUT.
func (r *Router) PUT(path string, h httprouter.Handle) {
r.handle(http.MethodPut, path, h)
}
// Handle replaces httprouter.Router.Handle.
func (r *Router) Handle(method, path string, h httprouter.Handle) {
r.handle(method, path, h)
}
// Handler replaces httprouter.Router.Handler.
func (r *Router) Handler(method, path string, handler http.Handler) {
_, h := newrelic.WrapHandle(r.application, path, handler)
r.Router.Handler(method, path, h)
}
// HandlerFunc replaces httprouter.Router.HandlerFunc.
func (r *Router) HandlerFunc(method, path string, handler http.HandlerFunc) {
r.Handler(method, path, handler)
}
// ServeHTTP replaces httprouter.Router.ServeHTTP.
func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
if nil != r.application {
h, _, _ := r.Router.Lookup(req.Method, req.URL.Path)
if nil == h {
txn := r.application.StartTransaction("NotFound")
defer txn.End()
req = newrelic.RequestWithTransactionContext(req, txn)
txn.SetWebRequestHTTP(req)
w = txn.SetWebResponse(w)
}
}
r.Router.ServeHTTP(w, req)
}
| [
"\"NEW_RELIC_LICENSE_KEY\""
]
| []
| [
"NEW_RELIC_LICENSE_KEY"
]
| [] | ["NEW_RELIC_LICENSE_KEY"] | go | 1 | 0 | |
src/ghostricon/vpn.py | import os
import re
import typing
import logging
import subprocess
from shlex import quote
from ghostricon.commands import server_types
from ghostricon.config import reload_config
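# Thin wrapper around the cyberghostvpn CLI: commands run as the configured
# user and the CLI's table output is parsed with a regular expression.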
class Vpn:
logger = logging.getLogger("VPNTOOL")
reg = re.compile(r"^\|\s*\d+\s*\|\s*(.*?)\s*\|\s*(.*?)\s*\|$")
def __init__(self, user: str):
self.user = user
self.load_config()
self.connected()
def load_config(self):
self.config = reload_config(self.user)["Global"]
def _run(self, args: typing.List[str]) -> str:
self.logger.debug("running as " +
f"{subprocess.check_output(['/usr/bin/whoami'])}")
self.logger.debug(f"substitute as {self.user}")
env = os.environ
env["USER"] = self.user
cmd = ["/usr/bin/cyberghostvpn"]
cmd += args
# cmd = [quote(x) for x in cmd]
self.logger.debug(f"COMMAND: {cmd}")
try:
ret = subprocess.check_output(cmd, env=env).decode("utf-8").rstrip()
self.logger.debug(f"RET: {ret}")
return ret
except subprocess.CalledProcessError:
self.logger.exception("Command Failed!")
def list(self, kind: str = None, *args) -> typing.List[str]:
fargs = ["--country-code"]
kind = kind.lower() if kind else None
if kind and kind in server_types:
fargs.insert(0, server_types[kind])
fargs += args
ret = self._run(fargs)
servers = []
for line in ret.splitlines():
match = self.reg.match(line)
if match:
servers.append((match.group(1), match.group(2)))
return servers
def status(self) -> bool:
ret = self._run(["--status"])
return ret != "No VPN connections found."
def disconnect(self):
if not self.connected():
return False
self._run(["--stop"])
return self.connected()
def connect(self,
kind: str = None,
country: str = None,
platform: str = None,
force: bool = False) -> bool:
def _select_from_default(kind_, country_=None):
servers = self.list(kind_)
default_country_name = country_ or self.config.get("default_country")
for name, code in servers:
if name == default_country_name:
return code
if self.connected():
if force:
self.disconnect()
else:
return True
self.load_config()
default_server_type = self.config.get("default_type").lower()
args = ["--connect"]
if not kind or kind not in server_types:
kind = default_server_type
if kind not in server_types:
kind = "traffic"
args.append(server_types[kind])
if kind == "streaming":
if not platform and default_server_type == kind:
platform = self.config.get("default_country")
args.append(platform)
if not country:
country = _select_from_default(kind, platform)
elif not country:
country = _select_from_default(kind)
if country:
args += ["--country-code", country]
self._run(args)
return self.connected()
def connected(self) -> bool:
self._connected = self.status()
self.logger.debug(f"CONNECTED: {self._connected}")
return self._connected
def changed(self) -> bool:
if self.status() != self._connected:
self._connected = not self._connected
return True
return False
| []
| []
| []
| [] | [] | python | 0 | 0 | |
examples/bluetooth/nimble/blehr/blehr_test.py | #!/usr/bin/env python
#
# Copyright 2019 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import sys
import re
import threading
import traceback
import subprocess
try:
import Queue
except ImportError:
import queue as Queue
try:
# This environment variable is expected on the host machine
test_fw_path = os.getenv("TEST_FW_PATH")
if test_fw_path and test_fw_path not in sys.path:
sys.path.insert(0, test_fw_path)
import IDF
except ImportError as e:
print(e)
print("\nCheck your IDF_PATH\nOR")
print("Try `export TEST_FW_PATH=$IDF_PATH/tools/tiny-test-fw` for resolving the issue\nOR")
print("Try `pip install -r $IDF_PATH/tools/tiny-test-fw/requirements.txt` for resolving the issue\n")
import IDF
try:
import lib_ble_client
except ImportError:
lib_ble_client_path = os.getenv("IDF_PATH") + "/tools/ble"
if lib_ble_client_path and lib_ble_client_path not in sys.path:
sys.path.insert(0, lib_ble_client_path)
import lib_ble_client
import Utility
# When running on local machine execute the following before running this script
# > make app bootloader
# > make print_flash_cmd | tail -n 1 > build/download.config
# > export TEST_FW_PATH=~/esp/esp-idf/tools/tiny-test-fw
def blehr_client_task(hr_obj, dut_addr):
interface = 'hci0'
ble_devname = 'blehr_sensor_1.0'
hr_srv_uuid = '180d'
hr_char_uuid = '2a37'
# Get BLE client module
ble_client_obj = lib_ble_client.BLE_Bluez_Client(interface, devname=ble_devname, devaddr=dut_addr)
if not ble_client_obj:
raise RuntimeError("Failed to get DBus-Bluez object")
# Discover Bluetooth Adapter and power on
is_adapter_set = ble_client_obj.set_adapter()
if not is_adapter_set:
raise RuntimeError("Adapter Power On failed !!")
# Connect BLE Device
is_connected = ble_client_obj.connect()
if not is_connected:
# Call disconnect to perform cleanup operations before exiting application
ble_client_obj.disconnect()
raise RuntimeError("Connection to device " + str(ble_devname) + " failed !!")
# Read Services
services_ret = ble_client_obj.get_services()
if services_ret:
Utility.console_log("\nServices\n")
Utility.console_log(str(services_ret))
else:
ble_client_obj.disconnect()
raise RuntimeError("Failure: Read Services failed")
'''
Blehr application run:
Start Notifications
Retrieve updated value
Stop Notifications
'''
blehr_ret = ble_client_obj.hr_update_simulation(hr_srv_uuid, hr_char_uuid)
if blehr_ret:
Utility.console_log("Success: blehr example test passed")
else:
raise RuntimeError("Failure: blehr example test failed")
# Call disconnect to perform cleanup operations before exiting application
ble_client_obj.disconnect()
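# Runs the BLE client task on a separate thread and funnels any exception into
# a queue so the main test can report the client-side traceback.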
class BleHRThread(threading.Thread):
def __init__(self, dut_addr, exceptions_queue):
threading.Thread.__init__(self)
self.dut_addr = dut_addr
self.exceptions_queue = exceptions_queue
def run(self):
try:
blehr_client_task(self, self.dut_addr)
except Exception:
self.exceptions_queue.put(traceback.format_exc(), block=False)
@IDF.idf_example_test(env_tag="Example_WIFI_BT")
def test_example_app_ble_hr(env, extra_data):
"""
Steps:
1. Discover Bluetooth Adapter and Power On
2. Connect BLE Device
3. Start Notifications
4. Updated value is retrieved
5. Stop Notifications
"""
subprocess.check_output(['rm','-rf','/var/lib/bluetooth/*'])
subprocess.check_output(['hciconfig','hci0','reset'])
# Acquire DUT
dut = env.get_dut("blehr", "examples/bluetooth/nimble/blehr")
# Get binary file
binary_file = os.path.join(dut.app.binary_path, "blehr.bin")
bin_size = os.path.getsize(binary_file)
IDF.log_performance("blehr_bin_size", "{}KB".format(bin_size // 1024))
IDF.check_performance("blehr_bin_size", bin_size // 1024)
# Upload binary and start testing
Utility.console_log("Starting blehr simple example test app")
dut.start_app()
dut.reset()
# Get device address from dut
dut_addr = dut.expect(re.compile(r"Device Address: ([a-fA-F0-9:]+)"), timeout=30)[0]
exceptions_queue = Queue.Queue()
# Starting a py-client in a separate thread
blehr_thread_obj = BleHRThread(dut_addr, exceptions_queue)
blehr_thread_obj.start()
blehr_thread_obj.join()
exception_msg = None
while True:
try:
exception_msg = exceptions_queue.get(block=False)
except Queue.Empty:
break
else:
Utility.console_log("\n" + exception_msg)
if exception_msg:
raise Exception("Thread did not run successfully")
# Check dut responses
dut.expect("subscribe event; cur_notify=1", timeout=30)
dut.expect("subscribe event; cur_notify=0", timeout=30)
dut.expect("disconnect;", timeout=30)
if __name__ == '__main__':
test_example_app_ble_hr()
| []
| []
| [
"TEST_FW_PATH",
"IDF_PATH"
]
| [] | ["TEST_FW_PATH", "IDF_PATH"] | python | 2 | 0 | |
ravenutil/appdata_test.go | // Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package ravenutil_test
import (
"os"
"os/user"
"path/filepath"
"runtime"
"testing"
"unicode"
"github.com/RavenProject/rosetta-ravencoin/ravenutil"
)
// TestAppDataDir tests the API for AppDataDir to ensure it gives expected
// results for various operating systems.
func TestAppDataDir(t *testing.T) {
// App name plus upper and lowercase variants.
appName := "myapp"
appNameUpper := string(unicode.ToUpper(rune(appName[0]))) + appName[1:]
appNameLower := string(unicode.ToLower(rune(appName[0]))) + appName[1:]
// When we're on Windows, set the expected local and roaming directories
// per the environment vars. When we aren't on Windows, the function
// should return the current directory when forced to provide the
// Windows path since the environment variables won't exist.
winLocal := "."
winRoaming := "."
if runtime.GOOS == "windows" {
localAppData := os.Getenv("LOCALAPPDATA")
roamingAppData := os.Getenv("APPDATA")
if localAppData == "" {
localAppData = roamingAppData
}
winLocal = filepath.Join(localAppData, appNameUpper)
winRoaming = filepath.Join(roamingAppData, appNameUpper)
}
// Get the home directory to use for testing expected results.
var homeDir string
usr, err := user.Current()
if err != nil {
t.Errorf("user.Current: %v", err)
return
}
homeDir = usr.HomeDir
// Mac app data directory.
macAppData := filepath.Join(homeDir, "Library", "Application Support")
tests := []struct {
goos string
appName string
roaming bool
want string
}{
// Various combinations of application name casing, leading
// period, operating system, and roaming flags.
{"windows", appNameLower, false, winLocal},
{"windows", appNameUpper, false, winLocal},
{"windows", "." + appNameLower, false, winLocal},
{"windows", "." + appNameUpper, false, winLocal},
{"windows", appNameLower, true, winRoaming},
{"windows", appNameUpper, true, winRoaming},
{"windows", "." + appNameLower, true, winRoaming},
{"windows", "." + appNameUpper, true, winRoaming},
{"linux", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"darwin", appNameLower, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", appNameUpper, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", "." + appNameLower, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", "." + appNameUpper, false, filepath.Join(macAppData, appNameUpper)},
{"openbsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"plan9", appNameLower, false, filepath.Join(homeDir, appNameLower)},
{"plan9", appNameUpper, false, filepath.Join(homeDir, appNameLower)},
{"plan9", "." + appNameLower, false, filepath.Join(homeDir, appNameLower)},
{"plan9", "." + appNameUpper, false, filepath.Join(homeDir, appNameLower)},
{"unrecognized", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
// No application name provided, so expect current directory.
{"windows", "", false, "."},
{"windows", "", true, "."},
{"linux", "", false, "."},
{"darwin", "", false, "."},
{"openbsd", "", false, "."},
{"freebsd", "", false, "."},
{"netbsd", "", false, "."},
{"plan9", "", false, "."},
{"unrecognized", "", false, "."},
// Single dot provided for application name, so expect current
// directory.
{"windows", ".", false, "."},
{"windows", ".", true, "."},
{"linux", ".", false, "."},
{"darwin", ".", false, "."},
{"openbsd", ".", false, "."},
{"freebsd", ".", false, "."},
{"netbsd", ".", false, "."},
{"plan9", ".", false, "."},
{"unrecognized", ".", false, "."},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
ret := ravenutil.TstAppDataDir(test.goos, test.appName, test.roaming)
if ret != test.want {
t.Errorf("appDataDir #%d (%s) does not match - "+
"expected got %s, want %s", i, test.goos, ret,
test.want)
continue
}
}
}
| [
"\"LOCALAPPDATA\"",
"\"APPDATA\""
]
| []
| [
"APPDATA",
"LOCALAPPDATA"
]
| [] | ["APPDATA", "LOCALAPPDATA"] | go | 2 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "RTTWebInterface.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pkg/model/db_test.go | package model
import (
"os"
"testing"
"github.com/huweihuang/gin-api-frame/cmd/server/app/config"
"github.com/huweihuang/gin-api-frame/pkg/util/log"
)
func init() {
log.InitLogger("", "debug", "", "text", true)
}
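// TestSetupDB loads the configuration referenced by the TEST_GIN_CONFIG
// environment variable, builds a DSN from its database section, and verifies
// that a database connection can be established.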
func TestSetupDB(t *testing.T) {
conf := config.MustLoad(os.Getenv("TEST_GIN_CONFIG"))
dsn := config.FormatDSN(&conf.Database)
if err := SetupDB(dsn); err != nil {
t.Errorf("Failed to setup db: %s", err)
}
}
| [
"\"TEST_GIN_CONFIG\""
]
| []
| [
"TEST_GIN_CONFIG"
]
| [] | ["TEST_GIN_CONFIG"] | go | 1 | 0 | |
src/environment.py | import math
import operator as op
from .types import Symbol, Number
class Env(dict):
"An environment: a dict of {'var':val} pairs, with an outer Env."
def __init__(self, parms=(), args=(), outer=None):
# Bind parm list to corresponding args, or single parm to list of args
self.outer = outer
if isinstance(parms, Symbol):
self.update({parms: list(args)})
else:
if len(args) != len(parms):
raise TypeError('expected %s, given %s, '
% (to_string(parms), to_string(args)))
self.update(zip(parms, args))
def find(self, var):
"Find the innermost Env where var appears."
if var in self:
return self
elif self.outer is None:
raise LookupError(var)
else:
return self.outer.find(var)
class StandartEnv(Env):
"An environment with some Scheme standard procedures."
def __init__(self, parms=(), args=(), outer=None):
super().__init__(parms, args, outer)
self.update(vars(math)) # sin, cos, sqrt, pi, ...
self.update({
'+': op.add,
'-': op.sub,
'*': op.mul,
'/': op.truediv,
'>': op.gt,
'<': op.lt,
'>=': op.ge,
'<=': op.le,
'=': op.eq,
'abs': abs,
'append': op.add,
'apply': lambda proc, args: proc(*args),
'begin': lambda *x: x[-1],
'car': lambda x: x[0],
'cdr': lambda x: x[1:],
'cons': lambda x, y: [x] + y,
'eq?': op.is_,
'equal?': op.eq,
'length': len,
'list': lambda *x: list(x),
'list?': lambda x: isinstance(x, list),
'map': map,
'max': max,
'min': min,
'not': op.not_,
'null?': lambda x: x == [],
'number?': lambda x: isinstance(x, Number),
'procedure?': callable,
'round': round,
'symbol?': lambda x: isinstance(x, Symbol),
})
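# Example usage (assuming a reader/evaluator elsewhere in the package):
#   env = StandartEnv()
#   env['+'](1, 2)          # -> 3, since '+' is bound to operator.add
#   env.find('+') is env    # True: the innermost Env defining '+'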
isa = isinstance
def to_string(x):
"Convert a Python object back into a Lisp-readable string."
if x is True:
return "#t"
elif x is False:
return "#f"
elif isa(x, Symbol):
return x
elif isa(x, str):
        return '"%s"' % x.replace('"', r'\"')
elif isa(x, list):
return '('+' '.join(map(to_string, x))+')'
elif isa(x, complex):
return str(x).replace('j', 'i')
else:
return str(x)
| []
| []
| []
| [] | [] | python | null | null | null |
pkg/actuators/machine/stubs.go | package machine
import (
"fmt"
"os"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/elbv2"
machinev1 "github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1"
machinecontroller "github.com/openshift/machine-api-operator/pkg/controller/machine"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
awsproviderv1 "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1"
awsclient "sigs.k8s.io/cluster-api-provider-aws/pkg/client"
)
const (
defaultNamespace = "default"
defaultAvailabilityZone = "us-east-1a"
region = "us-east-1"
awsCredentialsSecretName = "aws-credentials-secret"
userDataSecretName = "aws-actuator-user-data-secret"
keyName = "aws-actuator-key-name"
clusterID = "aws-actuator-cluster"
)
const userDataBlob = `#cloud-config
write_files:
- path: /root/node_bootstrap/node_settings.yaml
owner: 'root:root'
permissions: '0640'
content: |
node_config_name: node-config-master
runcmd:
- [ cat, /root/node_bootstrap/node_settings.yaml]
`
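// stubProviderConfig returns an AWSMachineProviderConfig populated with fixed
// test values (AMI, subnet, security groups and load balancers) for use by the
// actuator unit tests.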
func stubProviderConfig() *awsproviderv1.AWSMachineProviderConfig {
return &awsproviderv1.AWSMachineProviderConfig{
AMI: awsproviderv1.AWSResourceReference{
ID: aws.String("ami-a9acbbd6"),
},
CredentialsSecret: &corev1.LocalObjectReference{
Name: awsCredentialsSecretName,
},
InstanceType: "m4.xlarge",
Placement: awsproviderv1.Placement{
Region: region,
AvailabilityZone: defaultAvailabilityZone,
},
Subnet: awsproviderv1.AWSResourceReference{
ID: aws.String("subnet-0e56b13a64ff8a941"),
},
IAMInstanceProfile: &awsproviderv1.AWSResourceReference{
ID: aws.String("openshift_master_launch_instances"),
},
KeyName: aws.String(keyName),
UserDataSecret: &corev1.LocalObjectReference{
Name: userDataSecretName,
},
Tags: []awsproviderv1.TagSpecification{
{Name: "openshift-node-group-config", Value: "node-config-master"},
{Name: "host-type", Value: "master"},
{Name: "sub-host-type", Value: "default"},
},
SecurityGroups: []awsproviderv1.AWSResourceReference{
{ID: aws.String("sg-00868b02fbe29de17")},
{ID: aws.String("sg-0a4658991dc5eb40a")},
{ID: aws.String("sg-009a70e28fa4ba84e")},
{ID: aws.String("sg-07323d56fb932c84c")},
{ID: aws.String("sg-08b1ffd32874d59a2")},
},
PublicIP: aws.Bool(true),
LoadBalancers: []awsproviderv1.LoadBalancerReference{
{
Name: "cluster-con",
Type: awsproviderv1.ClassicLoadBalancerType,
},
{
Name: "cluster-ext",
Type: awsproviderv1.ClassicLoadBalancerType,
},
{
Name: "cluster-int",
Type: awsproviderv1.ClassicLoadBalancerType,
},
{
Name: "cluster-net-lb",
Type: awsproviderv1.NetworkLoadBalancerType,
},
},
}
}
func stubMachine() (*machinev1.Machine, error) {
machinePc := stubProviderConfig()
providerSpec, err := awsproviderv1.RawExtensionFromProviderSpec(machinePc)
if err != nil {
return nil, fmt.Errorf("codec.EncodeProviderSpec failed: %v", err)
}
machine := &machinev1.Machine{
ObjectMeta: metav1.ObjectMeta{
Name: "aws-actuator-testing-machine",
Namespace: defaultNamespace,
Labels: map[string]string{
machinev1.MachineClusterIDLabel: clusterID,
},
Annotations: map[string]string{
// skip node draining since it's not mocked
machinecontroller.ExcludeNodeDrainingAnnotation: "",
},
},
Spec: machinev1.MachineSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"node-role.kubernetes.io/master": "",
"node-role.kubernetes.io/infra": "",
},
},
ProviderSpec: machinev1.ProviderSpec{
Value: providerSpec,
},
},
}
return machine, nil
}
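// stubUserDataSecret returns a Secret whose user data key carries the
// cloud-config blob defined above.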
func stubUserDataSecret() *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: userDataSecretName,
Namespace: defaultNamespace,
},
Data: map[string][]byte{
userDataSecretKey: []byte(userDataBlob),
},
}
}
func stubAwsCredentialsSecret() *corev1.Secret {
return GenerateAwsCredentialsSecretFromEnv(awsCredentialsSecretName, defaultNamespace)
}
// GenerateAwsCredentialsSecretFromEnv generates secret with AWS credentials
func GenerateAwsCredentialsSecretFromEnv(secretName, namespace string) *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: namespace,
},
Data: map[string][]byte{
awsclient.AwsCredsSecretIDKey: []byte(os.Getenv("AWS_ACCESS_KEY_ID")),
awsclient.AwsCredsSecretAccessKey: []byte(os.Getenv("AWS_SECRET_ACCESS_KEY")),
},
}
}
func stubInstance(imageID, instanceID string) *ec2.Instance {
return &ec2.Instance{
ImageId: aws.String(imageID),
InstanceId: aws.String(instanceID),
State: &ec2.InstanceState{
Name: aws.String(ec2.InstanceStateNameRunning),
Code: aws.Int64(16),
},
LaunchTime: aws.Time(time.Now()),
PublicDnsName: aws.String("publicDNS"),
PrivateDnsName: aws.String("privateDNS"),
PublicIpAddress: aws.String("1.1.1.1"),
PrivateIpAddress: aws.String("1.1.1.1"),
Tags: []*ec2.Tag{
{
Key: aws.String("key"),
Value: aws.String("value"),
},
},
IamInstanceProfile: &ec2.IamInstanceProfile{
Id: aws.String("profile"),
},
SubnetId: aws.String("subnetID"),
Placement: &ec2.Placement{
AvailabilityZone: aws.String("us-east-1a"),
},
SecurityGroups: []*ec2.GroupIdentifier{
{
GroupName: aws.String("groupName"),
},
},
}
}
func stubPCSecurityGroups(groups []awsproviderv1.AWSResourceReference) *awsproviderv1.AWSMachineProviderConfig {
pc := stubProviderConfig()
pc.SecurityGroups = groups
return pc
}
func stubPCSubnet(subnet awsproviderv1.AWSResourceReference) *awsproviderv1.AWSMachineProviderConfig {
pc := stubProviderConfig()
pc.Subnet = subnet
return pc
}
func stubPCAMI(ami awsproviderv1.AWSResourceReference) *awsproviderv1.AWSMachineProviderConfig {
pc := stubProviderConfig()
pc.AMI = ami
return pc
}
func stubDescribeLoadBalancersOutput() *elbv2.DescribeLoadBalancersOutput {
return &elbv2.DescribeLoadBalancersOutput{
LoadBalancers: []*elbv2.LoadBalancer{
{
LoadBalancerName: aws.String("lbname"),
LoadBalancerArn: aws.String("lbarn"),
},
},
}
}
func stubDescribeTargetGroupsOutput() *elbv2.DescribeTargetGroupsOutput {
return &elbv2.DescribeTargetGroupsOutput{
TargetGroups: []*elbv2.TargetGroup{
{
TargetType: aws.String(elbv2.TargetTypeEnumInstance),
TargetGroupArn: aws.String("arn1"),
},
{
TargetType: aws.String(elbv2.TargetTypeEnumIp),
TargetGroupArn: aws.String("arn2"),
},
},
}
}
func stubReservation(imageID, instanceID string) *ec2.Reservation {
az := defaultAvailabilityZone
return &ec2.Reservation{
Instances: []*ec2.Instance{
{
ImageId: aws.String(imageID),
InstanceId: aws.String(instanceID),
State: &ec2.InstanceState{
Name: aws.String(ec2.InstanceStateNameRunning),
Code: aws.Int64(16),
},
LaunchTime: aws.Time(time.Now()),
Placement: &ec2.Placement{
AvailabilityZone: &az,
},
},
},
}
}
func stubDescribeInstancesOutput(imageID, instanceID string, state string) *ec2.DescribeInstancesOutput {
return &ec2.DescribeInstancesOutput{
Reservations: []*ec2.Reservation{
{
Instances: []*ec2.Instance{
{
ImageId: aws.String(imageID),
InstanceId: aws.String(instanceID),
State: &ec2.InstanceState{
Name: aws.String(state),
Code: aws.Int64(16),
},
LaunchTime: aws.Time(time.Now()),
},
},
},
},
}
}
| [
"\"AWS_ACCESS_KEY_ID\"",
"\"AWS_SECRET_ACCESS_KEY\""
]
| []
| [
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY"
]
| [] | ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"] | go | 2 | 0 | |
src/tests/ftest/util/fault_config_utils.py | #!/usr/bin/python3
'''
(C) Copyright 2019-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
'''
import os
import io
import yaml
from general_utils import distribute_files, run_command, get_clush_command, DaosTestError
# a lookup table of predefined faults
#
# In addition the following fault IDs are used elsewhere
#
# 0: This is used in D_ALLOC to force memory allocation failures.
# 100: Used in dfuse to trigger an exit after initialization is complete
# 101: Used by daos_init() to disable fault id 0 for duration of daos_init
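# Each FAULTS entry maps a symbolic fault name to the fields written into the
# D_FI_CONFIG YAML: the numeric fault id plus the probability, interval and
# max_faults settings that control how often the fault fires.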
FAULTS = {
'DAOS_CSUM_CORRUPT_DISK': {
'id': '65574',
'probability_x': '100',
'probability_y': '100',
'interval': '5',
'max_faults': '10'},
'DAOS_CSUM_CORRUPT_UPDATE': {
'id': '65568',
'probability_x': '20',
'probability_y': '100',
'interval': '1',
'max_faults': '1'},
'DAOS_DTX_LOST_RPC_REQUEST': {
'id': '65587',
'probability_x': '100',
'probability_y': '100',
'interval': '1',
'max_faults': '1'},
'DAOS_DTX_LOST_RPC_REPLY': {
'id': '65588',
'probability_x': '100',
'probability_y': '100',
'interval': '1',
'max_faults': '1'},
'DAOS_DTX_LONG_TIME_RESEND': {
'id': '65589',
'probability_x': '100',
'probability_y': '100',
'interval': '1',
'max_faults': '1'},
'DAOS_SHARD_OBJ_UPDATE_TIMEOUT': {
'id': '65537',
'probability_x': '100',
'probability_y': '100',
'interval': '1',
'max_faults': '1'},
'DAOS_SHARD_OBJ_FETCH_TIMEOUT': {
'id': '65538',
'probability_x': '100',
'probability_y': '100',
'interval': '1',
'max_faults': '1'},
'DAOS_SHARD_OBJ_FAIL': {
'id': '65539',
'probability_x': '100',
'probability_y': '100',
'interval': '1',
'max_faults': '1'},
'DAOS_OBJ_UPDATE_NOSPACE': {
'id': '65540',
'probability_x': '100',
'probability_y': '100',
'interval': '1',
'max_faults': '1'},
'DAOS_SHARD_OBJ_RW_CRT_ERROR': {
'id': '65541',
'probability_x': '100',
'probability_y': '100',
'interval': '1',
'max_faults': '1'},
'DAOS_OBJ_REQ_CREATE_TIMEOUT': {
'id': '65542',
'probability_x': '100',
'probability_y': '100',
'interval': '1',
'max_faults': '1'},
'DAOS_SHARD_OBJ_UPDATE_TIMEOUT_SINGLE': {
'id': '65543',
'probability_x': '100',
'probability_y': '100',
'interval': '1',
'max_faults': '1'},
'DAOS_OBJ_SPECIAL_SHARD': {
'id': '65544',
'probability_x': '100',
'probability_y': '100',
'interval': '1',
'max_faults': '1'},
'DAOS_OBJ_TGT_IDX_CHANGE': {
'id': '65545',
'probability_x': '100',
'probability_y': '100',
'interval': '1',
'max_faults': '1'},
'DAOS_POOL_CREATE_FAIL_CORPC': {
'id': '65632',
'probability_x': '100',
'probability_y': '20',
'interval': '1',
'max_faults': '10'},
'DAOS_POOL_DESTROY_FAIL_CORPC': {
'id': '65633',
'probability_x': '100',
'probability_y': '20',
'interval': '1',
'max_faults': '1'},
'DAOS_POOL_CONNECT_FAIL_CORPC': {
'id': '65634',
'probability_x': '100',
'probability_y': '20',
'interval': '1',
'max_faults': '10'},
'DAOS_POOL_DISCONNECT_FAIL_CORPC': {
'id': '65635',
'probability_x': '100',
'probability_y': '20',
'interval': '1',
'max_faults': '10'},
'DAOS_POOL_QUERY_FAIL_CORPC': {
'id': '65636',
'probability_x': '100',
'probability_y': '20',
'interval': '1',
'max_faults': '10'},
'DAOS_CONT_DESTROY_FAIL_CORPC': {
'id': '65637',
'probability_x': '100',
'probability_y': '20',
'interval': '1',
'max_faults': '10'},
'DAOS_CONT_CLOSE_FAIL_CORPC': {
'id': '65638',
'probability_x': '100',
'probability_y': '20',
'interval': '1',
'max_faults': '10'},
'DAOS_CONT_QUERY_FAIL_CORPC': {
'id': '65639',
'probability_x': '100',
'probability_y': '20',
'interval': '1',
'max_faults': '10'},
'DAOS_CONT_OPEN_FAIL': {
'id': '65640',
'probability_x': '100',
'probability_y': '20',
'interval': '1',
'max_faults': '10'},
'DAOS_REBUILD_DROP_SCAN': {
'id': '65546',
'probability_x': '100',
'probability_y': '20',
'interval': '1',
'max_faults': '10'},
'DAOS_REBUILD_NO_HDL': {
'id': '65547',
'probability_x': '100',
'probability_y': '20',
'interval': '1',
'max_faults': '10'},
'DAOS_REBUILD_DROP_OBJ': {
'id': '65548',
'probability_x': '100',
'probability_y': '20',
'interval': '1',
'max_faults': '10'},
'DAOS_REBUILD_UPDATE_FAIL': {
'id': '65549',
'probability_x': '100',
'probability_y': '20',
'interval': '1',
'max_faults': '10'},
'DAOS_REBUILD_STALE_POOL': {
'id': '65550',
'probability_x': '100',
'probability_y': '20',
'interval': '1',
'max_faults': '10'},
'DAOS_REBUILD_TGT_IV_UPDATE_FAIL': {
'id': '65551',
'probability_x': '100',
'probability_y': '20',
'interval': '1',
'max_faults': '10'},
'DAOS_REBUILD_TGT_START_FAIL': {
'id': '65552',
'probability_x': '100',
'probability_y': '20',
'interval': '1',
'max_faults': '10'},
'DAOS_REBUILD_HANG': {
'id': '65555',
'probability_x': '100',
'probability_y': '20',
'interval': '1',
'max_faults': '10'},
'DAOS_REBUILD_TGT_SEND_OBJS_FAIL': {
'id': '65556',
'probability_x': '100',
'probability_y': '20',
'interval': '1',
'max_faults': '10'},
'DAOS_REBUILD_TGT_NOSPACE': {
'id': '65559',
'probability_x': '100',
'probability_y': '20',
'interval': '1',
'max_faults': '10'},
}
class FaultInjectionFailed(Exception):
"""Raise if FI failed."""
class FaultInjection():
"""Fault Injection
:avocado: recursive
"""
def __init__(self):
super().__init__()
self._hosts = []
self.fault_file = None
self._test_dir = None
self._fault_list = []
def write_fault_file(self, on_the_fly_fault=None):
""" Write out a fault injection config file.
Args:
on_the_fly_fault --a fault dictionary that isn't predefined
"""
if self._fault_list is None and on_the_fly_fault is None:
raise FaultInjectionFailed("bad parameters")
fi_config = os.path.join(self._test_dir, "fi.yaml")
with io.open(fi_config, 'w', encoding='utf8') as outfile:
yaml.dump({'seed': '123'}, outfile, default_flow_style=False, allow_unicode=True)
fault_config = []
if self._fault_list is not None:
for fault in self._fault_list:
fault_config.append(FAULTS[fault])
if on_the_fly_fault is not None:
fault_config.append(on_the_fly_fault)
yaml.dump({'fault_config': fault_config}, outfile,
default_flow_style=False, allow_unicode=True)
os.environ["D_FI_CONFIG"] = fi_config
self.fault_file = fi_config
def start(self, fault_list, test_dir):
"""Create the fault injection file to inject DAOS faults.
Args:
fault_list (list): List of faults to inject.
test_dir(str) : Path to create the fault injection file.
"""
self._fault_list = fault_list
self._test_dir = test_dir
if self._fault_list:
# not using workdir because the huge path was messing up
# orterun or something, could re-evaluate this later
self.write_fault_file(None)
def copy_fault_files(self, hosts):
"""Copy the fault injection file to all test hosts.
Args:
hosts (list): list of hosts to copy the fault injection file
"""
if self._fault_list:
self._hosts = hosts
distribute_files(self._hosts, self.fault_file, self.fault_file)
def stop(self):
"""Remove the fault injection file created during testing.
Returns:
error_list (list) : Errors during removing fault files (if any).
"""
# Remove the fault injection files on the hosts.
error_list = []
commands = ["rm -f {}".format(self.fault_file)]
if self._hosts:
commands.insert(0, get_clush_command(self._hosts, "-S -v", True))
try:
run_command(" ".join(commands), verbose=True, raise_exception=False)
except DaosTestError as error:
error_list.append(error)
return error_list
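# Typical usage from a test (variable names here are illustrative only):
#   fi = FaultInjection()
#   fi.start(['DAOS_CSUM_CORRUPT_DISK'], test_dir)
#   fi.copy_fault_files(hosts)
#   ... run the workload ...
#   errors = fi.stop()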
| []
| []
| [
"D_FI_CONFIG"
]
| [] | ["D_FI_CONFIG"] | python | 1 | 0 | |
todolist/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "todolist.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
tests/fixtures/mock_hooks.py | """Mock hooks."""
import os
GLOBAL_VALUE = os.getenv('AWS_DEFAULT_REGION')
| []
| []
| [
"AWS_DEFAULT_REGION"
]
| [] | ["AWS_DEFAULT_REGION"] | python | 1 | 0 | |
manage.py | import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "infocom_notice.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
        ) from exc
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 |