filename (string, 4-198 chars) | content (string, 25-939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, 2-3.9k chars) | lang (string, 3 classes) | constargcount (float64, 0-129, ⌀) | variableargcount (float64, 0, ⌀) | sentence (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
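Each row pairs a source file (`content`) with the environment-variable lookups found in it: `environment` holds the raw lookup arguments, `constarg`/`variablearg` split them into string-literal and non-literal names, the `*json` columns carry the same lists JSON-encoded, and the counts summarize them. As a purely illustrative sketch of the record shape (the field semantics are inferred from the header and rows, not documented here), the first row below could be read as:

```python
# Illustrative only: field names follow the header above, values are taken from
# the first row below (the full file text is elided).
row = {
    "filename": "traffic_ops/app/db/admin.go",
    "content": "...full Go source as shown in the row...",
    "environment": ['"HOME"'],   # raw argument text of the env lookup
    "variablearg": [],           # lookups whose argument is not a string literal
    "constarg": ["HOME"],        # lookups with a string-literal argument
    "variableargjson": "[]",
    "constargjson": '["HOME"]',
    "lang": "go",
    "constargcount": 1.0,
    "variableargcount": 0.0,
    "sentence": "",              # single-class column; appears empty in these rows
}
```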
traffic_ops/app/db/admin.go
|
package main
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import (
"bytes"
"errors"
"flag"
"fmt"
"io/ioutil"
"os"
"os/exec"
"strings"
"gopkg.in/yaml.v2"
)
type DBConfig struct {
Development GooseConfig `yaml:"development"`
Test GooseConfig `yaml:"test"`
Integration GooseConfig `yaml:"integration"`
Production GooseConfig `yaml:"production"`
}
type GooseConfig struct {
Driver string `yaml:"driver"`
Open string `yaml:"open"`
}
func (conf DBConfig) getGooseConfig(env string) (GooseConfig, error) {
switch env {
case EnvDevelopment:
return conf.Development, nil
case EnvTest:
return conf.Test, nil
case EnvIntegration:
return conf.Integration, nil
case EnvProduction:
return conf.Production, nil
default:
return GooseConfig{}, errors.New("invalid environment: " + env)
}
}
const (
// the possible environments to use
EnvDevelopment = "development"
EnvTest = "test"
EnvIntegration = "integration"
EnvProduction = "production"
// keys in the goose config's "open" string value
HostKey = "host"
PortKey = "port"
UserKey = "user"
PasswordKey = "password"
DBNameKey = "dbname"
// available commands
CmdCreateDB = "createdb"
CmdDropDB = "dropdb"
CmdCreateUser = "create_user"
CmdDropUser = "drop_user"
CmdShowUsers = "show_users"
CmdReset = "reset"
CmdUpgrade = "upgrade"
CmdMigrate = "migrate"
CmdDown = "down"
CmdRedo = "redo"
CmdStatus = "status"
CmdDBVersion = "dbversion"
CmdSeed = "seed"
CmdLoadSchema = "load_schema"
CmdReverseSchema = "reverse_schema"
CmdPatch = "patch"
// goose commands that don't match the commands for this tool
GooseUp = "up"
DBConfigPath = "db/dbconf.yml"
DBSeedsPath = "db/seeds.sql"
DBSchemaPath = "db/create_tables.sql"
DBPatchesPath = "db/patches.sql"
DefaultEnvironment = EnvDevelopment
DefaultDBSuperUser = "postgres"
)
var (
// globals that are passed in via CLI flags and used in commands
Environment string
// globals that are parsed out of DBConfigFile and used in commands
DBName string
DBSuperUser = DefaultDBSuperUser
DBUser string
DBPassword string
HostIP string
HostPort string
)
func parseDBConfig() error {
confBytes, err := ioutil.ReadFile(DBConfigPath)
if err != nil {
return errors.New("reading DB conf '" + DBConfigPath + "': " + err.Error())
}
dbConfig := DBConfig{}
err = yaml.Unmarshal(confBytes, &dbConfig)
if err != nil {
return errors.New("unmarshalling DB conf yaml: " + err.Error())
}
gooseCfg, err := dbConfig.getGooseConfig(Environment)
if err != nil {
return errors.New("getting goose config: " + err.Error())
}
// parse the 'open' string into a map
open := make(map[string]string)
pairs := strings.Split(gooseCfg.Open, " ")
for _, pair := range pairs {
if pair == "" {
continue
}
kv := strings.Split(pair, "=")
if len(kv) != 2 || kv[0] == "" || kv[1] == "" {
continue
}
open[kv[0]] = kv[1]
}
ok := false
HostIP, ok = open[HostKey]
if !ok {
return errors.New("unable to get '" + HostKey + "' for environment '" + Environment + "'")
}
HostPort, ok = open[PortKey]
if !ok {
return errors.New("unable to get '" + PortKey + "' for environment '" + Environment + "'")
}
DBUser, ok = open[UserKey]
if !ok {
return errors.New("unable to get '" + UserKey + "' for environment '" + Environment + "'")
}
DBPassword, ok = open[PasswordKey]
if !ok {
return errors.New("unable to get '" + PasswordKey + "' for environment '" + Environment + "'")
}
DBName, ok = open[DBNameKey]
if !ok {
return errors.New("unable to get '" + DBNameKey + "' for environment '" + Environment + "'")
}
return nil
}
func createDB() {
dbExistsCmd := exec.Command("psql", "-h", HostIP, "-U", DBSuperUser, "-p", HostPort, "-tAc", "SELECT 1 FROM pg_database WHERE datname='"+DBName+"'")
stderr := bytes.Buffer{}
dbExistsCmd.Stderr = &stderr
out, err := dbExistsCmd.Output()
// An error is returned if the database could not be found, which is to be expected. Don't exit on this error.
if err != nil {
fmt.Println("unable to check if DB already exists: " + err.Error() + ", stderr: " + stderr.String())
}
if len(out) > 0 {
fmt.Println("Database " + DBName + " already exists")
return
}
createDBCmd := exec.Command("createdb", "-h", HostIP, "-p", HostPort, "-U", DBSuperUser, "-e", "--owner", DBUser, DBName)
out, err = createDBCmd.CombinedOutput()
fmt.Printf("%s", out)
if err != nil {
die("Can't create db " + DBName)
}
}
func dropDB() {
fmt.Println("Dropping database: " + DBName)
cmd := exec.Command("dropdb", "-h", HostIP, "-p", HostPort, "-U", DBSuperUser, "-e", "--if-exists", DBName)
out, err := cmd.CombinedOutput()
fmt.Printf("%s", out)
if err != nil {
die("Can't drop db " + DBName)
}
}
func createUser() {
fmt.Println("Creating user: " + DBUser)
userExistsCmd := exec.Command("psql", "-h", HostIP, "-U", DBSuperUser, "-p", HostPort, "-tAc", "SELECT 1 FROM pg_roles WHERE rolname='"+DBUser+"'")
stderr := bytes.Buffer{}
userExistsCmd.Stderr = &stderr
out, err := userExistsCmd.Output()
// An error is returned if the user could not be found, which is to be expected. Don't exit on this error.
if err != nil {
fmt.Println("unable to check if user already exists: " + err.Error() + ", stderr: " + stderr.String())
}
if len(out) > 0 {
fmt.Println("User " + DBUser + " already exists")
return
}
createUserCmd := exec.Command("psql", "-h", HostIP, "-p", HostPort, "-U", DBSuperUser, "-etAc", "CREATE USER "+DBUser+" WITH LOGIN ENCRYPTED PASSWORD '"+DBPassword+"'")
out, err = createUserCmd.CombinedOutput()
fmt.Printf("%s", out)
if err != nil {
die("Can't create user " + DBUser)
}
}
func dropUser() {
cmd := exec.Command("dropuser", "-h", HostIP, "-p", HostPort, "-U", DBSuperUser, "-i", "-e", DBUser)
out, err := cmd.CombinedOutput()
fmt.Printf("%s", out)
if err != nil {
die("Can't drop user " + DBUser)
}
}
func showUsers() {
cmd := exec.Command("psql", "-h", HostIP, "-p", HostPort, "-U", DBSuperUser, "-ec", `\du`)
out, err := cmd.CombinedOutput()
fmt.Printf("%s", out)
if err != nil {
die("Can't show users")
}
}
func reset() {
createUser()
dropDB()
createDB()
loadSchema()
migrate()
}
func upgrade() {
goose(GooseUp)
seed()
patch()
}
func migrate() {
goose(GooseUp)
}
func down() {
goose(CmdDown)
}
func redo() {
goose(CmdRedo)
}
func status() {
goose(CmdStatus)
}
func dbVersion() {
goose(CmdDBVersion)
}
func seed() {
fmt.Println("Seeding database w/ required data.")
seedsBytes, err := ioutil.ReadFile(DBSeedsPath)
if err != nil {
die("unable to read '" + DBSeedsPath + "': " + err.Error())
}
cmd := exec.Command("psql", "-h", HostIP, "-p", HostPort, "-d", DBName, "-U", DBUser, "-e", "-v", "ON_ERROR_STOP=1")
cmd.Stdin = bytes.NewBuffer(seedsBytes)
cmd.Env = append(os.Environ(), "PGPASSWORD="+DBPassword)
out, err := cmd.CombinedOutput()
fmt.Printf("%s", out)
if err != nil {
die("Can't patch database w/ required data")
}
}
func loadSchema() {
fmt.Println("Creating database tables.")
schemaBytes, err := ioutil.ReadFile(DBSchemaPath)
if err != nil {
die("unable to read '" + DBSchemaPath + "': " + err.Error())
}
cmd := exec.Command("psql", "-h", HostIP, "-p", HostPort, "-d", DBName, "-U", DBUser, "-e", "-v", "ON_ERROR_STOP=1")
cmd.Stdin = bytes.NewBuffer(schemaBytes)
cmd.Env = append(os.Environ(), "PGPASSWORD="+DBPassword)
out, err := cmd.CombinedOutput()
fmt.Printf("%s", out)
if err != nil {
die("Can't create database tables")
}
}
func reverseSchema() {
fmt.Fprintf(os.Stderr, "WARNING: the '%s' command will be removed with Traffic Ops Perl because it will no longer be necessary\n", CmdReverseSchema)
cmd := exec.Command("db/reverse_schema.pl")
cmd.Env = append(os.Environ(), "MOJO_MODE="+Environment)
out, err := cmd.CombinedOutput()
fmt.Printf("%s", out)
if err != nil {
die("Can't run `db/reverse_schema.pl`: " + err.Error())
}
}
func patch() {
fmt.Println("Patching database with required data fixes.")
patchesBytes, err := ioutil.ReadFile(DBPatchesPath)
if err != nil {
die("unable to read '" + DBPatchesPath + "': " + err.Error())
}
cmd := exec.Command("psql", "-h", HostIP, "-p", HostPort, "-d", DBName, "-U", DBUser, "-e", "-v", "ON_ERROR_STOP=1")
cmd.Stdin = bytes.NewBuffer(patchesBytes)
cmd.Env = append(os.Environ(), "PGPASSWORD="+DBPassword)
out, err := cmd.CombinedOutput()
fmt.Printf("%s", out)
if err != nil {
die("Can't patch database w/ required data")
}
}
func goose(arg string) {
fmt.Println("Running goose " + arg + "...")
cmd := exec.Command("goose", "--env="+Environment, arg)
out, err := cmd.CombinedOutput()
fmt.Printf("%s", out)
if err != nil {
die("Can't run goose: " + err.Error())
}
}
func die(message string) {
fmt.Println(message)
os.Exit(1)
}
func usage() string {
programName := os.Args[0]
home := "$HOME"
home = os.Getenv("HOME")
return `
Usage: ` + programName + ` [--env (development|test|production|integration)] [arguments]
Example: ` + programName + ` --env=test reset
Purpose: This script is used to manage the database. The environments are
defined in dbconf.yml, as well as the database names.
NOTE:
Postgres Superuser: The 'postgres' superuser needs to be created to run ` + programName + ` and set up databases.
If the 'postgres' superuser has not been created or its password has not been set, then run the following commands accordingly.
Create the 'postgres' user as a super user (if not created):
$ createuser postgres --superuser --createrole --createdb --login --pwprompt
Modify your ` + home + `/.pgpass file, which allows easy command-line access by supplying the database user and password
without prompts.
Postgres .pgpass file format:
hostname:port:database:username:password
----------------------
Example Contents
----------------------
*:*:*:postgres:your-postgres-password
*:*:*:traffic_ops:the-password-in-dbconf.yml
----------------------
Save the example contents above into ` + home + `/.pgpass and set the file's permissions
so only your user can read and write it.
$ chmod 0600 ` + home + `/.pgpass
===================================================================================================================
` + programName + ` arguments:
createdb - Execute 'createdb' on the database for the current environment.
create_user - Execute 'create_user' to create the user for the current environment (traffic_ops).
dropdb - Execute 'dropdb' on the database for the current environment.
down - Roll back a single migration from the current version.
drop_user - Execute 'drop_user' to drop the user for the current environment (traffic_ops).
patch - Execute sql from db/patches.sql for loading post-migration data patches.
redo - Roll back the most recently applied migration, then run it again.
reset - Execute create_user, 'dropdb', 'createdb', load_schema, and migrate on the database for the current environment.
reverse_schema - Reverse engineer the lib/Schema/Result files from the environment database.
seed - Execute sql from db/seeds.sql for loading static data.
show_users - Execute sql to show all of the users for the current environment.
status - Print the status of all migrations.
upgrade - Execute migrate, seed, and patches on the database for the current environment.
migrate - Execute migrate (without seeds or patches) on the database for the current environment.
`
}
func main() {
flag.StringVar(&Environment, "env", DefaultEnvironment, "The environment to use (defined in "+DBConfigPath+").")
flag.Parse()
if len(flag.Args()) != 1 || flag.Arg(0) == "" {
die(usage())
}
if Environment == "" {
die(usage())
}
if err := parseDBConfig(); err != nil {
die(err.Error())
}
commands := make(map[string]func())
commands[CmdCreateDB] = createDB
commands[CmdDropDB] = dropDB
commands[CmdCreateUser] = createUser
commands[CmdDropUser] = dropUser
commands[CmdShowUsers] = showUsers
commands[CmdReset] = reset
commands[CmdUpgrade] = upgrade
commands[CmdMigrate] = migrate
commands[CmdDown] = down
commands[CmdRedo] = redo
commands[CmdStatus] = status
commands[CmdDBVersion] = dbVersion
commands[CmdSeed] = seed
commands[CmdLoadSchema] = loadSchema
commands[CmdReverseSchema] = reverseSchema
commands[CmdPatch] = patch
userCmd := flag.Arg(0)
if cmd, ok := commands[userCmd]; ok {
cmd()
} else {
fmt.Println(usage())
die("invalid command: " + userCmd)
}
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
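For this row, the single `os.Getenv("HOME")` call inside `usage()` accounts for the `environment`/`constarg` values and the `constargcount` of 1. Below is a minimal sketch of how such constant-argument lookups could be pulled out of Go source with a regex; this is only an illustration, not the tooling that produced the dataset:

```python
import re

# Matches os.Getenv("LITERAL") calls; only string-literal arguments are captured,
# which is what the constarg column appears to record.
GO_GETENV_RE = re.compile(r'os\.Getenv\(\s*"([^"]+)"\s*\)')

def const_env_args_go(go_source: str) -> list[str]:
    """Return the string-literal names passed to os.Getenv in a Go file."""
    return GO_GETENV_RE.findall(go_source)

# Applied to the admin.go content above, this yields ["HOME"], matching the
# row's constarg list.
```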
cd4ml/tracking.py
|
import mlflow
import os
MLFLOW_TRACKING_URL = os.getenv('MLFLOW_TRACKING_URL')
TENANT = os.getenv('TENANT', 'local')
RUN_LABEL = os.getenv('BUILD_NUMBER', '0')
USE_MLFLOW = MLFLOW_TRACKING_URL is not None
# TODO: clean this up
class track:
def __enter__(self):
if USE_MLFLOW:
mlflow.set_tracking_uri(uri=MLFLOW_TRACKING_URL)
mlflow.set_experiment(TENANT)
mlflow.start_run(run_name=RUN_LABEL)
print("MLFLOW_TRACKING_URL: ", MLFLOW_TRACKING_URL)
self.artifact_uri = mlflow.get_artifact_uri()
print('artifact_uri: ', self.artifact_uri)
return self
def __exit__(self, type, value, traceback):
if USE_MLFLOW:
mlflow.end_run()
@staticmethod
def log_param(key, val):
if USE_MLFLOW:
mlflow.log_param(key, val)
def log_ml_params(self, ml_params):
for key, val in ml_params.items():
self.log_param(key, val)
def log_pipeline_params(self, pipeline_params):
excluded_keys = ['download_data_info']
for key, val in pipeline_params.items():
if key not in excluded_keys:
self.log_param(key, val.__repr__())
@staticmethod
def log_metrics(metrics):
if USE_MLFLOW:
for key, val in metrics.items():
mlflow.log_metric(key, val)
@staticmethod
def log_artifact(filename):
if USE_MLFLOW:
mlflow.log_artifact(filename)
|
[] |
[] |
[
"BUILD_NUMBER",
"MLFLOW_TRACKING_URL",
"TENANT"
] |
[]
|
["BUILD_NUMBER", "MLFLOW_TRACKING_URL", "TENANT"]
|
python
| 3 | 0 | |
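The Python rows work the same way for `os.getenv` and `os.environ.get`. A small AST-based sketch (again an illustration, not the dataset's own extraction code) that collects literal key names:

```python
import ast

def const_env_args_py(py_source: str) -> list[str]:
    """Collect string-literal keys passed to os.getenv(...) or os.environ.get(...)."""
    names = []
    for node in ast.walk(ast.parse(py_source)):
        if not (isinstance(node, ast.Call) and node.args):
            continue
        func = node.func
        is_getenv = (isinstance(func, ast.Attribute) and func.attr == "getenv"
                     and isinstance(func.value, ast.Name) and func.value.id == "os")
        is_environ_get = (isinstance(func, ast.Attribute) and func.attr == "get"
                          and isinstance(func.value, ast.Attribute)
                          and func.value.attr == "environ")
        if is_getenv or is_environ_get:
            key = node.args[0]
            if isinstance(key, ast.Constant) and isinstance(key.value, str):
                names.append(key.value)
    return names

# Applied to cd4ml/tracking.py above, this returns MLFLOW_TRACKING_URL, TENANT
# and BUILD_NUMBER -- the same set as the row's constarg column (order aside).
```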
src/python/pants/bin/local_pants_runner.py
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from colors import green
from pants.base.build_environment import get_buildroot
from pants.bin.goal_runner import GoalRunner, OptionsInitializer, ReportingInitializer
from pants.bin.repro import Reproducer
from pants.option.options_bootstrapper import OptionsBootstrapper
class LocalPantsRunner(object):
"""Handles a single pants invocation running in the process-local context."""
def __init__(self, exiter, args, env, options_bootstrapper=None):
"""
:param Exiter exiter: The Exiter instance to use for this run.
:param list args: The arguments (e.g. sys.argv) for this run.
:param dict env: The environment (e.g. os.environ) for this run.
:param OptionsBootstrapper options_bootstrapper: An optional existing OptionsBootstrapper.
"""
self._exiter = exiter
self._args = args
self._env = env
self._options_bootstrapper = options_bootstrapper
self._profile_path = self._env.get('PANTS_PROFILE')
def _maybe_profiled(self, runner):
"""Run with profiling, if requested."""
if self._profile_path:
import cProfile
profiler = cProfile.Profile()
try:
profiler.runcall(runner)
finally:
profiler.dump_stats(self._profile_path)
print('\nDumped profile data to {}'.format(self._profile_path))
view_cmd = green('gprof2dot -f pstats {path} | dot -Tpng -o {path}.png && open {path}.png'
.format(path=self._profile_path))
print('Use, e.g., {} to render and view.'.format(view_cmd))
else:
runner()
def run(self):
self._maybe_profiled(self._run)
def _run(self):
# Bootstrap options and logging.
options_bootstrapper = self._options_bootstrapper or OptionsBootstrapper(env=self._env,
args=self._args)
options, build_config = OptionsInitializer(options_bootstrapper, exiter=self._exiter).setup()
# Apply exiter options.
self._exiter.apply_options(options)
# Launch RunTracker as early as possible (just after Subsystem options are initialized).
run_tracker, reporting = ReportingInitializer().setup()
try:
# Determine the build root dir.
root_dir = get_buildroot()
# Capture a repro of the 'before' state for this build, if needed.
repro = Reproducer.global_instance().create_repro()
if repro:
repro.capture(run_tracker.run_info.get_as_dict())
# Setup and run GoalRunner.
goal_runner = GoalRunner.Factory(root_dir,
options,
build_config,
run_tracker,
reporting,
exiter=self._exiter).setup()
result = goal_runner.run()
if repro:
# TODO: Have Repro capture the 'after' state (as a diff) as well?
repro.log_location_of_repro_file()
finally:
run_tracker.end()
self._exiter.exit(result)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
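This row reports no environment reads even though the runner checks `PANTS_PROFILE`: the lookup goes through the injected `env` mapping (`self._env.get('PANTS_PROFILE')`) rather than through `os.environ` or `os.getenv` directly, so a scanner keyed on those attributes, like the sketch above, would not count it. That reading of the zero counts is an inference from the row, not something the dataset states:

```python
import ast

# The call chain here is self._env.get(...), with no reference to the os module,
# so nothing is attributed to os.environ / os.getenv for this file.
src = "profile = self._env.get('PANTS_PROFILE')"
call = next(n for n in ast.walk(ast.parse(src)) if isinstance(n, ast.Call))
print(call.func.attr, call.func.value.attr)   # -> get _env
```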
s3_url_server.py
|
"""
Token-based auth for S3 buckets, handing out presigned urls.
ENV Args:
S3_ADDRESS = S3 endpoint address
ACCESS_KEY = S3 access key
SECRET_KEY = S3 secret key
EXPIRE_DEFAULT = default url expiration in seconds (default: 1 hour)
EXPIRE_LIMIT = limit for url expiration in seconds (default: 1 day)
AUTH_SECRET = token auth secret
AUTH_ISSUER = token auth issuer (default: IceCube token service)
AUTH_ALGORITHM = token auth algorithm (default: RS512)
ADDRESS = local address to serve from (default: all interfaces)
PORT = local port to serve from (default: 8080)
LOGLEVEL = log level (default: INFO)
"""
import os
import json
import logging
from functools import partial
from rest_tools.client import json_decode
from rest_tools.server import RestServer, RestHandler, RestHandlerSetup, scope_role_auth
from tornado.web import HTTPError
from tornado.ioloop import IOLoop
import boto3
class PresignedURL:
"""Handle S3 access and creating presigned urls"""
def __init__(self, address, access_key, secret_key, expiration, expiration_limit):
#self.address = address
#self.access_key = access_key
#self.secret_key = secret_key
self.expiration = expiration
self.expiration_limit = expiration_limit
self.s3 = boto3.client('s3','us-east-1',
endpoint_url=address,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
def get(self, bucket, key, expiration=None, method='GET'):
if not expiration:
expiration = self.expiration
if (not isinstance(expiration, int)) or expiration < 1 or expiration > self.expiration_limit:
raise Exception('invalid expiration time')
params = {
'Bucket': bucket,
'Key': key,
}
client_method = 'get_object'
if method == 'PUT':
client_method = 'put_object'
elif method == 'DELETE':
client_method = 'delete_object'
url = self.s3.generate_presigned_url(
ClientMethod=client_method,
Params=params,
ExpiresIn=expiration,
HttpMethod=method,
)
return url
### now do the http server
role_auth = partial(scope_role_auth, prefix='s3-url')
class MyHandler(RestHandler):
def initialize(self, s3=None, **kwargs):
super(MyHandler, self).initialize(**kwargs)
self.s3 = s3
class S3Object(MyHandler):
async def helper(self, bucket, key, method):
if not bucket:
raise HTTPError(404, reason='bad bucket name')
if not key:
raise HTTPError(404, reason='bad object name')
try:
req = json_decode(self.request.body)
except Exception:
req = {}
expiration = req.get('expiration', None)
url = self.s3.get(bucket, key, expiration=expiration, method=method)
self.write(url)
@role_auth(roles=['read'])
async def get(self, bucket, key):
await self.helper(bucket, key, 'GET')
@role_auth(roles=['write'])
async def put(self, bucket, key):
await self.helper(bucket, key, 'PUT')
@role_auth(roles=['write'])
async def delete(self, bucket, key):
await self.helper(bucket, key, 'DELETE')
### now configure
def configs():
config = {
's3': {
'address': os.environ.get('S3_ADDRESS'),
'access_key': os.environ.get('ACCESS_KEY'),
'secret_key': os.environ.get('SECRET_KEY'),
'expiration': int(os.environ.get('EXPIRE_DEFAULT', 3600)),
'expiration_limit': int(os.environ.get('EXPIRE_LIMIT', 86400)),
},
'auth': {
'secret': os.environ.get('AUTH_SECRET'),
'issuer': os.environ.get('AUTH_ISSUER', 'https://tokens.icecube.wisc.edu'),
'algorithm': os.environ.get('AUTH_ALGORITHM', 'RS512'),
},
'address': os.environ.get('ADDRESS', ''),
'port': int(os.environ.get('PORT', '8080')),
'loglevel': os.environ.get('LOGLEVEL', 'INFO'),
}
return config
def app(config):
kwargs = RestHandlerSetup(config)
kwargs.update({'s3': PresignedURL(**config['s3'])})
server = RestServer()
server.add_route(r'/(?P<bucket>[^\?]+)/(?P<key>[^\?]+)', S3Object, kwargs)
return server
def main():
config = configs()
logging.basicConfig(level=config['loglevel'])
server = app(config)
server.startup(address=config['address'], port=config['port'])
IOLoop.current().start()
if __name__ == '__main__':
main()
|
[] |
[] |
[
"PORT",
"AUTH_SECRET",
"ACCESS_KEY",
"EXPIRE_DEFAULT",
"EXPIRE_LIMIT",
"SECRET_KEY",
"AUTH_ALGORITHM",
"ADDRESS",
"LOGLEVEL",
"S3_ADDRESS",
"AUTH_ISSUER"
] |
[]
|
["PORT", "AUTH_SECRET", "ACCESS_KEY", "EXPIRE_DEFAULT", "EXPIRE_LIMIT", "SECRET_KEY", "AUTH_ALGORITHM", "ADDRESS", "LOGLEVEL", "S3_ADDRESS", "AUTH_ISSUER"]
|
python
| 11 | 0 | |
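Once the eleven variables documented in this row's docstring are set and the service is running, requesting a presigned URL is a single authenticated HTTP call to `/<bucket>/<key>`. A hypothetical client sketch follows; the host, the token, and the bearer-style Authorization header are assumptions here, since the token handling lives inside rest_tools:

```python
import requests  # any HTTP client would do; requests is used here for brevity

# GET returns a presigned download URL; an optional JSON body may carry a custom
# "expiration" in seconds, which the server caps at EXPIRE_LIMIT.
resp = requests.get(
    "http://localhost:8080/my-bucket/path/to/object",   # hypothetical bucket/key
    headers={"Authorization": "Bearer <token>"},         # token with the s3-url read scope (assumed format)
    json={"expiration": 600},
)
resp.raise_for_status()
presigned_url = resp.text  # the handler writes the URL as the response body
```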
tstest/integration/integration_test.go
|
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package integration
import (
"bytes"
"context"
crand "crypto/rand"
"crypto/tls"
"encoding/json"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httptest"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"go4.org/mem"
"tailscale.com/derp"
"tailscale.com/derp/derphttp"
"tailscale.com/ipn/ipnstate"
"tailscale.com/net/stun/stuntest"
"tailscale.com/safesocket"
"tailscale.com/smallzstd"
"tailscale.com/tailcfg"
"tailscale.com/tstest"
"tailscale.com/tstest/integration/testcontrol"
"tailscale.com/types/key"
"tailscale.com/types/logger"
"tailscale.com/types/nettype"
)
var (
verboseLogCatcher = flag.Bool("verbose-log-catcher", false, "verbose log catcher logging")
verboseTailscaled = flag.Bool("verbose-tailscaled", false, "verbose tailscaled logging")
)
var mainError atomic.Value // of error
func TestMain(m *testing.M) {
flag.Parse()
v := m.Run()
if v != 0 {
os.Exit(v)
}
if err, ok := mainError.Load().(error); ok {
fmt.Fprintf(os.Stderr, "FAIL: %v\n", err)
os.Exit(1)
}
os.Exit(0)
}
func TestOneNodeUp_NoAuth(t *testing.T) {
t.Parallel()
bins := BuildTestBinaries(t)
env := newTestEnv(t, bins)
defer env.Close()
n1 := newTestNode(t, env)
d1 := n1.StartDaemon(t)
defer d1.Kill()
n1.AwaitListening(t)
st := n1.MustStatus(t)
t.Logf("Status: %s", st.BackendState)
if err := tstest.WaitFor(20*time.Second, func() error {
const sub = `Program starting: `
if !env.LogCatcher.logsContains(mem.S(sub)) {
return fmt.Errorf("log catcher didn't see %#q; got %s", sub, env.LogCatcher.logsString())
}
return nil
}); err != nil {
t.Error(err)
}
n1.MustUp()
if d, _ := time.ParseDuration(os.Getenv("TS_POST_UP_SLEEP")); d > 0 {
t.Logf("Sleeping for %v to give 'up' time to misbehave (https://github.com/tailscale/tailscale/issues/1840) ...", d)
time.Sleep(d)
}
t.Logf("Got IP: %v", n1.AwaitIP(t))
n1.AwaitRunning(t)
d1.MustCleanShutdown(t)
t.Logf("number of HTTP logcatcher requests: %v", env.LogCatcher.numRequests())
}
func TestOneNodeUp_Auth(t *testing.T) {
t.Parallel()
bins := BuildTestBinaries(t)
env := newTestEnv(t, bins)
defer env.Close()
env.Control.RequireAuth = true
n1 := newTestNode(t, env)
d1 := n1.StartDaemon(t)
defer d1.Kill()
n1.AwaitListening(t)
st := n1.MustStatus(t)
t.Logf("Status: %s", st.BackendState)
t.Logf("Running up --login-server=%s ...", env.ControlServer.URL)
cmd := n1.Tailscale("up", "--login-server="+env.ControlServer.URL)
var authCountAtomic int32
cmd.Stdout = &authURLParserWriter{fn: func(urlStr string) error {
if env.Control.CompleteAuth(urlStr) {
atomic.AddInt32(&authCountAtomic, 1)
t.Logf("completed auth path %s", urlStr)
return nil
}
err := fmt.Errorf("Failed to complete auth path to %q", urlStr)
t.Log(err)
return err
}}
cmd.Stderr = cmd.Stdout
if err := cmd.Run(); err != nil {
t.Fatalf("up: %v", err)
}
t.Logf("Got IP: %v", n1.AwaitIP(t))
n1.AwaitRunning(t)
if n := atomic.LoadInt32(&authCountAtomic); n != 1 {
t.Errorf("Auth URLs completed = %d; want 1", n)
}
d1.MustCleanShutdown(t)
}
func TestTwoNodes(t *testing.T) {
t.Parallel()
bins := BuildTestBinaries(t)
env := newTestEnv(t, bins)
defer env.Close()
// Create two nodes:
n1 := newTestNode(t, env)
d1 := n1.StartDaemon(t)
defer d1.Kill()
n2 := newTestNode(t, env)
d2 := n2.StartDaemon(t)
defer d2.Kill()
n1.AwaitListening(t)
n2.AwaitListening(t)
n1.MustUp()
n2.MustUp()
n1.AwaitRunning(t)
n2.AwaitRunning(t)
if err := tstest.WaitFor(2*time.Second, func() error {
st := n1.MustStatus(t)
if len(st.Peer) == 0 {
return errors.New("no peers")
}
if len(st.Peer) > 1 {
return fmt.Errorf("got %d peers; want 1", len(st.Peer))
}
peer := st.Peer[st.Peers()[0]]
if peer.ID == st.Self.ID {
return errors.New("peer is self")
}
return nil
}); err != nil {
t.Error(err)
}
d1.MustCleanShutdown(t)
d2.MustCleanShutdown(t)
}
func TestNodeAddressIPFields(t *testing.T) {
t.Parallel()
bins := BuildTestBinaries(t)
env := newTestEnv(t, bins)
defer env.Close()
n1 := newTestNode(t, env)
d1 := n1.StartDaemon(t)
defer d1.Kill()
n1.AwaitListening(t)
n1.MustUp()
n1.AwaitRunning(t)
testNodes := env.Control.AllNodes()
if len(testNodes) != 1 {
t.Errorf("Expected %d nodes, got %d", 1, len(testNodes))
}
node := testNodes[0]
if len(node.Addresses) == 0 {
t.Errorf("Empty Addresses field in node")
}
if len(node.AllowedIPs) == 0 {
t.Errorf("Empty AllowedIPs field in node")
}
d1.MustCleanShutdown(t)
}
func TestAddPingRequest(t *testing.T) {
t.Parallel()
bins := BuildTestBinaries(t)
env := newTestEnv(t, bins)
defer env.Close()
n1 := newTestNode(t, env)
d1 := n1.StartDaemon(t)
defer d1.Kill()
n1.AwaitListening(t)
n1.MustUp()
n1.AwaitRunning(t)
gotPing := make(chan bool, 1)
waitPing := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
gotPing <- true
}))
defer waitPing.Close()
nodes := env.Control.AllNodes()
if len(nodes) != 1 {
t.Fatalf("expected 1 node, got %d nodes", len(nodes))
}
nodeKey := nodes[0].Key
// Check that we get at least one ping reply after 10 tries.
for try := 1; try <= 10; try++ {
t.Logf("ping %v ...", try)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
if err := env.Control.AwaitNodeInMapRequest(ctx, nodeKey); err != nil {
t.Fatal(err)
}
cancel()
pr := &tailcfg.PingRequest{URL: fmt.Sprintf("%s/ping-%d", waitPing.URL, try), Log: true}
if !env.Control.AddPingRequest(nodeKey, pr) {
t.Logf("failed to AddPingRequest")
continue
}
// Wait for PingRequest to come back
pingTimeout := time.NewTimer(2 * time.Second)
defer pingTimeout.Stop()
select {
case <-gotPing:
t.Logf("got ping; success")
return
case <-pingTimeout.C:
// Try again.
}
}
t.Error("all ping attempts failed")
}
// testEnv contains the test environment (set of servers) used by one
// or more nodes.
type testEnv struct {
t testing.TB
Binaries *Binaries
LogCatcher *logCatcher
LogCatcherServer *httptest.Server
Control *testcontrol.Server
ControlServer *httptest.Server
TrafficTrap *trafficTrap
TrafficTrapServer *httptest.Server
derpShutdown func()
}
// newTestEnv starts a bunch of services and returns a new test
// environment.
//
// Call Close to shut everything down.
func newTestEnv(t testing.TB, bins *Binaries) *testEnv {
if runtime.GOOS == "windows" {
t.Skip("not tested/working on Windows yet")
}
derpMap, derpShutdown := runDERPAndStun(t, logger.Discard)
logc := new(logCatcher)
control := &testcontrol.Server{
DERPMap: derpMap,
}
control.HTTPTestServer = httptest.NewUnstartedServer(control)
control.HTTPTestServer.Start()
trafficTrap := new(trafficTrap)
e := &testEnv{
t: t,
Binaries: bins,
LogCatcher: logc,
LogCatcherServer: httptest.NewServer(logc),
Control: control,
ControlServer: control.HTTPTestServer,
TrafficTrap: trafficTrap,
TrafficTrapServer: httptest.NewServer(trafficTrap),
derpShutdown: derpShutdown,
}
return e
}
func (e *testEnv) Close() error {
if err := e.TrafficTrap.Err(); err != nil {
e.t.Errorf("traffic trap: %v", err)
e.t.Logf("logs: %s", e.LogCatcher.logsString())
}
e.LogCatcherServer.Close()
e.TrafficTrapServer.Close()
e.ControlServer.Close()
e.derpShutdown()
return nil
}
// testNode is a machine with a tailscale & tailscaled.
// Currently, the test is simplistic and user==node==machine.
// That may grow complexity later to test more.
type testNode struct {
env *testEnv
dir string // temp dir for sock & state
sockFile string
stateFile string
}
// newTestNode allocates a temp directory for a new test node.
// The node is not started automatically.
func newTestNode(t *testing.T, env *testEnv) *testNode {
dir := t.TempDir()
return &testNode{
env: env,
dir: dir,
sockFile: filepath.Join(dir, "tailscale.sock"),
stateFile: filepath.Join(dir, "tailscale.state"),
}
}
type Daemon struct {
Process *os.Process
}
func (d *Daemon) Kill() {
d.Process.Kill()
}
func (d *Daemon) MustCleanShutdown(t testing.TB) {
d.Process.Signal(os.Interrupt)
ps, err := d.Process.Wait()
if err != nil {
t.Fatalf("tailscaled Wait: %v", err)
}
if ps.ExitCode() != 0 {
t.Errorf("tailscaled ExitCode = %d; want 0", ps.ExitCode())
}
}
// StartDaemon starts the node's tailscaled, failing if it fails to
// start.
func (n *testNode) StartDaemon(t testing.TB) *Daemon {
cmd := exec.Command(n.env.Binaries.Daemon,
"--tun=userspace-networking",
"--state="+n.stateFile,
"--socket="+n.sockFile,
)
cmd.Env = append(os.Environ(),
"TS_LOG_TARGET="+n.env.LogCatcherServer.URL,
"HTTP_PROXY="+n.env.TrafficTrapServer.URL,
"HTTPS_PROXY="+n.env.TrafficTrapServer.URL,
)
if *verboseTailscaled {
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stdout
}
if err := cmd.Start(); err != nil {
t.Fatalf("starting tailscaled: %v", err)
}
return &Daemon{
Process: cmd.Process,
}
}
func (n *testNode) MustUp() {
t := n.env.t
t.Logf("Running up --login-server=%s ...", n.env.ControlServer.URL)
if err := n.Tailscale("up", "--login-server="+n.env.ControlServer.URL).Run(); err != nil {
t.Fatalf("up: %v", err)
}
}
// AwaitListening waits for the tailscaled to be serving local clients
// over its localhost IPC mechanism. (Unix socket, etc)
func (n *testNode) AwaitListening(t testing.TB) {
if err := tstest.WaitFor(20*time.Second, func() (err error) {
c, err := safesocket.Connect(n.sockFile, 41112)
if err != nil {
return err
}
c.Close()
return nil
}); err != nil {
t.Fatal(err)
}
}
func (n *testNode) AwaitIP(t testing.TB) (ips string) {
t.Helper()
if err := tstest.WaitFor(20*time.Second, func() error {
out, err := n.Tailscale("ip").Output()
if err != nil {
return err
}
ips = string(out)
return nil
}); err != nil {
t.Fatalf("awaiting an IP address: %v", err)
}
if ips == "" {
t.Fatalf("returned IP address was blank")
}
return ips
}
func (n *testNode) AwaitRunning(t testing.TB) {
t.Helper()
if err := tstest.WaitFor(20*time.Second, func() error {
st, err := n.Status()
if err != nil {
return err
}
if st.BackendState != "Running" {
return fmt.Errorf("in state %q", st.BackendState)
}
return nil
}); err != nil {
t.Fatalf("failure/timeout waiting for transition to Running status: %v", err)
}
}
// Tailscale returns a command that runs the tailscale CLI with the provided arguments.
// It does not start the process.
func (n *testNode) Tailscale(arg ...string) *exec.Cmd {
cmd := exec.Command(n.env.Binaries.CLI, "--socket="+n.sockFile)
cmd.Args = append(cmd.Args, arg...)
cmd.Dir = n.dir
return cmd
}
func (n *testNode) Status() (*ipnstate.Status, error) {
out, err := n.Tailscale("status", "--json").CombinedOutput()
if err != nil {
return nil, fmt.Errorf("running tailscale status: %v, %s", err, out)
}
st := new(ipnstate.Status)
if err := json.Unmarshal(out, st); err != nil {
return nil, fmt.Errorf("decoding tailscale status JSON: %w", err)
}
return st, nil
}
func (n *testNode) MustStatus(tb testing.TB) *ipnstate.Status {
tb.Helper()
st, err := n.Status()
if err != nil {
tb.Fatal(err)
}
return st
}
// logCatcher is a minimal logcatcher for the logtail upload client.
type logCatcher struct {
mu sync.Mutex
buf bytes.Buffer
gotErr error
reqs int
}
func (lc *logCatcher) logsContains(sub mem.RO) bool {
lc.mu.Lock()
defer lc.mu.Unlock()
return mem.Contains(mem.B(lc.buf.Bytes()), sub)
}
func (lc *logCatcher) numRequests() int {
lc.mu.Lock()
defer lc.mu.Unlock()
return lc.reqs
}
func (lc *logCatcher) logsString() string {
lc.mu.Lock()
defer lc.mu.Unlock()
return lc.buf.String()
}
func (lc *logCatcher) ServeHTTP(w http.ResponseWriter, r *http.Request) {
var body io.Reader = r.Body
if r.Header.Get("Content-Encoding") == "zstd" {
var err error
body, err = smallzstd.NewDecoder(body)
if err != nil {
log.Printf("bad caught zstd: %v", err)
http.Error(w, err.Error(), 400)
return
}
}
bodyBytes, _ := ioutil.ReadAll(body)
type Entry struct {
Logtail struct {
ClientTime time.Time `json:"client_time"`
ServerTime time.Time `json:"server_time"`
Error struct {
BadData string `json:"bad_data"`
} `json:"error"`
} `json:"logtail"`
Text string `json:"text"`
}
var jreq []Entry
var err error
if len(bodyBytes) > 0 && bodyBytes[0] == '[' {
err = json.Unmarshal(bodyBytes, &jreq)
} else {
var ent Entry
err = json.Unmarshal(bodyBytes, &ent)
jreq = append(jreq, ent)
}
lc.mu.Lock()
defer lc.mu.Unlock()
lc.reqs++
if lc.gotErr == nil && err != nil {
lc.gotErr = err
}
if err != nil {
fmt.Fprintf(&lc.buf, "error from %s of %#q: %v\n", r.Method, bodyBytes, err)
} else {
for _, ent := range jreq {
fmt.Fprintf(&lc.buf, "%s\n", strings.TrimSpace(ent.Text))
if *verboseLogCatcher {
fmt.Fprintf(os.Stderr, "%s\n", strings.TrimSpace(ent.Text))
}
}
}
w.WriteHeader(200) // must have no content, but not a 204
}
// trafficTrap is an HTTP proxy handler to note whether any
// HTTP traffic tries to leave localhost from tailscaled. We don't
// expect any, so any request triggers a failure.
type trafficTrap struct {
atomicErr atomic.Value // of error
}
func (tt *trafficTrap) Err() error {
if err, ok := tt.atomicErr.Load().(error); ok {
return err
}
return nil
}
func (tt *trafficTrap) ServeHTTP(w http.ResponseWriter, r *http.Request) {
var got bytes.Buffer
r.Write(&got)
err := fmt.Errorf("unexpected HTTP proxy via proxy: %s", got.Bytes())
mainError.Store(err)
if tt.Err() == nil {
// Best effort at remembering the first request.
tt.atomicErr.Store(err)
}
log.Printf("Error: %v", err)
w.WriteHeader(403)
}
func runDERPAndStun(t testing.TB, logf logger.Logf) (derpMap *tailcfg.DERPMap, cleanup func()) {
var serverPrivateKey key.Private
if _, err := crand.Read(serverPrivateKey[:]); err != nil {
t.Fatal(err)
}
d := derp.NewServer(serverPrivateKey, logf)
httpsrv := httptest.NewUnstartedServer(derphttp.Handler(d))
httpsrv.Config.ErrorLog = logger.StdLogger(logf)
httpsrv.Config.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler))
httpsrv.StartTLS()
stunAddr, stunCleanup := stuntest.ServeWithPacketListener(t, nettype.Std{})
m := &tailcfg.DERPMap{
Regions: map[int]*tailcfg.DERPRegion{
1: {
RegionID: 1,
RegionCode: "test",
Nodes: []*tailcfg.DERPNode{
{
Name: "t1",
RegionID: 1,
HostName: "127.0.0.1", // to bypass HTTP proxy
IPv4: "127.0.0.1",
IPv6: "none",
STUNPort: stunAddr.Port,
DERPTestPort: httpsrv.Listener.Addr().(*net.TCPAddr).Port,
STUNTestIP: stunAddr.IP.String(),
},
},
},
},
}
cleanup = func() {
httpsrv.CloseClientConnections()
httpsrv.Close()
d.Close()
stunCleanup()
}
return m, cleanup
}
type authURLParserWriter struct {
buf bytes.Buffer
fn func(urlStr string) error
}
var authURLRx = regexp.MustCompile(`(https?://\S+/auth/\S+)`)
func (w *authURLParserWriter) Write(p []byte) (n int, err error) {
n, err = w.buf.Write(p)
m := authURLRx.FindSubmatch(w.buf.Bytes())
if m != nil {
urlStr := string(m[1])
w.buf.Reset() // so it's not matched again
if err := w.fn(urlStr); err != nil {
return 0, err
}
}
return n, err
}
|
[
"\"TS_POST_UP_SLEEP\""
] |
[] |
[
"TS_POST_UP_SLEEP"
] |
[]
|
["TS_POST_UP_SLEEP"]
|
go
| 1 | 0 | |
awstesting/performance/logging.go
|
// +build integration
// Package performance contains shared step definitions that are used for performance testing
package performance
import (
"errors"
"fmt"
"os"
"time"
"github.com/gosemver/aws_aws-sdk-go_v1.4.3-1-g1f24fa1/aws"
"github.com/gosemver/aws_aws-sdk-go_v1.4.3-1-g1f24fa1/awstesting/unit"
"github.com/gosemver/aws_aws-sdk-go_v1.4.3-1-g1f24fa1/service/dynamodb"
"github.com/gosemver/aws_aws-sdk-go_v1.4.3-1-g1f24fa1/service/dynamodb/dynamodbattribute"
)
// benchmarkLogger handles all benchmark logging
type benchmarkLogger struct {
outputer
}
// logger interface that handles any logging to an output
type logger interface {
log(key string, data map[string]interface{}) error
}
// init initializes the logger and uses dependency injection for the
// outputer
func newBenchmarkLogger(output string) (*benchmarkLogger, error) {
b := &benchmarkLogger{}
switch output {
case "dynamodb":
region := os.Getenv("AWS_TESTING_REGION")
if region == "" {
return b, errors.New("No region specified. Please export AWS_TESTING_REGION")
}
table := os.Getenv("AWS_TESTING_DB_TABLE")
if table == "" {
return b, errors.New("No table specified. Please export AWS_TESTING_DB_TABLE")
}
b.outputer = newDynamodbOut(table, region)
case "stdout":
b.outputer = stdout{}
default:
return b, errors.New("Unsupported outputer")
}
return b, nil
}
type record struct {
Key string
Data interface{}
}
// log calls the output command, building a data structure
// to pass into its output formatter
func (b benchmarkLogger) log(key, data interface{}) error {
formatData := record{
Key: fmt.Sprintf("%d-%v", time.Now().Unix(), key.(string)),
Data: data,
}
return b.output(formatData)
}
// outputer is a simple interface that'll handle output
// to whatever system like dynamodb or stdout
type outputer interface {
output(record) error
}
// dynamodbOut handles simple writes to dynamodb
type dynamodbOut struct {
table string // table to write to in dynamodb
region string
db *dynamodb.DynamoDB // the dynamodb
}
// init initializes dynamodbOut
func newDynamodbOut(table, region string) *dynamodbOut {
out := dynamodbOut{
table: table,
region: region,
}
out.db = dynamodb.New(
unit.Session,
&aws.Config{Region: &out.region},
)
return &out
}
// output just writes to dynamodb
func (out dynamodbOut) output(data record) error {
input := &dynamodb.PutItemInput{
TableName: aws.String(out.table),
}
item, err := dynamodbattribute.ConvertToMap(data)
if err != nil {
return err
}
input.Item = item
_, err = out.db.PutItem(input)
return err
}
// stdout handles writes to stdout
type stdout struct{}
// output expects key value data to print to stdout
func (out stdout) output(data record) error {
item, err := dynamodbattribute.ConvertToMap(data.Data)
if err != nil {
return err
}
fmt.Println(item)
return nil
}
|
[
"\"AWS_TESTING_REGION\"",
"\"AWS_TESTING_DB_TABLE\""
] |
[] |
[
"AWS_TESTING_REGION",
"AWS_TESTING_DB_TABLE"
] |
[]
|
["AWS_TESTING_REGION", "AWS_TESTING_DB_TABLE"]
|
go
| 2 | 0 | |
google/appengine/tools/devappserver2/module.py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Manage the lifecycle of runtime processes and dispatch requests to them."""
import cgi
import collections
import cStringIO
import functools
import httplib
import logging
import math
import os.path
import random
import re
import string
import thread
import threading
import time
import urllib
import urlparse
import wsgiref.headers
from concurrent import futures
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import appinfo
from google.appengine.api import request_info
from google.appengine.api.logservice import log_service_pb
from google.appengine.tools.devappserver2 import application_configuration
from google.appengine.tools.devappserver2 import blob_image
from google.appengine.tools.devappserver2 import blob_upload
from google.appengine.tools.devappserver2 import channel
from google.appengine.tools.devappserver2 import constants
from google.appengine.tools.devappserver2 import endpoints
from google.appengine.tools.devappserver2 import errors
from google.appengine.tools.devappserver2 import file_watcher
from google.appengine.tools.devappserver2 import gcs_server
from google.appengine.tools.devappserver2 import http_proxy
from google.appengine.tools.devappserver2 import http_runtime
from google.appengine.tools.devappserver2 import http_runtime_constants
from google.appengine.tools.devappserver2 import instance
from google.appengine.tools.devappserver2 import login
from google.appengine.tools.devappserver2 import metrics
from google.appengine.tools.devappserver2 import request_rewriter
from google.appengine.tools.devappserver2 import runtime_config_pb2
from google.appengine.tools.devappserver2 import runtime_factories
from google.appengine.tools.devappserver2 import start_response_utils
from google.appengine.tools.devappserver2 import static_files_handler
from google.appengine.tools.devappserver2 import thread_executor
from google.appengine.tools.devappserver2 import url_handler
from google.appengine.tools.devappserver2 import util
from google.appengine.tools.devappserver2 import wsgi_handler
from google.appengine.tools.devappserver2 import wsgi_server
_LOWER_HEX_DIGITS = string.hexdigits.lower()
_UPPER_HEX_DIGITS = string.hexdigits.upper()
_REQUEST_ID_HASH_LENGTH = 8
_THREAD_POOL = thread_executor.ThreadExecutor()
_RESTART_INSTANCES_CONFIG_CHANGES = frozenset(
[application_configuration.NORMALIZED_LIBRARIES_CHANGED,
application_configuration.SKIP_FILES_CHANGED,
application_configuration.NOBUILD_FILES_CHANGED,
# The server must be restarted when the handlers change because files
# appearing in static content handlers make them unavailable to the
# runtime.
application_configuration.HANDLERS_CHANGED,
application_configuration.ENV_VARIABLES_CHANGED,
application_configuration.ENTRYPOINT_ADDED,
application_configuration.ENTRYPOINT_CHANGED,
application_configuration.ENTRYPOINT_REMOVED])
_REQUEST_LOGGING_BLACKLIST_RE = re.compile(
r'^/_ah/(?:channel/(?:dev|jsapi)|img|login|upload)')
# Fake arguments for _handle_script_request for request types that don't use
# user-specified handlers.
_EMPTY_MATCH = re.match('', '')
_DUMMY_URLMAP = appinfo.URLMap(script='/')
_SHUTDOWN_TIMEOUT = 30
_MAX_UPLOAD_MEGABYTES = 32
_MAX_UPLOAD_BYTES = _MAX_UPLOAD_MEGABYTES * 1024 * 1024
_MAX_UPLOAD_NO_TRIGGER_BAD_CLIENT_BYTES = 64 * 1024 * 1024
_REDIRECT_HTML = '''\
<HTML><HEAD><meta http-equiv="content-type" content="%(content-type)s">
<TITLE>%(status)d Moved</TITLE></HEAD>
<BODY><H1>%(status)d Moved</H1>
The document has moved
<A HREF="%(correct-url)s">here</A>.
</BODY></HTML>'''
_TIMEOUT_HTML = '<HTML><BODY>503 - This request has timed out.</BODY></HTML>'
# Factor applied to the request timeouts to compensate for the
# long vmengines reloads. TODO eventually remove that once we have
# optimized the vm_engine reload.
_VMENGINE_SLOWDOWN_FACTOR = 2
# polling time on module changes.
_CHANGE_POLLING_MS = 1000
# specific resources prefixes we don't want to see pollute the info level on
# access.
_QUIETER_RESOURCES = ('/_ah/health',)
# TODO: Remove after the Files API is really gone.
_FILESAPI_DEPRECATION_WARNING = (
'The Files API is deprecated and will soon be removed. Further information'
' is available here: https://cloud.google.com/appengine/docs/deprecations'
'/files_api')
_ALLOWED_RUNTIMES_ENV_FLEX = (
'python-compat', 'java', 'java7', 'go', 'custom')
def _static_files_regex_from_handlers(handlers):
patterns = []
for url_map in handlers:
handler_type = url_map.GetHandlerType()
if url_map.application_readable:
continue
if handler_type == appinfo.STATIC_FILES:
patterns.append(r'(%s)' % url_map.upload)
elif handler_type == appinfo.STATIC_DIR:
patterns.append('(%s%s%s)' % (url_map.static_dir.rstrip(os.path.sep),
re.escape(os.path.sep), r'.*'))
return r'^%s$' % '|'.join(patterns)
class InteractiveCommandError(errors.Error):
pass
class _ScriptHandler(url_handler.UserConfiguredURLHandler):
"""A URL handler that will cause the request to be dispatched to an instance.
This handler is special in that it does not have a working handle() method
since the Module's dispatch logic is used to select the appropriate Instance.
"""
def __init__(self, url_map):
"""Initializer for _ScriptHandler.
Args:
url_map: An appinfo.URLMap instance containing the configuration for this
handler.
"""
try:
url_pattern = re.compile('%s$' % url_map.url)
except re.error, e:
raise errors.InvalidAppConfigError(
'invalid url %r in script handler: %s' % (url_map.url, e))
super(_ScriptHandler, self).__init__(url_map, url_pattern)
self.url_map = url_map
def handle(self, match, environ, start_response):
"""This is a dummy method that should never be called."""
raise NotImplementedError()
class Module(object):
"""The abstract base for all instance pool implementations."""
_MAX_REQUEST_WAIT_TIME = 10
def _get_wait_time(self):
"""Gets the wait time before timing out a request.
Returns:
The timeout value in seconds.
"""
return self._MAX_REQUEST_WAIT_TIME
def _create_instance_factory(self,
module_configuration):
"""Create an instance.InstanceFactory.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
Returns:
A instance.InstanceFactory subclass that can be used to create instances
with the provided configuration.
Raises:
RuntimeError: if the configuration specifies an unknown runtime.
errors.InvalidAppConfigError: if using removed runtimes for env: 2
"""
runtime = module_configuration.runtime
if runtime == 'vm':
runtime = module_configuration.effective_runtime
# NOTE(user): b/24139391
# If in env: 2, users either use a compat runtime or custom.
if util.is_env_flex(module_configuration.env):
if runtime not in _ALLOWED_RUNTIMES_ENV_FLEX:
raise errors.InvalidAppConfigError(
'In env: {0}, only the following runtimes '
'are allowed: {1}'
.format(module_configuration.env, _ALLOWED_RUNTIMES_ENV_FLEX))
if runtime not in runtime_factories.FACTORIES:
raise RuntimeError(
'Unknown runtime %r; supported runtimes are %s.' %
(runtime,
', '.join(
sorted(repr(k) for k in runtime_factories.FACTORIES))))
instance_factory = runtime_factories.FACTORIES[runtime]
return instance_factory(
request_data=self._request_data,
runtime_config_getter=self._get_runtime_config,
module_configuration=module_configuration)
def _is_modern(self):
return (
self._module_configuration.runtime in runtime_factories.MODERN_RUNTIMES)
def _create_url_handlers(self):
"""Constructs URLHandlers based on the module configuration.
Returns:
A list of url_handler.URLHandlers corresponding that can react as
described in the given configuration.
"""
handlers = []
# Add special URL handlers (taking precedence over user-defined handlers)
# Login/logout handlers.
handlers.append(wsgi_handler.WSGIHandler(
login.application, '/%s$' % login.LOGIN_URL_RELATIVE))
handlers.append(wsgi_handler.WSGIHandler(
login.application, '/%s$' % login.LOGOUT_URL_RELATIVE))
url_pattern = '/%s' % blob_upload.UPLOAD_URL_PATH
# The blobstore upload handler forwards successful requests to the
# dispatcher.
handlers.append(
wsgi_handler.WSGIHandler(blob_upload.Application(self._dispatcher),
url_pattern))
url_pattern = '/%s' % blob_image.BLOBIMAGE_URL_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(blob_image.Application(), url_pattern))
url_pattern = '/%s' % channel.CHANNEL_URL_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(channel.application, url_pattern))
url_pattern = '/%s' % gcs_server.GCS_URL_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(gcs_server.Application(), url_pattern))
# Add a handler for Endpoints, only if version == 1.0 and /_ah/spi handler
# is configured.
runtime_config = self._get_runtime_config()
for library in runtime_config.libraries:
if library.name == 'endpoints' and library.version == '1.0':
if [url_map for url_map in self._module_configuration.handlers
if url_map.url.startswith('/_ah/spi/')]:
url_pattern = '/%s' % endpoints.API_SERVING_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(
endpoints.EndpointsDispatcher(self._dispatcher),
url_pattern))
found_start_handler = False
found_warmup_handler = False
# Add user-defined URL handlers
for url_map in self._module_configuration.handlers:
handler_type = url_map.GetHandlerType()
if handler_type == appinfo.HANDLER_SCRIPT:
if not self._is_modern():
# Handle script only for traditional runtimes.
handlers.append(_ScriptHandler(url_map))
if not found_start_handler and re.match('%s$' % url_map.url,
'/_ah/start'):
found_start_handler = True
if not found_warmup_handler and re.match('%s$' % url_map.url,
'/_ah/warmup'):
found_warmup_handler = True
elif handler_type == appinfo.STATIC_FILES:
handlers.append(
static_files_handler.StaticFilesHandler(
self._module_configuration.application_root,
url_map,
self._module_configuration.default_expiration))
elif handler_type == appinfo.STATIC_DIR:
handlers.append(
static_files_handler.StaticDirHandler(
self._module_configuration.application_root,
url_map,
self._module_configuration.default_expiration))
else:
assert 0, 'unexpected handler %r for %r' % (handler_type, url_map)
# Add a handler for /_ah/start if no script handler matches.
if not found_start_handler:
handlers.insert(0, _ScriptHandler(self._instance_factory.START_URL_MAP))
# Add a handler for /_ah/warmup if no script handler matches and warmup is
# enabled.
if (not found_warmup_handler and
'warmup' in (self._module_configuration.inbound_services or [])):
handlers.insert(0, _ScriptHandler(self._instance_factory.WARMUP_URL_MAP))
if self._is_modern():
# Modern runtimes use a default handler to route to the user-defined entrypoint.
# This handler should be checked after all other handlers.
default_handler = _ScriptHandler(appinfo.URLMap(url='/.*'))
handlers.append(default_handler)
return handlers
def _get_runtime_config(self):
"""Returns the configuration for the runtime.
Returns:
A runtime_config_pb2.Config instance representing the configuration to be
passed to an instance. NOTE: This does *not* include the instance_id
field, which must be populated elsewhere.
"""
runtime_config = runtime_config_pb2.Config()
runtime_config.app_id = self._module_configuration.application
runtime_config.version_id = self._module_configuration.version_id
if self._threadsafe_override is None:
runtime_config.threadsafe = self._module_configuration.threadsafe or False
else:
runtime_config.threadsafe = self._threadsafe_override
runtime_config.application_root = (
self._module_configuration.application_root)
if not self._allow_skipped_files:
runtime_config.skip_files = str(self._module_configuration.skip_files)
runtime_config.static_files = _static_files_regex_from_handlers(
self._module_configuration.handlers)
runtime_config.api_host = self._api_host
runtime_config.api_port = self._api_port
runtime_config.server_port = self._balanced_port
runtime_config.stderr_log_level = self._runtime_stderr_loglevel
runtime_config.datacenter = 'us1'
runtime_config.auth_domain = self._auth_domain
if self._max_instances is not None:
runtime_config.max_instances = self._max_instances
for library in self._module_configuration.normalized_libraries:
runtime_config.libraries.add(name=library.name, version=library.version)
for key, value in (self._module_configuration.env_variables or {}).items():
runtime_config.environ.add(key=str(key), value=str(value))
if self._cloud_sql_config:
runtime_config.cloud_sql_config.CopyFrom(self._cloud_sql_config)
if (self._php_config and
self._module_configuration.runtime.startswith('php')):
runtime_config.php_config.CopyFrom(self._php_config)
if (self._python_config and
self._module_configuration.runtime.startswith('python')):
runtime_config.python_config.CopyFrom(self._python_config)
if (self._java_config and
(self._module_configuration.runtime.startswith('java') or
self._module_configuration.effective_runtime.startswith('java'))):
runtime_config.java_config.CopyFrom(self._java_config)
if (self._go_config and
self._module_configuration.runtime.startswith('go')):
runtime_config.go_config.CopyFrom(self._go_config)
if self._vm_config:
runtime_config.vm_config.CopyFrom(self._vm_config)
if self._module_configuration.effective_runtime == 'custom':
runtime_config.custom_config.CopyFrom(self._custom_config)
runtime_config.vm = self._module_configuration.runtime == 'vm'
return runtime_config
def _maybe_restart_instances(self, config_changed, file_changed,
modern_runtime_dep_libs_changed=False):
"""Restarts instances. May avoid some restarts depending on policy.
If neither config_changed or file_changed is True, returns immediately.
Args:
config_changed: True if the configuration for the application has changed.
file_changed: True if any file relevant to the application has changed.
modern_runtime_dep_libs_changed: True if dependency libraries of a modern
runtime (e.g. python3) changed.
"""
policy = self._instance_factory.FILE_CHANGE_INSTANCE_RESTART_POLICY
assert policy is not None, 'FILE_CHANGE_INSTANCE_RESTART_POLICY not set'
if policy == instance.NEVER or (
not config_changed and not file_changed
and not modern_runtime_dep_libs_changed):
return
logging.debug('Restarting instances.')
with self._condition:
instances_to_quit = set()
for inst in self._instances:
if (config_changed or modern_runtime_dep_libs_changed or
(policy == instance.ALWAYS) or
(policy == instance.AFTER_FIRST_REQUEST and inst.total_requests)):
instances_to_quit.add(inst)
self._instances -= instances_to_quit
for inst in instances_to_quit:
inst.quit(allow_async=True)
def _handle_changes(self, timeout=0):
"""Handle file or configuration changes."""
# Check for file changes first, because they can trigger config changes.
file_changes = self._get_file_changes(timeout)
if file_changes:
logging.info(
'[%s] Detected file changes:\n %s', self.name,
'\n '.join(sorted(file_changes)))
self._instance_factory.files_changed()
# Always check for config and file changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
# For python3, changes to requirements.txt should trigger instance factory
# reload
dep_libs_changed = None
if hasattr(self._instance_factory, 'dependency_libraries_changed'):
dep_libs_changed = (
self._instance_factory.dependency_libraries_changed(file_changes))
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
self._maybe_restart_instances(
config_changed=bool(config_changes & _RESTART_INSTANCES_CONFIG_CHANGES),
file_changed=bool(file_changes),
modern_runtime_dep_libs_changed=dep_libs_changed)
def __init__(
self,
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
java_config,
go_config,
custom_config,
cloud_sql_config,
vm_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
watcher_ignore_re,
automatic_restarts,
allow_skipped_files,
threadsafe_override,
enable_host_checking=True,
ssl_certificate_paths=None,
ssl_port=None):
"""Initializer for Module.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
api_host: The host that APIModule listens for RPC requests on.
api_port: The port that APIModule listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
      php_config: A runtime_config_pb2.PhpConfig instance containing PHP
runtime-specific configuration. If None then defaults are used.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are used.
java_config: A runtime_config_pb2.JavaConfig instance containing
Java runtime-specific configuration. If None then defaults are used.
      go_config: A runtime_config_pb2.GoConfig instance containing Go
runtime-specific configuration. If None then defaults are used.
custom_config: A runtime_config_pb2.CustomConfig instance. If 'runtime'
is set then we switch to another runtime. Otherwise, we use the
custom_entrypoint to start the app. If neither or both are set,
then we will throw an error.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
vm_config: A runtime_config_pb2.VMConfig instance containing
VM runtime-specific configuration. If None all docker-related stuff
is disabled.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
watcher_ignore_re: A regex that optionally defines a pattern for the file
watcher to ignore.
automatic_restarts: If True then instances will be restarted when a
        file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
enable_host_checking: A bool indicating that HTTP Host checking should
be enforced for incoming requests.
ssl_certificate_paths: A ssl_utils.SSLCertificatePaths instance. If
not None, the module's wsgi server will be launched with SSL. Must
also specify ssl_port.
ssl_port: An additional port to bind to for SSL connections. Must be
specified if ssl_certificate_paths is specified.
Raises:
errors.InvalidAppConfigError: For runtime: custom, either mistakenly set
both --custom_entrypoint and --runtime or neither.
"""
self._module_configuration = module_configuration
self._name = module_configuration.module_name
self._version = module_configuration.major_version
self._app_name_external = module_configuration.application_external_name
self._host = host
self._api_host = api_host
self._api_port = api_port
self._auth_domain = auth_domain
self._runtime_stderr_loglevel = runtime_stderr_loglevel
self._balanced_port = balanced_port
self._php_config = php_config
self._python_config = python_config
self._java_config = java_config
self._go_config = go_config
self._custom_config = custom_config
self._cloud_sql_config = cloud_sql_config
self._vm_config = vm_config
self._request_data = request_data
self._allow_skipped_files = allow_skipped_files
self._threadsafe_override = threadsafe_override
self._dispatcher = dispatcher
self._max_instances = max_instances
self._automatic_restarts = automatic_restarts
self._use_mtime_file_watcher = use_mtime_file_watcher
self._watcher_ignore_re = watcher_ignore_re
self._default_version_port = default_version_port
self._port_registry = port_registry
self._ssl_port = ssl_port
if self.effective_runtime == 'custom':
if self._custom_config.runtime and self._custom_config.custom_entrypoint:
raise errors.InvalidAppConfigError(
'Cannot set both --runtime and --custom_entrypoint.')
elif self._custom_config.runtime:
actual_runtime = self._custom_config.runtime
self._module_configuration.effective_runtime = actual_runtime
elif not self._custom_config.custom_entrypoint:
raise errors.InvalidAppConfigError(
'Must set either --runtime or --custom_entrypoint. For a '
'standard runtime, set the --runtime flag with one of %s. '
'For a custom runtime, set the --custom_entrypoint with a '
'command to start your app.' % runtime_factories.valid_runtimes())
self._instance_factory = self._create_instance_factory(
self._module_configuration)
if self._automatic_restarts:
self._watcher = file_watcher.get_file_watcher(
[self._module_configuration.application_root] +
self._instance_factory.get_restart_directories(),
self._use_mtime_file_watcher)
if hasattr(self._watcher, 'set_watcher_ignore_re'):
self._watcher.set_watcher_ignore_re(self._watcher_ignore_re)
if hasattr(self._watcher, 'set_skip_files_re'):
self._watcher.set_skip_files_re(self._module_configuration.skip_files)
else:
self._watcher = None
self._handler_lock = threading.Lock()
self._handlers = self._create_url_handlers()
if enable_host_checking:
wsgi_module = wsgi_server.WsgiHostCheck([self._host], self)
else:
wsgi_module = self
self._balanced_module = wsgi_server.WsgiServer(
(self._host, self._balanced_port), wsgi_module,
ssl_certificate_paths, self._ssl_port)
self._quit_event = threading.Event() # Set when quit() has been called.
# TODO: Remove after the Files API is really gone.
if (self._module_configuration.runtime.startswith('python') or
self._module_configuration.runtime.startswith('java') or
self._module_configuration.runtime.startswith('go')):
self._filesapi_warning_message = _FILESAPI_DEPRECATION_WARNING
else:
self._filesapi_warning_message = None
self._total_file_change_time = 0.0
self._file_change_count = 0
@property
def name(self):
"""The name of the module, as defined in app.yaml.
This value will be constant for the lifetime of the module even if the
module configuration changes.
"""
return self._name
@property
def version(self):
"""The version of the module, as defined in app.yaml.
This value will be constant for the lifetime of the module even if the
module configuration changes.
"""
return self._version
@property
def app_name_external(self):
"""The external application name of the module, as defined in app.yaml.
This value will be constant for the lifetime of the module even if the
module configuration changes.
"""
return self._app_name_external
@property
def ready(self):
"""The module is ready to handle HTTP requests."""
return self._balanced_module.ready
@property
def balanced_port(self):
"""The port that the balanced HTTP server for the Module is listening on."""
assert self._balanced_module.ready, 'balanced module not running'
return self._balanced_module.port
@property
def host(self):
"""The host that the HTTP server(s) for this Module is listening on."""
return self._host
@property
def balanced_address(self):
"""The address of the balanced HTTP server e.g. "localhost:8080"."""
if self.balanced_port != 80:
return '%s:%s' % (self.host, self.balanced_port)
else:
return self.host
@property
def max_instance_concurrent_requests(self):
"""The number of concurrent requests that each Instance can handle."""
return self._instance_factory.max_concurrent_requests
@property
def module_configuration(self):
"""The application_configuration.ModuleConfiguration for this module."""
return self._module_configuration
@property
def runtime(self):
"""Runtime property for this module."""
return self._module_configuration.runtime
@property
def effective_runtime(self):
"""Effective_runtime property for this module."""
return self._module_configuration.effective_runtime
@property
def mvm_logs_enabled(self):
"""Returns True iff it's a Managed VM module and logs are enabled."""
return self._vm_config and self._vm_config.enable_logs
@property
def supports_interactive_commands(self):
"""True if the module can evaluate arbitrary code and return the result."""
return self._instance_factory.SUPPORTS_INTERACTIVE_REQUESTS
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
inst=None):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
inst: The Instance to send the request to. If None then an appropriate
Instance will be chosen.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
raise NotImplementedError()
def _no_handler_for_request(self, environ, start_response, request_id):
"""Handle a HTTP request that does not match any user-defined handlers."""
self._insert_log_message('No handlers matched this URL.', 2, request_id)
start_response('404 Not Found', [('Content-Type', 'text/html')])
return [
'<html><head><title>Not Found</title></head>',
('<body>The url "%s" does not match any handlers.</body></html>' %
cgi.escape(environ['PATH_INFO']))
]
def _error_response(self, environ, start_response, status, body=None):
if body:
start_response(
'%d %s' % (status, httplib.responses[status]),
[('Content-Type', 'text/html'),
('Content-Length', str(len(body)))])
return body
start_response('%d %s' % (status, httplib.responses[status]), [])
return []
def _handle_request(self, environ, start_response, inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
inst: The Instance to send the request to. If None then an appropriate
Instance will be chosen. Setting inst is not meaningful if the
request does not match a "script" handler.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if inst:
try:
environ['SERVER_PORT'] = str(self.get_instance_port(inst.instance_id))
except request_info.NotSupportedWithAutoScalingError:
environ['SERVER_PORT'] = str(self.balanced_port)
else:
environ['SERVER_PORT'] = str(self.balanced_port)
if 'HTTP_HOST' in environ:
environ['SERVER_NAME'] = environ['HTTP_HOST'].rsplit(':', 1)[0]
environ['DEFAULT_VERSION_HOSTNAME'] = '%s:%s' % (
environ['SERVER_NAME'], self._default_version_port)
runtime_config = self._get_runtime_config()
# Python monkey-patches out os.environ because some environment variables
    # are set per-request (REQUEST_ID_HASH and REQUEST_LOG_ID for example).
# This means that although these environment variables could be set once
# at startup, they must be passed in during each request.
if (runtime_config.vm and
self._module_configuration.effective_runtime == 'python27'):
environ.update(http_runtime.get_vm_environment_variables(
self._module_configuration, runtime_config))
with self._request_data.request(
environ,
self._module_configuration) as request_id:
should_log_request = not _REQUEST_LOGGING_BLACKLIST_RE.match(
environ['PATH_INFO'])
environ['REQUEST_ID_HASH'] = self.generate_request_id_hash()
if should_log_request:
environ['REQUEST_LOG_ID'] = self.generate_request_log_id()
if 'HTTP_HOST' in environ:
hostname = environ['HTTP_HOST']
elif environ['SERVER_PORT'] == '80':
hostname = environ['SERVER_NAME']
else:
hostname = '%s:%s' % (environ['SERVER_NAME'], environ['SERVER_PORT'])
if environ.get('QUERY_STRING'):
resource = '%s?%s' % (urllib.quote(environ['PATH_INFO']),
environ['QUERY_STRING'])
else:
resource = urllib.quote(environ['PATH_INFO'])
email, _, _ = login.get_user_info(environ.get('HTTP_COOKIE', ''))
method = environ.get('REQUEST_METHOD', 'GET')
http_version = environ.get('SERVER_PROTOCOL', 'HTTP/1.0')
logservice = apiproxy_stub_map.apiproxy.GetStub('logservice')
logservice.start_request(
request_id=request_id,
user_request_id=environ['REQUEST_LOG_ID'],
ip=environ.get('REMOTE_ADDR', ''),
app_id=self._module_configuration.application,
version_id=self._module_configuration.major_version,
nickname=email.split('@', 1)[0],
user_agent=environ.get('HTTP_USER_AGENT', ''),
host=hostname,
method=method,
resource=resource,
http_version=http_version,
module=self._module_configuration.module_name)
def wrapped_start_response(status, response_headers, exc_info=None):
response_headers.append(('Server',
http_runtime_constants.SERVER_SOFTWARE))
if should_log_request:
headers = wsgiref.headers.Headers(response_headers)
status_code = int(status.split(' ', 1)[0])
content_length = int(headers.get('Content-Length', 0))
# TODO: Remove after the Files API is really gone.
if (self._filesapi_warning_message is not None
and self._request_data.was_filesapi_used(request_id)):
logging.warning(self._filesapi_warning_message)
self._insert_log_message(self._filesapi_warning_message,
2, request_id)
logservice.end_request(request_id, status_code, content_length)
if any(resource.startswith(prefix) for prefix in _QUIETER_RESOURCES):
level = logging.DEBUG
else:
level = logging.INFO
logging.log(level, '%(module_name)s: '
'"%(method)s %(resource)s %(http_version)s" '
'%(status)d %(content_length)s',
{'module_name': self.name,
'method': method,
'resource': resource,
'http_version': http_version,
'status': status_code,
'content_length': content_length or '-'})
return start_response(status, response_headers, exc_info)
content_length = int(environ.get('CONTENT_LENGTH', '0'))
if (environ['REQUEST_METHOD'] in ('GET', 'HEAD', 'DELETE', 'TRACE') and
content_length != 0):
# CONTENT_LENGTH may be empty or absent.
wrapped_start_response('400 Bad Request', [])
return ['"%s" requests may not contain bodies.' %
environ['REQUEST_METHOD']]
# Do not apply request limits to internal _ah handlers (known to break
# blob uploads).
# TODO: research if _ah handlers need limits.
if (not environ.get('REQUEST_URI', '/').startswith('/_ah/') and
content_length > _MAX_UPLOAD_BYTES):
# As allowed by the RFC, cherrypy closes the connection for 413 errors.
# Most clients do not handle this correctly and treat the page as
# unavailable if the connection is closed before the client can send
# all the data. To match the behavior of production, for large files
# < 64M read the data to prevent the client bug from being triggered.
if content_length <= _MAX_UPLOAD_NO_TRIGGER_BAD_CLIENT_BYTES:
environ['wsgi.input'].read(content_length)
status = '%d %s' % (httplib.REQUEST_ENTITY_TOO_LARGE,
httplib.responses[httplib.REQUEST_ENTITY_TOO_LARGE])
wrapped_start_response(status, [])
return ['Upload limited to %d megabytes.' % _MAX_UPLOAD_MEGABYTES]
with self._handler_lock:
handlers = self._handlers
try:
path_info = environ['PATH_INFO']
path_info_normal = self._normpath(path_info)
if path_info_normal != path_info:
# While a 301 Moved Permanently makes more sense for non-normal
# paths, prod issues a 302 so we do the same.
return self._redirect_302_path_info(path_info_normal,
environ,
wrapped_start_response)
if request_type in (instance.BACKGROUND_REQUEST,
instance.INTERACTIVE_REQUEST,
instance.SHUTDOWN_REQUEST):
app = functools.partial(self._handle_script_request,
url_map=_DUMMY_URLMAP,
match=_EMPTY_MATCH,
request_id=request_id,
inst=inst,
request_type=request_type)
return request_rewriter.frontend_rewriter_middleware(app)(
environ, wrapped_start_response)
for handler in handlers:
match = handler.match(path_info)
if match:
# Only check secure: if module was configured to run with SSL
if self._ssl_port:
handler_secure = getattr(getattr(handler, '_url_map', None),
'secure', None)
if (handler_secure == 'always' and
environ['wsgi.url_scheme'] != 'https'):
# Since secure: was set to 'always', redirect to the https
# version of the url
start_response('302 Found', [('Location',
util.construct_url_from_environ(
environ,
secure=True,
include_query_params=True,
port=self._ssl_port))])
return []
elif (handler_secure == 'never' and
environ['wsgi.url_scheme'] != 'http'):
# Since secure: was set to 'never', redirect to the http version
# of the url, but without the query params
start_response('302 Found', [('Location',
util.construct_url_from_environ(
environ,
secure=False,
include_query_params=False,
port=self._balanced_port))])
return []
auth_failure = handler.handle_authorization(environ,
wrapped_start_response)
if auth_failure is not None:
return auth_failure
if isinstance(handler, _ScriptHandler):
app = functools.partial(self._handle_script_request,
url_map=handler.url_map,
match=match,
request_id=request_id,
inst=inst,
request_type=request_type)
return request_rewriter.frontend_rewriter_middleware(app)(
environ, wrapped_start_response)
else:
ret = handler.handle(match, environ, wrapped_start_response)
if ret is not None:
return ret
return self._no_handler_for_request(environ, wrapped_start_response,
request_id)
except StandardError, e:
if logging.getLogger('').isEnabledFor(logging.DEBUG):
logging.exception('Request to %r failed', path_info)
else:
logging.error('Request to %r failed', path_info)
wrapped_start_response('500 Internal Server Error', [], e)
return []
def _async_shutdown_instance(self, inst, port):
return _THREAD_POOL.submit(self._shutdown_instance, inst, port)
def _shutdown_instance(self, inst, port):
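    """Sends /_ah/stop to the instance and force-quits it after a timeout."""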
force_shutdown_time = time.time() + _SHUTDOWN_TIMEOUT
try:
environ = self.build_request_environ(
'GET', '/_ah/stop', [], '', '0.1.0.3', port, fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.SHUTDOWN_REQUEST)
logging.debug('Sent shutdown request: %s', inst)
except:
logging.exception('Internal error while handling shutdown request.')
finally:
time_to_wait = force_shutdown_time - time.time()
self._quit_event.wait(time_to_wait)
inst.quit(force=True)
@staticmethod
def _quote_querystring(qs):
"""Quote a query string to protect against XSS."""
parsed_qs = urlparse.parse_qs(qs, keep_blank_values=True)
    # urlparse.parse_qs returns a dictionary whose values are lists, while
    # urllib.urlencode does not handle lists. Expand them into a flat list of
    # (key, value) pairs.
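    # For example, 'a=1&a=2&b=' parses to {'a': ['1', '2'], 'b': ['']} and is
    # expanded to [('a', '1'), ('a', '2'), ('b', '')] before re-encoding.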
expanded_qs = []
for key, multivalue in parsed_qs.items():
for value in multivalue:
expanded_qs.append((key, value))
return urllib.urlencode(expanded_qs)
def _redirect_302_path_info(self, updated_path_info, environ, start_response):
"""Redirect to an updated path.
Respond to the current request with a 302 Found status with an updated path
but preserving the rest of the request.
Notes:
- WSGI does not make the fragment available so we are not able to preserve
it. Luckily prod does not preserve the fragment so it works out.
Args:
updated_path_info: the new HTTP path to redirect to.
environ: WSGI environ object.
start_response: WSGI start response callable.
Returns:
WSGI-compatible iterable object representing the body of the response.
"""
correct_url = urlparse.urlunsplit(
(environ['wsgi.url_scheme'],
environ['HTTP_HOST'],
urllib.quote(updated_path_info),
self._quote_querystring(environ['QUERY_STRING']),
None))
content_type = 'text/html; charset=utf-8'
output = _REDIRECT_HTML % {
'content-type': content_type,
'status': httplib.FOUND,
'correct-url': correct_url
}
start_response('%d %s' % (httplib.FOUND, httplib.responses[httplib.FOUND]),
[('Content-Type', content_type),
('Location', correct_url),
('Content-Length', str(len(output)))])
return output
@staticmethod
def _normpath(path):
"""Normalize the path by handling . and .. directory entries.
Normalizes the path. A directory entry of . is just dropped while a
directory entry of .. removes the previous entry. Note that unlike
os.path.normpath, redundant separators remain in place to match prod.
Args:
path: an HTTP path.
Returns:
A normalized HTTP path.
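    Example (doctest-style, illustrative):
      >>> Module._normpath('/a/./b/../c')
      '/a/c'
      >>> Module._normpath('/a//b')
      '/a//b'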
"""
normalized_path_entries = []
for entry in path.split('/'):
if entry == '..':
if normalized_path_entries:
normalized_path_entries.pop()
elif entry != '.':
normalized_path_entries.append(entry)
return '/'.join(normalized_path_entries)
def _insert_log_message(self, message, level, request_id):
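    """Writes a single app log line at the given level for request_id."""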
logs_group = log_service_pb.UserAppLogGroup()
log_line = logs_group.add_log_line()
log_line.set_timestamp_usec(int(time.time() * 1e6))
log_line.set_level(level)
log_line.set_message(message)
request = log_service_pb.FlushRequest()
request.set_logs(logs_group.Encode())
response = api_base_pb.VoidProto()
logservice = apiproxy_stub_map.apiproxy.GetStub('logservice')
logservice._Dynamic_Flush(request, response, request_id)
@staticmethod
def generate_request_log_id():
"""Generate a random REQUEST_LOG_ID.
Returns:
A string suitable for use as a REQUEST_LOG_ID. The returned string is
variable length to emulate the production values, which encapsulate
the application id, version and some log state.
"""
return ''.join(random.choice(_LOWER_HEX_DIGITS)
for _ in range(random.randrange(30, 100)))
@staticmethod
def generate_request_id_hash():
"""Generate a random REQUEST_ID_HASH."""
return ''.join(random.choice(_UPPER_HEX_DIGITS)
for _ in range(_REQUEST_ID_HASH_LENGTH))
def set_num_instances(self, instances):
"""Sets the number of instances for this module to run.
Args:
instances: An int containing the number of instances to run.
Raises:
request_info.NotSupportedWithAutoScalingError: Always.
"""
raise request_info.NotSupportedWithAutoScalingError()
def get_num_instances(self):
"""Returns the number of instances for this module to run."""
raise request_info.NotSupportedWithAutoScalingError()
def suspend(self):
"""Stops the module from serving requests."""
raise request_info.NotSupportedWithAutoScalingError()
def resume(self):
"""Restarts the module."""
raise request_info.NotSupportedWithAutoScalingError()
def get_instance_address(self, instance_id):
"""Returns the address of the HTTP server for an instance."""
return '%s:%s' % (self.host, self.get_instance_port(instance_id))
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
raise request_info.NotSupportedWithAutoScalingError()
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
raise request_info.NotSupportedWithAutoScalingError()
def report_start_metrics(self):
metrics.GetMetricsLogger().Log(metrics.DEVAPPSERVER_SERVICE_CATEGORY,
'ServiceStart',
label=type(self).__name__)
def report_quit_metrics(self, instance_count):
metrics.GetMetricsLogger().Log(metrics.DEVAPPSERVER_SERVICE_CATEGORY,
'ServiceQuit',
label=type(self).__name__,
value=instance_count)
@property
def supports_individually_addressable_instances(self):
return False
def create_interactive_command_module(self):
"""Returns a InteractiveCommandModule that can be sent user commands."""
if self._instance_factory.SUPPORTS_INTERACTIVE_REQUESTS:
return InteractiveCommandModule(self._module_configuration,
self._host,
self._balanced_port,
self._api_host,
self._api_port,
self._auth_domain,
self._runtime_stderr_loglevel,
self._php_config,
self._python_config,
self._java_config,
self._go_config,
self._custom_config,
self._cloud_sql_config,
self._vm_config,
self._default_version_port,
self._port_registry,
self._request_data,
self._dispatcher,
self._use_mtime_file_watcher,
self._watcher_ignore_re,
self._allow_skipped_files,
self._threadsafe_override)
else:
raise NotImplementedError('runtime does not support interactive commands')
def build_request_environ(self, method, relative_url, headers, body,
source_ip, port, fake_login=False):
if isinstance(body, unicode):
body = body.encode('ascii')
url = urlparse.urlsplit(relative_url)
if port != 80:
if ':' in self.host:
host = '[%s]:%s' % (self.host, port)
else:
host = '%s:%s' % (self.host, port)
else:
host = self.host
environ = {constants.FAKE_IS_ADMIN_HEADER: '1',
'CONTENT_LENGTH': str(len(body)),
'PATH_INFO': url.path,
'QUERY_STRING': url.query,
'REQUEST_METHOD': method,
'REMOTE_ADDR': source_ip,
'SERVER_NAME': self.host,
'SERVER_PORT': str(port),
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.errors': cStringIO.StringIO(),
'wsgi.multithread': True,
'wsgi.multiprocess': True,
'wsgi.input': cStringIO.StringIO(body)}
if fake_login:
environ[constants.FAKE_LOGGED_IN_HEADER] = '1'
util.put_headers_in_environ(headers, environ)
environ['HTTP_HOST'] = host
return environ
def _get_file_changes(self, timeout):
"""Returns a set of paths that have changed and update metrics information.
Args:
      timeout: Maximum number of milliseconds the watcher is allowed to wait
        for a change.
Returns:
A set of string paths that have changed since the last call to this
function.
"""
t1 = time.time()
res = self._watcher.changes(timeout)
t2 = time.time()
if res:
self._total_file_change_time += t2 - t1
self._file_change_count += 1
return res
def get_watcher_result(self):
"""Returns a tuple of file watcher cumulated results for google analytics.
Returns:
A 3-tuple of:
        A float giving the total time spent detecting file changes, in seconds.
        An int giving the total number of file change events detected.
        The name of the file watcher class.
"""
return (self._total_file_change_time, self._file_change_count,
self._watcher.__class__.__name__) if self._watcher else None
class AutoScalingModule(Module):
"""A pool of instances that is autoscaled based on traffic."""
# The minimum number of seconds to wait, after quitting an idle instance,
# before quitting another idle instance.
_MIN_SECONDS_BETWEEN_QUITS = 60
# The time horizon to use when calculating the number of instances required
# to serve the current level of traffic.
_REQUIRED_INSTANCE_WINDOW_SECONDS = 60
_DEFAULT_AUTOMATIC_SCALING = appinfo.AutomaticScaling(
min_pending_latency='0.1s',
max_pending_latency='0.5s',
min_idle_instances=1,
max_idle_instances=1000)
@staticmethod
def _parse_pending_latency(timing):
"""Parse a pending latency string into a float of the value in seconds.
Args:
timing: A str of the form 1.0s or 1000ms.
Returns:
A float representation of the value in seconds.
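    Example (doctest-style, illustrative):
      >>> AutoScalingModule._parse_pending_latency('1000ms')
      1.0
      >>> AutoScalingModule._parse_pending_latency('0.5s')
      0.5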
"""
if timing.endswith('ms'):
return float(timing[:-2]) / 1000
else:
return float(timing[:-1])
@classmethod
def _populate_default_automatic_scaling(cls, automatic_scaling_config):
for attribute in automatic_scaling_config.ATTRIBUTES:
if getattr(automatic_scaling_config, attribute) in ('automatic', None):
setattr(automatic_scaling_config, attribute,
getattr(cls._DEFAULT_AUTOMATIC_SCALING, attribute))
def _process_automatic_scaling(self, automatic_scaling_config):
"""Configure min/max instances and pending latencies."""
if automatic_scaling_config:
self._populate_default_automatic_scaling(automatic_scaling_config)
else:
automatic_scaling_config = self._DEFAULT_AUTOMATIC_SCALING
self._min_pending_latency = self._parse_pending_latency(
automatic_scaling_config.min_pending_latency)
self._max_pending_latency = self._parse_pending_latency(
automatic_scaling_config.max_pending_latency)
self._min_idle_instances = int(automatic_scaling_config.min_idle_instances)
self._max_idle_instances = int(automatic_scaling_config.max_idle_instances)
def __init__(self, **kwargs):
"""Initializer for AutoScalingModule.
Args:
**kwargs: Arguments to forward to Module.__init__.
"""
kwargs['vm_config'] = None
super(AutoScalingModule, self).__init__(**kwargs)
self._process_automatic_scaling(
self._module_configuration.automatic_scaling_config)
self._instances = set() # Protected by self._condition.
    # A deque containing (time, num_outstanding_instance_requests) 2-tuples.
# This is used to track the maximum number of outstanding requests in a time
# period. Protected by self._condition.
self._outstanding_request_history = collections.deque()
self._num_outstanding_instance_requests = 0 # Protected by self._condition.
# The time when the last instance was quit in seconds since the epoch.
self._last_instance_quit_time = 0 # Protected by self._condition.
    # The maximum number of instances we've had in the lifetime of the module.
self._instance_high_water_mark = 0 # Protected by self._condition
self._condition = threading.Condition() # Protects instance state.
self._instance_adjustment_thread = threading.Thread(
target=self._loop_adjusting_instances,
name='Instance Adjustment')
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._ssl_port:
self._port_registry.add(self._ssl_port, self, None)
if self._watcher:
self._watcher.start()
self.report_start_metrics()
self._instance_adjustment_thread.start()
def quit(self):
"""Stops the Module."""
self._quit_event.set()
self._instance_adjustment_thread.join()
    # The instance adjustment thread depends on the balanced module and the
    # watcher, so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._balanced_module.quit()
with self._condition:
instances = self._instances
high_water = self._instance_high_water_mark
self._instances = set()
self._condition.notify_all()
self.report_quit_metrics(high_water)
for inst in instances:
inst.quit(force=True)
@property
def instances(self):
"""A set of all the instances currently in the Module."""
with self._condition:
return set(self._instances)
@property
def num_outstanding_instance_requests(self):
"""The number of requests that instances are currently handling."""
with self._condition:
return self._num_outstanding_instance_requests
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if request_type != instance.READY_REQUEST:
with self._condition:
self._num_outstanding_instance_requests += 1
self._outstanding_request_history.append(
(time.time(), self.num_outstanding_instance_requests))
try:
logging.debug('Dispatching request to %s', inst)
return inst.handle(environ, start_response, url_map, match, request_id,
request_type)
finally:
with self._condition:
if request_type != instance.READY_REQUEST:
self._num_outstanding_instance_requests -= 1
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if inst is not None:
return self._handle_instance_request(
environ, start_response, url_map, match, request_id, inst,
request_type)
with self._condition:
self._num_outstanding_instance_requests += 1
self._outstanding_request_history.append(
(time.time(), self.num_outstanding_instance_requests))
try:
start_time = time.time()
timeout_time = start_time + self._min_pending_latency
# Loop until an instance is available to handle the request.
while True:
if self._quit_event.is_set():
return self._error_response(environ, start_response, 404)
inst = self._choose_instance(timeout_time)
if not inst:
inst = self._add_instance(permit_warmup=False)
if not inst:
# No instance is available nor can a new one be created, so loop
# waiting for one to be free.
timeout_time = time.time() + 0.2
continue
try:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
return inst.handle(environ,
start_response,
url_map,
match,
request_id,
request_type)
except instance.CannotAcceptRequests:
continue
finally:
with self._condition:
self._num_outstanding_instance_requests -= 1
self._condition.notify()
def _add_instance(self, permit_warmup):
"""Creates and adds a new instance.Instance to the Module.
Args:
permit_warmup: If True then the new instance.Instance will be sent a new
warmup request if it is configured to receive them.
Returns:
The newly created instance.Instance. Returns None if no new instance
could be created because the maximum number of instances have already
been created.
"""
if self._max_instances is not None:
with self._condition:
if len(self._instances) >= self._max_instances:
return None
perform_warmup = permit_warmup and (
'warmup' in (self._module_configuration.inbound_services or []))
inst = self._instance_factory.new_instance(
self.generate_instance_id(),
expect_ready_request=perform_warmup)
with self._condition:
if self._quit_event.is_set():
return None
self._instances.add(inst)
self._instance_high_water_mark = max(
len(self._instances),
self._instance_high_water_mark)
if not inst.start():
return None
if perform_warmup:
self._async_warmup(inst)
else:
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
logging.debug('Created instance: %s', inst)
return inst
@staticmethod
def generate_instance_id():
return ''.join(random.choice(_LOWER_HEX_DIGITS) for _ in range(36))
def _warmup(self, inst):
"""Send a warmup request to the given instance."""
try:
environ = self.build_request_environ(
'GET', '/_ah/warmup', [], '', '0.1.0.3', self.balanced_port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
except:
logging.exception('Internal error while handling warmup request.')
def _async_warmup(self, inst):
"""Asynchronously send a markup request to the given Instance."""
return _THREAD_POOL.submit(self._warmup, inst)
def _trim_outstanding_request_history(self):
"""Removes obsolete entries from _outstanding_request_history."""
window_start = time.time() - self._REQUIRED_INSTANCE_WINDOW_SECONDS
with self._condition:
while self._outstanding_request_history:
t, _ = self._outstanding_request_history[0]
if t < window_start:
self._outstanding_request_history.popleft()
else:
break
def _get_num_required_instances(self):
"""Returns the number of Instances required to handle the request load."""
with self._condition:
self._trim_outstanding_request_history()
if not self._outstanding_request_history:
return 0
else:
peak_concurrent_requests = max(
current_requests
for (t, current_requests)
in self._outstanding_request_history)
return int(math.ceil(peak_concurrent_requests /
self.max_instance_concurrent_requests))
def _split_instances(self):
"""Returns a 2-tuple representing the required and extra Instances.
Returns:
A 2-tuple of (required_instances, not_required_instances):
        required_instances: The set of the instance.Instances, in a state that
          can handle requests, required to handle the current request load.
        not_required_instances: The set of the Instances contained in this
          Module that are not required.
"""
with self._condition:
num_required_instances = self._get_num_required_instances()
available = [inst for inst in self._instances
if inst.can_accept_requests]
available.sort(key=lambda inst: -inst.num_outstanding_requests)
required = set(available[:num_required_instances])
return required, self._instances - required
def _choose_instance(self, timeout_time):
"""Returns the best Instance to handle a request or None if all are busy."""
with self._condition:
while time.time() < timeout_time:
required_instances, not_required_instances = self._split_instances()
if required_instances:
# Pick the instance with the most remaining capacity to handle
# requests.
required_instances = sorted(
required_instances,
key=lambda inst: inst.remaining_request_capacity)
if required_instances[-1].remaining_request_capacity:
return required_instances[-1]
available_instances = [inst for inst in not_required_instances
if inst.remaining_request_capacity > 0 and
inst.can_accept_requests]
if available_instances:
# Pick the instance with the *least* capacity to handle requests
# to avoid using unnecessary idle instances.
available_instances.sort(
key=lambda instance: instance.num_outstanding_requests)
return available_instances[-1]
else:
self._condition.wait(timeout_time - time.time())
return None
def _adjust_instances(self):
"""Creates new Instances or deletes idle Instances based on current load."""
now = time.time()
with self._condition:
_, not_required_instances = self._split_instances()
if len(not_required_instances) < self._min_idle_instances:
self._add_instance(permit_warmup=True)
elif (len(not_required_instances) > self._max_idle_instances and
now >
(self._last_instance_quit_time + self._MIN_SECONDS_BETWEEN_QUITS)):
for inst in not_required_instances:
if not inst.num_outstanding_requests:
try:
inst.quit()
except instance.CannotQuitServingInstance:
pass
else:
self._last_instance_quit_time = now
logging.debug('Quit instance: %s', inst)
with self._condition:
self._instances.discard(inst)
break
def _loop_adjusting_instances(self):
"""Loops until the Module exits, reloading, adding or removing Instances."""
while not self._quit_event.is_set():
if self.ready:
if self._automatic_restarts:
self._handle_changes(_CHANGE_POLLING_MS)
else:
time.sleep(_CHANGE_POLLING_MS/1000.0)
try:
self._adjust_instances()
except Exception as e: # pylint: disable=broad-except
logging.error(e.message)
# thread.interrupt_main() throws a KeyboardInterrupt error in the main
# thread, which triggers devappserver.stop() and shuts down all other
# processes.
thread.interrupt_main()
break
def __call__(self, environ, start_response):
return self._handle_request(environ, start_response)
class ManualScalingModule(Module):
"""A pool of instances that is manually-scaled."""
_DEFAULT_MANUAL_SCALING = appinfo.ManualScaling(instances='1')
@classmethod
def _populate_default_manual_scaling(cls, manual_scaling_config):
for attribute in manual_scaling_config.ATTRIBUTES:
if getattr(manual_scaling_config, attribute) in ('manual', None):
setattr(manual_scaling_config, attribute,
getattr(cls._DEFAULT_MANUAL_SCALING, attribute))
def _process_manual_scaling(self, manual_scaling_config):
if manual_scaling_config:
self._populate_default_manual_scaling(manual_scaling_config)
else:
manual_scaling_config = self._DEFAULT_MANUAL_SCALING
self._initial_num_instances = int(manual_scaling_config.instances)
def __init__(self, **kwargs):
"""Initializer for ManualScalingModule.
Args:
**kwargs: Arguments to forward to Module.__init__.
"""
super(ManualScalingModule, self).__init__(**kwargs)
self._process_manual_scaling(
self._module_configuration.manual_scaling_config)
self._instances = [] # Protected by self._condition.
self._wsgi_servers = [] # Protected by self._condition.
self._instance_high_water_mark = 0 # Protected by self._condition
# Whether the module has been stopped. Protected by self._condition.
self._suspended = False
self._condition = threading.Condition() # Protects instance state.
# Serializes operations that modify the serving state of or number of
# instances.
self._instances_change_lock = threading.RLock()
self._change_watcher_thread = threading.Thread(
target=self._loop_watching_for_changes, name='Change Watcher')
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._ssl_port:
self._port_registry.add(self._ssl_port, self, None)
if self._watcher:
self._watcher.start()
self._change_watcher_thread.start()
with self._instances_change_lock:
if self._max_instances is not None:
initial_num_instances = min(self._max_instances,
self._initial_num_instances)
else:
initial_num_instances = self._initial_num_instances
for _ in xrange(initial_num_instances):
self._add_instance()
self.report_start_metrics()
def quit(self):
"""Stops the Module."""
self._quit_event.set()
    # The change watcher thread depends on the balanced module and the
    # watcher, so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._change_watcher_thread.join()
self._balanced_module.quit()
for wsgi_servr in self._wsgi_servers:
wsgi_servr.quit()
with self._condition:
instances = self._instances
high_water = self._instance_high_water_mark
self._instances = []
self._condition.notify_all()
self.report_quit_metrics(high_water)
for inst in instances:
inst.quit(force=True)
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
try:
instance_id = int(instance_id)
except ValueError:
raise request_info.InvalidInstanceIdError()
with self._condition:
if 0 <= instance_id < len(self._instances):
wsgi_servr = self._wsgi_servers[instance_id]
else:
raise request_info.InvalidInstanceIdError()
return wsgi_servr.port
@property
def instances(self):
"""A set of all the instances currently in the Module."""
with self._condition:
return set(self._instances)
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
start_time = time.time()
timeout_time = start_time + self._get_wait_time()
try:
while time.time() < timeout_time:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
pass
inst.wait(timeout_time)
if inst.has_quit:
return self._error_response(environ, start_response, 503)
else:
return self._error_response(environ, start_response, 503)
finally:
with self._condition:
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if ((request_type in (instance.NORMAL_REQUEST, instance.READY_REQUEST) and
self._suspended) or self._quit_event.is_set()):
return self._error_response(environ, start_response, 404)
if self._module_configuration.is_backend:
environ['BACKEND_ID'] = self._module_configuration.module_name
else:
environ['BACKEND_ID'] = (
self._module_configuration.version_id.split('.', 1)[0])
if inst is not None:
return self._handle_instance_request(
environ, start_response, url_map, match, request_id, inst,
request_type)
start_time = time.time()
timeout_time = start_time + self._get_wait_time()
while time.time() < timeout_time:
if ((request_type in (instance.NORMAL_REQUEST, instance.READY_REQUEST) and
self._suspended) or self._quit_event.is_set()):
return self._error_response(environ, start_response, 404)
inst = self._choose_instance(timeout_time)
if inst:
try:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
continue
finally:
with self._condition:
self._condition.notify()
else:
return self._error_response(environ, start_response, 503, _TIMEOUT_HTML)
def _add_instance(self):
"""Creates and adds a new instance.Instance to the Module.
This must be called with _instances_change_lock held.
"""
instance_id = self.get_num_instances()
assert self._max_instances is None or instance_id < self._max_instances
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
wsgi_servr = wsgi_server.WsgiServer(
(self._host, 0), functools.partial(self._handle_request, inst=inst))
wsgi_servr.start()
self._port_registry.add(wsgi_servr.port, self, inst)
with self._condition:
if self._quit_event.is_set():
return
self._wsgi_servers.append(wsgi_servr)
self._instances.append(inst)
self._instance_high_water_mark = max(
self._instance_high_water_mark,
len(self._instances))
suspended = self._suspended
if not suspended:
self._async_start_instance(wsgi_servr, inst)
def _async_start_instance(self, wsgi_servr, inst):
return _THREAD_POOL.submit(self._start_instance, wsgi_servr, inst)
def _start_instance(self, wsgi_servr, inst):
try:
if not inst.start():
return
except:
logging.exception('Internal error while starting instance.')
raise
logging.debug('Started instance: %s at http://%s:%s', inst, self.host,
wsgi_servr.port)
logging.info('New instance for module "%s" serving on:\nhttp://%s\n',
self.name, self.balanced_address)
try:
environ = self.build_request_environ(
'GET', '/_ah/start', [], '', '0.1.0.3', wsgi_servr.port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
logging.debug('Sent start request: %s', inst)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
except Exception, e: # pylint: disable=broad-except
logging.exception('Internal error while handling start request: %s', e)
def _choose_instance(self, timeout_time):
"""Returns an Instance to handle a request or None if all are busy."""
with self._condition:
while time.time() < timeout_time:
for inst in self._instances:
if inst.can_accept_requests:
return inst
self._condition.wait(timeout_time - time.time())
return None
def _handle_changes(self, timeout=0):
"""Handle file or configuration changes."""
# Always check for config and file changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
file_changes = self._get_file_changes(timeout)
if file_changes:
logging.info(
'[%s] Detected file changes:\n %s', self.name,
'\n '.join(sorted(file_changes)))
self._instance_factory.files_changed()
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES or file_changes:
with self._instances_change_lock:
if not self._suspended:
self.restart()
def _loop_watching_for_changes(self):
"""Loops until the InstancePool is done watching for file changes."""
while not self._quit_event.is_set():
if self.ready:
if self._automatic_restarts:
self._handle_changes(_CHANGE_POLLING_MS)
else:
time.sleep(_CHANGE_POLLING_MS/1000.0)
def get_num_instances(self):
with self._instances_change_lock:
with self._condition:
return len(self._instances)
def set_num_instances(self, instances):
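    """Grows or shrinks the pool to the requested number of instances."""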
if self._max_instances is not None:
instances = min(instances, self._max_instances)
with self._instances_change_lock:
with self._condition:
running_instances = self.get_num_instances()
if running_instances > instances:
wsgi_servers_to_quit = self._wsgi_servers[instances:]
del self._wsgi_servers[instances:]
instances_to_quit = self._instances[instances:]
del self._instances[instances:]
if running_instances < instances:
for _ in xrange(instances - running_instances):
self._add_instance()
if running_instances > instances:
for inst, wsgi_servr in zip(instances_to_quit, wsgi_servers_to_quit):
self._async_quit_instance(inst, wsgi_servr)
def _async_quit_instance(self, inst, wsgi_servr):
return _THREAD_POOL.submit(self._quit_instance, inst, wsgi_servr)
def _quit_instance(self, inst, wsgi_servr):
port = wsgi_servr.port
wsgi_servr.quit()
inst.quit(expect_shutdown=True)
self._shutdown_instance(inst, port)
def suspend(self):
"""Suspends serving for this module, quitting all running instances."""
with self._instances_change_lock:
if self._suspended:
raise request_info.VersionAlreadyStoppedError()
self._suspended = True
with self._condition:
instances_to_stop = zip(self._instances, self._wsgi_servers)
for wsgi_servr in self._wsgi_servers:
wsgi_servr.set_error(404)
for inst, wsgi_servr in instances_to_stop:
self._async_suspend_instance(inst, wsgi_servr.port)
def _async_suspend_instance(self, inst, port):
return _THREAD_POOL.submit(self._suspend_instance, inst, port)
def _suspend_instance(self, inst, port):
inst.quit(expect_shutdown=True)
self._shutdown_instance(inst, port)
def resume(self):
"""Resumes serving for this module."""
with self._instances_change_lock:
if not self._suspended:
raise request_info.VersionAlreadyStartedError()
self._suspended = False
with self._condition:
if self._quit_event.is_set():
return
wsgi_servers = self._wsgi_servers
instances_to_start = []
for instance_id, wsgi_servr in enumerate(wsgi_servers):
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
wsgi_servr.set_app(functools.partial(self._handle_request, inst=inst))
self._port_registry.add(wsgi_servr.port, self, inst)
with self._condition:
if self._quit_event.is_set():
return
self._instances[instance_id] = inst
instances_to_start.append((wsgi_servr, inst))
for wsgi_servr, inst in instances_to_start:
self._async_start_instance(wsgi_servr, inst)
def restart(self):
"""Restarts the module, replacing all running instances."""
with self._instances_change_lock:
with self._condition:
if self._quit_event.is_set():
return
instances_to_stop = self._instances[:]
wsgi_servers = self._wsgi_servers[:]
instances_to_start = []
for instance_id, wsgi_servr in enumerate(wsgi_servers):
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
wsgi_servr.set_app(functools.partial(self._handle_request, inst=inst))
self._port_registry.add(wsgi_servr.port, self, inst)
instances_to_start.append(inst)
with self._condition:
if self._quit_event.is_set():
return
self._instances[:] = instances_to_start
# Just force instances to stop for a faster restart.
for inst in instances_to_stop:
inst.quit(force=True)
start_futures = [
self._async_start_instance(wsgi_servr, inst)
for wsgi_servr, inst in zip(wsgi_servers, instances_to_start)]
logging.info('Waiting for instances to restart')
_, not_done = futures.wait(start_futures, timeout=_SHUTDOWN_TIMEOUT)
if not_done:
logging.warning('All instances may not have restarted')
else:
logging.info('Instances restarted')
def _restart_instance(self, inst):
"""Restarts the specified instance."""
with self._instances_change_lock:
# Quit the old instance.
inst.quit(force=True)
# Create the new instance.
new_instance = self._instance_factory.new_instance(inst.instance_id)
wsgi_servr = self._wsgi_servers[inst.instance_id]
wsgi_servr.set_app(
functools.partial(self._handle_request, inst=new_instance))
self._port_registry.add(wsgi_servr.port, self, new_instance)
# Start the new instance.
self._start_instance(wsgi_servr, new_instance)
# Replace it in the module registry.
with self._condition:
self._instances[new_instance.instance_id] = new_instance
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
try:
with self._condition:
return self._instances[int(instance_id)]
except (ValueError, IndexError):
raise request_info.InvalidInstanceIdError()
def __call__(self, environ, start_response, inst=None):
return self._handle_request(environ, start_response, inst)
@property
def supports_individually_addressable_instances(self):
return True
class ExternalModule(Module):
"""A module with a single instance that is run externally on a given port."""
# TODO: reduce code duplication between the various Module classes.
def __init__(self, **kwargs):
"""Initializer for ManualScalingModule.
Args:
**kwargs: Arguments to forward to Module.__init__.
"""
super(ExternalModule, self).__init__(**kwargs)
self._instance = None # Protected by self._condition.
self._wsgi_server = None # Protected by self._condition.
# Whether the module has been stopped. Protected by self._condition.
self._suspended = False
self._condition = threading.Condition() # Protects instance state.
# Serializes operations that modify the serving state of the instance.
self._instance_change_lock = threading.RLock()
self._change_watcher_thread = threading.Thread(
target=self._loop_watching_for_changes, name='Change Watcher')
# Override this method from the parent class
def _create_instance_factory(self, module_configuration):
return _ExternalInstanceFactory(
request_data=self._request_data,
module_configuration=module_configuration)
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._ssl_port:
self._port_registry.add(self._ssl_port, self, None)
if self._watcher:
self._watcher.start()
self._change_watcher_thread.start()
with self._instance_change_lock:
self._add_instance()
self.report_start_metrics()
def quit(self):
"""Stops the Module."""
self._quit_event.set()
    # The change watcher thread depends on the balanced module and the
    # watcher, so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._change_watcher_thread.join()
self._balanced_module.quit()
self._wsgi_server.quit()
self.report_quit_metrics(1)
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
if instance_id != 0:
raise request_info.InvalidInstanceIdError()
return self._wsgi_server.port
@property
def instances(self):
"""A set of all the instances currently in the Module."""
return {self._instance}
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
start_time = time.time()
timeout_time = start_time + self._get_wait_time()
try:
while time.time() < timeout_time:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
pass
inst.wait(timeout_time)
if inst.has_quit:
return self._error_response(environ, start_response, 503)
return self._error_response(environ, start_response, 503)
finally:
with self._condition:
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if ((request_type in (instance.NORMAL_REQUEST, instance.READY_REQUEST) and
self._suspended) or self._quit_event.is_set()):
return self._error_response(environ, start_response, 404)
environ['BACKEND_ID'] = (
self._module_configuration.module_name
if self._module_configuration.is_backend
else self._module_configuration.version_id.split('.', 1)[0])
return self._handle_instance_request(
environ, start_response, url_map, match, request_id,
inst or self._instance, request_type)
def _add_instance(self):
"""Creates and adds a new instance.Instance to the Module.
This must be called with _instance_change_lock held.
"""
inst = self._instance_factory.new_instance(0)
wsgi_servr = wsgi_server.WsgiServer(
(self._host, 0), functools.partial(self._handle_request, inst=inst))
wsgi_servr.start()
self._port_registry.add(wsgi_servr.port, self, inst)
with self._condition:
if self._quit_event.is_set():
return
self._wsgi_server = wsgi_servr
self._instance = inst
suspended = self._suspended
if not suspended:
self._async_start_instance(wsgi_servr, inst)
def _async_start_instance(self, wsgi_servr, inst):
return _THREAD_POOL.submit(self._start_instance, wsgi_servr, inst)
def _start_instance(self, wsgi_servr, inst):
try:
if not inst.start():
return
except:
logging.exception('Internal error while starting instance.')
raise
logging.debug('Started instance: %s at http://%s:%s', inst, self.host,
wsgi_servr.port)
logging.info('New instance for module "%s" serving on:\nhttp://%s\n',
self.name, self.balanced_address)
def _handle_changes(self, timeout=0):
"""Handle file or configuration changes."""
# Always check for config changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
with self._instance_change_lock:
if not self._suspended:
self.restart()
def _loop_watching_for_changes(self):
"""Loops until the InstancePool is done watching for file changes."""
while not self._quit_event.is_set():
if self.ready:
if self._automatic_restarts:
self._handle_changes(_CHANGE_POLLING_MS)
else:
time.sleep(_CHANGE_POLLING_MS/1000.0)
def get_num_instances(self):
return 1
def set_num_instances(self, instances):
pass
def _async_quit_instance(self, inst, wsgi_servr):
return _THREAD_POOL.submit(self._quit_instance, inst, wsgi_servr)
def _quit_instance(self, inst, wsgi_servr):
port = wsgi_servr.port
wsgi_servr.quit()
inst.quit(expect_shutdown=True)
self._shutdown_instance(inst, port)
def suspend(self):
"""Suspends serving for this module, quitting all running instances."""
with self._instance_change_lock:
if self._suspended:
raise request_info.VersionAlreadyStoppedError()
self._suspended = True
with self._condition:
self._wsgi_server.set_error(404)
return _THREAD_POOL.submit(
self._suspend_instance, self._instance, self._wsgi_server.port)
def _suspend_instance(self, inst, port):
inst.quit(expect_shutdown=True)
self._shutdown_instance(inst, port)
def resume(self):
"""Resumes serving for this module."""
with self._instance_change_lock:
if not self._suspended:
raise request_info.VersionAlreadyStartedError()
self._suspended = False
with self._condition:
if self._quit_event.is_set():
return
inst = self._instance_factory.new_instance(0, expect_ready_request=True)
self._instance = inst
self._wsgi_server.set_app(
functools.partial(self._handle_request, inst=inst))
self._port_registry.add(self._wsgi_server.port, self, inst)
self._async_start_instance(self._wsgi_server, inst)
def restart(self):
"""Restarts the module, replacing all running instances."""
with self._instance_change_lock:
with self._condition:
if self._quit_event.is_set():
return
inst = self._instance_factory.new_instance(0, expect_ready_request=True)
self._wsgi_server.set_app(
functools.partial(self._handle_request, inst=inst))
self._port_registry.add(self._wsgi_server.port, self, inst)
self._instance = inst
# Just force instance to stop for a faster restart.
inst.quit(force=True)
logging.info('Waiting for instances to restart')
self._start_instance(self._wsgi_server, inst)
logging.info('Instances restarted')
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
if instance_id == 0:
return self._instance
raise request_info.InvalidInstanceIdError()
def __call__(self, environ, start_response, inst=None):
return self._handle_request(environ, start_response, inst)
@property
def supports_individually_addressable_instances(self):
return True
class _ExternalInstanceFactory(instance.InstanceFactory):
"""Factory for instances that are started externally rather than by us."""
_MAX_CONCURRENT_REQUESTS = 20
# TODO: reconsider this
START_URL_MAP = appinfo.URLMap(
url='/_ah/start',
script='ignored',
login='admin')
WARMUP_URL_MAP = appinfo.URLMap(
url='/_ah/warmup',
script='ignored',
login='admin')
def __init__(self, request_data, module_configuration):
super(_ExternalInstanceFactory, self).__init__(
request_data, self._MAX_CONCURRENT_REQUESTS)
self._module_configuration = module_configuration
def new_instance(self, instance_id, expect_ready_request=False):
assert instance_id == 0
proxy = _ExternalRuntimeProxy(self._module_configuration)
return instance.Instance(self.request_data,
instance_id,
proxy,
self.max_concurrent_requests,
self.max_background_threads,
expect_ready_request)
class _ExternalRuntimeProxy(instance.RuntimeProxy):
def __init__(self, module_configuration):
super(_ExternalRuntimeProxy, self).__init__()
self._module_configuration = module_configuration
def start(self):
self._proxy = http_proxy.HttpProxy(
host='localhost', port=self._module_configuration.external_port,
instance_died_unexpectedly=lambda: False,
instance_logs_getter=lambda: '',
error_handler_file=application_configuration.get_app_error_file(
self._module_configuration),
prior_error=None)
self.handle = self._proxy.handle
class BasicScalingModule(Module):
"""A pool of instances that is basic-scaled."""
_DEFAULT_BASIC_SCALING = appinfo.BasicScaling(max_instances='1',
idle_timeout='15m')
@staticmethod
def _parse_idle_timeout(timing):
"""Parse a idle timeout string into an int of the value in seconds.
Args:
timing: A str of the form 1m or 10s.
Returns:
An int representation of the value in seconds.
"""
if timing.endswith('m'):
return int(timing[:-1]) * 60
else:
return int(timing[:-1])
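# Added note (not part of the original source): given the parsing above,
#
#   BasicScalingModule._parse_idle_timeout('15m')  # -> 900
#   BasicScalingModule._parse_idle_timeout('30s')  # -> 30
#
# so the default idle_timeout of '15m' becomes a 900-second timeout.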
@classmethod
def _populate_default_basic_scaling(cls, basic_scaling_config):
for attribute in basic_scaling_config.ATTRIBUTES:
if getattr(basic_scaling_config, attribute) in ('basic', None):
setattr(basic_scaling_config, attribute,
getattr(cls._DEFAULT_BASIC_SCALING, attribute))
def _process_basic_scaling(self, basic_scaling_config):
"""Configure _max_instances and _instance_idle_timeout."""
if basic_scaling_config:
self._populate_default_basic_scaling(basic_scaling_config)
else:
basic_scaling_config = self._DEFAULT_BASIC_SCALING
if self._max_instances is not None:
self._max_instances = min(self._max_instances,
int(basic_scaling_config.max_instances))
else:
self._max_instances = int(basic_scaling_config.max_instances)
self._instance_idle_timeout = self._parse_idle_timeout(
basic_scaling_config.idle_timeout)
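# Added note (not original source): when the configuration has no basic_scaling
# section, the defaults above apply, i.e. max_instances='1' and
# idle_timeout='15m', so self._max_instances is capped at 1 and
# self._instance_idle_timeout ends up as 900 seconds.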
def __init__(self, **kwargs):
"""Initializer for BasicScalingModule.
Args:
**kwargs: Arguments to forward to Module.__init__.
"""
super(BasicScalingModule, self).__init__(**kwargs)
self._process_basic_scaling(self._module_configuration.basic_scaling_config)
self._instances = [] # Protected by self._condition.
self._wsgi_servers = [] # Protected by self._condition.
# A list of booleans signifying whether the corresponding instance in
# self._instances has been or is being started.
self._instance_running = [] # Protected by self._condition.
self._instance_high_water_mark = 0 # Protected by self._condition
for instance_id in xrange(self._max_instances):
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
self._instances.append(inst)
self._wsgi_servers.append(wsgi_server.WsgiServer(
(self._host, 0), functools.partial(self._handle_request, inst=inst)))
self._instance_running.append(False)
self._condition = threading.Condition() # Protects instance state.
self._change_watcher_thread = threading.Thread(
target=self._loop_watching_for_changes_and_idle_instances,
name='Change Watcher')
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._ssl_port:
self._port_registry.add(self._ssl_port, self, None)
if self._watcher:
self._watcher.start()
self._change_watcher_thread.start()
for wsgi_servr, inst in zip(self._wsgi_servers, self._instances):
wsgi_servr.start()
self._port_registry.add(wsgi_servr.port, self, inst)
self.report_start_metrics()
def quit(self):
"""Stops the Module."""
self._quit_event.set()
self._change_watcher_thread.join()
# The instance adjustment thread depends on the balanced module and the
# watcher so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._balanced_module.quit()
for wsgi_servr in self._wsgi_servers:
wsgi_servr.quit()
with self._condition:
instances = self._instances
high_water = self._instance_high_water_mark
self._instances = []
self._condition.notify_all()
self.report_quit_metrics(high_water)
for inst in instances:
inst.quit(force=True)
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
try:
instance_id = int(instance_id)
except ValueError:
raise request_info.InvalidInstanceIdError()
with self._condition:
if 0 <= instance_id < len(self._instances):
wsgi_servr = self._wsgi_servers[instance_id]
else:
raise request_info.InvalidInstanceIdError()
return wsgi_servr.port
@property
def instances(self):
"""A set of all the instances currently in the Module."""
with self._condition:
return set(self._instances)
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
instance_id = inst.instance_id
start_time = time.time()
timeout_time = start_time + self._get_wait_time()
try:
while time.time() < timeout_time:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
pass
if inst.has_quit:
return self._error_response(environ, start_response, 503)
with self._condition:
if self._instance_running[instance_id]:
should_start = False
else:
self._instance_running[instance_id] = True
should_start = True
self._instance_high_water_mark = max(
self._instance_high_water_mark,
sum(self._instance_running))
if should_start:
self._start_instance(instance_id)
else:
inst.wait(timeout_time)
else:
return self._error_response(environ, start_response, 503)
finally:
with self._condition:
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if self._quit_event.is_set():
return self._error_response(environ, start_response, 404)
if self._module_configuration.is_backend:
environ['BACKEND_ID'] = self._module_configuration.module_name
else:
environ['BACKEND_ID'] = (
self._module_configuration.version_id.split('.', 1)[0])
if inst is not None:
return self._handle_instance_request(
environ, start_response, url_map, match, request_id, inst,
request_type)
start_time = time.time()
timeout_time = start_time + self._get_wait_time()
while time.time() < timeout_time:
if self._quit_event.is_set():
return self._error_response(environ, start_response, 404)
inst = self._choose_instance(timeout_time)
if inst:
try:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
continue
finally:
with self._condition:
self._condition.notify()
else:
return self._error_response(environ, start_response, 503, _TIMEOUT_HTML)
def _start_any_instance(self):
"""Choose an inactive instance and start it asynchronously.
Returns:
An instance.Instance that will be started asynchronously or None if all
instances are already running.
"""
with self._condition:
for instance_id, running in enumerate(self._instance_running):
if not running:
self._instance_running[instance_id] = True
inst = self._instances[instance_id]
self._instance_high_water_mark = max(
self._instance_high_water_mark,
sum(self._instance_running))
break
else:
return None
self._async_start_instance(instance_id)
return inst
def _async_start_instance(self, instance_id):
return _THREAD_POOL.submit(self._start_instance, instance_id)
def _start_instance(self, instance_id):
with self._condition:
if self._quit_event.is_set():
return
wsgi_servr = self._wsgi_servers[instance_id]
inst = self._instances[instance_id]
if inst.start():
logging.debug('Started instance: %s at http://%s:%s', inst, self.host,
wsgi_servr.port)
try:
environ = self.build_request_environ(
'GET', '/_ah/start', [], '', '0.1.0.3', wsgi_servr.port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
logging.debug('Sent start request: %s', inst)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
except:
logging.exception('Internal error while handling start request.')
def _choose_instance(self, timeout_time):
"""Returns an Instance to handle a request or None if all are busy."""
with self._condition:
while time.time() < timeout_time and not self._quit_event.is_set():
for inst in self._instances:
if inst.can_accept_requests:
return inst
else:
inst = self._start_any_instance()
if inst:
break
self._condition.wait(timeout_time - time.time())
else:
return None
if inst:
inst.wait(timeout_time)
return inst
def _handle_changes(self, timeout=0):
"""Handle file or configuration changes."""
# Always check for config and file changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
file_changes = self._get_file_changes(timeout)
if file_changes:
self._instance_factory.files_changed()
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES or file_changes:
self.restart()
def _loop_watching_for_changes_and_idle_instances(self):
"""Loops until the InstancePool is done watching for file changes."""
while not self._quit_event.is_set():
if self.ready:
self._shutdown_idle_instances()
if self._automatic_restarts:
self._handle_changes(_CHANGE_POLLING_MS)
else:
time.sleep(_CHANGE_POLLING_MS/1000.0)
def _shutdown_idle_instances(self):
instances_to_stop = []
with self._condition:
for instance_id, inst in enumerate(self._instances):
if (self._instance_running[instance_id] and
inst.idle_seconds > self._instance_idle_timeout):
instances_to_stop.append((self._instances[instance_id],
self._wsgi_servers[instance_id]))
self._instance_running[instance_id] = False
new_instance = self._instance_factory.new_instance(
instance_id, expect_ready_request=True)
self._instances[instance_id] = new_instance
wsgi_servr = self._wsgi_servers[instance_id]
wsgi_servr.set_app(
functools.partial(self._handle_request, inst=new_instance))
self._port_registry.add(wsgi_servr.port, self, new_instance)
for inst, wsgi_servr in instances_to_stop:
logging.debug('Shutting down %r', inst)
self._stop_instance(inst, wsgi_servr)
def _stop_instance(self, inst, wsgi_servr):
inst.quit(expect_shutdown=True)
self._async_shutdown_instance(inst, wsgi_servr.port)
def restart(self):
"""Restarts the module, replacing all running instances."""
instances_to_stop = []
instances_to_start = []
with self._condition:
if self._quit_event.is_set():
return
for instance_id, inst in enumerate(self._instances):
if self._instance_running[instance_id]:
instances_to_stop.append((inst, self._wsgi_servers[instance_id]))
new_instance = self._instance_factory.new_instance(
instance_id, expect_ready_request=True)
self._instances[instance_id] = new_instance
instances_to_start.append(instance_id)
wsgi_servr = self._wsgi_servers[instance_id]
wsgi_servr.set_app(
functools.partial(self._handle_request, inst=new_instance))
self._port_registry.add(wsgi_servr.port, self, new_instance)
for instance_id in instances_to_start:
self._async_start_instance(instance_id)
for inst, wsgi_servr in instances_to_stop:
self._stop_instance(inst, wsgi_servr)
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
try:
with self._condition:
return self._instances[int(instance_id)]
except (ValueError, IndexError):
raise request_info.InvalidInstanceIdError()
def __call__(self, environ, start_response, inst=None):
return self._handle_request(environ, start_response, inst)
@property
def supports_individually_addressable_instances(self):
return True
class InteractiveCommandModule(Module):
"""A Module that can evaluate user commands.
This module manages a single Instance which is started lazily.
"""
_MAX_REQUEST_WAIT_TIME = 15
def __init__(self,
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
java_config,
go_config,
custom_config,
cloud_sql_config,
vm_config,
default_version_port,
port_registry,
request_data,
dispatcher,
use_mtime_file_watcher,
watcher_ignore_re,
allow_skipped_files,
threadsafe_override):
"""Initializer for InteractiveCommandModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for this module.
host: A string containing the host that will be used when constructing
HTTP headers sent to the Instance executing the interactive command
e.g. "localhost".
balanced_port: An int specifying the port that will be used when
constructing HTTP headers sent to the Instance executing the
interactive command.
api_host: The host that APIServer listens for RPC requests on.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_config: A runtime_config_pb2.PhpConfig instances containing PHP
runtime-specific configuration. If None then defaults are used.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are used.
java_config: A runtime_config_pb2.JavaConfig instance containing
Java runtime-specific configuration. If None then defaults are used.
go_config: A runtime_config_pb2.GoConfig instances containing Go
runtime-specific configuration. If None then defaults are used.
custom_config: A runtime_config_pb2.CustomConfig instance. If None, or
'custom_entrypoint' is not set, then attempting to instantiate a
custom runtime module will result in an error.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
vm_config: A runtime_config_pb2.VMConfig instance containing
VM runtime-specific configuration. If None all docker-related stuff
is disabled.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
watcher_ignore_re: A regex that optionally defines a pattern for the file
watcher to ignore.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
super(InteractiveCommandModule, self).__init__(
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
java_config,
go_config,
custom_config,
cloud_sql_config,
vm_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances=1,
use_mtime_file_watcher=use_mtime_file_watcher,
watcher_ignore_re=watcher_ignore_re,
automatic_restarts=True,
allow_skipped_files=allow_skipped_files,
threadsafe_override=threadsafe_override)
# Use a single instance so that state is consistent across requests.
self._inst_lock = threading.Lock()
self._inst = None
@property
def balanced_port(self):
"""The port that the balanced HTTP server for the Module is listening on.
The InteractiveCommandModule does not actually listen on this port but it is
used when constructing the "SERVER_PORT" in the WSGI environment.
"""
return self._balanced_port
def quit(self):
"""Stops the InteractiveCommandModule."""
instances = 0
if self._inst:
instances = 1
self._inst.quit(force=True)
self._inst = None
self.report_quit_metrics(instances)
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.INTERACTIVE_REQUEST):
"""Handles a interactive request by forwarding it to the managed Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants. This must be instance.INTERACTIVE_REQUEST.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
assert inst is None
assert request_type == instance.INTERACTIVE_REQUEST
start_time = time.time()
timeout_time = start_time + self._get_wait_time()
while time.time() < timeout_time:
new_instance = False
with self._inst_lock:
if not self._inst:
self._inst = self._instance_factory.new_instance(
AutoScalingModule.generate_instance_id(),
expect_ready_request=False)
new_instance = True
inst = self._inst
if new_instance:
self._inst.start()
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
inst.wait(timeout_time)
except Exception:
# If the instance is restarted while handling a request then the
# exception raised is unpredictable.
if inst != self._inst:
start_response('503 Service Unavailable', [])
return ['Instance was restarted while executing command']
logging.exception('Unexpected exception handling command: %r', environ)
raise
else:
start_response('503 Service Unavailable', [])
return ['The command timed-out while waiting for another one to complete']
def restart(self):
"""Restarts the module."""
with self._inst_lock:
if self._inst:
self._inst.quit(force=True)
self._inst = None
def send_interactive_command(self, command):
"""Sends an interactive command to the module.
Args:
command: The command to send e.g. "print 5+5".
Returns:
A string representing the result of the command e.g. "10\n".
Raises:
InteractiveCommandError: if the command failed for any reason.
"""
start_response = start_response_utils.CapturingStartResponse()
# 192.0.2.0 is an example address defined in RFC 5737.
environ = self.build_request_environ(
'POST', '/', [], command, '192.0.2.0', self.balanced_port)
try:
response = self._handle_request(
environ,
start_response,
request_type=instance.INTERACTIVE_REQUEST)
except Exception as e:
raise InteractiveCommandError('Unexpected command failure: ' + str(e))
if start_response.status != '200 OK':
raise InteractiveCommandError(start_response.merged_response(response))
return start_response.merged_response(response)
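# Usage sketch (added commentary, not part of the original module; the values
# mirror the docstring above): given an InteractiveCommandModule instance
# `module`,
#
#   result = module.send_interactive_command('print 5+5')
#   # result == '10\n' on success; InteractiveCommandError is raised on failure.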
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
mackerel-plugin-aws-elb/aws-elb.go
|
package main
import (
"errors"
"flag"
"github.com/crowdmob/goamz/aws"
"github.com/crowdmob/goamz/cloudwatch"
mp "github.com/mackerelio/go-mackerel-plugin"
"log"
"os"
"time"
)
var graphdef map[string](mp.Graphs) = map[string](mp.Graphs){
"elb.latency": mp.Graphs{
Label: "Whole ELB Latency",
Unit: "float",
Metrics: [](mp.Metrics){
mp.Metrics{Name: "Latency", Label: "Latency"},
},
},
"elb.http_backend": mp.Graphs{
Label: "Whole ELB HTTP Backend Count",
Unit: "integer",
Metrics: [](mp.Metrics){
mp.Metrics{Name: "HTTPCode_Backend_2XX", Label: "2XX", Stacked: true},
mp.Metrics{Name: "HTTPCode_Backend_3XX", Label: "3XX", Stacked: true},
mp.Metrics{Name: "HTTPCode_Backend_4XX", Label: "4XX", Stacked: true},
mp.Metrics{Name: "HTTPCode_Backend_5XX", Label: "5XX", Stacked: true},
},
},
// "elb.healthy_host_count", "elb.unhealthy_host_count" will be generated dynamically
}
type StatType int
const (
Average StatType = iota
Sum
)
func (s StatType) String() string {
switch s {
case Average:
return "Average"
case Sum:
return "Sum"
}
return ""
}
type ELBPlugin struct {
Region string
AccessKeyId string
SecretAccessKey string
AZs []string
CloudWatch *cloudwatch.CloudWatch
}
func (p *ELBPlugin) Prepare() error {
auth, err := aws.GetAuth(p.AccessKeyId, p.SecretAccessKey, "", time.Now())
if err != nil {
return err
}
p.CloudWatch, err = cloudwatch.NewCloudWatch(auth, aws.Regions[p.Region].CloudWatchServicepoint)
if err != nil {
return err
}
ret, err := p.CloudWatch.ListMetrics(&cloudwatch.ListMetricsRequest{
Namespace: "AWS/ELB",
Dimensions: []cloudwatch.Dimension{
cloudwatch.Dimension{
Name: "AvailabilityZone",
},
},
MetricName: "HealthyHostCount",
})
if err != nil {
return err
}
p.AZs = make([]string, 0, len(ret.ListMetricsResult.Metrics))
for _, met := range ret.ListMetricsResult.Metrics {
if len(met.Dimensions) > 1 {
continue
} else if met.Dimensions[0].Name != "AvailabilityZone" {
continue
}
p.AZs = append(p.AZs, met.Dimensions[0].Value)
}
return nil
}
func (p ELBPlugin) GetLastPoint(dimension *cloudwatch.Dimension, metricName string, statType StatType) (float64, error) {
now := time.Now()
response, err := p.CloudWatch.GetMetricStatistics(&cloudwatch.GetMetricStatisticsRequest{
Dimensions: []cloudwatch.Dimension{*dimension},
StartTime: now.Add(time.Duration(120) * time.Second * -1), // 2 min (to fetch at least 1 data-point)
EndTime: now,
MetricName: metricName,
Period: 60,
Statistics: []string{statType.String()},
Namespace: "AWS/ELB",
})
if err != nil {
return 0, err
}
datapoints := response.GetMetricStatisticsResult.Datapoints
if len(datapoints) == 0 {
return 0, errors.New("fetched no datapoints")
}
latest := time.Unix(0, 0)
var latestVal float64
for _, dp := range datapoints {
if dp.Timestamp.Before(latest) {
continue
}
latest = dp.Timestamp
switch statType {
case Average:
latestVal = dp.Average
case Sum:
latestVal = dp.Sum
}
}
return latestVal, nil
}
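// Usage sketch (added commentary, not original source): FetchMetrics below
// calls GetLastPoint exactly like this to read the whole-ELB latency, assuming
// Prepare() has already initialized p.CloudWatch:
//
//	dim := &cloudwatch.Dimension{Name: "Service", Value: "ELB"}
//	latency, err := p.GetLastPoint(dim, "Latency", Average)
//	if err == nil {
//		// latency holds the Average statistic of the newest datapoint
//		// from the last two minutes.
//	}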
func (p ELBPlugin) FetchMetrics() (map[string]float64, error) {
stat := make(map[string]float64)
// HostCount per AZ
for _, az := range p.AZs {
d := &cloudwatch.Dimension{
Name: "AvailabilityZone",
Value: az,
}
for _, met := range []string{"HealthyHostCount", "UnHealthyHostCount"} {
v, err := p.GetLastPoint(d, met, Average)
if err == nil {
stat[met+"_"+az] = v
}
}
}
glb := &cloudwatch.Dimension{
Name: "Service",
Value: "ELB",
}
v, err := p.GetLastPoint(glb, "Latency", Average)
if err == nil {
stat["Latency"] = v
}
for _, met := range [...]string{"HTTPCode_Backend_2XX", "HTTPCode_Backend_3XX", "HTTPCode_Backend_4XX", "HTTPCode_Backend_5XX"} {
v, err := p.GetLastPoint(glb, met, Sum)
if err == nil {
stat[met] = v
}
}
return stat, nil
}
func (p ELBPlugin) GraphDefinition() map[string](mp.Graphs) {
for _, grp := range [...]string{"elb.healthy_host_count", "elb.unhealthy_host_count"} {
var name_pre string
var label string
switch grp {
case "elb.healthy_host_count":
name_pre = "HealthyHostCount_"
label = "ELB Healthy Host Count"
case "elb.unhealthy_host_count":
name_pre = "UnHealthyHostCount_"
label = "ELB Unhealthy Host Count"
}
var metrics [](mp.Metrics)
for _, az := range p.AZs {
metrics = append(metrics, mp.Metrics{Name: name_pre + az, Label: az, Stacked: true})
}
graphdef[grp] = mp.Graphs{
Label: label,
Unit: "integer",
Metrics: metrics,
}
}
return graphdef
}
func main() {
optRegion := flag.String("region", "", "AWS Region")
optAccessKeyId := flag.String("access-key-id", "", "AWS Access Key ID")
optSecretAccessKey := flag.String("secret-access-key", "", "AWS Secret Access Key")
optTempfile := flag.String("tempfile", "", "Temp file name")
flag.Parse()
var elb ELBPlugin
if *optRegion == "" {
elb.Region = aws.InstanceRegion()
} else {
elb.Region = *optRegion
}
elb.AccessKeyId = *optAccessKeyId
elb.SecretAccessKey = *optSecretAccessKey
err := elb.Prepare()
if err != nil {
log.Fatalln(err)
}
helper := mp.NewMackerelPlugin(elb)
if *optTempfile != "" {
helper.Tempfile = *optTempfile
} else {
helper.Tempfile = "/tmp/mackerel-plugin-elb"
}
if os.Getenv("MACKEREL_AGENT_PLUGIN_META") != "" {
helper.OutputDefinitions()
} else {
helper.OutputValues()
}
}
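// Execution sketch (added commentary; the binary name is an assumption based
// on the file name): mackerel-agent runs the plugin twice. With
// MACKEREL_AGENT_PLUGIN_META set it prints the graph definitions
// (helper.OutputDefinitions), otherwise it prints the metric values
// (helper.OutputValues):
//
//	MACKEREL_AGENT_PLUGIN_META=1 ./aws-elb -region ap-northeast-1
//	./aws-elb -region ap-northeast-1 -tempfile /tmp/mackerel-plugin-elb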
|
[
"\"MACKEREL_AGENT_PLUGIN_META\""
] |
[] |
[
"MACKEREL_AGENT_PLUGIN_META"
] |
[]
|
["MACKEREL_AGENT_PLUGIN_META"]
|
go
| 1 | 0 | |
cmd/id.go
|
// Copyright © 2018 NAME HERE <EMAIL ADDRESS>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"github.com/spf13/cobra"
. "gitlab.com/paulsevere/dff/pkg/pipeline"
"gitlab.com/paulsevere/dff/pkg/services"
)
// idCmd represents the id command
var idCmd = &cobra.Command{
Use: "id",
Short: "A brief description of your command",
Long: `A longer description that spans multiple lines and likely contains examples
and usage of using your command. For example:
Cobra is a CLI library for Go that empowers applications.
This application is a tool to generate the needed files
to quickly create a Cobra application.`,
Run: func(cmd *cobra.Command, args []string) {
iArgs := make([]interface{}, len(args))
for n, i := range args {
iArgs[n] = i
}
outPipe := Pipeline(LiftString(services.ByName))(iArgs...)
srvId := outPipe[0].(string)
// srvId := services.ByName(args[0])
println(srvId)
println()
},
}
func init() {
rootCmd.AddCommand(idCmd)
// Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command
// and all subcommands, e.g.:
// idCmd.PersistentFlags().String("foo", "", "A help for foo")
// Cobra supports local flags which will only run when this command
// is called directly, e.g.:
// idCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
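// Usage sketch (assumption, not from the original file; the binary name "dff"
// is inferred from the module path): the subcommand would be invoked roughly as
//
//	dff id <service-name>
//
// which pipes the arguments through services.ByName and prints the resulting
// service id.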
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
jfrog-cli/bintray/cli.go
|
package bintray
import (
"github.com/codegangsta/cli"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-cli/bintray/commands"
accesskeysdoc "github.com/AngelloMaggio/jfrog-cli-go/jfrog-cli/docs/bintray/accesskeys"
configdocs "github.com/AngelloMaggio/jfrog-cli-go/jfrog-cli/docs/bintray/config"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-cli/docs/bintray/downloadfile"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-cli/docs/bintray/downloadver"
entitlementsdocs "github.com/AngelloMaggio/jfrog-cli-go/jfrog-cli/docs/bintray/entitlements"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-cli/docs/bintray/gpgsignfile"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-cli/docs/bintray/gpgsignver"
logsdocs "github.com/AngelloMaggio/jfrog-cli-go/jfrog-cli/docs/bintray/logs"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-cli/docs/bintray/packagecreate"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-cli/docs/bintray/packagedelete"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-cli/docs/bintray/packageshow"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-cli/docs/bintray/packageupdate"
streamdocs "github.com/AngelloMaggio/jfrog-cli-go/jfrog-cli/docs/bintray/stream"
uploaddocs "github.com/AngelloMaggio/jfrog-cli-go/jfrog-cli/docs/bintray/upload"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-cli/docs/bintray/urlsign"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-cli/docs/bintray/versioncreate"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-cli/docs/bintray/versiondelete"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-cli/docs/bintray/versionpublish"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-cli/docs/bintray/versionshow"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-cli/docs/bintray/versionupdate"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-cli/docs/common"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-cli/utils/cliutils"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-cli/utils/config"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-client/bintray"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-client/bintray/auth"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-client/bintray/services"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-client/bintray/services/accesskeys"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-client/bintray/services/entitlements"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-client/bintray/services/packages"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-client/bintray/services/url"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-client/bintray/services/utils"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-client/bintray/services/versions"
clientutils "github.com/AngelloMaggio/jfrog-cli-go/jfrog-client/utils"
"github.com/AngelloMaggio/jfrog-cli-go/jfrog-client/utils/log"
"os"
"strconv"
"strings"
"errors"
)
func GetCommands() []cli.Command {
return []cli.Command{
{
Name: "config",
Flags: getConfigFlags(),
Aliases: []string{"c"},
Usage: configdocs.Description,
HelpName: common.CreateUsage("bt config", configdocs.Description, configdocs.Usage),
ArgsUsage: common.CreateEnvVars(),
Action: func(c *cli.Context) {
configure(c)
},
},
{
Name: "upload",
Flags: getUploadFlags(),
Aliases: []string{"u"},
Usage: uploaddocs.Description,
HelpName: common.CreateUsage("bt upload", uploaddocs.Description, uploaddocs.Usage),
UsageText: uploaddocs.Arguments,
ArgsUsage: common.CreateEnvVars(),
Action: func(c *cli.Context) {
upload(c)
},
},
{
Name: "download-file",
Flags: getDownloadFileFlags(),
Aliases: []string{"dlf"},
Usage: downloadfile.Description,
HelpName: common.CreateUsage("bt download-file", downloadfile.Description, downloadfile.Usage),
UsageText: downloadfile.Arguments,
ArgsUsage: common.CreateEnvVars(),
Action: func(c *cli.Context) {
downloadFile(c)
},
},
{
Name: "download-ver",
Flags: getDownloadVersionFlags(),
Aliases: []string{"dlv"},
Usage: downloadver.Description,
HelpName: common.CreateUsage("bt download-ver", downloadver.Description, downloadver.Usage),
UsageText: downloadver.Arguments,
ArgsUsage: common.CreateEnvVars(),
Action: func(c *cli.Context) {
downloadVersion(c)
},
},
{
Name: "package-show",
Flags: getFlags(),
Aliases: []string{"ps"},
Usage: packageshow.Description,
HelpName: common.CreateUsage("bt package-show", packageshow.Description, packageshow.Usage),
UsageText: packageshow.Arguments,
ArgsUsage: common.CreateEnvVars(),
Action: func(c *cli.Context) {
showPackage(c)
},
},
{
Name: "package-create",
Flags: getCreateAndUpdatePackageFlags(),
Aliases: []string{"pc"},
Usage: packagecreate.Description,
HelpName: common.CreateUsage("bt package-create", packagecreate.Description, packagecreate.Usage),
UsageText: packagecreate.Arguments,
ArgsUsage: common.CreateEnvVars(),
Action: func(c *cli.Context) {
createPackage(c)
},
},
{
Name: "package-update",
Flags: getCreateAndUpdatePackageFlags(),
Aliases: []string{"pu"},
Usage: packageupdate.Description,
HelpName: common.CreateUsage("bt package-update", packageupdate.Description, packageupdate.Usage),
UsageText: packageupdate.Arguments,
ArgsUsage: common.CreateEnvVars(),
Action: func(c *cli.Context) {
updatePackage(c)
},
},
{
Name: "package-delete",
Flags: getDeletePackageAndVersionFlags(),
Aliases: []string{"pd"},
Usage: packagedelete.Description,
HelpName: common.CreateUsage("bt package-delete", packagedelete.Description, packagedelete.Usage),
UsageText: packagedelete.Arguments,
ArgsUsage: common.CreateEnvVars(),
Action: func(c *cli.Context) {
deletePackage(c)
},
},
{
Name: "version-show",
Flags: getFlags(),
Aliases: []string{"vs"},
Usage: versionshow.Description,
HelpName: common.CreateUsage("bt version-show", versionshow.Description, versionshow.Usage),
UsageText: versionshow.Arguments,
ArgsUsage: common.CreateEnvVars(),
Action: func(c *cli.Context) {
showVersion(c)
},
},
{
Name: "version-create",
Flags: getCreateAndUpdateVersionFlags(),
Aliases: []string{"vc"},
Usage: versioncreate.Description,
HelpName: common.CreateUsage("bt version-create", versioncreate.Description, versioncreate.Usage),
UsageText: versioncreate.Arguments,
ArgsUsage: common.CreateEnvVars(),
Action: func(c *cli.Context) {
createVersion(c)
},
},
{
Name: "version-update",
Flags: getCreateAndUpdateVersionFlags(),
Aliases: []string{"vu"},
Usage: versionupdate.Description,
HelpName: common.CreateUsage("bt version-update", versionupdate.Description, versionupdate.Usage),
UsageText: versionupdate.Arguments,
ArgsUsage: common.CreateEnvVars(),
Action: func(c *cli.Context) {
updateVersion(c)
},
},
{
Name: "version-delete",
Flags: getDeletePackageAndVersionFlags(),
Aliases: []string{"vd"},
Usage: versiondelete.Description,
HelpName: common.CreateUsage("bt version-delete", versiondelete.Description, versiondelete.Usage),
UsageText: versiondelete.Arguments,
ArgsUsage: common.CreateEnvVars(),
Action: func(c *cli.Context) {
deleteVersion(c)
},
},
{
Name: "version-publish",
Flags: getFlags(),
Aliases: []string{"vp"},
Usage: versionpublish.Description,
HelpName: common.CreateUsage("bt version-publish", versionpublish.Description, versionpublish.Usage),
UsageText: versionpublish.Arguments,
ArgsUsage: common.CreateEnvVars(),
Action: func(c *cli.Context) {
publishVersion(c)
},
},
{
Name: "entitlements",
Flags: getEntitlementsFlags(),
Aliases: []string{"ent"},
Usage: entitlementsdocs.Description,
HelpName: common.CreateUsage("bt entitlements", entitlementsdocs.Description, entitlementsdocs.Usage),
UsageText: entitlementsdocs.Arguments,
ArgsUsage: common.CreateEnvVars(),
Action: func(c *cli.Context) {
handleEntitlements(c)
},
},
{
Name: "access-keys",
Flags: getAccessKeysFlags(),
Aliases: []string{"acc-keys"},
Usage: accesskeysdoc.Description,
HelpName: common.CreateUsage("bt access-keys", accesskeysdoc.Description, accesskeysdoc.Usage),
UsageText: accesskeysdoc.Arguments,
ArgsUsage: common.CreateEnvVars(),
Action: func(c *cli.Context) {
accessKeys(c)
},
},
{
Name: "url-sign",
Flags: getUrlSigningFlags(),
Aliases: []string{"us"},
Usage: urlsign.Description,
HelpName: common.CreateUsage("bt url-sign", urlsign.Description, urlsign.Usage),
UsageText: urlsign.Arguments,
ArgsUsage: common.CreateEnvVars(),
Action: func(c *cli.Context) {
signUrl(c)
},
},
{
Name: "gpg-sign-file",
Flags: getGpgSigningFlags(),
Aliases: []string{"gsf"},
Usage: gpgsignfile.Description,
HelpName: common.CreateUsage("bt gpg-sign-file", gpgsignfile.Description, gpgsignfile.Usage),
UsageText: gpgsignfile.Arguments,
ArgsUsage: common.CreateEnvVars(),
Action: func(c *cli.Context) {
gpgSignFile(c)
},
},
{
Name: "gpg-sign-ver",
Flags: getGpgSigningFlags(),
Aliases: []string{"gsv"},
Usage: gpgsignver.Description,
HelpName: common.CreateUsage("bt gpg-sign-ver", gpgsignver.Description, gpgsignver.Usage),
UsageText: gpgsignver.Arguments,
ArgsUsage: common.CreateEnvVars(),
Action: func(c *cli.Context) {
gpgSignVersion(c)
},
},
{
Name: "logs",
Flags: getFlags(),
Aliases: []string{"l"},
Usage: logsdocs.Description,
HelpName: common.CreateUsage("bt logs", logsdocs.Description, logsdocs.Usage),
UsageText: logsdocs.Arguments,
ArgsUsage: common.CreateEnvVars(),
Action: func(c *cli.Context) {
logs(c)
},
},
{
Name: "stream",
Flags: getStreamFlags(),
Aliases: []string{"st"},
Usage: streamdocs.Description,
HelpName: common.CreateUsage("bt stream", streamdocs.Description, streamdocs.Usage),
UsageText: streamdocs.Arguments,
ArgsUsage: common.CreateEnvVars(),
Action: func(c *cli.Context) {
stream(c)
},
},
}
}
func getFlags() []cli.Flag {
return []cli.Flag{
cli.StringFlag{
Name: "user",
Value: "",
Usage: "[Optional] Bintray username. If not set, the subject sent as part of the command argument is used for authentication.",
},
cli.StringFlag{
Name: "key",
Value: "",
Usage: "[Mandatory] Bintray API key",
},
}
}
func getStreamFlags() []cli.Flag {
return append(getFlags(), []cli.Flag{
cli.StringFlag{
Name: "include",
Value: "",
Usage: "[Optional] List of events type in the form of \"value1;value2;...\" leave empty to include all.",
},
}...)
}
func getConfigFlags() []cli.Flag {
flags := []cli.Flag{
cli.StringFlag{
Name: "interactive",
Value: "",
Usage: "[Default: true] Set to false if you do not want the config command to be interactive.",
},
}
flags = append(flags, getFlags()...)
return append(flags, cli.StringFlag{
Name: "licenses",
Value: "",
Usage: "[Optional] Default package licenses in the form of Apache-2.0,GPL-3.0...",
})
}
func getPackageFlags(prefix string) []cli.Flag {
return []cli.Flag{
cli.StringFlag{
Name: "licenses",
Value: "",
Usage: "[Mandatory for OSS] Package licenses in the form of Apache-2.0,GPL-3.0...",
},
cli.StringFlag{
Name: "vcs-url",
Value: "",
Usage: "[Mandatory for OSS] Package VCS URL.",
},
cli.StringFlag{
Name: "pub-dn",
Value: "",
Usage: "[Default: false] Public download numbers.",
},
cli.StringFlag{
Name: "pub-stats",
Value: "",
Usage: "[Default: true] Public statistics.",
},
cli.StringFlag{
Name: "desc",
Value: "",
Usage: "[Optional] Package description.",
},
cli.StringFlag{
Name: "labels",
Value: "",
Usage: "[Optional] Package lables in the form of \"lable11\",\"lable2\"...",
},
cli.StringFlag{
Name: "cust-licenses",
Value: "",
Usage: "[Optional] Package custom licenses in the form of \"my-license-1\",\"my-license-2\"...",
},
cli.StringFlag{
Name: "website-url",
Value: "",
Usage: "[Optional] Package web site URL.",
},
cli.StringFlag{
Name: "issuetracker-url",
Value: "",
Usage: "[Optional] Package Issues Tracker URL.",
},
cli.StringFlag{
Name: "github-repo",
Value: "",
Usage: "[Optional] Package Github repository.",
},
cli.StringFlag{
Name: "github-rel-notes",
Value: "",
Usage: "[Optional] Github release notes file.",
},
}
}
func getVersionFlags() []cli.Flag {
return []cli.Flag{
cli.StringFlag{
Name: "github-tag-rel-notes",
Value: "",
Usage: "[Default: false] Set to true if you wish to use a Github tag release notes.",
},
cli.StringFlag{
Name: "desc",
Value: "",
Usage: "[Optional] Version description.",
},
cli.StringFlag{
Name: "released",
Value: "",
Usage: "[Optional] Release date in ISO8601 format (yyyy-MM-dd'T'HH:mm:ss.SSSZ)",
},
cli.StringFlag{
Name: "github-rel-notes",
Value: "",
Usage: "[Optional] Github release notes file.",
},
cli.StringFlag{
Name: "vcs-tag",
Value: "",
Usage: "[Optional] VCS tag.",
},
}
}
func getCreateAndUpdatePackageFlags() []cli.Flag {
return append(getFlags(), getPackageFlags("")...)
}
func getCreateAndUpdateVersionFlags() []cli.Flag {
return append(getFlags(), getVersionFlags()...)
}
func getDeletePackageAndVersionFlags() []cli.Flag {
return append(getFlags(), cli.StringFlag{
Name: "quiet",
Value: "",
Usage: "[Default: false] Set to true to skip the delete confirmation message.",
})
}
func getDownloadFlags() []cli.Flag {
return []cli.Flag{
cli.StringFlag{
Name: "flat",
Value: "",
Usage: "[Default: false] Set to true if you do not wish to have the Bintray path structure created locally for your downloaded files.",
},
cli.StringFlag{
Name: "min-split",
Value: "",
Usage: "[Default: 5120] Minimum file size in KB to split into ranges when downloading. Set to -1 for no splits.",
},
cli.StringFlag{
Name: "split-count",
Value: "",
Usage: "[Default: 3] Number of parts to split a file when downloading. Set to 0 for no splits.",
},
cli.StringFlag{
Name: "unpublished",
Value: "",
Usage: "[Default: false] Download both published and unpublished files.",
},
}
}
func getDownloadFileFlags() []cli.Flag {
return append(getFlags(), getDownloadFlags()...)
}
func getDownloadVersionFlags() []cli.Flag {
flags := append(getFlags(), cli.StringFlag{
Name: "threads",
Value: "",
Usage: "[Default: 3] Number of artifacts to download in parallel.",
})
return append(flags, getDownloadFlags()...)
}
func getUploadFlags() []cli.Flag {
return append(getFlags(), []cli.Flag{
cli.StringFlag{
Name: "recursive",
Value: "",
Usage: "[Default: true] Set to false if you do not wish to collect files in sub-folders to be uploaded to Bintray.",
},
cli.StringFlag{
Name: "flat",
Value: "",
Usage: "[Default: true] If set to false, files are uploaded according to their file system hierarchy.",
},
cli.BoolFlag{
Name: "regexp",
Usage: "[Default: false] Set to true to use a regular expression instead of wildcards expression to collect files to upload.",
},
cli.StringFlag{
Name: "publish",
Value: "",
Usage: "[Default: false] Set to true to publish the uploaded files.",
},
cli.StringFlag{
Name: "override",
Value: "",
Usage: "[Default: false] Set to true to enable overriding existing published files.",
},
cli.StringFlag{
Name: "explode",
Value: "",
Usage: "[Default: false] Set to true to explode archived files after upload.",
},
cli.StringFlag{
Name: "threads",
Value: "",
Usage: "[Default: 3] Number of artifacts to upload in parallel.",
},
cli.BoolFlag{
Name: "dry-run",
Usage: "[Default: false] Set to true to disable communication with Bintray.",
},
cli.StringFlag{
Name: "deb",
Value: "",
Usage: "[Optional] Used for Debian packages in the form of distribution/component/architecture.",
},
}...)
}
func getEntitlementsFlags() []cli.Flag {
return append(getFlags(), []cli.Flag{
cli.StringFlag{
Name: "id",
Usage: "[Optional] Entitlement ID. Used for entitlements update.",
},
cli.StringFlag{
Name: "access",
Usage: "[Optional] Entitlement access. Used for entitlements creation and update.",
},
cli.StringFlag{
Name: "keys",
Usage: "[Optional] Used for entitlements creation and update. List of Access Keys in the form of \"key1\",\"key2\"...",
},
cli.StringFlag{
Name: "path",
Usage: "[Optional] Entitlement path. Used for entitlements creating and update.",
},
}...)
}
func getAccessKeysFlags() []cli.Flag {
return append(getFlags(), []cli.Flag{
cli.StringFlag{
Name: "org",
Usage: "[Optional] Bintray organization",
},
cli.StringFlag{
Name: "password",
Usage: "[Optional] Access Key password.",
},
cli.StringFlag{
Name: "expiry",
Usage: "[Optional] Access Key expiry (required for 'jfrog bt acc-keys show/create/update/delete'",
},
cli.StringFlag{
Name: "ex-check-url",
Usage: "[Optional] You can optionally provide an existence check directive, in the form of a callback URL, to verify whether the source identity of the Access Key still exists.",
},
cli.StringFlag{
Name: "ex-check-cache",
Usage: "[Optional] You can optionally provide the period in seconds for the callback URL results cache.",
},
cli.StringFlag{
Name: "white-cidrs",
Usage: "[Optional] Specifying white CIDRs in the form of 127.0.0.1/22,193.5.0.1/92 will allow access only for those IPs that exist in that address range.",
},
cli.StringFlag{
Name: "black-cidrs",
Usage: "[Optional] Specifying black CIDRs in the form of 127.0.0.1/22,193.5.0.1/92 will block access for all IPs that exist in the specified range.",
},
cli.StringFlag{
Name: "api-only",
Usage: "[Default: true] You can set api_only to false to allow access keys access to Bintray UI as well as to the API.",
},
}...)
}
func getUrlSigningFlags() []cli.Flag {
return append(getFlags(), []cli.Flag{
cli.StringFlag{
Name: "expiry",
Usage: "[Optional] An expiry date for the URL, in Unix epoch time in milliseconds, after which the URL will be invalid. By default, expiry date will be 24 hours.",
},
cli.StringFlag{
Name: "valid-for",
Usage: "[Optional] The number of seconds since generation before the URL expires. Mutually exclusive with the --expiry option.",
},
cli.StringFlag{
Name: "callback-id",
Usage: "[Optional] An applicative identifier for the request. This identifier appears in download logs and is used in email and download webhook notifications.",
},
cli.StringFlag{
Name: "callback-email",
Usage: "[Optional] An email address to send mail to when a user has used the download URL. This requiers a callback_id. The callback-id will be included in the mail message.",
},
cli.StringFlag{
Name: "callback-url",
Usage: "[Optional] A webhook URL to call when a user has used the download URL.",
},
cli.StringFlag{
Name: "callback-method",
Usage: "[Optional] HTTP method to use for making the callback. Will use POST by default. Supported methods are: GET, POST, PUT and HEAD.",
},
}...)
}
func getGpgSigningFlags() []cli.Flag {
return append(getFlags(), cli.StringFlag{
Name: "passphrase",
Usage: "[Optional] GPG passphrase.",
})
}
func configure(c *cli.Context) {
if c.NArg() > 1 {
cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c)
} else if c.NArg() == 1 {
if c.Args().Get(0) == "show" {
commands.ShowConfig()
} else if c.Args().Get(0) == "clear" {
commands.ClearConfig()
} else {
cliutils.ExitOnErr(errors.New("Unknown argument '"+c.Args().Get(0)+"'. Available arguments are 'show' and 'clear'."))
}
} else {
interactive := cliutils.GetBoolFlagValue(c, "interactive", true)
if !interactive {
if c.String("user") == "" || c.String("key") == "" {
cliutils.ExitOnErr(errors.New("The --user and --key options are mandatory when the --interactive option is set to false"))
}
}
bintrayDetails, err := createBintrayDetails(c, false)
cliutils.ExitOnErr(err)
cliBtDetails := &config.BintrayDetails{
User: bintrayDetails.GetUser(),
Key: bintrayDetails.GetKey(),
ApiUrl: bintrayDetails.GetApiUrl(),
DownloadServerUrl: bintrayDetails.GetDownloadServerUrl(),
DefPackageLicense: bintrayDetails.GetDefPackageLicense(),
}
commands.Config(cliBtDetails, nil, interactive)
}
}
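// Note on configure above (added commentary; the exact CLI syntax is an
// assumption): a hypothetical session could look like
//
//	jfrog bt config --user=my-user --key=my-api-key --interactive=false
//	jfrog bt config show
//	jfrog bt config clear
//
// With --interactive=false both --user and --key are mandatory, as enforced
// above.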
func showPackage(c *cli.Context) {
if c.NArg() != 1 {
cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c)
}
packagePath, err := packages.CreatePath(c.Args().Get(0))
cliutils.ExitOnErr(err)
btConfig := newBintrayConfig(c)
err = commands.ShowPackage(btConfig, packagePath)
cliutils.ExitOnErr(err)
}
func showVersion(c *cli.Context) {
if c.NArg() != 1 {
cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c)
}
versionPath, err := versions.CreatePath(c.Args().Get(0))
cliutils.ExitOnErr(err)
btConfig := newBintrayConfig(c)
err = commands.ShowVersion(btConfig, versionPath)
cliutils.ExitOnErr(err)
}
func createPackage(c *cli.Context) {
if c.NArg() != 1 {
cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c)
}
packageParams, err := createPackageParams(c)
cliutils.ExitOnErr(err)
btConfig := newBintrayConfig(c)
err = commands.CreatePackage(btConfig, packageParams)
cliutils.ExitOnErr(err)
}
func createVersion(c *cli.Context) {
if c.NArg() != 1 {
cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c)
}
versionParams, err := createVersionParams(c)
cliutils.ExitOnErr(err)
btConfig := newBintrayConfig(c)
err = commands.CreateVersion(btConfig, versionParams)
cliutils.ExitOnErr(err)
}
func updateVersion(c *cli.Context) {
if c.NArg() != 1 {
cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c)
}
versionParams, err := createVersionParams(c)
cliutils.ExitOnErr(err)
btConfig := newBintrayConfig(c)
err = commands.UpdateVersion(btConfig, versionParams)
cliutils.ExitOnErr(err)
}
func updatePackage(c *cli.Context) {
if c.NArg() != 1 {
cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c)
}
packageParams, err := createPackageParams(c)
cliutils.ExitOnErr(err)
btConfig := newBintrayConfig(c)
err = commands.UpdatePackage(btConfig, packageParams)
cliutils.ExitOnErr(err)
}
func deletePackage(c *cli.Context) {
if c.NArg() != 1 {
cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c)
}
packagePath, err := packages.CreatePath(c.Args().Get(0))
cliutils.ExitOnErr(err)
btConfig := newBintrayConfig(c)
if !c.Bool("quiet") {
confirmed := cliutils.InteractiveConfirm("Delete package " + packagePath.Package + "?")
if !confirmed {
return
}
}
err = commands.DeletePackage(btConfig, packagePath)
cliutils.ExitOnErr(err)
}
func deleteVersion(c *cli.Context) {
if c.NArg() != 1 {
cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c)
}
versionPath, err := versions.CreatePath(c.Args().Get(0))
cliutils.ExitOnErr(err)
btConfig := newBintrayConfig(c)
if !c.Bool("quiet") {
confirmed := cliutils.InteractiveConfirm("Delete version " + versionPath.Version +
" of package " + versionPath.Package + "?")
if !confirmed {
return
}
}
err = commands.DeleteVersion(btConfig, versionPath)
cliutils.ExitOnErr(err)
}
func publishVersion(c *cli.Context) {
if c.NArg() != 1 {
cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c)
}
versionPath, err := versions.CreatePath(c.Args().Get(0))
cliutils.ExitOnErr(err)
btConfig := newBintrayConfig(c)
err = commands.PublishVersion(btConfig, versionPath)
cliutils.ExitOnErr(err)
}
func downloadVersion(c *cli.Context) {
if c.NArg() < 1 || c.NArg() > 2 {
cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c)
}
var err error
params := services.NewDownloadVersionParams()
params.IncludeUnpublished = cliutils.GetBoolFlagValue(c, "unpublished", false)
params.Path, err = services.CreateVersionDetailsForDownloadVersion(c.Args().Get(0))
cliutils.ExitOnErr(err)
params.TargetPath = c.Args().Get(1)
if strings.HasPrefix(params.TargetPath, "/") {
params.TargetPath = params.TargetPath[1:]
}
btConfig := newBintrayConfig(c)
downloaded, failed, err := commands.DownloadVersion(btConfig, params)
err = cliutils.PrintSummaryReport(downloaded, failed, err)
cliutils.ExitOnErr(err)
if failed > 0 {
cliutils.ExitOnErr(errors.New(""))
}
}
func upload(c *cli.Context) {
if c.NArg() < 2 || c.NArg() > 3 {
cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c)
}
params := services.NewUploadParams()
params.Pattern = c.Args().Get(0)
var err error
params.Path, err = versions.CreatePath(c.Args().Get(1))
cliutils.ExitOnErr(err)
params.TargetPath = c.Args().Get(2)
if strings.HasPrefix(params.TargetPath, "/") {
params.TargetPath = params.TargetPath[1:]
}
params.Deb = c.String("deb")
if params.Deb != "" && len(strings.Split(params.Deb, "/")) != 3 {
cliutils.ExitOnErr(errors.New("The --deb option should be in the form of distribution/component/architecture"))
}
params.Recursive = cliutils.GetBoolFlagValue(c, "recursive", true)
params.Flat = cliutils.GetBoolFlagValue(c, "flat", true)
params.Publish = cliutils.GetBoolFlagValue(c, "publish", false)
params.Override = cliutils.GetBoolFlagValue(c, "override", false)
params.Explode = cliutils.GetBoolFlagValue(c, "explode", false)
params.UseRegExp = cliutils.GetBoolFlagValue(c, "regexp", false)
uploadConfig := newBintrayConfig(c)
uploaded, failed, err := commands.Upload(uploadConfig, params)
err = cliutils.PrintSummaryReport(uploaded, failed, err)
cliutils.ExitOnErr(err)
if failed > 0 {
cliutils.ExitOnErr(errors.New(""))
}
}
func downloadFile(c *cli.Context) {
if c.NArg() < 1 || c.NArg() > 2 {
cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c)
}
var err error
params := services.NewDownloadFileParams()
params.Flat = cliutils.GetBoolFlagValue(c, "flat", false)
params.IncludeUnpublished = cliutils.GetBoolFlagValue(c, "unpublished", false)
params.PathDetails, err = utils.CreatePathDetails(c.Args().Get(0))
cliutils.ExitOnErr(err)
params.TargetPath = c.Args().Get(1)
if strings.HasPrefix(params.TargetPath, "/") {
params.TargetPath = params.TargetPath[1:]
}
btConfig := newBintrayConfig(c)
downloaded, failed, err := commands.DownloadFile(btConfig, params)
err = cliutils.PrintSummaryReport(downloaded, failed, err)
cliutils.ExitOnErr(err)
}
func signUrl(c *cli.Context) {
if c.NArg() != 1 {
cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c)
}
btConfig := newBintrayConfig(c)
signUrlParams := createUrlSigningFlags(c)
err := commands.SignVersion(btConfig, signUrlParams)
cliutils.ExitOnErr(err)
}
func gpgSignFile(c *cli.Context) {
if c.NArg() != 1 {
cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c)
}
pathDetails, err := utils.CreatePathDetails(c.Args().Get(0))
cliutils.ExitOnErr(err)
btConfig := newBintrayConfig(c)
err = commands.GpgSignFile(btConfig, pathDetails, c.String("passphrase"))
cliutils.ExitOnErr(err)
}
func logs(c *cli.Context) {
btConfig := newBintrayConfig(c)
if c.NArg() == 1 {
versionPath, err := versions.CreatePath(c.Args().Get(0))
cliutils.ExitOnErr(err)
err = commands.LogsList(btConfig, versionPath)
cliutils.ExitOnErr(err)
} else if c.NArg() == 3 && c.Args().Get(0) == "download" {
versionPath, err := versions.CreatePath(c.Args().Get(1))
cliutils.ExitOnErr(err)
err = commands.DownloadLog(btConfig, versionPath, c.Args().Get(2))
cliutils.ExitOnErr(err)
} else {
cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c)
}
}
func stream(c *cli.Context) {
bintrayDetails, err := createBintrayDetails(c, true)
if err != nil {
cliutils.ExitOnErr(err)
}
if c.NArg() != 1 {
cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c)
}
streamDetails := &commands.StreamDetails{
BintrayDetails: bintrayDetails,
Subject: c.Args().Get(0),
Include: c.String("include"),
}
commands.Stream(streamDetails, os.Stdout)
}
func gpgSignVersion(c *cli.Context) {
if c.NArg() != 1 {
cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c)
}
versionDetails, err := versions.CreatePath(c.Args().Get(0))
cliutils.ExitOnErr(err)
btConfig := newBintrayConfig(c)
err = commands.GpgSignVersion(btConfig, versionDetails, c.String("passphrase"))
cliutils.ExitOnErr(err)
}
func accessKeys(c *cli.Context) {
var err error
org := c.String("org")
btConfig := newBintrayConfig(c)
if c.NArg() == 0 {
err = commands.ShowAllAccessKeys(btConfig, org)
cliutils.ExitOnErr(err)
return
}
if c.NArg() != 2 {
cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c)
}
keyId := c.Args().Get(1)
switch c.Args().Get(0) {
case "show":
err = commands.ShowAccessKey(btConfig, org, keyId)
case "delete":
err = commands.DeleteAccessKey(btConfig, org, keyId)
case "create":
err = commands.CreateAccessKey(btConfig, createAccessKeysParams(c, org, keyId))
case "update":
err = commands.UpdateAccessKey(btConfig, createAccessKeysParams(c, org, keyId))
default:
cliutils.ExitOnErr(errors.New("Expecting show, create, update or delete before the key argument. Got "+c.Args().Get(0)))
}
cliutils.ExitOnErr(err)
}
func handleEntitlements(c *cli.Context) {
if c.NArg() == 0 {
cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c)
}
btConfig := newBintrayConfig(c)
if c.NArg() == 1 {
details, err := entitlements.CreateVersionDetails(c.Args().Get(0))
cliutils.ExitOnErr(err)
err = commands.ShowAllEntitlements(btConfig, details)
cliutils.ExitOnErr(err)
return
}
if c.NArg() != 2 {
cliutils.PrintHelpAndExitWithError("Wrong number of arguments.", c)
}
versionPath, err := entitlements.CreateVersionDetails(c.Args().Get(1))
cliutils.ExitOnErr(err)
switch c.Args().Get(0) {
case "show":
id := c.String("id")
if id == "" {
cliutils.ExitOnErr(errors.New("Please add the --id option"))
}
err = commands.ShowEntitlement(btConfig, id, versionPath)
case "create":
params := createEntitlementFlagsForCreate(c, versionPath)
err = commands.CreateEntitlement(btConfig, params)
case "update":
params := createEntitlementFlagsForUpdate(c, versionPath)
err = commands.UpdateEntitlement(btConfig, params)
case "delete":
id := c.String("id")
if id == "" {
cliutils.ExitOnErr(errors.New("Please add the --id option"))
}
err = commands.DeleteEntitlement(btConfig, id, versionPath)
default:
cliutils.ExitOnErr(errors.New("Expecting show, create, update or delete before "+c.Args().Get(1)+". Got "+c.Args().Get(0)))
}
cliutils.ExitOnErr(err)
}
func createPackageParams(c *cli.Context) (*packages.Params, error) {
var publicDownloadNumbers string
var publicStats string
if c.String("pub-dn") != "" {
publicDownloadNumbers = c.String("pub-dn")
publicDownloadNumbers = strings.ToLower(publicDownloadNumbers)
if publicDownloadNumbers != "true" && publicDownloadNumbers != "false" {
cliutils.ExitOnErr(errors.New("The --pub-dn option should have a boolean value."))
}
}
if c.String("pub-stats") != "" {
publicStats = c.String("pub-stats")
publicStats = strings.ToLower(publicStats)
if publicStats != "true" && publicStats != "false" {
cliutils.ExitOnErr(errors.New("The --pub-stats option should have a boolean value."))
}
}
licenses := c.String("licenses")
if licenses == "" {
confDetails, err := commands.GetConfig()
if err != nil {
return nil, err
}
licenses = confDetails.DefPackageLicense
}
packagePath, err := packages.CreatePath(c.Args().Get(0))
if err != nil {
return nil, err
}
params := packages.NewPackageParams()
params.Path = packagePath
params.Desc = c.String("desc")
params.Labels = c.String("labels")
params.Licenses = licenses
params.CustomLicenses = c.String("cust-licenses")
params.VcsUrl = c.String("vcs-url")
params.WebsiteUrl = c.String("website-url")
params.IssueTrackerUrl = c.String("issuetracker-url")
params.GithubRepo = c.String("github-repo")
params.GithubReleaseNotesFile = c.String("github-rel-notes")
params.PublicDownloadNumbers = publicDownloadNumbers
params.PublicStats = publicStats
return params, nil
}
func newBintrayConfig(c *cli.Context) bintray.Config {
btDetails, err := createBintrayDetails(c, true)
cliutils.ExitOnErr(err)
btConfig := bintray.NewConfigBuilder().
SetBintrayDetails(btDetails).
SetDryRun(c.Bool("dry-run")).
SetThreads(getThreadsOptionValue(c)).
SetMinSplitSize(getMinSplitFlag(c)).
SetSplitCount(getSplitCountFlag(c)).
SetLogger(log.Logger).
Build()
return btConfig
}
func createVersionParams(c *cli.Context) (*versions.Params, error) {
var githubTagReleaseNotes string
if c.String("github-tag-rel-notes") != "" {
githubTagReleaseNotes = c.String("github-tag-rel-notes")
githubTagReleaseNotes = strings.ToLower(githubTagReleaseNotes)
if githubTagReleaseNotes != "true" && githubTagReleaseNotes != "false" {
cliutils.ExitOnErr(errors.New("The --github-tag-rel-notes option should have a boolean value."))
}
}
versionDetails, err := versions.CreatePath(c.Args().Get(0))
if err != nil {
return nil, err
}
params := versions.NewVersionParams()
params.Path = versionDetails
params.Desc = c.String("desc")
params.VcsTag = c.String("vcs-tag")
params.Released = c.String("released")
params.GithubReleaseNotesFile = c.String("github-rel-notes")
params.GithubUseTagReleaseNotes = githubTagReleaseNotes
return params, nil
}
func createUrlSigningFlags(c *cli.Context) *url.Params {
if c.String("valid-for") != "" {
_, err := strconv.ParseInt(c.String("valid-for"), 10, 64)
if err != nil {
cliutils.ExitOnErr(errors.New("The '--valid-for' option should have a numeric value."))
}
}
urlSigningDetails, err := utils.CreatePathDetails(c.Args().Get(0))
cliutils.ExitOnErr(err)
var expiry int64
if c.String("expiry") != "" {
var err error
expiry, err = strconv.ParseInt(c.String("expiry"), 10, 64)
if err != nil {
cliutils.ExitOnErr(errors.New("The --expiry option should have a numeric value."))
}
}
params := url.NewURLParams()
params.PathDetails = urlSigningDetails
params.Expiry = expiry
params.ValidFor = c.Int("valid-for")
params.CallbackId = c.String("callback-id")
params.CallbackEmail = c.String("callback-email")
params.CallbackUrl = c.String("callback-url")
params.CallbackMethod = c.String("callback-method")
return params
}
func getThreadsOptionValue(c *cli.Context) (threads int) {
if c.String("threads") == "" {
threads = 3
} else {
var err error
threads, err = strconv.Atoi(c.String("threads"))
if err != nil || threads < 1 {
cliutils.ExitOnErr(errors.New("The '--threads' option should have a numeric positive value."))
}
}
return
}
func createEntitlementFlagsForCreate(c *cli.Context, path *versions.Path) *entitlements.Params {
if c.String("access") == "" {
cliutils.ExitOnErr(errors.New("Please add the --access option"))
}
params := entitlements.NewEntitlementsParams()
params.VersionPath = path
params.Path = c.String("path")
params.Access = c.String("access")
params.Keys = c.String("keys")
return params
}
func createEntitlementFlagsForUpdate(c *cli.Context, path *versions.Path) *entitlements.Params {
if c.String("id") == "" {
cliutils.ExitOnErr(errors.New("Please add the --id option"))
}
if c.String("access") == "" {
cliutils.ExitOnErr(errors.New("Please add the --access option"))
}
params := entitlements.NewEntitlementsParams()
params.VersionPath = path
params.Id = c.String("id")
params.Path = c.String("path")
params.Access = c.String("access")
params.Keys = c.String("keys")
return params
}
func createAccessKeysParams(c *cli.Context, org, keyId string) *accesskeys.Params {
var cachePeriod int
if c.String("ex-check-cache") != "" {
var err error
cachePeriod, err = strconv.Atoi(c.String("ex-check-cache"))
if err != nil {
cliutils.ExitOnErr(errors.New("The --ex-check-cache option should have a numeric value."))
}
}
var expiry int64
if c.String("expiry") != "" {
var err error
expiry, err = strconv.ParseInt(c.String("expiry"), 10, 64)
if err != nil {
cliutils.ExitOnErr(errors.New("The --expiry option should have a numeric value."))
}
}
params := accesskeys.NewAccessKeysParams()
params.Id = keyId
params.Password = c.String("password")
params.Org = org
params.Expiry = expiry
params.ExistenceCheckUrl = c.String("ex-check-url")
params.ExistenceCheckCache = cachePeriod
params.WhiteCidrs = c.String("white-cidrs")
params.BlackCidrs = c.String("black-cidrs")
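// NOTE: the line below reads the "recursive" flag; this looks like a copy/paste from the upload flags and was presumably meant to read a dedicated API-only flag.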
params.ApiOnly = cliutils.GetBoolFlagValue(c, "recursive", true)
return params
}
func offerConfig(c *cli.Context) (*config.BintrayDetails, error) {
exists, err := config.IsBintrayConfExists()
if err != nil {
return nil, err
}
if exists {
return nil, nil
}
val, err := cliutils.GetBoolEnvValue("JFROG_CLI_OFFER_CONFIG", true)
if err != nil {
return nil, err
}
if !val {
config.SaveBintrayConf(new(config.BintrayDetails))
return nil, nil
}
msg := "Some CLI commands require the following common options:\n" +
"- User\n" +
"- API Key\n" +
"- Default Package Licenses\n" +
"Configuring JFrog CLI with these parameters now will save you having to include them as command options.\n" +
"You can also configure these parameters later using the 'config' command.\n" +
"Configure now?"
confirmed := cliutils.InteractiveConfirm(msg)
if !confirmed {
config.SaveBintrayConf(new(config.BintrayDetails))
return nil, nil
}
bintrayDetails, err := createBintrayDetails(c, false)
if err != nil {
return nil, err
}
cliBtDetails := &config.BintrayDetails{
ApiUrl: bintrayDetails.GetApiUrl(),
DownloadServerUrl: bintrayDetails.GetDownloadServerUrl(),
User: bintrayDetails.GetUser(),
Key: bintrayDetails.GetKey(),
DefPackageLicense: bintrayDetails.GetDefPackageLicense()}
details, err := commands.Config(nil, cliBtDetails, true)
cliutils.ExitOnErr(err)
details.ApiUrl = bintrayDetails.GetApiUrl()
details.DownloadServerUrl = bintrayDetails.GetDownloadServerUrl()
return details, nil
}
func createBintrayDetails(c *cli.Context, includeConfig bool) (auth.BintrayDetails, error) {
if includeConfig {
bintrayDetails, err := offerConfig(c)
if err != nil {
return nil, err
}
if bintrayDetails != nil {
btDetails := auth.NewBintrayDetails()
btDetails.SetApiUrl(bintrayDetails.ApiUrl)
btDetails.SetDownloadServerUrl(bintrayDetails.DownloadServerUrl)
btDetails.SetUser(bintrayDetails.User)
btDetails.SetKey(bintrayDetails.Key)
btDetails.SetDefPackageLicense(bintrayDetails.DefPackageLicense)
return btDetails, nil
}
}
user := c.String("user")
key := c.String("key")
defaultPackageLicenses := c.String("licenses")
if includeConfig && (user == "" || key == "" || defaultPackageLicenses == "") {
confDetails, err := commands.GetConfig()
if err != nil {
return nil, err
}
if user == "" {
user = confDetails.User
}
if key == "" {
key = confDetails.Key
}
if key == "" {
cliutils.ExitOnErr(errors.New("Please set your Bintray API key using the config command or send it as the --key option."))
}
if defaultPackageLicenses == "" {
defaultPackageLicenses = confDetails.DefPackageLicense
}
}
btDetails := auth.NewBintrayDetails()
apiUrl := os.Getenv("JFROG_CLI_BINTRAY_API_URL")
if apiUrl != "" {
apiUrl = clientutils.AddTrailingSlashIfNeeded(apiUrl)
btDetails.SetApiUrl(apiUrl)
}
downloadServerUrl := os.Getenv("JFROG_CLI_BINTRAY_DOWNLOAD_URL")
if downloadServerUrl != "" {
downloadServerUrl = clientutils.AddTrailingSlashIfNeeded(downloadServerUrl)
btDetails.SetDownloadServerUrl(downloadServerUrl)
}
btDetails.SetUser(user)
btDetails.SetKey(key)
btDetails.SetDefPackageLicense(defaultPackageLicenses)
return btDetails, nil
}
func getMinSplitFlag(c *cli.Context) int64 {
if c.String("min-split") == "" {
return 5120
}
minSplit, err := strconv.ParseInt(c.String("min-split"), 10, 64)
if err != nil {
cliutils.PrintHelpAndExitWithError("The '--min-split' option should have a numeric value.", c)
}
return minSplit
}
func getSplitCountFlag(c *cli.Context) int {
if c.String("split-count") == "" {
return 3
}
splitCount, err := strconv.Atoi(c.String("split-count"))
if err != nil {
cliutils.PrintHelpAndExitWithError("The '--split-count' option should have a numeric value.", c)
}
if splitCount > 15 {
cliutils.ExitOnErr(errors.New("The '--split-count' option value is limitted to a maximum of 15."))
}
if splitCount < 0 {
cliutils.ExitOnErr(errors.New("The '--split-count' option cannot have a negative value."))
}
return splitCount
}
|
[
"\"JFROG_CLI_BINTRAY_API_URL\"",
"\"JFROG_CLI_BINTRAY_DOWNLOAD_URL\""
] |
[] |
[
"JFROG_CLI_BINTRAY_API_URL",
"JFROG_CLI_BINTRAY_DOWNLOAD_URL"
] |
[]
|
["JFROG_CLI_BINTRAY_API_URL", "JFROG_CLI_BINTRAY_DOWNLOAD_URL"]
|
go
| 2 | 0 | |
selfdrive/modeld/runners/onnx_runner.py
|
#!/usr/bin/env python3
# TODO: why are the keras models saved with python 2?
from __future__ import print_function
import os
import sys
import numpy as np
import onnxruntime as ort
def read(sz):
dd = []
gt = 0
while gt < sz * 4:
st = os.read(0, sz * 4 - gt)
assert(len(st) > 0)
dd.append(st)
gt += len(st)
return np.frombuffer(b''.join(dd), dtype=np.float32)
def write(d):
os.write(1, d.tobytes())
def run_loop(m):
ishapes = [[1]+ii.shape[1:] for ii in m.get_inputs()]
keys = [x.name for x in m.get_inputs()]
print("ready to run onnx model", keys, ishapes, file=sys.stderr)
while 1:
inputs = []
for shp in ishapes:
ts = np.prod(shp) #np.product is a deprecated alias of np.prod
#print("reshaping %s with offset %d" % (str(shp), offset), file=sys.stderr)
inputs.append(read(ts).reshape(shp))
ret = m.run(None, dict(zip(keys, inputs)))
#print(ret, file=sys.stderr)
for r in ret:
write(r)
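# Hedged sketch (not part of the original runner): one way a parent process could
# drive this script over stdin/stdout. The input shape and output size below are
# illustrative assumptions; the real caller must know the model's I/O sizes.
def _example_parent_driver(model_path, input_shape=(1, 4), output_floats=1):
  import subprocess
  proc = subprocess.Popen([sys.executable, __file__, model_path],
                          stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  x = np.zeros(input_shape, dtype=np.float32)
  proc.stdin.write(x.tobytes())              # the runner's read() expects raw float32 bytes
  proc.stdin.flush()
  raw = proc.stdout.read(output_floats * 4)  # the runner's write() emits raw float32 bytes
  proc.terminate()
  return np.frombuffer(raw, dtype=np.float32)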
if __name__ == "__main__":
print(ort.get_available_providers(), file=sys.stderr)
if 'OpenVINOExecutionProvider' in ort.get_available_providers() and 'ONNXCPU' not in os.environ:
print("OnnxJit is using openvino", file=sys.stderr)
options = ort.SessionOptions()
options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
provider = 'OpenVINOExecutionProvider'
else:
print("OnnxJit is using CPU", file=sys.stderr)
options = ort.SessionOptions()
options.intra_op_num_threads = 4
options.inter_op_num_threads = 8
provider = 'CPUExecutionProvider'
ort_session = ort.InferenceSession(sys.argv[1], options)
ort_session.set_providers([provider], None)
run_loop(ort_session)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
bigquery/cloud-client/authenticate_service_account.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def main():
key_path = os.environ.get("GOOGLE_APPLICATION_CREDENTIALS")
# [START bigquery_client_json_credentials]
from google.cloud import bigquery
from google.oauth2 import service_account
# TODO(developer): Set key_path to the path to the service account key
# file.
# key_path = "path/to/service_account.json"
credentials = service_account.Credentials.from_service_account_file(
key_path,
scopes=["https://www.googleapis.com/auth/cloud-platform"],
)
client = bigquery.Client(
credentials=credentials,
project=credentials.project_id,
)
# [END bigquery_client_json_credentials]
return client
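# Hedged usage sketch (not part of the original sample): the object returned by
# main() is a regular google-cloud-bigquery client, so it can run queries directly.
def example_query(client):
    # Run a trivial query and print the single resulting row.
    for row in client.query("SELECT 1 AS x").result():
        print(row.x)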
if __name__ == "__main__":
main()
|
[] |
[] |
[
"GOOGLE_APPLICATION_CREDENTIALS"
] |
[]
|
["GOOGLE_APPLICATION_CREDENTIALS"]
|
python
| 1 | 0 | |
constant.py
|
import os
USERS = eval(os.environ['USERS'])
SERVER_KEY = os.environ['SERVER_KEY']
LOGIN_API = 'https://auth.bupt.edu.cn/authserver/login'
GET_API = 'https://app.bupt.edu.cn/ncov/wap/default/index'
REPORT_API = 'https://app.bupt.edu.cn/ncov/wap/default/save'
# When today's report has not yet been submitted, fill it in at https://app.bupt.edu.cn/ncov/wap/default/index:
# complete every field but do not submit; press F12 to open the developer console, and on the Console tab run console.log(vm.info) to obtain the information below. That information is then reported by default every day afterwards.
INFO = r"""{
"address":"北京市海淀区北太平庄街道北京邮电大学计算机学院北京邮电大学海淀校区",
"area":"北京市 海淀区",
"bztcyy":"",
"city":"北京市",
"csmjry":"0",
"fjqszgjdq":"",
"geo_api_info":"{\"type\":\"complete\",\"position\":{\"Q\":39.960390625,\"R\":116.356397569445,\"lng\":116.356398,\"lat\":39.960391},\"location_type\":\"html5\",\"message\":\"Get ipLocation failed.Get geolocation success.Convert Success.Get address success.\",\"accuracy\":23,\"isConverted\":true,\"status\":1,\"addressComponent\":{\"citycode\":\"010\",\"adcode\":\"110108\",\"businessAreas\":[{\"name\":\"北下关\",\"id\":\"110108\",\"location\":{\"Q\":39.955976,\"R\":116.33873,\"lng\":116.33873,\"lat\":39.955976}},{\"name\":\"西直门\",\"id\":\"110102\",\"location\":{\"Q\":39.942856,\"R\":116.34666099999998,\"lng\":116.346661,\"lat\":39.942856}},{\"name\":\"小西天\",\"id\":\"110108\",\"location\":{\"Q\":39.957147,\"R\":116.364058,\"lng\":116.364058,\"lat\":39.957147}}],\"neighborhoodType\":\"科教文化服务;学校;高等院校\",\"neighborhood\":\"北京邮电大学\",\"building\":\"北京邮电大学计算机学院\",\"buildingType\":\"科教文化服务;学校;高等院校\",\"street\":\"西土城路\",\"streetNumber\":\"10号\",\"country\":\"中国\",\"province\":\"北京市\",\"city\":\"\",\"district\":\"海淀区\",\"township\":\"北太平庄街道\"},\"formattedAddress\":\"北京市海淀区北太平庄街道北京邮电大学计算机学院北京邮电大学海淀校区\",\"roads\":[],\"crosses\":[],\"pois\":[],\"info\":\"SUCCESS\"}",
"glksrq":"",
"gllx":"",
"gtjzzchdfh":"",
"gtjzzfjsj":"",
"ismoved":"0",
"jcbhlx":"",
"jcbhrq":"",
"jchbryfs":"",
"jcjgqr":"0",
"jcwhryfs":"",
"jhfjhbcc":"",
"jhfjjtgj":"",
"jhfjrq":"",
"mjry":"0",
"province":"北京市",
"qksm":"",
"remark":"",
"sfcxtz":"0",
"sfcxzysx":"0",
"sfcyglq":"0",
"sfjcbh":"0",
"sfjchbry":"0",
"sfjcwhry":"0",
"sfjzdezxgym":"1",
"sfjzxgym":"1",
"sfsfbh":"0",
"sftjhb":"0",
"sftjwh":"0",
"sfxk":"0",
"sfygtjzzfj":"",
"sfyyjc":"0",
"sfzx":1,
"szcs":"",
"szgj":"",
"szsqsfybl":"0",
"tw":"2",
"xjzd":"",
"xkqq":"",
"xwxgymjzqk":"3",
"ymjzxgqk":"已接种",
"zgfxdq":"0"
}"""
REASONABLE_LENGTH = 24
TIMEOUT_SECOND = 25
class HEADERS:
REFERER_LOGIN_API = 'https://app.bupt.edu.cn/uc/wap/login'
REFERER_POST_API = 'https://app.bupt.edu.cn/ncov/wap/default/index'
ORIGIN_BUPTAPP = 'https://app.bupt.edu.cn'
UA = ('Mozilla/5.0 (iPhone; CPU iPhone OS 13_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) '
'Mobile/15E148 MicroMessenger/7.0.11(0x17000b21) NetType/4G Language/zh_CN')
ACCEPT_JSON = 'application/json'
ACCEPT_HTML = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
REQUEST_WITH_XHR = 'XMLHttpRequest'
ACCEPT_LANG = 'zh-cn'
CONTENT_TYPE_UTF8 = 'application/x-www-form-urlencoded; charset=UTF-8'
def __init__(self) -> None:
raise NotImplementedError
COMMON_HEADERS = {
'User-Agent': HEADERS.UA,
'Accept-Language': HEADERS.ACCEPT_LANG,
}
COMMON_POST_HEADERS = {
'Accept': HEADERS.ACCEPT_JSON,
'Origin': HEADERS.ORIGIN_BUPTAPP,
'X-Requested-With': HEADERS.REQUEST_WITH_XHR,
'Content-Type': HEADERS.CONTENT_TYPE_UTF8,
}
from typing import Optional
from abc import ABCMeta, abstractmethod
class INotifier(metaclass=ABCMeta):
@property
@abstractmethod
def PLATFORM_NAME(self) -> str:
"""
Set PLATFORM_NAME as a class variable holding the name of the notification platform (used for logging).
For example: PLATFORM_NAME = 'Telegram bot'
:return: the platform name
"""
@abstractmethod
def notify(self, *, success, msg, data, username, name) -> None:
"""
Notify the user of the operation result through this platform. Raises various exceptions on failure.
:param success: whether the operation succeeded
:param msg: the server response on success, or the failure reason on failure; None means there is no such content
:return: None
"""
|
[] |
[] |
[
"USERS",
"SERVER_KEY"
] |
[]
|
["USERS", "SERVER_KEY"]
|
python
| 2 | 0 | |
denoiser-multi-gpu.py
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import argparse
import numpy as np
import tensorflow as tf
import cv2
from scipy.misc import imread
import time
import os, random
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import functools
import itertools
import collections
import six
from tensorflow.python.platform import tf_logging as logging
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
from tensorflow.python.training import device_setter
from tensorflow.contrib.learn.python.learn import run_config
from PIL import Image #the bare "import Image" form only works with the legacy standalone PIL package
slim = tf.contrib.slim #For depthwise separable strided atrous convolutions
tf.logging.set_verbosity(tf.logging.DEBUG)
filters00 = 32
filters01 = 64
filters1 = 128
filters2 = 256
filters3 = 728
filters4 = 728
filters5 = 1024
filters6 = 1536
filters7 = 2048
numMiddleXception = 8
features0 = 64
features1 = 128
features2 = 256
features3 = 728
features4 = 728
aspp_filters = features4
aspp_output=256
aspp_size=32
aspp_rateSmall = 6
aspp_rateMedium = 12
aspp_rateLarge = 18
num_extra_blocks = 11
fc_features = 4096
#trainDir = "F:/stills_all/train/"
#valDir = "F:/stills_all/val/"
#testDir = "F:/stills_all/test/"
data_dir = "E:/stills_hq-mini/"
modelSavePeriod = 3. #Train timestep in hours
modelSavePeriod *= 3600 #Convert to s
model_dir = "//flexo.ads.warwick.ac.uk/Shared41/Microscopy/Jeffrey-Ede/models/denoiser-multi-gpu-13/"
shuffle_buffer_size = 5000
num_parallel_calls = 8
num_parallel_readers = 6
prefetch_buffer_size = 10
batch_size = 1
num_gpus = 1
def num_examples_per_epoch(subset='train'):
if subset == 'train':
return 65536
elif subset == 'validation':
return 4096
elif subset == 'eval':
return 16384
else:
raise ValueError('Invalid data subset "%s"' % subset)
#batch_size = 8 #Batch size to use during training
num_epochs = 1000000 #Dataset repeats indefinitely
logDir = "C:/dump/train/"
log_file = model_dir+"log.txt"
val_log_file = model_dir+"val_log.txt"
log_every = 1 #Log every _ examples
cumProbs = np.array([]) #Indices of the distribution plus 1 will correspond to means
numMeans = 64 // batch_size
scaleMean = 4 #Each means array index increment corresponds to this increase in the mean
numDynamicGrad = 1 #Number of gradients to calculate for each possible mean when dynamically updating training
lossSmoothingBoxcarSize = 5
#Dimensions of images in the dataset
height = width = 2048
channels = 1 #Greyscale input image
#Sidelength of images to feed the neural network
cropsize = 512
height_crop = width_crop = cropsize
#hparams = experiment_hparams(train_batch_size=batch_size, eval_batch_size=16)
weight_decay = 5.e-5
initial_learning_rate = 0.001
num_workers = 1
increase_batch_size_by_factor = 1
effective_batch_size = increase_batch_size_by_factor*batch_size
val_skip_n = 10
save_result_every_n_batches = 5000
def _tf_fspecial_gauss(size, sigma):
"""Function to mimic the 'fspecial' gaussian MATLAB function
"""
x_data, y_data = np.mgrid[-size//2 + 1:size//2 + 1, -size//2 + 1:size//2 + 1]
x_data = np.expand_dims(x_data, axis=-1)
x_data = np.expand_dims(x_data, axis=-1)
y_data = np.expand_dims(y_data, axis=-1)
y_data = np.expand_dims(y_data, axis=-1)
x = tf.constant(x_data, dtype=tf.float32)
y = tf.constant(y_data, dtype=tf.float32)
g = tf.exp(-((x**2 + y**2)/(2.0*sigma**2)))
return g / tf.reduce_sum(g)
def tf_ssim(img1, img2, cs_map=False, mean_metric=True, size=11, sigma=1.5):
window = _tf_fspecial_gauss(size, sigma) # window shape [size, size]
K1 = 0.01
K2 = 0.03
L = 1 # depth of image (255 in case the image has a different scale)
C1 = (K1*L)**2
C2 = (K2*L)**2
mu1 = tf.nn.conv2d(img1, window, strides=[1,1,1,1], padding='VALID')
mu2 = tf.nn.conv2d(img2, window, strides=[1,1,1,1], padding='VALID')
mu1_sq = mu1*mu1
mu2_sq = mu2*mu2
mu1_mu2 = mu1*mu2
sigma1_sq = tf.nn.conv2d(img1*img1, window, strides=[1,1,1,1],padding='VALID') - mu1_sq
sigma2_sq = tf.nn.conv2d(img2*img2, window, strides=[1,1,1,1],padding='VALID') - mu2_sq
sigma12 = tf.nn.conv2d(img1*img2, window, strides=[1,1,1,1],padding='VALID') - mu1_mu2
if cs_map:
value = (((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*
(sigma1_sq + sigma2_sq + C2)),
(2.0*sigma12 + C2)/(sigma1_sq + sigma2_sq + C2))
else:
value = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*
(sigma1_sq + sigma2_sq + C2))
if mean_metric:
value = tf.reduce_mean(value)
return value
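#For reference, tf_ssim above evaluates
#  SSIM(x, y) = ((2*mu_x*mu_y + C1)*(2*sigma_xy + C2)) / ((mu_x^2 + mu_y^2 + C1)*(sigma_x^2 + sigma_y^2 + C2))
#with the means/variances estimated under an 11x11 Gaussian window (sigma = 1.5).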
def tf_ms_ssim(img1, img2, mean_metric=True, level=5):
weight = tf.constant([0.0448, 0.2856, 0.3001, 0.2363, 0.1333], dtype=tf.float32)
mssim = []
mcs = []
for l in range(level):
ssim_map, cs_map = tf_ssim(img1, img2, cs_map=True, mean_metric=False)
mssim.append(tf.reduce_mean(ssim_map))
mcs.append(tf.reduce_mean(cs_map))
filtered_im1 = tf.nn.avg_pool(img1, [1,2,2,1], [1,2,2,1], padding='SAME')
filtered_im2 = tf.nn.avg_pool(img2, [1,2,2,1], [1,2,2,1], padding='SAME')
img1 = filtered_im1
img2 = filtered_im2
# list to tensor of dim D+1
mssim = tf.stack(mssim, axis=0) #tf.pack was renamed tf.stack in TensorFlow 1.0
mcs = tf.stack(mcs, axis=0)
value = (tf.reduce_prod(mcs[0:level-1]**weight[0:level-1])*
(mssim[level-1]**weight[level-1]))
if mean_metric:
value = tf.reduce_mean(value)
return value
def tf_median(v):
v = tf.reshape(v, [-1])
m = v.get_shape()[0]//2
return tf.nn.top_k(v, m).values[m-1]
##Modified aligned xception
def architecture(inputs, ground_truth, phase=False, params=None):
"""
Atrous convolutional encoder-decoder noise-removing network
phase - True during training
"""
#phase = mode == tf.estimator.ModeKeys.TRAIN #phase is true during training
concat_axis = 3
##Reusable blocks
def _batch_norm_fn(input):
batch_norm = tf.contrib.layers.batch_norm(
input,
center=True, scale=True,
is_training=False,
fused=True,
zero_debias_moving_mean=False,
renorm=False)
return batch_norm
def batch_then_activ(input):
batch_then_activ = _batch_norm_fn(input)
batch_then_activ = tf.nn.relu6(batch_then_activ)
return batch_then_activ
def conv_block_not_sep(input, filters, kernel_size=3, phase=phase):
"""
Convolution -> batch normalisation -> leaky relu
phase defaults to true, meaning that the network is being trained
"""
conv_block = slim.conv2d(
inputs=input,
num_outputs=filters,
kernel_size=kernel_size,
padding="SAME",
activation_fn=None)
conv_block = batch_then_activ(conv_block)
return conv_block
def conv_block(input, filters, phase=phase):
"""
Convolution -> batch normalisation -> leaky relu
phase defaults to true, meaning that the network is being trained
"""
conv_block = strided_conv_block(input, filters, 1, 1)
return conv_block
def strided_conv_block(input, filters, stride, rate=1, phase=phase,
extra_batch_norm=True):
strided_conv = slim.separable_convolution2d(
inputs=input,
num_outputs=filters,
kernel_size=3,
depth_multiplier=1,
stride=stride,
padding='SAME',
data_format='NHWC',
rate=rate,
activation_fn=None,#tf.nn.relu,
normalizer_fn=_batch_norm_fn if extra_batch_norm else None, #use None (not False) to disable the normalizer
normalizer_params=None,
weights_initializer=tf.contrib.layers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=tf.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None)
strided_conv = batch_then_activ(strided_conv)
return strided_conv
def deconv_block(input, filters, phase=phase):
'''Transpositionally convolute a feature space to upsample it'''
deconv_block = slim.conv2d_transpose(
inputs=input,
num_outputs=filters,
kernel_size=3,
stride=2,
padding="same",
activation_fn=None)
deconv_block = batch_then_activ(deconv_block)
return deconv_block
def aspp_block(input, phase=phase):
"""
Atrous spatial pyramid pooling
phase defaults to true, meaning that the network is being trained
"""
##Convolutions at multiple rates
conv1x1 = slim.conv2d(inputs=input,
num_outputs=aspp_filters,
kernel_size=1,
activation_fn=None,
padding="same")
conv1x1 = batch_then_activ(conv1x1)
conv3x3_rateSmall = strided_conv_block(input=input,
filters=aspp_filters,
stride=1,
rate=aspp_rateSmall)
conv3x3_rateSmall = batch_then_activ(conv3x3_rateSmall)
conv3x3_rateMedium = strided_conv_block(input=input,
filters=aspp_filters,
stride=1,
rate=aspp_rateMedium)
conv3x3_rateMedium = batch_then_activ(conv3x3_rateMedium)
conv3x3_rateLarge = strided_conv_block(input=input,
filters=aspp_filters,
stride=1,
rate=aspp_rateLarge)
conv3x3_rateLarge = batch_then_activ(conv3x3_rateLarge)
#Image-level features
pooling = tf.nn.pool(input=input,
window_shape=(2,2),
pooling_type="AVG",
padding="SAME",
strides=(2, 2))
#Use 1x1 convolutions to project into a feature space the same size as
#the atrous convolutions'
#pooling = slim.conv2d(
# inputs=pooling,
# num_outputs=aspp_filters,
# kernel_size=1,
# activation_fn=None,
# padding="SAME")
pooling = tf.image.resize_images(input, [aspp_size, aspp_size])
pooling = batch_then_activ(pooling)
#Concatenate the atrous and image-level pooling features
concatenation = tf.concat(
values=[conv1x1, conv3x3_rateSmall, conv3x3_rateMedium, conv3x3_rateLarge, pooling],
axis=concat_axis)
#Reduce the number of channels
reduced = slim.conv2d(
inputs=concatenation,
num_outputs=aspp_output,
kernel_size=1,
activation_fn=None,
padding="SAME")
reduced = batch_then_activ(reduced)
return reduced
def residual_conv(input, filters):
residual = slim.conv2d(
inputs=input,
num_outputs=filters,
kernel_size=1,
stride=2,
padding="SAME",
activation_fn=None)
residual = batch_then_activ(residual)
return residual
def xception_middle_block(input, features):
main_flow = strided_conv_block(
input=input,
filters=features,
stride=1)
main_flow = strided_conv_block(
input=main_flow,
filters=features,
stride=1)
main_flow = strided_conv_block(
input=main_flow,
filters=features,
stride=1)
return main_flow + input
'''Model building'''
input_layer = tf.reshape(inputs, [-1, cropsize, cropsize, channels])
#Encoding block 0
cnn0 = conv_block(
input=input_layer,
filters=features0)
cnn0_last = conv_block(
input=cnn0,
filters=features0)
cnn0_strided = strided_conv_block(
input=cnn0_last,
filters=features1,
stride=2)
residual0 = residual_conv(input_layer, features1)
cnn0_strided += residual0
#Encoding block 1
cnn1 = conv_block(
input=cnn0_strided,
filters=features1)
cnn1_last = conv_block(
input=cnn1,
filters=features1)
cnn1_strided = strided_conv_block(
input=cnn1_last,
filters=features1,
stride=2)
residual1 = residual_conv(cnn0_strided, features1)
cnn1_strided += residual1
#Encoding block 2
cnn2 = conv_block(
input=cnn1_strided,
filters=features2)
cnn2_last = conv_block(
input=cnn2,
filters=features2)
cnn2_strided = strided_conv_block(
input=cnn2_last,
filters=features2,
stride=2)
residual2 = residual_conv(cnn1_strided, features2)
cnn2_strided += residual2
#Encoding block 3
cnn3 = conv_block(
input=cnn2_strided,
filters=features3)
cnn3_last = conv_block(
input=cnn3,
filters=features3)
cnn3_strided = strided_conv_block(
input=cnn3_last,
filters=features3,
stride=2)
residual3 = residual_conv(cnn2_strided, features3)
cnn3_strided += residual3
#Encoding block 4
cnn4 = conv_block(
input=cnn3_strided,
filters=features4)
cnn4 = conv_block(
input=cnn4,
filters=features4)
cnn4_last = conv_block(
input=cnn4,
filters=features4)
cnn4_last += cnn3_strided
for _ in range(num_extra_blocks):
cnn4_last = xception_middle_block(cnn4_last, features4)
##Atrous spatial pyramid pooling
aspp = aspp_block(cnn4_last)
#Upsample the semantics by a factor of 4
#upsampled_aspp = tf.image.resize_bilinear(
# images=aspp,
# tf.shape(aspp)[1:3],
# align_corners=True)
##Decoding block 1 (deepest)
#deconv4 = conv_block(aspp, features4)
#deconv4 = conv_block(deconv4, features4)
#deconv4 = conv_block(deconv4, features4)
##Decoding block 2
#deconv4to3 = deconv_block(deconv4, features4)
#concat3 = tf.concat(
# values=[deconv4to3, cnn3_last],
# axis=concat_axis)
#deconv3 = conv_block(concat3, features3)
#deconv3 = conv_block(deconv3, features3)
#deconv3 = conv_block(deconv3, features3)
deconv3 = tf.image.resize_images(aspp, [aspp_size*4, aspp_size*4])
#Decoding block 3
concat2 = tf.concat(
values=[deconv3, cnn1_strided],
axis=concat_axis)
deconv2 = conv_block(concat2, features2)
deconv2 = conv_block(deconv2, features2)
residual2_d = conv_block_not_sep(concat2, features2, 1)
deconv2 += residual2_d
deconv2to1 = deconv_block(deconv2, features2)
#Decoding block 4
concat1 = tf.concat(
values=[deconv2to1, cnn0_strided],
axis=concat_axis)
deconv1 = conv_block(concat1, features1)
deconv1 = conv_block(deconv1, features1)
residual1_d = conv_block_not_sep(concat1, features1, 1)
deconv1 += residual1_d
deconv1to0 = deconv_block(deconv1, features1)
#Decoding block 5
#concat0 = tf.concat(
# values=[deconv1to0, cnn0_last],
# axis=concat_axis)
deconv0 = conv_block(deconv1to0, features0)
deconv0 = conv_block(deconv0, features0)
residual0_d = conv_block_not_sep(deconv1to0, features0, 1)
deconv0 += residual0_d
#Create final image with 1x1 convolutions
deconv_final = conv_block_not_sep(deconv0, 1)
#Image values will be between 0 and 1
#output = tf.clip_by_value(
# deconv_final,
# clip_value_min=-0.1,
# clip_value_max=1.1,
# name='clipper')
output = deconv_final
return output
##########################################################################################################
class ExamplesPerSecondHook(session_run_hook.SessionRunHook):
"""Hook to print out examples per second.
Total time is tracked and then divided by the total number of steps
to get the average step time and then batch_size is used to determine
the running average of examples per second. The examples per second for the
most recent interval is also logged.
"""
def __init__(
self,
batch_size,
every_n_steps=100,
every_n_secs=None,):
"""Initializer for ExamplesPerSecondHook.
Args:
batch_size: Total batch size used to calculate examples/second from
global time.
every_n_steps: Log stats every n steps.
every_n_secs: Log stats every n seconds.
"""
if (every_n_steps is None) == (every_n_secs is None):
raise ValueError('exactly one of every_n_steps and every_n_secs should be provided.')
self._timer = basic_session_run_hooks.SecondOrStepTimer(
every_steps=every_n_steps, every_secs=every_n_secs)
self._step_train_time = 0
self._total_steps = 0
self._batch_size = batch_size
def begin(self):
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError(
'Global step should be created to use StepCounterHook.')
def before_run(self, run_context): # pylint: disable=unused-argument
return basic_session_run_hooks.SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
_ = run_context
global_step = run_values.results
if self._timer.should_trigger_for_step(global_step):
elapsed_time, elapsed_steps = self._timer.update_last_triggered_step(
global_step)
if elapsed_time is not None:
steps_per_sec = elapsed_steps / elapsed_time
self._step_train_time += elapsed_time
self._total_steps += elapsed_steps
average_examples_per_sec = self._batch_size * (
self._total_steps / self._step_train_time)
current_examples_per_sec = steps_per_sec * self._batch_size
# Average examples/sec followed by current examples/sec
logging.info('%s: %g (%g), step = %g', 'Average examples/sec',
average_examples_per_sec, current_examples_per_sec,
self._total_steps)
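# Worked example: with batch_size = 1, 100 elapsed steps completing in 20 s give
# steps_per_sec = 100 / 20 = 5, so current_examples_per_sec = 5 * 1 = 5.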
def local_device_setter(num_devices=1,
ps_device_type='cpu',
worker_device='/cpu:0',
ps_ops=None,
ps_strategy=None):
if ps_ops is None:
ps_ops = ['Variable', 'VariableV2', 'VarHandleOp']
if ps_strategy is None:
ps_strategy = device_setter._RoundRobinStrategy(num_devices)
if not six.callable(ps_strategy):
raise TypeError("ps_strategy must be callable")
def _local_device_chooser(op):
current_device = pydev.DeviceSpec.from_string(op.device or "")
node_def = op if isinstance(op, node_def_pb2.NodeDef) else op.node_def
if node_def.op in ps_ops:
ps_device_spec = pydev.DeviceSpec.from_string(
'/{}:{}'.format(ps_device_type, ps_strategy(op)))
ps_device_spec.merge_from(current_device)
return ps_device_spec.to_string()
else:
worker_device_spec = pydev.DeviceSpec.from_string(worker_device or "")
worker_device_spec.merge_from(current_device)
return worker_device_spec.to_string()
return _local_device_chooser
def get_model_fn(num_gpus, variable_strategy, num_workers):
"""Returns a function that will build the model."""
def _model_fn(features, labels=None, mode=None, params=None):
"""Model body.
Support single host, one or more GPU training. Parameter distribution can
be either one of the following scheme.
1. CPU is the parameter server and manages gradient updates.
2. Parameters are distributed evenly across all GPUs, and the first GPU
manages gradient updates.
Args:
features: a list of tensors, one for each tower
mode: ModeKeys.TRAIN or EVAL
params: Hyperparameters suitable for tuning
Returns:
An EstimatorSpec object.
"""
is_training = mode#(mode == tf.estimator.ModeKeys.TRAIN)
momentum = params.momentum
tower_features = features
tower_labels = labels
tower_losses = []
tower_grads = []
tower_preds = []
tower_mses = []
# channels first (NCHW) is normally optimal on GPU and channels last (NHWC)
# on CPU. The exception is Intel MKL on CPU which is optimal with
# channels_last.
data_format = params.data_format
if not data_format:
if num_gpus == 0:
data_format = 'channels_last'
else:
data_format = 'channels_first'
if num_gpus == 0:
num_devices = 1
device_type = 'cpu'
else:
num_devices = num_gpus
device_type = 'gpu'
for i in range(num_devices):
worker_device = '/{}:{}'.format(device_type, i)
if variable_strategy == 'CPU':
device_setter = local_device_setter(
worker_device=worker_device)
elif variable_strategy == 'GPU':
device_setter = local_device_setter(
ps_device_type='gpu',
worker_device=worker_device,
ps_strategy=tf.contrib.training.GreedyLoadBalancingStrategy(
num_gpus, tf.contrib.training.byte_size_load_fn))
with tf.variable_scope('nn', reuse=bool(i != 0)):
with tf.name_scope('tower_%d' % i) as name_scope:
with tf.device(device_setter):
loss, grads, preds, mse = _tower_fn(
is_training, tower_features[i], tower_labels[i])
tower_losses.append(loss)
tower_grads.append(grads)
tower_preds.append(preds)
tower_mses.append(mse)
if i == 0:
# Only trigger batch_norm moving mean and variance update from
# the 1st tower. Ideally, we should grab the updates from all
# towers but these stats accumulate extremely fast so we can
# ignore the other stats from the other towers without
# significant detriment.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, name_scope)
_tower_losses_tmp = tf.tuple(tower_losses)
_tower_mses_tmp = tf.tuple(tower_mses)
_tower_preds_tmp = tf.stack(preds)
return [_tower_losses_tmp, _tower_preds_tmp, _tower_mses_tmp, update_ops] + tower_grads
return _model_fn
#def xception_loss(stack):
# #Similarity
# sim = 0.
# for i in range(0, stack.shape[0], 2):
# prod = u2 = v2 = 0.
# for u, v in zip(stack[i], stack[i+1]):
# prod += u*v
# u2 += u*u
# v2 += v*v
# cos_theta = (np.abs(prod) / (np.sqrt(u2)*np.sqrt(v2))).clip(0., 1.)
# sim += np.arccos(cos_theta)
# sim /= stack.shape[0]/2
# #Dissimilarity
# dissim = []
# count = 0
# for i in range(stack.shape[0]):
# for j in range(i+2, stack.shape[0]):
# count += 1
# prod = u2 = v2 = 0.
# for u, v in zip(stack[i], stack[j]):
# prod += u*v
# u2 += u*u
# v2 += v*v
# cos_theta = (np.abs(prod) / (np.sqrt(u2)*np.sqrt(v2))).clip(0., 1.)
# dissim.append(np.arccos(cos_theta))
# dissim = np.median(dissim)
# loss = np.array([np.pi + sim - dissim], dtype=np.float32) #Offset to avoid negative losses
# return loss if np.isfinite(loss) else np.pi/2
def _tower_fn(is_training, feature, ground_truth):
"""Build computation tower.
Args:
is_training: true if is training graph.
feature: a Tensor.
Returns:
A tuple with the loss for the tower, the gradients and parameters, and
predictions.
"""
#phase = tf.estimator.ModeKeys.TRAIN if is_training else tf.estimator.ModeKeys.EVAL
output = architecture(feature[0], ground_truth[0], is_training)
model_params = tf.trainable_variables()
tower_pred = output
out = tf.reshape(output, [-1, cropsize, cropsize, channels])
truth = tf.reshape(ground_truth[0], [-1, cropsize, cropsize, channels])
mse = tf.reduce_mean(tf.losses.mean_squared_error(out, truth))
mse = tf.reshape(tf.cond(mse < 0.001, lambda: 1000.*mse, lambda: tf.sqrt(1000.*mse)), [1])
#ssim_loss = 2.5*(1.0-tf_ssim(out, truth)) #Don't need to unstack for batch size of 2
tower_loss = mse# + ssim_loss #tf.reduce_max(tf.concat(mse_loss_set, axis=0)) +
tower_loss += weight_decay * tf.add_n(
[tf.nn.l2_loss(v) for v in model_params])
tower_loss = tf.reshape(tower_loss, (1,))
tower_grad = tf.gradients(tower_loss, model_params)
output_clipped = tf.clip_by_value(
output,
clip_value_min=0.,
clip_value_max=1.,
name='clipper-user_mse')
out_clipped = tf.reshape(output_clipped, [-1, cropsize, cropsize, channels])
mse_for_trainer = tf.reduce_mean(tf.losses.mean_squared_error(out_clipped, truth))
return tower_loss, tower_grad, tower_pred, mse_for_trainer
def get_scale():
return 25.+np.random.exponential(75.)
def gen_lq(img, scale, img_type=np.float32):
'''Generate low quality image'''
#Ensure that the seed is random
np.random.seed(int(np.random.rand()*(2**32-1)))
#Adjust the image scale so that the image has the
# correct average counts
lq = np.random.poisson( img * scale )
return scale0to1(lq).astype(img_type)
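#Hedged usage sketch: pair a clean crop with a simulated low-dose counterpart
# (the file path is illustrative):
# clean = scale0to1(load_image("some_micrograph.tif", (cropsize, cropsize)))
# noisy = gen_lq(clean, scale=get_scale())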
def load_image(addr, resizeSize=None, imgType=np.float32):
"""Read an image and make sure it is of the correct type. Optionally resize it"""
try:
img = imread(addr, mode='F')
except:
img = 0.5*np.ones((512,512))
print("Image read failed")
if resizeSize:
img = cv2.resize(img, resizeSize, interpolation=cv2.INTER_AREA)
return img.astype(imgType)
def scale0to1(img):
"""Rescale image between 0 and 1"""
min = np.min(img)
max = np.max(img)
if min == max:
img.fill(0.5)
else:
img = (img-min) / (max-min)
return img.astype(np.float32)
def flip_rotate(img):
"""Applies a random flip || rotation to the image, possibly leaving it unchanged"""
choice = int(8*np.random.rand())
if choice == 0:
return img
if choice == 1:
return np.rot90(img, 1)
if choice == 2:
return np.rot90(img, 2)
if choice == 3:
return np.rot90(img, 3)
if choice == 4:
return np.flip(img, 0)
if choice == 5:
return np.flip(img, 1)
if choice == 6:
return np.flip(np.rot90(img, 1), 0)
if choice == 7:
return np.flip(np.rot90(img, 1), 1)
def preprocess(img):
img[np.isnan(img)] = 0.5
img[np.isinf(img)] = 0.5
return scale0to1(flip_rotate(img))
def record_parser(record):
"""Parse files and generate lower quality images from them"""
img = load_image(record)
img = preprocess(img)
lq = gen_lq(img, scale=get_scale())
rescaled_img = (np.mean(lq)/np.mean(img))*img
return lq, rescaled_img
def data_from_data(img, mean):
img = flip_rotate(img.reshape(cropsize, cropsize))
lq = np.reshape(gen_lq(img, scale=get_scale()),
(1, cropsize, cropsize, 1))
rescaled_img = np.reshape((np.mean(lq)/mean)*img,
(1, cropsize, cropsize, 1))
return lq, rescaled_img
def reshaper(img1, img2):
img1 = tf.reshape(img1, [cropsize, cropsize, channels])
img2 = tf.reshape(img2, [cropsize, cropsize, channels])
return img1, img2
def input_fn(dir, subset, batch_size, num_shards):
"""Create a dataset from a list of filenames and shard batches from it"""
with tf.device('/cpu:0'):
dataset = tf.data.Dataset.list_files(dir+subset+"/"+"*.tif")
dataset = dataset.shuffle(buffer_size=shuffle_buffer_size)
dataset = dataset.repeat(num_epochs)
dataset = dataset.map(
lambda file: tf.py_func(record_parser, [file], [tf.float32, tf.float32]),
num_parallel_calls=num_parallel_calls)
#print(dataset.output_shapes, dataset.output_types)
dataset = dataset.map(reshaper, num_parallel_calls=num_parallel_calls)
#print(dataset.output_shapes, dataset.output_types)
dataset = dataset.batch(batch_size=batch_size)
dataset = dataset.prefetch(buffer_size=prefetch_buffer_size)
iter = dataset.make_one_shot_iterator()
img_batch = iter.get_next()
if num_shards <= 1:
# No GPU available or only 1 GPU.
return [img_batch[0]], [img_batch[1]]
else:
image_batch = tf.unstack(img_batch, num=batch_size, axis=1)
feature_shards = [[] for i in range(num_shards)]
feature_shards_truth = [[] for i in range(num_shards)]
for i in range(batch_size):
idx = i % num_shards
tensors = tf.unstack(image_batch[i], num=2, axis=0)
feature_shards[idx].append(tensors[0])
feature_shards_truth[idx].append(tensors[1])
feature_shards = [tf.parallel_stack(x) for x in feature_shards]
feature_shards_truth = [tf.parallel_stack(x) for x in feature_shards_truth]
return feature_shards, feature_shards_truth
def disp(img):
cv2.namedWindow('CV_Window', cv2.WINDOW_NORMAL)
cv2.imshow('CV_Window', scale0to1(img))
cv2.waitKey(0)
return
def get_experiment_fn(data_dir,
num_gpus,
variable_strategy):
"""Returns an experiment function
Experiments perform training on several workers in parallel,
in other words experiments know how to invoke train and eval in a sensible
fashion for distributed training. Arguments passed directly to this
function are not tunable, all other arguments should be passed within
tf.HParams, passed to the enclosed function.
Args:
data_dir: str. Location of the data for input_fns.
num_gpus: int. Number of GPUs on each worker.
variable_strategy: String. CPU to use CPU as the parameter server
and GPU to use the GPUs as the parameter server.
Returns:
A function (tf.estimator.RunConfig, tf.contrib.training.HParams) ->
tf.contrib.learn.Experiment.
Suitable for use by tf.contrib.learn.learn_runner, which will run various
methods on Experiment (train, evaluate) based on information
about the current runner in `run_config`.
"""
def _experiment_fn(run_config, hparams):
"""Returns an Experiment."""
# Create estimator.
train_input_fn = functools.partial(
input_fn,
data_dir,
subset='train',
num_shards=num_gpus,
batch_size=hparams.train_batch_size)
eval_input_fn = functools.partial(
input_fn,
data_dir,
subset='eval',
batch_size=hparams.eval_batch_size,
num_shards=num_gpus)
num_eval_examples = num_examples_per_epoch('eval')
if num_eval_examples % hparams.eval_batch_size != 0:
print(num_eval_examples, hparams.eval_batch_size)
raise ValueError(
'validation set size must be a multiple of eval_batch_size')
train_steps = hparams.train_steps
eval_steps = num_eval_examples // hparams.eval_batch_size
model = tf.estimator.Estimator(
model_fn=get_model_fn(num_gpus, variable_strategy,
run_config.num_worker_replicas or 1),
config=run_config,
params=hparams)
# Create experiment.
return tf.contrib.learn.Experiment(
model,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
train_steps=train_steps,
eval_steps=eval_steps)
return _experiment_fn
class RunConfig(tf.contrib.learn.RunConfig):
def uid(self, whitelist=None):
"""Generates a 'Unique Identifier' based on all internal fields.
Caller should use the uid string to check `RunConfig` instance integrity
in one session use, but should not rely on the implementation details, which
is subject to change.
Args:
whitelist: A list of the string names of the properties uid should not
include. If `None`, defaults to `_DEFAULT_UID_WHITE_LIST`, which
includes most properties the user is allowed to change.
Returns:
A uid string.
"""
if whitelist is None:
whitelist = run_config._DEFAULT_UID_WHITE_LIST
state = {k: v for k, v in self.__dict__.items() if not k.startswith('__')}
# Pop out the keys in whitelist.
for k in whitelist:
state.pop('_' + k, None)
ordered_state = collections.OrderedDict(
sorted(state.items(), key=lambda t: t[0]))
# For class instance without __repr__, some special cares are required.
# Otherwise, the object address will be used.
if '_cluster_spec' in ordered_state:
ordered_state['_cluster_spec'] = collections.OrderedDict(
sorted(ordered_state['_cluster_spec'].as_dict().items(), key=lambda t: t[0]))
return ', '.join(
'%s=%r' % (k, v) for (k, v) in six.iteritems(ordered_state))
def _train_op(tower_losses_ph, tower_mses_ph, variable_strategy, update_ops,
learning_rate_ph, **kwargs):
tower_losses = tf.unstack(tower_losses_ph, effective_batch_size)
tower_mses = tf.unstack(tower_mses_ph, effective_batch_size)
#tower_losses = [tower_loss_ph for tower_loss_ph in tower_losses_ph]
#tower_mses = [tower_mse_ph for tower_mse_ph in tower_mses_ph]
#tower_grads = [ for tower_loss in tower_losses]
tower_params = tf.trainable_variables()
tower_gradvars = []
for tower_grad in kwargs['_tower_grads']:
tower_gradvars.append(zip(tower_grad, tower_params))
# Now compute global loss and gradients.
gradvars = []
with tf.name_scope('gradient_averaging'):
all_grads = {}
for grad, var in itertools.chain(*tower_gradvars):
if grad is not None:
all_grads.setdefault(var, []).append(grad)
for var, grads in six.iteritems(all_grads):
# Average gradients on the same device as the variables
# to which they apply.
with tf.device(var.device):
if len(grads) == 1:
avg_grad = grads[0]
else:
avg_grad = tf.multiply(tf.add_n(grads), 1. / len(grads))
gradvars.append((avg_grad, var))
global_step = tf.train.get_global_step()
# Device that runs the ops to apply global gradient updates.
consolidation_device = '/gpu:0' if variable_strategy == 'GPU' else '/cpu:0'
with tf.device(consolidation_device):
#decay_steps = 3000000
#learning_rate_decay_factor = 0.9
#learning_rate = tf.train.exponential_decay(
# learning_rate = initial_learning_rate,
# global_step = global_step,
# decay_steps = decay_steps,
# decay_rate = learning_rate_decay_factor,
# staircase = True)
#learning_rate = initial_learning_rate
get_loss = tf.reduce_mean(tower_losses, name='loss')
get_loss_mse = tf.reduce_mean(tower_mses, name='loss_mse')
#optimizer = tf.train.RMSPropOptimizer(learning_rate_ph, momentum=0.9)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate_ph, beta1=0.5)
#with tf.device("/cpu:0"):
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) #For batch normalisation windows
with tf.control_dependencies(update_ops):
# Create single grouped train op
train_op = [
optimizer.apply_gradients(
gradvars, global_step=global_step)
]
train_op.extend(update_ops)
train_op = tf.group(*train_op)
return train_op, get_loss, get_loss_mse
def main(job_dir, data_dir, variable_strategy, num_gpus, log_device_placement,
num_intra_threads, **hparams):
temp = set(tf.all_variables())
with open(log_file, 'a') as log:
log.flush()
with open(val_log_file, 'a') as val_log:
val_log.flush()
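# NOTE: the two "with" blocks above close the log files immediately, so the later
# log.write()/val_log.write() calls fail and are silently swallowed by their try/except blocks.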
# The env variable is on deprecation path, default is set to off.
os.environ['TF_SYNC_ON_FINISH'] = '0'
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
# Session configuration.
log_device_placement = False #Once placement is correct, this fills up too much of the cmd window...
sess_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=log_device_placement,
intra_op_parallelism_threads=num_intra_threads,
gpu_options=tf.GPUOptions(force_gpu_compatible=True))
config = RunConfig(
session_config=sess_config, model_dir=job_dir)
hparams=tf.contrib.training.HParams(
is_chief=config.is_chief,
**hparams)
img, img_truth = input_fn(data_dir, 'train', batch_size, num_gpus)
img_val, img_val_truth = input_fn(data_dir, 'val', batch_size, num_gpus)
with tf.Session(config=sess_config) as sess: #Alternative is tf.train.MonitoredTrainingSession()
sess.run(tf.initialize_variables(set(tf.all_variables())-temp))
#sess.run( tf.global_variables_initializer())
temp = set(tf.all_variables())
____img, ____img_truth = sess.run([img, img_truth])
img_ph = [tf.placeholder(tf.float32, shape=i.shape, name='img') for i in ____img]
img_truth_ph = [tf.placeholder(tf.float32, shape=i.shape, name='img_truth') for i in ____img_truth]
del ____img
del ____img_truth
is_training = True
model_fn = get_model_fn(num_gpus, variable_strategy, num_workers)
results = model_fn(img_ph, img_truth_ph, mode=is_training, params=hparams)
_tower_losses = results[0]
_tower_preds = results[1]
_tower_mses = results[2]
update_ops = results[3]
tower_grads = results[4:(4+batch_size)]
tower_losses_ph = tf.placeholder(tf.float32, shape=(effective_batch_size,), name='tower_losses')
tower_mses_ph = tf.placeholder(tf.float32, shape=(effective_batch_size,), name='tower_mses')
learning_rate_ph = tf.placeholder(tf.float32, name='learning_rate')
sess.run(tf.initialize_variables(set(tf.all_variables()) - temp))
temp = set(tf.all_variables())
mini_batch_dict = {}
for i in range(batch_size):
_img, _img_truth = sess.run([img[i], img_truth[i]])
mini_batch_dict.update({img_ph[i]: _img})
mini_batch_dict.update({img_truth_ph[i]: _img_truth})
gradvars_pry = sess.run(tower_grads, feed_dict=mini_batch_dict)
del mini_batch_dict
tower_grads_ph = [[tf.placeholder(tf.float32, shape=t.shape, name='tower_grads')
for t in gradvars_pry[0]]
for _ in range(effective_batch_size)]
del gradvars_pry
train_op, get_loss, get_loss_mse = _train_op(tower_losses_ph, tower_mses_ph,
variable_strategy, update_ops, learning_rate_ph,
_tower_grads=tower_grads_ph)
sess.run(tf.initialize_variables(set(tf.all_variables()) - temp))
train_writer = tf.summary.FileWriter( logDir, sess.graph )
#print(tf.all_variables())
saver = tf.train.Saver()
saver.restore(sess, tf.train.latest_checkpoint("//flexo.ads.warwick.ac.uk/Shared41/Microscopy/Jeffrey-Ede/models/denoiser-multi-gpu-13/model/"))
learning_rate = 0.001
counter = 113823
cycleNum = 0
while True:
cycleNum += 1
#Train for a couple of hours
time0 = time.time()
try:
with open(model_dir+"learning_rate.txt", "r") as lrf:
learning_rate_list = [float(line) for line in lrf]
learning_rate = np.float32(learning_rate_list[0])
print("Using learning rate: {}".format(learning_rate))
except:
pass
while time.time()-time0 < modelSavePeriod:
counter += 1
tower_losses_list = []
tower_preds_list = []
tower_mses_list = []
ph_dict = {}
j = 0
for incr in range(increase_batch_size_by_factor):
mini_batch_dict = {}
for i in range(batch_size):
__img, __img_truth = sess.run([img[i], img_truth[i]])
#__img, __img_truth = sess.run([img[i], img_truth[i]])
mini_batch_dict.update({img_ph[i]: __img})
mini_batch_dict.update({img_truth_ph[i]: __img_truth})
mini_batch_results = sess.run([_tower_losses, _tower_preds, _tower_mses] +
tower_grads,
feed_dict=mini_batch_dict)
tower_losses_list += [x for x in mini_batch_results[0]]
#disp(mini_batch_results[1][0].reshape(cropsize, cropsize).astype(np.float32).clip(0.,1.))
#disp(__img_truth[0].reshape(cropsize, cropsize).astype(np.float32))
#_save_loc = model_dir+"example-"
#floor = 11
#for m, imgi in enumerate(mini_batch_results[1]):
# #print(__img)
# #print(imgi)
# #disp(__img.reshape(cropsize, cropsize))
# #disp(imgi.reshape(cropsize, cropsize))
# Image.fromarray(__img.reshape(cropsize, cropsize)).save( _save_loc+str(floor+m)+".tif")
# Image.fromarray(imgi.reshape(cropsize, cropsize)).save( _save_loc+str(floor+m)+"_restored.tif")
# Image.fromarray(__img_truth.reshape(cropsize, cropsize)).save( _save_loc+str(floor+m)+"_truth.tif")
#tower_preds_list += [x for x in mini_batch_results[1]]
tower_mses_list += [x for x in mini_batch_results[2]]
for i in range(3, 3+batch_size):
ph_dict.update({ph: val for ph, val in
zip(tower_grads_ph[j],
mini_batch_results[i])})
j += 1
feed_list = np.asarray(tower_losses_list)
feed_list.shape = (1,)
ph_dict.update({tower_losses_ph: feed_list,
tower_mses_ph: np.asarray(tower_mses_list),
learning_rate_ph: learning_rate,
img_ph[0]: __img,
#img_ph[1]: __img,
img_truth_ph[0]: __img_truth})#,
#img_truth_ph[1]: __img_truth})
del tower_losses_list
#del tower_preds_list
del tower_mses_list
if counter <= 1 or not counter % save_result_every_n_batches:
try:
save_input_loc = model_dir+"input-"+str(counter)+".tif"
save_truth_loc = model_dir+"truth-"+str(counter)+".tif"
save_output_loc = model_dir+"output-"+str(counter)+".tif"
Image.fromarray(__img.reshape(cropsize, cropsize).astype(np.float32)).save( save_input_loc )
Image.fromarray(__img_truth.reshape(cropsize, cropsize).astype(np.float32)).save( save_truth_loc )
Image.fromarray(mini_batch_results[1][0].reshape(cropsize, cropsize).astype(np.float32)).save( save_output_loc )
except:
print("Image save failed")
#for i in range(effective_batch_size):
# ph_dict.update({ph: val for ph, val in
# zip(tower_grads_ph[i],
# tower_grads_list[i])})
_, actual_loss, loss_value = sess.run([train_op, get_loss, get_loss_mse],
feed_dict=ph_dict)
del ph_dict
try:
log.write("Iter: {}, Loss: {:.8f}".format(counter, float(loss_value)))
except:
print("Failed to write to log")
if not counter % val_skip_n:
mini_batch_dict = {}
for i in range(batch_size):
___img, ___img_truth = sess.run([img_val[i], img_val_truth[i]])
mini_batch_dict.update({img_ph[i]: ___img})
mini_batch_dict.update({img_truth_ph[i]: ___img_truth})
mini_batch_results = sess.run([_tower_losses, _tower_preds, _tower_mses]+
tower_grads,
feed_dict=mini_batch_dict)
val_loss = np.mean(np.asarray([x for x in mini_batch_results[2]]))
try:
val_log.write("Iter: {}, Loss: {:.8f}".format(counter, float(val_loss)))
except:
print("Failed to write to val log")
print("Iter: {}, MSE Loss: {:.6f}, Loss: {:.6f}, Val loss: {:.6f}".format(
counter, loss_value, actual_loss, val_loss))
else:
print("Iter: {}, MSE Loss: {:.6f}, Loss: {:.6f}".format(
counter, loss_value, actual_loss))
#train_writer.add_summary(summary, counter)
#Save the model
saver.save(sess, save_path=model_dir+"model/", global_step=counter)
#tf.saved_model.simple_save(
# session=sess,
# export_dir=model_dir+"model-"+str(counter)+"/",
# inputs={"img": img[0][0]},
# outputs={"prediction": prediction})
return
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--data-dir',
type=str,
default=data_dir,
help='The directory where the CIFAR-10 input data is stored.')
parser.add_argument(
'--job-dir',
type=str,
default=model_dir,
help='The directory where the model will be stored.')
parser.add_argument(
'--variable-strategy',
choices=['CPU', 'GPU'],
type=str,
default='GPU',
help='Where to locate variable operations')
parser.add_argument(
'--num-gpus',
type=int,
default=num_gpus,
help='The number of gpus used. Uses only CPU if set to 0.')
parser.add_argument(
'--log-device-placement',
action='store_true',
default=True,
help='Whether to log device placement.')
parser.add_argument(
'--num-intra-threads',
type=int,
default=0,
help="""\
Number of threads to use for intra-op parallelism. When training on CPU
set to 0 to have the system pick the appropriate number or alternatively
set it to the number of physical CPU cores.\
""")
parser.add_argument(
'--train-steps',
type=int,
default=80000,
help='The number of steps to use for training.')
parser.add_argument(
'--train-batch-size',
type=int,
default=batch_size,
help='Batch size for training.')
parser.add_argument(
'--eval-batch-size',
type=int,
default=batch_size,
help='Batch size for validation.')
parser.add_argument(
'--momentum',
type=float,
default=0.9,
help='Momentum for MomentumOptimizer.')
parser.add_argument(
'--learning-rate',
type=float,
default=0.1,
help="""\
This is the initial learning rate value. The learning rate will decrease
during training. For more details check the model_fn implementation in
this file.\
""")
parser.add_argument(
'--sync',
action='store_true',
default=False,
help="""\
If present when running in a distributed environment, training will run in sync mode.\
""")
parser.add_argument(
'--num-inter-threads',
type=int,
default=0,
help="""\
Number of threads to use for inter-op parallelism. If set to 0, the
system will pick an appropriate number.\
""")
parser.add_argument(
'--data-format',
type=str,
default="NHWC",
help="""\
If not set, the data format best for the training device is used.
Allowed values: channels_first (NCHW), channels_last (NHWC).\
""")
parser.add_argument(
'--batch-norm-decay',
type=float,
default=0.997,
help='Decay for batch norm.')
parser.add_argument(
'--batch-norm-epsilon',
type=float,
default=1e-5,
help='Epsilon for batch norm.')
args = parser.parse_args()
if args.num_gpus > 0:
assert tf.test.is_gpu_available(), "Requested GPUs but none found."
if args.num_gpus < 0:
raise ValueError(
'Invalid GPU count: \"--num-gpus\" must be 0 or a positive integer.')
if args.num_gpus == 0 and args.variable_strategy == 'GPU':
raise ValueError('num-gpus=0, CPU must be used as parameter server. Set '
'--variable-strategy=CPU.')
if args.num_gpus != 0 and args.train_batch_size % args.num_gpus != 0:
raise ValueError('--train-batch-size must be multiple of --num-gpus.')
if args.num_gpus != 0 and args.eval_batch_size % args.num_gpus != 0:
raise ValueError('--eval-batch-size must be multiple of --num-gpus.')
main(**vars(args))
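# A hypothetical invocation sketch (the script filename is a placeholder; flag names
# are taken from the argparse definitions above, and values are illustrative only):
#   python train_denoiser.py --num-gpus=2 --variable-strategy=GPU \
#       --train-batch-size=2 --data-dir=/path/to/data --job-dir=/path/to/model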
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES",
"TF_ENABLE_WINOGRAD_NONFUSED",
"TF_SYNC_ON_FINISH"
] |
[]
|
["CUDA_VISIBLE_DEVICES", "TF_ENABLE_WINOGRAD_NONFUSED", "TF_SYNC_ON_FINISH"]
|
python
| 3 | 0 | |
redash/__init__.py
|
import sys
import logging
import urlparse
import urllib
import redis
from flask import Flask, current_app
from flask_sslify import SSLify
from werkzeug.contrib.fixers import ProxyFix
from werkzeug.routing import BaseConverter
from statsd import StatsClient
from flask_mail import Mail
from flask_limiter import Limiter
from flask_limiter.util import get_ipaddr
from flask_migrate import Migrate
from redash import settings
from redash.query_runner import import_query_runners
from redash.destinations import import_destinations
__version__ = '7.0.0-beta'
import os
if os.environ.get("REMOTE_DEBUG"):
import ptvsd
ptvsd.enable_attach(address=('0.0.0.0', 5678))
def setup_logging():
handler = logging.StreamHandler(sys.stdout if settings.LOG_STDOUT else sys.stderr)
formatter = logging.Formatter(settings.LOG_FORMAT)
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
logging.getLogger().setLevel(settings.LOG_LEVEL)
# Make noisy libraries less noisy
if settings.LOG_LEVEL != "DEBUG":
logging.getLogger("passlib").setLevel("ERROR")
logging.getLogger("requests.packages.urllib3").setLevel("ERROR")
logging.getLogger("snowflake.connector").setLevel("ERROR")
logging.getLogger('apiclient').setLevel("ERROR")
def create_redis_connection():
logging.debug("Creating Redis connection (%s)", settings.REDIS_URL)
redis_url = urlparse.urlparse(settings.REDIS_URL)
if redis_url.scheme == 'redis+socket':
qs = urlparse.parse_qs(redis_url.query)
if 'virtual_host' in qs:
db = qs['virtual_host'][0]
else:
db = 0
client = redis.StrictRedis(unix_socket_path=redis_url.path, db=db)
else:
if redis_url.path:
redis_db = redis_url.path[1]
else:
redis_db = 0
# Redis passwords might be quoted with special characters
redis_password = redis_url.password and urllib.unquote(redis_url.password)
client = redis.StrictRedis(host=redis_url.hostname, port=redis_url.port, db=redis_db, password=redis_password)
return client
setup_logging()
redis_connection = create_redis_connection()
mail = Mail()
migrate = Migrate()
mail.init_mail(settings.all_settings())
statsd_client = StatsClient(host=settings.STATSD_HOST, port=settings.STATSD_PORT, prefix=settings.STATSD_PREFIX)
limiter = Limiter(key_func=get_ipaddr, storage_uri=settings.LIMITER_STORAGE)
import_query_runners(settings.QUERY_RUNNERS)
import_destinations(settings.DESTINATIONS)
from redash.version_check import reset_new_version_status
reset_new_version_status()
class SlugConverter(BaseConverter):
def to_python(self, value):
# This is a workaround for when we enable multi-org and some files are being called by the index rule:
# for path in settings.STATIC_ASSETS_PATHS:
# full_path = safe_join(path, value)
# if os.path.isfile(full_path):
# raise ValidationError()
return value
def to_url(self, value):
return value
def create_app():
from redash import authentication, extensions, handlers
from redash.handlers.webpack import configure_webpack
from redash.handlers import chrome_logger
from redash.models import db, users
from redash.metrics.request import provision_app
from redash.utils import sentry
sentry.init()
app = Flask(__name__,
template_folder=settings.STATIC_ASSETS_PATH,
static_folder=settings.STATIC_ASSETS_PATH,
static_path='/static')
# Make sure we get the right referral address even behind proxies like nginx.
app.wsgi_app = ProxyFix(app.wsgi_app, settings.PROXIES_COUNT)
app.url_map.converters['org_slug'] = SlugConverter
if settings.ENFORCE_HTTPS:
SSLify(app, skips=['ping'])
# configure our database
app.config['SQLALCHEMY_DATABASE_URI'] = settings.SQLALCHEMY_DATABASE_URI
app.config.update(settings.all_settings())
provision_app(app)
db.init_app(app)
migrate.init_app(app, db)
mail.init_app(app)
authentication.init_app(app)
limiter.init_app(app)
handlers.init_app(app)
configure_webpack(app)
extensions.init_extensions(app)
chrome_logger.init_app(app)
users.init_app(app)
return app
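# A minimal usage sketch (development only; production deployments normally serve the
# app through a WSGI server rather than the built-in Flask server):
#   from redash import create_app
#   app = create_app()
#   app.run()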
|
[] |
[] |
[
"REMOTE_DEBUG"
] |
[]
|
["REMOTE_DEBUG"]
|
python
| 1 | 0 | |
src/tools/benchmark_tool_nodes/launch/ray_ground_classifier_benchmark.launch.py
|
# Copyright (c) 2020-2021, Arm Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import launch
import launch_ros.actions
from os.path import join as joinPath
from launch.actions import IncludeLaunchDescription
from launch.substitutions import LaunchConfiguration
from ament_index_python.packages import get_package_share_directory
from launch.launch_description_sources import PythonLaunchDescriptionSource
def generate_launch_description():
# ray ground classifier parameter file definition.
vlp16_lexus_ray_ground_file_path = os.path.join(
get_package_share_directory('ray_ground_classifier_nodes'),
'param',
'vlp16_lexus.param.yaml'
)
ray_ground_param_file = launch.substitutions.LaunchConfiguration(
'params',
default=[vlp16_lexus_ray_ground_file_path]
)
return launch.LaunchDescription([
# benchmark_tool_nodes arguments
launch.actions.DeclareLaunchArgument(
'dataset_path',
default_value=joinPath(os.environ['HOME'], 'kitti_data', '3d_bench'),
description='Path of the dataset in the system',
),
launch.actions.DeclareLaunchArgument(
'input_topic',
default_value='/points_in',
description='Input topic of the blackbox system to be benchmarked',
),
launch.actions.DeclareLaunchArgument(
'output_topic',
default_value='/points_nonground',
description='Output topic of the blackbox system to be benchmarked',
),
launch.actions.DeclareLaunchArgument(
'benchmarked_input_topic',
default_value=[LaunchConfiguration('input_topic')],
description='The input topic of the benchmarked node',
),
launch.actions.DeclareLaunchArgument(
'benchmarked_output_topic',
default_value=[LaunchConfiguration('output_topic')],
description='The output topic of the benchmarked node',
),
launch.actions.DeclareLaunchArgument(
'result_path',
default_value=joinPath(os.environ['HOME'], 'benchmark_result'),
description='',
),
launch.actions.DeclareLaunchArgument(
'force_end_at_frame_n',
default_value='500',
description='Limit the number of played frames (-1 means unlimited)',
),
launch.actions.DeclareLaunchArgument(
'node_start_delay',
default_value='0',
description='',
),
launch.actions.DeclareLaunchArgument(
'node_name',
default_value='benchmark_tool_node',
description='The name of the node',
),
launch.actions.DeclareLaunchArgument(
'node_output',
default_value='screen',
description='Where to display running information (screen or log)',
),
launch.actions.DeclareLaunchArgument(
'rosbag_record',
default_value='False',
description='Record on rosbag the input and output topic during the benchmark',
),
launch.actions.DeclareLaunchArgument(
'rosbag_record_subfolder',
default_value='The subfolder on filesystem where to save the rosbag record file, it ' \
'must be a subfolder of result_path folder',
description='Record on rosbag the input and output topic during the benchmark',
),
launch.actions.DeclareLaunchArgument(
'ros_info_record',
default_value='False',
description='Record ROS node topology and bandwidth information during the benchmark',
),
launch.actions.DeclareLaunchArgument(
'sys_info_record',
default_value='False',
description='Record system metrics during the benchmark',
),
launch.actions.DeclareLaunchArgument(
'cyclone_dds_info_record',
default_value='False',
description='Record DDS metrics during the benchmark',
),
# Nodes
launch_ros.actions.Node(
package='ray_ground_classifier_nodes',
executable='ray_ground_classifier_cloud_node_exe',
parameters=[
ray_ground_param_file,
{"pcl_size": 210000}
]
),
IncludeLaunchDescription(
PythonLaunchDescriptionSource(
[
get_package_share_directory('benchmark_tool_nodes'),
'/benchmark_task.launch.py'
]
),
launch_arguments={
'benchmark_task': 'ray_ground_classifier_task',
'dataset_path': LaunchConfiguration('dataset_path'),
'input_topic': LaunchConfiguration('input_topic'),
'output_topic': LaunchConfiguration('output_topic'),
'benchmarked_input_topic': LaunchConfiguration('benchmarked_input_topic'),
'benchmarked_output_topic': LaunchConfiguration('benchmarked_output_topic'),
'result_path': LaunchConfiguration('result_path'),
'force_end_at_frame_n': LaunchConfiguration('force_end_at_frame_n'),
'node_start_delay': LaunchConfiguration('node_start_delay'),
'node_name': LaunchConfiguration('node_name'),
'node_output': LaunchConfiguration('node_output'),
'rosbag_record': LaunchConfiguration('rosbag_record'),
'rosbag_record_subfolder': LaunchConfiguration('rosbag_record_subfolder'),
'ros_info_record': LaunchConfiguration('ros_info_record'),
'sys_info_record': LaunchConfiguration('sys_info_record'),
'cyclone_dds_info_record': LaunchConfiguration('cyclone_dds_info_record'),
}.items()
)
])
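# A hypothetical invocation sketch (the package name is inferred from this file's path;
# argument names come from the declarations above, values are placeholders):
#   ros2 launch benchmark_tool_nodes ray_ground_classifier_benchmark.launch.py \
#       dataset_path:=/data/kitti_data/3d_bench force_end_at_frame_n:=100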
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
runsc/main.go
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Binary runsc is an implementation of the Open Container Initiative Runtime
// that runs applications inside a sandbox.
package main
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"os/signal"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/google/subcommands"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/refs"
"gvisor.dev/gvisor/pkg/sentry/platform"
"gvisor.dev/gvisor/runsc/boot"
"gvisor.dev/gvisor/runsc/cmd"
"gvisor.dev/gvisor/runsc/flag"
"gvisor.dev/gvisor/runsc/specutils"
)
var (
// Although these flags are not part of the OCI spec, they are used by
// Docker, and thus should not be changed.
rootDir = flag.String("root", "", "root directory for storage of container state.")
logFilename = flag.String("log", "", "file path where internal debug information is written, default is stdout.")
logFormat = flag.String("log-format", "text", "log format: text (default), json, or json-k8s.")
debug = flag.Bool("debug", false, "enable debug logging.")
showVersion = flag.Bool("version", false, "show version and exit.")
// TODO(gvisor.dev/issue/193): support systemd cgroups
systemdCgroup = flag.Bool("systemd-cgroup", false, "Use systemd for cgroups. NOT SUPPORTED.")
// These flags are unique to runsc, and are used to configure parts of the
// system that are not covered by the runtime spec.
// Debugging flags.
debugLog = flag.String("debug-log", "", "additional location for logs. If it ends with '/', log files are created inside the directory with default names. The following variables are available: %TIMESTAMP%, %COMMAND%.")
panicLog        = flag.String("panic-log", "", "file path where panic reports and other Go runtime messages are written.")
logPackets = flag.Bool("log-packets", false, "enable network packet logging.")
logFD = flag.Int("log-fd", -1, "file descriptor to log to. If set, the 'log' flag is ignored.")
debugLogFD = flag.Int("debug-log-fd", -1, "file descriptor to write debug logs to. If set, the 'debug-log-dir' flag is ignored.")
panicLogFD = flag.Int("panic-log-fd", -1, "file descriptor to write Go's runtime messages.")
debugLogFormat = flag.String("debug-log-format", "text", "log format: text (default), json, or json-k8s.")
alsoLogToStderr = flag.Bool("alsologtostderr", false, "send log messages to stderr.")
// Debugging flags: strace related
strace = flag.Bool("strace", false, "enable strace.")
straceSyscalls = flag.String("strace-syscalls", "", "comma-separated list of syscalls to trace. If --strace is true and this list is empty, then all syscalls will be traced.")
straceLogSize = flag.Uint("strace-log-size", 1024, "default size (in bytes) to log data argument blobs.")
// Flags that control sandbox runtime behavior.
platformName = flag.String("platform", "ptrace", "specifies which platform to use: ptrace (default), kvm.")
network = flag.String("network", "sandbox", "specifies which network to use: sandbox (default), host, none. Using network inside the sandbox is more secure because it's isolated from the host network.")
hardwareGSO = flag.Bool("gso", true, "enable hardware segmentation offload if it is supported by a network device.")
softwareGSO        = flag.Bool("software-gso", true, "enable software segmentation offload when hardware offload can't be enabled.")
fileAccess = flag.String("file-access", "exclusive", "specifies which filesystem to use for the root mount: exclusive (default), shared. Volume mounts are always shared.")
fsGoferHostUDS = flag.Bool("fsgofer-host-uds", false, "allow the gofer to mount Unix Domain Sockets.")
overlay = flag.Bool("overlay", false, "wrap filesystem mounts with writable overlay. All modifications are stored in memory inside the sandbox.")
overlayfsStaleRead = flag.Bool("overlayfs-stale-read", false, "reopen cached FDs after a file is opened for write to workaround overlayfs limitation on kernels before 4.19.")
watchdogAction = flag.String("watchdog-action", "log", "sets what action the watchdog takes when triggered: log (default), panic.")
panicSignal = flag.Int("panic-signal", -1, "register signal handling that panics. Usually set to SIGUSR2(12) to troubleshoot hangs. -1 disables it.")
profile = flag.Bool("profile", false, "prepares the sandbox to use Golang profiler. Note that enabling profiler loosens the seccomp protection added to the sandbox (DO NOT USE IN PRODUCTION).")
netRaw = flag.Bool("net-raw", false, "enable raw sockets. When false, raw sockets are disabled by removing CAP_NET_RAW from containers (`runsc exec` will still be able to utilize raw sockets). Raw sockets allow malicious containers to craft packets and potentially attack the network.")
numNetworkChannels = flag.Int("num-network-channels", 1, "number of underlying channels(FDs) to use for network link endpoints.")
rootless           = flag.Bool("rootless", false, "allow the sandbox to be started by a user that is not root. Sandbox and Gofer processes may run with the same privileges as the current user.")
referenceLeakMode = flag.String("ref-leak-mode", "disabled", "sets reference leak check mode: disabled (default), log-names, log-traces.")
cpuNumFromQuota    = flag.Bool("cpu-num-from-quota", false, "set cpu number to cpu quota (least integer greater than or equal to the quota value, but not less than 2)")
// Test flags, not to be used outside tests, ever.
testOnlyAllowRunAsCurrentUserWithoutChroot = flag.Bool("TESTONLY-unsafe-nonroot", false, "TEST ONLY; do not ever use! This skips many security measures that isolate the host from the sandbox.")
testOnlyTestNameEnv = flag.String("TESTONLY-test-name-env", "", "TEST ONLY; do not ever use! Used for automated tests to improve logging.")
)
func main() {
// Help and flags commands are generated automatically.
help := cmd.NewHelp(subcommands.DefaultCommander)
help.Register(new(cmd.Syscalls))
subcommands.Register(help, "")
subcommands.Register(subcommands.FlagsCommand(), "")
// Installation helpers.
const helperGroup = "helpers"
subcommands.Register(new(cmd.Install), helperGroup)
subcommands.Register(new(cmd.Uninstall), helperGroup)
// Register user-facing runsc commands.
subcommands.Register(new(cmd.Checkpoint), "")
subcommands.Register(new(cmd.Create), "")
subcommands.Register(new(cmd.Delete), "")
subcommands.Register(new(cmd.Do), "")
subcommands.Register(new(cmd.Events), "")
subcommands.Register(new(cmd.Exec), "")
subcommands.Register(new(cmd.Gofer), "")
subcommands.Register(new(cmd.Kill), "")
subcommands.Register(new(cmd.List), "")
subcommands.Register(new(cmd.Pause), "")
subcommands.Register(new(cmd.PS), "")
subcommands.Register(new(cmd.Restore), "")
subcommands.Register(new(cmd.Resume), "")
subcommands.Register(new(cmd.Run), "")
subcommands.Register(new(cmd.Spec), "")
subcommands.Register(new(cmd.State), "")
subcommands.Register(new(cmd.Start), "")
subcommands.Register(new(cmd.Wait), "")
// Register internal commands with the internal group name. This causes
// them to be sorted below the user-facing commands with empty group.
// The string below will be printed above the commands.
const internalGroup = "internal use only"
subcommands.Register(new(cmd.Boot), internalGroup)
subcommands.Register(new(cmd.Debug), internalGroup)
subcommands.Register(new(cmd.Gofer), internalGroup)
subcommands.Register(new(cmd.Statefile), internalGroup)
// All subcommands must be registered before flag parsing.
flag.Parse()
// Are we showing the version?
if *showVersion {
// The format here is the same as runc.
fmt.Fprintf(os.Stdout, "runsc version %s\n", version)
fmt.Fprintf(os.Stdout, "spec: %s\n", specutils.Version)
os.Exit(0)
}
// TODO(gvisor.dev/issue/193): support systemd cgroups
if *systemdCgroup {
fmt.Fprintln(os.Stderr, "systemd cgroup flag passed, but systemd cgroups not supported. See gvisor.dev/issue/193")
os.Exit(1)
}
var errorLogger io.Writer
if *logFD > -1 {
errorLogger = os.NewFile(uintptr(*logFD), "error log file")
} else if *logFilename != "" {
// We must set O_APPEND and not O_TRUNC because Docker passes
// the same log file for all commands (and also parses these
// log files), so we can't destroy them on each command.
var err error
errorLogger, err = os.OpenFile(*logFilename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
if err != nil {
cmd.Fatalf("error opening log file %q: %v", *logFilename, err)
}
}
cmd.ErrorLogger = errorLogger
platformType := *platformName
if _, err := platform.Lookup(platformType); err != nil {
cmd.Fatalf("%v", err)
}
fsAccess, err := boot.MakeFileAccessType(*fileAccess)
if err != nil {
cmd.Fatalf("%v", err)
}
if fsAccess == boot.FileAccessShared && *overlay {
cmd.Fatalf("overlay flag is incompatible with shared file access")
}
netType, err := boot.MakeNetworkType(*network)
if err != nil {
cmd.Fatalf("%v", err)
}
wa, err := boot.MakeWatchdogAction(*watchdogAction)
if err != nil {
cmd.Fatalf("%v", err)
}
if *numNetworkChannels <= 0 {
cmd.Fatalf("num_network_channels must be > 0, got: %d", *numNetworkChannels)
}
refsLeakMode, err := boot.MakeRefsLeakMode(*referenceLeakMode)
if err != nil {
cmd.Fatalf("%v", err)
}
// Sets the reference leak check mode. Also set it in config below to
// propagate it to child processes.
refs.SetLeakMode(refsLeakMode)
// Create a new Config from the flags.
conf := &boot.Config{
RootDir: *rootDir,
Debug: *debug,
LogFilename: *logFilename,
LogFormat: *logFormat,
DebugLog: *debugLog,
PanicLog: *panicLog,
DebugLogFormat: *debugLogFormat,
FileAccess: fsAccess,
FSGoferHostUDS: *fsGoferHostUDS,
Overlay: *overlay,
Network: netType,
HardwareGSO: *hardwareGSO,
SoftwareGSO: *softwareGSO,
LogPackets: *logPackets,
Platform: platformType,
Strace: *strace,
StraceLogSize: *straceLogSize,
WatchdogAction: wa,
PanicSignal: *panicSignal,
ProfileEnable: *profile,
EnableRaw: *netRaw,
NumNetworkChannels: *numNetworkChannels,
Rootless: *rootless,
AlsoLogToStderr: *alsoLogToStderr,
ReferenceLeakMode: refsLeakMode,
OverlayfsStaleRead: *overlayfsStaleRead,
CPUNumFromQuota: *cpuNumFromQuota,
TestOnlyAllowRunAsCurrentUserWithoutChroot: *testOnlyAllowRunAsCurrentUserWithoutChroot,
TestOnlyTestNameEnv: *testOnlyTestNameEnv,
}
if len(*straceSyscalls) != 0 {
conf.StraceSyscalls = strings.Split(*straceSyscalls, ",")
}
// Set up logging.
if *debug {
log.SetLevel(log.Debug)
}
// Logging will include the local date and time via the time package.
//
// On first use, time.Local initializes the local time zone, which
// involves opening tzdata files on the host. Since this requires
// opening host files, it must be done before syscall filter
// installation.
//
// Generally there will be a log message before filter installation
// that will force initialization, but force initialization here in
// case that does not occur.
_ = time.Local.String()
subcommand := flag.CommandLine.Arg(0)
var e log.Emitter
if *debugLogFD > -1 {
f := os.NewFile(uintptr(*debugLogFD), "debug log file")
e = newEmitter(*debugLogFormat, f)
} else if *debugLog != "" {
f, err := specutils.DebugLogFile(*debugLog, subcommand, "" /* name */)
if err != nil {
cmd.Fatalf("error opening debug log file in %q: %v", *debugLog, err)
}
e = newEmitter(*debugLogFormat, f)
} else {
// Stderr is reserved for the application, just discard the logs if no debug
// log is specified.
e = newEmitter("text", ioutil.Discard)
}
if *panicLogFD > -1 || *debugLogFD > -1 {
fd := *panicLogFD
if fd < 0 {
fd = *debugLogFD
}
// Quick sanity check to make sure no other commands get passed
// a log fd (they should use log dir instead).
if subcommand != "boot" && subcommand != "gofer" {
cmd.Fatalf("flags --debug-log-fd and --panic-log-fd should only be passed to 'boot' and 'gofer' command, but was passed to %q", subcommand)
}
// If we are the boot process, then we own our stdio FDs and can do what we
// want with them. Since Docker and Containerd both eat boot's stderr, we
// dup our stderr to the provided log FD so that panics will appear in the
// logs, rather than just disappear.
if err := syscall.Dup3(fd, int(os.Stderr.Fd()), 0); err != nil {
cmd.Fatalf("error dup'ing fd %d to stderr: %v", fd, err)
}
}
if *alsoLogToStderr {
e = &log.MultiEmitter{e, newEmitter(*debugLogFormat, os.Stderr)}
}
log.SetTarget(e)
log.Infof("***************************")
log.Infof("Args: %s", os.Args)
log.Infof("Version %s", version)
log.Infof("PID: %d", os.Getpid())
log.Infof("UID: %d, GID: %d", os.Getuid(), os.Getgid())
log.Infof("Configuration:")
log.Infof("\t\tRootDir: %s", conf.RootDir)
log.Infof("\t\tPlatform: %v", conf.Platform)
log.Infof("\t\tFileAccess: %v, overlay: %t", conf.FileAccess, conf.Overlay)
log.Infof("\t\tNetwork: %v, logging: %t", conf.Network, conf.LogPackets)
log.Infof("\t\tStrace: %t, max size: %d, syscalls: %s", conf.Strace, conf.StraceLogSize, conf.StraceSyscalls)
log.Infof("***************************")
if *testOnlyAllowRunAsCurrentUserWithoutChroot {
// SIGTERM is sent to all processes if a test exceeds its
// timeout and this case is handled by syscall_test_runner.
log.Warningf("Block the TERM signal. This is only safe in tests!")
signal.Ignore(syscall.SIGTERM)
}
// Call the subcommand and pass in the configuration.
var ws syscall.WaitStatus
subcmdCode := subcommands.Execute(context.Background(), conf, &ws)
if subcmdCode == subcommands.ExitSuccess {
log.Infof("Exiting with status: %v", ws)
if ws.Signaled() {
// No good way to return it, emulate what the shell does. Maybe raise
// signal to self?
os.Exit(128 + int(ws.Signal()))
}
os.Exit(ws.ExitStatus())
}
// Return an error that is unlikely to be used by the application.
log.Warningf("Failure to execute command, err: %v", subcmdCode)
os.Exit(128)
}
func newEmitter(format string, logFile io.Writer) log.Emitter {
switch format {
case "text":
return &log.GoogleEmitter{log.Writer{Next: logFile}}
case "json":
return &log.JSONEmitter{log.Writer{Next: logFile}}
case "json-k8s":
return &log.K8sJSONEmitter{log.Writer{Next: logFile}}
}
cmd.Fatalf("invalid log format %q, must be 'text', 'json', or 'json-k8s'", format)
panic("unreachable")
}
func init() {
// Set default root dir to something (hopefully) user-writeable.
*rootDir = "/var/run/runsc"
if runtimeDir := os.Getenv("XDG_RUNTIME_DIR"); runtimeDir != "" {
*rootDir = filepath.Join(runtimeDir, "runsc")
}
}
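// A hypothetical invocation sketch (flag names come from the definitions above; the
// container ID and log directory are placeholders, not taken from this file):
//
//	runsc --debug --debug-log=/tmp/runsc/ --platform=ptrace run <container-id>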
|
[
"\"XDG_RUNTIME_DIR\""
] |
[] |
[
"XDG_RUNTIME_DIR"
] |
[]
|
["XDG_RUNTIME_DIR"]
|
go
| 1 | 0 | |
qbertconfig/Kubeconfig.py
|
# Copyright 2018 Platform9 Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import logging
from yaml import safe_load, safe_dump
LOG = logging.getLogger(__name__)
DEFAULT_KUBECONFIG = '~/.kube/config'
# the piece of kubeconfig we care about
KUBECONFIG_REPEATABLES = ['clusters', 'users', 'contexts']
class Kubeconfig(object):
""" High level class to describe operations on kubeconfigs """
def __init__(self, kcfg_path=None, kcfg_yaml=None, kcfg=None):
self.kubeconfig_path = self.determine_location(kcfg_path)
if kcfg:
# User provided us with the kubeconfig
LOG.debug('Using user provided Kubeconfig')
self.kubeconfig = kcfg
else:
# Attempt to load from yaml or file
existing_kubeconfig = self.read()
# load the kubeconfig from string
if kcfg_yaml:
loaded_kcfg = safe_load(kcfg_yaml)
if existing_kubeconfig != {}:
LOG.warn('kubeconfig already exists at %s. It will be overwritten', self.kubeconfig_path)
LOG.debug('Using provided yaml as kubeconfig')
self.kubeconfig = loaded_kcfg
else:
LOG.debug('Using kubeconfig as loaded from file')
self.kubeconfig = existing_kubeconfig
def __eq__(self, other):
# TODO: This only checks that the NAMES are the same. This doesn't catch when the content is
# different but the names are the same.
if isinstance(other, self.__class__):
# check that both objects have the same clusters, contexts, & users
for item in KUBECONFIG_REPEATABLES:
names = [c['name'] for c in self.kubeconfig[item]]
incoming_names = [c['name'] for c in other.kubeconfig[item]]
if set(names) != set(incoming_names):
return False
return True
else:
return False
def read(self):
""" Loads the current kubeconfig from file """
if not os.path.isfile(self.kubeconfig_path):
LOG.debug('Kubeconfig not found at %s', self.kubeconfig_path)
return {}
else:
with open(self.kubeconfig_path) as kcfg_f:
LOG.debug('Reading kubeconfig at %s', self.kubeconfig_path)
return safe_load(kcfg_f)
def save(self):
""" Saves the current kubeconfig to file """
kcfg_dir = os.path.dirname(self.kubeconfig_path)
LOG.debug('saving to %s' % kcfg_dir)
if not os.path.exists(kcfg_dir):
os.makedirs(kcfg_dir)
# File has not been created yet
with open(self.kubeconfig_path, "w+") as kcfg_f:
kcfg_f.write(safe_dump(self.kubeconfig))
def determine_location(self, kcfg_path=None):
""" Identifies which kubeconfig is currently to be used.
This will load the kubeconfig from the following locations in this precedence order:
- specified in '--kubeconfig' flag
- specified in $KUBECONFIG environment variable
- home directory location ~/.kube/config
Returns:
The identified kubeconfig file to use
"""
kubeconfig_env = None
try:
kubeconfig_env = os.environ['KUBECONFIG']
except KeyError:
pass # :its_fine:
# Determine
kubeconfig_path = None
if kcfg_path:
kubeconfig_path = kcfg_path
elif kubeconfig_env:
kubeconfig_path = kubeconfig_env
else:
kubeconfig_path = DEFAULT_KUBECONFIG
# Clean it up
kubeconfig_path = os.path.expanduser(kubeconfig_path)
kubeconfig_path = os.path.expandvars(kubeconfig_path)
return kubeconfig_path
def merge_kubeconfigs(self, new_kubeconfig):
""" Soft merges two kubeconfig files.
If name matches for cluster, context, or user the new_kubeconfig will be preferred
Args:
new_kubeconfig: A Kubeconfig object to merge into this one
Returns:
The merged kubeconfig dictionary
"""
LOG.debug('Current kubeconfig:\n%s', self.kubeconfig)
LOG.debug('Incoming kubeconfig:\n%s', new_kubeconfig.kubeconfig)
if not self.kubeconfig:
LOG.debug('Source is empty, no merging required')
# it's a fresh kubeconfig! no need to merge anything
self.kubeconfig = new_kubeconfig.kubeconfig
return self.kubeconfig
result = self.kubeconfig
for category in KUBECONFIG_REPEATABLES:
incoming_list = new_kubeconfig.kubeconfig[category]
# merge based on the key 'name'
for inc in incoming_list:
merged = False
for index, item in enumerate(result[category]):
if item['name'] == inc['name']:
LOG.debug('Item %s found in %s. Overwriting', inc['name'], category)
result[category][index] = inc
merged = True
if not merged:
LOG.debug('Item %s not found in %s. Appending', inc['name'], category)
result[category].append(inc)
LOG.debug('After merge:\n%s', result)
self.kubeconfig = result
return self.kubeconfig
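# A minimal usage sketch (the path and the downloaded YAML are placeholders, not part
# of this module):
#   kcfg = Kubeconfig(kcfg_path='~/.kube/config')
#   kcfg.merge_kubeconfigs(Kubeconfig(kcfg_yaml=downloaded_yaml))
#   kcfg.save()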
|
[] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
python
| 1 | 0 | |
pkg/plugins/mco/mco_plugin.go
|
package main
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
sriovnetworkv1 "github.com/k8snetworkplumbingwg/sriov-network-operator/api/v1"
"github.com/k8snetworkplumbingwg/sriov-network-operator/controllers"
)
type McoPlugin struct {
PluginName string
SpecVersion string
DesireState *sriovnetworkv1.SriovNetworkNodeState
LastState *sriovnetworkv1.SriovNetworkNodeState
}
const (
switchdevUnitPath = "/host/etc/systemd/system/switchdev-configuration.service"
switchDevConfPath = "/host/etc/switchdev.conf"
nodeLabelPrefix = "node-role.kubernetes.io/"
)
var nodeName string
var Plugin McoPlugin
var kubeclient *kubernetes.Clientset
var switchdevConfigured bool
// Initialize our plugin and set up initial values
func init() {
Plugin = McoPlugin{
PluginName: "mco_plugin",
SpecVersion: "1.0",
}
var config *rest.Config
var err error
kubeconfig := os.Getenv("KUBECONFIG")
if kubeconfig != "" {
config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
} else {
// creates the in-cluster config
config, err = rest.InClusterConfig()
}
if err != nil {
panic(err.Error())
}
kubeclient = kubernetes.NewForConfigOrDie(config)
}
// Name returns the name of the plugin
func (p *McoPlugin) Name() string {
return p.PluginName
}
// Spec returns the version of the spec expected by the plugin
func (p *McoPlugin) Spec() string {
return p.SpecVersion
}
// OnNodeStateAdd is invoked when the SriovNetworkNodeState CR is created; it returns whether the node needs a drain and/or a reboot
func (p *McoPlugin) OnNodeStateAdd(state *sriovnetworkv1.SriovNetworkNodeState) (needDrain bool, needReboot bool, err error) {
glog.Info("mco-plugin OnNodeStateAdd()")
nodeName = state.GetName()
return p.OnNodeStateChange(nil, state)
}
// OnNodeStateChange is invoked when the SriovNetworkNodeState CR is updated; it returns whether the node needs a drain and/or a reboot
func (p *McoPlugin) OnNodeStateChange(old, new *sriovnetworkv1.SriovNetworkNodeState) (needDrain bool, needReboot bool, err error) {
glog.Info("mco-plugin OnNodeStateChange()")
switchdevConfigured = false
for _, iface := range new.Spec.Interfaces {
if iface.EswitchMode == sriovnetworkv1.ESWITCHMODE_SWITCHDEV {
switchdevConfigured = true
break
}
}
var update, remove bool
if update, remove, err = writeSwitchdevConfFile(new); err != nil {
glog.Errorf("mco-plugin OnNodeStateChange():fail to update switchdev.conf file: %v", err)
return
}
if remove {
needDrain = true
return
}
if update {
if _, err = os.Stat(switchdevUnitPath); err != nil {
if os.IsNotExist(err) {
glog.Info("mco-plugin OnNodeStateChange(): the latest MachineConfig has not been applied")
needDrain = true
err = nil
return
}
return
}
// node is already in the offload MCP
glog.Info("mco-plugin OnNodeStateChange(): need reboot node to use the up-to-date switchdev.conf")
needDrain = true
needReboot = true
return
}
return
}
// Apply config change
func (p *McoPlugin) Apply() error {
glog.Info("mco-plugin Apply()")
node, err := kubeclient.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{})
if err != nil {
return err
}
label := nodeLabelPrefix + controllers.HwOffloadNodeLabel
if switchdevConfigured {
if _, ok := node.Labels[label]; !ok {
glog.Infof("Move node %s into HW offload MachineConfigPool", node.Name)
mergePatch, _ := json.Marshal(map[string]interface{}{
"metadata": map[string]interface{}{
"labels": map[string]interface{}{
label: "",
},
},
})
kubeclient.CoreV1().Nodes().Patch(context.Background(), nodeName, types.MergePatchType, mergePatch, metav1.PatchOptions{})
return nil
}
glog.Infof("Node %s is already in HW offload MachineConfigPool", node.Name)
return nil
}
if _, ok := node.Labels[label]; ok {
glog.Infof("Remove node %s from HW offload MachineConfigPool", node.Name)
mergePatch, _ := json.Marshal(map[string]interface{}{
"metadata": map[string]interface{}{
"labels": map[string]interface{}{
label: nil,
},
},
})
kubeclient.CoreV1().Nodes().Patch(context.Background(), nodeName, types.MergePatchType, mergePatch, metav1.PatchOptions{})
return nil
}
glog.Infof("Node %s is not in HW offload MachineConfigPool", node.Name)
return nil
}
func writeSwitchdevConfFile(newState *sriovnetworkv1.SriovNetworkNodeState) (update, remove bool, err error) {
_, err = os.Stat(switchDevConfPath)
if err != nil {
if os.IsNotExist(err) {
glog.V(2).Infof("writeSwitchdevConfFile(): file not existed, create it")
_, err = os.Create(switchDevConfPath)
if err != nil {
glog.Errorf("writeSwitchdevConfFile(): fail to create file: %v", err)
return
}
} else {
return
}
}
newContent := ""
for _, iface := range newState.Spec.Interfaces {
if iface.EswitchMode == sriovnetworkv1.ESWITCHMODE_SWITCHDEV {
newContent = newContent + fmt.Sprintln(iface.PciAddress, iface.NumVfs)
}
}
oldContent, err := ioutil.ReadFile(switchDevConfPath)
if err != nil {
glog.Errorf("writeSwitchdevConfFile(): fail to read file: %v", err)
return
}
if newContent == string(oldContent) {
glog.V(2).Info("writeSwitchdevConfFile(): no update")
return
}
if newContent == "" {
remove = true
glog.V(2).Info("writeSwitchdevConfFile(): remove content in switchdev.conf")
}
update = true
glog.V(2).Infof("writeSwitchdevConfFile(): write %s to switchdev.conf", newContent)
err = ioutil.WriteFile(switchDevConfPath, []byte(newContent), 0666)
if err != nil {
glog.Errorf("writeSwitchdevConfFile(): fail to write file: %v", err)
return
}
return
}
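// Sketch of the switchdev.conf content produced by writeSwitchdevConfFile above: one
// "<pci-address> <num-vfs>" line per switchdev-mode interface, as written by
// fmt.Sprintln(iface.PciAddress, iface.NumVfs) (values below are illustrative only):
//
//	0000:3b:00.0 8
//	0000:3b:00.1 8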
|
[
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
go
| 1 | 0 | |
oauth/oauth.go
|
package oauth
import (
"fmt"
"net/http"
"net/url"
"os"
"strings"
"github.com/caddyserver/caddy"
"github.com/caddyserver/caddy/caddyhttp/httpserver"
"golang.org/x/oauth2"
)
type config struct {
RedirectURL string
LoginPage string
LoginURL string
LogoutURL string
CallbackPath string
ClientID string
ClientSecret string
AuthURL string
TokenURL string
JwtURL string
ExtraScopes string
APIPath string
Organizations map[string][]string
Usernames map[string][]string
AuthenticationRequired []string
AllowedExtensions []string
ForwardPayload bool
}
func newConfig() config {
return config{
Organizations: map[string][]string{},
Usernames: map[string][]string{},
AuthenticationRequired: []string{},
AllowedExtensions: []string{},
ForwardPayload: false,
}
}
func init() {
caddy.RegisterPlugin("oauth", caddy.Plugin{
ServerType: "http",
Action: setup,
})
}
func setup(c *caddy.Controller) error {
conf, err := parse(c)
if err != nil {
return err
}
// Runs on Caddy startup, useful for services or other setups.
c.OnStartup(func() error {
fmt.Printf("caddy_oauth plugin is initiated with conf=%#v\n", conf)
return nil
})
// Runs on Caddy shutdown, useful for cleanups.
c.OnShutdown(func() error {
fmt.Println("caddy_oauth plugin is cleaning up")
return nil
})
oauthConfs := map[string]*oauth2.Config{}
// create oauth conf for organizations
for path, orgs := range conf.Organizations {
var scopes []string
for _, org := range orgs {
scopes = append(scopes, "user:memberof:"+org)
}
oauthConfs[path] = newOauthConf(conf, scopes)
}
// create oauth conf for usernames
for path := range conf.Usernames {
if _, ok := oauthConfs[path]; ok {
continue
}
oauthConfs[path] = newOauthConf(conf, []string{})
}
for _, path := range conf.AuthenticationRequired {
if _, ok := oauthConfs[path]; ok {
continue
}
oauthConfs[path] = newOauthConf(conf, []string{})
}
// Create oauthConf for LoginURL if exist to be used for login only
if conf.LoginURL != "" {
oauthConfs[conf.LoginURL] = newOauthConf(conf, []string{})
}
httpserver.GetConfig(c).AddMiddleware(func(next httpserver.Handler) httpserver.Handler {
return &handler{
LoginPage: conf.LoginPage,
LoginURL: conf.LoginURL,
LogoutURL: conf.LogoutURL,
JwtURL: conf.JwtURL,
ExtraScopes: conf.ExtraScopes,
CallbackPath: conf.CallbackPath,
Next: next,
hc: http.Client{},
OauthConfs: oauthConfs,
Usernames: conf.Usernames,
Organizations: conf.Organizations,
AuthenticationRequired: conf.AuthenticationRequired,
AllowedExtensions: conf.AllowedExtensions,
ForwardPayload: conf.ForwardPayload,
APIPath: conf.APIPath,
}
})
return nil
}
func newOauthConf(conf config, scopes []string) *oauth2.Config {
return &oauth2.Config{
RedirectURL: conf.RedirectURL,
ClientID: conf.ClientID,
ClientSecret: conf.ClientSecret,
Scopes: scopes,
Endpoint: oauth2.Endpoint{
AuthURL: conf.AuthURL,
TokenURL: conf.TokenURL,
},
}
}
func parse(c *caddy.Controller) (config, error) {
// This parses the following config blocks
var err error
conf := newConfig()
for c.Next() {
args := c.RemainingArgs()
switch len(args) {
case 0:
// no argument passed, check the config block
for c.NextBlock() {
switch c.Val() {
case "redirect_url":
conf.RedirectURL, err = parseOne(c)
case "login_page":
conf.LoginPage, err = parseOne(c)
case "login_url":
conf.LoginURL, err = parseOne(c)
case "logout_url":
conf.LogoutURL, err = parseOne(c)
case "client_id":
conf.ClientID, err = parseOne(c)
case "client_secret":
conf.ClientSecret, err = parseOne(c)
case "auth_url":
conf.AuthURL, err = parseOne(c)
case "token_url":
conf.TokenURL, err = parseOne(c)
case "jwt_url":
conf.JwtURL, err = parseOne(c)
case "organizations":
path, orgs, e := parseTwo(c)
if e != nil {
return conf, e
}
conf.Organizations[path] = strings.Split(orgs, ",")
case "usernames":
path, usernames, e := parseTwo(c)
if e != nil {
return conf, e
}
conf.Usernames[path] = strings.Split(usernames, ",")
case "authentication_required":
path, e := parseOne(c)
if e != nil {
return conf, e
}
conf.AuthenticationRequired = append(conf.AuthenticationRequired, path)
case "allow_extension":
extension, e := parseOne(c)
if e != nil {
return conf, e
}
conf.AllowedExtensions = append(conf.AllowedExtensions, extension)
case "api_base_path":
conf.APIPath, err = parseOne(c)
case "extra_scopes":
conf.ExtraScopes, err = parseOne(c)
case "forward_payload":
conf.ForwardPayload = true
}
if err != nil {
return conf, err
}
}
default:
// we want only one argument max
return conf, c.ArgErr()
}
}
if conf.RedirectURL == "" || conf.ClientID == "" || conf.ClientSecret == "" {
return conf, fmt.Errorf("redirect_url, client_id, and client_secret can't be empty")
}
if conf.AuthURL == "" {
conf.AuthURL = "https://itsyou.online/v1/oauth/authorize"
}
if conf.TokenURL == "" {
conf.TokenURL = "https://itsyou.online/v1/oauth/access_token"
}
if conf.JwtURL == "" {
conf.JwtURL = "https://itsyou.online/v1/oauth/jwt"
}
// callback path
redirURL, err := url.Parse(conf.RedirectURL)
if err != nil {
return conf, err
}
conf.CallbackPath = redirURL.Path
return conf, nil
}
// parse exactly one arguments
func parseOne(c *caddy.Controller) (string, error) {
if !c.NextArg() {
// we are expecting a value
return "", c.ArgErr()
}
val := c.Val()
if c.NextArg() {
// we are expecting only one value.
return "", c.ArgErr()
}
return val, nil
}
func parseTwo(c *caddy.Controller) (string, string, error) {
args := c.RemainingArgs()
if len(args) != 2 {
return "", "", fmt.Errorf("expected 2 args, get %v args", len(args))
}
return args[0], args[1], nil
}
func init() {
if os.Getenv("CADDY_DEV_MODE") == "1" {
httpserver.RegisterDevDirective("oauth", "browse")
}
}
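// A hypothetical Caddyfile sketch for the directives handled in parse() above
// (all values are placeholders, not taken from this file):
//
//	oauth {
//	    redirect_url            https://example.com/oauth/callback
//	    client_id               my-client-id
//	    client_secret           my-client-secret
//	    authentication_required /admin
//	}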
|
[
"\"CADDY_DEV_MODE\""
] |
[] |
[
"CADDY_DEV_MODE"
] |
[]
|
["CADDY_DEV_MODE"]
|
go
| 1 | 0 | |
skt/ye.py
|
from skt.vault_utils import get_secrets
def get_hive_conn():
from pyhive import hive
hiveserver2 = get_secrets(path="ye/hiveserver2")
host = hiveserver2["ip"]
port = hiveserver2["port"]
user = hiveserver2["user"]
conn = hive.connect(host, port=port, username=user)
return conn
def get_hdfs_conn():
import os
import pyarrow
os.environ["ARROW_LIBHDFS_DIR"] = "/usr/hdp/3.0.1.0-187/usr/lib"
conn = pyarrow.hdfs.connect(user="airflow")
return conn
def get_sqlalchemy_engine():
from sqlalchemy import create_engine
hiveserver2 = get_secrets(path="ye/hiveserver2")
host = hiveserver2["ip"]
port = hiveserver2["port"]
user = hiveserver2["user"]
return create_engine(f"hive://{user}@{host}:{port}/tmp")
def get_pkl_from_hdfs(pkl_path):
import pickle
conn = get_hdfs_conn()
byte_object = conn.cat(f"{pkl_path}")
pkl_object = pickle.loads(byte_object)
return pkl_object
def get_spark(scale=0, queue=None):
import os
import uuid
import tempfile
from pyspark.sql import SparkSession
from skt.vault_utils import get_secrets
tmp_uuid = str(uuid.uuid4())
app_name = f"skt-{os.environ.get('USER', 'default')}-{tmp_uuid}"
if not queue:
if "JUPYTERHUB_USER" in os.environ:
queue = "dmig_eda"
else:
queue = "airflow_job"
os.environ["ARROW_PRE_0_15_IPC_FORMAT"] = "1"
key = get_secrets("gcp/sktaic-datahub/dataflow")["config"]
key_file_name = tempfile.mkstemp()[1]
with open(key_file_name, "wb") as key_file:
key_file.write(key.encode())
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = key_file.name
if scale in [1, 2, 3, 4]:
spark = (
SparkSession.builder.config("spark.app.name", app_name)
.config("spark.driver.memory", f"{scale*8}g")
.config("spark.executor.memory", f"{scale*3}g")
.config("spark.executor.instances", f"{scale*8}")
.config("spark.driver.maxResultSize", f"{scale*4}g")
.config("spark.rpc.message.maxSize", "1024")
.config("spark.yarn.queue", queue)
.config("spark.ui.enabled", "false")
.config("spark.port.maxRetries", "128")
.config("spark.executorEnv.ARROW_PRE_0_15_IPC_FORMAT", "1")
.config("spark.yarn.appMasterEnv.ARROW_PRE_0_15_IPC_FORMAT", "1")
.config("spark.jars", "gs://external_libs/spark/jars/spark-bigquery-with-dependencies_2.11-0.16.1.jar",)
.enableHiveSupport()
.getOrCreate()
)
else:
spark = (
SparkSession.builder.config("spark.app.name", app_name)
.config("spark.driver.memory", "6g")
.config("spark.executor.memory", "8g")
.config("spark.shuffle.service.enabled", "true")
.config("spark.dynamicAllocation.enabled", "true")
.config("spark.dynamicAllocation.maxExecutors", "200")
.config("spark.driver.maxResultSize", "6g")
.config("spark.rpc.message.maxSize", "1024")
.config("spark.yarn.queue", queue)
.config("spark.ui.enabled", "false")
.config("spark.port.maxRetries", "128")
.config("spark.executorEnv.ARROW_PRE_0_15_IPC_FORMAT", "1")
.config("spark.yarn.appMasterEnv.ARROW_PRE_0_15_IPC_FORMAT", "1")
.config("spark.jars", "gs://external_libs/spark/jars/spark-bigquery-with-dependencies_2.11-0.16.1.jar",)
.enableHiveSupport()
.getOrCreate()
)
spark.conf.set("spark.sql.execution.arrow.enabled", "true")
return spark
def hive_execute(query):
conn = get_hive_conn()
c = conn.cursor()
c.execute(query)
c.close()
conn.close()
def hive_get_result(query):
conn = get_hive_conn()
c = conn.cursor()
c.execute(query)
result = c.fetchall()
c.close()
conn.close()
return result
def hive_to_pandas(query, scale=0):
if scale == 1:
import pandas
conn = get_hive_conn()
df = pandas.read_sql(query, conn)
df.info()
conn.close()
return df
import uuid
tmp_id = str(uuid.uuid4()).replace("-", "_")
ctas = f"CREATE TABLE dumbo.{tmp_id} stored as parquet as {query}"
conn = get_hive_conn()
c = conn.cursor()
c.execute("set parquet.column.index.access=false")
c.execute(ctas)
hdfs = get_hdfs_conn()
table_path = hdfs.ls(f"/warehouse/tablespace/managed/hive/dumbo.db/{tmp_id}")[0]
hdfs.close()
df = parquet_to_pandas(table_path)
c.execute(f"DROP TABLE dumbo.{tmp_id}")
c.close()
conn.close()
return df
def parquet_to_pandas(hdfs_path):
from pyarrow import parquet
hdfs = get_hdfs_conn()
df = parquet.read_table(hdfs_path, filesystem=hdfs).to_pandas()
df.info()
return df
def pandas_to_parquet(pandas_df, hdfs_path, spark):
df = spark.createDataFrame(pandas_df)
df.write.mode("overwrite").parquet(hdfs_path)
def slack_send(
text="This is default text",
username="SKT",
channel="#leavemealone",
icon_emoji=":large_blue_circle:",
blocks=None,
dataframe=False,
):
import requests
from skt.vault_utils import get_secrets
if dataframe:
from tabulate import tabulate
text = "```" + tabulate(text, tablefmt="simple", headers="keys") + "```"
token = get_secrets("slack")["bot_token"]["airflow"]
proxy = get_secrets("proxy")["proxy"]
proxies = {
"http": proxy,
"https": proxy,
}
headers = {
"Content-Type": "application/json;charset=utf-8",
"Authorization": f"Bearer {token}",
}
json_body = {
"username": username,
"channel": channel,
"text": text,
"blocks": blocks,
"icon_emoji": icon_emoji,
}
r = requests.post("https://www.slack.com/api/chat.postMessage", proxies=proxies, headers=headers, json=json_body,)
r.raise_for_status()
if not r.json()["ok"]:
raise Exception(r.json())
def get_github_util():
from skt.github_utils import GithubUtil
github_token = get_secrets("github/sktaiflow")["token"]
proxy = get_secrets("proxy")["proxy"]
proxies = {
"http": proxy,
"https": proxy,
}
g = GithubUtil(github_token, proxies)
return g
def _write_to_parquet_via_spark(pandas_df, hdfs_path):
spark = get_spark()
spark_df = spark.createDataFrame(pandas_df)
spark_df.write.mode("overwrite").parquet(hdfs_path)
def _write_to_parquet(pandas_df, hdfs_path):
import pyarrow as pa
import pyarrow.parquet as pq
# Read Parquet INT64 timestamp issue:
# https://issues.apache.org/jira/browse/HIVE-21215
if "datetime64[ns]" in pandas_df.dtypes.tolist():
_write_to_parquet_via_spark(pandas_df, hdfs_path)
return
pa_table = pa.Table.from_pandas(pandas_df)
hdfs_conn = get_hdfs_conn()
try:
pq.write_to_dataset(pa_table, root_path=hdfs_path, filesystem=hdfs_conn)
finally:
hdfs_conn.close()
def _write_df(pandas_df, schema_name, table_name, hdfs_path, engine, cursor, tmp_table_name):
import sqlalchemy.exc
cursor.execute(f"drop table if exists {schema_name}.{tmp_table_name}")
try:
pandas_df.to_sql(tmp_table_name, engine, schema=schema_name, if_exists="replace", index=False)
except sqlalchemy.exc.ProgrammingError:
# Hive bulk insert issue:
# https://github.com/dropbox/PyHive/issues/343
pass
cursor.execute(f"drop table if exists {schema_name}.{table_name}")
if hdfs_path is None:
cursor.execute(
f"""create table {schema_name}.{table_name}
like {schema_name}.{tmp_table_name}
stored as parquet"""
)
cursor.execute(f"show create table {schema_name}.{table_name}")
result = cursor.fetchall()
managed_hdfs_path = list(filter(lambda row: row[0].strip().find("hdfs://") == 1, result))[0][0].strip()[1:-1]
_write_to_parquet(pandas_df, managed_hdfs_path)
else:
cursor.execute(
f"""create external table {schema_name}.{table_name}
like {schema_name}.{tmp_table_name}
stored as parquet
location '{hdfs_path}'"""
)
def write_df_to_hive(pandas_df, schema_name, table_name, hdfs_path=None):
"""
Exports a Pandas DataFrame into a table in Hive.
Example:
write_df_to_hive(pandas_df1, "my_schema", "my_table1")
write_df_to_hive(pandas_df2, "my_schema", "my_table2")
write_df_to_hive(pandas_df1, "my_schema", "my_table3",
hdfs_path="hdfs://.../my_schema.db/my_table1")
Parameters
----------
pandas_df : an object of Pandas DataFrame
schema_name : str
A target schema name of Hive
table_name : str
A target table name of Hive
hdfs_path : str, default None
A path of Hadoop file system as an optional parameter.
It will be used to create an external table. If hdfs_path
is not None, data in the dataframe will not be converted.
A metadata in the dataframe is just used to create a Hive
table.
"""
engine = get_sqlalchemy_engine()
conn = get_hive_conn()
cursor = conn.cursor()
import hashlib
tmp_table_name = hashlib.sha1(str(f"{schema_name}.{table_name}").encode("utf-8")).hexdigest()
try:
_write_df(pandas_df, schema_name, table_name, hdfs_path, engine, cursor, tmp_table_name)
finally:
cursor.execute(f"drop table if exists {schema_name}.{tmp_table_name}")
cursor.close()
conn.close()
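# A minimal usage sketch (schema and table names are placeholders):
#   df = hive_to_pandas("SELECT * FROM my_schema.my_table LIMIT 10")
#   write_df_to_hive(df, "my_schema", "my_table_copy")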
|
[] |
[] |
[
"ARROW_LIBHDFS_DIR",
"USER",
"GOOGLE_APPLICATION_CREDENTIALS",
"ARROW_PRE_0_15_IPC_FORMAT"
] |
[]
|
["ARROW_LIBHDFS_DIR", "USER", "GOOGLE_APPLICATION_CREDENTIALS", "ARROW_PRE_0_15_IPC_FORMAT"]
|
python
| 4 | 0 | |
pubsub/envelope.go
|
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation and Dapr Contributors.
// Licensed under the MIT License.
// ------------------------------------------------------------
package pubsub
import (
"encoding/base64"
"fmt"
"time"
contrib_contenttype "github.com/dapr/components-contrib/contenttype"
contrib_metadata "github.com/dapr/components-contrib/metadata"
"github.com/google/uuid"
jsoniter "github.com/json-iterator/go"
)
const (
// DefaultCloudEventType is the default event type for a Dapr published event
DefaultCloudEventType = "com.dapr.event.sent"
// CloudEventsSpecVersion is the specversion used by Dapr for the cloud events implementation
CloudEventsSpecVersion = "1.0"
// DefaultCloudEventSource is the default event source
DefaultCloudEventSource = "Dapr"
// DefaultCloudEventDataContentType is the default content-type for the data attribute
DefaultCloudEventDataContentType = "text/plain"
TraceIDField = "traceid"
TopicField = "topic"
PubsubField = "pubsubname"
ExpirationField = "expiration"
DataContentTypeField = "datacontenttype"
DataField = "data"
DataBase64Field = "data_base64"
SpecVersionField = "specversion"
TypeField = "type"
SourceField = "source"
IDField = "id"
SubjectField = "subject"
)
// NewCloudEventsEnvelope returns a map representation of a cloudevents JSON
func NewCloudEventsEnvelope(id, source, eventType, subject string, topic string, pubsubName string, dataContentType string, data []byte, traceID string) map[string]interface{} {
// defaults
if id == "" {
id = uuid.New().String()
}
if source == "" {
source = DefaultCloudEventSource
}
if eventType == "" {
eventType = DefaultCloudEventType
}
if dataContentType == "" {
dataContentType = DefaultCloudEventDataContentType
}
var ceData interface{}
ceDataField := DataField
var err error
if contrib_contenttype.IsJSONContentType(dataContentType) {
err = jsoniter.Unmarshal(data, &ceData)
} else if contrib_contenttype.IsBinaryContentType(dataContentType) {
ceData = base64.StdEncoding.EncodeToString(data)
ceDataField = DataBase64Field
} else {
ceData = string(data)
}
if err != nil {
ceData = string(data)
}
ce := map[string]interface{}{
IDField: id,
SpecVersionField: CloudEventsSpecVersion,
DataContentTypeField: dataContentType,
SourceField: source,
TypeField: eventType,
TopicField: topic,
PubsubField: pubsubName,
TraceIDField: traceID,
}
ce[ceDataField] = ceData
if subject != "" {
ce[SubjectField] = subject
}
return ce
}
// FromCloudEvent returns a map representation of an existing cloudevents JSON
func FromCloudEvent(cloudEvent []byte, topic, pubsub, traceID string) (map[string]interface{}, error) {
var m map[string]interface{}
err := jsoniter.Unmarshal(cloudEvent, &m)
if err != nil {
return m, err
}
m[TraceIDField] = traceID
m[TopicField] = topic
m[PubsubField] = pubsub
return m, nil
}
// FromRawPayload returns a CloudEvent for a raw payload on the subscriber's end.
func FromRawPayload(data []byte, topic, pubsub string) map[string]interface{} {
// Limitations of generating the CloudEvent on the subscriber side based on raw payload:
// - The CloudEvent ID will be random, so the same message can be redelivered as a different ID.
// - TraceID is not useful since it is random and not from publisher side.
// - Data is always returned as `data_base64` since we don't know the actual content type.
return map[string]interface{}{
IDField: uuid.New().String(),
SpecVersionField: CloudEventsSpecVersion,
DataContentTypeField: "application/octet-stream",
SourceField: DefaultCloudEventSource,
TypeField: DefaultCloudEventType,
TopicField: topic,
PubsubField: pubsub,
DataBase64Field: base64.StdEncoding.EncodeToString(data),
}
}
// HasExpired determines if the current cloud event has expired.
func HasExpired(cloudEvent map[string]interface{}) bool {
e, ok := cloudEvent[ExpirationField]
if ok && e != "" {
expiration, err := time.Parse(time.RFC3339, fmt.Sprintf("%s", e))
if err != nil {
return false
}
return expiration.UTC().Before(time.Now().UTC())
}
return false
}
// ApplyMetadata will process metadata to modify the cloud event based on the component's feature set.
func ApplyMetadata(cloudEvent map[string]interface{}, componentFeatures []Feature, metadata map[string]string) {
ttl, hasTTL, _ := contrib_metadata.TryGetTTL(metadata)
if hasTTL && !FeatureMessageTTL.IsPresent(componentFeatures) {
// Dapr only handles Message TTL if component does not.
now := time.Now().UTC()
// The maximum ttl is maxInt64, which is not enough to overflow time, for now.
// As of the time this code was written (2020 Dec 28th),
// the maximum time of now() adding maxInt64 is ~ "2313-04-09T23:30:26Z".
// Max time in golang is currently 292277024627-12-06T15:30:07.999999999Z.
// So, we have some time before the overflow below happens :)
expiration := now.Add(ttl)
cloudEvent[ExpirationField] = expiration.Format(time.RFC3339)
}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
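All of the helpers in pubsub/envelope.go above are exported from the same package, so a usage sketch can sit next to them. The block below is illustrative only and is not part of the dump: the file name, the trace id, and the "ttlInSeconds" metadata key are assumptions. It builds an envelope with defaulted CloudEvent fields, stamps a TTL via ApplyMetadata, and checks it with HasExpired.
// envelope_example_test.go (hypothetical file name), same package as envelope.go above.
package pubsub

import "fmt"

func ExampleNewCloudEventsEnvelope() {
	data := []byte(`{"orderId": 42}`)

	// Empty id/source/type/content-type fall back to the defaults declared above;
	// a JSON content type makes the payload land under the "data" field.
	ce := NewCloudEventsEnvelope("", "", "", "", "orders", "my-pubsub", "application/json", data, "trace-123")

	// If the component does not advertise FeatureMessageTTL, ApplyMetadata stamps an
	// expiration ("ttlInSeconds" is the metadata key assumed to be read by TryGetTTL).
	ApplyMetadata(ce, nil, map[string]string{"ttlInSeconds": "60"})

	fmt.Println(ce[TopicField], ce[PubsubField], HasExpired(ce))
	// Output: orders my-pubsub false
}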
main.go
|
package main
import (
"context"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"strings"
pb "github.com/evankanderson/sia/doer"
"github.com/hkwi/h2c"
"google.golang.org/grpc"
)
type doerServer struct {
}
func (s *doerServer) DoIt(ctx context.Context, c *pb.Command) (*pb.Response, error) {
resp := fmt.Sprintf("Did: %s", c.Thing)
log.Printf("RPC: %s\n DONE!", c.Thing)
return &pb.Response{Words: resp}, nil
}
func (s *doerServer) KeepDoing(stream pb.Doer_KeepDoingServer) error {
for {
in, err := stream.Recv()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
log.Printf("STREAM: %s", in)
words := fmt.Sprintf("Did: %s", in.Thing)
resp := &pb.Response{Words: words}
if err = stream.Send(resp); err != nil {
return err
}
}
}
func (s *doerServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
in, err := ioutil.ReadAll(r.Body)
if err != nil {
w.WriteHeader(500)
fmt.Fprintf(w, "Failed to read: %v\n", err)
return
}
w.WriteHeader(200)
log.Printf("HTTP/%d %s: %s\n", r.ProtoMajor, r.Method, in)
fmt.Fprintf(w, "You %s for %q\n\n", r.Method, r.URL)
fmt.Fprintf(w, "Got: %s\n", in)
}
func newServer() *doerServer {
s := &doerServer{}
return s
}
type grpcAdapter struct {
grpcServer http.Handler
}
func (g grpcAdapter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
log.Printf("Got %s on %d with %s", r.Method, r.ProtoMajor, r.Header.Get("Content-Type"))
if r.ProtoMajor == 2 && strings.HasPrefix(
r.Header.Get("Content-Type"), "application/grpc") {
g.grpcServer.ServeHTTP(w, r)
} else {
http.DefaultServeMux.ServeHTTP(w, r)
}
}
func main() {
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
addr := fmt.Sprintf(":%s", port)
fmt.Printf("Listening on %s\n", addr)
grpcServer := grpc.NewServer()
doer := newServer()
pb.RegisterDoerServer(grpcServer, doer)
http.Handle("/", doer)
h2g := grpcAdapter{grpcServer: grpcServer}
noTls := h2c.Server{Handler: h2g}
log.Fatal(http.ListenAndServe(addr, noTls))
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
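The main.go above multiplexes plain HTTP and gRPC on one cleartext port: grpcAdapter routes HTTP/2 requests whose Content-Type starts with application/grpc to the gRPC server and hands everything else to http.DefaultServeMux. A minimal client sketch follows; it is not part of the dump, and the generated constructor pb.NewDoerClient and the DoIt client method are assumptions based on standard protoc-gen-go output.
// client_sketch.go (hypothetical standalone program)
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	pb "github.com/evankanderson/sia/doer" // stubs assumed to follow protoc-gen-go naming
	"google.golang.org/grpc"
)

func main() {
	addr := "localhost:8080"

	// Plain HTTP/1.1: not application/grpc, so the adapter falls through to doerServer.ServeHTTP.
	resp, err := http.Get("http://" + addr + "/")
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
	fmt.Println("HTTP status:", resp.Status)

	// gRPC over h2c on the same port: HTTP/2 with an application/grpc content type.
	conn, err := grpc.Dial(addr, grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := pb.NewDoerClient(conn)
	r, err := client.DoIt(context.Background(), &pb.Command{Thing: "laundry"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("gRPC said:", r.Words)
}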
tridesclous/tests/testingtools.py
|
import os
import shutil
import numpy as np
from tridesclous.datasets import download_dataset
from tridesclous.dataio import DataIO
from tridesclous.catalogueconstructor import CatalogueConstructor
from tridesclous.cataloguetools import apply_all_catalogue_steps
def is_running_on_ci_cloud():
if os.environ.get('TRAVIS') in ('true', 'True'):
return True
if os.environ.get('APPVEYOR') in ('true', 'True'):
return True
if os.environ.get('CIRCLECI') in ('true', 'True'):
return True
return False
ON_CI_CLOUD = is_running_on_ci_cloud()
def setup_catalogue(dirname, dataset_name='olfactory_bulb'):
if os.path.exists(dirname):
shutil.rmtree(dirname)
dataio = DataIO(dirname=dirname)
localdir, filenames, params = download_dataset(name=dataset_name)
dataio.set_data_source(type='RawData', filenames=filenames, **params)
if dataset_name=='olfactory_bulb':
channels = [5, 6, 7, 8, 9]
else:
channels = [0,1,2,3]
dataio.add_one_channel_group(channels=channels)
catalogueconstructor = CatalogueConstructor(dataio=dataio)
params = {
'duration' : 60.,
'preprocessor' : {
'highpass_freq' : 300.,
'chunksize' : 1024,
'lostfront_chunksize' : 100,
},
'peak_detector' : {
'peak_sign' : '-',
'relative_threshold' : 7.,
'peak_span_ms' : 0.5,
},
'extract_waveforms' : {
'wf_left_ms' : -2.5,
'wf_right_ms' : 4.0,
'nb_max' : 10000,
},
'clean_waveforms' : {
'alien_value_threshold' : 60.,
},
'noise_snippet' : {
'nb_snippet' : 300,
},
'feature_method': 'global_pca',
'feature_kargs':{'n_components': 5},
'cluster_method' : 'kmeans',
'cluster_kargs' : {'n_clusters': 12},
'clean_cluster' : False,
'clean_cluster_kargs' : {},
}
apply_all_catalogue_steps(catalogueconstructor, params, verbose=True)
catalogueconstructor.trash_small_cluster()
catalogueconstructor.order_clusters(by='waveforms_rms')
catalogueconstructor.make_catalogue_for_peeler()
if __name__ =='__main__':
print('is_running_on_ci_cloud', is_running_on_ci_cloud())
|
[] |
[] |
[
"CIRCLECI",
"TRAVIS",
"APPVEYOR"
] |
[]
|
["CIRCLECI", "TRAVIS", "APPVEYOR"]
|
python
| 3 | 0 | |
hue7jip8/wsgi.py
|
"""
WSGI config for hue7jip8 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hue7jip8.settings")
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
azure/azure_integration_test.go
|
// +build integration
package azure
import (
"github.com/VirtusLab/go-extended/pkg/files"
"io/ioutil"
"os"
"path"
"testing"
"github.com/VirtusLab/crypt/crypto"
"github.com/VirtusLab/crypt/test"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestEncryptDecryptFileWithHeader(t *testing.T) {
logrus.SetLevel(logrus.DebugLevel)
// configuration from config.env
vaultURL := os.Getenv("VAULT_URL")
vaultKey := os.Getenv("VAULT_KEY")
vaultKeyVersion := os.Getenv("VAULT_KEY_VERSION")
require.NotEmpty(t, vaultURL)
require.NotEmpty(t, vaultKey)
require.NotEmpty(t, vaultKeyVersion)
keyVault, err := New(vaultURL, vaultKey, vaultKeyVersion)
require.NoError(t, err)
encrypt := crypto.New(keyVault)
// don't need key info because it is in header in encrypted file
keyVault, err = New("", "", "")
require.NoError(t, err)
decrypt := crypto.New(keyVault)
inputFile := "test.txt"
secret := "top secret token"
err = ioutil.WriteFile(inputFile, []byte(secret), 0644)
defer func() { _ = os.Remove(inputFile) }()
require.NoError(t, err, "Can't write plaintext file")
actual, err := test.EncryptAndDecryptFile(encrypt, decrypt, inputFile)
assert.NoError(t, err)
assert.Equal(t, secret, string(actual))
}
func TestEncryptDecryptWithoutHeader(t *testing.T) {
logrus.SetLevel(logrus.DebugLevel)
// configuration from config.env
vaultURL := os.Getenv("VAULT_URL")
vaultKey := os.Getenv("VAULT_KEY")
vaultKeyVersion := os.Getenv("VAULT_KEY_VERSION")
require.NotEmpty(t, vaultURL)
require.NotEmpty(t, vaultKey)
require.NotEmpty(t, vaultKeyVersion)
keyVault, err := New(vaultURL, vaultKey, vaultKeyVersion)
require.NoError(t, err)
secret := "top secret token"
encrypted, err := keyVault.encrypt([]byte(secret), false)
require.NoError(t, err)
decrypted, err := keyVault.Decrypt(encrypted)
require.NoError(t, err)
assert.Equal(t, string(decrypted), secret)
}
func TestCrypt_EncryptDecryptFiles(t *testing.T) {
logrus.SetLevel(logrus.DebugLevel)
// configuration from config.env
vaultURL := os.Getenv("VAULT_URL")
vaultKey := os.Getenv("VAULT_KEY")
vaultKeyVersion := os.Getenv("VAULT_KEY_VERSION")
require.NotEmpty(t, vaultURL)
require.NotEmpty(t, vaultKey)
require.NotEmpty(t, vaultKeyVersion)
encryptedFileExtension := ".crypt"
rootFileName := "root.txt"
subdirectoryFileName := "sub-directory.txt"
subdirectoryName := "sub-directory"
inDir := "testdata/encryptDecryptFiles"
encryptedFilesDir := "encryptedFiles"
decryptedFilesDir := "decryptedFiles"
keyVault, err := New(vaultURL, vaultKey, vaultKeyVersion)
require.NoError(t, err)
crypt := crypto.New(keyVault)
defer func() { _ = os.RemoveAll(encryptedFilesDir) }()
err = crypt.EncryptFiles(inDir, encryptedFilesDir, "", encryptedFileExtension)
require.NoError(t, err)
assert.FileExists(t, path.Join(encryptedFilesDir, rootFileName+encryptedFileExtension))
assert.FileExists(t, path.Join(encryptedFilesDir, subdirectoryName, subdirectoryFileName+encryptedFileExtension))
defer func() { _ = os.RemoveAll(decryptedFilesDir) }()
err = crypt.DecryptFiles(encryptedFilesDir, decryptedFilesDir, encryptedFileExtension, "")
require.NoError(t, err)
assert.FileExists(t, path.Join(decryptedFilesDir, rootFileName))
assert.FileExists(t, path.Join(decryptedFilesDir, subdirectoryName, subdirectoryFileName))
rootFile, err := files.ReadInput(path.Join(inDir, rootFileName))
require.NoError(t, err)
rootFileAfterDecryption, err := files.ReadInput(path.Join(decryptedFilesDir, rootFileName))
require.NoError(t, err)
assert.Equal(t, rootFile, rootFileAfterDecryption)
subdirectoryFile, err := files.ReadInput(path.Join(inDir, subdirectoryName, subdirectoryFileName))
require.NoError(t, err)
subdirectoryFileAfterDecryption, err := files.ReadInput(path.Join(decryptedFilesDir, subdirectoryName, subdirectoryFileName))
require.NoError(t, err)
assert.Equal(t, subdirectoryFile, subdirectoryFileAfterDecryption)
}
|
[
"\"VAULT_URL\"",
"\"VAULT_KEY\"",
"\"VAULT_KEY_VERSION\"",
"\"VAULT_URL\"",
"\"VAULT_KEY\"",
"\"VAULT_KEY_VERSION\"",
"\"VAULT_URL\"",
"\"VAULT_KEY\"",
"\"VAULT_KEY_VERSION\""
] |
[] |
[
"VAULT_KEY_VERSION",
"VAULT_URL",
"VAULT_KEY"
] |
[]
|
["VAULT_KEY_VERSION", "VAULT_URL", "VAULT_KEY"]
|
go
| 3 | 0 | |
cmd/vault.go
|
/*
Copyright © 2022 NAME HERE <EMAIL ADDRESS>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"os"
"path/filepath"
search "github.com/isan-rivkin/surf/lib/search/vaultsearch"
"github.com/isan-rivkin/surf/lib/vault"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
var (
query *string
parallel *int
mount *string
prefix *string
outputWebURL *bool
)
// vaultCmd represents the vault command
var vaultCmd = &cobra.Command{
Use: "vault",
Short: "pattern match against keys in Vault",
Long: `
$surf vault -q aws -m backend-secrets/prod -t 15
$surf vault -q aws -m 'user_.*pro'
`,
Run: func(cmd *cobra.Command, args []string) {
mount := getEnvOrOverride(mount, EnvKeyVaultDefaultMount)
prefix := getEnvOrOverride(prefix, EnvKeyVaultDefaultPrefix)
basePath := filepath.Join(*mount, *prefix)
client := runVaultDefaultAuth()
log.WithFields(log.Fields{
"address": client.GetVaultAddr(),
"base_path": basePath,
"query": *query,
}).Info("starting search")
m := search.NewDefaultRegexMatcher()
s := search.NewRecursiveSearcher[search.VC, search.Matcher](client, m)
output, err := s.Search(search.NewSearchInput(*query, basePath, *parallel))
if err != nil {
panic(err)
}
if output != nil {
for _, i := range output.Matches {
path := i.GetFullPath()
if *outputWebURL {
fmt.Println(vault.PathToWebURL(client.GetVaultAddr(), path))
} else {
fmt.Println(path)
}
}
}
},
}
func runVaultDefaultAuth() vault.Client[vault.Authenticator] {
vaultAddr := os.Getenv("VAULT_ADDR")
if err := setVaultAccessCredentialsValues(); err != nil {
log.WithError(err).Fatal("failed auth to Vault")
}
auth := vault.NewLdapAuth(*username, *password, vaultAddr)
client := vault.NewClient(auth)
return client
}
func init() {
rootCmd.AddCommand(vaultCmd)
query = vaultCmd.PersistentFlags().StringP("query", "q", "", "search query regex supported")
mount = vaultCmd.PersistentFlags().StringP("mount", "m", "", "mount to start the search at the root")
prefix = vaultCmd.PersistentFlags().StringP("prefix", "p", "", "$mount/prefix inside the mount to search in")
parallel = vaultCmd.PersistentFlags().IntP("threads", "t", 10, "parallel search number")
outputWebURL = vaultCmd.PersistentFlags().Bool("output-url", true, "default output is web URLs to click on and open in the browser UI")
vaultCmd.MarkPersistentFlagRequired("query")
}
|
[
"\"VAULT_ADDR\""
] |
[] |
[
"VAULT_ADDR"
] |
[]
|
["VAULT_ADDR"]
|
go
| 1 | 0 | |
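cmd/vault.go above calls getEnvOrOverride(mount, EnvKeyVaultDefaultMount) and setVaultAccessCredentialsValues, which are defined elsewhere in the repository and not part of this dump. The sketch below is only a guess at the shape of such a helper, to make the flag-versus-environment precedence in the Run function easier to follow; the name and exact fallback behaviour are assumptions, not the project's actual code.
// A hypothetical stand-in for getEnvOrOverride: prefer an explicitly set CLI flag,
// otherwise fall back to the named environment variable.
package cmd

import "os"

func getEnvOrOverrideSketch(flagVal *string, envKey string) *string {
	if flagVal != nil && *flagVal != "" {
		return flagVal
	}
	v := os.Getenv(envKey)
	return &v
}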
hadoop-ozone/dist/src/main/blockade/conftest.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import time
import subprocess
EPOCH_TIME = int(time.time())
def pytest_addoption(parser):
parser.addoption("--output-dir",
action="store",
default="/tmp/BlockadeTests",
help="location of output directory where output log "
"and plot files will be created")
parser.addoption("--log-format",
action="store",
default="%(asctime)s|%(levelname)s|%(threadName)s|"
"%(filename)s:%(lineno)s -"
" %(funcName)s()|%(message)s",
help="specify log format")
parser.addoption("--log-level",
action="store",
default="info",
help="specify log level")
parser.addoption("--containerStatusSleep",
action="store",
default="900",
help="sleep time before checking container status")
def pytest_configure(config):
global OUTPUT_DIR
os.environ["CONTAINER_STATUS_SLEEP"] = config.option.containerStatusSleep
OUTPUT_DIR = "%s/%s" % (config.option.output_dir, EPOCH_TIME)
try:
os.makedirs(OUTPUT_DIR)
except OSError as e:
raise Exception(e.strerror + ": " + e.filename)
log_file = os.path.join(OUTPUT_DIR, "output.log")
if config.option.log_level == "trace":
loglevel = logging.DEBUG
else:
loglevel = getattr(logging, config.option.log_level.upper())
logformatter = logging.Formatter(config.option.log_format)
logging.basicConfig(filename=log_file,
filemode='w',
level=loglevel,
format=config.option.log_format)
console = logging.StreamHandler()
console.setLevel(loglevel)
console.setFormatter(logformatter)
logging.getLogger('').addHandler(console)
def pytest_report_teststatus(report):
logger = logging.getLogger('main')
loc, line, name = report.location
if report.outcome == 'skipped':
pass
elif report.when == 'setup':
logger.info("RUNNING TEST \"%s\" at location \"%s\" at line number"
" \"%s\"" % (name, loc, str(line)))
elif report.when == 'call':
logger.info("TEST \"%s\" %s in %3.2f seconds" %
(name, report.outcome.upper(), report.duration))
log_file_path = "%s/%s_all_docker.log" % \
(OUTPUT_DIR, name)
gather_docker_logs(log_file_path)
def pytest_sessionfinish(session):
logger = logging.getLogger('main')
logger.info("ALL TESTS FINISHED")
logger.info("ALL logs present in following directory: %s", OUTPUT_DIR)
def gather_docker_logs(log_file_path):
docker_compose_file = os.environ["DOCKER_COMPOSE_FILE"]
output = subprocess.check_output(["docker-compose", "-f",
docker_compose_file, "logs"])
with open(log_file_path, "w") as text_file:
text_file.write(output)
|
[] |
[] |
[
"CONTAINER_STATUS_SLEEP",
"DOCKER_COMPOSE_FILE"
] |
[]
|
["CONTAINER_STATUS_SLEEP", "DOCKER_COMPOSE_FILE"]
|
python
| 2 | 0 | |
internal/testutils/test_utils.go
|
package testutils
import (
"bytes"
"encoding/json"
"fmt"
"github.com/go-jet/jet/internal/jet"
"github.com/go-jet/jet/internal/utils"
"github.com/go-jet/jet/qrm"
"gotest.tools/assert"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"testing"
)
// AssertExec assert statement execution for successful execution and number of rows affected
func AssertExec(t *testing.T, stmt jet.Statement, db qrm.DB, rowsAffected ...int64) {
res, err := stmt.Exec(db)
assert.NilError(t, err)
rows, err := res.RowsAffected()
assert.NilError(t, err)
if len(rowsAffected) > 0 {
assert.Equal(t, rows, rowsAffected[0])
}
}
// AssertExecErr assert statement execution for failed execution with error string errorStr
func AssertExecErr(t *testing.T, stmt jet.Statement, db qrm.DB, errorStr string) {
_, err := stmt.Exec(db)
assert.Error(t, err, errorStr)
}
func getFullPath(relativePath string) string {
goPath := os.Getenv("GOPATH")
return filepath.Join(goPath, "src/github.com/go-jet/jet/tests", relativePath)
}
// PrintJson print v as json
func PrintJson(v interface{}) {
jsonText, _ := json.MarshalIndent(v, "", "\t")
fmt.Println(string(jsonText))
}
// AssertJSON check if data json output is the same as expectedJSON
func AssertJSON(t *testing.T, data interface{}, expectedJSON string) {
jsonData, err := json.MarshalIndent(data, "", "\t")
assert.NilError(t, err)
assert.Equal(t, "\n"+string(jsonData)+"\n", expectedJSON)
}
// SaveJSONFile saves v as json at testRelativePath
func SaveJSONFile(v interface{}, testRelativePath string) {
jsonText, _ := json.MarshalIndent(v, "", "\t")
filePath := getFullPath(testRelativePath)
err := ioutil.WriteFile(filePath, jsonText, 0644)
utils.PanicOnError(err)
}
// AssertJSONFile check if data json representation is the same as json at testRelativePath
func AssertJSONFile(t *testing.T, data interface{}, testRelativePath string) {
filePath := getFullPath(testRelativePath)
fileJSONData, err := ioutil.ReadFile(filePath)
assert.NilError(t, err)
if runtime.GOOS == "windows" {
fileJSONData = bytes.Replace(fileJSONData, []byte("\r\n"), []byte("\n"), -1)
}
jsonData, err := json.MarshalIndent(data, "", "\t")
assert.NilError(t, err)
assert.Assert(t, string(fileJSONData) == string(jsonData))
//assert.DeepEqual(t, string(fileJSONData), string(jsonData))
}
// AssertStatementSql check if statement Sql() is the same as expectedQuery and expectedArgs
func AssertStatementSql(t *testing.T, query jet.Statement, expectedQuery string, expectedArgs ...interface{}) {
queryStr, args := query.Sql()
assert.Equal(t, queryStr, expectedQuery)
if len(expectedArgs) == 0 {
return
}
assert.DeepEqual(t, args, expectedArgs)
}
// AssertStatementSqlErr checks if statement Sql() panics with errorStr
func AssertStatementSqlErr(t *testing.T, stmt jet.Statement, errorStr string) {
defer func() {
r := recover()
assert.Equal(t, r, errorStr)
}()
stmt.Sql()
}
// AssertDebugStatementSql check if statement Sql() is the same as expectedQuery
func AssertDebugStatementSql(t *testing.T, query jet.Statement, expectedQuery string, expectedArgs ...interface{}) {
_, args := query.Sql()
if len(expectedArgs) > 0 {
assert.DeepEqual(t, args, expectedArgs)
}
debuqSql := query.DebugSql()
assert.Equal(t, debuqSql, expectedQuery)
}
// AssertClauseSerialize checks if clause serialize produces expected query and args
func AssertClauseSerialize(t *testing.T, dialect jet.Dialect, clause jet.Serializer, query string, args ...interface{}) {
out := jet.SQLBuilder{Dialect: dialect}
jet.Serialize(clause, jet.SelectStatementType, &out)
//fmt.Println(out.Buff.String())
assert.DeepEqual(t, out.Buff.String(), query)
if len(args) > 0 {
assert.DeepEqual(t, out.Args, args)
}
}
// AssertClauseSerializeErr check if clause serialize panics with errString
func AssertClauseSerializeErr(t *testing.T, dialect jet.Dialect, clause jet.Serializer, errString string) {
defer func() {
r := recover()
assert.Equal(t, r, errString)
}()
out := jet.SQLBuilder{Dialect: dialect}
jet.Serialize(clause, jet.SelectStatementType, &out)
}
// AssertProjectionSerialize check if projection serialize produces expected query and args
func AssertProjectionSerialize(t *testing.T, dialect jet.Dialect, projection jet.Projection, query string, args ...interface{}) {
out := jet.SQLBuilder{Dialect: dialect}
jet.SerializeForProjection(projection, jet.SelectStatementType, &out)
assert.DeepEqual(t, out.Buff.String(), query)
assert.DeepEqual(t, out.Args, args)
}
// AssertQueryPanicErr check if statement Query execution panics with error errString
func AssertQueryPanicErr(t *testing.T, stmt jet.Statement, db qrm.DB, dest interface{}, errString string) {
defer func() {
r := recover()
assert.Equal(t, r, errString)
}()
stmt.Query(db, dest)
}
// AssertFileContent check if file content at filePath contains expectedContent text.
func AssertFileContent(t *testing.T, filePath string, contentBegin string, expectedContent string) {
enumFileData, err := ioutil.ReadFile(filePath)
assert.NilError(t, err)
beginIndex := bytes.Index(enumFileData, []byte(contentBegin))
//fmt.Println("-"+string(enumFileData[beginIndex:])+"-")
assert.DeepEqual(t, string(enumFileData[beginIndex:]), expectedContent)
}
// AssertFileNamesEqual check if all filesInfos are contained in fileNames
func AssertFileNamesEqual(t *testing.T, fileInfos []os.FileInfo, fileNames ...string) {
assert.Equal(t, len(fileInfos), len(fileNames))
fileNamesMap := map[string]bool{}
for _, fileInfo := range fileInfos {
fileNamesMap[fileInfo.Name()] = true
}
for _, fileName := range fileNames {
assert.Assert(t, fileNamesMap[fileName], fileName+" does not exist.")
}
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
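getFullPath in the test_utils.go above resolves paths relative to $GOPATH and silently produces a root-relative path when the variable is unset. A small variant is sketched below (not part of the dump; the function name is made up) that falls back to go/build's default GOPATH, which is typically $HOME/go on modern toolchains.
package testutils

import (
	"go/build"
	"os"
	"path/filepath"
)

// getFullPathWithFallback behaves like getFullPath but tolerates an unset GOPATH.
func getFullPathWithFallback(relativePath string) string {
	goPath := os.Getenv("GOPATH")
	if goPath == "" {
		goPath = build.Default.GOPATH // the toolchain default, typically $HOME/go
	}
	return filepath.Join(goPath, "src/github.com/go-jet/jet/tests", relativePath)
}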
server/server.go
|
package main
import (
"log"
"net/http"
"os"
"github.com/99designs/gqlgen/handler"
_ "github.com/go-sql-driver/mysql"
traph_go "github.com/wakashiyo/traph-go"
)
const defaultPort = "8080"
func main() {
port := os.Getenv("PORT")
if port == "" {
port = defaultPort
}
http.Handle("/", handler.Playground("GraphQL playground", "/query"))
http.Handle("/query", handler.GraphQL(traph_go.NewExecutableSchema(traph_go.Config{Resolvers: &traph_go.Resolver{}})))
log.Printf("connect to http://localhost:%s/ for GraphQL playground", port)
log.Fatal(http.ListenAndServe(":"+port, nil))
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
backend/tomato/__init__.py
|
# -*- coding: utf-8 -*-
# ToMaTo (Topology management software)
# Copyright (C) 2010 Dennis Schwerdel, University of Kaiserslautern
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import os, sys, signal, time, thread
os.environ['TOMATO_MODULE'] = "backend"
import monkey
monkey.patch_all()
import config
from mongoengine import connect
database_connnection = connect(config.DATABASE, host=config.DATABASE_HOST)
database_obj = getattr(database_connnection, config.DATABASE)
def db_migrate():
def getMigration(version):
try:
return __import__("tomato.migrations.migration_%04d" % version, {}, {}, 'migration_%04d' % version).migrate
except ImportError:
return None
from .db import data
version = data.get('db_version', 0)
print >>sys.stderr, "Database version: %04d" % version
if version > 0 and not getMigration(version):
raise Exception("Database is newer than code")
if not version and not getMigration(1):
raise Exception("Failed to migrate to initial version")
while True:
version += 1
migrate = getMigration(version)
if not migrate:
break
print >>sys.stderr, " - migrating to version %04d..." % version
try:
migrate()
except:
import traceback
traceback.print_exc()
raise
data.set('db_version', version)
import threading
_currentUser = threading.local()
def currentUser():
return _currentUser.user if hasattr(_currentUser, "user") else None # fixme
def setCurrentUser(user):
_currentUser.user = user
def login(credentials, sslCert):
user = auth.login(*credentials) if credentials else None
setCurrentUser(user)
return user or not credentials
from lib import logging
def handleError():
logging.logException()
dump.dumpException()
from lib import tasks #@UnresolvedImport
scheduler = tasks.TaskScheduler(maxLateTime=30.0, minWorkers=5, maxWorkers=25)
starttime = time.time()
from . import host, auth, rpcserver #@UnresolvedImport
from lib.cmd import bittorrent, process #@UnresolvedImport
from lib import util, cache #@UnresolvedImport
scheduler.scheduleRepeated(config.BITTORRENT_RESTART, util.wrap_task(bittorrent.restartClient))
stopped = threading.Event()
import dump
import dumpmanager
import models
def start():
logging.openDefault(config.LOG_FILE)
if not os.environ.has_key("TOMATO_NO_MIGRATE"):
db_migrate()
else:
print >>sys.stderr, "Skipping migrations"
auth.init()
global starttime
bittorrent.startTracker(config.TRACKER_PORT, config.TEMPLATE_PATH)
bittorrent.startClient(config.TEMPLATE_PATH)
rpcserver.start()
starttime = time.time()
if not os.environ.has_key("TOMATO_NO_TASKS"):
scheduler.start()
else:
print >>sys.stderr, "Running without tasks"
dump.init()
dumpmanager.init()# important: must be called after dump.init()
cache.init()# this does not depend on anything (except the scheduler variable being initialized), and nothing depends on this. No need to hurry this.
def reload_(*args):
print >>sys.stderr, "Reloading..."
logging.closeDefault()
reload(config)
logging.openDefault(config.LOG_FILE)
#stopRPCserver()
#startRPCserver()
def _printStackTraces():
import traceback
for threadId, stack in sys._current_frames().items():
print >>sys.stderr, "ThreadID: %s" % threadId
for filename, lineno, name, line in traceback.extract_stack(stack):
print >>sys.stderr, '\tFile: "%s", line %d, in %s' % (filename, lineno, name)
if line:
print >>sys.stderr, "\t\t%s" % (line.strip())
def _stopHelper():
stopped.wait(10)
if stopped.isSet():
return
print >>sys.stderr, "Stopping takes long, waiting some more time..."
stopped.wait(10)
if stopped.isSet():
return
print >>sys.stderr, "Ok last chance, killing process in 10 seconds..."
stopped.wait(10)
if stopped.isSet():
return
print >>sys.stderr, "Some threads are still running:"
_printStackTraces()
print >>sys.stderr, "Killing process..."
process.kill(os.getpid(), force=True)
def stop(*args):
print >>sys.stderr, "Shutting down..."
thread.start_new_thread(_stopHelper, ())
rpcserver.stop()
host.stopCaching()
scheduler.stop()
bittorrent.stopTracker()
bittorrent.stopClient()
logging.closeDefault()
stopped.set()
def run():
start()
signal.signal(signal.SIGTERM, stop)
signal.signal(signal.SIGINT, stop)
signal.signal(signal.SIGHUP, reload_)
try:
while not stopped.isSet():
stopped.wait(1.0)
except KeyboardInterrupt:
stop()
|
[] |
[] |
[
"TOMATO_MODULE"
] |
[]
|
["TOMATO_MODULE"]
|
python
| 1 | 0 | |
providers/kubernetes/kubernetes_provider.go
|
// Copyright 2018 The Terraformer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubernetes
import (
"encoding/json"
"fmt"
"log"
"os"
"path/filepath"
"runtime"
"time"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"github.com/GoogleCloudPlatform/terraformer/terraform_utils"
"github.com/GoogleCloudPlatform/terraformer/terraform_utils/provider_wrapper"
"github.com/zclconf/go-cty/cty"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/discovery"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" // GKE support
)
type KubernetesProvider struct {
terraform_utils.Provider
region string
}
func (p KubernetesProvider) GetResourceConnections() map[string]map[string][]string {
return map[string]map[string][]string{}
}
func (p KubernetesProvider) GetProviderData(arg ...string) map[string]interface{} {
return map[string]interface{}{
"provider": map[string]interface{}{
"kubernetes": map[string]interface{}{
"version": provider_wrapper.GetProviderVersion(p.GetName()),
},
},
}
}
func (p *KubernetesProvider) Init(args []string) error {
return nil
}
func (p *KubernetesProvider) GetName() string {
return "kubernetes"
}
func (p *KubernetesProvider) InitService(serviceName string) error {
var isSupported bool
if _, isSupported = p.GetSupportedService()[serviceName]; !isSupported {
return errors.New("kubernetes: " + serviceName + " not supported resource")
}
p.Service = p.GetSupportedService()[serviceName]
p.Service.SetName(serviceName)
p.Service.SetProviderName(p.GetName())
return nil
}
// GetSupportedService returns a map of supported resources for Kubernetes
func (p *KubernetesProvider) GetSupportedService() map[string]terraform_utils.ServiceGenerator {
resources := make(map[string]terraform_utils.ServiceGenerator)
config, _, err := initClientAndConfig()
if err != nil {
return resources
}
dc, err := discovery.NewDiscoveryClientForConfig(config)
if err != nil {
log.Println(err)
return resources
}
lists, err := dc.ServerPreferredResources()
if err != nil {
log.Println(err)
return resources
}
provider, err := provider_wrapper.NewProviderWrapper("kubernetes", cty.Value{})
if err != nil {
log.Println(err)
return resources
}
resp := provider.Provider.GetSchema()
for _, list := range lists {
if len(list.APIResources) == 0 {
continue
}
gv, err := schema.ParseGroupVersion(list.GroupVersion)
if err != nil {
continue
}
for _, resource := range list.APIResources {
if len(resource.Verbs) == 0 {
continue
}
// filter to resources that support list
if len(resource.Verbs) > 0 && !sets.NewString(resource.Verbs...).Has("list") {
continue
}
// filter to resource that are supported by terraform kubernetes provider
if _, ok := resp.ResourceTypes[extractTfResourceName(resource.Kind)]; !ok {
continue
}
resources[resource.Name] = &Kind{
Group: gv.Group,
Version: gv.Version,
Name: resource.Kind,
Namespaced: resource.Namespaced,
}
}
}
return resources
}
// initClientAndConfig uses the KUBECONFIG environment variable to create
// a new rest client and config object based on the existing kubectl config
// and options passed from the plugin framework via environment variables
func initClientAndConfig() (*restclient.Config, clientcmd.ClientConfig, error) {
// resolve kubeconfig location, prioritizing the --config global flag,
// then the value of the KUBECONFIG env var (if any), and defaulting
// to ~/.kube/config as a last resort.
home := os.Getenv("HOME")
if runtime.GOOS == "windows" {
home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
if home == "" {
home = os.Getenv("USERPROFILE")
}
}
kubeconfig := filepath.Join(home, ".kube", "config")
kubeconfigEnv := os.Getenv("KUBECONFIG")
if len(kubeconfigEnv) > 0 {
kubeconfig = kubeconfigEnv
}
configFile := os.Getenv("KUBECTL_PLUGINS_GLOBAL_FLAG_CONFIG")
kubeConfigFile := os.Getenv("KUBECTL_PLUGINS_GLOBAL_FLAG_KUBECONFIG")
if len(configFile) > 0 {
kubeconfig = configFile
} else if len(kubeConfigFile) > 0 {
kubeconfig = kubeConfigFile
}
if len(kubeconfig) == 0 {
return nil, nil, fmt.Errorf("error initializing config. The KUBECONFIG environment variable must be defined.")
}
config, err := configFromPath(kubeconfig)
if err != nil {
return nil, nil, fmt.Errorf("error obtaining kubectl config: %v", err)
}
client, err := config.ClientConfig()
if err != nil {
return nil, nil, fmt.Errorf("the provided credentials %q could not be used: %v", kubeconfig, err)
}
err = applyGlobalOptionsToConfig(client)
if err != nil {
return nil, nil, fmt.Errorf("error processing global plugin options: %v", err)
}
return client, config, nil
}
func configFromPath(path string) (clientcmd.ClientConfig, error) {
rules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: path}
credentials, err := rules.Load()
if err != nil {
return nil, fmt.Errorf("the provided credentials %q could not be loaded: %v", path, err)
}
overrides := &clientcmd.ConfigOverrides{
Context: clientcmdapi.Context{
Namespace: os.Getenv("KUBECTL_PLUGINS_GLOBAL_FLAG_NAMESPACE"),
},
}
var cfg clientcmd.ClientConfig
context := os.Getenv("KUBECTL_PLUGINS_GLOBAL_FLAG_CONTEXT")
if len(context) > 0 {
rules := clientcmd.NewDefaultClientConfigLoadingRules()
cfg = clientcmd.NewNonInteractiveClientConfig(*credentials, context, overrides, rules)
} else {
cfg = clientcmd.NewDefaultClientConfig(*credentials, overrides)
}
return cfg, nil
}
func applyGlobalOptionsToConfig(config *restclient.Config) error {
// impersonation config
impersonateUser := os.Getenv("KUBECTL_PLUGINS_GLOBAL_FLAG_AS")
if len(impersonateUser) > 0 {
config.Impersonate.UserName = impersonateUser
}
impersonateGroup := os.Getenv("KUBECTL_PLUGINS_GLOBAL_FLAG_AS_GROUP")
if len(impersonateGroup) > 0 {
impersonateGroupJSON := []string{}
err := json.Unmarshal([]byte(impersonateGroup), &impersonateGroupJSON)
if err != nil {
return errors.New(fmt.Sprintf("error parsing global option %q: %v", "--as-group", err))
}
if len(impersonateGroupJSON) > 0 {
config.Impersonate.Groups = impersonateGroupJSON
}
}
// tls config
caFile := os.Getenv("KUBECTL_PLUGINS_GLOBAL_FLAG_CERTIFICATE_AUTHORITY")
if len(caFile) > 0 {
config.TLSClientConfig.CAFile = caFile
}
clientCertFile := os.Getenv("KUBECTL_PLUGINS_GLOBAL_FLAG_CLIENT_CERTIFICATE")
if len(clientCertFile) > 0 {
config.TLSClientConfig.CertFile = clientCertFile
}
clientKey := os.Getenv("KUBECTL_PLUGINS_GLOBAL_FLAG_CLIENT_KEY")
if len(clientKey) > 0 {
config.TLSClientConfig.KeyFile = clientKey
}
cluster := os.Getenv("KUBECTL_PLUGINS_GLOBAL_FLAG_CLUSTER")
if len(cluster) > 0 {
// TODO(jvallejo): figure out how to override kubeconfig options
}
user := os.Getenv("KUBECTL_PLUGINS_GLOBAL_FLAG_USER")
if len(user) > 0 {
// TODO(jvallejo): figure out how to override kubeconfig options
}
// user / misc request config
requestTimeout := os.Getenv("KUBECTL_PLUGINS_GLOBAL_FLAG_REQUEST_TIMEOUT")
if len(requestTimeout) > 0 {
t, err := time.ParseDuration(requestTimeout)
if err != nil {
return errors.New(fmt.Sprintf("%v", err))
}
config.Timeout = t
}
server := os.Getenv("KUBECTL_PLUGINS_GLOBAL_FLAG_SERVER")
if len(server) > 0 {
config.ServerName = server
}
token := os.Getenv("KUBECTL_PLUGINS_GLOBAL_FLAG_TOKEN")
if len(token) > 0 {
config.BearerToken = token
}
username := os.Getenv("KUBECTL_PLUGINS_GLOBAL_FLAG_USERNAME")
if len(username) > 0 {
config.Username = username
}
password := os.Getenv("KUBECTL_PLUGINS_GLOBAL_FLAG_PASSWORD")
if len(password) > 0 {
config.Password = password
}
return nil
}
|
[
"\"HOME\"",
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"USERPROFILE\"",
"\"KUBECONFIG\"",
"\"KUBECTL_PLUGINS_GLOBAL_FLAG_CONFIG\"",
"\"KUBECTL_PLUGINS_GLOBAL_FLAG_KUBECONFIG\"",
"\"KUBECTL_PLUGINS_GLOBAL_FLAG_NAMESPACE\"",
"\"KUBECTL_PLUGINS_GLOBAL_FLAG_CONTEXT\"",
"\"KUBECTL_PLUGINS_GLOBAL_FLAG_AS\"",
"\"KUBECTL_PLUGINS_GLOBAL_FLAG_AS_GROUP\"",
"\"KUBECTL_PLUGINS_GLOBAL_FLAG_CERTIFICATE_AUTHORITY\"",
"\"KUBECTL_PLUGINS_GLOBAL_FLAG_CLIENT_CERTIFICATE\"",
"\"KUBECTL_PLUGINS_GLOBAL_FLAG_CLIENT_KEY\"",
"\"KUBECTL_PLUGINS_GLOBAL_FLAG_CLUSTER\"",
"\"KUBECTL_PLUGINS_GLOBAL_FLAG_USER\"",
"\"KUBECTL_PLUGINS_GLOBAL_FLAG_REQUEST_TIMEOUT\"",
"\"KUBECTL_PLUGINS_GLOBAL_FLAG_SERVER\"",
"\"KUBECTL_PLUGINS_GLOBAL_FLAG_TOKEN\"",
"\"KUBECTL_PLUGINS_GLOBAL_FLAG_USERNAME\"",
"\"KUBECTL_PLUGINS_GLOBAL_FLAG_PASSWORD\""
] |
[] |
[
"KUBECTL_PLUGINS_GLOBAL_FLAG_PASSWORD",
"KUBECTL_PLUGINS_GLOBAL_FLAG_CLIENT_KEY",
"KUBECTL_PLUGINS_GLOBAL_FLAG_CONTEXT",
"KUBECTL_PLUGINS_GLOBAL_FLAG_KUBECONFIG",
"KUBECTL_PLUGINS_GLOBAL_FLAG_USER",
"KUBECTL_PLUGINS_GLOBAL_FLAG_CERTIFICATE_AUTHORITY",
"KUBECTL_PLUGINS_GLOBAL_FLAG_AS",
"HOMEPATH",
"KUBECTL_PLUGINS_GLOBAL_FLAG_REQUEST_TIMEOUT",
"USERPROFILE",
"KUBECTL_PLUGINS_GLOBAL_FLAG_NAMESPACE",
"KUBECTL_PLUGINS_GLOBAL_FLAG_SERVER",
"KUBECTL_PLUGINS_GLOBAL_FLAG_USERNAME",
"KUBECONFIG",
"HOMEDRIVE",
"KUBECTL_PLUGINS_GLOBAL_FLAG_CONFIG",
"KUBECTL_PLUGINS_GLOBAL_FLAG_TOKEN",
"KUBECTL_PLUGINS_GLOBAL_FLAG_AS_GROUP",
"KUBECTL_PLUGINS_GLOBAL_FLAG_CLUSTER",
"KUBECTL_PLUGINS_GLOBAL_FLAG_CLIENT_CERTIFICATE",
"HOME"
] |
[]
|
["KUBECTL_PLUGINS_GLOBAL_FLAG_PASSWORD", "KUBECTL_PLUGINS_GLOBAL_FLAG_CLIENT_KEY", "KUBECTL_PLUGINS_GLOBAL_FLAG_CONTEXT", "KUBECTL_PLUGINS_GLOBAL_FLAG_KUBECONFIG", "KUBECTL_PLUGINS_GLOBAL_FLAG_USER", "KUBECTL_PLUGINS_GLOBAL_FLAG_CERTIFICATE_AUTHORITY", "KUBECTL_PLUGINS_GLOBAL_FLAG_AS", "HOMEPATH", "KUBECTL_PLUGINS_GLOBAL_FLAG_REQUEST_TIMEOUT", "USERPROFILE", "KUBECTL_PLUGINS_GLOBAL_FLAG_NAMESPACE", "KUBECTL_PLUGINS_GLOBAL_FLAG_SERVER", "KUBECTL_PLUGINS_GLOBAL_FLAG_USERNAME", "KUBECONFIG", "HOMEDRIVE", "KUBECTL_PLUGINS_GLOBAL_FLAG_CONFIG", "KUBECTL_PLUGINS_GLOBAL_FLAG_TOKEN", "KUBECTL_PLUGINS_GLOBAL_FLAG_AS_GROUP", "KUBECTL_PLUGINS_GLOBAL_FLAG_CLUSTER", "KUBECTL_PLUGINS_GLOBAL_FLAG_CLIENT_CERTIFICATE", "HOME"]
|
go
| 21 | 0 | |
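The comment block inside initClientAndConfig above spells out the kubeconfig resolution order: the plugin's --config flag wins, then --kubeconfig, then the KUBECONFIG environment variable, then ~/.kube/config. The helper below is a minimal sketch of that order in isolation; it is not part of the original file and the function name is made up.
package kubernetes

import (
	"os"
	"path/filepath"
	"runtime"
)

// resolveKubeconfigPath applies the precedence described in initClientAndConfig:
// later assignments override earlier ones, so the plugin --config flag wins.
func resolveKubeconfigPath() string {
	home := os.Getenv("HOME")
	if runtime.GOOS == "windows" {
		home = os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
		if home == "" {
			home = os.Getenv("USERPROFILE")
		}
	}
	path := filepath.Join(home, ".kube", "config")

	if v := os.Getenv("KUBECONFIG"); v != "" {
		path = v
	}
	if v := os.Getenv("KUBECTL_PLUGINS_GLOBAL_FLAG_KUBECONFIG"); v != "" {
		path = v
	}
	if v := os.Getenv("KUBECTL_PLUGINS_GLOBAL_FLAG_CONFIG"); v != "" {
		path = v
	}
	return path
}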
tests/ignite/handlers/test_checkpoint.py
|
import os
import warnings
from unittest.mock import MagicMock
import pytest
import torch
import torch.nn as nn
import ignite.distributed as idist
from ignite.engine import Engine, Events, State
from ignite.handlers import Checkpoint, DiskSaver, ModelCheckpoint
from ignite.handlers.checkpoint import BaseSaveHandler
_PREFIX = "PREFIX"
class DummyModel(nn.Module):
def __init__(self):
super(DummyModel, self).__init__()
self.net = nn.Linear(1, 1)
def forward(self, x):
return self.net(x)
class DummyPretrainedModel(nn.Module):
def __init__(self):
super(DummyPretrainedModel, self).__init__()
self.features = nn.Linear(4, 2, bias=False)
self.fc = nn.Linear(2, 1)
def forward(self, x):
x = self.features(x)
x = self.fc(x)
return x
def test_checkpoint_wrong_input():
with pytest.raises(TypeError, match=r"Argument `to_save` should be a dictionary"):
Checkpoint(12, lambda x: x, "prefix")
with pytest.raises(TypeError, match=r"Argument `to_save` should be a dictionary"):
Checkpoint([12], lambda x: x, "prefix")
with pytest.raises(ValueError, match=r"No objects to checkpoint."):
Checkpoint({}, lambda x: x, "prefix")
model = DummyModel()
to_save = {"model": model}
with pytest.raises(TypeError, match=r"Argument `save_handler` should be callable"):
Checkpoint(to_save, 12, "prefix")
with pytest.raises(
ValueError, match=r"If `score_name` is provided, then `score_function` should be also provided."
):
Checkpoint(to_save, lambda x: x, score_name="acc")
with pytest.raises(TypeError, match=r"global_step_transform should be a function."):
Checkpoint(to_save, lambda x: x, score_function=lambda e: 123, score_name="acc", global_step_transform=123)
with pytest.warns(UserWarning, match=r"Argument archived is deprecated"):
Checkpoint(to_save, lambda x: x, score_function=lambda e: 123, score_name="acc", archived=True)
def test_checkpoint_score_function_wrong_output():
model = DummyModel()
to_save = {"model": model}
checkpointer = Checkpoint(to_save, lambda x: x, score_function=lambda e: {"1": 1}, score_name="acc")
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
with pytest.raises(ValueError, match=r"Output of score_function should be a number"):
checkpointer(trainer)
def test_checkpoint_default():
def _test(to_save, obj, name):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(to_save, save_handler=save_handler)
assert checkpointer.last_checkpoint is None
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
checkpointer(trainer)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": None, "priority": 0}
save_handler.assert_called_with(obj, "{}_0.pt".format(name), metadata)
trainer.state.epoch = 12
trainer.state.iteration = 1234
checkpointer(trainer)
assert save_handler.call_count == 2
metadata["priority"] = 1234
save_handler.assert_called_with(obj, "{}_1234.pt".format(name), metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with("{}_0.pt".format(name))
assert checkpointer.last_checkpoint == "{}_1234.pt".format(name)
model = DummyModel()
to_save = {"model": model}
_test(to_save, model.state_dict(), "model")
model = DummyModel()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
to_save = {"model": model, "optimizer": optimizer}
_test(to_save, {"model": model.state_dict(), "optimizer": optimizer.state_dict()}, "checkpoint")
def test_checkpoint_with_dp():
model = DummyModel()
dp_model = nn.DataParallel(model)
to_save = {"model": dp_model}
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(to_save, save_handler=save_handler)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
checkpointer(trainer)
assert save_handler.call_count == 1
metadata = {"basename": "model", "score_name": None, "priority": 0}
save_handler.assert_called_with(model.state_dict(), "model_0.pt", metadata)
def test_checkpoint_with_global_step_transform():
def _test(filename_prefix, to_save, obj, name):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(
to_save,
save_handler=save_handler,
filename_prefix=filename_prefix,
global_step_transform=lambda e, _: e.state.epoch,
)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=1, iteration=1)
checkpointer(trainer)
assert save_handler.call_count == 1
if len(filename_prefix) > 0:
filename_prefix += "_"
metadata = {"basename": "{}{}".format(filename_prefix, name), "score_name": None, "priority": 1}
save_handler.assert_called_with(obj, "{}{}_1.pt".format(filename_prefix, name), metadata)
trainer.state.epoch = 12
trainer.state.iteration = 1234
checkpointer(trainer)
assert save_handler.call_count == 2
metadata["priority"] = 1234
save_handler.assert_called_with(obj, "{}{}_12.pt".format(filename_prefix, name), metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with("{}{}_1.pt".format(filename_prefix, name))
assert checkpointer.last_checkpoint == "{}{}_12.pt".format(filename_prefix, name)
for prefix in ["", "dummytask"]:
model = DummyModel()
to_save = {"model": model}
_test(prefix, to_save, model.state_dict(), "model")
model = DummyModel()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
to_save = {"model": model, "optimizer": optimizer}
_test(prefix, to_save, {"model": model.state_dict(), "optimizer": optimizer.state_dict()}, "checkpoint")
def test_checkpoint_with_score_function():
def _test(to_save, obj, name):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(to_save, save_handler=save_handler, score_function=lambda e: e.state.score)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=1, iteration=1, score=0.77)
checkpointer(trainer)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": None, "priority": 0.77}
save_handler.assert_called_with(obj, "{}_0.7700.pt".format(name), metadata)
trainer.state.epoch = 12
trainer.state.iteration = 1234
trainer.state.score = 0.78
checkpointer(trainer)
assert save_handler.call_count == 2
metadata["priority"] = 0.78
save_handler.assert_called_with(obj, "{}_0.7800.pt".format(name), metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with("{}_0.7700.pt".format(name))
assert checkpointer.last_checkpoint == "{}_0.7800.pt".format(name)
model = DummyModel()
to_save = {"model": model}
_test(to_save, model.state_dict(), "model")
model = DummyModel()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
to_save = {"model": model, "optimizer": optimizer}
_test(to_save, {"model": model.state_dict(), "optimizer": optimizer.state_dict()}, "checkpoint")
def test_checkpoint_with_score_name_and_function():
def _test(to_save, obj, name):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(
to_save, save_handler=save_handler, score_name="loss", score_function=lambda e: e.state.score
)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=1, iteration=1, score=-0.77)
checkpointer(trainer)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": "loss", "priority": -0.77}
save_handler.assert_called_with(obj, "{}_loss=-0.7700.pt".format(name), metadata)
trainer.state.epoch = 12
trainer.state.iteration = 1234
trainer.state.score = -0.76
checkpointer(trainer)
assert save_handler.call_count == 2
metadata["priority"] = -0.76
save_handler.assert_called_with(obj, "{}_loss=-0.7600.pt".format(name), metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with("{}_loss=-0.7700.pt".format(name))
assert checkpointer.last_checkpoint == "{}_loss=-0.7600.pt".format(name)
model = DummyModel()
to_save = {"model": model}
_test(to_save, model.state_dict(), "model")
model = DummyModel()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
to_save = {"model": model, "optimizer": optimizer}
_test(to_save, {"model": model.state_dict(), "optimizer": optimizer.state_dict()}, "checkpoint")
def test_checkpoint_with_int_score():
def _test(to_save, obj, name, score_name=None):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(
to_save, save_handler=save_handler, score_name=score_name, score_function=lambda e: e.state.epoch
)
if score_name is None:
score_name = ""
else:
score_name += "="
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=1, iteration=1)
checkpointer(trainer)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": score_name[:-1] if len(score_name) > 0 else None, "priority": 1}
save_handler.assert_called_with(obj, "{}_{}1.pt".format(name, score_name), metadata)
trainer.state.epoch = 12
trainer.state.iteration = 1234
checkpointer(trainer)
assert save_handler.call_count == 2
metadata["priority"] = 12
save_handler.assert_called_with(obj, "{}_{}12.pt".format(name, score_name), metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with("{}_{}1.pt".format(name, score_name))
assert checkpointer.last_checkpoint == "{}_{}12.pt".format(name, score_name)
model = DummyModel()
to_save = {"model": model}
_test(to_save, model.state_dict(), "model")
_test(to_save, model.state_dict(), "model", "epoch")
model = DummyModel()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
to_save = {"model": model, "optimizer": optimizer}
_test(to_save, {"model": model.state_dict(), "optimizer": optimizer.state_dict()}, "checkpoint")
_test(to_save, {"model": model.state_dict(), "optimizer": optimizer.state_dict()}, "checkpoint", "epoch")
def test_checkpoint_with_score_function_and_trainer_epoch():
def _test(to_save, obj, name):
save_handler = MagicMock(spec=BaseSaveHandler)
trainer = Engine(lambda e, b: None)
evaluator = Engine(lambda e, b: None)
trainer.state = State(epoch=11, iteration=1)
checkpointer = Checkpoint(
to_save,
save_handler=save_handler,
global_step_transform=lambda _1, _2: trainer.state.epoch,
score_function=lambda e: e.state.metrics["val_acc"],
)
evaluator.state = State(epoch=1, iteration=1000, metrics={"val_acc": 0.77})
checkpointer(evaluator)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": None, "priority": 0.77}
save_handler.assert_called_with(obj, "{}_11_0.7700.pt".format(name), metadata)
trainer.state.epoch = 12
evaluator.state.metrics["val_acc"] = 0.78
checkpointer(evaluator)
assert save_handler.call_count == 2
metadata["priority"] = 0.78
save_handler.assert_called_with(obj, "{}_12_0.7800.pt".format(name), metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with("{}_11_0.7700.pt".format(name))
assert checkpointer.last_checkpoint == "{}_12_0.7800.pt".format(name)
model = DummyModel()
to_save = {"model": model}
_test(to_save, model.state_dict(), "model")
def test_checkpoint_with_score_name_and_function_and_trainer_epoch():
def _test(to_save, obj, name):
save_handler = MagicMock(spec=BaseSaveHandler)
trainer = Engine(lambda e, b: None)
evaluator = Engine(lambda e, b: None)
trainer.state = State(epoch=11, iteration=1)
checkpointer = Checkpoint(
to_save,
save_handler=save_handler,
global_step_transform=lambda _1, _2: trainer.state.epoch,
score_name="val_acc",
score_function=lambda e: e.state.metrics["val_acc"],
)
evaluator.state = State(epoch=1, iteration=1000, metrics={"val_acc": 0.77})
checkpointer(evaluator)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": "val_acc", "priority": 0.77}
save_handler.assert_called_with(obj, "{}_11_val_acc=0.7700.pt".format(name), metadata)
trainer.state.epoch = 12
evaluator.state.metrics["val_acc"] = 0.78
checkpointer(evaluator)
assert save_handler.call_count == 2
metadata["priority"] = 0.78
save_handler.assert_called_with(obj, "{}_12_val_acc=0.7800.pt".format(name), metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with("{}_11_val_acc=0.7700.pt".format(name))
assert checkpointer.last_checkpoint == "{}_12_val_acc=0.7800.pt".format(name)
model = DummyModel()
to_save = {"model": model}
_test(to_save, model.state_dict(), "model")
def test_checkpoint_last_checkpoint():
save_handler = MagicMock(spec=BaseSaveHandler)
to_save = {"model": DummyModel()}
checkpointer = Checkpoint(to_save, save_handler=save_handler, n_saved=None)
trainer = Engine(lambda e, b: None)
for i in range(10):
trainer.state = State(epoch=1, iteration=i)
checkpointer(trainer)
assert save_handler.call_count == 10
assert checkpointer.last_checkpoint == "{}_9.pt".format("model")
def test_checkpoint_last_checkpoint_on_score():
save_handler = MagicMock(spec=BaseSaveHandler)
to_save = {"model": DummyModel()}
checkpointer = Checkpoint(
to_save,
save_handler=save_handler,
n_saved=None,
score_name="val_acc",
score_function=lambda e: e.state.metrics["val_acc"],
)
trainer = Engine(lambda e, b: None)
val_acc = 0.0
for i in range(10):
val_acc = i * 0.1
trainer.state = State(epoch=1, iteration=i, metrics={"val_acc": val_acc})
checkpointer(trainer)
assert save_handler.call_count == 10
assert checkpointer.last_checkpoint == "{}_val_acc=0.9000.pt".format("model")
def test_checkpoint_save_handler_callable():
def save_handler(c, f):
assert f == "model_12.pt"
to_save = {"model": DummyModel()}
checkpointer = Checkpoint(to_save, save_handler=save_handler,)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=1, iteration=12)
checkpointer(trainer)
def test_model_checkpoint_args_validation(dirname):
existing = os.path.join(dirname, "existing_dir")
nonempty = os.path.join(dirname, "nonempty")
os.makedirs(existing)
os.makedirs(nonempty)
with open(os.path.join(nonempty, "{}_name_0.pt".format(_PREFIX)), "w"):
pass
with pytest.raises(ValueError, match=r"with extension '.pt' are already present "):
ModelCheckpoint(nonempty, _PREFIX)
with pytest.raises(ValueError, match=r"Argument save_interval is deprecated and should be None"):
ModelCheckpoint(existing, _PREFIX, save_interval=42)
with pytest.raises(ValueError, match=r"Directory path '\S+' is not found"):
ModelCheckpoint(os.path.join(dirname, "non_existing_dir"), _PREFIX, create_dir=False)
with pytest.raises(ValueError, match=r"Argument save_as_state_dict is deprecated and should be True"):
ModelCheckpoint(existing, _PREFIX, create_dir=False, save_as_state_dict=False)
with pytest.raises(ValueError, match=r"If `score_name` is provided, then `score_function` "):
ModelCheckpoint(existing, _PREFIX, create_dir=False, score_name="test")
with pytest.raises(TypeError, match=r"global_step_transform should be a function"):
ModelCheckpoint(existing, _PREFIX, create_dir=False, global_step_transform=1234)
with pytest.warns(UserWarning, match=r"Argument archived is deprecated"):
ModelCheckpoint(existing, _PREFIX, create_dir=False, archived=True)
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False)
assert h.last_checkpoint is None
with pytest.raises(RuntimeError, match=r"No objects to checkpoint found."):
h(None, [])
def test_model_checkpoint_simple_recovery(dirname):
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=1)
model = DummyModel()
to_save = {"model": model}
h(engine, to_save)
fname = h.last_checkpoint
assert isinstance(fname, str)
assert os.path.join(dirname, _PREFIX) in fname
assert os.path.exists(fname)
loaded_objects = torch.load(fname)
assert loaded_objects == model.state_dict()
def test_model_checkpoint_simple_recovery_from_existing_non_empty(dirname):
def _test(ext, require_empty):
previous_fname = os.path.join(dirname, "{}_{}_{}{}".format(_PREFIX, "obj", 1, ext))
with open(previous_fname, "w") as f:
f.write("test")
h = ModelCheckpoint(dirname, _PREFIX, create_dir=True, require_empty=require_empty)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=1)
model = DummyModel()
to_save = {"model": model}
h(engine, to_save)
fname = h.last_checkpoint
ext = ".pt"
assert isinstance(fname, str)
assert os.path.join(dirname, "{}_{}_{}{}".format(_PREFIX, "model", 1, ext)) == fname
assert os.path.exists(fname)
assert os.path.exists(previous_fname)
loaded_objects = torch.load(fname)
assert loaded_objects == model.state_dict()
os.remove(fname)
_test(".txt", require_empty=True)
_test(".pt", require_empty=False)
def test_disk_saver_atomic(dirname):
model = DummyModel()
to_save_serializable = {"model": model}
to_save_non_serializable = {"model": lambda x: x}
def _test_existance(atomic, _to_save, expected):
saver = DiskSaver(dirname, atomic=atomic, create_dir=False, require_empty=False)
fname = "test.pt"
try:
with warnings.catch_warnings():
# Ignore torch/serialization.py:292: UserWarning: Couldn't retrieve source code for container of type
# DummyModel. It won't be checked for correctness upon loading.
warnings.simplefilter("ignore", category=UserWarning)
saver(_to_save, fname)
except Exception:
pass
fp = os.path.join(saver.dirname, fname)
assert os.path.exists(fp) == expected
if expected:
saver.remove(fname)
_test_existance(atomic=False, _to_save=to_save_serializable, expected=True)
_test_existance(atomic=False, _to_save=to_save_non_serializable, expected=True)
_test_existance(atomic=True, _to_save=to_save_serializable, expected=True)
_test_existance(atomic=True, _to_save=to_save_non_serializable, expected=False)
def test_last_k(dirname):
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=0)
model = DummyModel()
to_save = {"model": model}
h(engine, to_save)
for i in range(1, 9):
engine.state.iteration = i
h(engine, to_save)
expected = ["{}_{}_{}.pt".format(_PREFIX, "model", i) for i in [7, 8]]
assert sorted(os.listdir(dirname)) == expected, "{} vs {}".format(sorted(os.listdir(dirname)), expected)
def test_disabled_n_saved(dirname):
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=None)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=0)
model = DummyModel()
to_save = {"model": model}
num_iters = 100
for i in range(num_iters):
engine.state.iteration = i
h(engine, to_save)
saved_files = sorted(os.listdir(dirname))
assert len(saved_files) == num_iters, "{}".format(saved_files)
expected = sorted(["{}_{}_{}.pt".format(_PREFIX, "model", i) for i in range(num_iters)])
assert saved_files == expected, "{} vs {}".format(saved_files, expected)
def test_best_k(dirname):
scores = iter([1.2, -2.0, 3.1, -4.0])
def score_function(_):
return next(scores)
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2, score_function=score_function)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=0)
model = DummyModel()
to_save = {"model": model}
for _ in range(4):
h(engine, to_save)
expected = ["{}_{}_{:.4f}.pt".format(_PREFIX, "model", i) for i in [1.2, 3.1]]
assert sorted(os.listdir(dirname)) == expected
def test_best_k_with_suffix(dirname):
scores = [0.3456789, 0.1234, 0.4567, 0.134567]
scores_iter = iter(scores)
def score_function(engine):
return next(scores_iter)
h = ModelCheckpoint(
dirname, _PREFIX, create_dir=False, n_saved=2, score_function=score_function, score_name="val_loss"
)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=0)
model = DummyModel()
to_save = {"model": model}
for _ in range(4):
engine.state.epoch += 1
h(engine, to_save)
expected = ["{}_{}_val_loss={:.4}.pt".format(_PREFIX, "model", scores[e - 1]) for e in [1, 3]]
assert sorted(os.listdir(dirname)) == expected
def test_removes_each_score_at_most_once(dirname):
scores = [0, 1, 1, 2, 3]
scores_iter = iter(scores)
def score_function(_):
return next(scores_iter)
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2, score_function=score_function)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=0)
model = DummyModel()
to_save = {"model": model}
for _ in range(len(scores)):
h(engine, to_save)
    # If a score was removed multiple times, the code above would have raised a
    # FileNotFoundError. So this just tests the absence of such a failure
    # without further assertions.
def test_with_engine(dirname):
def update_fn(_1, _2):
pass
name = "model"
engine = Engine(update_fn)
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2)
model = DummyModel()
to_save = {"model": model}
engine.add_event_handler(Events.EPOCH_COMPLETED, handler, to_save)
engine.run([0], max_epochs=4)
expected = ["{}_{}_{}.pt".format(_PREFIX, name, i) for i in [3, 4]]
assert sorted(os.listdir(dirname)) == expected
def test_with_state_dict(dirname):
def update_fn(_1, _2):
pass
engine = Engine(update_fn)
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
model = DummyModel()
to_save = {"model": model}
engine.add_event_handler(Events.EPOCH_COMPLETED, handler, to_save)
engine.run([0], max_epochs=4)
saved_model = os.path.join(dirname, os.listdir(dirname)[0])
load_model = torch.load(saved_model)
assert not isinstance(load_model, DummyModel)
assert isinstance(load_model, dict)
model_state_dict = model.state_dict()
loaded_model_state_dict = load_model
for key in model_state_dict.keys():
assert key in loaded_model_state_dict
model_value = model_state_dict[key]
loaded_model_value = loaded_model_state_dict[key]
assert model_value.numpy() == loaded_model_value.numpy()
def test_valid_state_dict_save(dirname):
model = DummyModel()
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=0)
to_save = {"name": 42}
with pytest.raises(TypeError, match=r"should have `state_dict` method"):
h(engine, to_save)
to_save = {"name": model}
try:
h(engine, to_save)
except ValueError:
pytest.fail("Unexpected ValueError")
def _test_save_model_optimizer_lr_scheduler_with_state_dict(device, dirname, on_zero_rank=False):
torch.manual_seed(23)
model = DummyModel().to(device)
optim = torch.optim.SGD(model.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=0.5)
def update_fn(engine, batch):
x = torch.rand((4, 1)).to(device)
optim.zero_grad()
y = model(x)
loss = y.pow(2.0).sum()
loss.backward()
if idist.has_xla_support:
import torch_xla.core.xla_model as xm
xm.optimizer_step(optim, barrier=True)
else:
optim.step()
lr_scheduler.step()
engine = Engine(update_fn)
if (not on_zero_rank) or (on_zero_rank and idist.get_rank() == 0):
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=True, n_saved=1)
engine.add_event_handler(
Events.EPOCH_COMPLETED, handler, {"model": model, "optimizer": optim, "lr_scheduler": lr_scheduler}
)
engine.run([0], max_epochs=4)
idist.barrier()
saved_objects = sorted(os.listdir(dirname))
# saved object is ['PREFIX_checkpoint_3.pt', ]
saved_checkpoint = os.path.join(dirname, saved_objects[0])
if idist.has_xla_support:
device = "cpu"
loaded_obj = torch.load(saved_checkpoint, map_location=device)
for f in ["model", "optimizer", "lr_scheduler"]:
assert f in loaded_obj
loaded_model_state_dict = loaded_obj["model"]
loaded_optimizer_state_dict = loaded_obj["optimizer"]
loaded_lr_scheduler_state_dict = loaded_obj["lr_scheduler"]
assert isinstance(loaded_model_state_dict, dict)
assert isinstance(loaded_optimizer_state_dict, dict)
assert isinstance(loaded_lr_scheduler_state_dict, dict)
# Specifically move device to CPU first
model_state_dict = model.cpu().state_dict()
for key in model_state_dict.keys():
assert key in loaded_model_state_dict
model_value = model_state_dict[key]
loaded_model_value = loaded_model_state_dict[key]
assert model_value.cpu().numpy() == loaded_model_value.cpu().numpy()
optim_state_dict = optim.state_dict()
for key in optim_state_dict.keys():
assert key in loaded_optimizer_state_dict
optim_value = optim_state_dict[key]
loaded_optim_value = loaded_optimizer_state_dict[key]
if idist.get_rank() == 0:
assert optim_value == loaded_optim_value
lr_scheduler_state_dict = lr_scheduler.state_dict()
for key in lr_scheduler_state_dict.keys():
assert key in loaded_lr_scheduler_state_dict
lr_scheduler_value = lr_scheduler_state_dict[key]
loaded_lr_scheduler_value = loaded_lr_scheduler_state_dict[key]
assert lr_scheduler_value == loaded_lr_scheduler_value
def test_save_model_optimizer_lr_scheduler_with_state_dict(dirname):
_test_save_model_optimizer_lr_scheduler_with_state_dict("cpu", dirname)
def test_checkpoint_load_objects():
with pytest.raises(TypeError, match=r"Argument checkpoint should be a dictionary"):
Checkpoint.load_objects({}, [])
with pytest.raises(TypeError, match=r"should have `load_state_dict` method"):
Checkpoint.load_objects({"a": None}, {"a": None})
model = DummyModel()
to_load = {"model": model, "another_model": model}
with pytest.raises(ValueError, match=r"from `to_load` is not found in the checkpoint"):
Checkpoint.load_objects(to_load, {})
model = DummyModel()
to_load = {"model": model}
model2 = DummyModel()
chkpt = {"model": model2.state_dict()}
Checkpoint.load_objects(to_load, chkpt)
assert model.state_dict() == model2.state_dict()
def test_checkpoint_load_objects_from_saved_file(dirname):
def _get_single_obj_to_save():
model = DummyModel()
to_save = {"model": model}
return to_save
def _get_multiple_objs_to_save():
model = DummyModel()
optim = torch.optim.SGD(model.parameters(), lr=0.001)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=0.5)
to_save = {"model": model, "optimizer": optim, "lr_scheduler": lr_scheduler}
return to_save
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
# case: multiple objects
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
to_save = _get_multiple_objs_to_save()
handler(trainer, to_save)
fname = handler.last_checkpoint
assert isinstance(fname, str)
assert os.path.join(dirname, _PREFIX) in fname
assert os.path.exists(fname)
loaded_objects = torch.load(fname)
Checkpoint.load_objects(to_save, loaded_objects)
os.remove(fname)
# case: saved multiple objects, loaded single object
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
to_save = _get_multiple_objs_to_save()
handler(trainer, to_save)
fname = handler.last_checkpoint
assert isinstance(fname, str)
assert os.path.join(dirname, _PREFIX) in fname
assert os.path.exists(fname)
loaded_objects = torch.load(fname)
to_load = {"model": to_save["model"]}
Checkpoint.load_objects(to_load, loaded_objects)
os.remove(fname)
# case: single object
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
to_save = _get_single_obj_to_save()
handler(trainer, to_save)
fname = handler.last_checkpoint
assert isinstance(fname, str)
assert os.path.join(dirname, _PREFIX) in fname
assert os.path.exists(fname)
loaded_objects = torch.load(fname)
Checkpoint.load_objects(to_save, loaded_objects)
def test_load_checkpoint_with_different_num_classes(dirname):
model = DummyPretrainedModel()
to_save_single_object = {"model": model}
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
handler(trainer, to_save_single_object)
fname = handler.last_checkpoint
loaded_checkpoint = torch.load(fname)
to_load_single_object = {"pretrained_features": model.features}
with pytest.raises(RuntimeError):
Checkpoint.load_objects(to_load_single_object, loaded_checkpoint)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
Checkpoint.load_objects(to_load_single_object, loaded_checkpoint, strict=False, blah="blah")
loaded_weights = to_load_single_object["pretrained_features"].state_dict()["weight"]
assert torch.all(model.state_dict()["features.weight"].eq(loaded_weights))
def test_disksaver_wrong_input(dirname):
with pytest.raises(ValueError, match=r"Directory path '\S+' is not found"):
DiskSaver("/tmp/non-existing-folder", create_dir=False)
def _test(ext):
previous_fname = os.path.join(dirname, "{}_{}_{}{}".format(_PREFIX, "obj", 1, ext))
with open(previous_fname, "w") as f:
f.write("test")
with pytest.raises(ValueError, match=r"with extension '.pt' are already present"):
DiskSaver(dirname, require_empty=True)
_test(".pt")
def _test_checkpoint_with_ddp(device):
torch.manual_seed(0)
model = DummyModel().to(device)
device_ids = (
None if "cpu" in device.type else [device,]
)
ddp_model = nn.parallel.DistributedDataParallel(model, device_ids=device_ids)
to_save = {"model": ddp_model}
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(to_save, save_handler=save_handler)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
checkpointer(trainer)
assert save_handler.call_count == 1
metadata = {"basename": "model", "score_name": None, "priority": 0}
save_handler.assert_called_with(model.state_dict(), "model_0.pt", metadata)
@pytest.mark.distributed
def test_distrib_cpu(distributed_context_single_node_gloo, get_rank_zero_dirname):
device = torch.device("cpu")
dirname = get_rank_zero_dirname()
_test_save_model_optimizer_lr_scheduler_with_state_dict(device, os.path.join(dirname, "1"))
_test_save_model_optimizer_lr_scheduler_with_state_dict(device, os.path.join(dirname, "2"), on_zero_rank=True)
_test_checkpoint_with_ddp(device)
@pytest.mark.distributed
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_gpu(distributed_context_single_node_nccl, get_rank_zero_dirname):
device = idist.device()
dirname = get_rank_zero_dirname()
_test_save_model_optimizer_lr_scheduler_with_state_dict(device, os.path.join(dirname, "1"))
_test_save_model_optimizer_lr_scheduler_with_state_dict("cpu", os.path.join(dirname, "2"), on_zero_rank=True)
_test_checkpoint_with_ddp(device=device)
def _test_tpu_saves_to_cpu(device, dirname):
torch.manual_seed(0)
h = ModelCheckpoint(dirname, _PREFIX)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=1)
model = DummyModel().to(device)
to_save = {"model": model}
h(engine, to_save)
idist.barrier()
fname = h.last_checkpoint
assert isinstance(fname, str)
assert os.path.join(dirname, _PREFIX) in fname
assert os.path.exists(fname)
loaded_objects = torch.load(fname)
assert loaded_objects == model.cpu().state_dict()
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Not on TPU device")
def test_distrib_single_device_xla(dirname):
assert "xla" in idist.device().type
_test_tpu_saves_to_cpu(idist.device(), os.path.join(dirname, "1"))
_test_save_model_optimizer_lr_scheduler_with_state_dict(idist.device(), os.path.join(dirname, "2"))
def _test_tpu_saves_to_cpu_nprocs(index, dirname):
device = idist.device()
_test_tpu_saves_to_cpu(device, os.path.join(dirname, "1"))
_test_save_model_optimizer_lr_scheduler_with_state_dict(device, os.path.join(dirname, "2"))
import time
# hack to have all proc properly sync:
time.sleep(1)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Not on TPU device")
def test_distrib_single_device_xla_nprocs(xmp_executor, dirname):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_tpu_saves_to_cpu_nprocs, args=(dirname,), nprocs=n)
|
[] |
[] |
[
"NUM_TPU_WORKERS"
] |
[]
|
["NUM_TPU_WORKERS"]
|
python
| 1 | 0 | |
test/go-tests/test_utils.go
|
package go_tests
import (
"errors"
"fmt"
"github.com/cloudevents/sdk-go/v2"
"github.com/google/uuid"
"github.com/imroc/req"
"github.com/keptn/go-utils/pkg/api/models"
"github.com/keptn/go-utils/pkg/common/osutils"
keptncommon "github.com/keptn/go-utils/pkg/lib/keptn"
keptnv2 "github.com/keptn/go-utils/pkg/lib/v0_2_0"
"github.com/keptn/kubernetes-utils/pkg"
"io/ioutil"
"net/http"
"os"
"strings"
"testing"
)
const (
KeptnSpecVersion = "0.2.0"
KeptnNamespaceEnvVar = "KEPTN_NAMESPACE"
DefaultKeptnNamespace = "keptn"
)
type APIEventSender struct {
}
func (sender *APIEventSender) SendEvent(event v2.Event) error {
_, err := ApiPOSTRequest("/v1/event", event)
return err
}
func CreateProject(projectName, shipyardFilePath string, recreateIfAlreadyThere bool) error {
resp, err := ApiGETRequest("/controlPlane/v1/project/" + projectName)
if err != nil {
return err
}
if resp.Response().StatusCode != http.StatusNotFound {
if recreateIfAlreadyThere {
// delete project if it exists
_, err = ExecuteCommand(fmt.Sprintf("keptn delete project %s", projectName))
if err != nil {
return err
}
} else {
return errors.New("project already exists")
}
}
_, err = ExecuteCommand(fmt.Sprintf("keptn create project %s --shipyard=./%s", projectName, shipyardFilePath))
return err
}
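// TriggerSequence starts a <stage>.<sequence> sequence by sending the corresponding
// triggered event to the Keptn API and returns the resulting keptn context. For example
// (illustrative values), stage "dev" and sequence "delivery" produce the event type
// "sh.keptn.event.dev.delivery.triggered".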
func TriggerSequence(projectName, serviceName, stageName, sequenceName string, eventData keptncommon.EventProperties) (string, error) {
source := "golang-test"
eventType := keptnv2.GetTriggeredEventType(stageName + "." + sequenceName)
if eventData == nil {
eventData = &keptnv2.EventData{}
}
eventData.SetProject(projectName)
eventData.SetService(serviceName)
eventData.SetStage(stageName)
resp, err := ApiPOSTRequest("/v1/event", models.KeptnContextExtendedCE{
Contenttype: "application/json",
Data: eventData,
ID: uuid.NewString(),
Shkeptnspecversion: KeptnSpecVersion,
Source: &source,
Specversion: "1.0",
Type: &eventType,
})
if err != nil {
return "", err
}
context := &models.EventContext{}
err = resp.ToJSON(context)
if err != nil {
return "", err
}
return *context.KeptnContext, nil
}
func ApiDELETERequest(path string) (*req.Resp, error) {
apiToken, keptnAPIURL, err := GetApiCredentials()
if err != nil {
return nil, err
}
authHeader := getAuthHeader(apiToken)
r, err := req.Delete(keptnAPIURL+path, authHeader)
if err != nil {
return nil, err
}
return r, nil
}
func getAuthHeader(apiToken string) req.Header {
authHeader := req.Header{
"Accept": "application/json",
"x-token": apiToken,
}
return authHeader
}
func ApiGETRequest(path string) (*req.Resp, error) {
apiToken, keptnAPIURL, err := GetApiCredentials()
if err != nil {
return nil, err
}
authHeader := getAuthHeader(apiToken)
r, err := req.Get(keptnAPIURL+path, authHeader)
if err != nil {
return nil, err
}
return r, nil
}
func ApiPOSTRequest(path string, payload interface{}) (*req.Resp, error) {
apiToken, keptnAPIURL, err := GetApiCredentials()
if err != nil {
return nil, err
}
authHeader := getAuthHeader(apiToken)
r, err := req.Post(keptnAPIURL+path, authHeader, req.BodyJSON(payload))
if err != nil {
return nil, err
}
return r, nil
}
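// GetApiCredentials reads the Keptn API token from the keptn-api-token secret and resolves
// the API URL from the KEPTN_ENDPOINT environment variable, falling back to the IP of the
// api-gateway-nginx service when the variable is not set.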
func GetApiCredentials() (string, string, error) {
apiToken, err := keptnkubeutils.GetKeptnAPITokenFromSecret(false, GetKeptnNameSpaceFromEnv(), "keptn-api-token")
if err != nil {
return "", "", err
}
keptnAPIURL := os.Getenv("KEPTN_ENDPOINT")
if keptnAPIURL == "" {
serviceIP, err := keptnkubeutils.GetKeptnEndpointFromService(false, GetKeptnNameSpaceFromEnv(), "api-gateway-nginx")
if err != nil {
return "", "", err
}
keptnAPIURL = "http://" + serviceIP + "/api"
}
return apiToken, keptnAPIURL, nil
}
func ScaleDownUniform(deployments []string) error {
for _, deployment := range deployments {
if err := keptnkubeutils.ScaleDeployment(false, deployment, GetKeptnNameSpaceFromEnv(), 0); err != nil {
// log the error but continue
fmt.Println("could not scale down deployment: " + err.Error())
}
}
return nil
}
func ScaleUpUniform(deployments []string) error {
for _, deployment := range deployments {
if err := keptnkubeutils.ScaleDeployment(false, deployment, GetKeptnNameSpaceFromEnv(), 1); err != nil {
// log the error but continue
fmt.Println("could not scale up deployment: " + err.Error())
}
}
return nil
}
func RestartPod(deploymentName string) error {
return keptnkubeutils.RestartPodsWithSelector(false, GetKeptnNameSpaceFromEnv(), "app.kubernetes.io/name="+deploymentName)
}
func WaitForPodOfDeployment(deploymentName string) error {
return keptnkubeutils.WaitForDeploymentToBeRolledOut(false, deploymentName, GetKeptnNameSpaceFromEnv())
}
func CreateTmpShipyardFile(shipyardContent string) (string, error) {
return CreateTmpFile("shipyard-*.yaml", shipyardContent)
}
func CreateTmpFile(fileNamePattern, fileContent string) (string, error) {
file, err := ioutil.TempFile(".", fileNamePattern)
if err != nil {
return "", err
}
if err := ioutil.WriteFile(file.Name(), []byte(fileContent), os.ModeAppend); err != nil {
os.Remove(file.Name())
return "", err
}
return file.Name(), nil
}
func ExecuteCommand(cmd string) (string, error) {
split := strings.Split(cmd, " ")
if len(split) == 0 {
return "", errors.New("invalid command")
}
return keptnkubeutils.ExecuteCommand(split[0], split[1:])
}
func GetKeptnNameSpaceFromEnv() string {
return osutils.GetOSEnvOrDefault(KeptnNamespaceEnvVar, DefaultKeptnNamespace)
}
func GetLatestEventOfType(keptnContext, projectName, stage, eventType string) (*models.KeptnContextExtendedCE, error) {
resp, err := ApiGETRequest("/mongodb-datastore/event?project=" + projectName + "&keptnContext=" + keptnContext + "&stage=" + stage + "&type=" + eventType)
if err != nil {
return nil, err
}
events := &models.Events{}
if err := resp.ToJSON(events); err != nil {
return nil, err
}
if len(events.Events) > 0 {
return events.Events[0], nil
}
return nil, nil
}
func GetEventTraceForContext(keptnContext, projectName string) ([]*models.KeptnContextExtendedCE, error) {
resp, err := ApiGETRequest("/mongodb-datastore/event?project=" + projectName + "&keptnContext=" + keptnContext)
if err != nil {
return nil, err
}
events := &models.Events{}
if err := resp.ToJSON(events); err != nil {
return nil, err
}
if len(events.Events) > 0 {
return events.Events, nil
}
return nil, nil
}
func IsEqual(t *testing.T, expected, actual interface{}, property string) bool {
if expected != actual {
t.Logf("%s: expected %v, got %v", property, expected, actual)
return false
}
return true
}
func StringArr(el ...string) []string {
return el
}
|
[
"\"KEPTN_ENDPOINT\""
] |
[] |
[
"KEPTN_ENDPOINT"
] |
[]
|
["KEPTN_ENDPOINT"]
|
go
| 1 | 0 | |
plugin/src/py/android_screenshot_tests/test_common.py
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
from . import common
import subprocess
import sys
class TestCommon(unittest.TestCase):
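    # setUp snapshots os.environ and removes ANDROID_SDK/ANDROID_HOME so each test
    # fully controls how common.get_android_sdk() resolves the SDK path; tearDown
    # restores the original environment.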
def setUp(self):
self.android_sdk = common.get_android_sdk()
self._environ = dict(os.environ)
os.environ.pop('ANDROID_SDK', None)
os.environ.pop('ANDROID_HOME', None)
def tearDown(self):
os.environ.clear()
os.environ.update(self._environ)
def test_get_android_sdk_happy_path(self):
os.environ['ANDROID_SDK'] = '/tmp/foo'
self.assertEqual("/tmp/foo", common.get_android_sdk())
def test_tilde_is_expanded(self):
if sys.version_info >= (3,):
return
os.environ['ANDROID_SDK'] = '~/foobar'
home = os.environ['HOME']
self.assertEqual(os.path.join(home, 'foobar'), common.get_android_sdk())
def test_get_adb_can_run_in_subprocess(self):
os.environ['ANDROID_SDK'] = self.android_sdk
subprocess.check_call([common.get_adb(), "devices"])
|
[] |
[] |
[
"ANDROID_SDK",
"HOME"
] |
[]
|
["ANDROID_SDK", "HOME"]
|
python
| 2 | 0 | |
app/server.go
|
package app
import (
"github.com/toonsevrin/simplechain/types"
"net/http"
"github.com/gorilla/mux"
"encoding/json"
"io/ioutil"
"fmt"
"strings"
"os"
)
type Server struct {
App App
}
func (server *Server) Init() {
router := mux.NewRouter().StrictSlash(true)
router.Methods("GET").Path("/blocks").HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
		if isLocalhostOrPeer(*server, *request) {
json.NewEncoder(writer).Encode(server.App.Blockchain)
		} else {
json.NewEncoder(writer).Encode(Success{false, str("Unauthorized")})
}
})
router.Methods("POST").Path("/mineBlock").HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
if isLocalhostOrPeer(*server, *request) {
body, err := ioutil.ReadAll(request.Body)
if err != nil {
json.NewEncoder(writer).Encode(Success{false, str("An error occurred reading request body")})
fmt.Println(err.Error())
return
}
data := string(body)
block := server.App.createAndAddNextBlock(data)
json.NewEncoder(writer).Encode(Success{Success:true})
server.App.broadcast(block)
		} else {
json.NewEncoder(writer).Encode(Success{false, str("Unauthorized")})
}
})
router.Methods("POST").Path("/addBlock").HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
if isLocalhostOrPeer(*server, *request) {
block := types.Block{}
body, err := ioutil.ReadAll(request.Body)
if err != nil {
json.NewEncoder(writer).Encode(Success{false, str("An error occurred reading request body")})
fmt.Println(err.Error())
return
}
if err := json.Unmarshal(body, &block); err != nil {
json.NewEncoder(writer).Encode(Success{false, str("An error occurred parsing request body")})
fmt.Println(err.Error())
return
}
if server.App.HasBlock(block) {
json.NewEncoder(writer).Encode(Success{false, str("Block already exists")})
fmt.Println("Received block that already exists in db.")
return
}
if !block.IsValid() {
json.NewEncoder(writer).Encode(Success{false, str("Received invalid block")})
fmt.Println("Received invalid block")
return
}
if uint32(len(server.App.Blockchain)) == block.Index { //next block
if block.PreviousHash == server.App.getLatestBlock().Hash { //next block references your chain
server.App.AddBlock(block)
json.NewEncoder(writer).Encode(Success{Success: true})
server.App.broadcast(block)
} else {
server.fixChain(block, writer, request)
}
} else if uint32(len(server.App.Blockchain)) < block.Index { //block is in the future
server.fixChain(block, writer, request)
}
		} else {
json.NewEncoder(writer).Encode(Success{Success: false, Error: str("Unauthorized")})
}
})
router.Methods("POST").Path("/addPeer").HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
		if isLocalhost(*request) {
body, err := ioutil.ReadAll(request.Body)
if err != nil {
json.NewEncoder(writer).Encode(Success{Success: false, Error: str(err.Error())})
fmt.Println(err.Error())
return
}
peer := Peer{}
if err := json.Unmarshal(body, &peer); err != nil {
json.NewEncoder(writer).Encode(Success{Success: false, Error: str(err.Error())})
fmt.Println(err.Error())
return
}
server.App.Peers[peer.getUrl()] = &peer
server.App.PeerAddresses[peer.Ip] = peer.getUrl()
json.NewEncoder(writer).Encode(Success{Success: true})
		} else {
writer.Write([]byte("Only localhost can add peers"))
}
})
http.ListenAndServe(":" + getPort(), router)
}
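// fixChain is the recovery path used when an incoming block does not directly extend the
// local chain: the sending peer is looked up by its remote IP in PeerAddresses, its full
// chain is fetched via GET /blocks, and pickLongestChain decides whether to adopt it; on
// success the block is re-broadcast to the other peers.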
func (server *Server) fixChain(block types.Block, writer http.ResponseWriter, request *http.Request) {
	if url, exists := server.App.PeerAddresses[strings.Split(request.RemoteAddr, ":")[0]]; exists {
		RemoteChain := []types.Block{}
		req, err := http.NewRequest("GET", url+"/blocks", nil)
if err != nil {
json.NewEncoder(writer).Encode(Success{Success: false, Error: str(err.Error())})
fmt.Println(err.Error())
return
}
client := &http.Client{}
response, err := client.Do(req)
if err != nil {
fmt.Println(err.Error())
return
}
body, err := ioutil.ReadAll(response.Body)
if err != nil {
json.NewEncoder(writer).Encode(Success{Success: false, Error: str(err.Error())})
fmt.Println(err.Error())
return
}
if err := json.Unmarshal(body, &RemoteChain); err != nil {
json.NewEncoder(writer).Encode(Success{Success: false, Error: str(err.Error())})
fmt.Println(err.Error())
return
}
if (server.App.pickLongestChain(RemoteChain)) {
json.NewEncoder(writer).Encode(Success{Success: true})
server.App.broadcast(block)
} else {
json.NewEncoder(writer).Encode(Success{Success: false, Error: str("Peer has a longer chain")})
}
	} else {
json.NewEncoder(writer).Encode(Success{Success: false, Error: str("Peer is not mutually tethered")})
}
}
func getPort() string {
	port := os.Getenv("PORT")
	if port == "" {
		return "8080"
	}
	return port
}
func isLocalhostOrPeer(server Server, request http.Request) bool {
_, isPeer := server.App.PeerAddresses[request.RemoteAddr]
return isPeer || isLocalhost(request)
}
func isLocalhost(req http.Request) bool {
fmt.Println(req.RemoteAddr)
	return strings.Contains(req.RemoteAddr, "127.0.0.1") || strings.Contains(req.RemoteAddr, "[::1]") // ::1 is IPv6
}
type Success struct {
Success bool `json:"success"`
Error *string `json:"error"`
}
func str(str string) *string {
return &str
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
eos/eos.go
|
// eos stands for 'enhanced os', it mostly supplies 'eos.Open', which supports
// the 'itchfs://' scheme to access remote files
package eos
import (
"fmt"
"io"
"net/http"
"net/url"
"os"
"github.com/go-errors/errors"
"github.com/itchio/httpkit/httpfile"
"github.com/itchio/wharf/eos/option"
)
var debugHttpFile = os.Getenv("HTTPFILE_DEBUG") == "1"
type File interface {
io.Reader
io.Closer
io.ReaderAt
io.Seeker
Stat() (os.FileInfo, error)
}
type Handler interface {
Scheme() string
MakeResource(u *url.URL) (httpfile.GetURLFunc, httpfile.NeedsRenewalFunc, error)
}
var handlers = make(map[string]Handler)
func RegisterHandler(h Handler) error {
scheme := h.Scheme()
if handlers[scheme] != nil {
return fmt.Errorf("already have a handler for %s:", scheme)
}
handlers[h.Scheme()] = h
return nil
}
func DeregisterHandler(h Handler) {
delete(handlers, h.Scheme())
}
type simpleHTTPResource struct {
url string
}
func (shr *simpleHTTPResource) GetURL() (string, error) {
return shr.url, nil
}
func (shr *simpleHTTPResource) NeedsRenewal(res *http.Response, body []byte) bool {
return false
}
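// Open opens the named resource as a File: "/dev/null" yields an empty file, http/https
// URLs are served through httpfile, URLs whose scheme has a registered Handler go through
// that handler, and anything else falls back to os.Open. A minimal usage sketch (the URL
// is illustrative, not part of this package):
//
//	f, err := eos.Open("https://example.com/archive.zip")
//	if err != nil {
//		// handle error
//	}
//	defer f.Close()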
func Open(name string, opts ...option.Option) (File, error) {
settings := option.DefaultSettings()
for _, opt := range opts {
opt.Apply(settings)
}
if name == "/dev/null" {
return &emptyFile{}, nil
}
u, err := url.Parse(name)
if err != nil {
return nil, errors.Wrap(err, 1)
}
switch u.Scheme {
case "http", "https":
res := &simpleHTTPResource{name}
hf, err := httpfile.New(res.GetURL, res.NeedsRenewal, &httpfile.Settings{
Client: settings.HTTPClient,
})
if err != nil {
return nil, err
}
if debugHttpFile {
hf.Log = func(msg string) {
fmt.Fprintf(os.Stderr, "[hf] %s\n", msg)
}
}
return hf, nil
default:
handler := handlers[u.Scheme]
if handler == nil {
return os.Open(name)
}
getURL, needsRenewal, err := handler.MakeResource(u)
if err != nil {
return nil, errors.Wrap(err, 1)
}
hf, err := httpfile.New(getURL, needsRenewal, &httpfile.Settings{
Client: settings.HTTPClient,
})
if err != nil {
return nil, err
}
if debugHttpFile {
hf.Log = func(msg string) {
fmt.Fprintf(os.Stderr, "[hf] %s\n", msg)
}
}
return hf, nil
}
}
|
[
"\"HTTPFILE_DEBUG\""
] |
[] |
[
"HTTPFILE_DEBUG"
] |
[]
|
["HTTPFILE_DEBUG"]
|
go
| 1 | 0 | |
modules/help_urls/help_urls.py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Help URL resolver.
Help URLs are of the form <base>/<version>/<suffix> where
1) <base> is the base help URL, which defaults to _BASE_URL below.
2) <version> is derived from the GCB_PRODUCT_VERSION environment variable. If
the patch version is zero, it and its leading dot are stripped (so '1.0.0'
becomes '1.0').
3) <suffix> is a string from topics._ALL, which contains a mapping from a
topic_id to a URL suffix.
URLs are normalized to contain correct slashes. To set a help URL, edit
topics.py's _ALL variable.
The flow is:
1) Use services.help_urls.make_learn_more_message() to make a message for
display in the UI.
2) This composes a link with the href set to _REDIRECT_HANDLER_URL, and passes
the topic_id passed in the call to make_learn_more_message().
3) The redirect handler validates the topic_id, then redirects the user to the
real help URL, calculated from the value in topics._ALL.
This allows us control over the help URLs, opening up the ability to version
them, or to have different doc sets for different runtime configurations. It
also gathers the URLs into one place (topics._ALL) rather than scattering them
throughout the codebase.
"""
__author__ = [
'John Cox ([email protected])',
]
import logging
import os
from common import safe_dom
from controllers import utils
from models import custom_modules
from models import services
from modules.help_urls import topics
_BASE_URL = 'https://edu.google.com/openonline/course-builder/docs'
# Legacy documentation URL. Fall through to this whenever an item is in
# topics._ALL but its value is topics._DEFAULT.
# TODO(johncox): remove this once topics._ALL is fully populated.
_LOG = logging.getLogger('modules.help_urls.help_urls')
logging.basicConfig()
_REDIRECT_HANDLER_URL = '/modules/help_urls/redirect'
class Service(services.HelpUrls):
def get(self, topic_id):
return _TopicRegistry.get_url(topic_id)
def make_learn_more_message(self, text, topic_id, to_string=True):
message = safe_dom.assemble_text_message(
text, '%s?topic_id=%s' % (_REDIRECT_HANDLER_URL, topic_id))
return str(message) if to_string else message
class _TopicRegistry(object):
_MAP = {}
@classmethod
def build(cls, rows):
for row in rows:
key, value = cls._validate(row)
cls._MAP[key] = value
@classmethod
def get_url(cls, topic_id):
suffix = cls._MAP.get(topic_id)
if not suffix:
raise ValueError('No URL suffix found for topic "%s"' % topic_id)
# Treat as module-protected. pylint: disable=protected-access
if isinstance(suffix, topics._LegacyUrl):
return suffix.value
if suffix.startswith('/'):
suffix = suffix[1:]
return '%s/%s/%s' % (_BASE_URL, cls._get_version_infix(), suffix)
@classmethod
def _get_version_infix(cls):
version = os.environ.get('GCB_PRODUCT_VERSION')
assert version
parts = version.split('.')
assert len(parts) == 3
parts.pop()
return '.'.join(parts)
@classmethod
def _validate(cls, row):
row_length = len(row)
if row_length != 2:
raise ValueError(
'Topic row must have exactly 2 items; got %s for row "%s"' % (
row_length, row))
key, value = row
if not key or not value:
raise ValueError(
'Topic mapping values must both be set; got "%s" and "%s"' % (
key, value))
if key in cls._MAP:
raise ValueError(
'Topic mappings must be unique; "%s" already registered' % key)
return key, value
class _RedirectHandler(utils.BaseHandler):
def get(self):
topic_id = self.request.get('topic_id')
if not topic_id:
_LOG.error('No topic_id')
self.error(400)
return
try:
url = services.help_urls.get(topic_id)
except ValueError:
_LOG.error("topic_id '%s' not found", topic_id)
self.error(400)
return
self.redirect(url, normalize=False)
custom_module = None
def register_module():
# pylint: disable=global-statement
global custom_module
def on_module_enabled():
# Treat as module-protected. pylint: disable=protected-access
_TopicRegistry.build(topics._ALL)
services.help_urls = Service()
global_routes = [
(_REDIRECT_HANDLER_URL, _RedirectHandler),
]
namespaced_routes = []
custom_module = custom_modules.Module(
'Help URL Resolver', 'Creates help URLs for the admin UI',
global_routes, namespaced_routes,
notify_module_enabled=on_module_enabled)
return custom_module
|
[] |
[] |
[
"GCB_PRODUCT_VERSION"
] |
[]
|
["GCB_PRODUCT_VERSION"]
|
python
| 1 | 0 | |
src/archive.py
|
import os
import click
import pathlib
from bln.client import Client
DATA_DIR = pathlib.Path(__file__).parent.parent / "data"
@click.command()
def main():
"""
Archive the source data file.
"""
token = os.getenv("BLN_TOKEN")
c = Client(token)
item_list = c.everything()['effectiveProjectRoles']
project_name = "Iowa liquor stores"
try:
project = next(i['project'] for i in item_list if i['project']['name'] == project_name)
except StopIteration:
project = c.createProject(
project_name,
contact="[email protected]",
isOpen=True,
)
c.upload_file(project['id'], DATA_DIR / "iowa-liquor-stores.csv")
if __name__ == '__main__':
main()
|
[] |
[] |
[
"BLN_TOKEN"
] |
[]
|
["BLN_TOKEN"]
|
python
| 1 | 0 | |
bueno/public/host.py
|
#
# Copyright (c) 2019-2021 Triad National Security, LLC
# All rights reserved.
#
# This file is part of the bueno project. See the LICENSE file at the
# top-level directory of this distribution for more information.
#
'''
Host utilities.
'''
from typing import (
List,
Union
)
import os
import shlex
import shutil
import subprocess # nosec
from bueno.core import constants
from bueno.public import logger
from bueno.public import utils
def kernel() -> str:
'''
Returns the kernel name.
'''
return capture('uname -s')
def kernelrel() -> str:
'''
Returns the kernel release.
'''
return capture('uname -r')
def hostname() -> str:
'''
Returns the host computer's name.
'''
return capture('hostname')
def shostname() -> str:
'''
Returns the host computer's short name.
'''
return capture('hostname -s')
def whoami() -> str:
'''
Akin to whoami(1).
'''
return capture('whoami')
def os_pretty_name() -> str:
'''
Returns the host's pretty name as reported by /etc/os-release.
'''
name = 'Unknown'
try:
with open('/etc/os-release', encoding='utf8') as osrel:
for line in osrel:
if not line.startswith('PRETTY_NAME='):
continue
name = utils.chomp(line.split('=')[1]).strip('"')
break
except (OSError, IOError):
pass
return name
def capture(
cmd: str,
check_exit_code: bool = True
) -> str:
'''
Executes the provided command and returns a string with the command's
output.
See run() for exceptions.
'''
res = run(
cmd,
capture_output=True,
verbose=False,
check_exit_code=check_exit_code
)
return utils.chomp(str().join(res))
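# Example (illustrative): capture('uname -s') returns a string such as 'Linux',
# with the trailing newline removed by utils.chomp().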
def which(cmd: str) -> Union[str, None]:
'''
Akin to which(1).
Returns None if cmd is not found.
'''
return shutil.which(cmd)
def whichl(cmds: List[str]) -> Union[str, None]:
'''
Akin to which(1), but accepts a list of commands to search for. The first
command found by which() is returned.
Returns None if none of the provided commands are found.
'''
for cmd in cmds:
wcmd = which(cmd)
if wcmd is not None:
return wcmd
return None
def tmpdir() -> str:
'''
Returns tmpdir.
'''
tdir = os.getenv('TMPDIR')
if tdir is not None:
return tdir
return '/tmp' # nosec
def run( # pylint: disable=too-many-arguments
cmd: str,
verbatim: bool = False,
echo: bool = False,
capture_output: bool = False,
verbose: bool = True,
check_exit_code: bool = True
) -> List[str]:
'''
Executes the provided command.
    Returns a newline-delimited list of output if capture_output is True.
Throws ChildProcessError on error if check_exit_code is True.
'''
def getrealcmd(cmd: str, verbatim: bool) -> str:
# The user wants us to run the string exactly as provided.
if verbatim:
return cmd
return F'{constants.BASH_MAGIC} {shlex.quote(cmd)}'
realcmd = getrealcmd(cmd, verbatim)
if echo:
logger.log(F'# $ {realcmd}')
# Output list of strings used to (optionally) capture command output.
olst: List[str] = []
with subprocess.Popen(
realcmd,
shell=True, # nosec
bufsize=1,
# Enables text mode, making write() et al. happy.
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
) as spo:
# To silence mypy warnings.
assert spo.stdout is not None # nosec
# Show progress and store output to a string (if requested).
while True:
stdout = spo.stdout.readline()
if not stdout:
break
if capture_output:
olst.append(stdout)
if verbose:
logger.log(utils.chomp(stdout))
wrc = spo.wait()
if wrc != os.EX_OK and check_exit_code:
cpe = ChildProcessError()
cpe.errno = wrc
estr = F"Command '{realcmd}' returned non-zero exit status."
cpe.strerror = estr
raise cpe
return olst
# vim: ft=python ts=4 sts=4 sw=4 expandtab
|
[] |
[] |
[
"TMPDIR"
] |
[]
|
["TMPDIR"]
|
python
| 1 | 0 | |
pkg/dbutil/common.go
|
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dbutil
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/go-sql-driver/mysql"
"github.com/pingcap/errors"
"github.com/pingcap/log"
"github.com/pingcap/tidb-tools/pkg/utils"
"github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/parser"
"github.com/pingcap/tidb/parser/model"
tmysql "github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/types"
"go.uber.org/zap"
)
const (
	// DefaultRetryTime is the default number of retries for executing a SQL statement
	DefaultRetryTime = 10
	// DefaultTimeout is the default timeout for executing a SQL statement
	DefaultTimeout time.Duration = 10 * time.Second
	// SlowLogThreshold is the execution-time threshold above which a SQL statement is logged as slow (at debug level)
	SlowLogThreshold = 200 * time.Millisecond
	// DefaultDeleteRowsNum is the default number of rows to delete in one batch
DefaultDeleteRowsNum int64 = 100000
)
var (
// ErrVersionNotFound means can't get the database's version
ErrVersionNotFound = errors.New("can't get the database's version")
// ErrNoData means no data in table
ErrNoData = errors.New("no data found in table")
)
// DBConfig is database configuration.
type DBConfig struct {
Host string `toml:"host" json:"host"`
Port int `toml:"port" json:"port"`
User string `toml:"user" json:"user"`
Password string `toml:"password" json:"-"` // omit it for privacy
Schema string `toml:"schema" json:"schema"`
Snapshot string `toml:"snapshot" json:"snapshot"`
}
// String returns native format of database configuration
func (c *DBConfig) String() string {
cfg, err := json.Marshal(c)
if err != nil {
return "<nil>"
}
return string(cfg)
}
// GetDBConfigFromEnv returns DBConfig from environment
func GetDBConfigFromEnv(schema string) DBConfig {
host := os.Getenv("MYSQL_HOST")
if host == "" {
host = "127.0.0.1"
}
port, _ := strconv.Atoi(os.Getenv("MYSQL_PORT"))
if port == 0 {
port = 3306
}
user := os.Getenv("MYSQL_USER")
if user == "" {
user = "root"
}
pswd := os.Getenv("MYSQL_PSWD")
return DBConfig{
Host: host,
Port: port,
User: user,
Password: pswd,
Schema: schema,
}
}
// OpenDB opens a mysql connection FD
func OpenDB(cfg DBConfig, vars map[string]string) (*sql.DB, error) {
var dbDSN string
if len(cfg.Snapshot) != 0 {
log.Info("create connection with snapshot", zap.String("snapshot", cfg.Snapshot))
dbDSN = fmt.Sprintf("%s:%s@tcp(%s:%d)/?charset=utf8mb4&tidb_snapshot=%s", cfg.User, cfg.Password, cfg.Host, cfg.Port, cfg.Snapshot)
} else {
dbDSN = fmt.Sprintf("%s:%s@tcp(%s:%d)/?charset=utf8mb4", cfg.User, cfg.Password, cfg.Host, cfg.Port)
}
for key, val := range vars {
// key='val'. add single quote for better compatibility.
dbDSN += fmt.Sprintf("&%s=%%27%s%%27", key, url.QueryEscape(val))
}
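	// Illustrative example (values are not taken from this file): with user "root", an
	// empty password, host "127.0.0.1", port 3306 and vars {"time_zone": "+00:00"}, the
	// resulting DSN is "root:@tcp(127.0.0.1:3306)/?charset=utf8mb4&time_zone=%27%2B00%3A00%27".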
dbConn, err := sql.Open("mysql", dbDSN)
if err != nil {
return nil, errors.Trace(err)
}
err = dbConn.Ping()
return dbConn, errors.Trace(err)
}
// CloseDB closes the mysql fd
func CloseDB(db *sql.DB) error {
if db == nil {
return nil
}
return errors.Trace(db.Close())
}
// GetCreateTableSQL returns the create table statement.
func GetCreateTableSQL(ctx context.Context, db QueryExecutor, schemaName string, tableName string) (string, error) {
/*
show create table example result:
mysql> SHOW CREATE TABLE `test`.`itest`;
+-------+--------------------------------------------------------------------+
| Table | Create Table |
+-------+--------------------------------------------------------------------+
| itest | CREATE TABLE `itest` (
`id` int(11) DEFAULT NULL,
`name` varchar(24) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin |
+-------+--------------------------------------------------------------------+
*/
query := fmt.Sprintf("SHOW CREATE TABLE %s", TableName(schemaName, tableName))
var tbl, createTable sql.NullString
err := db.QueryRowContext(ctx, query).Scan(&tbl, &createTable)
if err != nil {
return "", errors.Trace(err)
}
if !tbl.Valid || !createTable.Valid {
return "", errors.NotFoundf("table %s", tableName)
}
return createTable.String, nil
}
// GetRowCount returns row count of the table.
// if not specify where condition, return total row count of the table.
func GetRowCount(ctx context.Context, db QueryExecutor, schemaName string, tableName string, where string, args []interface{}) (int64, error) {
/*
select count example result:
mysql> SELECT count(1) cnt from `test`.`itest` where id > 0;
+------+
| cnt |
+------+
| 100 |
+------+
*/
query := fmt.Sprintf("SELECT COUNT(1) cnt FROM %s", TableName(schemaName, tableName))
if len(where) > 0 {
query += fmt.Sprintf(" WHERE %s", where)
}
log.Debug("get row count", zap.String("sql", query), zap.Reflect("args", args))
var cnt sql.NullInt64
err := db.QueryRowContext(ctx, query, args...).Scan(&cnt)
if err != nil {
return 0, errors.Trace(err)
}
if !cnt.Valid {
return 0, errors.NotFoundf("table `%s`.`%s`", schemaName, tableName)
}
return cnt.Int64, nil
}
// GetRandomValues returns some random value. Tips: limitArgs is the value in limitRange.
func GetRandomValues(ctx context.Context, db QueryExecutor, schemaName, table, column string, num int, limitRange string, limitArgs []interface{}, collation string) ([]string, error) {
/*
example:
mysql> SELECT `id` FROM (SELECT `id`, rand() rand_value FROM `test`.`test` WHERE `id` COLLATE "latin1_bin" > 0 AND `id` COLLATE "latin1_bin" < 100 ORDER BY rand_value LIMIT 5) rand_tmp ORDER BY `id` COLLATE "latin1_bin";
+------+
| id |
+------+
| 1 |
| 2 |
| 3 |
+------+
*/
if limitRange == "" {
limitRange = "TRUE"
}
if collation != "" {
collation = fmt.Sprintf(" COLLATE \"%s\"", collation)
}
query := fmt.Sprintf("SELECT %[1]s FROM (SELECT %[1]s, rand() rand_value FROM %[2]s WHERE %[3]s ORDER BY rand_value LIMIT %[4]d)rand_tmp ORDER BY %[1]s%[5]s",
ColumnName(column), TableName(schemaName, table), limitRange, num, collation)
log.Debug("get random values", zap.String("sql", query), zap.Reflect("args", limitArgs))
rows, err := db.QueryContext(ctx, query, limitArgs...)
if err != nil {
return nil, errors.Trace(err)
}
defer rows.Close()
randomValue := make([]string, 0, num)
for rows.Next() {
var value sql.NullString
err = rows.Scan(&value)
if err != nil {
return nil, errors.Trace(err)
}
if value.Valid {
randomValue = append(randomValue, value.String)
}
}
return randomValue, errors.Trace(rows.Err())
}
// GetMinMaxValue return min and max value of given column by specified limitRange condition.
func GetMinMaxValue(ctx context.Context, db QueryExecutor, schema, table, column string, limitRange string, limitArgs []interface{}, collation string) (string, string, error) {
/*
example:
mysql> SELECT MIN(`id`) as MIN, MAX(`id`) as MAX FROM `test`.`testa` WHERE id > 0 AND id < 10;
+------+------+
| MIN | MAX |
+------+------+
| 1 | 2 |
+------+------+
*/
if limitRange == "" {
limitRange = "TRUE"
}
if collation != "" {
collation = fmt.Sprintf(" COLLATE \"%s\"", collation)
}
query := fmt.Sprintf("SELECT /*!40001 SQL_NO_CACHE */ MIN(%s%s) as MIN, MAX(%s%s) as MAX FROM %s WHERE %s",
ColumnName(column), collation, ColumnName(column), collation, TableName(schema, table), limitRange)
log.Debug("GetMinMaxValue", zap.String("sql", query), zap.Reflect("args", limitArgs))
var min, max sql.NullString
rows, err := db.QueryContext(ctx, query, limitArgs...)
if err != nil {
return "", "", errors.Trace(err)
}
defer rows.Close()
for rows.Next() {
err = rows.Scan(&min, &max)
if err != nil {
return "", "", errors.Trace(err)
}
}
if !min.Valid || !max.Valid {
// don't have any data
return "", "", ErrNoData
}
return min.String, max.String, errors.Trace(rows.Err())
}
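// GetTimeZoneOffset returns the server's offset from UTC as a duration; for example
// (illustrative), a TIMEDIFF result of "08:00:00" yields 8*time.Hour and "-05:00:00"
// yields -5*time.Hour.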
func GetTimeZoneOffset(ctx context.Context, db QueryExecutor) (time.Duration, error) {
var timeStr string
err := db.QueryRowContext(ctx, "SELECT cast(TIMEDIFF(NOW(6), UTC_TIMESTAMP(6)) as time);").Scan(&timeStr)
if err != nil {
return 0, errors.Trace(err)
}
factor := time.Duration(1)
if timeStr[0] == '-' || timeStr[0] == '+' {
if timeStr[0] == '-' {
factor *= -1
}
timeStr = timeStr[1:]
}
t, err := time.Parse("15:04:05", timeStr)
if err != nil {
return 0, errors.Trace(err)
}
if t.IsZero() {
return 0, nil
}
hour, minute, second := t.Clock()
d := time.Duration(hour*3600+minute*60+second) * time.Second * factor
return d, nil
}
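// FormatTimeZoneOffset renders an offset as a MySQL-style time zone string, e.g.
// (illustrative) 8*time.Hour becomes "+08:00" and -(5*time.Hour + 30*time.Minute)
// becomes "-05:30".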
func FormatTimeZoneOffset(offset time.Duration) string {
prefix := "+"
if offset < 0 {
prefix = "-"
offset *= -1
}
hours := offset / time.Hour
minutes := (offset % time.Hour) / time.Minute
return fmt.Sprintf("%s%02d:%02d", prefix, hours, minutes)
}
func queryTables(ctx context.Context, db QueryExecutor, q string) (tables []string, err error) {
log.Debug("query tables", zap.String("query", q))
rows, err := db.QueryContext(ctx, q)
if err != nil {
return nil, errors.Trace(err)
}
defer rows.Close()
tables = make([]string, 0, 8)
for rows.Next() {
var table, tType sql.NullString
err = rows.Scan(&table, &tType)
if err != nil {
return nil, errors.Trace(err)
}
if !table.Valid || !tType.Valid {
continue
}
tables = append(tables, table.String)
}
return tables, errors.Trace(rows.Err())
}
// GetTables returns name of all tables in the specified schema
func GetTables(ctx context.Context, db QueryExecutor, schemaName string) (tables []string, err error) {
/*
show tables without view: https://dev.mysql.com/doc/refman/5.7/en/show-tables.html
example:
mysql> show full tables in test where Table_Type != 'VIEW';
+----------------+------------+
| Tables_in_test | Table_type |
+----------------+------------+
| NTEST | BASE TABLE |
+----------------+------------+
*/
query := fmt.Sprintf("SHOW FULL TABLES IN `%s` WHERE Table_Type != 'VIEW';", escapeName(schemaName))
return queryTables(ctx, db, query)
}
// GetViews returns names of all views in the specified schema
func GetViews(ctx context.Context, db QueryExecutor, schemaName string) (tables []string, err error) {
query := fmt.Sprintf("SHOW FULL TABLES IN `%s` WHERE Table_Type = 'VIEW';", escapeName(schemaName))
return queryTables(ctx, db, query)
}
// GetSchemas returns name of all schemas
func GetSchemas(ctx context.Context, db QueryExecutor) ([]string, error) {
query := "SHOW DATABASES"
rows, err := db.QueryContext(ctx, query)
if err != nil {
return nil, errors.Trace(err)
}
defer rows.Close()
// show an example.
/*
mysql> SHOW DATABASES;
+--------------------+
| Database |
+--------------------+
| information_schema |
| mysql |
| performance_schema |
| sys |
| test_db |
+--------------------+
*/
schemas := make([]string, 0, 10)
for rows.Next() {
var schema string
err = rows.Scan(&schema)
if err != nil {
return nil, errors.Trace(err)
}
schemas = append(schemas, schema)
}
return schemas, errors.Trace(rows.Err())
}
// GetCRC32Checksum returns checksum code of some data by given condition
func GetCRC32Checksum(ctx context.Context, db QueryExecutor, schemaName, tableName string, tbInfo *model.TableInfo, limitRange string, args []interface{}) (int64, error) {
/*
calculate CRC32 checksum example:
mysql> SELECT BIT_XOR(CAST(CRC32(CONCAT_WS(',', id, name, age, CONCAT(ISNULL(id), ISNULL(name), ISNULL(age))))AS UNSIGNED)) AS checksum FROM test.test WHERE id > 0 AND id < 10;
+------------+
| checksum |
+------------+
| 1466098199 |
+------------+
*/
columnNames := make([]string, 0, len(tbInfo.Columns))
columnIsNull := make([]string, 0, len(tbInfo.Columns))
for _, col := range tbInfo.Columns {
columnNames = append(columnNames, ColumnName(col.Name.O))
columnIsNull = append(columnIsNull, fmt.Sprintf("ISNULL(%s)", ColumnName(col.Name.O)))
}
query := fmt.Sprintf("SELECT BIT_XOR(CAST(CRC32(CONCAT_WS(',', %s, CONCAT(%s)))AS UNSIGNED)) AS checksum FROM %s WHERE %s;",
strings.Join(columnNames, ", "), strings.Join(columnIsNull, ", "), TableName(schemaName, tableName), limitRange)
log.Debug("checksum", zap.String("sql", query), zap.Reflect("args", args))
var checksum sql.NullInt64
err := db.QueryRowContext(ctx, query, args...).Scan(&checksum)
if err != nil {
return -1, errors.Trace(err)
}
if !checksum.Valid {
// if don't have any data, the checksum will be `NULL`
log.Warn("get empty checksum", zap.String("sql", query), zap.Reflect("args", args))
return 0, nil
}
return checksum.Int64, nil
}
// Bucket saves the bucket information from TiDB.
type Bucket struct {
Count int64
LowerBound string
UpperBound string
}
// GetBucketsInfo SHOW STATS_BUCKETS in TiDB.
func GetBucketsInfo(ctx context.Context, db QueryExecutor, schema, table string, tableInfo *model.TableInfo) (map[string][]Bucket, error) {
/*
example in tidb:
mysql> SHOW STATS_BUCKETS WHERE db_name= "test" AND table_name="testa";
+---------+------------+----------------+-------------+----------+-----------+-------+---------+---------------------+---------------------+
| Db_name | Table_name | Partition_name | Column_name | Is_index | Bucket_id | Count | Repeats | Lower_Bound | Upper_Bound |
+---------+------------+----------------+-------------+----------+-----------+-------+---------+---------------------+---------------------+
| test | testa | | PRIMARY | 1 | 0 | 64 | 1 | 1846693550524203008 | 1846838686059069440 |
| test | testa | | PRIMARY | 1 | 1 | 128 | 1 | 1846840885082324992 | 1847056389361369088 |
+---------+------------+----------------+-------------+----------+-----------+-------+---------+---------------------+---------------------+
*/
buckets := make(map[string][]Bucket)
query := "SHOW STATS_BUCKETS WHERE db_name= ? AND table_name= ?;"
log.Debug("GetBucketsInfo", zap.String("sql", query), zap.String("schema", schema), zap.String("table", table))
rows, err := db.QueryContext(ctx, query, schema, table)
if err != nil {
return nil, errors.Trace(err)
}
defer rows.Close()
cols, err := rows.Columns()
if err != nil {
return nil, errors.Trace(err)
}
for rows.Next() {
var dbName, tableName, partitionName, columnName, lowerBound, upperBound sql.NullString
var isIndex, bucketID, count, repeats, ndv sql.NullInt64
		// the partition_name column was added in newer versions
switch len(cols) {
case 9:
err = rows.Scan(&dbName, &tableName, &columnName, &isIndex, &bucketID, &count, &repeats, &lowerBound, &upperBound)
case 10:
err = rows.Scan(&dbName, &tableName, &partitionName, &columnName, &isIndex, &bucketID, &count, &repeats, &lowerBound, &upperBound)
case 11:
err = rows.Scan(&dbName, &tableName, &partitionName, &columnName, &isIndex, &bucketID, &count, &repeats, &lowerBound, &upperBound, &ndv)
default:
return nil, errors.New("Unknown struct for buckets info")
}
if err != nil {
return nil, errors.Trace(err)
}
if _, ok := buckets[columnName.String]; !ok {
buckets[columnName.String] = make([]Bucket, 0, 100)
}
buckets[columnName.String] = append(buckets[columnName.String], Bucket{
Count: count.Int64,
LowerBound: lowerBound.String,
UpperBound: upperBound.String,
})
}
// when primary key is int type, the columnName will be column's name, not `PRIMARY`, check and transform here.
indices := FindAllIndex(tableInfo)
for _, index := range indices {
if index.Name.O != "PRIMARY" {
continue
}
_, ok := buckets[index.Name.O]
if !ok && len(index.Columns) == 1 {
if _, ok := buckets[index.Columns[0].Name.O]; !ok {
return nil, errors.NotFoundf("primary key on %s in buckets info", index.Columns[0].Name.O)
}
buckets[index.Name.O] = buckets[index.Columns[0].Name.O]
delete(buckets, index.Columns[0].Name.O)
}
}
return buckets, errors.Trace(rows.Err())
}
// AnalyzeValuesFromBuckets analyzes the upperBound or lowerBound value into a string for each column.
// upperBound and lowerBound look like '(123, abc)' for multiple fields, or '123' for a single field.
func AnalyzeValuesFromBuckets(valueString string, cols []*model.ColumnInfo) ([]string, error) {
// FIXME: maybe some values contains '(', ')' or ', '
vStr := strings.Trim(valueString, "()")
values := strings.Split(vStr, ", ")
if len(values) != len(cols) {
return nil, errors.Errorf("analyze value %s failed", valueString)
}
for i, col := range cols {
if IsTimeTypeAndNeedDecode(col.Tp) {
// check if values[i] is already a time string
sc := &stmtctx.StatementContext{TimeZone: time.UTC}
_, err := types.ParseTime(sc, values[i], col.Tp, types.MinFsp)
if err == nil {
continue
}
value, err := DecodeTimeInBucket(values[i])
if err != nil {
log.Error("analyze values from buckets", zap.String("column", col.Name.O), zap.String("value", values[i]), zap.Error(err))
return nil, errors.Trace(err)
}
values[i] = value
}
}
return values, nil
}
// DecodeTimeInBucket decodes Time from a packed uint64 value.
func DecodeTimeInBucket(packedStr string) (string, error) {
packed, err := strconv.ParseUint(packedStr, 10, 64)
if err != nil {
return "", err
}
if packed == 0 {
return "", nil
}
t := new(types.Time)
err = t.FromPackedUint(packed)
if err != nil {
return "", err
}
return t.String(), nil
}
// GetTidbLatestTSO returns tidb's current TSO.
func GetTidbLatestTSO(ctx context.Context, db QueryExecutor) (int64, error) {
/*
example in tidb:
mysql> SHOW MASTER STATUS;
+-------------+--------------------+--------------+------------------+-------------------+
| File | Position | Binlog_Do_DB | Binlog_Ignore_DB | Executed_Gtid_Set |
+-------------+--------------------+--------------+------------------+-------------------+
| tidb-binlog | 400718757701615617 | | | |
+-------------+--------------------+--------------+------------------+-------------------+
*/
rows, err := db.QueryContext(ctx, "SHOW MASTER STATUS")
if err != nil {
return 0, errors.Trace(err)
}
defer rows.Close()
for rows.Next() {
fields, err1 := ScanRow(rows)
if err1 != nil {
return 0, errors.Trace(err1)
}
ts, err1 := strconv.ParseInt(string(fields["Position"].Data), 10, 64)
if err1 != nil {
return 0, errors.Trace(err1)
}
return ts, nil
}
return 0, errors.New("get secondary cluster's ts failed")
}
// GetDBVersion returns the database's version
func GetDBVersion(ctx context.Context, db QueryExecutor) (string, error) {
/*
example in TiDB:
mysql> select version();
+--------------------------------------+
| version() |
+--------------------------------------+
| 5.7.10-TiDB-v2.1.0-beta-173-g7e48ab1 |
+--------------------------------------+
example in MySQL:
mysql> select version();
+-----------+
| version() |
+-----------+
| 5.7.21 |
+-----------+
*/
query := "SELECT version()"
result, err := db.QueryContext(ctx, query)
if err != nil {
return "", errors.Trace(err)
}
defer result.Close()
var version sql.NullString
for result.Next() {
err := result.Scan(&version)
if err != nil {
return "", errors.Trace(err)
}
break
}
if version.Valid {
return version.String, nil
}
return "", ErrVersionNotFound
}
// GetSessionVariable gets the server's session variable. Although the argument is a
// QueryExecutor, (session) system variables may also be set through the DSN.
func GetSessionVariable(ctx context.Context, db QueryExecutor, variable string) (value string, err error) {
query := fmt.Sprintf("SHOW VARIABLES LIKE '%s'", variable)
rows, err := db.QueryContext(ctx, query)
if err != nil {
return "", errors.Trace(err)
}
defer rows.Close()
// Show an example.
/*
mysql> SHOW VARIABLES LIKE "binlog_format";
+---------------+-------+
| Variable_name | Value |
+---------------+-------+
| binlog_format | ROW |
+---------------+-------+
*/
for rows.Next() {
err = rows.Scan(&variable, &value)
if err != nil {
return "", errors.Trace(err)
}
}
if rows.Err() != nil {
return "", errors.Trace(err)
}
return value, nil
}
// GetSQLMode returns sql_mode.
func GetSQLMode(ctx context.Context, db QueryExecutor) (tmysql.SQLMode, error) {
sqlMode, err := GetSessionVariable(ctx, db, "sql_mode")
if err != nil {
return tmysql.ModeNone, err
}
mode, err := tmysql.GetSQLMode(sqlMode)
return mode, errors.Trace(err)
}
// IsTiDB returns true if this database is TiDB.
func IsTiDB(ctx context.Context, db QueryExecutor) (bool, error) {
version, err := GetDBVersion(ctx, db)
if err != nil {
log.Error("get database's version failed", zap.Error(err))
return false, errors.Trace(err)
}
return strings.Contains(strings.ToLower(version), "tidb"), nil
}
// TableName returns `schema`.`table`
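// A backquote inside a name is doubled, e.g. TableName("db`1", "t1") yields
// "`db``1`.`t1`" (illustrative example, not from the original file).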
func TableName(schema, table string) string {
return fmt.Sprintf("`%s`.`%s`", escapeName(schema), escapeName(table))
}
// ColumnName returns `column`
func ColumnName(column string) string {
return fmt.Sprintf("`%s`", escapeName(column))
}
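// escapeName doubles any backquote in the identifier so it can be safely wrapped in backquotes.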
func escapeName(name string) string {
return strings.Replace(name, "`", "``", -1)
}
// ReplacePlaceholder uses args to replace the '?' placeholders in str; it is intended for logging only.
// Note: make sure the number of '?' placeholders matches len(args).
func ReplacePlaceholder(str string, args []string) string {
/*
for example:
str is "a > ? AND a < ?", args is {'1', '2'},
this function will return "a > '1' AND a < '2'"
*/
newStr := strings.Replace(str, "?", "'%s'", -1)
return fmt.Sprintf(newStr, utils.StringsToInterfaces(args)...)
}
// ExecSQLWithRetry executes the SQL statement, retrying up to DefaultRetryTime times on retryable errors.
func ExecSQLWithRetry(ctx context.Context, db DBExecutor, sql string, args ...interface{}) (err error) {
for i := 0; i < DefaultRetryTime; i++ {
startTime := time.Now()
_, err = db.ExecContext(ctx, sql, args...)
takeDuration := time.Since(startTime)
if takeDuration > SlowLogThreshold {
log.Debug("exec sql slow", zap.String("sql", sql), zap.Reflect("args", args), zap.Duration("take", takeDuration))
}
if err == nil {
return nil
}
if ignoreError(err) {
log.Warn("ignore execute sql error", zap.Error(err))
return nil
}
if !IsRetryableError(err) {
return errors.Trace(err)
}
log.Warn("exe sql failed, will try again", zap.String("sql", sql), zap.Reflect("args", args), zap.Error(err))
if i == DefaultRetryTime-1 {
break
}
select {
case <-ctx.Done():
return errors.Trace(ctx.Err())
case <-time.After(10 * time.Millisecond):
}
}
return errors.Trace(err)
}
// ExecuteSQLs executes the given SQL statements in one transaction.
func ExecuteSQLs(ctx context.Context, db DBExecutor, sqls []string, args [][]interface{}) error {
txn, err := db.BeginTx(ctx, nil)
if err != nil {
log.Error("exec sqls begin", zap.Error(err))
return errors.Trace(err)
}
for i := range sqls {
startTime := time.Now()
_, err = txn.ExecContext(ctx, sqls[i], args[i]...)
if err != nil {
log.Error("exec sql", zap.String("sql", sqls[i]), zap.Reflect("args", args[i]), zap.Error(err))
rerr := txn.Rollback()
if rerr != nil {
log.Error("rollback", zap.Error(err))
}
return errors.Trace(err)
}
takeDuration := time.Since(startTime)
if takeDuration > SlowLogThreshold {
log.Debug("exec sql slow", zap.String("sql", sqls[i]), zap.Reflect("args", args[i]), zap.Duration("take", takeDuration))
}
}
err = txn.Commit()
if err != nil {
log.Error("exec sqls commit", zap.Error(err))
return errors.Trace(err)
}
return nil
}
func ignoreError(err error) bool {
	// TODO: currently only some DDL errors are ignored; add DML errors later
if ignoreDDLError(err) {
return true
}
return false
}
func ignoreDDLError(err error) bool {
err = errors.Cause(err)
mysqlErr, ok := err.(*mysql.MySQLError)
if !ok {
return false
}
errCode := errors.ErrCode(mysqlErr.Number)
switch errCode {
case infoschema.ErrDatabaseExists.Code(), infoschema.ErrDatabaseDropExists.Code(),
infoschema.ErrTableExists.Code(), infoschema.ErrTableDropExists.Code(),
infoschema.ErrColumnExists.Code(), infoschema.ErrIndexExists.Code():
return true
case ddl.ErrDupKeyName.Code():
return true
default:
return false
}
}
// DeleteRows deletes rows in batches; TiDB can only delete fewer than 300,000 rows at a time.
func DeleteRows(ctx context.Context, db DBExecutor, schemaName string, tableName string, where string, args []interface{}) error {
deleteSQL := fmt.Sprintf("DELETE FROM %s WHERE %s limit %d;", TableName(schemaName, tableName), where, DefaultDeleteRowsNum)
result, err := db.ExecContext(ctx, deleteSQL, args...)
if err != nil {
return errors.Trace(err)
}
rows, err := result.RowsAffected()
if err != nil {
return errors.Trace(err)
}
if rows < DefaultDeleteRowsNum {
return nil
}
return DeleteRows(ctx, db, schemaName, tableName, where, args)
}
// getParser returns a parser configured for the given SQL mode.
func getParser(sqlModeStr string) (*parser.Parser, error) {
if len(sqlModeStr) == 0 {
return parser.New(), nil
}
sqlMode, err := tmysql.GetSQLMode(tmysql.FormatSQLModeStr(sqlModeStr))
if err != nil {
return nil, errors.Annotatef(err, "invalid sql mode %s", sqlModeStr)
}
parser2 := parser.New()
parser2.SetSQLMode(sqlMode)
return parser2, nil
}
// GetParserForDB discovers ANSI_QUOTES in db's session variables and returns a proper parser
func GetParserForDB(ctx context.Context, db QueryExecutor) (*parser.Parser, error) {
mode, err := GetSQLMode(ctx, db)
if err != nil {
return nil, err
}
parser2 := parser.New()
parser2.SetSQLMode(mode)
return parser2, nil
}
|
[
"\"MYSQL_HOST\"",
"\"MYSQL_PORT\"",
"\"MYSQL_USER\"",
"\"MYSQL_PSWD\""
] |
[] |
[
"MYSQL_PORT",
"MYSQL_USER",
"MYSQL_PSWD",
"MYSQL_HOST"
] |
[]
|
["MYSQL_PORT", "MYSQL_USER", "MYSQL_PSWD", "MYSQL_HOST"]
|
go
| 4 | 0 | |
main.go
|
package main
import (
"net/http"
"os"
"time"
"github.com/kpango/glg"
"github.com/rking788/go-alexa/skillserver"
"github.com/rking788/twitch-box/alexa"
"github.com/rking788/twitch-box/twitch"
)
// AlexaHandler is the type of function that should be used to respond to a specific intent.
type AlexaHandler func(*skillserver.EchoRequest) *skillserver.EchoResponse
// AlexaHandlers are the handler functions mapped by the intent name that they should handle.
var (
AlexaHandlers = map[string]AlexaHandler{
"StartAudioStream": alexa.StartAudioStream,
"StartVideoStream": alexa.StartVideoStream,
"AMAZON.NextIntent": alexa.StartAudioStream,
"AMAZON.PreviousIntent": alexa.StartAudioStream,
"AMAZON.ResumeIntent": alexa.StartAudioStream,
}
)
// applications holds the definitions of the Alexa applications running on this server.
var applications map[string]interface{}
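// Log levels ordered from most to least severe; compared against the
// TWITCH_BOX_LOG_LEVEL environment variable in InitEnv (descriptive comment added for clarity).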
const (
FATAL uint = iota
ERROR
WARNING
INFO
DEBUG
ALL
)
// InitEnv is responsible for initializing all components (including sub-packages) that
// depend on a specific deployment environment configuration.
func InitEnv() {
applications = map[string]interface{}{
"/echo/twitch-box": skillserver.EchoApplication{ // Route
AppID: os.Getenv("ALEXA_APP_ID"), // Echo App ID from Amazon Dashboard
OnIntent: EchoIntentHandler,
OnLaunch: EchoIntentHandler,
OnSessionEnded: EchoSessionEndedHandler,
},
"/health": skillserver.StdApplication{
Methods: "GET",
Handler: healthHandler,
},
}
// Configure logging
logger := glg.Get()
level, ok := map[string]uint{"FATAL": FATAL, "ERROR": ERROR,
"WARNING": WARNING, "INFO": INFO, "DEBUG": DEBUG,
"ALL": ALL}[os.Getenv("TWITCH_BOX_LOG_LEVEL")]
if !ok {
level = WARNING
}
if level < DEBUG {
logger.SetLevelMode(glg.DEBG, glg.NONE)
}
if level < INFO {
logger.SetLevelMode(glg.INFO, glg.NONE)
}
if level < WARNING {
logger.SetLevelMode(glg.WARN, glg.NONE)
}
if level < ERROR {
logger.SetLevelMode(glg.ERR, glg.NONE)
}
}
func main() {
// flag.Parse()
// config = loadConfig(configPath)
// glg.Infof("Loaded config : %+v\n", config)
twitch.InitEnv(os.Getenv("REDIS_URL"))
InitEnv()
// defer CloseLogger()
glg.Printf("Version=%s, BuildDate=%v", Version, BuildDate)
// writeHeapProfile()
// if config.Environment == "production" {
// port := ":443"
// err := skillserver.RunSSL(applications, port, config.SSLCertPath, config.SSLKeyPath)
// if err != nil {
// glg.Errorf("Error starting the application! : %s", err.Error())
// }
// } else {
// Heroku makes us read a random port from the environment and our app is a
// subdomain of theirs so we get SSL for free
port := os.Getenv("PORT")
skillserver.Run(applications, port)
//}
}
func healthHandler(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("Up"))
}
// Alexa skill related functions
// EchoSessionEndedHandler is responsible for cleaning up an open session since the
// user has quit the session.
func EchoSessionEndedHandler(echoRequest *skillserver.EchoRequest,
echoResponse *skillserver.EchoResponse) {
*echoResponse = *skillserver.NewEchoResponse()
//alexa.ClearSession(echoRequest.GetSessionID())
}
// EchoIntentHandler is a handler method that is responsible for receiving the
// call from an Alexa command and returning the correct speech or cards.
func EchoIntentHandler(echoRequest *skillserver.EchoRequest, echoResponse *skillserver.EchoResponse) {
// Time the intent handler to determine if it is taking longer than normal
startTime := time.Now()
defer func(start time.Time) {
glg.Infof("IntentHandler execution time: %v", time.Since(start))
}(startTime)
var response *skillserver.EchoResponse
intentName := echoRequest.GetIntentName()
glg.Infof("RequestType: %s, IntentName: %s", echoRequest.GetRequestType(), intentName)
// During this time, users can invoke the following built-in playback control intents without using your skill’s invocation name:
// AMAZON.CancelIntent
// AMAZON.LoopOffIntent
// AMAZON.LoopOnIntent
// AMAZON.RepeatIntent
// AMAZON.ResumeIntent
// AMAZON.ShuffleOffIntent
// AMAZON.ShuffleOnIntent
// AMAZON.StartOverIntent
handler, ok := AlexaHandlers[intentName]
if echoRequest.GetRequestType() == "LaunchRequest" {
response = alexa.WelcomePrompt(echoRequest)
} else if intentName == "AMAZON.StopIntent" {
response = skillserver.NewEchoResponse()
} else if intentName == "AMAZON.CancelIntent" {
response = skillserver.NewEchoResponse()
} else if intentName == "AMAZON.PauseIntent" {
// Send stop directive
response = alexa.StopAudioDirective()
} else if ok {
response = handler(echoRequest)
} else {
response = skillserver.NewEchoResponse()
response.OutputSpeech("Sorry Guardian, I did not understand your request.")
}
*echoResponse = *response
}
// func dumpRequest(ctx *gin.Context) {
// data, err := httputil.DumpRequest(ctx.Request, true)
// if err != nil {
// glg.Errorf("Failed to dump the request: %s", err.Error())
// return
// }
// glg.Debug(string(data))
// }
|
[
"\"ALEXA_APP_ID\"",
"\"TWITCH_BOX_LOG_LEVEL\"",
"\"REDIS_URL\"",
"\"PORT\""
] |
[] |
[
"REDIS_URL",
"PORT",
"ALEXA_APP_ID",
"TWITCH_BOX_LOG_LEVEL"
] |
[]
|
["REDIS_URL", "PORT", "ALEXA_APP_ID", "TWITCH_BOX_LOG_LEVEL"]
|
go
| 4 | 0 | |
system/cloud/aws/cloudwatch/client_test.go
|
package cloudwatch
import (
"github.com/aws/aws-sdk-go/service/cloudwatch"
"github.com/stretchr/testify/assert"
"github.com/viant/endly"
"github.com/viant/toolbox"
"os"
"path"
"testing"
)
func TestClient(t *testing.T) {
context := endly.New().NewContext(nil)
err := setClient(context, map[string]interface{}{
"Credentials": "4234234dasdasde",
})
assert.NotNil(t, err)
_, err = getClient(context)
assert.NotNil(t, err)
if !toolbox.FileExists(path.Join(os.Getenv("HOME"), ".secret/aws.json")) {
return
}
err = setClient(context, map[string]interface{}{
"Credentials": "aws",
})
assert.Nil(t, err)
client, err := getClient(context)
assert.Nil(t, err)
assert.NotNil(t, client)
	cwClient, ok := client.(*cloudwatch.CloudWatch)
	assert.True(t, ok)
	assert.NotNil(t, cwClient)
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
testutils/find.go
|
/*
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testutils
import (
"os"
"path/filepath"
"testing"
)
// FindTests finds all files matching the given pattern.
// It changes the working directory to `directory`, and returns a function
// to call to change back to the original working directory.
// This allows tests to assert on alias finding between absolute and relative labels.
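// Illustrative usage (assumption, not from the original file):
//   files, cleanup := FindTests(t, "testdata", "*.in")
//   defer cleanup()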
func FindTests(t *testing.T, directory, pattern string) ([]string, func()) {
wd, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
if err := os.Chdir(filepath.Join(os.Getenv("TEST_SRCDIR"), os.Getenv("TEST_WORKSPACE"), directory)); err != nil {
t.Fatal(err)
}
files, err := filepath.Glob(pattern)
if err != nil {
t.Fatal(err)
}
if len(files) == 0 {
t.Fatal("Didn't find any test cases")
}
return files, func() { os.Chdir(wd) }
}
|
[
"\"TEST_SRCDIR\"",
"\"TEST_WORKSPACE\""
] |
[] |
[
"TEST_WORKSPACE",
"TEST_SRCDIR"
] |
[]
|
["TEST_WORKSPACE", "TEST_SRCDIR"]
|
go
| 2 | 0 | |
lostsales/asgi.py
|
"""
ASGI config for lostsales project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lostsales.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
fly/commands/hijack.go
|
package commands
import (
"context"
"encoding/json"
"fmt"
"io"
"net/url"
"os"
"sort"
"strconv"
"strings"
"github.com/concourse/concourse/atc"
"github.com/concourse/concourse/fly/commands/internal/displayhelpers"
"github.com/concourse/concourse/fly/commands/internal/flaghelpers"
"github.com/concourse/concourse/fly/commands/internal/hijacker"
"github.com/concourse/concourse/fly/commands/internal/hijackhelpers"
"github.com/concourse/concourse/fly/pty"
"github.com/concourse/concourse/fly/rc"
"github.com/concourse/concourse/go-concourse/concourse"
"github.com/tedsuo/rata"
"github.com/vito/go-interact/interact"
)
type HijackCommand struct {
Job flaghelpers.JobFlag `short:"j" long:"job" value-name:"PIPELINE/JOB" description:"Name of a job to hijack"`
Handle string ` long:"handle" description:"Handle id of a job to hijack"`
Check flaghelpers.ResourceFlag `short:"c" long:"check" value-name:"PIPELINE/CHECK" description:"Name of a resource's checking container to hijack"`
Url string `short:"u" long:"url" description:"URL for the build, job, or check container to hijack"`
Build string `short:"b" long:"build" description:"Build number within the job, or global build ID"`
StepName string `short:"s" long:"step" description:"Name of step to hijack (e.g. build, unit, resource name)"`
StepType string ` long:"step-type" description:"Type of step to hijack (e.g. get, put, task)"`
Attempt string `short:"a" long:"attempt" value-name:"N[,N,...]" description:"Attempt number of step to hijack."`
PositionalArgs struct {
Command []string `positional-arg-name:"command" description:"The command to run in the container (default: bash)"`
} `positional-args:"yes"`
Team string `long:"team" description:"Name of the team to which the container belongs, if different from the target default"`
}
func (command *HijackCommand) Execute([]string) error {
var (
chosenContainer atc.Container
err error
name rc.TargetName
target rc.Target
team concourse.Team
)
if Fly.Target == "" && command.Url != "" {
u, err := url.Parse(command.Url)
if err != nil {
return err
}
urlMap := parseUrlPath(u.Path)
target, name, err = rc.LoadTargetFromURL(fmt.Sprintf("%s://%s", u.Scheme, u.Host), urlMap["teams"], Fly.Verbose)
if err != nil {
return err
}
Fly.Target = name
} else {
target, err = rc.LoadTarget(Fly.Target, Fly.Verbose)
if err != nil {
return err
}
}
err = target.Validate()
if err != nil {
return err
}
if command.Team != "" {
team, err = target.FindTeam(command.Team)
if err != nil {
return err
}
} else {
team = target.Team()
}
if command.Handle != "" {
chosenContainer, err = team.GetContainer(command.Handle)
if err != nil {
displayhelpers.Failf("no containers matched the given handle id!\n\nthey may have expired if your build hasn't recently finished.")
}
} else {
fingerprint, err := command.getContainerFingerprint(target, team)
if err != nil {
return err
}
containers, err := command.getContainerIDs(target, fingerprint, team)
if err != nil {
return err
}
hijackableContainers := make([]atc.Container, 0)
for _, container := range containers {
if container.State == atc.ContainerStateCreated || container.State == atc.ContainerStateFailed {
hijackableContainers = append(hijackableContainers, container)
}
}
if len(hijackableContainers) == 0 {
displayhelpers.Failf("no containers matched your search parameters!\n\nthey may have expired if your build hasn't recently finished.")
} else if len(hijackableContainers) > 1 {
var choices []interact.Choice
for _, container := range hijackableContainers {
var infos []string
if container.BuildID != 0 {
if container.JobName != "" {
infos = append(infos, fmt.Sprintf("build #%s", container.BuildName))
} else {
infos = append(infos, fmt.Sprintf("build id: %d", container.BuildID))
}
}
if container.StepName != "" {
infos = append(infos, fmt.Sprintf("step: %s", container.StepName))
}
if container.ResourceName != "" {
infos = append(infos, fmt.Sprintf("resource: %s", container.ResourceName))
}
infos = append(infos, fmt.Sprintf("type: %s", container.Type))
if container.Type == "check" {
infos = append(infos, fmt.Sprintf("expires in: %s", container.ExpiresIn))
}
if container.Attempt != "" {
infos = append(infos, fmt.Sprintf("attempt: %s", container.Attempt))
}
choices = append(choices, interact.Choice{
Display: strings.Join(infos, ", "),
Value: container,
})
}
err = interact.NewInteraction("choose a container", choices...).Resolve(&chosenContainer)
if err == io.EOF {
return nil
}
if err != nil {
return err
}
} else {
chosenContainer = hijackableContainers[0]
}
}
privileged := true
reqGenerator := rata.NewRequestGenerator(target.URL(), atc.Routes)
var ttySpec *atc.HijackTTYSpec
rows, cols, err := pty.Getsize(os.Stdout)
if err == nil {
ttySpec = &atc.HijackTTYSpec{
WindowSize: atc.HijackWindowSize{
Columns: cols,
Rows: rows,
},
}
}
path, args := remoteCommand(command.PositionalArgs.Command)
someShell := false
if path == "" {
path = "bash"
someShell = true
}
spec := atc.HijackProcessSpec{
Path: path,
Args: args,
Env: []string{"TERM=" + os.Getenv("TERM")},
User: chosenContainer.User,
Dir: chosenContainer.WorkingDirectory,
Privileged: privileged,
TTY: ttySpec,
}
result, err := func() (int, error) { // so the term.Restore() can run before the os.Exit()
var in io.Reader
if pty.IsTerminal() {
term, err := pty.OpenRawTerm()
if err != nil {
return -1, err
}
defer func() {
_ = term.Restore()
}()
in = term
} else {
in = os.Stdin
}
inputs := make(chan atc.HijackInput, 1)
go func() {
io.Copy(&stdinWriter{inputs}, in)
inputs <- atc.HijackInput{Closed: true}
}()
io := hijacker.ProcessIO{
In: inputs,
Out: os.Stdout,
Err: os.Stderr,
}
ctx := context.Background()
h := hijacker.New(target.TLSConfig(), reqGenerator, target.Token())
result, exeNotFound, err := h.Hijack(ctx, team.Name(), chosenContainer.ID, spec, io)
if exeNotFound && someShell {
spec.Path = "sh"
os.Stderr.WriteString("\rCouldn't find \"bash\" on container, retrying with \"sh\"\n\r")
result, exeNotFound, err = h.Hijack(ctx, team.Name(), chosenContainer.ID, spec, io)
}
return result, err
}()
if err != nil {
return err
}
os.Exit(result)
return nil
}
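// parseUrlPath splits a URL path such as "/teams/main/pipelines/demo" into successive
// key/value pairs, e.g. {"teams": "main", "pipelines": "demo"} (illustrative example).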
func parseUrlPath(urlPath string) map[string]string {
pathWithoutFirstSlash := strings.Replace(urlPath, "/", "", 1)
urlComponents := strings.Split(pathWithoutFirstSlash, "/")
urlMap := make(map[string]string)
for i := 0; i < len(urlComponents)/2; i++ {
keyIndex := i * 2
valueIndex := keyIndex + 1
urlMap[urlComponents[keyIndex]] = urlComponents[valueIndex]
}
return urlMap
}
func (command *HijackCommand) getContainerFingerprintFromUrl(target rc.Target, urlParam string, team concourse.Team) (*containerFingerprint, error) {
u, err := url.Parse(urlParam)
if err != nil {
return nil, err
}
urlMap := parseUrlPath(u.Path)
parsedTargetUrl := url.URL{
Scheme: u.Scheme,
Host: u.Host,
}
host := parsedTargetUrl.String()
if host != target.URL() {
err = fmt.Errorf("URL doesn't match that of target")
return nil, err
}
teamFromUrl := urlMap["teams"]
if teamFromUrl != team.Name() {
err = fmt.Errorf("Team in URL doesn't match the current team of the target")
return nil, err
}
fingerprint := &containerFingerprint{
pipelineName: urlMap["pipelines"],
pipelineInstanceVars: u.Query().Get("instance_vars"),
jobName: urlMap["jobs"],
buildNameOrID: urlMap["builds"],
checkName: urlMap["resources"],
}
return fingerprint, nil
}
func (command *HijackCommand) getContainerFingerprint(target rc.Target, team concourse.Team) (*containerFingerprint, error) {
var err error
fingerprint := &containerFingerprint{}
if command.Url != "" {
fingerprint, err = command.getContainerFingerprintFromUrl(target, command.Url, team)
if err != nil {
return nil, err
}
}
pipelineName := command.Check.PipelineRef.Name
if command.Job.PipelineRef.Name != "" {
pipelineName = command.Job.PipelineRef.Name
}
var pipelineInstanceVars string
var instanceVars atc.InstanceVars
if command.Check.PipelineRef.InstanceVars != nil {
instanceVars = command.Check.PipelineRef.InstanceVars
} else {
instanceVars = command.Job.PipelineRef.InstanceVars
}
if instanceVars != nil {
instanceVarsJSON, _ := json.Marshal(instanceVars)
pipelineInstanceVars = string(instanceVarsJSON)
}
for _, field := range []struct {
fp *string
cmd string
}{
{fp: &fingerprint.pipelineName, cmd: pipelineName},
{fp: &fingerprint.pipelineInstanceVars, cmd: pipelineInstanceVars},
{fp: &fingerprint.buildNameOrID, cmd: command.Build},
{fp: &fingerprint.stepName, cmd: command.StepName},
{fp: &fingerprint.stepType, cmd: command.StepType},
{fp: &fingerprint.jobName, cmd: command.Job.JobName},
{fp: &fingerprint.checkName, cmd: command.Check.ResourceName},
{fp: &fingerprint.attempt, cmd: command.Attempt},
} {
if field.cmd != "" {
*field.fp = field.cmd
}
}
return fingerprint, nil
}
func (command *HijackCommand) getContainerIDs(target rc.Target, fingerprint *containerFingerprint, team concourse.Team) ([]atc.Container, error) {
reqValues, err := locateContainer(target.Client(), fingerprint)
if err != nil {
return nil, err
}
containers, err := team.ListContainers(reqValues)
if err != nil {
return nil, err
}
sort.Sort(hijackhelpers.ContainerSorter(containers))
return containers, nil
}
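// remoteCommand splits the positional arguments into the command path and its arguments,
// e.g. []string{"ls", "-la"} becomes ("ls", ["-la"]) (illustrative example, not from the original file).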
func remoteCommand(argv []string) (string, []string) {
var path string
var args []string
switch len(argv) {
case 0:
path = ""
case 1:
path = argv[0]
default:
path = argv[0]
args = argv[1:]
}
return path, args
}
type containerLocator interface {
locate(*containerFingerprint) (map[string]string, error)
}
type stepContainerLocator struct {
client concourse.Client
}
func (locator stepContainerLocator) locate(fingerprint *containerFingerprint) (map[string]string, error) {
reqValues := map[string]string{}
if fingerprint.stepType != "" {
reqValues["type"] = fingerprint.stepType
}
if fingerprint.stepName != "" {
reqValues["step_name"] = fingerprint.stepName
}
if fingerprint.attempt != "" {
reqValues["attempt"] = fingerprint.attempt
}
if fingerprint.jobName != "" {
reqValues["pipeline_name"] = fingerprint.pipelineName
if fingerprint.pipelineInstanceVars != "" {
reqValues["instance_vars"] = fingerprint.pipelineInstanceVars
}
reqValues["job_name"] = fingerprint.jobName
if fingerprint.buildNameOrID != "" {
reqValues["build_name"] = fingerprint.buildNameOrID
}
} else if fingerprint.buildNameOrID != "" {
reqValues["build_id"] = fingerprint.buildNameOrID
} else {
build, err := GetBuild(locator.client, nil, "", "", atc.PipelineRef{})
if err != nil {
return reqValues, err
}
reqValues["build_id"] = strconv.Itoa(build.ID)
}
return reqValues, nil
}
type checkContainerLocator struct{}
func (locator checkContainerLocator) locate(fingerprint *containerFingerprint) (map[string]string, error) {
reqValues := map[string]string{}
reqValues["type"] = "check"
if fingerprint.checkName != "" {
reqValues["resource_name"] = fingerprint.checkName
}
if fingerprint.pipelineName != "" {
reqValues["pipeline_name"] = fingerprint.pipelineName
}
if fingerprint.pipelineInstanceVars != "" {
reqValues["instance_vars"] = fingerprint.pipelineInstanceVars
}
return reqValues, nil
}
type containerFingerprint struct {
pipelineName string
pipelineInstanceVars string
jobName string
buildNameOrID string
stepName string
stepType string
checkName string
attempt string
}
func locateContainer(client concourse.Client, fingerprint *containerFingerprint) (map[string]string, error) {
var locator containerLocator
if fingerprint.checkName == "" {
locator = stepContainerLocator{
client: client,
}
} else {
locator = checkContainerLocator{}
}
return locator.locate(fingerprint)
}
type stdinWriter struct {
inputs chan<- atc.HijackInput
}
func (w *stdinWriter) Write(d []byte) (int, error) {
w.inputs <- atc.HijackInput{
Stdin: d,
}
return len(d), nil
}
|
[
"\"TERM\""
] |
[] |
[
"TERM"
] |
[]
|
["TERM"]
|
go
| 1 | 0 | |
src/foremast/runner.py
|
#!/usr/bin/env python
# Foremast - Pipeline Tooling
#
# Copyright 2018 Gogo, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A runner for all of the spinnaker pipe modules.
Read environment variables from Jenkins:
* EMAIL
* ENV
* GIT_REPO
* PROJECT
* REGION
Then run specific prepare jobs.
"""
import argparse
import logging
import os
import gogoutils
from . import (autoscaling_policy, awslambda, configs, consts, datapipeline, dns, elb, iam, pipeline, s3,
scheduled_actions, securitygroup, slacknotify, stepfunction, utils)
from .gcp_iam import GcpIamResourceClient
from .app import SpinnakerApp
from .args import add_debug
from .cloudfunction.cloud_functions_client import CloudFunctionsClient
from .exceptions import ForemastError
from .utils.gcp_environment import GcpEnvironment
from tabulate import tabulate
LOG = logging.getLogger(__name__)
class ForemastRunner:
"""Wrap each pipes module in a way that is easy to invoke."""
def __init__(self):
"""Setup the Runner for all Foremast modules."""
debug_flag()
self.email = os.getenv("EMAIL")
self.env = os.getenv("ENV")
self.group = os.getenv("PROJECT")
self.region = os.getenv("REGION")
self.repo = os.getenv("GIT_REPO")
self.runway_dir = os.getenv("RUNWAY_DIR")
self.artifact_path = os.getenv("ARTIFACT_PATH")
self.artifact_version = os.getenv("ARTIFACT_VERSION")
self.artifact_branch = os.getenv("ARTIFACT_BRANCH", "master")
self.promote_stage = os.getenv("PROMOTE_STAGE", "latest")
self.provider = os.getenv("PROVIDER", "aws")
self.git_project = "{}/{}".format(self.group, self.repo)
parsed = gogoutils.Parser(self.git_project)
generated = gogoutils.Generator(*parsed.parse_url(), formats=consts.APP_FORMATS)
self.app = generated.app_name()
self.trigger_job = generated.jenkins()['name']
self.git_short = generated.gitlab()['main']
self.raw_path = "./raw.properties"
self.json_path = self.raw_path + ".json"
self.configs = None
def write_configs(self):
"""Generate the configurations needed for pipes."""
utils.banner("Generating Configs")
if not self.runway_dir:
app_configs = configs.process_git_configs(git_short=self.git_short)
else:
app_configs = configs.process_runway_configs(runway_dir=self.runway_dir)
self.configs = configs.write_variables(
app_configs=app_configs, out_file=self.raw_path, git_short=self.git_short)
def create_app(self):
"""Create the spinnaker application."""
utils.banner("Creating Spinnaker App")
spinnakerapp = SpinnakerApp(
provider=self.provider,
app=self.app,
email=self.email,
project=self.group,
repo=self.repo,
pipeline_config=self.configs['pipeline'])
spinnakerapp.create()
def create_pipeline(self, onetime=None):
"""Create the spinnaker pipeline(s)."""
utils.banner("Creating Pipeline")
kwargs = {
'app': self.app,
'trigger_job': self.trigger_job,
'prop_path': self.json_path,
'base': None,
'runway_dir': self.runway_dir,
}
pipeline_type = self.configs['pipeline']['type']
if pipeline_type not in consts.ALLOWED_TYPES and pipeline_type not in consts.MANUAL_TYPES:
raise NotImplementedError('Pipeline type "{0}" not permitted.'.format(pipeline_type))
if not onetime:
if pipeline_type == 'lambda':
spinnakerpipeline = pipeline.SpinnakerPipelineLambda(**kwargs)
elif pipeline_type == 's3':
spinnakerpipeline = pipeline.SpinnakerPipelineS3(**kwargs)
elif pipeline_type == 'datapipeline':
spinnakerpipeline = pipeline.SpinnakerPipelineDataPipeline(**kwargs)
elif pipeline_type == 'stepfunction':
spinnakerpipeline = pipeline.SpinnakerPipelineStepFunction(**kwargs)
elif pipeline_type in consts.MANUAL_TYPES:
spinnakerpipeline = pipeline.SpinnakerPipelineManual(**kwargs)
elif pipeline_type == 'cloudfunction':
spinnakerpipeline = pipeline.SpinnakerPipelineCloudFunction(**kwargs)
else:
# Handles all other pipelines
spinnakerpipeline = pipeline.SpinnakerPipeline(**kwargs)
else:
spinnakerpipeline = pipeline.SpinnakerPipelineOnetime(onetime=onetime, **kwargs)
spinnakerpipeline.create_pipeline()
def create_aws_iam(self):
"""Create AWS IAM resources."""
utils.banner("Creating AWS IAM")
iam.create_iam_resources(env=self.env, app=self.app)
def create_gcp_iam(self):
"""Create GCP IAM resources."""
utils.banner("Creating GCP IAM")
env = self.get_current_gcp_env()
gcp_iam_client = GcpIamResourceClient(env=env, app_name=self.app, group_name=self.group,
repo_name=self.repo, configs=self.configs)
gcp_iam_client.create_iam_resources()
def create_archaius(self):
"""Create S3 bucket for Archaius."""
utils.banner("Creating S3")
s3.init_properties(env=self.env, app=self.app)
def create_s3app(self):
"""Create S3 infra for s3 applications"""
utils.banner("Creating S3 App Infrastructure")
primary_region = self.configs['pipeline']['primary_region']
s3obj = s3.S3Apps(app=self.app,
env=self.env,
region=self.region,
prop_path=self.json_path,
primary_region=primary_region)
s3obj.create_bucket()
def deploy_s3app(self):
"""Deploys artifacts contents to S3 bucket"""
utils.banner("Deploying S3 App")
primary_region = self.configs['pipeline']['primary_region']
s3obj = s3.S3Deployment(
app=self.app,
env=self.env,
region=self.region,
prop_path=self.json_path,
artifact_path=self.artifact_path,
artifact_version=self.artifact_version,
artifact_branch=self.artifact_branch,
primary_region=primary_region)
s3obj.upload_artifacts()
def promote_s3app(self):
"""promotes S3 deployment to LATEST"""
utils.banner("Promoting S3 App")
primary_region = self.configs['pipeline']['primary_region']
s3obj = s3.S3Deployment(
app=self.app,
env=self.env,
region=self.region,
prop_path=self.json_path,
artifact_path=self.artifact_path,
artifact_version=self.artifact_version,
artifact_branch=self.artifact_branch,
primary_region=primary_region)
s3obj.promote_artifacts(promote_stage=self.promote_stage)
def create_secgroups(self):
"""Create security groups as defined in the configs."""
utils.banner("Creating Security Group")
sgobj = securitygroup.SpinnakerSecurityGroup(
app=self.app, env=self.env, region=self.region, prop_path=self.json_path)
sgobj.create_security_group()
def create_awslambda(self):
"""Create security groups as defined in the configs."""
utils.banner("Creating Lambda Function")
awslambdaobj = awslambda.LambdaFunction(
app=self.app, env=self.env, region=self.region, prop_path=self.json_path)
awslambdaobj.create_lambda_function()
utils.banner("Creating Lambda Event")
lambdaeventobj = awslambda.LambdaEvent(app=self.app, env=self.env, region=self.region, prop_path=self.json_path)
lambdaeventobj.create_lambda_events()
def create_elb(self):
"""Create the ELB for the defined environment."""
utils.banner("Creating ELB")
elbobj = elb.SpinnakerELB(app=self.app, env=self.env, region=self.region, prop_path=self.json_path)
elbobj.create_elb()
def create_dns(self):
"""Create DNS for the defined app and environment."""
utils.banner("Creating DNS")
elb_subnet = self.configs[self.env]['elb']['subnet_purpose']
regions = self.configs[self.env]['regions']
failover = self.configs[self.env]['dns']['failover_dns']
primary_region = self.configs['pipeline']['primary_region']
regionspecific_dns = self.configs[self.env]['dns']['region_specific']
dnsobj = dns.SpinnakerDns(
app=self.app, env=self.env, region=self.region, prop_path=self.json_path, elb_subnet=elb_subnet)
if len(regions) > 1 and failover:
dnsobj.create_elb_dns(regionspecific=True)
dnsobj.create_failover_dns(primary_region=primary_region)
else:
if regionspecific_dns:
dnsobj.create_elb_dns(regionspecific=True)
if self.region == primary_region:
dnsobj.create_elb_dns(regionspecific=False)
def create_autoscaling_policy(self):
"""Create Scaling Policy for app in environment"""
utils.banner("Creating Scaling Policy")
policyobj = autoscaling_policy.AutoScalingPolicy(
app=self.app, env=self.env, region=self.region, prop_path=self.json_path)
policyobj.create_policy()
def create_scheduled_actions(self):
"""Create Scheduled Actions for app in environment"""
utils.banner("Creating Scheduled Actions")
actionsobj = scheduled_actions.ScheduledActions(
app=self.app, env=self.env, region=self.region, prop_path=self.json_path)
actionsobj.create_scheduled_actions()
def create_datapipeline(self):
"""Creates data pipeline and adds definition"""
utils.banner("Creating Data Pipeline")
dpobj = datapipeline.AWSDataPipeline(app=self.app, env=self.env, region=self.region, prop_path=self.json_path)
dpobj.create_datapipeline()
dpobj.set_pipeline_definition()
if self.configs[self.env].get('datapipeline').get('activate_on_deploy'):
dpobj.activate_pipeline()
def create_stepfunction(self):
"""Creates AWS Step Function"""
utils.banner("Creating AWS Step Function")
sfnobj = stepfunction.AWSStepFunction(app=self.app, env=self.env, region=self.region, prop_path=self.json_path)
sfnobj.create_stepfunction()
def deploy_cloudfunction(self):
"""Creates a Cloud Function"""
utils.banner("Creating GCP Cloud Function")
env = self.get_current_gcp_env()
cloud_function_client = CloudFunctionsClient(self.app, env, self.configs)
cloud_function_client.prepare_client()
cloud_function_client.deploy_function(self.artifact_path, self.region)
LOG.info("Finished deploying cloud function")
def slack_notify(self):
"""Send out a slack notification."""
utils.banner("Sending slack notification")
if self.env.startswith("prod"):
notify = slacknotify.SlackNotification(app=self.app, env=self.env, prop_path=self.json_path)
notify.post_message()
else:
LOG.info("No slack message sent, not production environment")
def get_current_gcp_env(self):
"""Gets the current GCP Environment
Returns:
GcpEnvironment, the current GCP environment
Raises:
ForemastError, when the current env name is not a known GCP Environment
"""
all_gcp_envs = GcpEnvironment.get_environments_from_config()
if self.env not in all_gcp_envs:
raise ForemastError("GCP environment %s not found in configuration", self.env)
return all_gcp_envs[self.env]
def cleanup(self):
"""Clean up generated files."""
os.remove(self.raw_path)
def prepare_infrastructure():
"""Entry point for preparing the infrastructure in a specific env."""
runner = ForemastRunner()
runner.write_configs()
try:
pipeline_type = runner.configs['pipeline']['type']
if pipeline_type is None or pipeline_type.isspace():
raise ForemastError("pipeline.type is required")
except KeyError:
raise ForemastError("pipeline.type is required")
cloud_name = configs.get_cloud_for_pipeline_type(pipeline_type)
if cloud_name == "gcp":
LOG.info("Will create GCP Infrastructure for pipeline.type '%s'", pipeline_type)
prepare_infrastructure_gcp(runner)
    elif cloud_name == "aws":
LOG.info("Will create AWS Infrastructure for pipeline.type '%s'", pipeline_type)
prepare_infrastructure_aws(runner, pipeline_type)
else:
error_message = ("pipeline.type of '{0}' is not supported. "
"If this is a manual pipeline it is required you specify the "
"pipeline type in AWS_MANUAL_TYPES or GCP_MANUAL_TYPES"
).format(pipeline_type)
raise ForemastError(error_message)
def prepare_infrastructure_gcp(runner: ForemastRunner):
"""Creates GCP infrastructure for a specific env."""
# Always create IAM, this ensure svc account and permissions is done
runner.create_gcp_iam()
def prepare_infrastructure_aws(runner, pipeline_type):
"""Creates AWS infrastructure for a specific env."""
archaius = runner.configs[runner.env]['app']['archaius_enabled']
eureka = runner.configs[runner.env]['app']['eureka_enabled']
runner.create_app()
if pipeline_type not in ['s3', 'datapipeline', 'stepfunction']:
runner.create_aws_iam()
# TODO: Refactor Archaius to be fully featured
if archaius:
runner.create_archaius()
runner.create_secgroups()
if eureka:
LOG.info("Eureka Enabled, skipping ELB and DNS setup")
elif pipeline_type == "lambda":
LOG.info("Lambda Enabled, skipping ELB and DNS setup")
runner.create_awslambda()
elif pipeline_type == "s3":
runner.create_s3app()
elif pipeline_type == 'datapipeline':
runner.create_datapipeline()
elif pipeline_type == 'stepfunction':
runner.create_aws_iam()
runner.create_stepfunction()
else:
LOG.info("No Eureka, running ELB and DNS setup")
runner.create_elb()
runner.create_dns()
runner.slack_notify()
runner.cleanup()
def describe_environments(args):
"""Prints a simple visual output of environments visible to Foremast"""
if args.parsed.cloud_provider == "gcp":
table = get_describe_gcp_environments()
elif args.parsed.cloud_provider == "aws":
table = get_describe_aws_environments()
else:
raise ForemastError("Cannot describe cloud '{}'".format(args.parsed.cloud_provider))
output = tabulate(table[1], table[0], tablefmt=args.parsed.print_table_format)
if args.parsed.print_to_file:
        with open(args.parsed.print_to_file, "w") as output_file:
            output_file.write(output)
        LOG.info("Saved printed table to %s", args.parsed.print_to_file)
print(output)
def get_describe_gcp_environments():
"""Prints a simple visual output of GCP Environments visible to Foremast
Returns:
tuple: first the table headers, second the table values
"""
table_header = ["Environment", "Project", "Permitted Groups"]
env_table = list()
all_envs = GcpEnvironment.get_environments_from_config()
for env_name in all_envs:
env = all_envs[env_name]
env_table.append([env.name, env.service_account_project, "N/A"])
all_projects = env.get_all_projects()
for project in all_projects:
if "foremast_groups" in project["labels"]:
groups = project["labels"]["foremast_groups"]
else:
groups = "N/A"
env_table.append([env.name, project["projectId"], groups])
return table_header, env_table
def get_describe_aws_environments():
"""Prints a simple visual output of AWS environments configured in Foremast
Returns:
tuple: first the table headers, second the table values
"""
table_header = ["Environment"]
env_table = list()
for env in configs.ENVS:
env_table.append([env])
return table_header, env_table
def prepare_app_pipeline():
"""Entry point for application setup and initial pipeline in Spinnaker."""
runner = ForemastRunner()
runner.write_configs()
runner.create_app()
runner.create_pipeline()
runner.cleanup()
def prepare_onetime_pipeline():
"""Entry point for single use pipeline setup in the defined app."""
runner = ForemastRunner()
runner.write_configs()
runner.create_pipeline(onetime=os.getenv('ENV'))
runner.cleanup()
def create_scaling_policy():
"""Create Auto Scaling Policy for an Auto Scaling Group."""
runner = ForemastRunner()
runner.write_configs()
runner.create_autoscaling_policy()
runner.cleanup()
def create_scheduled_actions():
"""Create Scheduled Actions for an Auto Scaling Group."""
runner = ForemastRunner()
runner.write_configs()
runner.create_scheduled_actions()
runner.cleanup()
def rebuild_pipelines(*args):
"""Entry point for rebuilding pipelines.
Use to rebuild all pipelines or a specific group.
"""
rebuild_all = False
rebuild_project = os.getenv("REBUILD_PROJECT")
if args:
LOG.debug('Incoming arguments: %s', args)
command_args, *_ = args
rebuild_all = command_args.parsed.all
rebuild_project = command_args.parsed.project
if rebuild_project == 'ALL':
rebuild_all = True
if rebuild_all:
LOG.info('Rebuilding all projects.')
elif rebuild_project is None:
msg = 'No REBUILD_PROJECT variable found'
LOG.fatal(msg)
raise SystemExit('Error: {0}'.format(msg))
else:
LOG.info('Rebuilding project: %s', rebuild_project)
all_apps = utils.get_all_apps()
for apps in all_apps:
if 'repoProjectKey' not in apps:
LOG.info('Skipping %s. No project key found', apps['name'])
continue
app_name = '{}/{}'.format(apps['repoProjectKey'], apps['repoSlug'])
if apps['repoProjectKey'].lower() == rebuild_project.lower() or rebuild_all:
os.environ["PROJECT"] = apps['repoProjectKey']
os.environ["GIT_REPO"] = apps['repoSlug']
LOG.info('Rebuilding pipelines for %s', app_name)
runner = ForemastRunner()
try:
runner.write_configs()
runner.create_pipeline()
runner.cleanup()
except Exception: # pylint: disable=broad-except
LOG.warning('Error updating pipeline for %s', app_name)
def deploy_cloudfunction():
"""Deploys a GCP Cloud Function"""
runner = ForemastRunner()
runner.write_configs()
runner.deploy_cloudfunction()
def deploy_s3app():
"""Entry point for application setup and s3 deployments"""
runner = ForemastRunner()
runner.write_configs()
runner.deploy_s3app()
def promote_s3app():
"""Entry point for application setup and s3 promotions"""
runner = ForemastRunner()
runner.write_configs()
runner.promote_s3app()
def debug_flag():
"""Set logging level for entry points."""
logging.basicConfig(format=consts.LOGGING_FORMAT)
parser = argparse.ArgumentParser(description=debug_flag.__doc__)
add_debug(parser)
args, _extra_args = parser.parse_known_args()
package, *_ = __package__.split('.')
logging.getLogger(package).setLevel(args.debug)
|
[] |
[] |
[
"EMAIL",
"PROMOTE_STAGE",
"ENV",
"PROJECT",
"ARTIFACT_PATH",
"GIT_REPO",
"REGION",
"ARTIFACT_BRANCH",
"RUNWAY_DIR",
"REBUILD_PROJECT",
"PROVIDER",
"ARTIFACT_VERSION"
] |
[]
|
["EMAIL", "PROMOTE_STAGE", "ENV", "PROJECT", "ARTIFACT_PATH", "GIT_REPO", "REGION", "ARTIFACT_BRANCH", "RUNWAY_DIR", "REBUILD_PROJECT", "PROVIDER", "ARTIFACT_VERSION"]
|
python
| 12 | 0 | |
selfservice/strategy/oidc/strategy_helper_test.go
|
package oidc_test
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/http/cookiejar"
"net/http/httptest"
"net/url"
"os"
"testing"
"time"
"github.com/julienschmidt/httprouter"
"github.com/phayes/freeport"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tidwall/gjson"
"github.com/ory/dockertest/v3"
"github.com/ory/dockertest/v3/docker"
"github.com/ory/kratos/driver"
"github.com/ory/kratos/driver/config"
"github.com/ory/kratos/identity"
"github.com/ory/kratos/selfservice/strategy/oidc"
"github.com/ory/kratos/x"
"github.com/ory/x/ioutilx"
"github.com/ory/x/logrusx"
"github.com/ory/x/resilience"
"github.com/ory/x/urlx"
)
func createClient(t *testing.T, remote string, redir, id string) {
require.NoError(t, resilience.Retry(logrusx.New("", ""), time.Second*10, time.Minute*2, func() error {
if req, err := http.NewRequest("DELETE", remote+"/clients/"+id, nil); err != nil {
return err
} else if _, err := http.DefaultClient.Do(req); err != nil {
return err
}
var b bytes.Buffer
require.NoError(t, json.NewEncoder(&b).Encode(&struct {
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
Scope string `json:"scope"`
GrantTypes []string `json:"grant_types"`
ResponseTypes []string `json:"response_types"`
RedirectURIs []string `json:"redirect_uris"`
}{
ClientID: id,
ClientSecret: "secret",
GrantTypes: []string{"authorization_code", "refresh_token"},
ResponseTypes: []string{"code"},
Scope: "offline offline_access openid",
RedirectURIs: []string{redir},
}))
res, err := http.Post(remote+"/clients", "application/json", &b)
if err != nil {
return err
}
defer res.Body.Close()
if http.StatusCreated != res.StatusCode {
			return errors.Errorf("expected status code %d but got %d", http.StatusCreated, res.StatusCode)
}
return nil
}))
}
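// newHydraIntegration spins up a small login/consent app that auto-accepts ORY Hydra's
// login and consent requests for the given subject and scope (descriptive comment added for clarity).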
func newHydraIntegration(t *testing.T, remote *string, subject *string, scope *[]string, addr string) (*http.Server, string) {
router := httprouter.New()
type p struct {
Subject string `json:"subject,omitempty"`
GrantScope []string `json:"grant_scope,omitempty"`
}
var do = func(w http.ResponseWriter, r *http.Request, href string, payload io.Reader) {
req, err := http.NewRequest("PUT", href, payload)
require.NoError(t, err)
req.Header.Set("Content-Type", "application/json")
res, err := http.DefaultClient.Do(req)
require.NoError(t, err)
defer res.Body.Close()
body := ioutilx.MustReadAll(res.Body)
require.Equal(t, http.StatusOK, res.StatusCode, "%s", body)
var response struct {
RedirectTo string `json:"redirect_to"`
}
require.NoError(t, json.NewDecoder(bytes.NewBuffer(body)).Decode(&response))
require.NotNil(t, response.RedirectTo, "%s", body)
http.Redirect(w, r, response.RedirectTo, http.StatusFound)
}
router.GET("/login", func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
require.NotEmpty(t, *remote)
require.NotEmpty(t, *subject)
challenge := r.URL.Query().Get("login_challenge")
require.NotEmpty(t, challenge)
var b bytes.Buffer
require.NoError(t, json.NewEncoder(&b).Encode(&p{Subject: *subject}))
href := urlx.MustJoin(*remote, "/oauth2/auth/requests/login/accept") + "?login_challenge=" + challenge
do(w, r, href, &b)
})
router.GET("/consent", func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
require.NotEmpty(t, *remote)
require.NotNil(t, *scope)
challenge := r.URL.Query().Get("consent_challenge")
require.NotEmpty(t, challenge)
var b bytes.Buffer
require.NoError(t, json.NewEncoder(&b).Encode(&p{GrantScope: *scope}))
href := urlx.MustJoin(*remote, "/oauth2/auth/requests/consent/accept") + "?consent_challenge=" + challenge
do(w, r, href, &b)
})
if addr == "" {
server := httptest.NewServer(router)
t.Cleanup(server.Close)
return server.Config, server.URL
}
parsed, err := url.ParseRequestURI(addr)
require.NoError(t, err)
server := &http.Server{Addr: ":" + parsed.Port(), Handler: router}
go func(t *testing.T) {
if err := server.ListenAndServe(); err != http.ErrServerClosed {
require.NoError(t, err)
} else if err == nil {
require.NoError(t, server.Close())
}
}(t)
t.Cleanup(func() {
require.NoError(t, server.Close())
})
return server, addr
}
func newReturnTs(t *testing.T, reg driver.Registry) *httptest.Server {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
sess, err := reg.SessionManager().FetchFromRequest(r.Context(), r)
require.NoError(t, err)
require.Empty(t, sess.Identity.Credentials)
reg.Writer().Write(w, r, sess)
}))
reg.Config(context.Background()).MustSet(config.ViperKeySelfServiceBrowserDefaultReturnTo, ts.URL)
t.Cleanup(ts.Close)
return ts
}
func newUI(t *testing.T, reg driver.Registry) *httptest.Server {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
var e interface{}
var err error
if r.URL.Path == "/login" {
e, err = reg.LoginFlowPersister().GetLoginFlow(r.Context(), x.ParseUUID(r.URL.Query().Get("flow")))
} else if r.URL.Path == "/registration" {
e, err = reg.RegistrationFlowPersister().GetRegistrationFlow(r.Context(), x.ParseUUID(r.URL.Query().Get("flow")))
} else if r.URL.Path == "/settings" {
e, err = reg.SettingsFlowPersister().GetSettingsFlow(r.Context(), x.ParseUUID(r.URL.Query().Get("flow")))
}
require.NoError(t, err)
reg.Writer().Write(w, r, e)
}))
t.Cleanup(ts.Close)
reg.Config(context.Background()).MustSet(config.ViperKeySelfServiceLoginUI, ts.URL+"/login")
reg.Config(context.Background()).MustSet(config.ViperKeySelfServiceRegistrationUI, ts.URL+"/registration")
reg.Config(context.Background()).MustSet(config.ViperKeySelfServiceSettingsURL, ts.URL+"/settings")
return ts
}
func newHydra(t *testing.T, subject *string, scope *[]string) (remoteAdmin, remotePublic, hydraIntegrationTSURL string) {
remoteAdmin = os.Getenv("TEST_SELFSERVICE_OIDC_HYDRA_ADMIN")
remotePublic = os.Getenv("TEST_SELFSERVICE_OIDC_HYDRA_PUBLIC")
hydraIntegrationTS, hydraIntegrationTSURL := newHydraIntegration(t, &remoteAdmin, subject, scope, os.Getenv("TEST_SELFSERVICE_OIDC_HYDRA_INTEGRATION_ADDR"))
t.Cleanup(func() {
require.NoError(t, hydraIntegrationTS.Close())
})
if remotePublic == "" && remoteAdmin == "" {
t.Logf("Environment did not provide ORY Hydra, starting fresh.")
publicPort, err := freeport.GetFreePort()
require.NoError(t, err)
pool, err := dockertest.NewPool("")
require.NoError(t, err)
hydra, err := pool.RunWithOptions(&dockertest.RunOptions{
Repository: "oryd/hydra",
Tag: "v1.4.10",
Env: []string{
"DSN=memory",
fmt.Sprintf("URLS_SELF_ISSUER=http://127.0.0.1:%d/", publicPort),
"URLS_LOGIN=" + hydraIntegrationTSURL + "/login",
"URLS_CONSENT=" + hydraIntegrationTSURL + "/consent",
},
Cmd: []string{"serve", "all", "--dangerous-force-http"},
ExposedPorts: []string{"4444/tcp", "4445/tcp"},
PortBindings: map[docker.Port][]docker.PortBinding{
"4444/tcp": {{HostPort: fmt.Sprintf("%d/tcp", publicPort)}},
},
})
require.NoError(t, err)
t.Cleanup(func() {
require.NoError(t, hydra.Close())
})
require.NoError(t, hydra.Expire(uint(60*5)))
require.NotEmpty(t, hydra.GetPort("4444/tcp"), "%+v", hydra.Container.NetworkSettings.Ports)
require.NotEmpty(t, hydra.GetPort("4445/tcp"), "%+v", hydra.Container)
remotePublic = "http://127.0.0.1:" + hydra.GetPort("4444/tcp")
remoteAdmin = "http://127.0.0.1:" + hydra.GetPort("4445/tcp")
}
t.Logf("ORY Hydra running at: %s %s", remotePublic, remoteAdmin)
return remoteAdmin, remotePublic, hydraIntegrationTSURL
}
func newOIDCProvider(
t *testing.T,
kratos *httptest.Server,
hydraPublic string,
hydraAdmin string,
id, clientID string,
) oidc.Configuration {
createClient(t, hydraAdmin, kratos.URL+oidc.RouteBase+"/callback/"+id, clientID)
return oidc.Configuration{
Provider: "generic",
ID: id,
ClientID: clientID,
ClientSecret: "secret",
IssuerURL: hydraPublic + "/",
Mapper: "file://./stub/oidc.hydra.jsonnet",
}
}
func viperSetProviderConfig(t *testing.T, conf *config.Config, providers ...oidc.Configuration) {
conf.MustSet(config.ViperKeySelfServiceStrategyConfig+"."+string(identity.CredentialsTypeOIDC),
map[string]interface{}{"enabled": true, "config": &oidc.ConfigurationCollection{Providers: providers}})
}
func newClient(t *testing.T, jar *cookiejar.Jar) *http.Client {
if jar == nil {
j, err := cookiejar.New(nil)
jar = j
require.NoError(t, err)
}
return &http.Client{
Jar: jar,
CheckRedirect: func(req *http.Request, via []*http.Request) error {
if debugRedirects {
t.Logf("Redirect: %s", req.URL.String())
}
if len(via) >= 20 {
for k, v := range via {
t.Logf("Failed with redirect (%d): %s", k, v.URL.String())
}
return errors.New("stopped after 20 redirects")
}
return nil
},
}
}
// AssertSystemError asserts an error ui response
func AssertSystemError(t *testing.T, errTS *httptest.Server, res *http.Response, body []byte, code int, reason string) {
require.Contains(t, res.Request.URL.String(), errTS.URL, "%s", body)
assert.Equal(t, int64(code), gjson.GetBytes(body, "0.code").Int(), "%s", body)
assert.Contains(t, gjson.GetBytes(body, "0.reason").String(), reason, "%s", body)
}
|
[
"\"TEST_SELFSERVICE_OIDC_HYDRA_ADMIN\"",
"\"TEST_SELFSERVICE_OIDC_HYDRA_PUBLIC\"",
"\"TEST_SELFSERVICE_OIDC_HYDRA_INTEGRATION_ADDR\""
] |
[] |
[
"TEST_SELFSERVICE_OIDC_HYDRA_PUBLIC",
"TEST_SELFSERVICE_OIDC_HYDRA_ADMIN",
"TEST_SELFSERVICE_OIDC_HYDRA_INTEGRATION_ADDR"
] |
[]
|
["TEST_SELFSERVICE_OIDC_HYDRA_PUBLIC", "TEST_SELFSERVICE_OIDC_HYDRA_ADMIN", "TEST_SELFSERVICE_OIDC_HYDRA_INTEGRATION_ADDR"]
|
go
| 3 | 0 | |
StRoot/StSpinPool/StChargedPionAnalysisMaker/scripts/minimc.py
|
#!/usr/bin/env python
# encoding: utf-8
"""
minimc.py
Created by Adam Kocoloski on 2007-05-21.
Copyright (c) 2007 __MyCompanyName__. All rights reserved.
"""
import os, sys, getopt, math, re, glob
from array import array
import ROOT
help_message = '''
This module manages histogramming minimc.root files and combining the results. Requires an
StJetSkimEvent(?) tree as input that contains the events of interest (i.e. trigger maker results).
'''
loadLibs = ''
site = 'local'
miniKeys = ['minbias', '96201', '96211', '96221', '96233', 'notrig']
trigKeys = miniKeys
asymKeys = ['lo','nlo','max','min','zero','m015','m030','m045','m060','m075','m090','m105','p030','p045','p060','p070']
asymKeys2 = ['p060','p045','p030','nlo','zero','m015','m030','m045','m060','m075','m090','m105','min']
Category = { 'MC':0, 'MATCHED':1, 'MERGED':2, 'SPLIT':3, 'CONTAM':4, 'GHOST':5, 'MATGLOB':6 }
processIds = { 'gg':68, 'qg':28, 'qq':11 }
xsec = {
'2_3' : 8.150,
'3_4' : 1.302,
'4_5' : 3.158E-01,
'5_7' : 1.372E-01,
'7_9' : 2.290E-02,
'9_11' : 5.495E-03,
'11_15' : 2.220E-03,
'15_25' : 3.907E-04,
'25_35' : 1.074E-05,
'above_35' : 5.300E-07,
'45_55' : 2.857E-08,
'55_65' : 1.451E-09
}
#xsec = {
#'3_4' : 1.287,
#'4_5' : 3.117*10**-1,
#'5_7' : 1.360*10**-1,
#'7_9' : 2.305*10**-2,
#'9_11' : 5.494*10**-3,
#'11_15' : 2.228*10**-3,
#'15_25' : 3.895*10**-4,
#'25_35' : 1.016*10**-5,
#'above_35' : 5.299*10**-7,
#'45_55' : 2.830*10**-8,
#'55_65' : 1.433*10**-9
#}
fileid = {
'2_3' : 1231,
'3_4' : 1232,
'4_5' : 1233,
'5_7' : 1224,
'7_9' : 1225,
'9_11' : 1226,
'11_15' : 1227,
'15_25' : 1228,
'25_35' : 1229,
'above_35' : 1230,
'45_55' : 1270,
'55_65' : 1271
}
daveName = {
'2_3' : 'pt2',
'3_4' : 'pt3',
'4_5' : 'pt4',
'5_7' : 'pt5',
'7_9' : 'pt7',
'9_11' : 'pt9',
'11_15' : 'pt11',
'15_25' : 'pt15',
'25_35' : 'pt25',
'above_35' : 'pt35above',
'45_55' : 'pt45',
'55_65' : 'pt55'
}
def initForXgrid():
try:
dirList = os.listdir('/Volumes/star1.lns.mit.edu')
if len(dirList) == 0:
os.system('/sbin/mount_nfs star1.lns.mit.edu:/XgridLibs /Volumes/star1.lns.mit.edu')
except OSError:
os.mkdir('/Volumes/star1.lns.mit.edu')
os.system('/sbin/mount_nfs star1.lns.mit.edu:/XgridLibs /Volumes/star1.lns.mit.edu')
if sys.byteorder == 'big': sys.path.append('/Volumes/star1.lns.mit.edu/sw_ppc/lib/root')
else: sys.path.append('/Volumes/star1.lns.mit.edu/sw/lib/root')
try:
dirList = os.listdir('/Volumes/deltag5.lns.mit.edu')
if len(dirList) == 0:
os.system('/sbin/mount_afp -o nobrowse "afp://;AUTH=No%20User%[email protected]/scratch" /Volumes/deltag5.lns.mit.edu')
except OSError:
os.mkdir('/Volumes/deltag5.lns.mit.edu')
os.system('/sbin/mount_afp -o nobrowse "afp://;AUTH=No%20User%[email protected]/scratch" /Volumes/deltag5.lns.mit.edu')
from socket import gethostname
print 'xgrid initialization complete'
print 'minimc.py executing on', gethostname()
class MiniMcHistos(dict):
ptBins = [40, 0, 20]
etaBins = [40, -2.0, 2.0]
phiBins = [20, -math.pi, math.pi]
vzBins = [300, -150., 150.]
nFitBins = [45, 0.5, 45.5]
pBins = [250, 0.2, 20.0]
dEdxBins = [100, 4., 40.]
dcaGBins = [100, 0., 3.1]
partonBins = [70, 0, 70]
eventPartonBins = [1400, 0, 70]
xbins = [100, 0, 1]
ggRescaleFactor = 1.15
def __init__(self,trigIdString,subProcess='',tfile=None):
self.trigIdString = trigIdString
#self.subProcess = subProcess
if subProcess != '':
self.processId = processIds[subProcess]
else:
self.processId = 0
self['pt'] = ROOT.TH1D('_%s_pt_%s' % (trigIdString,subProcess), '', self.ptBins[0], self.ptBins[1], self.ptBins[2])
self['ptRescaled'] = ROOT.TH1D('_%s_ptRescaled_%s' % (trigIdString,subProcess), '', self.ptBins[0], self.ptBins[1], self.ptBins[2])
self['eta'] = ROOT.TH1D('_%s_eta_%s' % (trigIdString,subProcess), '', self.etaBins[0], self.etaBins[1], self.etaBins[2])
self['phi'] = ROOT.TH1D('_%s_phi_%s' % (trigIdString,subProcess), '', self.phiBins[0], self.phiBins[1], self.phiBins[2])
self['vz'] = ROOT.TH1D('_%s_vz_%s' % (trigIdString,subProcess), '', self.vzBins[0], self.vzBins[1], self.vzBins[2])
self['nHitsFit'] = ROOT.TH1D('_%s_nHitsFit_%s' % (trigIdString,subProcess), '', self.nFitBins[0], self.nFitBins[1], self.nFitBins[2])
self['dEdx'] = ROOT.TH2D('_%s_dEdx_%s' % (trigIdString,subProcess), '', self.pBins[0], self.pBins[1], self.pBins[2], self.dEdxBins[0], self.dEdxBins[1], self.dEdxBins[2])
self['dcaG'] = ROOT.TH1D('_%s_dcaG_%s' % (trigIdString,subProcess), '', self.dcaGBins[0], self.dcaGBins[1], self.dcaGBins[2])
self['eventPartonicPt'] = ROOT.TH1D('_%s_eventPartonicPt_%s' % (trigIdString,subProcess), '', self.eventPartonBins[0], self.eventPartonBins[1], self.eventPartonBins[2])
self['x1'] = ROOT.TH1D('_%s_x1_%s' % (trigIdString,subProcess), '', self.xbins[0], self.xbins[1], self.xbins[2])
self['x2'] = ROOT.TH1D('_%s_x2_%s' % (trigIdString,subProcess), '', self.xbins[0], self.xbins[1], self.xbins[2])
self['partonicPt'] = ROOT.TH2D('_%s_partonicPt_%s' % (trigIdString,subProcess), '', self.ptBins[0], self.ptBins[1], self.ptBins[2], self.partonBins[0], self.partonBins[1], self.partonBins[2])
self['partonProf'] = ROOT.TProfile('_%s_partonProf_%s' % (trigIdString,subProcess), '', self.ptBins[0], self.ptBins[1], self.ptBins[2])
self['ptMc_ptPr'] = ROOT.TH2D('_%s_ptMc_ptPr_%s' % (trigIdString,subProcess), '', self.ptBins[0], self.ptBins[1], self.ptBins[2], self.ptBins[0], self.ptBins[1], self.ptBins[2])
self['lo'] = ROOT.TH1D('_%s_lo_%s' % (trigIdString,subProcess), '', self.ptBins[0], self.ptBins[1], self.ptBins[2])
self['nlo'] = ROOT.TH1D('_%s_nlo_%s' % (trigIdString,subProcess), '', self.ptBins[0], self.ptBins[1], self.ptBins[2])
self['max'] = ROOT.TH1D('_%s_max_%s' % (trigIdString,subProcess), '', self.ptBins[0], self.ptBins[1], self.ptBins[2])
self['min'] = ROOT.TH1D('_%s_min_%s' % (trigIdString,subProcess), '', self.ptBins[0], self.ptBins[1], self.ptBins[2])
self['zero'] = ROOT.TH1D('_%s_zero_%s' % (trigIdString,subProcess), '', self.ptBins[0], self.ptBins[1], self.ptBins[2])
self['m015'] = ROOT.TH1D('_%s_m015_%s' % (trigIdString,subProcess), '', self.ptBins[0], self.ptBins[1], self.ptBins[2])
self['m030'] = ROOT.TH1D('_%s_m030_%s' % (trigIdString,subProcess), '', self.ptBins[0], self.ptBins[1], self.ptBins[2])
self['m045'] = ROOT.TH1D('_%s_m045_%s' % (trigIdString,subProcess), '', self.ptBins[0], self.ptBins[1], self.ptBins[2])
self['m060'] = ROOT.TH1D('_%s_m060_%s' % (trigIdString,subProcess), '', self.ptBins[0], self.ptBins[1], self.ptBins[2])
self['m075'] = ROOT.TH1D('_%s_m075_%s' % (trigIdString,subProcess), '', self.ptBins[0], self.ptBins[1], self.ptBins[2])
self['m090'] = ROOT.TH1D('_%s_m090_%s' % (trigIdString,subProcess), '', self.ptBins[0], self.ptBins[1], self.ptBins[2])
self['m105'] = ROOT.TH1D('_%s_m105_%s' % (trigIdString,subProcess), '', self.ptBins[0], self.ptBins[1], self.ptBins[2])
self['p030'] = ROOT.TH1D('_%s_p030_%s' % (trigIdString,subProcess), '', self.ptBins[0], self.ptBins[1], self.ptBins[2])
self['p045'] = ROOT.TH1D('_%s_p045_%s' % (trigIdString,subProcess), '', self.ptBins[0], self.ptBins[1], self.ptBins[2])
self['p060'] = ROOT.TH1D('_%s_p060_%s' % (trigIdString,subProcess), '', self.ptBins[0], self.ptBins[1], self.ptBins[2])
self['p070'] = ROOT.TH1D('_%s_p070_%s' % (trigIdString,subProcess), '', self.ptBins[0], self.ptBins[1], self.ptBins[2])
self['denom'] = ROOT.TH1D('_%s_denom_%s' % (trigIdString,subProcess), '', self.ptBins[0], self.ptBins[1], self.ptBins[2])
self.clear()
if tfile is not None:
for key in self.keys():
self[key].Delete()
self[key] = tfile.Get('_%s_%s_%s' % (self.trigIdString,key,subProcess))
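    # Histograms are named '_<trigIdString>_<key>_<subProcess>' so that several
    # trigger/subprocess combinations can coexist in a single TFile; passing
    # tfile re-attaches this wrapper to a set of histograms written earlier.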
def clear(self):
self.miniEvent = 0
self.pythiaEvent= 0
self.subProcess = 0
self.partonicPt = 0
self.lo = 0
self.nlo = 0
self.max = 0
self.min = 0
self.zero = 0
self.m015 = 0
self.m030 = 0
self.m045 = 0
self.m060 = 0
self.m075 = 0
self.m090 = 0
self.m105 = 0
self.p030 = 0
self.p045 = 0
self.p060 = 0
self.p070 = 0
def fillEvent(self, miniEvent, pythiaEvent):
import ROOT
self.miniEvent = miniEvent
self.pythiaEvent = pythiaEvent
if self.processId != 0 and self.processId != self.pythiaEvent.processId(): return
self.subProcess = pythiaEvent.processId()
self.partonicPt = pythiaEvent.pt()
self.lo = pythiaEvent.ALL(ROOT.StPythiaEvent.LO)
self.nlo = pythiaEvent.ALL(ROOT.StPythiaEvent.NLO)
self.max = pythiaEvent.ALL(ROOT.StPythiaEvent.MAX)
self.min = pythiaEvent.ALL(ROOT.StPythiaEvent.MIN)
self.zero = pythiaEvent.ALL(ROOT.StPythiaEvent.ZERO)
self.m015 = pythiaEvent.ALL(ROOT.StPythiaEvent.M015)
self.m030 = pythiaEvent.ALL(ROOT.StPythiaEvent.M030)
self.m045 = pythiaEvent.ALL(ROOT.StPythiaEvent.M045)
self.m060 = pythiaEvent.ALL(ROOT.StPythiaEvent.M060)
self.m075 = pythiaEvent.ALL(ROOT.StPythiaEvent.M075)
self.m090 = pythiaEvent.ALL(ROOT.StPythiaEvent.M090)
self.m105 = pythiaEvent.ALL(ROOT.StPythiaEvent.M105)
self.p030 = pythiaEvent.ALL(ROOT.StPythiaEvent.P030)
self.p045 = pythiaEvent.ALL(ROOT.StPythiaEvent.P045)
self.p060 = pythiaEvent.ALL(ROOT.StPythiaEvent.P060)
self.p070 = pythiaEvent.ALL(ROOT.StPythiaEvent.P070)
self['vz'].Fill(miniEvent.vertexZ())
self['eventPartonicPt'].Fill( self.partonicPt )
self['x1'].Fill( pythiaEvent.x1() )
self['x2'].Fill( pythiaEvent.x2() )
def fillTrack(self,track):
if self.processId != 0 and self.processId != self.pythiaEvent.processId(): return
if self.pythiaEvent.processId() == 68:
mypt = track.ptPr() ** self.ggRescaleFactor
else:
mypt = track.ptPr()
ptCut = mypt > 2.0
pidCut = ( track.geantId() == 8 ) or ( track.geantId() == 9 )
etaCut = math.fabs( track.etaPr() ) < 1.0
dcaCut = math.fabs( track.dcaGl() ) < 1.0
hitCut = ( track.fitPts() ) > 25
vzCut = ( math.fabs( self.miniEvent.vertexZ() ) < 60.0 ) and ( math.fabs( self.miniEvent.mcVertexZ() ) < 60 )
if ptCut and pidCut and etaCut and dcaCut and hitCut and vzCut:
self['phi'].Fill(track.phiPr())
if pidCut and etaCut and dcaCut and hitCut and vzCut:
self['pt'].Fill(track.ptPr())
self['ptRescaled'].Fill(mypt)
if ptCut and pidCut and dcaCut and hitCut and vzCut:
self['eta'].Fill(track.etaPr())
if ptCut and pidCut and etaCut and dcaCut and vzCut:
self['nHitsFit'].Fill(track.fitPts())
if ptCut and etaCut and dcaCut and hitCut and vzCut:
self['dEdx'].Fill(track.pPr(), track.dedx() * 1e7)
if ptCut and pidCut and etaCut and hitCut and vzCut:
self['dcaG'].Fill(track.dcaGl())
if ptCut and pidCut and etaCut and dcaCut and hitCut and vzCut:
self['partonicPt'].Fill(mypt, self.partonicPt)
self['partonProf'].Fill(mypt, self.partonicPt)
self['ptMc_ptPr'].Fill(track.ptMc(),mypt)
self['lo'].Fill(mypt,self.lo)
self['nlo'].Fill(mypt,self.nlo)
self['max'].Fill(mypt,self.max)
self['min'].Fill(mypt,self.min)
self['zero'].Fill(mypt,self.zero)
self['m015'].Fill(mypt,self.m015)
self['m030'].Fill(mypt,self.m030)
self['m045'].Fill(mypt,self.m045)
self['m060'].Fill(mypt,self.m060)
self['m075'].Fill(mypt,self.m075)
self['m090'].Fill(mypt,self.m090)
self['m105'].Fill(mypt,self.m105)
self['p030'].Fill(mypt,self.p030)
self['p045'].Fill(mypt,self.p045)
self['p060'].Fill(mypt,self.p060)
self['p070'].Fill(mypt,self.p070)
self['denom'].Fill(mypt)
def add(self, other, weight, nevents, minCounts):
originalMinCounts = minCounts
for key in self.keys():
#if key in ('ptMc_ptPr',): minCounts = 0
#else: minCounts = originalMinCounts
#asymCheck = key.split('_')[0]
if key in asymKeys or key == 'denom': continue
if key not in other.keys(): continue
nbins = self[key].GetBin(self[key].GetNbinsX(),self[key].GetNbinsY(),self[key].GetNbinsZ())
for i in range(nbins):
content = self[key].GetBinContent(i+1)
error = self[key].GetBinError(i+1)**2
if other[key] is None: continue
nparticles = other[key].GetBinContent(i+1)
if nparticles >= minCounts:
wp = weight / nevents
content += nparticles * wp
error += wp * wp * nparticles * (1+nparticles/nevents)
self[key].SetBinContent(i+1,content)
self[key].SetBinError(i+1, math.sqrt(error))
def draw(self):
c1 = ROOT.TCanvas('c1','The Basics', 0, 0, 1000, 800)
c1.Divide(4,3)
basicKeys = ['pt','eta','phi','vz','nHitsFit','dEdx','dcaG','partonicPt','partonProf','ptMc_ptPr']
for index, key in enumerate(basicKeys):
c1.cd(index+1)
self[key].Draw()
c1.cd(1).SetLogy()
c1.cd(2).SetLogy()
c1.cd(5).SetLogy()
c1.cd(6).SetLogy()
c1.cd(6).SetLogx()
c1.cd(7).SetLogy()
c2 = ROOT.TCanvas('c2', 'Asymmetries', 0, 0, 1000, 800)
c2.Divide(4,4)
for index, key in enumerate(asymKeys):
c2.cd(index+1)
self[key].Draw()
c3 = ROOT.TCanvas('c3', 'g-g Asymmetries', 0, 0, 1000, 800)
c3.Divide(4,4)
for index, key in enumerate(asymKeys):
key = '%s_gg' % (key,)
c3.cd(index+1)
self[key].Draw()
c4 = ROOT.TCanvas('c4', 'q-g Asymmetries', 0, 0, 1000, 800)
c4.Divide(4,4)
for index, key in enumerate(asymKeys):
key = '%s_qg' % (key,)
c4.cd(index+1)
self[key].Draw()
c5 = ROOT.TCanvas('c5', 'q-q Asymmetries', 0, 0, 1000, 800)
c5.Divide(4,4)
for index, key in enumerate(asymKeys):
            key = '%s_qq' % (key,)
c5.cd(index+1)
self[key].Draw()
raw_input('what do you think?')
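# Example (sketch of the pattern used in fill2()/fill3() below): a MiniMcHistos
# instance behaves like a dict of ROOT histograms keyed by name.
#
#     h = MiniMcHistos('96233', 'gg')            # one trigger key, gg subprocess only
#     h.fillEvent(miniEvent, pythiaEvent)        # caches per-event weights, fills vz/x1/x2
#     for track in miniEvent.tracks(Category['MATCHED']):
#         h.fillTrack(track)                     # applies pid/eta/dca/nHitsFit/vz cuts
#     [hist.Write() for hist in h.values()]      # persist to the current TFile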
def accept_mc_track(track):
geantId = track.geantId()
if (geantId != 8) and (geantId != 9): return False
    if math.fabs(track.etaMc()) > 1.: return False
if track.nHitMc() < 25: return False
return True
def accept_reco_track(track):
geantId = track.geantId()
if (geantId != 8) and (geantId != 9): return False
if math.fabs(track.etaPr()) > 1.: return False
if math.fabs(track.dcaGl()) > 1.: return False
if track.fitPts() < 25: return False
return True
def accept_event(ev):
if math.fabs(ev.vertexZ()) > 60: return False
if math.fabs(ev.mcVertexZ()) > 60: return False
return True
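# Example (sketch): the accept_* helpers mirror the cuts applied inline in
# MiniMcHistos.fillTrack and can be used as plain filters, e.g.
#
#     if accept_event(ev):
#         pions = [t for t in ev.tracks(Category['MATCHED']) if accept_reco_track(t)]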
def makeTriggerNtuple2(inputFileList):
"""ntuple contains event-level info from Renee's old files"""
import ROOT
ROOT.gSystem.Load('StMiniMcEvent')
fileid = {'pt3':1232, 'pt4':1233, 'pt5':1224, 'pt7':1225, 'pt9':1226, 'pt11':1227, 'pt15':1228, 'pt25':1229, 'pt35':1230, 'pt45':1270, 'pt55':1271}
outFile = ROOT.TFile('trigs.nt.root','recreate')
nt = ROOT.TNtuple('nt','trigger simu results','fileid1:fileid2:event:subProcess:isMB:isHT1:isHT2:isJP1:isJP2:lo:nlo:max:min:zero:partonicPt')
for path in inputFileList:
print 'now skimming', path
tfile = ROOT.TFile(path,'read')
tree = tfile.Get('Event')
#parse the path for fileid1 and fileid2
fileid1 = fileid[re.search('pt\d{1,2}',path).group()]
fileid2 = int(re.search('_\d{2,3}',path).group().strip('_'))
for i in xrange(tree.GetEntries()):
tree.GetEntry(i)
nt.Fill(fileid1,fileid2,tree.evtID,tree.pID,tree.bbc,tree.HT1_2005,tree.HT2_2005,tree.JP1_2005,tree.JP2_2005,
tree.weight_LO,tree.weight_NLO,tree.weight_NLO_gmax,tree.weight_NLO_gmin,tree.weight_NLO_g0,tree.hard_p)
tfile.Close()
outFile.cd()
nt.Write()
outFile.Close()
def makeTriggerNtuple(inputFileList):
"""ntuple contains event-level info from my old TChargePion trees."""
outFile = ROOT.TFile('simutrigs.nt.root','recreate')
nt = ROOT.TNtuple('nt','trigger simu results','fileid1:fileid2:run:event:subProcess:isMB:isHT1:isHT2:isJP1:isJP2')
ev = ROOT.TChargedPionEvent()
for path in inputFileList:
print 'now skimming', path
tfile = ROOT.TFile(path,'read')
pionTree = tfile.Get('pionTree')
pionTree.SetBranchAddress('Event Branch',ev)
for i in xrange(pionTree.GetEntries()):
pionTree.GetEntry(i)
            nt.Fill(ev.fileid1,ev.fileid2,ev.run,ev.event,ev.subProcessID,ev.bbcTrigger,ev.ht1TrigMaker[0],ev.ht2TrigMaker[0],ev.jp1TrigMaker[0],ev.jp2TrigMaker[0])
tfile.Close()
outFile.cd()
nt.Write()
outFile.Close()
def fill(sample, fileLimit = None, charge = 0):
"""fill histograms using StMiniMcEvent and custom trigger Ntuple as inputs"""
if site == 'Xgrid':
inputFileList = glob.glob('/Volumes/deltag5.lns.mit.edu/common/pythia/P05ih/minimc/%s/*' % (sample,))
trigFile = ROOT.TFile('/Volumes/deltag5.lns.mit.edu/kocolosk/pythia/trigs.nt.root','read')
else:
inputFileList = glob.glob('/Volumes/scratch/common/pythia/P05ih/minimc/%s/*' % (sample,))
trigFile = ROOT.TFile('/Volumes/scratch/kocolosk/pythia/trigs.nt.root','read')
if fileLimit is not None: inputFileList = inputFileList[:fileLimit]
eventCounter = 0
#ntuple storing supplemental event info
nt = trigFile.Get('nt')
print 'building trigger index'
nt.BuildIndex('fileid1*1000 + fileid2','event')
miniHists = [ MiniMcHistos(key) for key in miniKeys ]
ev = ROOT.StMiniMcEvent()
eventCounter = ROOT.TH1I('eventCounter','event counter',1,-0.5,0.5)
for path in inputFileList:
fileid1 = int(re.search('12\d{2}',path).group())
fileid2 = int(re.search('_\d{2,3}_',path).group().strip('_'))
print 'analyzing', fileid1, fileid2, path
tfile = ROOT.TFile(path,'read')
ttree = tfile.Get('StMiniMcTree')
ttree.SetBranchAddress('StMiniMcEvent',ev)
for i in xrange(ttree.GetEntries()):
ttree.GetEntry(i)
ret = nt.GetEntryWithIndex(fileid1*1000 + fileid2, ev.eventId())
[m.clear() for m in miniHists]
if ret > 0:
for m in miniHists:
m.subProcess = nt.subProcess
m.partonicPt = nt.partonicPt
m.lo = nt.lo
m.nlo = nt.nlo
m.max = nt.max
m.min = nt.min
m.zero = nt.zero
eventCounter.Fill(0.)
if accept_event(ev) is False: continue
miniHists[5].fillEvent(ev)
if int(nt.isMB) > 0:
miniHists[0].fillEvent(ev)
if int(nt.isHT1) > 0: miniHists[1].fillEvent(ev)
if int(nt.isHT2) > 0: miniHists[2].fillEvent(ev)
if int(nt.isJP1) > 0: miniHists[3].fillEvent(ev)
if int(nt.isJP2) > 0: miniHists[4].fillEvent(ev)
#find all reco pions
tracks = ev.tracks(Category['MATCHED'])
for track in tracks:
if accept_reco_track(track):
if charge != 0 and track.charge() != charge: continue
miniHists[5].fillTrack(track)
if int(nt.isMB) > 0:
miniHists[0].fillTrack(track)
if int(nt.isHT1) > 0: miniHists[1].fillTrack(track)
if int(nt.isHT2) > 0: miniHists[2].fillTrack(track)
if int(nt.isJP1) > 0: miniHists[3].fillTrack(track)
if int(nt.isJP2) > 0: miniHists[4].fillTrack(track)
else:
print 'no matching ntuple entry for', fileid1, fileid2, ev.eventId()
if charge == 0: chargeName = 'sum'
elif charge == 1: chargeName = 'plus'
elif charge == -1: chargeName = 'minus'
outFile = ROOT.TFile('%s.%s.hist.root' % (sample,chargeName),'recreate')
eventCounter.Write()
for m in miniHists: [h.Write() for h in m.values()]
def fill2(sample, fileLimit = None, useXrootd = False, listFile=None):
"""fill histograms using StJetSkimTree/StPythiaEvent and minimc files as inputs"""
pathToMinimc = '/Volumes/data01/reco/pp200/pythia6_205/%sgev/cdf_a/y2004y/gheisha_on/p05ih' % (sample,)
pathToJetSkim = '/Volumes/data01/sim/staszak/2005jets/2005jets_15grids'
ROOT.gSystem.Load('StJetMaker')
ROOT.gSystem.Load('StMiniMcEvent')
fileCounter = 0
skimEvent = ROOT.StJetSkimEvent()
miniEvent = ROOT.StMiniMcEvent()
pythiaEvent = ROOT.StPythiaEvent()
outFilePlus = ROOT.TFile('%s.plus.hist.root' % (sample,),'recreate')
miniHistsPlus = [ MiniMcHistos(key,'') for key in miniKeys ]
miniHistsPlus_gg = [ MiniMcHistos(key,'gg') for key in miniKeys ]
miniHistsPlus_qg = [ MiniMcHistos(key,'qg') for key in miniKeys ]
miniHistsPlus_qq = [ MiniMcHistos(key,'qq') for key in miniKeys ]
outFileMinus = ROOT.TFile('%s.minus.hist.root' % (sample,),'recreate')
miniHistsMinus = [ MiniMcHistos(key,'') for key in miniKeys ]
miniHistsMinus_gg = [ MiniMcHistos(key,'gg') for key in miniKeys ]
miniHistsMinus_qg = [ MiniMcHistos(key,'qg') for key in miniKeys ]
miniHistsMinus_qq = [ MiniMcHistos(key,'qq') for key in miniKeys ]
outFileSum = ROOT.TFile('%s.sum.hist.root' % (sample,),'recreate')
miniHistsSum = [ MiniMcHistos(key,'') for key in miniKeys ]
miniHistsSum_gg = [ MiniMcHistos(key,'gg') for key in miniKeys ]
miniHistsSum_qg = [ MiniMcHistos(key,'qg') for key in miniKeys ]
miniHistsSum_qq = [ MiniMcHistos(key,'qq') for key in miniKeys ]
miniHistLists = [
miniHistsPlus, miniHistsPlus_gg, miniHistsPlus_qg, miniHistsPlus_qq,
miniHistsMinus, miniHistsMinus_gg, miniHistsMinus_qg, miniHistsMinus_qq,
miniHistsSum, miniHistsSum_gg, miniHistsSum_qg, miniHistsSum_qq,
]
eventCounter = ROOT.TH1I('eventCounter','event counter',1,-0.5,0.5)
if listFile is not None:
f = open(listFile)
minimcPaths = [ line.strip() for line in f]
else:
minimcPaths = os.listdir(pathToMinimc)
for fileName in minimcPaths:
if not fileName.endswith('.root'): continue
fileCounter += 1
if fileLimit is not None and fileCounter > fileLimit: break
print '%03d : %s' % (fileCounter, fileName)
fileIndex = fileName.split('_')[-2]
skimPath = os.path.join( pathToJetSkim, 'skim_%s_%s.root' % (daveName[sample], fileIndex) )
if not os.path.isfile(skimPath) and not useXrootd:
print 'missing file = ', skimPath
continue
if useXrootd: skimPath = 'root://deltag5.lns.mit.edu/' + skimPath
print skimPath
skimFile = ROOT.TFile.Open( skimPath )
skimTree = skimFile.Get('jetSkimTree')
skimTree.SetBranchAddress('skimEventBranch', skimEvent)
#if useXrootd:
# miniFile = ROOT.TFile.Open( 'root://deltag5.lns.mit.edu/' + os.path.join( pathToMinimc, fileName ) )
#else:
if useXrootd:
miniFile = ROOT.TFile.Open( fileName )
else:
miniFile = ROOT.TFile.Open( os.path.join( pathToMinimc, fileName ) )
miniTree = miniFile.Get('StMiniMcTree')
miniTree.SetBranchAddress('StMiniMcEvent', miniEvent)
print 'skim entries = ', skimTree.GetEntries()
print 'mini entries = ', miniTree.GetEntries()
[ eventCounter.Fill(0.) for i in range(skimTree.GetEntries()) ]
# set pythia event now so we don't delete it every event
skimTree.GetEntry(1)
pythiaEvent = skimEvent.mcEvent()
pythiaEvent.ResetBit( pythiaEvent.kMustCleanup ) # if we don't it will be deleted on GetEntry
#assert(miniTree.GetEntries() == skimTree.GetEntries())
skimTree.BuildIndex('mRunId', 'mEventId')
for i in range(miniTree.GetEntries()):
miniTree.GetEntry(i)
skimTree.GetEntryWithIndex(miniEvent.runId(), miniEvent.eventId())
assert(miniEvent.eventId() == skimEvent.eventId())
for li in miniHistLists:
[m.clear() for m in li]
li[5].fillEvent(miniEvent, pythiaEvent)
triggers = [ skimEvent.trigger(96201),
skimEvent.trigger(96211),
skimEvent.trigger(96221),
skimEvent.trigger(96233) ]
trigDidFire = [ skimEvent.eBbc() > 0 and skimEvent.wBbc() > 0 ]
trigDidFire.extend( [ trig is not None and trig.shouldFire() > 0 for trig in triggers ] )
for li in miniHistLists:
if trigDidFire[0]:
li[0].fillEvent(miniEvent, pythiaEvent)
for i in range(4):
if trigDidFire[i+1]:
li[i+1].fillEvent(miniEvent, pythiaEvent)
tracks = miniEvent.tracks(Category['MATCHED'])
for track in tracks:
if track.charge() == 1:
miniHistsPlus[5].fillTrack(track)
miniHistsPlus_gg[5].fillTrack(track)
miniHistsPlus_qg[5].fillTrack(track)
miniHistsPlus_qq[5].fillTrack(track)
if track.charge() == -1:
miniHistsMinus[5].fillTrack(track)
miniHistsMinus_gg[5].fillTrack(track)
miniHistsMinus_qg[5].fillTrack(track)
miniHistsMinus_qq[5].fillTrack(track)
miniHistsSum[5].fillTrack(track)
miniHistsSum_gg[5].fillTrack(track)
miniHistsSum_qg[5].fillTrack(track)
miniHistsSum_qq[5].fillTrack(track)
if trigDidFire[0]:
if track.charge() == 1:
miniHistsPlus[0].fillTrack(track)
miniHistsPlus_gg[0].fillTrack(track)
miniHistsPlus_qg[0].fillTrack(track)
miniHistsPlus_qq[0].fillTrack(track)
if track.charge() == -1:
miniHistsMinus[0].fillTrack(track)
miniHistsMinus_gg[0].fillTrack(track)
miniHistsMinus_qg[0].fillTrack(track)
miniHistsMinus_qq[0].fillTrack(track)
miniHistsSum[0].fillTrack(track)
miniHistsSum_gg[0].fillTrack(track)
miniHistsSum_qg[0].fillTrack(track)
miniHistsSum_qq[0].fillTrack(track)
for i in range(4):
if trigDidFire[i+1]:
if track.charge() == 1:
miniHistsPlus[i+1].fillTrack(track)
miniHistsPlus_gg[i+1].fillTrack(track)
miniHistsPlus_qg[i+1].fillTrack(track)
miniHistsPlus_qq[i+1].fillTrack(track)
if track.charge() == -1:
miniHistsMinus[i+1].fillTrack(track)
miniHistsMinus_gg[i+1].fillTrack(track)
miniHistsMinus_qg[i+1].fillTrack(track)
miniHistsMinus_qq[i+1].fillTrack(track)
miniHistsSum[i+1].fillTrack(track)
miniHistsSum_gg[i+1].fillTrack(track)
miniHistsSum_qg[i+1].fillTrack(track)
miniHistsSum_qq[i+1].fillTrack(track)
skimFile.Close()
miniFile.Close()
#miniHists[0].draw()
#miniHists[-1].draw()
outFilePlus.cd()
eventCounter.Write()
for m in miniHistsPlus: [h.Write() for h in m.values()]
for m in miniHistsPlus_gg: [h.Write() for h in m.values()]
for m in miniHistsPlus_qg: [h.Write() for h in m.values()]
for m in miniHistsPlus_qq: [h.Write() for h in m.values()]
outFilePlus.Close()
outFileMinus.cd()
eventCounter.Write()
for m in miniHistsMinus: [h.Write() for h in m.values()]
for m in miniHistsMinus_gg: [h.Write() for h in m.values()]
for m in miniHistsMinus_qg: [h.Write() for h in m.values()]
for m in miniHistsMinus_qq: [h.Write() for h in m.values()]
outFileMinus.Close()
outFileSum.cd()
eventCounter.Write()
for m in miniHistsSum: [h.Write() for h in m.values()]
for m in miniHistsSum_gg: [h.Write() for h in m.values()]
for m in miniHistsSum_qg: [h.Write() for h in m.values()]
for m in miniHistsSum_qq: [h.Write() for h in m.values()]
outFileSum.Close()
def fill3(minimcList, sample='2_3', fileLimit=None, useXrootd=False):
"""fill histograms using StJetSkimTree/StPythiaEvent and minimc files as inputs"""
#pathToMinimc = '/Volumes/data01/reco/pp200/pythia6_205/%sgev/cdf_a/y2004y/gheisha_on/p05ih' % (sample,)
#pathToJetSkim = '/Volumes/data01/sim/staszak/2005jets/2005jets_15grids'
pathToJetSkim = '/star/data04/sim/staszak/2005jets/2005jets_15grids'
libs_to_load = [ 'libPhysics', 'libTable', 'StarRoot', 'StarClassLibrary', 'St_base',
'StChain', 'St_Tables', 'StUtilities', 'StTreeMaker', 'StIOMaker', 'StTriggerDataMaker',
'StBichsel', 'StEvent', 'StEventUtilities', 'StDbLib', 'StEmcUtil', 'StTofUtil', 'StPmdUtil',
'StStrangeMuDstMaker', 'StMuDSTMaker', 'StDaqLib', 'StDetectorDbMaker', 'StEmcTriggerMaker',
'StMCAsymMaker', 'StSpinDbMaker', 'StJetFinder', 'StJetMaker', 'StChargedPionAnalysisMaker']
print 'minimc : loading shared libraries ...'
libs_already_loaded = ROOT.gSystem.GetLibraries()
for library in libs_to_load:
if library not in libs_already_loaded:
ROOT.gSystem.Load(library)
#print 'analysis : loaded', library
print 'minimc : loading complete'
#ROOT.gSystem.Load('StJetMaker')
#ROOT.gSystem.Load('StMiniMcEvent')
fileCounter = 0
skimEvent = ROOT.StJetSkimEvent()
miniEvent = ROOT.StMiniMcEvent()
pythiaEvent = ROOT.StPythiaEvent()
outFilePlus = ROOT.TFile('%s.plus.hist.root' % (sample,),'recreate')
miniHistsPlus = [ MiniMcHistos(key,'') for key in miniKeys ]
miniHistsPlus_gg = [ MiniMcHistos(key,'gg') for key in miniKeys ]
miniHistsPlus_qg = [ MiniMcHistos(key,'qg') for key in miniKeys ]
miniHistsPlus_qq = [ MiniMcHistos(key,'qq') for key in miniKeys ]
outFileMinus = ROOT.TFile('%s.minus.hist.root' % (sample,),'recreate')
miniHistsMinus = [ MiniMcHistos(key,'') for key in miniKeys ]
miniHistsMinus_gg = [ MiniMcHistos(key,'gg') for key in miniKeys ]
miniHistsMinus_qg = [ MiniMcHistos(key,'qg') for key in miniKeys ]
miniHistsMinus_qq = [ MiniMcHistos(key,'qq') for key in miniKeys ]
outFileSum = ROOT.TFile('%s.sum.hist.root' % (sample,),'recreate')
miniHistsSum = [ MiniMcHistos(key,'') for key in miniKeys ]
miniHistsSum_gg = [ MiniMcHistos(key,'gg') for key in miniKeys ]
miniHistsSum_qg = [ MiniMcHistos(key,'qg') for key in miniKeys ]
miniHistsSum_qq = [ MiniMcHistos(key,'qq') for key in miniKeys ]
miniHistLists = [
miniHistsPlus, miniHistsPlus_gg, miniHistsPlus_qg, miniHistsPlus_qq,
miniHistsMinus, miniHistsMinus_gg, miniHistsMinus_qg, miniHistsMinus_qq,
miniHistsSum, miniHistsSum_gg, miniHistsSum_qg, miniHistsSum_qq,
]
eventCounter = ROOT.TH1I('eventCounter','event counter',1,-0.5,0.5)
#if listFile is not None:
# f = open(listFile)
# minimcPaths = [ line.strip() for line in f]
#else:
# minimcPaths = os.listdir(pathToMinimc)
f = open(minimcList)
minimcPaths = [ line.strip() for line in f ]
for fileName in minimcPaths:
baseName = os.path.basename(fileName)
if not fileName.endswith('.root'): continue
fileCounter += 1
if fileLimit is not None and fileCounter > fileLimit: break
print '%03d : %s' % (fileCounter, baseName)
fileIndex = baseName.split('_')[-2]
skimPath = os.path.join( pathToJetSkim, 'skim_%s_%s.root' % (daveName[sample], fileIndex) )
if not os.path.isfile(skimPath) and not useXrootd:
print 'missing file = ', skimPath
continue
#if useXrootd: skimPath = 'root://deltag5.lns.mit.edu/' + skimPath
print skimPath
skimFile = ROOT.TFile.Open( skimPath )
skimTree = skimFile.Get('jetSkimTree')
skimTree.SetBranchAddress('skimEventBranch', skimEvent)
#if useXrootd:
# miniFile = ROOT.TFile.Open( 'root://deltag5.lns.mit.edu/' + os.path.join( pathToMinimc, fileName ) )
#else:
#if useXrootd:
# miniFile = ROOT.TFile.Open( fileName )
#else:
#miniFile = ROOT.TFile.Open( os.path.join( pathToMinimc, fileName ) )
miniFile = ROOT.TFile.Open( fileName )
miniTree = miniFile.Get('StMiniMcTree')
miniTree.SetBranchAddress('StMiniMcEvent', miniEvent)
print 'skim entries = ', skimTree.GetEntries()
print 'mini entries = ', miniTree.GetEntries()
[ eventCounter.Fill(0.) for i in range(skimTree.GetEntries()) ]
# set pythia event now so we don't delete it every event
skimTree.GetEntry(1)
pythiaEvent = skimEvent.mcEvent()
pythiaEvent.ResetBit( pythiaEvent.kMustCleanup ) # if we don't it will be deleted on GetEntry
#assert(miniTree.GetEntries() == skimTree.GetEntries())
skimTree.BuildIndex('mRunId', 'mEventId')
for i in range(miniTree.GetEntries()):
miniTree.GetEntry(i)
skimTree.GetEntryWithIndex(miniEvent.runId(), miniEvent.eventId())
assert(miniEvent.eventId() == skimEvent.eventId())
for li in miniHistLists:
[m.clear() for m in li]
li[5].fillEvent(miniEvent, pythiaEvent)
triggers = [ skimEvent.trigger(96201),
skimEvent.trigger(96211),
skimEvent.trigger(96221),
skimEvent.trigger(96233) ]
trigDidFire = [ skimEvent.eBbc() > 0 and skimEvent.wBbc() > 0 ]
trigDidFire.extend( [ trig is not None and trig.shouldFire() > 0 for trig in triggers ] )
for li in miniHistLists:
if trigDidFire[0]:
li[0].fillEvent(miniEvent, pythiaEvent)
for i in range(4):
if trigDidFire[i+1]:
li[i+1].fillEvent(miniEvent, pythiaEvent)
tracks = miniEvent.tracks(Category['MATCHED'])
for track in tracks:
if track.charge() == 1:
miniHistsPlus[5].fillTrack(track)
miniHistsPlus_gg[5].fillTrack(track)
miniHistsPlus_qg[5].fillTrack(track)
miniHistsPlus_qq[5].fillTrack(track)
if track.charge() == -1:
miniHistsMinus[5].fillTrack(track)
miniHistsMinus_gg[5].fillTrack(track)
miniHistsMinus_qg[5].fillTrack(track)
miniHistsMinus_qq[5].fillTrack(track)
miniHistsSum[5].fillTrack(track)
miniHistsSum_gg[5].fillTrack(track)
miniHistsSum_qg[5].fillTrack(track)
miniHistsSum_qq[5].fillTrack(track)
if trigDidFire[0]:
if track.charge() == 1:
miniHistsPlus[0].fillTrack(track)
miniHistsPlus_gg[0].fillTrack(track)
miniHistsPlus_qg[0].fillTrack(track)
miniHistsPlus_qq[0].fillTrack(track)
if track.charge() == -1:
miniHistsMinus[0].fillTrack(track)
miniHistsMinus_gg[0].fillTrack(track)
miniHistsMinus_qg[0].fillTrack(track)
miniHistsMinus_qq[0].fillTrack(track)
miniHistsSum[0].fillTrack(track)
miniHistsSum_gg[0].fillTrack(track)
miniHistsSum_qg[0].fillTrack(track)
miniHistsSum_qq[0].fillTrack(track)
for i in range(4):
if trigDidFire[i+1]:
if track.charge() == 1:
miniHistsPlus[i+1].fillTrack(track)
miniHistsPlus_gg[i+1].fillTrack(track)
miniHistsPlus_qg[i+1].fillTrack(track)
miniHistsPlus_qq[i+1].fillTrack(track)
if track.charge() == -1:
miniHistsMinus[i+1].fillTrack(track)
miniHistsMinus_gg[i+1].fillTrack(track)
miniHistsMinus_qg[i+1].fillTrack(track)
miniHistsMinus_qq[i+1].fillTrack(track)
miniHistsSum[i+1].fillTrack(track)
miniHistsSum_gg[i+1].fillTrack(track)
miniHistsSum_qg[i+1].fillTrack(track)
miniHistsSum_qq[i+1].fillTrack(track)
skimFile.Close()
miniFile.Close()
#miniHists[0].draw()
#miniHists[-1].draw()
outFilePlus.cd()
eventCounter.Write()
for m in miniHistsPlus: [h.Write() for h in m.values()]
for m in miniHistsPlus_gg: [h.Write() for h in m.values()]
for m in miniHistsPlus_qg: [h.Write() for h in m.values()]
for m in miniHistsPlus_qq: [h.Write() for h in m.values()]
outFilePlus.Close()
outFileMinus.cd()
eventCounter.Write()
for m in miniHistsMinus: [h.Write() for h in m.values()]
for m in miniHistsMinus_gg: [h.Write() for h in m.values()]
for m in miniHistsMinus_qg: [h.Write() for h in m.values()]
for m in miniHistsMinus_qq: [h.Write() for h in m.values()]
outFileMinus.Close()
outFileSum.cd()
eventCounter.Write()
for m in miniHistsSum: [h.Write() for h in m.values()]
for m in miniHistsSum_gg: [h.Write() for h in m.values()]
for m in miniHistsSum_qg: [h.Write() for h in m.values()]
for m in miniHistsSum_qq: [h.Write() for h in m.values()]
outFileSum.Close()
def calculateALL(num, denom, nevents, sample_weights):
#error calculations in this function follow the recipe derived by Jim Sowinski
#don't include a bin from an individual sample in content or error if it has fewer than this # of particles
minParticlesToAccept = 10
#rebinning
#[h.Rebin(2) for h in num]
#[h.Rebin(2) for h in denom]
top = []
bot = []
content = []
avg_asym_weight = []
nparticles = []
[avg_asym_weight.append([]) for sample in num]
[nparticles.append([]) for sample in num]
for bin in range(num[0].GetNbinsX()):
top.append(0.)
bot.append(0.)
content.append(0.)
for sample in range(len(num)):
nparticles[sample].append(denom[sample].GetBinContent(bin+1))
if nparticles[sample][bin] >= minParticlesToAccept:
avg_asym_weight[sample].append(num[sample].GetBinContent(bin+1) / nparticles[sample][bin])
top[bin] += num[sample].GetBinContent(bin+1) * sample_weights[sample] / nevents[sample]
bot[bin] += denom[sample].GetBinContent(bin+1) * sample_weights[sample] / nevents[sample]
else:
avg_asym_weight[sample].append(0.)
if bot[bin] != 0.: content[bin] = top[bin] / bot[bin]
#calculating the errors requires another loop over all samples
error = []
for bin in range(num[0].GetNbinsX()):
error.append(0.)
for sample in range(len(num)):
if nparticles[sample][bin] >= minParticlesToAccept:
first_term = avg_asym_weight[sample][bin] - content[bin]
second_term = num[sample].GetBinError(bin+1) * num[sample].GetBinError(bin+1) / nparticles[sample][bin] - avg_asym_weight[sample][bin] * avg_asym_weight[sample][bin]
else:
first_term = 0
second_term = 0
wp = sample_weights[sample] / nevents[sample]
error[bin] += wp*wp*nparticles[sample][bin] * (first_term*first_term*(1+nparticles[sample][bin]/nevents[sample]) + second_term)
if bot[bin] != 0.: error[bin] *= 1.0 / (bot[bin] * bot[bin])
h = num[0].Clone()
#update the new histo
for bin in range(num[0].GetNbinsX()):
if bot[bin] == 0.:
content[bin] = 0.
error[bin] = 0.
h.SetBinContent(bin+1, content[bin])
h.SetBinError(bin+1, math.sqrt(error[bin]))
return h
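# The combination implemented above is, for each pT bin,
#
#     A_LL(bin) = sum_s N_s(bin) * w_s / n_s  /  sum_s D_s(bin) * w_s / n_s
#
# where N_s is the asymmetry-weighted count and D_s the raw pion count for
# sample s, w_s its partonic cross section and n_s its number of events; bins
# with fewer than minParticlesToAccept entries in a sample are skipped in both
# the content and the error sums.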
def combine(outFilePath, inputFileList, useWeights=False):
"""combines histograms in arg2 into a file named arg1"""
minCounts = 5.5 ## don't take gg events with 1 real count
subProcessWeights={'gg':1.0, 'qg':1.0, 'qq':1.0}
outFile = ROOT.TFile(outFilePath,'recreate')
for subProcess in ('','gg','qg','qq'):
print 'now combining subProcess =',subProcess
outHists = [ MiniMcHistos(key, subProcess) for key in miniKeys ]
#print outHists
#keep all files open to do the asymmetry calculation
tfile = [ ROOT.TFile(path,'read') for path in inputFileList ]
nevents = [ f.Get('eventCounter').GetBinContent(1) for f in tfile ]
xsec_weight = [ xsec[os.path.basename(path).split('.')[0]] for path in inputFileList ]
sampleHists = []
for index in range(len(inputFileList)):
print 'adding', inputFileList[index]
sampleHists.append( [ MiniMcHistos(key, subProcess, tfile[index]) for key in miniKeys ] )
# reweight subProcesses?
if useWeights and subProcess == '':
m = sampleHists[-1]
gg = [ MiniMcHistos(key, 'gg', tfile[index]) for key in miniKeys ]
qg = [ MiniMcHistos(key, 'qg', tfile[index]) for key in miniKeys ]
qq = [ MiniMcHistos(key, 'qq', tfile[index]) for key in miniKeys ]
for i in range(len(outHists)):
for key in m[i].keys():
m[i][key].Reset()
m[i][key].Add(gg[i][key], subProcessWeights['gg'])
m[i][key].Add(qg[i][key], subProcessWeights['qg'])
m[i][key].Add(qq[i][key], subProcessWeights['qq'])
for i in range(len(outHists)):
outHists[i].add(sampleHists[-1][i],xsec_weight[index],nevents[index],minCounts)
#now calculate A_LL
for trig in range(len(sampleHists[0])):
for key in asymKeys:
num = [sample[trig][key] for sample in sampleHists]
denom = [sample[trig]['denom'] for sample in sampleHists]
outHists[trig][key].Delete()
outHists[trig][key] = calculateALL(num, denom, nevents, xsec_weight)
outFile.cd()
for m in outHists:
[h.Write() for h in m.values()]
[f.Close() for f in tfile]
#outFile = ROOT.TFile(outFilePath,'recreate')
#print 'write', outFilePath
#for m in keepMe: [h.Write() for h in m.values()]
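# Example (sketch, hypothetical file names): merge the per-sample outputs of
# fill()/fill3() into a single cross-section-weighted file,
#
#     combine('combined.plus.hist.root',
#             ['2_3.plus.hist.root', '3_4.plus.hist.root', '4_5.plus.hist.root'])
#
# main() exposes the same call as:  minimc.py --combine <outfile> <infile> ...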
def partonicCrossSection(sample='2_3', nevents=1000, sqrts=200):
"""runs standalone Pythia to determine xsec for weighting purposes, returns xsec"""
import ROOT
pythia = ROOT.TPythia6()
ckMin = sample.split('_')[0]
if ckMin == 'above':
ckMin = 35
else:
ckMin = float(ckMin)
ckMax = sample.split('_')[1]
if ckMax == '35':
ckMax = 1000
else:
ckMax = float(ckMax)
#pythia.SetMSTP(51, 8)
#pythia.SetMSEL(1)
# CDF Tune A for STAR
pythia.SetMSEL(1) # could also be 2, but gives VERY different results
pythia.SetMSTP(51, 7)
pythia.SetMSTP(81, 1)
pythia.SetMSTP(82, 4)
pythia.SetPARP(67, 4.0)
pythia.SetPARP(83, 0.5)
pythia.SetPARP(84, 0.4)
pythia.SetPARP(85, 0.9)
pythia.SetPARP(86, 0.95)
pythia.SetPARP(89, 1800)
pythia.SetPARP(90, 0.25)
pythia.SetPARP(91, 1.0)
pythia.SetCKIN(3, ckMin)
pythia.SetCKIN(4, ckMax)
pythia.Initialize('CMS', 'p', 'p', sqrts)
for i in range(nevents):
if i % 1000 == 0: print 'generating event', i
pythia.GenerateEvent()
pythia.Pystat(1)
return pythia.GetPARI(1)
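# Example (sketch): regenerate the weight for one CKIN bin and compare it with
# the hard-coded xsec table at the top of this module.  PARI(1) is Pythia's
# running estimate of the total cross section in mb, so a larger nevents gives
# a more stable number.
#
#     sigma = partonicCrossSection(sample='5_7', nevents=100000)
#     print 'pythia 5_7 xsec =', sigma, 'mb; table value =', xsec['5_7']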
## the rest of the functions just make plots
def ptResolution(histFile='./combined.sum.hist.root', trigKey='notrig', subProcess=''):
#import mystyle; mystyle.use(1)
f = ROOT.TFile(histFile)
m = MiniMcHistos(trigKey, subProcess, f)
c = ROOT.TCanvas()
m['ptMc_ptPr'].GetZaxis().SetRangeUser(1e-8,20)
m['ptMc_ptPr'].Draw()
ROOT.gPad.SetLogz()
raw_input('beli')
return(f,m,c)
def triggerBias(histFile='./combined.plus.hist.root', trigKey='96233', scenario='nlo'):
tfile = ROOT.TFile(histFile)
notrig = MiniMcHistos('notrig', '', tfile)
m = MiniMcHistos(trigKey, '', tfile)
c = ROOT.TCanvas('c')
notrig[scenario].GetXaxis().SetRangeUser(1,11)
notrig[scenario].GetYaxis().SetRangeUser(-0.08,0.08)
if 'plus' in histFile:
notrig[scenario].SetMarkerStyle(20)
m[scenario].SetMarkerStyle(24)
charge = 'plus'
if 'minus' in histFile:
notrig[scenario].SetMarkerStyle(21)
m[scenario].SetMarkerStyle(25)
charge = 'minus'
notrig[scenario].SetTitle('%s - %s - %s' % (charge, trigKey, scenario))
notrig[scenario].Draw()
m[scenario].Draw('same')
#m[scenario].Rebin()
#draw a graph representing size of systematic
nbins = m[scenario].GetNbinsX()
sys = ROOT.TGraphAsymmErrors(nbins)
    for bin in range(1, nbins+1):
x = m[scenario].GetBinCenter(bin)
y = -0.07
diff = m[scenario].GetBinContent(bin) - notrig[scenario].GetBinContent(bin)
stat = m[scenario].GetBinError(bin)
#print x, diff, stat
sys.SetPoint(bin-1, x, y)
sys.SetPointError( bin-1 , 0, 0, 0, max(diff,stat) )
#sys.SetPointError(0, 0, 0, 0, 0.000)
sys.Draw('f same')
raw_input('blerg?')
tfile.Close()
return sys
def triggerBiasAllScenarios(histFile='./combined.plus.hist.root', trigKey='96233'):
asymKeys2.reverse()
graphs = [triggerBias(histFile,trigKey,scenario) for scenario in asymKeys2]
yoffsets = [ 0.05*i for i in range(len(asymKeys2)) ]
# stupid stupid stupid
xbins = [0.5*i+0.25 for i in range(graphs[0].GetN())]
c = ROOT.TCanvas('c2')
bg = ROOT.TH2D('bg','',1,1,11,1,-0.05,yoffsets[-1]+0.1)
bg.Draw()
for row,graph in enumerate(graphs):
for i in range(graph.GetN()):
#graph.SetPoint(i, graph.GetXaxis().GetBinCenter(i+1), yoffsets[row] )
graph.SetPoint(i, xbins[i], yoffsets[row] )
graph.Draw('f same')
# now find max value for each bin
maxValues = [ max([gr.GetErrorYhigh(i) for gr in graphs]) for i in range(graphs[0].GetN()) ]
#print maxValues
c2 = ROOT.TCanvas('c3')
maxGraph = ROOT.TGraphAsymmErrors(len(maxValues))
for i,val in enumerate(maxValues):
maxGraph.SetPoint(i, xbins[i], 0)
maxGraph.SetPointEYhigh(i, maxValues[i])
maxGraph.GetXaxis().SetRangeUser(1,11)
maxGraph.Draw('a f')
raw_input('big blerg')
def subProcessSpectra(histFile='./combined.sum.hist.root', trigKey='notrig'):
tfile = ROOT.TFile(histFile)
bins = (2,3,4,5,7,9,11,15,20,25,30,35,70)
m_gg = MiniMcHistos(trigKey, 'gg', tfile)
m_qg = MiniMcHistos(trigKey, 'qg', tfile)
m_qq = MiniMcHistos(trigKey, 'qq', tfile)
ps = ROOT.TPostScript('subprocess_spectra.ps')
c = ROOT.TCanvas('c','',100,100,600,800)
c.Divide(3,4)
ps.NewPage()
for i in range(len(bins)-1):
c.cd(i+1)
ROOT.gPad.SetLogy()
h_gg = m_gg['partonicPt'].ProjectionX('%d_gg_px' % (i,), bins[i]+1, bins[i+1]+1)
h_gg.SetTitle('%d < partonic p_{T} < %d' % (bins[i], bins[i+1]))
h_gg.SetXTitle('#pi reco p_{T}')
h_gg.SetMarkerStyle(20)
h_gg.SetMarkerColor(ROOT.kRed)
h_gg.DrawCopy()
        h_qg = m_qg['partonicPt'].ProjectionX('%d_qg_px' % (i,), bins[i]+1, bins[i+1]+1)
        h_qg.SetMarkerStyle(25)
        h_qg.SetMarkerColor(ROOT.kBlue)
h_qg.DrawCopy('same')
        h_qq = m_qq['partonicPt'].ProjectionX('%d_qq_px' % (i,), bins[i]+1, bins[i+1]+1)
        h_qq.SetMarkerStyle(21)
        h_qq.SetMarkerColor(ROOT.kGreen)
h_qq.DrawCopy('same')
c.Update()
raw_input('pause')
ps.Close()
def subProcessPionMultiplicity(histFile='./combined.sum.hist.root', trigKey='notrig'):
proj_gg = whatDoICallThis(histFile, trigKey, 'gg')
proj_qg = whatDoICallThis(histFile, trigKey, 'qg')
proj_qq = whatDoICallThis(histFile, trigKey, 'qq')
bg = ROOT.TH2D('bg','',1,0,35,1,2,12)
bg.SetXTitle('event partonic p_{T}')
bg.SetYTitle('< nChargedPions >')
proj_gg[-1].SetMarkerStyle(20)
proj_qg[-1].SetMarkerStyle(25)
proj_qq[-1].SetMarkerStyle(21)
proj_gg[-1].SetMarkerColor(ROOT.kRed)
proj_qg[-1].SetMarkerColor(ROOT.kBlue)
proj_qq[-1].SetMarkerColor(ROOT.kGreen)
leg = ROOT.TLegend(0.15,0.65,0.3,0.85)
leg.AddEntry(proj_gg[-1],'gg','p')
leg.AddEntry(proj_qg[-1],'qg','p')
leg.AddEntry(proj_qq[-1],'qq','p')
c = ROOT.TCanvas()
bg.Draw()
proj_gg[-1].Draw('same')
proj_qg[-1].Draw('same')
proj_qq[-1].Draw('same')
leg.Draw()
raw_input('stop here for instructions')
c.Print('subprocess_pion_multiplicity.gif')
def meanPtVersusPartonicPtAllSub(histFile='./combined.sum.hist.root', trigKey='notrig'):
#import mystyle; mystyle.use(1)
ROOT.gStyle.SetOptStat(0)
proj_gg = meanPtVersusPartonicPt(histFile, trigKey, 'gg')
proj_qg = meanPtVersusPartonicPt(histFile, trigKey, 'qg')
proj_qq = meanPtVersusPartonicPt(histFile, trigKey, 'qq')
bg = ROOT.TH2D('bg','',1,0,35,1,0.4,2.3)
bg.SetXTitle('event partonic p_{T}')
bg.SetYTitle('< #pi p_{T} >')
proj_gg[-1].SetMarkerStyle(20)
proj_qg[-1].SetMarkerStyle(25)
proj_qq[-1].SetMarkerStyle(21)
proj_gg[-1].SetMarkerColor(ROOT.kRed)
proj_qg[-1].SetMarkerColor(ROOT.kBlue)
proj_qq[-1].SetMarkerColor(ROOT.kGreen)
leg = ROOT.TLegend(0.15,0.65,0.3,0.85)
leg.AddEntry(proj_gg[-1],'gg','p')
leg.AddEntry(proj_qg[-1],'qg','p')
leg.AddEntry(proj_qq[-1],'qq','p')
c = ROOT.TCanvas()
bg.Draw()
proj_gg[-1].Draw('same')
proj_qg[-1].Draw('same')
proj_qq[-1].Draw('same')
leg.Draw()
raw_input('pause:')
c.Print('mean_pt_subprocess.gif')
def meanPtVersusPartonicPt(histFile='./combined.sum.hist.root', trigKey='notrig', subProcess=''):
"""note: errors are absolutely fake"""
f = ROOT.TFile(histFile)
m = MiniMcHistos(trigKey, subProcess, f)
c = ROOT.TCanvas()
m['partonicPt'].GetZaxis().SetRangeUser(m['partonicPt'].GetMinimum(1e-10), m['partonicPt'].GetMaximum())
m['partonicPt'].Draw('col')
ROOT.gPad.SetLogz()
c2 = ROOT.TCanvas()
proj = m['partonicPt'].ProfileY('meanPt'+histFile+trigKey+subProcess)
proj = proj.ProjectionX()
[proj.SetBinError(b+1, 0.01) for b in range(proj.GetNbinsX())]
proj.GetXaxis().SetRangeUser(2,45)
proj.SetYTitle('< #pi p_{T} >')
proj.SetXTitle('event partonic p_{T}')
proj.SetTitle('Subprocess (if any): ' + subProcess)
proj.Draw('e')
return [f,m,c,c2,proj]
def whatDoICallThis(histFile='./combined.sum.hist.root', trigKey='notrig', subProcess=''):
"""plots <nChargedPions> ov. partonic pT"""
f = ROOT.TFile(histFile)
m = MiniMcHistos(trigKey, subProcess, f)
c = ROOT.TCanvas()
m['partonicPt'].GetZaxis().SetRangeUser(m['partonicPt'].GetMinimum(1e-10), m['partonicPt'].GetMaximum())
m['partonicPt'].Draw('col')
ROOT.gPad.SetLogz()
c2 = ROOT.TCanvas()
proj = m['partonicPt'].ProjectionY('whatDo'+histFile+trigKey+subProcess)
proj.Divide( m['eventPartonicPt'].Rebin(20) )
proj.GetXaxis().SetRangeUser(2,45)
proj.SetYTitle('<nChargedPions>')
proj.SetXTitle('event partonic p_{T}')
proj.SetTitle('Subprocess (if any): ' + subProcess)
proj.Draw('e')
return [f,m,c,c2,proj]
def pionMultiplicity(histFile='./7_9.plus.hist.root', trigKey='notrig'):
"""this is probably junk"""
f = ROOT.TFile(histFile)
    m = MiniMcHistos(trigKey, tfile=f)
m['gg'].Scale( 1.0 / (m['eventPartonicPt_gg'].GetEntries()) )
m['qg'].Scale( 1.0 / (m['eventPartonicPt_qg'].GetEntries()) )
m['qq'].Scale( 1.0 / (m['eventPartonicPt_qq'].GetEntries()) )
for key in ['gg','qg','qq']:
[ m[key].SetBinError(b+1,0.1*m[key].GetBinContent(b+1)) for b in range(m[key].GetNbinsX()) ]
m['gg'].SetMarkerColor(ROOT.kRed)
m['qg'].SetMarkerColor(ROOT.kBlue)
m['qq'].SetMarkerColor(ROOT.kGreen)
m['gg'].SetMarkerStyle(20)
m['qg'].SetMarkerStyle(25)
m['qq'].SetMarkerStyle(21)
c1 = ROOT.TCanvas()
m['gg'].Draw('p')
m['qg'].Draw('p same')
m['qq'].Draw('p same')
ROOT.gPad.SetLogy()
raw_input('what do you think?')
def subProcessShiftFromTrigger(histFile='/Users/kocolosk/data/simu/combined.hist.root'):
ROOT.gStyle.SetOptStat(0)
ROOT.gStyle.SetPadGridY(True)
tfile = ROOT.TFile(histFile,'read')
    minimc = [ MiniMcHistos(key, tfile=tfile) for key in miniKeys ]
#rescale to get bin-by-bin fractions
for i in range(minimc[0].ptBins[0]):
for j in range(len(minimc)):
a = minimc[j]['gg'].GetBinContent(i+1)
b = minimc[j]['qg'].GetBinContent(i+1)
c = minimc[j]['qq'].GetBinContent(i+1)
ea = minimc[j]['gg'].GetBinError(i+1)
eb = minimc[j]['qg'].GetBinError(i+1)
ec = minimc[j]['qq'].GetBinError(i+1)
binSum = a + b + c
if binSum > 0:
minimc[j]['gg'].SetBinContent(i+1,a/binSum)
minimc[j]['qg'].SetBinContent(i+1,b/binSum)
minimc[j]['qq'].SetBinContent(i+1,c/binSum)
minimc[j]['gg'].SetBinError(i+1,ea/binSum)
minimc[j]['qg'].SetBinError(i+1,eb/binSum)
minimc[j]['qq'].SetBinError(i+1,ec/binSum)
#fun with graphics
[m['gg'].SetLineColor(ROOT.kRed) for m in minimc]
[m['qg'].SetLineColor(ROOT.kBlue) for m in minimc]
[m['qq'].SetLineColor(ROOT.kGreen) for m in minimc]
[m['gg'].SetMarkerColor(ROOT.kRed) for m in minimc]
[m['qg'].SetMarkerColor(ROOT.kBlue) for m in minimc]
[m['qq'].SetMarkerColor(ROOT.kGreen) for m in minimc]
[m['gg'].SetMarkerSize(0.6) for m in minimc]
[m['qg'].SetMarkerSize(0.6) for m in minimc]
[m['qq'].SetMarkerSize(0.6) for m in minimc]
[m['gg'].SetMarkerStyle(20) for m in minimc]
[m['qg'].SetMarkerStyle(20) for m in minimc]
[m['qq'].SetMarkerStyle(20) for m in minimc]
[m['gg'].SetLineStyle(2) for m in minimc[1:]]
[m['qg'].SetLineStyle(2) for m in minimc[1:]]
[m['qq'].SetLineStyle(2) for m in minimc[1:]]
leg = ROOT.TLegend(0.65,0.7,0.88,0.88,'solid=MB, dash=trigger')
leg.AddEntry(minimc[0]['gg'],'gg','l')
leg.AddEntry(minimc[0]['qg'],'qg','l')
leg.AddEntry(minimc[0]['qq'],'qq','l')
minimc[1]['gg'].SetTitle('HT1')
minimc[2]['gg'].SetTitle('HT2')
minimc[3]['gg'].SetTitle('JP1')
minimc[4]['gg'].SetTitle('JP2')
bg = ROOT.TH2D('bg','',1,0,15,1,0,0.9)
bg.SetXTitle('#pi reco p_{T}')
c = [ROOT.TCanvas('c_ht1'), ROOT.TCanvas('c_ht2'), ROOT.TCanvas('c_jp1'), ROOT.TCanvas('c_jp2')]
for i in range(4):
c[i].cd()
bg.SetTitle(minimc[i+1]['gg'].GetTitle())
bg.DrawCopy()
minimc[0]['gg'].Draw('e1 same')
minimc[0]['qg'].Draw('e1 same')
minimc[0]['qq'].Draw('e1 same')
minimc[i+1]['gg'].Draw('e1 same')
minimc[i+1]['qg'].Draw('e1 same')
minimc[i+1]['qq'].Draw('e1 same')
leg.Draw()
raw_input('press enter to continue:')
def subProcessContributions(histFile='./combined.minus.hist.root', trigKey='notrig'):
tfile = ROOT.TFile(histFile,'read')
minimc = MiniMcHistos(trigKey, '', tfile)
minimc_gg = MiniMcHistos(trigKey, 'gg', tfile)
minimc_qg = MiniMcHistos(trigKey, 'qg', tfile)
minimc_qq = MiniMcHistos(trigKey, 'qq', tfile)
#rescale to get bin-by-bin fractions
for i in range(minimc.ptBins[0]):
a = minimc_gg['pt'].GetBinContent(i+1)
b = minimc_qg['pt'].GetBinContent(i+1)
c = minimc_qq['pt'].GetBinContent(i+1)
ea = minimc_gg['pt'].GetBinError(i+1)
eb = minimc_qg['pt'].GetBinError(i+1)
ec = minimc_qq['pt'].GetBinError(i+1)
binSum = a + b + c
if binSum > 0:
minimc_gg['pt'].SetBinContent(i+1,a/binSum)
minimc_qg['pt'].SetBinContent(i+1,b/binSum)
minimc_qq['pt'].SetBinContent(i+1,c/binSum)
minimc_gg['pt'].SetBinError(i+1,ea/binSum)
minimc_qg['pt'].SetBinError(i+1,eb/binSum)
minimc_qq['pt'].SetBinError(i+1,ec/binSum)
minimc_gg['pt'].SetMarkerStyle(20)
minimc_qg['pt'].SetMarkerStyle(25)
minimc_qq['pt'].SetMarkerStyle(21)
bg = ROOT.TH2D('bg','',1,0,15,1,0,0.9)
bg.SetXTitle('#pi reco p_{T}')
bg.SetTitle('Subprocess Contributions for #pi^{+} Production')
c1 = ROOT.TCanvas('c1')
bg.Draw()
minimc_gg['pt'].Draw('e1 same')
minimc_qg['pt'].Draw('e1 same')
minimc_qq['pt'].Draw('e1 same')
leg = ROOT.TLegend(0.75,0.7,0.88,0.88)
leg.AddEntry(minimc_gg['pt'],'gg','p')
leg.AddEntry(minimc_qg['pt'],'qg','p')
leg.AddEntry(minimc_qq['pt'],'qq','p')
leg.Draw()
raw_input('What do you think?')
#c1.Print('subprocess_contributions_minus.gif')
def subProcessContributionsEvent(histFile='./combined.minus.hist.root', trigKey='notrig'):
tfile = ROOT.TFile(histFile,'read')
minimc = MiniMcHistos(trigKey, '', tfile)
mgg = MiniMcHistos(trigKey, 'gg', tfile)
mqg = MiniMcHistos(trigKey, 'qg', tfile)
mqq = MiniMcHistos(trigKey, 'qq', tfile)
#rescale to get bin-by-bin fractions
for i in range(minimc.eventPartonBins[0]):
a = mgg['eventPartonicPt'].GetBinContent(i+1)
b = mqg['eventPartonicPt'].GetBinContent(i+1)
c = mqq['eventPartonicPt'].GetBinContent(i+1)
ea = mgg['eventPartonicPt'].GetBinError(i+1)
eb = mqg['eventPartonicPt'].GetBinError(i+1)
ec = mqq['eventPartonicPt'].GetBinError(i+1)
binSum = a + b + c
if binSum > 0:
mgg['eventPartonicPt'].SetBinContent(i+1,a/binSum)
mqg['eventPartonicPt'].SetBinContent(i+1,b/binSum)
mqq['eventPartonicPt'].SetBinContent(i+1,c/binSum)
mgg['eventPartonicPt'].SetBinError(i+1,ea/binSum)
mqg['eventPartonicPt'].SetBinError(i+1,eb/binSum)
mqq['eventPartonicPt'].SetBinError(i+1,ec/binSum)
mgg['eventPartonicPt'].SetMarkerStyle(20)
mqg['eventPartonicPt'].SetMarkerStyle(25)
mqq['eventPartonicPt'].SetMarkerStyle(21)
mgg['eventPartonicPt'].SetMarkerColor(ROOT.kRed)
mqg['eventPartonicPt'].SetMarkerColor(ROOT.kBlue)
mqq['eventPartonicPt'].SetMarkerColor(ROOT.kGreen)
bg = ROOT.TH2D('bg','',1,2,30,1,0,0.9)
bg.SetXTitle('event partonic p_{T}')
bg.SetTitle('Subprocess Mixture')
c1 = ROOT.TCanvas('c1')
bg.Draw()
mgg['eventPartonicPt'].Draw('e1 same')
mqg['eventPartonicPt'].Draw('e1 same')
mqq['eventPartonicPt'].Draw('e1 same')
leg = ROOT.TLegend(0.75,0.7,0.88,0.88)
leg.AddEntry(mgg['eventPartonicPt'],'gg','p')
leg.AddEntry(mqg['eventPartonicPt'],'qg','p')
leg.AddEntry(mqq['eventPartonicPt'],'qq','p')
leg.Draw()
raw_input('What do you think?')
def partonicPt(histFile='/Users/kocolosk/data/simu/combined.hist.root'):
ROOT.gStyle.SetOptLogy(False)
ROOT.gStyle.SetOptLogz(False)
ROOT.gStyle.SetOptStat(0)
ROOT.gStyle.SetPadGridX(False)
ROOT.gStyle.SetPadGridY(True)
tfile = ROOT.TFile(histFile,'read')
    minimc = [ MiniMcHistos(key, tfile=tfile) for key in miniKeys ]
h = [ ROOT.TH1D('tmp_minbias','',minimc[0].ptBins[0],minimc[0].ptBins[1],minimc[0].ptBins[2]),
ROOT.TH1D('tmp_96201','',minimc[0].ptBins[0],minimc[0].ptBins[1],minimc[0].ptBins[2]),
ROOT.TH1D('tmp_96211','',minimc[0].ptBins[0],minimc[0].ptBins[1],minimc[0].ptBins[2]),
ROOT.TH1D('tmp_96221','',minimc[0].ptBins[0],minimc[0].ptBins[1],minimc[0].ptBins[2]),
ROOT.TH1D('tmp_96233','',minimc[0].ptBins[0],minimc[0].ptBins[1],minimc[0].ptBins[2]), ]
c = ROOT.TCanvas()
for i in range(len(minimc)):
for j in range(minimc[i].ptBins[0]):
minimc[i]['partonicPt'].GetXaxis().SetRange(j+1,j+1)
h[i].SetBinContent(j+1, h[i].GetBinCenter(j+1) / minimc[i]['partonicPt'].GetMean(2))
h[i].SetBinError(j+1, minimc[i]['partonicPt'].GetMeanError(2) * h[i].GetBinContent(j+1) / minimc[i]['partonicPt'].GetMean(2))
h[0].SetMarkerStyle(2)
h[1].SetMarkerStyle(24)
h[2].SetMarkerStyle(20)
h[3].SetMarkerStyle(25)
h[4].SetMarkerStyle(21)
#bg = ROOT.TH2D('bg','',1,0,15,1,0,30)
bg = ROOT.TH2D('bg','',1,0,15,1,0,0.8)
bg.SetTitle('Fraction of parton p_{T} carried by #pi^{+/-}')
bg.SetXTitle('#pi reco p_{T}')
bg.SetYTitle('#pi reco p_{T} / < partonic p_{T} >')
#bg.SetYTitle('< partonic p_{T} >')
bg.DrawCopy()
[tmp.Draw('same') for tmp in h]
leg = ROOT.TLegend(0.65,0.15,0.89,0.45)
leg.AddEntry(h[0],'minbias','p')
leg.AddEntry(h[1],'ht1','p')
leg.AddEntry(h[2],'ht2','p')
leg.AddEntry(h[3],'jp1','p')
leg.AddEntry(h[4],'jp2','p')
leg.Draw('same')
raw_input('press enter to continue:')
def cfactor(histFile='/Users/kocolosk/data/simu/combined.hist.root'):
tfile = ROOT.TFile(histFile,'read')
minimc = {}
    for key in miniKeys: minimc[key] = MiniMcHistos(key, tfile=tfile)
ROOT.gStyle.SetOptLogy(True)
c = ROOT.TCanvas()
h = [ minimc['minbias']['pt'],
minimc['96201']['pt'],
minimc['96211']['pt'],
minimc['96221']['pt'],
minimc['96233']['pt'] ]
[ elem.Divide(minimc['notrig']['pt']) for elem in h ]
h[0].SetXTitle('p_{T}')
h[0].SetYTitle('trigger tracks / total tracks')
h[0].SetMarkerStyle(2)
h[1].SetMarkerStyle(24)
h[2].SetMarkerStyle(20)
h[3].SetMarkerStyle(25)
h[4].SetMarkerStyle(21)
h[0].Draw()
[ elem.Draw('same') for elem in h[1:] ]
leg = ROOT.TLegend(0.65,0.15,0.89,0.45)
leg.AddEntry(h[0],'minbias','p')
leg.AddEntry(h[1],'ht1','p')
leg.AddEntry(h[2],'ht2','p')
leg.AddEntry(h[3],'jp1','p')
leg.AddEntry(h[4],'jp2','p')
leg.Draw('same')
raw_input('press enter to continue:')
def asymmetries(histFile='/Users/kocolosk/data/simu/combined.hist.root'):
"""plots all triggers for an asmmyetry on a single canvas"""
tfile = ROOT.TFile(histFile,'read')
minimc = [ MiniMcHistos(key, '', tfile) for key in miniKeys ]
c = [ ROOT.TCanvas('c0'), ROOT.TCanvas('c1'), ROOT.TCanvas('c2'), ROOT.TCanvas('c3'), ROOT.TCanvas('c4') ]
keys = ['lo', 'nlo', 'max', 'min', 'zero']
[ minimc[0][key].GetXaxis().SetRangeUser(2.0, 10.0) for key in keys ]
[ minimc[0][key].SetLineColor(ROOT.kRed) for key in keys ]
[ minimc[1][key].SetLineColor(ROOT.kGreen) for key in keys ]
[ minimc[2][key].SetLineColor(ROOT.kGreen) for key in keys ]
[ minimc[3][key].SetLineColor(ROOT.kBlue) for key in keys ]
[ minimc[4][key].SetLineColor(ROOT.kBlue) for key in keys ]
c[0].cd()
minimc[0]['nlo'].Draw()
minimc[2]['nlo'].Draw('same')
minimc[4]['nlo'].Draw('same')
c[1].cd()
minimc[0]['max'].Draw()
minimc[2]['max'].Draw('same')
minimc[4]['max'].Draw('same')
c[2].cd()
minimc[0]['min'].Draw()
minimc[2]['min'].Draw('same')
minimc[4]['min'].Draw('same')
c[3].cd()
minimc[0]['zero'].Draw()
minimc[2]['zero'].Draw('same')
minimc[4]['zero'].Draw('same')
c[4].cd()
minimc[0]['nlo'].Draw()
minimc[1]['nlo'].Draw('same')
minimc[3]['nlo'].Draw('same')
raw_input('press enter to continue:')
def asymmetries2(histFile='./combined.plus.hist.root', trigKey='notrig'):
"""plots all asymmetries for trig on one canvas"""
f = ROOT.TFile(histFile)
minimc = MiniMcHistos(trigKey, '', f)
c1 = ROOT.TCanvas('c1')
bg = ROOT.TH2D('bg','Raw #pi^{-} Asymmetries for All Scenarios ',1,1.0,15.0,1,-0.04,0.08)
bg.SetXTitle('#pi reco p_{T}')
bg.Draw()
leg = ROOT.TLegend(0.78,0.11,0.89,0.89)
minimc['zero'].SetMarkerColor(ROOT.kBlue)
minimc['min'].SetMarkerColor(ROOT.kGreen)
minimc['max'].SetMarkerColor(ROOT.kRed)
minimc['nlo'].SetMarkerColor(7)
# excluding p070 b/c of a bug on my part (oops)
for row,key in enumerate(asymKeys[1:-1]):
if row>3:
minimc[key].SetMarkerStyle(row+16)
else:
minimc[key].SetMarkerStyle(20)
# reset errors for clarity
[ minimc[key].SetBinError(b+1,0) for b in range(minimc[key].GetNbinsX()) ]
minimc[key].GetXaxis().SetRangeUser(1.0, 11.0)
minimc[key].Draw('p same x0')
leg.AddEntry(minimc[key], key, 'p')
leg.Draw()
raw_input('what do you think?')
#c1.Print('all_scenarios_minus.gif')
def asymmetries3(histFile='./combined.plus.hist.root', trigger='96233', scenario='nlo', canvas=None):
"""plots deviation from raw A_{LL} v. p_T"""
f = ROOT.TFile(histFile)
m = {}
for key in trigKeys: m[key] = MiniMcHistos(key, '', f)
if 'plus' in histFile: charge = '+'
else: charge = '-'
if canvas is None:
c1 = ROOT.TCanvas('c1')
else:
c1 = canvas
bg = ROOT.TH2D('bg_%s_%s' % (trigger, scenario),scenario, 1,2.0,10.0, 1,-0.05,0.05)
bg.SetTitle('A_{LL}(%s) - A_{LL}(no trigger) for #pi^{%s} in %s scenario' % (trigger, charge, scenario))
bg.SetXTitle('#pi reco p_{T}')
if charge == '+':
m[trigger][scenario].SetMarkerStyle(20)
else:
m[trigger][scenario].SetMarkerStyle(21)
# plot deviation only
if trigger != 'notrig':
for b in range(m[trigger][scenario].GetNbinsX()):
content = m[trigger][scenario].GetBinContent(b+1) - m['notrig'][scenario].GetBinContent(b+1)
m[trigger][scenario].SetBinContent(b+1, content)
else:
bg.SetTitle('Raw A_{LL} (no trigger) for #pi^{%s} in %s scenario' % (charge, scenario))
m[trigger][scenario].SetMarkerColor(ROOT.kRed)
m[trigger][scenario].SetLineColor(ROOT.kRed)
bg.Draw()
m[trigger][scenario].Draw('p same')
#raw_input('what do you think?')
return [f, bg, m[trigger][scenario]]
def asymmetries4(histFile='./combined.plus.hist.root'):
"""calls asymmetries3 for all possible combos of trigger and scenario => save to Postscript"""
ps = ROOT.TPostScript('blah.ps')
c = ROOT.TCanvas('c','',100,100,600,800)
c.Divide(2,3)
keepMe = []
for scenario in asymKeys[1:-1]:
c.Update()
ps.NewPage()
c.Clear()
c.Divide(2,3)
padCounter = 1
for trigger in trigKeys:
pad = c.cd(padCounter)
keepMe.extend( asymmetries3(histFile, trigger, scenario, pad) )
padCounter += 1
print 'generated', trigger, scenario
ps.Close()
def validateAsymmetries(histFile='combined.plus.hist.root', subprocess=''):
'''compare untriggered PYTHIA asymmetries to GRSV predictions'''
tfile = ROOT.TFile.Open(histFile, 'read')
miniHist = MiniMcHistos('notrig', subprocess, tfile)
if 'plus' in histFile:
charge = 'plus'
elif 'minus' in histFile:
charge = 'minus'
else:
print 'could not determine charge from filename -- assume plus'
charge = 'plus'
#import mystyle; mystyle.use(1)
ROOT.gStyle.SetOptStat(0)
c1 = ROOT.TCanvas('c1')
asymKeys = ['nlo','max','min','zero']
#if subprocess != '':
# asymKeys = [elem + '_' + subprocess for elem in asymKeys]
if charge == 'plus':
[ miniHist[key].SetMarkerStyle(20) for key in asymKeys ]
miniHist[ asymKeys[0] ].SetTitle('A_{LL} for #pi^{+} in Pythia and GRSV')
elif charge == 'minus':
[ miniHist[key].SetMarkerStyle(21) for key in asymKeys ]
miniHist[ asymKeys[0] ].SetTitle('A_{LL} for #pi^{-} in Pythia and GRSV')
[ miniHist[ a ].SetFillStyle( 3004 ) for a in asymKeys ]
miniHist[ asymKeys[0] ].SetLineColor( ROOT.kBlack )
miniHist[ asymKeys[0] ].SetMarkerColor( ROOT.kBlack )
miniHist[ asymKeys[0] ].SetFillColor( ROOT.kBlack )
miniHist[ asymKeys[0] ].Draw('e4')
miniHist[ asymKeys[0] ].GetYaxis().SetRangeUser(-0.04, 0.08)
miniHist[ asymKeys[0] ].GetXaxis().SetRangeUser(1.25, 11.0)
miniHist[ asymKeys[0] ].SetXTitle('p_{T}')
miniHist[ asymKeys[1] ].SetLineColor( ROOT.kRed )
miniHist[ asymKeys[1] ].SetMarkerColor( ROOT.kRed )
miniHist[ asymKeys[1] ].SetFillColor( ROOT.kRed )
miniHist[ asymKeys[1] ].Draw('e4 same')
miniHist[ asymKeys[2] ].SetLineColor( ROOT.kGreen )
miniHist[ asymKeys[2] ].SetMarkerColor( ROOT.kGreen )
miniHist[ asymKeys[2] ].SetFillColor( ROOT.kGreen )
miniHist[ asymKeys[2] ].Draw('e4 same')
miniHist[ asymKeys[3] ].SetLineColor( ROOT.kBlue )
miniHist[ asymKeys[3] ].SetMarkerColor( ROOT.kBlue )
miniHist[ asymKeys[3] ].SetFillColor( ROOT.kBlue )
miniHist[ asymKeys[3] ].Draw('e4 same')
# now draw theory curves
if subprocess == '':
import asym, xsec
#theory = asym.theoryCurves()
#curves = [ theory.getGraph(charge, 'std'),
# theory.getGraph(charge, 'max'),
# theory.getGraph(charge, 'zero'),
# theory.getGraph(charge, 'min')
#]
if charge == 'plus':
curves = [
asym.theoryCurves( asym.werner_plus_dss_cteqm5_std, xsec.werner_plus_dss_cteqm5_pt ).getGraph(),
asym.theoryCurves( asym.werner_plus_dss_cteqm5_max, xsec.werner_plus_dss_cteqm5_pt ).getGraph(),
asym.theoryCurves( asym.werner_plus_dss_cteqm5_zero,xsec.werner_plus_dss_cteqm5_pt ).getGraph(),
asym.theoryCurves( asym.werner_plus_dss_cteqm5_min, xsec.werner_plus_dss_cteqm5_pt ).getGraph()
]
if charge == 'minus':
curves = [
asym.theoryCurves( asym.werner_minus_dss_cteqm5_std, xsec.werner_minus_dss_cteqm5_pt ).getGraph(),
asym.theoryCurves( asym.werner_minus_dss_cteqm5_max, xsec.werner_minus_dss_cteqm5_pt ).getGraph(),
asym.theoryCurves( asym.werner_minus_dss_cteqm5_zero,xsec.werner_minus_dss_cteqm5_pt ).getGraph(),
asym.theoryCurves( asym.werner_minus_dss_cteqm5_min, xsec.werner_minus_dss_cteqm5_pt ).getGraph()
]
curves[1].SetLineColor(ROOT.kRed)
curves[2].SetLineColor(ROOT.kBlue)
curves[3].SetLineColor(ROOT.kGreen)
[ c.Draw('c same') for c in curves ]
raw_input('what do you think?')
name = 'raw_asymmetries_' + charge
if subprocess != '': name = name + '_' + subprocess
#c1.Print(name + '.gif')
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def main(argv=None):
if argv is None:
argv = sys.argv
try:
try:
opts, args = getopt.getopt(argv[1:], 'h', ["help", 'xgrid', 'charge=', 'trig', 'fill=', 'combine'])
except getopt.error, msg:
raise Usage(msg)
#global loadLibs
#loadLibs = localLibs
global site
charge = 0
# option processing
for option, value in opts:
#if option == '--xgrid':
# loadLibs = xgridLibs
# site = 'Xgrid'
if option == '--charge':
charge = int(value)
if option in ("-h", "--help"): raise Usage(help_message)
if option == '--trig': makeTriggerNtuple2(args)
if option == '--fill':
#loadLibs()
#fill2(value,None,True,os.environ['FILELIST'])
fill3(value)
if option == '--combine':
#loadLibs()
ROOT.gSystem.Load('StJetMaker')
ROOT.gSystem.Load('StMiniMcEvent')
combine(args[0],args[1:],True)
except Usage, err:
print >> sys.stderr, sys.argv[0].split("/")[-1] + ": " + str(err.msg)
print >> sys.stderr, "\t for help use --help"
return 2
if __name__ == "__main__":
#test()
sys.exit(main())
|
[] |
[] |
["FILELIST"] |
[] |
["FILELIST"] |
python
| 1 | 0 | |
registry/registry.go
|
package registry
import (
	"context"
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"

	"github.com/Shopify/logrus-bugsnag"
	logstash "github.com/bshuster-repo/logrus-logstash-hook"
	"github.com/bugsnag/bugsnag-go"
	"github.com/docker/distribution/configuration"
	dcontext "github.com/docker/distribution/context"
	"github.com/docker/distribution/health"
	prometheus "github.com/docker/distribution/metrics"
	"github.com/docker/distribution/registry/handlers"
	"github.com/docker/distribution/registry/listener"
	"github.com/docker/distribution/uuid"
	"github.com/docker/distribution/version"
	"github.com/docker/go-metrics"
	gorhandlers "github.com/gorilla/handlers"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"github.com/yvasiyarov/gorelic"
	"rsc.io/letsencrypt"
)
// this channel gets notified when the process receives a signal. It is global to ease unit testing
var quit = make(chan os.Signal, 1)
// ServeCmd is a cobra command for running the registry.
var ServeCmd = &cobra.Command{
Use: "serve <config>",
Short: "`serve` stores and distributes Docker images",
Long: "`serve` stores and distributes Docker images.",
Run: func(cmd *cobra.Command, args []string) {
// setup context
ctx := dcontext.WithVersion(dcontext.Background(), version.Version)
config, err := resolveConfiguration(args)
if err != nil {
fmt.Fprintf(os.Stderr, "configuration error: %v\n", err)
cmd.Usage()
os.Exit(1)
}
if config.HTTP.Debug.Addr != "" {
go func(addr string) {
log.Infof("debug server listening %v", addr)
if err := http.ListenAndServe(addr, nil); err != nil {
log.Fatalf("error listening on debug interface: %v", err)
}
}(config.HTTP.Debug.Addr)
}
registry, err := NewRegistry(ctx, config)
if err != nil {
log.Fatalln(err)
}
if config.HTTP.Debug.Prometheus.Enabled {
path := config.HTTP.Debug.Prometheus.Path
if path == "" {
path = "/metrics"
}
log.Info("providing prometheus metrics on ", path)
http.Handle(path, metrics.Handler())
}
if err = registry.ListenAndServe(); err != nil {
log.Fatalln(err)
}
},
}
// A Registry represents a complete instance of the registry.
// TODO(aaronl): It might make sense for Registry to become an interface.
type Registry struct {
config *configuration.Configuration
app *handlers.App
server *http.Server
}
// NewRegistry creates a new registry from a context and configuration struct.
func NewRegistry(ctx context.Context, config *configuration.Configuration) (*Registry, error) {
var err error
ctx, err = configureLogging(ctx, config)
if err != nil {
return nil, fmt.Errorf("error configuring logger: %v", err)
}
configureBugsnag(config)
// inject a logger into the uuid library. warns us if there is a problem
// with uuid generation under low entropy.
uuid.Loggerf = dcontext.GetLogger(ctx).Warnf
app := handlers.NewApp(ctx, config)
// TODO(aaronl): The global scope of the health checks means NewRegistry
// can only be called once per process.
app.RegisterHealthChecks()
handler := configureReporting(app)
handler = alive("/", handler)
handler = health.Handler(handler)
handler = panicHandler(handler)
if !config.Log.AccessLog.Disabled {
handler = gorhandlers.CombinedLoggingHandler(os.Stdout, handler)
}
// expose build info through Prometheus (`registry_build_info` gauge)
if app.Config.HTTP.Debug.Prometheus.Enabled {
ns := metrics.NewNamespace(prometheus.NamespacePrefix, "", nil)
registryInfo := ns.NewLabeledGauge(
"build",
"Information about the registry.", metrics.Unit("info"),
"version", "revision", "package",
)
metrics.Register(ns)
registryInfo.WithValues(version.Version, version.Revision, version.Package).Set(1)
}
server := &http.Server{
Handler: handler,
}
return &Registry{
app: app,
config: config,
server: server,
}, nil
}
// ListenAndServe runs the registry's HTTP server.
func (registry *Registry) ListenAndServe() error {
config := registry.config
ln, err := listener.NewListener(config.HTTP.Net, config.HTTP.Addr)
if err != nil {
return err
}
if config.HTTP.TLS.Certificate != "" || config.HTTP.TLS.LetsEncrypt.CacheFile != "" {
tlsConf := &tls.Config{
ClientAuth: tls.NoClientCert,
NextProtos: nextProtos(config),
MinVersion: tls.VersionTLS10,
PreferServerCipherSuites: true,
CipherSuites: []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
},
}
if config.HTTP.TLS.LetsEncrypt.CacheFile != "" {
if config.HTTP.TLS.Certificate != "" {
return fmt.Errorf("cannot specify both certificate and Let's Encrypt")
}
var m letsencrypt.Manager
if err := m.CacheFile(config.HTTP.TLS.LetsEncrypt.CacheFile); err != nil {
return err
}
if !m.Registered() {
if err := m.Register(config.HTTP.TLS.LetsEncrypt.Email, nil); err != nil {
return err
}
}
if len(config.HTTP.TLS.LetsEncrypt.Hosts) > 0 {
m.SetHosts(config.HTTP.TLS.LetsEncrypt.Hosts)
}
tlsConf.GetCertificate = m.GetCertificate
} else {
tlsConf.Certificates = make([]tls.Certificate, 1)
tlsConf.Certificates[0], err = tls.LoadX509KeyPair(config.HTTP.TLS.Certificate, config.HTTP.TLS.Key)
if err != nil {
return err
}
}
if len(config.HTTP.TLS.ClientCAs) != 0 {
pool := x509.NewCertPool()
for _, ca := range config.HTTP.TLS.ClientCAs {
caPem, err := ioutil.ReadFile(ca)
if err != nil {
return err
}
if ok := pool.AppendCertsFromPEM(caPem); !ok {
return fmt.Errorf("could not add CA to pool")
}
}
for _, subj := range pool.Subjects() {
dcontext.GetLogger(registry.app).Debugf("CA Subject: %s", string(subj))
}
tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
tlsConf.ClientCAs = pool
}
ln = tls.NewListener(ln, tlsConf)
dcontext.GetLogger(registry.app).Infof("listening on %v, tls", ln.Addr())
} else {
dcontext.GetLogger(registry.app).Infof("listening on %v", ln.Addr())
}
if config.HTTP.DrainTimeout == 0 {
return registry.server.Serve(ln)
}
// setup channel to get notified on SIGTERM signal
signal.Notify(quit, syscall.SIGTERM)
serveErr := make(chan error)
// Start serving in goroutine and listen for stop signal in main thread
go func() {
serveErr <- registry.server.Serve(ln)
}()
select {
case err := <-serveErr:
return err
case <-quit:
dcontext.GetLogger(registry.app).Info("stopping server gracefully. Draining connections for ", config.HTTP.DrainTimeout)
// shutdown the server with a grace period of configured timeout
c, cancel := context.WithTimeout(context.Background(), config.HTTP.DrainTimeout)
defer cancel()
return registry.server.Shutdown(c)
}
}
func configureReporting(app *handlers.App) http.Handler {
var handler http.Handler = app
if app.Config.Reporting.Bugsnag.APIKey != "" {
handler = bugsnag.Handler(handler)
}
if app.Config.Reporting.NewRelic.LicenseKey != "" {
agent := gorelic.NewAgent()
agent.NewrelicLicense = app.Config.Reporting.NewRelic.LicenseKey
if app.Config.Reporting.NewRelic.Name != "" {
agent.NewrelicName = app.Config.Reporting.NewRelic.Name
}
agent.CollectHTTPStat = true
agent.Verbose = app.Config.Reporting.NewRelic.Verbose
agent.Run()
handler = agent.WrapHTTPHandler(handler)
}
return handler
}
// configureLogging prepares the context with a logger using the
// configuration.
func configureLogging(ctx context.Context, config *configuration.Configuration) (context.Context, error) {
log.SetLevel(logLevel(config.Log.Level))
formatter := config.Log.Formatter
if formatter == "" {
formatter = "text" // default formatter
}
switch formatter {
case "json":
log.SetFormatter(&log.JSONFormatter{
TimestampFormat: time.RFC3339Nano,
})
case "text":
log.SetFormatter(&log.TextFormatter{
TimestampFormat: time.RFC3339Nano,
})
case "logstash":
log.SetFormatter(&logstash.LogstashFormatter{
TimestampFormat: time.RFC3339Nano,
})
default:
// just let the library use default on empty string.
if config.Log.Formatter != "" {
return ctx, fmt.Errorf("unsupported logging formatter: %q", config.Log.Formatter)
}
}
if config.Log.Formatter != "" {
log.Debugf("using %q logging formatter", config.Log.Formatter)
}
if len(config.Log.Fields) > 0 {
// build up the static fields, if present.
var fields []interface{}
for k := range config.Log.Fields {
fields = append(fields, k)
}
ctx = dcontext.WithValues(ctx, config.Log.Fields)
ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx, fields...))
}
return ctx, nil
}
func logLevel(level configuration.Loglevel) log.Level {
l, err := log.ParseLevel(string(level))
if err != nil {
l = log.InfoLevel
log.Warnf("error parsing level %q: %v, using %q ", level, err, l)
}
return l
}
// configureBugsnag configures bugsnag reporting, if enabled
func configureBugsnag(config *configuration.Configuration) {
if config.Reporting.Bugsnag.APIKey == "" {
return
}
bugsnagConfig := bugsnag.Configuration{
APIKey: config.Reporting.Bugsnag.APIKey,
}
if config.Reporting.Bugsnag.ReleaseStage != "" {
bugsnagConfig.ReleaseStage = config.Reporting.Bugsnag.ReleaseStage
}
if config.Reporting.Bugsnag.Endpoint != "" {
bugsnagConfig.Endpoint = config.Reporting.Bugsnag.Endpoint
}
bugsnag.Configure(bugsnagConfig)
// configure logrus bugsnag hook
hook, err := logrus_bugsnag.NewBugsnagHook()
if err != nil {
log.Fatalln(err)
}
log.AddHook(hook)
}
// panicHandler adds an HTTP handler to the web app that recovers from a
// panic. logrus.Panic transmits the panic message to the pre-configured log
// hooks, which are defined in config.yml.
func panicHandler(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer func() {
if err := recover(); err != nil {
log.Panic(fmt.Sprintf("%v", err))
}
}()
handler.ServeHTTP(w, r)
})
}
// alive simply wraps the handler with a route that always returns an http 200
// response when the path is matched. If the path is not matched, the request
// is passed to the provided handler. There is no guarantee of anything but
// that the server is up. Wrap with other handlers (such as health.Handler)
// for greater effect.
func alive(path string, handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == path {
w.Header().Set("Cache-Control", "no-cache")
w.WriteHeader(http.StatusOK)
return
}
handler.ServeHTTP(w, r)
})
}
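// handlerChainSketch is a minimal illustration (not used by the registry
// itself) of the wrapping order applied in NewRegistry: alive short-circuits
// "/" probes, health.Handler layers health checks on top, and panicHandler
// recovers from panics; the reporting and access-log layers are omitted here.
func handlerChainSketch(app http.Handler) http.Handler {
	h := alive("/", app)
	h = health.Handler(h)
	return panicHandler(h)
}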
func resolveConfiguration(args []string) (*configuration.Configuration, error) {
var configurationPath string
if len(args) > 0 {
configurationPath = args[0]
} else if os.Getenv("REGISTRY_CONFIGURATION_PATH") != "" {
configurationPath = os.Getenv("REGISTRY_CONFIGURATION_PATH")
}
if configurationPath == "" {
return nil, fmt.Errorf("configuration path unspecified")
}
fp, err := os.Open(configurationPath)
if err != nil {
return nil, err
}
defer fp.Close()
config, err := configuration.Parse(fp)
if err != nil {
return nil, fmt.Errorf("error parsing %s: %v", configurationPath, err)
}
return config, nil
}
func nextProtos(config *configuration.Configuration) []string {
switch config.HTTP.HTTP2.Disabled {
case true:
return []string{"http/1.1"}
default:
return []string{"h2", "http/1.1"}
}
}
|
[
"\"REGISTRY_CONFIGURATION_PATH\"",
"\"REGISTRY_CONFIGURATION_PATH\""
] |
[] |
[
"REGISTRY_CONFIGURATION_PATH"
] |
[]
|
["REGISTRY_CONFIGURATION_PATH"]
|
go
| 1 | 0 | |
internal/test/test.go
|
// Copyright 2021 Zenauth Ltd.
// SPDX-License-Identifier: Apache-2.0
// +build tests
package test
import (
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
"runtime"
"sort"
"strings"
"testing"
"github.com/stretchr/testify/require"
policyv1 "github.com/cerbos/cerbos/api/genpb/cerbos/policy/v1"
"github.com/cerbos/cerbos/internal/observability/logging"
"github.com/cerbos/cerbos/internal/policy"
)
func init() {
if logLevel := os.Getenv("CERBOS_TEST_LOG_LEVEL"); logLevel != "" {
logging.InitLogging(logLevel)
}
}
func LoadPolicy(t *testing.T, path string) *policyv1.Policy {
t.Helper()
inp := mkReadCloser(t, path)
defer inp.Close()
p, err := policy.ReadPolicy(inp)
require.NoError(t, err, "Failed to load %s", path)
return p
}
func mkReadCloser(t *testing.T, file string) io.ReadCloser {
t.Helper()
f, err := os.Open(file)
require.NoError(t, err, "Failed to open file %s", file)
return f
}
func PathToDir(tb testing.TB, dir string) string {
tb.Helper()
_, currFile, _, ok := runtime.Caller(0)
if !ok {
tb.Error("Failed to detect testdata directory")
return ""
}
return filepath.Join(filepath.Dir(currFile), "testdata", dir)
}
type Case struct {
Name string
Input []byte
Want map[string][]byte
}
// LoadTestCases loads groups of test files from the given path.
// Consider a directory containing the following set of files:
// |- test01.yaml
// |- test01.yaml.err
// |- test01.yaml.out
//
// The above files will be converted to a Case object as follows:
// Case {
// Name: "test01",
// Input: <contents_of_test01.yaml>,
// Want: map[string][]byte{
// "err": <contents_of_test01.yaml.err>,
// "out": <contents_of_test01.yaml.out>,
// }
// }.
func LoadTestCases(tb testing.TB, subDir string) []Case {
tb.Helper()
dir := PathToDir(tb, subDir)
var entries []string
err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if d.IsDir() {
return nil
}
if filepath.Ext(path) == ".yaml" {
entries = append(entries, path)
}
return nil
})
require.NoError(tb, err)
testCases := make([]Case, len(entries))
for i, entry := range entries {
name, err := filepath.Rel(dir, strings.TrimSuffix(entry, filepath.Ext(entry)))
require.NoError(tb, err)
testCases[i] = Case{
Name: name,
Input: readFileContents(tb, entry),
}
wantedFiles, err := filepath.Glob(fmt.Sprintf("%s.*", entry))
require.NoError(tb, err)
testCases[i].Want = make(map[string][]byte, len(wantedFiles))
for _, wanted := range wantedFiles {
key := strings.TrimPrefix(filepath.Ext(wanted), ".")
testCases[i].Want[key] = readFileContents(tb, wanted)
}
}
sort.SliceStable(testCases, func(i, j int) bool {
return testCases[i].Name < testCases[j].Name
})
return testCases
}
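// loadTestCasesUsageSketch is a minimal, illustrative example (not used by the
// test suites) of how the Case values returned by LoadTestCases are typically
// consumed in a table-driven test; the "parser" subdirectory name is an
// assumption for illustration only.
func loadTestCasesUsageSketch(t *testing.T) {
	for _, tc := range LoadTestCases(t, "parser") {
		tc := tc
		t.Run(tc.Name, func(t *testing.T) {
			// tc.Input holds the .yaml contents; tc.Want maps the extra
			// extensions (e.g. "err", "out") to their expected file contents.
			_ = tc.Input
			_ = tc.Want["out"]
		})
	}
}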
func readFileContents(tb testing.TB, filePath string) []byte {
tb.Helper()
if _, err := os.Stat(filePath); err == nil {
b, err := os.ReadFile(filePath)
if err != nil {
tb.Errorf("Failed to read %s: %w", filePath, err)
return nil
}
return b
}
return nil
}
func SkipIfGHActions(t *testing.T) {
t.Helper()
if isGH, ok := os.LookupEnv("GITHUB_ACTIONS"); ok && isGH == "true" {
t.Skipf("Skipping because of known issue with GitHub Actions")
}
}
|
[
"\"CERBOS_TEST_LOG_LEVEL\""
] |
[] |
[
"CERBOS_TEST_LOG_LEVEL"
] |
[]
|
["CERBOS_TEST_LOG_LEVEL"]
|
go
| 1 | 0 | |
pkg/kubectl/cmd/util/factory_builder.go
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// this file contains factories with no other dependencies
package util
import (
"fmt"
"io"
"os"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/kubectl/plugins"
"k8s.io/kubernetes/pkg/kubectl/resource"
"k8s.io/kubernetes/pkg/printers"
)
type ring2Factory struct {
clientAccessFactory ClientAccessFactory
objectMappingFactory ObjectMappingFactory
}
func NewBuilderFactory(clientAccessFactory ClientAccessFactory, objectMappingFactory ObjectMappingFactory) BuilderFactory {
f := &ring2Factory{
clientAccessFactory: clientAccessFactory,
objectMappingFactory: objectMappingFactory,
}
return f
}
func (f *ring2Factory) PrinterForCommand(cmd *cobra.Command, isLocal bool, outputOpts *printers.OutputOptions, options printers.PrintOptions) (printers.ResourcePrinter, error) {
var mapper meta.RESTMapper
var typer runtime.ObjectTyper
mapper, typer = f.objectMappingFactory.Object()
// TODO: used by the custom column implementation and the name implementation, break this dependency
decoders := []runtime.Decoder{f.clientAccessFactory.Decoder(true), unstructured.UnstructuredJSONScheme}
encoder := f.clientAccessFactory.JSONEncoder()
return PrinterForCommand(cmd, outputOpts, mapper, typer, encoder, decoders, options)
}
func (f *ring2Factory) PrinterForMapping(cmd *cobra.Command, isLocal bool, outputOpts *printers.OutputOptions, mapping *meta.RESTMapping, withNamespace bool) (printers.ResourcePrinter, error) {
// Some callers do not have "label-columns" so we can't use the GetFlagStringSlice() helper
columnLabel, err := cmd.Flags().GetStringSlice("label-columns")
if err != nil {
columnLabel = []string{}
}
options := printers.PrintOptions{
NoHeaders: GetFlagBool(cmd, "no-headers"),
WithNamespace: withNamespace,
Wide: GetWideFlag(cmd),
ShowAll: GetFlagBool(cmd, "show-all"),
ShowLabels: GetFlagBool(cmd, "show-labels"),
AbsoluteTimestamps: isWatch(cmd),
ColumnLabels: columnLabel,
}
printer, err := f.PrinterForCommand(cmd, isLocal, outputOpts, options)
if err != nil {
return nil, err
}
// Make sure we output versioned data for generic printers
if printer.IsGeneric() {
if mapping == nil {
return nil, fmt.Errorf("no serialization format found")
}
version := mapping.GroupVersionKind.GroupVersion()
if version.Empty() {
return nil, fmt.Errorf("no serialization format found")
}
printer = printers.NewVersionedPrinter(printer, mapping.ObjectConvertor, version, mapping.GroupVersionKind.GroupVersion())
}
return printer, nil
}
func (f *ring2Factory) PrintSuccess(mapper meta.RESTMapper, shortOutput bool, out io.Writer, resource, name string, dryRun bool, operation string) {
resource, _ = mapper.ResourceSingularizer(resource)
dryRunMsg := ""
if dryRun {
dryRunMsg = " (dry run)"
}
if shortOutput {
// -o name: prints resource/name
if len(resource) > 0 {
fmt.Fprintf(out, "%s/%s\n", resource, name)
} else {
fmt.Fprintf(out, "%s\n", name)
}
} else {
// understandable output by default
if len(resource) > 0 {
fmt.Fprintf(out, "%s \"%s\" %s%s\n", resource, name, operation, dryRunMsg)
} else {
fmt.Fprintf(out, "\"%s\" %s%s\n", name, operation, dryRunMsg)
}
}
}
func (f *ring2Factory) PrintObject(cmd *cobra.Command, isLocal bool, mapper meta.RESTMapper, obj runtime.Object, out io.Writer) error {
// try to get a typed object
_, typer := f.objectMappingFactory.Object()
gvks, _, err := typer.ObjectKinds(obj)
if err != nil {
return err
}
// Prefer the existing external version if specified
var preferredVersion []string
if gvks[0].Version != "" && gvks[0].Version != runtime.APIVersionInternal {
preferredVersion = []string{gvks[0].Version}
}
mapping, err := mapper.RESTMapping(gvks[0].GroupKind(), preferredVersion...)
if err != nil {
return err
}
printer, err := f.PrinterForMapping(cmd, isLocal, nil, mapping, false)
if err != nil {
return err
}
return printer.PrintObj(obj, out)
}
func (f *ring2Factory) PrintResourceInfoForCommand(cmd *cobra.Command, info *resource.Info, out io.Writer) error {
printer, err := f.PrinterForCommand(cmd, false, nil, printers.PrintOptions{})
if err != nil {
return err
}
if !printer.IsGeneric() {
printer, err = f.PrinterForMapping(cmd, false, nil, nil, false)
if err != nil {
return err
}
}
return printer.PrintObj(info.Object, out)
}
// NewBuilder returns a new resource builder for structured api objects.
func (f *ring2Factory) NewBuilder() *resource.Builder {
clientMapperFunc := resource.ClientMapperFunc(f.objectMappingFactory.ClientForMapping)
mapper, typer := f.objectMappingFactory.Object()
unstructuredClientMapperFunc := resource.ClientMapperFunc(f.objectMappingFactory.UnstructuredClientForMapping)
categoryExpander := f.objectMappingFactory.CategoryExpander()
return resource.NewBuilder(
&resource.Mapper{
RESTMapper: mapper,
ObjectTyper: typer,
ClientMapper: clientMapperFunc,
Decoder: f.clientAccessFactory.Decoder(true),
},
&resource.Mapper{
RESTMapper: mapper,
ObjectTyper: typer,
ClientMapper: unstructuredClientMapperFunc,
Decoder: unstructured.UnstructuredJSONScheme,
},
categoryExpander,
)
}
// PluginLoader loads plugins from a path set by the KUBECTL_PLUGINS_PATH env var.
// If this env var is not set, it defaults to
// "~/.kube/plugins", plus
// "./kubectl/plugins" directory under the "data dir" directory specified by the XDG
// system directory structure spec for the given platform.
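// For example, setting KUBECTL_PLUGINS_PATH=/opt/kubectl/plugins (an
// illustrative path) makes the loader read plugins from that path instead of
// the defaults listed above.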
func (f *ring2Factory) PluginLoader() plugins.PluginLoader {
if len(os.Getenv("KUBECTL_PLUGINS_PATH")) > 0 {
return plugins.KubectlPluginsPathPluginLoader()
}
return plugins.TolerantMultiPluginLoader{
plugins.XDGDataDirsPluginLoader(),
plugins.UserDirPluginLoader(),
}
}
func (f *ring2Factory) PluginRunner() plugins.PluginRunner {
return &plugins.ExecPluginRunner{}
}
|
[
"\"KUBECTL_PLUGINS_PATH\""
] |
[] |
[
"KUBECTL_PLUGINS_PATH"
] |
[]
|
["KUBECTL_PLUGINS_PATH"]
|
go
| 1 | 0 | |
test/bedrock_Azure_single_cosmos_mongo_test.go
|
package test
import (
"fmt"
"github.com/gruntwork-io/terratest/modules/k8s"
"github.com/gruntwork-io/terratest/modules/random"
"github.com/gruntwork-io/terratest/modules/terraform"
"github.com/otiai10/copy"
"log"
"os"
"os/exec"
"strings"
"testing"
)
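// This integration test reads its Azure credentials and Terraform backend
// settings from the environment: DATACENTER_LOCATION, ARM_TENANT_ID,
// ARM_CLIENT_ID, ARM_CLIENT_SECRET, ARM_BACKEND_STORAGE_NAME,
// ARM_BACKEND_STORAGE_KEY, ARM_BACKEND_STORAGE_CONTAINER, public_key and ssh_key.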
func TestIT_Bedrock_Azure_Single_KV_Cosmos_Mongo_DB_Test(t *testing.T) {
t.Parallel()
//Generate common-infra resources for integration use
uniqueID := strings.ToLower(random.UniqueId())
k8sName := fmt.Sprintf("gtestk8s-%s", uniqueID)
addressSpace := "10.39.0.0/16"
kvName := k8sName + "-kv"
kvRG := kvName + "-rg"
k8sVersion := "1.15.7"
location := os.Getenv("DATACENTER_LOCATION")
tenantid := os.Getenv("ARM_TENANT_ID")
clientid := os.Getenv("ARM_CLIENT_ID")
clientsecret := os.Getenv("ARM_CLIENT_SECRET")
subnetName := k8sName + "-subnet"
vnetName := k8sName + "-vnet"
//Generate common-infra backend for tf.state files to be persisted in azure storage account
backendName := os.Getenv("ARM_BACKEND_STORAGE_NAME")
backendKey := os.Getenv("ARM_BACKEND_STORAGE_KEY")
backendContainer := os.Getenv("ARM_BACKEND_STORAGE_CONTAINER")
backendTfstatekey := k8sName + "-tfstatekey"
//Copy env directories as needed to avoid conflicting with other running tests
azureCommonInfraFolder := "../cluster/test-temp-envs/azure-common-infra-" + k8sName
copy.Copy("../cluster/environments/azure-common-infra", azureCommonInfraFolder)
//Create the resource group
cmd0 := exec.Command("az", "login", "--service-principal", "-u", clientid, "-p", clientsecret, "--tenant", tenantid)
err0 := cmd0.Run()
if err0 != nil {
fmt.Println("unable to login to azure cli")
log.Fatal(err0)
os.Exit(-1)
}
cmd1 := exec.Command("az", "group", "create", "-n", kvRG, "-l", location)
err1 := cmd1.Run()
if err1 != nil {
fmt.Println("failed to create common resource group")
log.Fatal(err1)
os.Exit(-1)
}
//Specify the test case folder and "-var" option mapping for the backend
common_backend_tfOptions := &terraform.Options{
TerraformDir: azureCommonInfraFolder,
BackendConfig: map[string]interface{}{
"storage_account_name": backendName,
"access_key": backendKey,
"container_name": backendContainer,
"key": "common_" + backendTfstatekey,
},
}
//Specify the test case folder and "-var" option mapping
common_tfOptions := &terraform.Options{
TerraformDir: azureCommonInfraFolder,
Upgrade: true,
Vars: map[string]interface{}{
"address_space": addressSpace,
"keyvault_name": kvName,
"global_resource_group_name": kvRG,
"service_principal_id": clientid,
"subnet_name": subnetName,
"subnet_prefix": addressSpace,
"vnet_name": vnetName,
},
}
//Terraform init, apply, output, and defer destroy for common-infra bedrock environment
defer terraform.Destroy(t, common_tfOptions)
terraform.Init(t, common_backend_tfOptions)
terraform.Apply(t, common_tfOptions)
// Generate azure single environment using resources generated from common-infra
dnsprefix := k8sName + "-dns"
k8sRG := k8sName + "-rg"
publickey := os.Getenv("public_key")
sshkey := os.Getenv("ssh_key")
cosmos_db_name := k8sName + "-cosmosdb"
mongo_db_name := k8sName + "-mongodb"
//Copy env directories as needed to avoid conflicting with other running tests
azureSingleKeyvaultFolder := "../cluster/test-temp-envs/azure-single-keyvault-cosmos-mongo-db-simple-" + k8sName
copy.Copy("../cluster/environments/azure-single-keyvault-cosmos-mongo-db-simple", azureSingleKeyvaultFolder)
//Create the cluster resource group
cmd2 := exec.Command("az", "group", "create", "-n", k8sRG, "-l", location)
err2 := cmd2.Run()
if err2 != nil {
fmt.Println("failed to create cluster resource group")
log.Fatal(err2)
os.Exit(-1)
}
//Specify the test case folder and "-var" option mapping for the environment backend
k8s_backend_tfOptions := &terraform.Options{
TerraformDir: azureSingleKeyvaultFolder,
BackendConfig: map[string]interface{}{
"storage_account_name": backendName,
"access_key": backendKey,
"container_name": backendContainer,
"key": backendTfstatekey,
},
}
// Specify the test case folder and "-var" options
k8s_tfOptions := &terraform.Options{
TerraformDir: azureSingleKeyvaultFolder,
Upgrade: true,
Vars: map[string]interface{}{
"address_space": addressSpace,
"agent_vm_count": "3",
"agent_vm_size": "Standard_D2s_v3",
"cluster_name": k8sName,
"dns_prefix": dnsprefix,
"gitops_ssh_url": "[email protected]:timfpark/fabrikate-cloud-native-manifests.git",
"gitops_ssh_key": sshkey,
"keyvault_name": kvName,
"keyvault_resource_group": kvRG,
"kubernetes_version": k8sVersion,
"resource_group_name": k8sRG,
"resource_group_location": location,
"ssh_public_key": publickey,
"service_principal_id": clientid,
"service_principal_secret": clientsecret,
"subnet_prefixes": "10.39.0.0/16",
"subnet_name": subnetName,
"vnet_name": vnetName,
"cosmos_db_name": cosmos_db_name,
"mongo_db_name": mongo_db_name,
},
}
//Terraform init, apply, output, and defer destroy on azure-single-keyvault-cosmos-mongo-db-simple bedrock environment
defer terraform.Destroy(t, k8s_tfOptions)
terraform.Init(t, k8s_backend_tfOptions)
terraform.Apply(t, k8s_tfOptions)
//Obtain Kube_config file from module output
os.Setenv("KUBECONFIG", azureSingleKeyvaultFolder+"/output/bedrock_kube_config")
kubeConfig := os.Getenv("KUBECONFIG")
options := k8s.NewKubectlOptions("", kubeConfig)
//Test Case 1: Verify Flux namespace
fmt.Println("Test case 1: Verifying flux namespace")
_flux, fluxErr := k8s.RunKubectlAndGetOutputE(t, options, "get", "po", "--namespace=flux")
if fluxErr != nil || !strings.Contains(_flux, "flux") {
t.Fatal(fluxErr)
} else {
fmt.Println("Flux verification complete")
}
//Test Case 2: Verify keyvault namespace flex
fmt.Println("Test case 2: Verifying flexvolume and kv namespace")
_flex, flexErr := k8s.RunKubectlAndGetOutputE(t, options, "get", "po", "--namespace=kv")
if flexErr != nil || !strings.Contains(_flex, "keyvault-flexvolume") {
t.Fatal(flexErr)
} else {
fmt.Println("Flexvolume verification complete")
}
//Test Case 3: Verify Cosmos/MongoDB
fmt.Println("Test case 3: Verifying Cosmos/MongoDB deployment")
cosmos_db_key := terraform.Output(t, k8s_tfOptions, "azure_cosmos_db_primary_master_key")
cmd3 := exec.Command("az", "cosmosdb", "database", "exists", "--name", cosmos_db_name, "--key", cosmos_db_key, "--db-name", mongo_db_name)
out, cosmosMongoErr := cmd3.CombinedOutput()
if cosmosMongoErr != nil {
t.Fatal(cosmosMongoErr)
} else if !strings.Contains(string(out), "true") {
t.Fatal(cosmosMongoErr)
} else {
fmt.Println("CosmosDB with MongoDB verification complete.")
}
}
|
[
"\"DATACENTER_LOCATION\"",
"\"ARM_TENANT_ID\"",
"\"ARM_CLIENT_ID\"",
"\"ARM_CLIENT_SECRET\"",
"\"ARM_BACKEND_STORAGE_NAME\"",
"\"ARM_BACKEND_STORAGE_KEY\"",
"\"ARM_BACKEND_STORAGE_CONTAINER\"",
"\"public_key\"",
"\"ssh_key\"",
"\"KUBECONFIG\""
] |
[] |
[
"DATACENTER_LOCATION",
"ARM_CLIENT_SECRET",
"public_key",
"ARM_TENANT_ID",
"ssh_key",
"KUBECONFIG",
"ARM_BACKEND_STORAGE_CONTAINER",
"ARM_BACKEND_STORAGE_NAME",
"ARM_BACKEND_STORAGE_KEY",
"ARM_CLIENT_ID"
] |
[]
|
["DATACENTER_LOCATION", "ARM_CLIENT_SECRET", "public_key", "ARM_TENANT_ID", "ssh_key", "KUBECONFIG", "ARM_BACKEND_STORAGE_CONTAINER", "ARM_BACKEND_STORAGE_NAME", "ARM_BACKEND_STORAGE_KEY", "ARM_CLIENT_ID"]
|
go
| 10 | 0 | |
run_scheduler.py
|
#!/usr/bin/env python
import argparse
import urlparse
import sys
import os
from redis import Redis
from rq_scheduler.scheduler import Scheduler
from recruit_app.app import create_app
from recruit_app.settings import DevConfig, ProdConfig
from rq.logutils import setup_loghandlers
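# Redis connection resolution order in main(): an explicit --url argument wins,
# then a REDISTOGO_URL environment variable, then the host/port/db/password
# arguments (which default to the RQ_REDIS_* environment variables).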
def main():
parser = argparse.ArgumentParser(description='Runs RQ scheduler')
parser.add_argument('-H', '--host',
default=os.environ.get('RQ_REDIS_HOST', 'localhost'),
help="Redis host")
parser.add_argument('-p', '--port',
default=int(os.environ.get('RQ_REDIS_PORT', 6379)),
type=int,
help="Redis port number")
parser.add_argument('-d', '--db',
default=int(os.environ.get('RQ_REDIS_DB', 0)),
type=int, help="Redis database")
parser.add_argument('-P', '--password',
default=os.environ.get('RQ_REDIS_PASSWORD'),
help="Redis password")
parser.add_argument('--verbose', '-v',
action='store_true',
default=False,
help='Show more output')
parser.add_argument('--url', '-u',
default=os.environ.get('RQ_REDIS_URL'),
help='URL describing Redis connection details. \
Overrides other connection arguments if supplied.')
parser.add_argument('-i', '--interval',
default=60.0,
type=float,
help="How often the scheduler checks for new jobs to add to the \
queue (in seconds, can be floating-point for more precision).")
parser.add_argument('--path',
default='.',
help='Specify the import path.')
parser.add_argument('--pid',
help='A filename to use for the PID file.',
metavar='FILE')
args = parser.parse_args()
if args.path:
sys.path = args.path.split(':') + sys.path
if args.pid:
pid = str(os.getpid())
filename = args.pid
with open(filename, 'w') as f:
f.write(pid)
if args.url is not None:
connection = Redis.from_url(args.url)
elif os.getenv('REDISTOGO_URL'):
redis_url = os.getenv('REDISTOGO_URL')
if not redis_url:
raise RuntimeError('Set up Redis To Go first.')
urlparse.uses_netloc.append('redis')
url = urlparse.urlparse(redis_url)
connection = Redis(host=url.hostname, port=url.port, db=0, password=url.password)
elif args.host is not None:
connection = Redis(args.host, args.port, args.db, args.password)
else:
connection = Redis()
if args.verbose:
level = 'DEBUG'
else:
level = 'INFO'
setup_loghandlers(level)
scheduler = Scheduler(connection=connection, interval=args.interval)
scheduler.run()
if __name__ == '__main__':
if os.environ.get("RECRUIT_APP_ENV") == 'prod':
app = create_app(ProdConfig)
else:
app = create_app(DevConfig)
with app.app_context():
main()
|
[] |
[] |
[
"RQ_REDIS_HOST",
"REDISTOGO_URL",
"RQ_REDIS_URL",
"RECRUIT_APP_ENV",
"RQ_REDIS_DB",
"RQ_REDIS_PORT",
"RQ_REDIS_PASSWORD"
] |
[]
|
["RQ_REDIS_HOST", "REDISTOGO_URL", "RQ_REDIS_URL", "RECRUIT_APP_ENV", "RQ_REDIS_DB", "RQ_REDIS_PORT", "RQ_REDIS_PASSWORD"]
|
python
| 7 | 0 | |
src/slapdash/utils.py
|
from flask import current_app as server
from functools import wraps
from colour import Color
import ast
import os
import sqlalchemy as sa
from pathlib import Path
def get_url(path):
"""Expands an internal URL to include prefix the app is mounted at"""
return "{}{}".format(server.config['ROUTES_PATHNAME_PREFIX'], path)
def component(func):
"""Decorator to help vanilla functions as pseudo Dash Components"""
@wraps(func)
def function_wrapper(children=None, **kwargs):
# remove className and style args from input kwargs so the component
# function does not have to worry about clobbering them.
className = kwargs.pop('className', None)
style = kwargs.pop('style', None)
# call the component function and get the result
result = func(children=children, **kwargs)
# now restore the initial classes and styles by adding them
# to any values the component introduced
if className is not None:
if hasattr(result, 'className'):
result.className = '{} {}'.format(
className, result.className
)
else:
result.className = className
if style is not None:
if hasattr(result, 'style'):
result.style = style.update(result.style)
else:
result.style = style
return result
return function_wrapper
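# Minimal usage sketch for the @component decorator above. The html.Div layout
# and class names are illustrative assumptions, not part of this module:
#
#   import dash_html_components as html
#
#   @component
#   def Card(children=None, **kwargs):
#       return html.Div(children, className='card', **kwargs)
#
#   Card('hello', className='highlight')  # 'highlight' is merged with 'card'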
def connect_db():
try:
url = os.environ['DBURL']
except KeyError:
url = open(Path(__file__).parent / 'redshift_key.txt', 'r').read()
return sa.create_engine(url)
def to_deck_line(df, segment_column,
color_column, color_low, color_high,
width_column, width_low, width_high,
legend_column, legend_title,
color_opacity=1, color_steps=10, filter_greater_than_color_column=5):
if isinstance(color_column, str):
color_true = True
color_max_value = 100 #df[color_column].max()
color_range = list(Color(rgb=color_low).range_to(Color(rgb=color_high), color_steps))
color_range = list(map(lambda hues: list(map(lambda x: x * 255, hues.rgb)), color_range))
elif isinstance(color_column, tuple) and len(color_column) == 3:
color_true = False
else:
raise TypeError('color_column should be a rbg tuple or a column name string')
if isinstance(width_column, str):
width_true = True
width_max_value = df[width_column].max()
elif isinstance(width_column, int):
width_true = False
else:
raise TypeError('width_column should be a int or a column name string')
if isinstance(legend_column, str):
legend_true = True
elif legend_column is None:
legend_true = False
else:
raise TypeError('legend_column should be a column name string or None')
try:
df[segment_column] = df[segment_column].apply(ast.literal_eval)
except ValueError:
pass
columns = [i for i in [segment_column, color_column, width_column] if isinstance(i, str)]
column_map = dict(zip(columns, [i for i in range(len(columns))]))
records = df[columns].to_dict(orient='list')
records = list(zip(*records.values()))
def to_color_range(value, color_low, color_high, color_opacity=225, color_steps=10):
to_range = lambda x: int(round(x / color_max_value * (color_steps - 1)))
return color_range[to_range(value)] + [color_opacity]
def to_width_range(value):
return round(value / width_max_value * (width_high - width_low) + width_low)
def to_legend_data(value):
if isinstance(value, float):
return round(value)
def inside(record, column_map):
segment = record[column_map[segment_column]]
result = {'sourcePosition': [segment[0]['x'], segment[0]['y']],
'targetPosition': [segment[1]['x'], segment[1]['y']],
'color': to_color_range(record[column_map[color_column]], color_low, color_high, color_steps=color_steps) if color_true else color_column,
'width': to_width_range(record[column_map[width_column]]) if width_true else width_column,
'legend_title': legend_title,
'legend_data': to_legend_data(record[column_map[legend_column]]) if legend_true else legend_column}
return result
return list(map(lambda x: inside(x, column_map),
filter(lambda x: x[column_map[color_column]] > filter_greater_than_color_column, records)))
|
[] |
[] |
[
"DBURL"
] |
[]
|
["DBURL"]
|
python
| 1 | 0 | |
scripts/hunter/twitter/tw_watchhunter.py
|
#!/usr/bin/env python
import sys
import os
import configparser
import subprocess
## Django Setup
import django
import pymysql
pymysql.install_as_MySQLdb()
conffile = os.path.join(os.path.dirname(__file__), "../conf/hunter.conf")
conf = configparser.ConfigParser()
conf.read(conffile)
sys.path.append(conf.get('exist', 'syspath'))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'intelligence.settings')
django.setup()
from apps.twitter_hunter.models import Hunt
## Logger Setup
from logging.handlers import TimedRotatingFileHandler
from logging import getLogger, DEBUG, Formatter
logfilename = os.path.join(os.path.dirname(__file__), 'logs/watchhunter.log')
logger = getLogger()
handler = TimedRotatingFileHandler(
filename=logfilename,
when="D",
interval=1,
backupCount=31,
)
handler.setFormatter(Formatter("%(asctime)s %(name)s %(funcName)s [%(levelname)s]: %(message)s"))
logger.setLevel(DEBUG)
logger.addHandler(handler)
logger.propagate = False
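# isRunning shells out to `ps aux | grep "tw_hunter.py <id>$"` and returns True
# when at least one matching hunter process is already alive for the given hunt id.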
def isRunning(hunt_id):
cmd = "ps aux|grep \"tw_hunter.py " + str(hunt_id) + "$\"|grep -v grep |wc -l"
result = subprocess.check_output(cmd, shell=True)
if int(result.decode('utf-8').strip('\n\n')) > 0:
return True
return False
def startHunter(entry):
hunter_path = os.path.join(os.path.dirname(__file__), "tw_hunter.py")
cmd = "{hunter} {id}".format(
hunter = hunter_path,
id = entry['id'],
)
subprocess.Popen(cmd, shell=True)
logger.info("run %s: ", id)
if __name__ == "__main__":
logger.info("start")
entry_list = list(Hunt.objects.all().values('id', 'enable'))
for entry in entry_list:
if entry['enable'] and not isRunning(entry['id']):
startHunter(entry)
logger.info("done")
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
message-transport/pkg/transport/kafka/kafka_integration_test.go
|
/*
* Copyright 2018-Present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/projectriff/riff/message-transport/pkg/transport/kafka"
"strings"
"os"
"github.com/projectriff/riff/message-transport/pkg/transport"
"github.com/bsm/sarama-cluster"
"github.com/projectriff/riff/message-transport/pkg/message"
"github.com/Shopify/sarama"
"time"
"fmt"
)
var _ = Describe("Kafka Integration", func() {
var (
topic string
producer transport.Producer
consumer transport.Consumer
groupId string
inspector transport.Inspector
testMessage message.Message
)
BeforeEach(func() {
topic = fmt.Sprintf("topic-%d", time.Now().Nanosecond())
testMessage = message.NewMessage([]byte("hello"), message.Headers{"Content-Type": []string{"bag/plastic"}})
brokers := brokers()
Expect(brokers).NotTo(BeEmpty())
var err error
producer, err = kafka.NewProducer(brokers)
Expect(err).NotTo(HaveOccurred())
config := cluster.NewConfig()
// Use "oldest" initial offset in case there is a race between the asynchronous construction of the consumer
// machinery and the producer writing the "new" message.
config.Consumer.Offsets.Initial = sarama.OffsetOldest
// Use a fresh group id so that runs in close succession won't suffer from Kafka broker delays
// due to consumers coming and going in the same group
groupId = fmt.Sprintf("group-%d", time.Now().Nanosecond())
consumer, err = kafka.NewConsumer(brokers, groupId, []string{topic}, config)
Expect(err).NotTo(HaveOccurred())
inspector, err = kafka.NewInspector(brokers)
Expect(err).NotTo(HaveOccurred())
})
It("should be able to send a message to a topic and receive it back", func() {
err := producer.Send(topic, testMessage)
Expect(err).NotTo(HaveOccurred())
// It takes a while for the production to affect the queue length.
Eventually(func() int64{
queueLength, err := inspector.QueueLength(topic, groupId)
Expect(err).NotTo(HaveOccurred())
return queueLength
}, time.Second*10).Should(Equal(int64(1)))
msg, topic, err := consumer.Receive()
Expect(err).NotTo(HaveOccurred())
Expect(msg).To(Equal(testMessage))
Expect(topic).To(Equal(topic))
// It takes a while for the consumption to affect the queue length.
Eventually(func() int64{
queueLength, err := inspector.QueueLength(topic, groupId)
Expect(err).NotTo(HaveOccurred())
return queueLength
}, time.Second*10).Should(Equal(int64(0)))
})
})
func brokers() []string {
return strings.Split(os.Getenv("KAFKA_BROKERS"), ",")
}
|
[
"\"KAFKA_BROKERS\""
] |
[] |
[
"KAFKA_BROKERS"
] |
[]
|
["KAFKA_BROKERS"]
|
go
| 1 | 0 | |
pkg/cli/lscolors/lscolors.go
|
// Package lscolors provides styling of filenames based on file features.
//
// This is a reverse-engineered implementation of the parsing and
// interpretation of the LS_COLORS environment variable used by GNU
// coreutils.
package lscolors
import (
"os"
"path"
"strings"
"sync"
"github.com/elves/elvish/pkg/util"
)
// Colorist styles filenames based on the features of the file.
type Colorist interface {
// GetStyle returns the style for the named file.
GetStyle(fname string) string
}
type colorist struct {
styleForFeature map[feature]string
styleForExt map[string]string
}
const defaultLsColorString = `rs=:di=01;34:ln=01;36:mh=:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:su=37;41:sg=30;43:ca=30;41:tw=30;42:ow=34;42:st=37;44:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arc=01;31:*.arj=01;31:*.taz=01;31:*.lha=01;31:*.lz4=01;31:*.lzh=01;31:*.lzma=01;31:*.tlz=01;31:*.txz=01;31:*.tzo=01;31:*.t7z=01;31:*.zip=01;31:*.z=01;31:*.Z=01;31:*.dz=01;31:*.gz=01;31:*.lrz=01;31:*.lz=01;31:*.lzo=01;31:*.xz=01;31:*.bz2=01;31:*.bz=01;31:*.tbz=01;31:*.tbz2=01;31:*.tz=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.war=01;31:*.ear=01;31:*.sar=01;31:*.rar=01;31:*.alz=01;31:*.ace=01;31:*.zoo=01;31:*.cpio=01;31:*.7z=01;31:*.rz=01;31:*.cab=01;31:*.jpg=01;35:*.jpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.svg=01;35:*.svgz=01;35:*.mng=01;35:*.pcx=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.m2v=01;35:*.mkv=01;35:*.webm=01;35:*.ogm=01;35:*.mp4=01;35:*.m4v=01;35:*.mp4v=01;35:*.vob=01;35:*.qt=01;35:*.nuv=01;35:*.wmv=01;35:*.asf=01;35:*.rm=01;35:*.rmvb=01;35:*.flc=01;35:*.avi=01;35:*.fli=01;35:*.flv=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.yuv=01;35:*.cgm=01;35:*.emf=01;35:*.axv=01;35:*.anx=01;35:*.ogv=01;35:*.ogx=01;35:*.aac=36:*.au=36:*.flac=36:*.mid=36:*.midi=36:*.mka=36:*.mp3=36:*.mpc=36:*.ogg=36:*.ra=36:*.wav=36:*.axa=36:*.oga=36:*.spx=36:*.xspf=36:`
var (
lastColorist *colorist
lastColoristMutex sync.Mutex
lastLsColors string
)
func init() {
lastColorist = parseLsColor(defaultLsColorString)
}
func GetColorist() Colorist {
lastColoristMutex.Lock()
defer lastColoristMutex.Unlock()
s := getLsColors()
if lastLsColors != s {
lastLsColors = s
lastColorist = parseLsColor(s)
}
return lastColorist
}
func getLsColors() string {
lsColorString := os.Getenv("LS_COLORS")
if len(lsColorString) == 0 {
return defaultLsColorString
}
return lsColorString
}
var featureForName = map[string]feature{
"rs": featureRegular,
"di": featureDirectory,
"ln": featureSymlink,
"mh": featureMultiHardLink,
"pi": featureNamedPipe,
"so": featureSocket,
"do": featureDoor,
"bd": featureBlockDevice,
"cd": featureCharDevice,
"or": featureOrphanedSymlink,
"su": featureSetuid,
"sg": featureSetgid,
"ca": featureCapability,
"tw": featureWorldWritableStickyDirectory,
"ow": featureWorldWritableDirectory,
"st": featureStickyDirectory,
"ex": featureExecutable,
}
// parseLsColor parses a string in the LS_COLORS format into lsColor. Erroneous
// fields are silently ignored.
func parseLsColor(s string) *colorist {
lc := &colorist{make(map[feature]string), make(map[string]string)}
for _, spec := range strings.Split(s, ":") {
words := strings.Split(spec, "=")
if len(words) != 2 {
continue
}
key, value := words[0], words[1]
filterValues := []string{}
for _, splitValue := range strings.Split(value, ";") {
if strings.Count(splitValue, "0") == len(splitValue) {
continue
}
filterValues = append(filterValues, splitValue)
}
if len(filterValues) == 0 {
continue
}
value = strings.Join(filterValues, ";")
if strings.HasPrefix(key, "*.") {
lc.styleForExt[key[1:]] = value
} else {
feature, ok := featureForName[key]
if !ok {
continue
}
lc.styleForFeature[feature] = value
}
}
return lc
}
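// lsColorsParsingSketch is a small, illustrative example (not used elsewhere)
// of the behaviour of parseLsColor above: "di" entries land in styleForFeature
// and "*.ext" entries land in styleForExt keyed by the extension.
func lsColorsParsingSketch() (string, string) {
	lc := parseLsColor("di=01;34:*.png=01;35")
	return lc.styleForFeature[featureDirectory], lc.styleForExt[".png"]
}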
func (lc *colorist) GetStyle(fname string) string {
mh := strings.Trim(lc.styleForFeature[featureMultiHardLink], "0") != ""
// TODO Handle error from determineFeature
feature, _ := determineFeature(fname, mh)
if feature == featureRegular {
if ext := path.Ext(fname); ext != "" {
if style, ok := lc.styleForExt[ext]; ok {
return style
}
}
}
return lc.styleForFeature[feature]
}
// WithTestLsColors sets LS_COLORS to a value where directories are blue and
// .png files are red. It returns a function to restore the old value. This
// function is mainly useful in tests.
func WithTestLsColors() func() {
// ow (world-writable directory) needed for Windows.
return util.WithTempEnv("LS_COLORS", "di=34:ow=34:*.png=31")
}
|
[
"\"LS_COLORS\""
] |
[] |
[
"LS_COLORS"
] |
[]
|
["LS_COLORS"]
|
go
| 1 | 0 | |
robopilot/templates/complete.py
|
#!/usr/bin/env python3
"""
Scripts to drive a robopilot 2 car
Usage:
manage.py (drive) [--model=<model>] [--js] [--type=(linear|categorical)] [--camera=(single|stereo)] [--meta=<key:value> ...] [--myconfig=<filename>]
manage.py (train) [--tubs=tubs] (--model=<model>) [--type=(linear|inferred|tensorrt_linear|tflite_linear)]
Options:
-h --help Show this screen.
--js Use physical joystick.
-f --file=<file> A text file containing paths to tub files, one per line. Option may be used more than once.
--meta=<key:value> Key/Value strings describing a piece of meta data about this drive. Option may be used more than once.
--myconfig=filename Specify myconfig file to use.
[default: myconfig.py]
"""
import os
import time
import logging
from docopt import docopt
import robopilot as dk
from robopilot.parts.transform import TriggeredCallback, DelayedTrigger
from robopilot.parts.tub_v2 import TubWriter
from robopilot.parts.datastore import TubHandler
from robopilot.parts.controller import LocalWebController, WebFpv, JoystickController
from robopilot.parts.throttle_filter import ThrottleFilter
from robopilot.parts.behavior import BehaviorPart
from robopilot.parts.file_watcher import FileWatcher
from robopilot.parts.launch import AiLaunch
from robopilot.utils import *
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def drive(cfg, model_path=None, use_joystick=False, model_type=None,
camera_type='single', meta=[]):
"""
Construct a working robotic vehicle from many parts. Each part runs as a
job in the Vehicle loop, calling either it's run or run_threaded method
depending on the constructor flag `threaded`. All parts are updated one
after another at the framerate given in cfg.DRIVE_LOOP_HZ assuming each
part finishes processing in a timely manner. Parts may have named outputs
and inputs. The framework handles passing named outputs to parts
requesting the same named input.
"""
logger.info(f'PID: {os.getpid()}')
if cfg.ROBOPILOT_GYM:
#the simulator will use CUDA, and we usually run out of resources
#if we also try to use CUDA, so disable it for robopilot_gym.
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
if model_type is None:
if cfg.TRAIN_LOCALIZER:
model_type = "localizer"
elif cfg.TRAIN_BEHAVIORS:
model_type = "behavior"
else:
model_type = cfg.DEFAULT_MODEL_TYPE
#Initialize car
V = dk.vehicle.Vehicle()
#Initialize logging before anything else to allow console logging
if cfg.HAVE_CONSOLE_LOGGING:
logger.setLevel(logging.getLevelName(cfg.LOGGING_LEVEL))
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter(cfg.LOGGING_FORMAT))
logger.addHandler(ch)
if cfg.HAVE_MQTT_TELEMETRY:
from robopilot.parts.telemetry import MqttTelemetry
tel = MqttTelemetry(cfg)
if cfg.HAVE_ODOM:
if cfg.ENCODER_TYPE == "GPIO":
from robopilot.parts.encoder import RotaryEncoder
enc = RotaryEncoder(mm_per_tick=cfg.MM_PER_TICK, pin=cfg.ODOM_PIN, poll_delay=1.0/(cfg.DRIVE_LOOP_HZ*3), debug=cfg.ODOM_DEBUG)
V.add(enc, inputs=['throttle'], outputs=['enc/speed'], threaded=True)
elif cfg.ENCODER_TYPE == "arduino":
from robopilot.parts.encoder import ArduinoEncoder
enc = ArduinoEncoder(mm_per_tick=cfg.MM_PER_TICK, debug=cfg.ODOM_DEBUG)
V.add(enc, outputs=['enc/speed'], threaded=True)
else:
print("No supported encoder found")
logger.info("cfg.CAMERA_TYPE %s"%cfg.CAMERA_TYPE)
if camera_type == "stereo":
if cfg.CAMERA_TYPE == "WEBCAM":
from robopilot.parts.camera import Webcam
camA = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 0)
camB = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 1)
elif cfg.CAMERA_TYPE == "CVCAM":
from robopilot.parts.cv import CvCam
camA = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 0)
camB = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, iCam = 1)
else:
raise(Exception("Unsupported camera type: %s" % cfg.CAMERA_TYPE))
V.add(camA, outputs=['cam/image_array_a'], threaded=True)
V.add(camB, outputs=['cam/image_array_b'], threaded=True)
from robopilot.parts.image import StereoPair
V.add(StereoPair(), inputs=['cam/image_array_a', 'cam/image_array_b'],
outputs=['cam/image_array'])
elif cfg.CAMERA_TYPE == "D435":
from robopilot.parts.realsense435i import RealSense435i
cam = RealSense435i(
enable_rgb=cfg.REALSENSE_D435_RGB,
enable_depth=cfg.REALSENSE_D435_DEPTH,
enable_imu=cfg.REALSENSE_D435_IMU,
device_id=cfg.REALSENSE_D435_ID)
V.add(cam, inputs=[],
outputs=['cam/image_array', 'cam/depth_array',
'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'],
threaded=True)
else:
if cfg.ROBOPILOT_GYM:
from robopilot.parts.dgym import RobopilotGymEnv
inputs = []
outputs = ['cam/image_array']
threaded = True
if cfg.ROBOPILOT_GYM:
from robopilot.parts.dgym import RobopilotGymEnv
#rbx
cam = RobopilotGymEnv(cfg.ROBOPILOT_SIM_PATH, host=cfg.SIM_HOST, env_name=cfg.ROBOPILOT_GYM_ENV_NAME, conf=cfg.GYM_CONF, record_location=cfg.SIM_RECORD_LOCATION, record_gyroaccel=cfg.SIM_RECORD_GYROACCEL, record_velocity=cfg.SIM_RECORD_VELOCITY, record_lidar=cfg.SIM_RECORD_LIDAR, delay=cfg.SIM_ARTIFICIAL_LATENCY)
threaded = True
inputs = ['angle', 'throttle']
elif cfg.CAMERA_TYPE == "PICAM":
from robopilot.parts.camera import PiCamera
cam = PiCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, vflip=cfg.CAMERA_VFLIP, hflip=cfg.CAMERA_HFLIP)
elif cfg.CAMERA_TYPE == "WEBCAM":
from robopilot.parts.camera import Webcam
cam = Webcam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "CVCAM":
from robopilot.parts.cv import CvCam
cam = CvCam(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "CSIC":
from robopilot.parts.camera import CSICamera
cam = CSICamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE, gstreamer_flip=cfg.CSIC_CAM_GSTREAMER_FLIP_PARM)
elif cfg.CAMERA_TYPE == "V4L":
from robopilot.parts.camera import V4LCamera
cam = V4LCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH, framerate=cfg.CAMERA_FRAMERATE)
elif cfg.CAMERA_TYPE == "MOCK":
from robopilot.parts.camera import MockCamera
cam = MockCamera(image_w=cfg.IMAGE_W, image_h=cfg.IMAGE_H, image_d=cfg.IMAGE_DEPTH)
elif cfg.CAMERA_TYPE == "IMAGE_LIST":
from robopilot.parts.camera import ImageListCamera
cam = ImageListCamera(path_mask=cfg.PATH_MASK)
elif cfg.CAMERA_TYPE == "LEOPARD":
from robopilot.parts.leopard_imaging import LICamera
cam = LICamera(width=cfg.IMAGE_W, height=cfg.IMAGE_H, fps=cfg.CAMERA_FRAMERATE)
else:
raise(Exception("Unkown camera type: %s" % cfg.CAMERA_TYPE))
# Robopilot gym part will output position information if it is configured
if cfg.ROBOPILOT_GYM:
if cfg.SIM_RECORD_LOCATION:
outputs += ['pos/pos_x', 'pos/pos_y', 'pos/pos_z', 'pos/speed', 'pos/cte']
if cfg.SIM_RECORD_GYROACCEL:
outputs += ['gyro/gyro_x', 'gyro/gyro_y', 'gyro/gyro_z', 'accel/accel_x', 'accel/accel_y', 'accel/accel_z']
if cfg.SIM_RECORD_VELOCITY:
outputs += ['vel/vel_x', 'vel/vel_y', 'vel/vel_z']
if cfg.SIM_RECORD_LIDAR:
outputs += ['lidar/dist_array']
V.add(cam, inputs=inputs, outputs=outputs, threaded=threaded)
# add lidar
if cfg.USE_LIDAR:
from robopilot.parts.lidar import RPLidar
if cfg.LIDAR_TYPE == 'RP':
print("adding RP lidar part")
lidar = RPLidar(lower_limit = cfg.LIDAR_LOWER_LIMIT, upper_limit = cfg.LIDAR_UPPER_LIMIT)
V.add(lidar, inputs=[],outputs=['lidar/dist_array'], threaded=True)
if cfg.LIDAR_TYPE == 'YD':
print("YD Lidar not yet supported")
#This web controller will create a web server that is capable
#of managing steering, throttle, and modes, and more.
ctr = LocalWebController(port=cfg.WEB_CONTROL_PORT, mode=cfg.WEB_INIT_MODE)
V.add(ctr,
inputs=['cam/image_array', 'tub/num_records'],
outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],
threaded=True)
if use_joystick or cfg.USE_JOYSTICK_AS_DEFAULT:
#modify max_throttle closer to 1.0 to have more power
#modify steering_scale lower than 1.0 to have less responsive steering
if cfg.CONTROLLER_TYPE == "pigpio_rc": # an RC controllers read by GPIO pins. They typically don't have buttons
from robopilot.parts.controller import RCReceiver
ctr = RCReceiver(cfg)
V.add(ctr, outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],threaded=False)
else:
if cfg.CONTROLLER_TYPE == "custom": #custom controller created with `robopilot createjs` command
from my_joystick import MyJoystickController
ctr = MyJoystickController(
throttle_dir=cfg.JOYSTICK_THROTTLE_DIR,
throttle_scale=cfg.JOYSTICK_MAX_THROTTLE,
steering_scale=cfg.JOYSTICK_STEERING_SCALE,
auto_record_on_throttle=cfg.AUTO_RECORD_ON_THROTTLE)
ctr.set_deadzone(cfg.JOYSTICK_DEADZONE)
elif cfg.CONTROLLER_TYPE == "MM1":
from robopilot.parts.robohat import RoboHATController
ctr = RoboHATController(cfg)
else:
from robopilot.parts.controller import get_js_controller
ctr = get_js_controller(cfg)
if cfg.USE_NETWORKED_JS:
from robopilot.parts.controller import JoyStickSub
netwkJs = JoyStickSub(cfg.NETWORK_JS_SERVER_IP)
V.add(netwkJs, threaded=True)
ctr.js = netwkJs
V.add(ctr, inputs=['cam/image_array'], outputs=['user/angle', 'user/throttle', 'user/mode', 'recording'],threaded=True)
#this throttle filter will allow one tap back for esc reverse
th_filter = ThrottleFilter()
V.add(th_filter, inputs=['user/throttle'], outputs=['user/throttle'])
#See if we should even run the pilot module.
#This is only needed because the part run_condition only accepts boolean
class PilotCondition:
def run(self, mode):
if mode == 'user':
return False
else:
return True
V.add(PilotCondition(), inputs=['user/mode'], outputs=['run_pilot'])
class LedConditionLogic:
def __init__(self, cfg):
self.cfg = cfg
def run(self, mode, recording, recording_alert, behavior_state, model_file_changed, track_loc):
#returns a blink rate. 0 for off. -1 for on. positive for rate.
if track_loc is not None:
led.set_rgb(*self.cfg.LOC_COLORS[track_loc])
return -1
if model_file_changed:
led.set_rgb(self.cfg.MODEL_RELOADED_LED_R, self.cfg.MODEL_RELOADED_LED_G, self.cfg.MODEL_RELOADED_LED_B)
return 0.1
else:
led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)
if recording_alert:
led.set_rgb(*recording_alert)
return self.cfg.REC_COUNT_ALERT_BLINK_RATE
else:
led.set_rgb(self.cfg.LED_R, self.cfg.LED_G, self.cfg.LED_B)
if behavior_state is not None and model_type == 'behavior':
r, g, b = self.cfg.BEHAVIOR_LED_COLORS[behavior_state]
led.set_rgb(r, g, b)
return -1 #solid on
if recording:
return -1 #solid on
elif mode == 'user':
return 1
elif mode == 'local_angle':
return 0.5
elif mode == 'local':
return 0.1
return 0
if cfg.HAVE_RGB_LED and not cfg.ROBOPILOT_GYM:
from robopilot.parts.led_status import RGB_LED
led = RGB_LED(cfg.LED_PIN_R, cfg.LED_PIN_G, cfg.LED_PIN_B, cfg.LED_INVERT)
led.set_rgb(cfg.LED_R, cfg.LED_G, cfg.LED_B)
V.add(LedConditionLogic(cfg), inputs=['user/mode', 'recording', "records/alert", 'behavior/state', 'modelfile/modified', "pilot/loc"],
outputs=['led/blink_rate'])
V.add(led, inputs=['led/blink_rate'])
def get_record_alert_color(num_records):
col = (0, 0, 0)
for count, color in cfg.RECORD_ALERT_COLOR_ARR:
if num_records >= count:
col = color
return col
class RecordTracker:
def __init__(self):
self.last_num_rec_print = 0
self.dur_alert = 0
self.force_alert = 0
def run(self, num_records):
if num_records is None:
return 0
if self.last_num_rec_print != num_records or self.force_alert:
self.last_num_rec_print = num_records
if num_records % 10 == 0:
print("recorded", num_records, "records")
if num_records % cfg.REC_COUNT_ALERT == 0 or self.force_alert:
self.dur_alert = num_records // cfg.REC_COUNT_ALERT * cfg.REC_COUNT_ALERT_CYC
self.force_alert = 0
if self.dur_alert > 0:
self.dur_alert -= 1
if self.dur_alert != 0:
return get_record_alert_color(num_records)
return 0
rec_tracker_part = RecordTracker()
V.add(rec_tracker_part, inputs=["tub/num_records"], outputs=['records/alert'])
if cfg.AUTO_RECORD_ON_THROTTLE:
def show_record_count_status():
rec_tracker_part.last_num_rec_print = 0
rec_tracker_part.force_alert = 1
if (cfg.CONTROLLER_TYPE != "pigpio_rc") and (cfg.CONTROLLER_TYPE != "MM1"): # these controllers don't use the joystick class
if isinstance(ctr, JoystickController):
ctr.set_button_down_trigger('circle', show_record_count_status) #then we are not using the circle button. hijack that to force a record count indication
else:
show_record_count_status()
#Sombrero
if cfg.HAVE_SOMBRERO:
from robopilot.parts.sombrero import Sombrero
s = Sombrero()
#IMU
if cfg.HAVE_IMU:
from robopilot.parts.imu import IMU
imu = IMU(sensor=cfg.IMU_SENSOR, dlp_setting=cfg.IMU_DLP_CONFIG)
V.add(imu, outputs=['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z'], threaded=True)
# Use the FPV preview, which will show the cropped image output, or the full frame.
if cfg.USE_FPV:
V.add(WebFpv(), inputs=['cam/image_array'], threaded=True)
#Behavioral state
if cfg.TRAIN_BEHAVIORS:
bh = BehaviorPart(cfg.BEHAVIOR_LIST)
V.add(bh, outputs=['behavior/state', 'behavior/label', "behavior/one_hot_state_array"])
try:
ctr.set_button_down_trigger('L1', bh.increment_state)
except:
pass
inputs = ['cam/image_array', "behavior/one_hot_state_array"]
# choose additional model inputs based on the configured sensors
elif cfg.USE_LIDAR:
inputs = ['cam/image_array', 'lidar/dist_array']
elif cfg.HAVE_ODOM:
inputs = ['cam/image_array', 'enc/speed']
elif model_type == "imu":
assert cfg.HAVE_IMU, 'Missing imu parameter in config'
# Run the pilot if the mode is not user.
inputs = ['cam/image_array',
'imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
else:
inputs = ['cam/image_array']
def load_model(kl, model_path):
start = time.time()
print('loading model', model_path)
kl.load(model_path)
print('finished loading in %s sec.' % (str(time.time() - start)) )
def load_weights(kl, weights_path):
start = time.time()
try:
print('loading model weights', weights_path)
kl.model.load_weights(weights_path)
print('finished loading in %s sec.' % (str(time.time() - start)) )
except Exception as e:
print(e)
print('ERR>> problems loading weights', weights_path)
def load_model_json(kl, json_fnm):
start = time.time()
print('loading model json', json_fnm)
from tensorflow.python import keras
try:
with open(json_fnm, 'r') as handle:
contents = handle.read()
kl.model = keras.models.model_from_json(contents)
print('finished loading json in %s sec.' % (str(time.time() - start)) )
except Exception as e:
print(e)
print("ERR>> problems loading model json", json_fnm)
if model_path:
# When we have a model, first create an appropriate Keras part
kl = dk.utils.get_model_by_type(model_type, cfg)
model_reload_cb = None
if '.h5' in model_path or '.trt' in model_path or '.tflite' in \
model_path or '.savedmodel' in model_path:
# load the whole model with weights, etc
load_model(kl, model_path)
def reload_model(filename):
load_model(kl, filename)
model_reload_cb = reload_model
elif '.json' in model_path:
# when we have a .json extension
# load the model from there and look for a matching
# .wts file with just weights
load_model_json(kl, model_path)
weights_path = model_path.replace('.json', '.weights')
load_weights(kl, weights_path)
def reload_weights(filename):
weights_path = filename.replace('.json', '.weights')
load_weights(kl, weights_path)
model_reload_cb = reload_weights
else:
print("ERR>> Unknown extension type on model file!!")
return
# this part will signal visual LED, if connected
V.add(FileWatcher(model_path, verbose=True),
outputs=['modelfile/modified'])
# these parts will reload the model file, but only when ai is running
# so we don't interrupt user driving
V.add(FileWatcher(model_path), outputs=['modelfile/dirty'],
run_condition="ai_running")
V.add(DelayedTrigger(100), inputs=['modelfile/dirty'],
outputs=['modelfile/reload'], run_condition="ai_running")
V.add(TriggeredCallback(model_path, model_reload_cb),
inputs=["modelfile/reload"], run_condition="ai_running")
outputs = ['pilot/angle', 'pilot/throttle']
if cfg.TRAIN_LOCALIZER:
outputs.append("pilot/loc")
# Add image transformations like crop or trapezoidal mask
if hasattr(cfg, 'TRANSFORMATIONS') and cfg.TRANSFORMATIONS:
from robopilot.pipeline.augmentations import ImageAugmentation
V.add(ImageAugmentation(cfg, 'TRANSFORMATIONS'),
inputs=['cam/image_array'], outputs=['cam/image_array_trans'])
inputs = ['cam/image_array_trans'] + inputs[1:]
V.add(kl, inputs=inputs, outputs=outputs, run_condition='run_pilot')
if cfg.STOP_SIGN_DETECTOR:
from robopilot.parts.object_detector.stop_sign_detector \
import StopSignDetector
V.add(StopSignDetector(cfg.STOP_SIGN_MIN_SCORE,
cfg.STOP_SIGN_SHOW_BOUNDING_BOX,
cfg.STOP_SIGN_MAX_REVERSE_COUNT,
cfg.STOP_SIGN_REVERSE_THROTTLE),
inputs=['cam/image_array', 'pilot/throttle'],
outputs=['pilot/throttle', 'cam/image_array'])
V.add(ThrottleFilter(),
inputs=['pilot/throttle'],
outputs=['pilot/throttle'])
# Choose what inputs should change the car.
class DriveMode:
def run(self, mode,
user_angle, user_throttle,
pilot_angle, pilot_throttle):
if mode == 'user':
return user_angle, user_throttle
elif mode == 'local_angle':
return pilot_angle if pilot_angle else 0.0, user_throttle
else:
return pilot_angle if pilot_angle else 0.0, \
pilot_throttle * cfg.AI_THROTTLE_MULT \
if pilot_throttle else 0.0
V.add(DriveMode(),
inputs=['user/mode', 'user/angle', 'user/throttle',
'pilot/angle', 'pilot/throttle'],
outputs=['angle', 'throttle'])
#to give the car a boost when starting ai mode in a race.
aiLauncher = AiLaunch(cfg.AI_LAUNCH_DURATION, cfg.AI_LAUNCH_THROTTLE, cfg.AI_LAUNCH_KEEP_ENABLED)
V.add(aiLauncher,
inputs=['user/mode', 'throttle'],
outputs=['throttle'])
if (cfg.CONTROLLER_TYPE != "pigpio_rc") and (cfg.CONTROLLER_TYPE != "MM1"):
if isinstance(ctr, JoystickController):
ctr.set_button_down_trigger(cfg.AI_LAUNCH_ENABLE_BUTTON, aiLauncher.enable_ai_launch)
class AiRunCondition:
'''
A bool part to let us know when ai is running.
'''
def run(self, mode):
if mode == "user":
return False
return True
V.add(AiRunCondition(), inputs=['user/mode'], outputs=['ai_running'])
# Ai Recording
class AiRecordingCondition:
'''
return True when in AI mode, otherwise respect the user-mode recording flag
'''
def run(self, mode, recording):
if mode == 'user':
return recording
return True
if cfg.RECORD_DURING_AI:
V.add(AiRecordingCondition(), inputs=['user/mode', 'recording'], outputs=['recording'])
# Drive train setup
if cfg.ROBOPILOT_GYM or cfg.DRIVE_TRAIN_TYPE == "MOCK":
pass
elif cfg.DRIVE_TRAIN_TYPE == "I2C_SERVO":
from robopilot.parts.actuator import PCA9685, PWMSteering, PWMThrottle
steering_controller = PCA9685(cfg.STEERING_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL, cfg.PCA9685_I2C_ADDR, busnum=cfg.PCA9685_I2C_BUSNUM)
throttle = PWMThrottle(controller=throttle_controller,
max_pulse=cfg.THROTTLE_FORWARD_PWM,
zero_pulse=cfg.THROTTLE_STOPPED_PWM,
min_pulse=cfg.THROTTLE_REVERSE_PWM)
V.add(steering, inputs=['angle'], threaded=True)
V.add(throttle, inputs=['throttle'], threaded=True)
elif cfg.DRIVE_TRAIN_TYPE == "DC_STEER_THROTTLE":
from robopilot.parts.actuator import Mini_HBridge_DC_Motor_PWM
steering = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT, cfg.HBRIDGE_PIN_RIGHT)
throttle = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)
V.add(steering, inputs=['angle'])
V.add(throttle, inputs=['throttle'])
elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL":
from robopilot.parts.actuator import TwoWheelSteeringThrottle, Mini_HBridge_DC_Motor_PWM
left_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_LEFT_FWD, cfg.HBRIDGE_PIN_LEFT_BWD)
right_motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_RIGHT_FWD, cfg.HBRIDGE_PIN_RIGHT_BWD)
two_wheel_control = TwoWheelSteeringThrottle()
V.add(two_wheel_control,
inputs=['throttle', 'angle'],
outputs=['left_motor_speed', 'right_motor_speed'])
V.add(left_motor, inputs=['left_motor_speed'])
V.add(right_motor, inputs=['right_motor_speed'])
elif cfg.DRIVE_TRAIN_TYPE == "DC_TWO_WHEEL_L298N":
from robopilot.parts.actuator import TwoWheelSteeringThrottle, L298N_HBridge_DC_Motor
left_motor = L298N_HBridge_DC_Motor(cfg.HBRIDGE_L298N_PIN_LEFT_FWD, cfg.HBRIDGE_L298N_PIN_LEFT_BWD, cfg.HBRIDGE_L298N_PIN_LEFT_EN)
right_motor = L298N_HBridge_DC_Motor(cfg.HBRIDGE_L298N_PIN_RIGHT_FWD, cfg.HBRIDGE_L298N_PIN_RIGHT_BWD, cfg.HBRIDGE_L298N_PIN_RIGHT_EN)
two_wheel_control = TwoWheelSteeringThrottle()
V.add(two_wheel_control,
inputs=['throttle', 'angle'],
outputs=['left_motor_speed', 'right_motor_speed'])
V.add(left_motor, inputs=['left_motor_speed'])
V.add(right_motor, inputs=['right_motor_speed'])
elif cfg.DRIVE_TRAIN_TYPE == "SERVO_HBRIDGE_PWM":
from robopilot.parts.actuator import ServoBlaster, PWMSteering
steering_controller = ServoBlaster(cfg.STEERING_CHANNEL) #really pin
# PWM pulse values should be in the range of 100 to 200
assert(cfg.STEERING_LEFT_PWM <= 200)
assert(cfg.STEERING_RIGHT_PWM <= 200)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
from robopilot.parts.actuator import Mini_HBridge_DC_Motor_PWM
motor = Mini_HBridge_DC_Motor_PWM(cfg.HBRIDGE_PIN_FWD, cfg.HBRIDGE_PIN_BWD)
V.add(steering, inputs=['angle'], threaded=True)
V.add(motor, inputs=["throttle"])
elif cfg.DRIVE_TRAIN_TYPE == "MM1":
from robopilot.parts.robohat import RoboHATDriver
V.add(RoboHATDriver(cfg), inputs=['angle', 'throttle'])
elif cfg.DRIVE_TRAIN_TYPE == "PIGPIO_PWM":
from robopilot.parts.actuator import PWMSteering, PWMThrottle, PiGPIO_PWM
steering_controller = PiGPIO_PWM(cfg.STEERING_PWM_PIN, freq=cfg.STEERING_PWM_FREQ, inverted=cfg.STEERING_PWM_INVERTED)
steering = PWMSteering(controller=steering_controller,
left_pulse=cfg.STEERING_LEFT_PWM,
right_pulse=cfg.STEERING_RIGHT_PWM)
throttle_controller = PiGPIO_PWM(cfg.THROTTLE_PWM_PIN, freq=cfg.THROTTLE_PWM_FREQ, inverted=cfg.THROTTLE_PWM_INVERTED)
throttle = PWMThrottle(controller=throttle_controller,
max_pulse=cfg.THROTTLE_FORWARD_PWM,
zero_pulse=cfg.THROTTLE_STOPPED_PWM,
min_pulse=cfg.THROTTLE_REVERSE_PWM)
V.add(steering, inputs=['angle'], threaded=True)
V.add(throttle, inputs=['throttle'], threaded=True)
# OLED setup
if cfg.USE_SSD1306_128_32:
from robopilot.parts.oled import OLEDPart
auto_record_on_throttle = cfg.USE_JOYSTICK_AS_DEFAULT and cfg.AUTO_RECORD_ON_THROTTLE
oled_part = OLEDPart(cfg.SSD1306_128_32_I2C_ROTATION, cfg.SSD1306_RESOLUTION, auto_record_on_throttle)
V.add(oled_part, inputs=['recording', 'tub/num_records', 'user/mode'], outputs=[], threaded=True)
# add tub to save data
if cfg.USE_LIDAR:
inputs = ['cam/image_array', 'lidar/dist_array', 'user/angle', 'user/throttle', 'user/mode']
types = ['image_array', 'nparray','float', 'float', 'str']
else:
inputs=['cam/image_array','user/angle', 'user/throttle', 'user/mode']
types=['image_array','float', 'float','str']
if cfg.HAVE_ODOM:
inputs += ['enc/speed']
types += ['float']
if cfg.TRAIN_BEHAVIORS:
inputs += ['behavior/state', 'behavior/label', "behavior/one_hot_state_array"]
types += ['int', 'str', 'vector']
if cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_DEPTH:
inputs += ['cam/depth_array']
types += ['gray16_array']
if cfg.HAVE_IMU or (cfg.CAMERA_TYPE == "D435" and cfg.REALSENSE_D435_IMU):
inputs += ['imu/acl_x', 'imu/acl_y', 'imu/acl_z',
'imu/gyr_x', 'imu/gyr_y', 'imu/gyr_z']
types +=['float', 'float', 'float',
'float', 'float', 'float']
# rbx
if cfg.ROBOPILOT_GYM:
if cfg.SIM_RECORD_LOCATION:
inputs += ['pos/pos_x', 'pos/pos_y', 'pos/pos_z', 'pos/speed', 'pos/cte']
types += ['float', 'float', 'float', 'float', 'float']
if cfg.SIM_RECORD_GYROACCEL:
inputs += ['gyro/gyro_x', 'gyro/gyro_y', 'gyro/gyro_z', 'accel/accel_x', 'accel/accel_y', 'accel/accel_z']
types += ['float', 'float', 'float', 'float', 'float', 'float']
if cfg.SIM_RECORD_VELOCITY:
inputs += ['vel/vel_x', 'vel/vel_y', 'vel/vel_z']
types += ['float', 'float', 'float']
if cfg.SIM_RECORD_LIDAR:
inputs += ['lidar/dist_array']
types += ['nparray']
if cfg.RECORD_DURING_AI:
inputs += ['pilot/angle', 'pilot/throttle']
types += ['float', 'float']
if cfg.HAVE_PERFMON:
from robopilot.parts.perfmon import PerfMonitor
mon = PerfMonitor(cfg)
perfmon_outputs = ['perf/cpu', 'perf/mem', 'perf/freq']
inputs += perfmon_outputs
types += ['float', 'float', 'float']
V.add(mon, inputs=[], outputs=perfmon_outputs, threaded=True)
# do we want to store new records into own dir or append to existing
tub_path = TubHandler(path=cfg.DATA_PATH).create_tub_path() if \
cfg.AUTO_CREATE_NEW_TUB else cfg.DATA_PATH
tub_writer = TubWriter(tub_path, inputs=inputs, types=types, metadata=meta)
V.add(tub_writer, inputs=inputs, outputs=["tub/num_records"], run_condition='recording')
# Telemetry (we add the same metrics added to the TubHandler)
if cfg.HAVE_MQTT_TELEMETRY:
telem_inputs, _ = tel.add_step_inputs(inputs, types)
V.add(tel, inputs=telem_inputs, outputs=["tub/queue_size"], threaded=True)
if cfg.PUB_CAMERA_IMAGES:
from robopilot.parts.network import TCPServeValue
from robopilot.parts.image import ImgArrToJpg
pub = TCPServeValue("camera")
V.add(ImgArrToJpg(), inputs=['cam/image_array'], outputs=['jpg/bin'])
V.add(pub, inputs=['jpg/bin'])
if type(ctr) is LocalWebController:
if cfg.ROBOPILOT_GYM:
print("You can now go to http://localhost:%d to drive your car." % cfg.WEB_CONTROL_PORT)
else:
print("You can now go to <your hostname.local>:%d to drive your car." % cfg.WEB_CONTROL_PORT)
elif (cfg.CONTROLLER_TYPE != "pigpio_rc") and (cfg.CONTROLLER_TYPE != "MM1"):
if isinstance(ctr, JoystickController):
print("You can now move your joystick to drive your car.")
ctr.set_tub(tub_writer.tub)
ctr.print_controls()
# run the vehicle
V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
if __name__ == '__main__':
args = docopt(__doc__)
cfg = dk.load_config(myconfig=args['--myconfig'])
if args['drive']:
model_type = args['--type']
camera_type = args['--camera']
drive(cfg, model_path=args['--model'], use_joystick=args['--js'],
model_type=model_type, camera_type=camera_type,
meta=args['--meta'])
elif args['train']:
print('Use python train.py instead.\n')
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
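The DriveMode part in the script above arbitrates between user and pilot control. As a minimal sketch only (all sketches in this dump are written in Go for consistency with its Go files), the same arbitration could look as follows; the function name and the 0.25 multiplier in the usage example are illustrative, and the Python version's guard against missing pilot values is omitted here.

package main

import "fmt"

// driveMode mirrors the user/local_angle/local switch: the user owns both
// controls in "user" mode, only steering is delegated in "local_angle", and
// both steering and throttle come from the pilot (scaled) in "local".
func driveMode(mode string, userAngle, userThrottle, pilotAngle, pilotThrottle, aiThrottleMult float64) (float64, float64) {
	switch mode {
	case "user":
		return userAngle, userThrottle
	case "local_angle":
		return pilotAngle, userThrottle
	default: // "local": full autopilot
		return pilotAngle, pilotThrottle * aiThrottleMult
	}
}

func main() {
	angle, throttle := driveMode("local", 0, 0, 0.1, 0.6, 0.25)
	fmt.Printf("angle=%.2f throttle=%.2f\n", angle, throttle)
}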
client/crypto/pgp/config.go
|
/**
Plik upload client
The MIT License (MIT)
Copyright (c) <2015>
- Mathieu Bodjikian <[email protected]>
- Charles-Antoine Mathieu <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
**/
package pgp
import (
"os"
"github.com/root-gg/utils"
"golang.org/x/crypto/openpgp"
)
// BackendConfig object
type BackendConfig struct {
Gpg string
Keyring string
Recipient string
Email string
Entity *openpgp.Entity
}
// NewPgpBackendConfig instantiates a new backend configuration
// from the config map passed as argument
func NewPgpBackendConfig(config map[string]interface{}) (pb *BackendConfig) {
pb = new(BackendConfig)
pb.Gpg = "/usr/bin/gpg"
pb.Keyring = os.Getenv("HOME") + "/.gnupg/pubring.gpg"
utils.Assign(pb, config)
return
}
| ["\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 | |
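The PGP config above builds its default keyring path by concatenating os.Getenv("HOME"). A small sketch of an alternative default, using only the standard library, is shown below; the fallback behaviour and the path are illustrative assumptions, not part of the original package.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// defaultKeyring resolves the user's home directory via os.UserHomeDir and
// falls back to $HOME, which keeps the result close to the original code.
func defaultKeyring() string {
	home, err := os.UserHomeDir()
	if err != nil {
		home = os.Getenv("HOME") // last resort, mirrors the original behaviour
	}
	return filepath.Join(home, ".gnupg", "pubring.gpg")
}

func main() {
	fmt.Println(defaultKeyring())
}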
manager.py
|
import cv2
import json
import numpy as np
import vision as vision
import tracker as tracker
import cinta as cinta
import classifier as classifier
from classifiers import colores
from classifiers import formas
from classifiers import codigos
from classifiers import patrones
class Manager:
vision = vision.Vision()
trackableObjects = {}
params = {}
object = None
infoFunction = None
knownObjects = None
socket = None
def __init__(self, modalidad, socket):
"""
Constructor.
modalidad: string, Modo de trabajo [color, forma, codigo, patron]
"""
self.cinta = cinta.Cinta()
self.classifier = classifier.Classifier()
self.tracker = tracker.Tracker()
self.modalidad = modalidad
self.socket = socket
self.iniciarObjeto()
# Default view parameters
dicParams = {
'showID': False,
'showTxt': False,
'showCentroid': False,
'showBoundingRect': False,
'showMask': False,
'position': 'center',
'drawContours': False,
'measure': False,
'countItems': False,
'classify': False
}
self.setVariables(dicParams)
def setVariables(self, dicParams):
"""
Actualiza el diccionario con los parámetros
dicParams: dict, Diccionario de parámetros
"""
self.params = dicParams
def iniciarObjeto(self):
"""
Inicializa objeto de acuerdo a la modalidad de trackeo
"""
if self.object:
del self.object
if self.modalidad == 'color':
self.object = colores.Colores()
self.infoFunction = self.printColorInfo
if self.modalidad == 'forma':
self.object = formas.Formas()
self.infoFunction = self.printFormaInfo
if self.modalidad == 'codigo':
self.object = codigos.Codigos()
self.infoFunction = self.printCodigoInfo
if self.modalidad == 'patron':
self.object = patrones.Patrones()
self.infoFunction = self.printPatronInfo
self.knownObjects = self.object.getKnownObjects()
if self.tracker:
del self.tracker
self.tracker = tracker.Tracker()
def getFrame(self):
"""
Lee el frame, hace un tratamiento a la imágen y detecta los objetos
"""
self.vision.readFrame()
self.vision.rotateImage()
self.vision.cutBorders([20, 0], [630, 0], [3, 478], [622, 478], False)
self.frame = self.vision.getFrame()
imgContours, finalContours = self.getContours()
if len(finalContours) != 0:
self.tracker.setTrackableObjects(finalContours)
self.trackableObjects = self.tracker.getTrackableObjects()
for key, obj in self.trackableObjects.items():
self.showInfo(imgContours, obj)
if not obj.isClassified() and (210 <= obj.getCentroidY() <= 230):
self.classify(obj)
self.sendPicture(obj)
self.vision.drawCenterLine()
return self.vision.getStringData(imgContours)
def cambioModo(self, modo):
"""
Cambia modo de trabajo
modo: string, Modo de trabajo [color, forma, codigo, patron]
"""
self.modalidad = modo
self.iniciarObjeto()
def getContours(self):
imgContours, finalContours = self.object.getContours(self.frame, returnMask=self.params['showMask'])
return imgContours, finalContours
def classify(self, obj):
try:
index = self.knownObjects[0].index(obj.getTxt())
target = self.knownObjects[1][index]
self.classifier.classify(target)
obj.setClassified()
except ValueError:
pass
def getPosition(self, align, xDes, yDes):
"""
Devuelve la posición x, y de acuerdo a la alineación pasada por parámetro
align: string, Alineación [center, right]
xDes: int, Coordenada x
yDes: int, Coordenada y
"""
if align == "center":
xDes = xDes + 20
if align == "right":
xDes = xDes + 100
yDes += 15
return xDes, yDes
def showInfo(self, frame, obj):
"""
Agrega al frame la información del objeto que se haya seteado para mostrar.
frame: array, Frame
obj: TrackableObject, Objeto detectado
"""
x = obj.getCentroidX()
y = obj.getCentroidY()
if self.params['showCentroid'] == True:
self.vision.dibujarPunto(x, y)
if self.params['showBoundingRect'] == True:
(startX, startY, w, h) = obj.bbox
cv2.rectangle(frame,(startX,startY),(startX+w,startY+h), (255,0,0), 3)
if self.params['drawContours'] == True:
cv2.drawContours(frame, obj.getContours(), -1, [0, 255, 0], 3)
if self.params['showID']:
posX, posY = self.getPosition(self.params['position'], x, y)
text = "ID {}".format(obj.objectID)
cv2.putText(frame, text, (posX, posY),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (180, 40, 180), 2)
if self.params['showTxt'] == True:
posX, posY = self.getPosition(self.params['position'], x, y + 15)
cv2.putText(frame, obj.getTxt(), (posX, posY),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (180, 40, 180), 2)
self.infoFunction(frame, obj)
def printColorInfo(self, frame, obj):
"""
Agrega al frame información particular sobre el color.
frame: array, Frame
obj: TrackableObject, Objeto detectado
"""
if self.params['countItems'] == True:
if obj.counted == False:
self.object.total[obj.color] += 1
obj.setCounted()
for i in range(len(self.object.colores)):
text = "{}: {}".format(self.object.colores[i], self.object.total[i])
cv2.putText(frame, text, (10, ((i * 20) + 20)),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
def printFormaInfo(self, frame, obj):
"""
Agrega al frame información particular sobre la forma.
frame: array, Frame
obj: TrackableObject, Objeto detectado
"""
x = obj.getCentroidX()
y = obj.getCentroidY()
(startX, startY, w, h) = obj.bbox
forma = obj.getTxt()
if self.params['measure'] == True:
if (forma == 'Cuadrado') or (forma == 'Rectangulo'):
mW = round((self.object.findDis(obj.poli[0][0], obj.poli[1][0])), 1)
nH = round((self.object.findDis(obj.poli[0][0], obj.poli[2][0])), 1)
cv2.arrowedLine(frame, (obj.poli[0][0][0],obj.poli[0][0][1]),
(obj.poli[1][0][0], obj.poli[1][0][1]), (255,0,255), 2)
cv2.arrowedLine(frame, (obj.poli[0][0][0],obj.poli[0][0][1]),
(obj.poli[2][0][0], obj.poli[2][0][1]), (255,0,255), 2)
cv2.putText(frame, '{}cm'.format(mW), (startX+30,startY-10), cv2.FONT_HERSHEY_COMPLEX, .7,
(255,0,255), 2)
cv2.putText(frame, '{}cm'.format(nH), (startX-70,startY+h//2), cv2.FONT_HERSHEY_COMPLEX, .7, (255,0,255), 2)
if forma == 'Circulo':
border = obj.getContours()[0][0]
mW = round((self.object.findDis([x, y], [border[0], border[1]])), 1)
cv2.arrowedLine(frame, (x, y), (border[0], border[1]), (255,0,255), 2)
cv2.putText(frame, '{}cm'.format(mW), (startX+30,startY-10), cv2.FONT_HERSHEY_COMPLEX, .7,
(255,0,255), 2)
def printCodigoInfo(self, frame, obj):
"""
Agrega al frame información particular sobre el código.
frame: array, Frame
obj: TrackableObject, Objeto detectado
"""
pass
def printPatronInfo(self, frame, obj):
"""
Agrega al frame información particular sobre el patrón.
frame: array, Frame
obj: TrackableObject, Objeto detectado
"""
pass
def moverCinta(self, velocidad):
"""
Inicia elmovimiento de la cinta.
velocidad: int, Velocidad (Valor de 0 al 6)
"""
self.cinta.setVelocidad(velocidad)
def cambiarDireccionCinta(self):
"""
Cambia la dirección de la cinta.
"""
self.cinta.setDireccion()
def toggleShowID(self):
"""
Muestra u oculta ID.
"""
self.params['showID'] = not self.params['showID']
def toggleShowCentroid(self):
"""
Muestra u oculta centro del objeto.
"""
self.params['showCentroid'] = not self.params['showCentroid']
def toggleDrawContours(self):
"""
Muestra u oculta contornos del objeto.
"""
self.params['drawContours'] = not self.params['drawContours']
def toggleShowTxt(self):
"""
Muestra u oculta texto del objeto.
"""
self.params['showTxt'] = not self.params['showTxt']
def toggleShowBoundingRect(self):
"""
Muestra u oculta rectángulo del objeto.
"""
self.params['showBoundingRect'] = not self.params['showBoundingRect']
def toggleShowMeasure(self):
"""
Muestra u oculta medida del objeto (sólo en modalidad 'forma').
"""
self.params['measure'] = not self.params['measure']
def toggleShowMask(self):
"""
Muestra frame o máscara.
"""
self.params['showMask'] = not self.params['showMask']
def getColorRanges(self):
"""
Recupera rangos de colores (sólo en modalidad 'color').
"""
return self.object.getColorRanges()
def setColorRanges(self, colores):
"""
Setea rangos de colores (sólo en modalidad 'color').
"""
return self.object.setColorRanges(colores)
def sendPicture(self, obj=None):
"""
Toma una foto y la envía por socket al cliente.
obj: TrackableObject, Objeto detectado [opcional]
"""
encImage = self.vision.getEncodedImage(self.frame)
objectID = obj.getObjectID() if obj else "F"
objectText = obj.getTxt() if obj else "Foto"
self.socket.emit('addList', json.dumps({
'id': objectID,
'objeto': objectText,
'imagen': encImage
}))
| [] | [] | [] | [] | [] | python | null | null | null |
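The classification gate in Manager.getFrame above fires only while an unclassified object's centroid sits in a narrow band (210-230 px, taken from the source). A minimal sketch of that check, written in Go like the other sketches in this dump, is shown below; the function name is an illustrative assumption.

package main

import "fmt"

// shouldClassify returns true when the object has not been classified yet and
// its centroid lies inside the horizontal band in front of the classifier.
func shouldClassify(classified bool, centroidY, bandLow, bandHigh int) bool {
	return !classified && centroidY >= bandLow && centroidY <= bandHigh
}

func main() {
	fmt.Println(shouldClassify(false, 220, 210, 230)) // true: inside the band
	fmt.Println(shouldClassify(false, 250, 210, 230)) // false: already past it
}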
pkg/kubectl/main.go
|
package kubectl
import (
goflag "flag"
"fmt"
"math/rand"
"os"
"time"
"github.com/rancher/k3s/pkg/server"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
utilflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/logs"
"k8s.io/kubectl/pkg/cmd"
)
func Main() {
kubenv := os.Getenv("KUBECONFIG")
if kubenv == "" {
config, err := server.HomeKubeConfig(false, false)
if _, serr := os.Stat(config); err == nil && serr == nil {
os.Setenv("KUBECONFIG", config)
}
if err := checkReadConfigPermissions(config); err != nil {
logrus.Warn(err)
}
}
main()
}
func main() {
rand.Seed(time.Now().UnixNano())
command := cmd.NewDefaultKubectlCommand()
// TODO: once we switch everything over to Cobra commands, we can go back to calling
// utilflag.InitFlags() (by removing its pflag.Parse() call). For now, we have to set the
// normalize func and add the go flag set by hand.
pflag.CommandLine.SetNormalizeFunc(utilflag.WordSepNormalizeFunc)
pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
// utilflag.InitFlags()
logs.InitLogs()
defer logs.FlushLogs()
if err := command.Execute(); err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
}
func checkReadConfigPermissions(configFile string) error {
file, err := os.OpenFile(configFile, os.O_RDONLY, 0600)
if err != nil {
if os.IsPermission(err) {
return fmt.Errorf("Unable to read %s, please start server "+
"with --write-kubeconfig-mode to modify kube config permissions", configFile)
}
}
file.Close()
return nil
}
| ["\"KUBECONFIG\""] | [] | ["KUBECONFIG"] | [] | ["KUBECONFIG"] | go | 1 | 0 | |
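checkReadConfigPermissions above probes whether the kubeconfig can be opened and special-cases permission errors. A stripped-down sketch of that probe pattern, standard library only, follows; the sample path is illustrative and the helper name is an assumption, not part of k3s.

package main

import (
	"fmt"
	"os"
)

// canRead reports whether the current user can open the file for reading,
// distinguishing a permission problem from other errors (e.g. file missing).
func canRead(path string) (bool, error) {
	f, err := os.Open(path)
	if err != nil {
		if os.IsPermission(err) {
			return false, nil // exists but unreadable by this user
		}
		return false, err // some other problem (not found, etc.)
	}
	f.Close()
	return true, nil
}

func main() {
	ok, err := canRead("/etc/rancher/k3s/k3s.yaml")
	fmt.Println(ok, err)
}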
streaminghandler.go
|
package main
import (
"bytes"
"log"
"net/http"
"os"
"os/exec"
"strconv"
"time"
gdrive "github.com/Tony-Liou/Youtube-Upload-Video/google-drive"
ytuploader "github.com/Tony-Liou/Youtube-Upload-Video/youtube"
)
// Dump the target stream, return a current time string and a filename (with path)
func execStreamlink(streamURL string) (string, string) {
t := time.Now()
curTime := t.Format("2006/01/02_15:04:05_Mon")
curTimeStamp := strconv.FormatInt(t.UnixNano(), 10)
app := "streamlink"
option := "-o"
filename := "stream" + curTimeStamp
url := streamURL
quality := "best"
cmd := exec.Command(app, option, filename, url, quality)
var out bytes.Buffer
var stderr bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &stderr
log.Println("Streamlink starting...")
err := cmd.Run()
if err != nil {
log.Printf("%v : %s\n", err, stderr.String())
}
log.Println("Result: ", out.String())
return curTime, filename
}
func removeVideoFile(path string) {
err := os.Remove(path)
if err != nil {
log.Println(err)
}
}
func notifyVideoId(videoID string) {
url := os.Getenv("NOTIFICATION_URL")
if url == "" {
return
}
url += "?videoID=" + videoID
resp, err := http.Get(url)
if err != nil {
log.Println(err)
return
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
log.Printf("Status code: %v, status: %v\n", resp.StatusCode, resp.Status)
}
}
// processStreaming executes streamlink to dump the live stream.
// After the live stream ends, it uploads the video to YouTube.
func processStreaming(streamURL, privacy string) {
log.Println("Processing streaming...")
recordTime, uri := execStreamlink(streamURL)
setting := &ytuploader.VideoSetting{
Filename: uri,
Title: recordTime,
Description: streamURL,
Category: "22",
Privacy: privacy,
//Language: "zh-TW",
}
videoID, err := ytuploader.UploadVideo(setting)
if err != nil {
log.Println("Upload video failed. Starting uploading video to Google Drive.")
gdrive.UploadVideo(uri, recordTime, "")
} else {
log.Println("Video uploaded. ID: ", videoID)
removeVideoFile(uri)
notifyVideoId(videoID)
}
}
| ["\"NOTIFICATION_URL\""] | [] | ["NOTIFICATION_URL"] | [] | ["NOTIFICATION_URL"] | go | 1 | 0 | |
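notifyVideoId above appends the video ID to NOTIFICATION_URL by string concatenation. As a sketch of an alternative (not the original implementation), net/url can build the query so that IDs containing reserved characters are escaped; the function name is an assumption.

package main

import (
	"fmt"
	"net/url"
	"os"
)

// notificationURL reads the same NOTIFICATION_URL variable used above and
// attaches the videoID as a properly encoded query parameter.
func notificationURL(videoID string) (string, error) {
	base := os.Getenv("NOTIFICATION_URL")
	if base == "" {
		return "", fmt.Errorf("NOTIFICATION_URL is not set")
	}
	u, err := url.Parse(base)
	if err != nil {
		return "", err
	}
	q := u.Query()
	q.Set("videoID", videoID)
	u.RawQuery = q.Encode()
	return u.String(), nil
}

func main() {
	os.Setenv("NOTIFICATION_URL", "https://example.com/notify")
	fmt.Println(notificationURL("abc 123"))
}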
k8sclient/k8sclient.go
|
// Copyright (c) 2017 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package k8sclient
import (
"encoding/json"
"fmt"
"os"
"regexp"
"strings"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/retry"
"github.com/containernetworking/cni/libcni"
"github.com/containernetworking/cni/pkg/skel"
cnitypes "github.com/containernetworking/cni/pkg/types"
"github.com/intel/multus-cni/checkpoint"
"github.com/intel/multus-cni/logging"
"github.com/intel/multus-cni/types"
)
const (
resourceNameAnnot = "k8s.v1.cni.cncf.io/resourceName"
defaultNetAnnot = "v1.multus-cni.io/default-network"
)
// NoK8sNetworkError indicates error, no network in kubernetes
type NoK8sNetworkError struct {
message string
}
type clientInfo struct {
Client KubeClient
Podnamespace string
Podname string
}
func (e *NoK8sNetworkError) Error() string { return string(e.message) }
type defaultKubeClient struct {
client kubernetes.Interface
}
// defaultKubeClient implements KubeClient
var _ KubeClient = &defaultKubeClient{}
func (d *defaultKubeClient) GetRawWithPath(path string) ([]byte, error) {
return d.client.ExtensionsV1beta1().RESTClient().Get().AbsPath(path).DoRaw()
}
func (d *defaultKubeClient) GetPod(namespace, name string) (*v1.Pod, error) {
return d.client.Core().Pods(namespace).Get(name, metav1.GetOptions{})
}
func (d *defaultKubeClient) UpdatePodStatus(pod *v1.Pod) (*v1.Pod, error) {
return d.client.Core().Pods(pod.Namespace).UpdateStatus(pod)
}
func setKubeClientInfo(c *clientInfo, client KubeClient, k8sArgs *types.K8sArgs) {
logging.Debugf("setKubeClientInfo: %v, %v, %v", c, client, k8sArgs)
c.Client = client
c.Podnamespace = string(k8sArgs.K8S_POD_NAMESPACE)
c.Podname = string(k8sArgs.K8S_POD_NAME)
}
func SetNetworkStatus(k *clientInfo, netStatus []*types.NetworkStatus) error {
logging.Debugf("SetNetworkStatus: %v, %v", k, netStatus)
pod, err := k.Client.GetPod(k.Podnamespace, k.Podname)
if err != nil {
return logging.Errorf("SetNetworkStatus: failed to query the pod %v in out of cluster comm: %v", k.Podname, err)
}
var networkStatuses string
if netStatus != nil {
var networkStatus []string
for _, status := range netStatus {
data, err := json.MarshalIndent(status, "", " ")
if err != nil {
return logging.Errorf("SetNetworkStatus: error with Marshal Indent: %v", err)
}
networkStatus = append(networkStatus, string(data))
}
networkStatuses = fmt.Sprintf("[%s]", strings.Join(networkStatus, ","))
}
_, err = setPodNetworkAnnotation(k.Client, k.Podnamespace, pod, networkStatuses)
if err != nil {
return logging.Errorf("SetNetworkStatus: failed to update the pod %v in out of cluster comm: %v", k.Podname, err)
}
return nil
}
func setPodNetworkAnnotation(client KubeClient, namespace string, pod *v1.Pod, networkstatus string) (*v1.Pod, error) {
logging.Debugf("setPodNetworkAnnotation: %v, %s, %v, %s", client, namespace, pod, networkstatus)
// if the pod's annotations map is empty, make sure it is allocated
if len(pod.Annotations) == 0 {
pod.Annotations = make(map[string]string)
}
pod.Annotations["k8s.v1.cni.cncf.io/networks-status"] = networkstatus
pod = pod.DeepCopy()
var err error
if resultErr := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
if err != nil {
// Re-get the pod unless it's the first attempt to update
pod, err = client.GetPod(pod.Namespace, pod.Name)
if err != nil {
return err
}
}
pod, err = client.UpdatePodStatus(pod)
return err
}); resultErr != nil {
return nil, logging.Errorf("status update failed for pod %s/%s: %v", pod.Namespace, pod.Name, resultErr)
}
return pod, nil
}
func getPodNetworkAnnotation(client KubeClient, k8sArgs *types.K8sArgs) (string, string, string, error) {
var err error
logging.Debugf("getPodNetworkAnnotation: %v, %v", client, k8sArgs)
pod, err := client.GetPod(string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME))
if err != nil {
return "", "", "", logging.Errorf("getPodNetworkAnnotation: failed to query the pod %v in out of cluster comm: %v", string(k8sArgs.K8S_POD_NAME), err)
}
return pod.Annotations["k8s.v1.cni.cncf.io/networks"], pod.ObjectMeta.Namespace, string(pod.UID), nil
}
func parsePodNetworkObjectName(podnetwork string) (string, string, string, error) {
var netNsName string
var netIfName string
var networkName string
logging.Debugf("parsePodNetworkObjectName: %s", podnetwork)
slashItems := strings.Split(podnetwork, "/")
if len(slashItems) == 2 {
netNsName = strings.TrimSpace(slashItems[0])
networkName = slashItems[1]
} else if len(slashItems) == 1 {
networkName = slashItems[0]
} else {
return "", "", "", logging.Errorf("Invalid network object (failed at '/')")
}
atItems := strings.Split(networkName, "@")
networkName = strings.TrimSpace(atItems[0])
if len(atItems) == 2 {
netIfName = strings.TrimSpace(atItems[1])
} else if len(atItems) != 1 {
return "", "", "", logging.Errorf("Invalid network object (failed at '@')")
}
// Check and see if each item matches the specification for valid attachment name.
// "Valid attachment names must be comprised of units of the DNS-1123 label format"
// [a-z0-9]([-a-z0-9]*[a-z0-9])?
// And we allow at (@), and forward slash (/) (units separated by commas)
// It must start and end alphanumerically.
allItems := []string{netNsName, networkName, netIfName}
for i := range allItems {
matched, _ := regexp.MatchString("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$", allItems[i])
if !matched && len([]rune(allItems[i])) > 0 {
return "", "", "", logging.Errorf(fmt.Sprintf("Failed to parse: one or more items did not match comma-delimited format (must consist of lower case alphanumeric characters). Must start and end with an alphanumeric character), mismatch @ '%v'", allItems[i]))
}
}
logging.Debugf("parsePodNetworkObjectName: parsed: %s, %s, %s", netNsName, networkName, netIfName)
return netNsName, networkName, netIfName, nil
}
func parsePodNetworkAnnotation(podNetworks, defaultNamespace string) ([]*types.NetworkSelectionElement, error) {
var networks []*types.NetworkSelectionElement
logging.Debugf("parsePodNetworkAnnotation: %s, %s", podNetworks, defaultNamespace)
if podNetworks == "" {
return nil, logging.Errorf("parsePodNetworkAnnotation: pod annotation not having \"network\" as key, refer Multus README.md for the usage guide")
}
if strings.IndexAny(podNetworks, "[{\"") >= 0 {
if err := json.Unmarshal([]byte(podNetworks), &networks); err != nil {
return nil, logging.Errorf("parsePodNetworkAnnotation: failed to parse pod Network Attachment Selection Annotation JSON format: %v", err)
}
} else {
// Comma-delimited list of network attachment object names
for _, item := range strings.Split(podNetworks, ",") {
// Remove leading and trailing whitespace.
item = strings.TrimSpace(item)
// Parse network name (i.e. <namespace>/<network name>@<ifname>)
netNsName, networkName, netIfName, err := parsePodNetworkObjectName(item)
if err != nil {
return nil, logging.Errorf("parsePodNetworkAnnotation: %v", err)
}
networks = append(networks, &types.NetworkSelectionElement{
Name: networkName,
Namespace: netNsName,
InterfaceRequest: netIfName,
})
}
}
for _, net := range networks {
if net.Namespace == "" {
net.Namespace = defaultNamespace
}
}
return networks, nil
}
func getCNIConfigFromFile(name string, confdir string) ([]byte, error) {
logging.Debugf("getCNIConfigFromFile: %s, %s", name, confdir)
// In the absence of valid keys in a Spec, the runtime (or
// meta-plugin) should load and execute a CNI .configlist
// or .config (in that order) file on-disk whose JSON
// “name” key matches this Network object’s name.
// In part, adapted from K8s pkg/kubelet/dockershim/network/cni/cni.go#getDefaultCNINetwork
files, err := libcni.ConfFiles(confdir, []string{".conf", ".json", ".conflist"})
switch {
case err != nil:
return nil, logging.Errorf("No networks found in %s", confdir)
case len(files) == 0:
return nil, logging.Errorf("No networks found in %s", confdir)
}
for _, confFile := range files {
var confList *libcni.NetworkConfigList
if strings.HasSuffix(confFile, ".conflist") {
confList, err = libcni.ConfListFromFile(confFile)
if err != nil {
return nil, logging.Errorf("Error loading CNI conflist file %s: %v", confFile, err)
}
if confList.Name == name || name == "" {
return confList.Bytes, nil
}
} else {
conf, err := libcni.ConfFromFile(confFile)
if err != nil {
return nil, logging.Errorf("Error loading CNI config file %s: %v", confFile, err)
}
if conf.Network.Name == name || name == "" {
// Ensure the config has a "type" so we know what plugin to run.
// Also catches the case where somebody put a conflist into a conf file.
if conf.Network.Type == "" {
return nil, logging.Errorf("Error loading CNI config file %s: no 'type'; perhaps this is a .conflist?", confFile)
}
return conf.Bytes, nil
}
}
}
return nil, logging.Errorf("no network available in the name %s in cni dir %s", name, confdir)
}
// getCNIConfigFromSpec reads a CNI JSON configuration from the NetworkAttachmentDefinition
// object's Spec.Config field and fills in any missing details like the network name
func getCNIConfigFromSpec(configData, netName string) ([]byte, error) {
var rawConfig map[string]interface{}
var err error
logging.Debugf("getCNIConfigFromSpec: %s, %s", configData, netName)
configBytes := []byte(configData)
err = json.Unmarshal(configBytes, &rawConfig)
if err != nil {
return nil, logging.Errorf("getCNIConfigFromSpec: failed to unmarshal Spec.Config: %v", err)
}
// Inject network name if missing from Config for the thick plugin case
if n, ok := rawConfig["name"]; !ok || n == "" {
rawConfig["name"] = netName
configBytes, err = json.Marshal(rawConfig)
if err != nil {
return nil, logging.Errorf("getCNIConfigFromSpec: failed to re-marshal Spec.Config: %v", err)
}
}
return configBytes, nil
}
func cniConfigFromNetworkResource(customResource *types.NetworkAttachmentDefinition, confdir string) ([]byte, error) {
var config []byte
var err error
logging.Debugf("cniConfigFromNetworkResource: %v, %s", customResource, confdir)
emptySpec := types.NetworkAttachmentDefinitionSpec{}
if customResource.Spec == emptySpec {
// Network Spec empty; generate delegate from CNI JSON config
// from the configuration directory that has the same network
// name as the custom resource
config, err = getCNIConfigFromFile(customResource.Metadata.Name, confdir)
if err != nil {
return nil, logging.Errorf("cniConfigFromNetworkResource: err in getCNIConfigFromFile: %v", err)
}
} else {
// Config contains a standard JSON-encoded CNI configuration
// or configuration list which defines the plugin chain to
// execute.
config, err = getCNIConfigFromSpec(customResource.Spec.Config, customResource.Metadata.Name)
if err != nil {
return nil, logging.Errorf("cniConfigFromNetworkResource: err in getCNIConfigFromSpec: %v", err)
}
}
return config, nil
}
func getKubernetesDelegate(client KubeClient, net *types.NetworkSelectionElement, confdir string, podID string, resourceMap map[string]*types.ResourceInfo) (*types.DelegateNetConf, map[string]*types.ResourceInfo, error) {
logging.Debugf("getKubernetesDelegate: %v, %v, %s", client, net, confdir)
rawPath := fmt.Sprintf("/apis/k8s.cni.cncf.io/v1/namespaces/%s/network-attachment-definitions/%s", net.Namespace, net.Name)
netData, err := client.GetRawWithPath(rawPath)
if err != nil {
return nil, resourceMap, logging.Errorf("getKubernetesDelegate: failed to get network resource, refer Multus README.md for the usage guide: %v", err)
}
customResource := &types.NetworkAttachmentDefinition{}
if err := json.Unmarshal(netData, customResource); err != nil {
return nil, resourceMap, logging.Errorf("getKubernetesDelegate: failed to get the netplugin data: %v", err)
}
// Get resourceName annotation from NetworkAttachmentDefinition
deviceID := ""
resourceName, ok := customResource.Metadata.Annotations[resourceNameAnnot]
if ok && podID != "" {
// ResourceName annotation is found; try to get device info from resourceMap
logging.Debugf("getKubernetesDelegate: found resourceName annotation : %s", resourceName)
if resourceMap == nil {
checkpoint, err := checkpoint.GetCheckpoint()
if err != nil {
return nil, resourceMap, logging.Errorf("getKubernetesDelegate: failed to get a checkpoint instance: %v", err)
}
resourceMap, err = checkpoint.GetComputeDeviceMap(podID)
if err != nil {
return nil, resourceMap, logging.Errorf("getKubernetesDelegate: failed to get resourceMap from kubelet checkpoint file: %v", err)
}
logging.Debugf("getKubernetesDelegate(): resourceMap instance: %+v", resourceMap)
}
entry, ok := resourceMap[resourceName]
if ok {
if idCount := len(entry.DeviceIDs); idCount > 0 && idCount > entry.Index {
deviceID = entry.DeviceIDs[entry.Index]
logging.Debugf("getKubernetesDelegate: podID: %s deviceID: %s", podID, deviceID)
entry.Index++ // increment Index for next delegate
}
}
}
configBytes, err := cniConfigFromNetworkResource(customResource, confdir)
if err != nil {
return nil, resourceMap, err
}
delegate, err := types.LoadDelegateNetConf(configBytes, net, deviceID)
if err != nil {
return nil, resourceMap, err
}
return delegate, resourceMap, nil
}
type KubeClient interface {
GetRawWithPath(path string) ([]byte, error)
GetPod(namespace, name string) (*v1.Pod, error)
UpdatePodStatus(pod *v1.Pod) (*v1.Pod, error)
}
func GetK8sArgs(args *skel.CmdArgs) (*types.K8sArgs, error) {
k8sArgs := &types.K8sArgs{}
logging.Debugf("GetK8sArgs: %v", args)
err := cnitypes.LoadArgs(args.Args, k8sArgs)
if err != nil {
return nil, err
}
return k8sArgs, nil
}
// Attempts to load Kubernetes-defined delegates and add them to the Multus config.
// Returns the number of Kubernetes-defined delegates added or an error.
func TryLoadPodDelegates(k8sArgs *types.K8sArgs, conf *types.NetConf, kubeClient KubeClient) (int, *clientInfo, error) {
var err error
clientInfo := &clientInfo{}
logging.Debugf("TryLoadPodDelegates: %v, %v, %v", k8sArgs, conf, kubeClient)
kubeClient, err = GetK8sClient(conf.Kubeconfig, kubeClient)
if err != nil {
return 0, nil, err
}
if kubeClient == nil {
if len(conf.Delegates) == 0 {
// No available kube client and no delegates, we can't do anything
return 0, nil, logging.Errorf("must have either Kubernetes config or delegates, refer Multus README.md for the usage guide")
}
return 0, nil, nil
}
setKubeClientInfo(clientInfo, kubeClient, k8sArgs)
delegate, err := tryLoadK8sPodDefaultNetwork(k8sArgs, conf, kubeClient)
if err != nil {
return 0, nil, logging.Errorf("tryLoadK8sDelegates: Err in loading K8s cluster default network from pod annotation: %v", err)
}
if delegate != nil {
logging.Debugf("tryLoadK8sDelegates: Overwrite the cluster default network with %v from pod annotations", delegate)
conf.Delegates[0] = delegate
}
delegates, err := GetPodNetwork(kubeClient, k8sArgs, conf.ConfDir, conf.NamespaceIsolation)
if err != nil {
if _, ok := err.(*NoK8sNetworkError); ok {
return 0, clientInfo, nil
}
return 0, nil, logging.Errorf("Multus: Err in getting k8s network from pod: %v", err)
}
if err = conf.AddDelegates(delegates); err != nil {
return 0, nil, err
}
return len(delegates), clientInfo, nil
}
func GetK8sClient(kubeconfig string, kubeClient KubeClient) (KubeClient, error) {
logging.Debugf("GetK8sClient: %s, %v", kubeconfig, kubeClient)
// If we get a valid kubeClient (eg from testcases) just return that
// one.
if kubeClient != nil {
return kubeClient, nil
}
var err error
var config *rest.Config
// Otherwise try to create a kubeClient from a given kubeConfig
if kubeconfig != "" {
// uses the current context in kubeconfig
config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
return nil, logging.Errorf("GetK8sClient: failed to get context for the kubeconfig %v, refer Multus README.md for the usage guide: %v", kubeconfig, err)
}
} else if os.Getenv("KUBERNETES_SERVICE_HOST") != "" && os.Getenv("KUBERNETES_SERVICE_PORT") != "" {
// Try in-cluster config where multus might be running in a kubernetes pod
config, err = rest.InClusterConfig()
if err != nil {
return nil, logging.Errorf("createK8sClient: failed to get context for in-cluster kube config, refer Multus README.md for the usage guide: %v", err)
}
} else {
// No kubernetes config; assume we shouldn't talk to Kube at all
return nil, nil
}
// Specify that we use gRPC
config.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json"
config.ContentType = "application/vnd.kubernetes.protobuf"
// creates the clientset
client, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
return &defaultKubeClient{client: client}, nil
}
func GetPodNetwork(k8sclient KubeClient, k8sArgs *types.K8sArgs, confdir string, confnamespaceisolation bool) ([]*types.DelegateNetConf, error) {
logging.Debugf("GetPodNetwork: %v, %v, %v", k8sclient, k8sArgs, confdir)
netAnnot, defaultNamespace, podID, err := getPodNetworkAnnotation(k8sclient, k8sArgs)
if err != nil {
return nil, err
}
if len(netAnnot) == 0 {
return nil, &NoK8sNetworkError{"no kubernetes network found"}
}
networks, err := parsePodNetworkAnnotation(netAnnot, defaultNamespace)
if err != nil {
return nil, err
}
// resourceMap holds Pod device allocation information; only initialized if the CRD contains the 'resourceName' annotation.
// This will only be initialized once and all delegate objects can reference this to look up device info.
var resourceMap map[string]*types.ResourceInfo
// Read all network objects referenced by 'networks'
var delegates []*types.DelegateNetConf
for _, net := range networks {
// The pod's namespace (stored as defaultNamespace) may not equal the annotation's target namespace in net.Namespace.
// In the case that this is a mismatch when namespaceisolation is enabled, this should be an error.
if confnamespaceisolation {
if defaultNamespace != net.Namespace {
return nil, logging.Errorf("GetPodNetwork: namespace isolation violation: podnamespace: %v / target namespace: %v", defaultNamespace, net.Namespace)
}
}
delegate, updatedResourceMap, err := getKubernetesDelegate(k8sclient, net, confdir, podID, resourceMap)
if err != nil {
return nil, logging.Errorf("GetPodNetwork: failed getting the delegate: %v", err)
}
delegates = append(delegates, delegate)
resourceMap = updatedResourceMap
}
return delegates, nil
}
func getDefaultNetDelegateCRD(client KubeClient, net, confdir, namespace string) (*types.DelegateNetConf, error) {
logging.Debugf("getDefaultNetDelegateCRD: %v, %v, %s, %s", client, net, confdir, namespace)
rawPath := fmt.Sprintf("/apis/k8s.cni.cncf.io/v1/namespaces/%s/network-attachment-definitions/%s", namespace, net)
netData, err := client.GetRawWithPath(rawPath)
if err != nil {
return nil, logging.Errorf("getDefaultNetDelegateCRD: failed to get network resource, refer Multus README.md for the usage guide: %v", err)
}
customResource := &types.NetworkAttachmentDefinition{}
if err := json.Unmarshal(netData, customResource); err != nil {
return nil, logging.Errorf("getDefaultNetDelegateCRD: failed to get the netplugin data: %v", err)
}
configBytes, err := cniConfigFromNetworkResource(customResource, confdir)
if err != nil {
return nil, err
}
delegate, err := types.LoadDelegateNetConf(configBytes, nil, "")
if err != nil {
return nil, err
}
return delegate, nil
}
func getNetDelegate(client KubeClient, netname, confdir, namespace string) (*types.DelegateNetConf, error) {
logging.Debugf("getNetDelegate: %v, %v, %v, %s", client, netname, confdir, namespace)
// option1) search CRD object for the network
delegate, err := getDefaultNetDelegateCRD(client, netname, confdir, namespace)
if err == nil {
return delegate, nil
}
// option2) search CNI json config file
var configBytes []byte
configBytes, err = getCNIConfigFromFile(netname, confdir)
if err == nil {
delegate, err := types.LoadDelegateNetConf(configBytes, nil, "")
if err != nil {
return nil, err
}
return delegate, nil
}
// option3) search directory
fInfo, err := os.Stat(netname)
if err == nil {
if fInfo.IsDir() {
files, err := libcni.ConfFiles(netname, []string{".conf", ".conflist"})
if err != nil {
return nil, err
}
if len(files) > 0 {
var configBytes []byte
configBytes, err = getCNIConfigFromFile("", netname)
if err == nil {
delegate, err := types.LoadDelegateNetConf(configBytes, nil, "")
if err != nil {
return nil, err
}
return delegate, nil
}
return nil, err
}
}
}
return nil, logging.Errorf("getNetDelegate: cannot find network: %v", netname)
}
// GetDefaultNetworks parses the 'defaultNetwork' config, gets the network json and puts it into netconf.Delegates.
func GetDefaultNetworks(k8sArgs *types.K8sArgs, conf *types.NetConf, kubeClient KubeClient) error {
logging.Debugf("GetDefaultNetworks: %v, %v, %v", k8sArgs, conf, kubeClient)
var delegates []*types.DelegateNetConf
kubeClient, err := GetK8sClient(conf.Kubeconfig, kubeClient)
if err != nil {
return err
}
if kubeClient == nil {
if len(conf.Delegates) == 0 {
// No available kube client and no delegates, we can't do anything
return logging.Errorf("must have either Kubernetes config or delegates, refer Multus README.md for the usage guide")
}
return nil
}
delegate, err := getNetDelegate(kubeClient, conf.ClusterNetwork, conf.ConfDir, conf.MultusNamespace)
if err != nil {
return err
}
delegate.MasterPlugin = true
delegates = append(delegates, delegate)
// Pod in kube-system namespace does not have default network for now.
if !types.CheckSystemNamespaces(string(k8sArgs.K8S_POD_NAMESPACE), conf.SystemNamespaces) {
for _, netname := range conf.DefaultNetworks {
delegate, err := getNetDelegate(kubeClient, netname, conf.ConfDir, conf.MultusNamespace)
if err != nil {
return err
}
delegates = append(delegates, delegate)
}
}
if err = conf.AddDelegates(delegates); err != nil {
return err
}
return nil
}
func getPodDefaultNetworkAnnotation(client KubeClient, k8sArgs *types.K8sArgs) (string, error) {
logging.Debugf("getPodDefaultNetworkAnnotation: %v, %v", client, k8sArgs)
pod, err := client.GetPod(string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME))
if err != nil {
return "", logging.Errorf("getPodDefaultNetworkAnnotation: failed to query the pod %v in out of cluster comm: %v", string(k8sArgs.K8S_POD_NAME), err)
}
if v, ok := pod.Annotations[defaultNetAnnot]; ok {
return v, nil
}
return "", nil
}
// tryLoadK8sPodDefaultNetwork get pod default network from annotations
func tryLoadK8sPodDefaultNetwork(k8sArgs *types.K8sArgs, conf *types.NetConf, kubeClient KubeClient) (*types.DelegateNetConf, error) {
logging.Debugf("tryLoadK8sPodDefaultNetwork: %v, %v", kubeClient, k8sArgs)
netAnnot, err := getPodDefaultNetworkAnnotation(kubeClient, k8sArgs)
if err != nil {
return nil, logging.Errorf("tryLoadK8sPodDefaultNetwork: failed to get pod annotation: %v", err)
}
if netAnnot == "" {
logging.Debugf("tryLoadK8sPodDefaultNetwork: Pod default network annotation is not defined")
return nil, nil
}
// The CRD object of default network should only be defined in multusNamespace
networks, err := parsePodNetworkAnnotation(netAnnot, conf.MultusNamespace)
if err != nil {
return nil, logging.Errorf("tryLoadK8sPodDefaultNetwork: failed to parse CRD object: %v", err)
}
if len(networks) > 1 {
return nil, logging.Errorf("tryLoadK8sPodDefaultNetwork: more than one default network is specified: %s", netAnnot)
}
delegate, _, err := getKubernetesDelegate(kubeClient, networks[0], conf.ConfDir, "", nil)
if err != nil {
return nil, logging.Errorf("tryLoadK8sPodDefaultNetwork: failed getting the delegate: %v", err)
}
delegate.MasterPlugin = true
return delegate, nil
}
| ["\"KUBERNETES_SERVICE_HOST\"", "\"KUBERNETES_SERVICE_PORT\""] | [] | ["KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT"] | [] | ["KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT"] | go | 2 | 0 | |
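parsePodNetworkObjectName above validates each unit of a network attachment name against the DNS-1123 label format quoted in its comments. A self-contained sketch of just that check follows; the regex is copied from the source comment, the sample names and helper name are illustrative.

package main

import (
	"fmt"
	"regexp"
)

var dns1123Label = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$`)

// validUnit accepts an empty unit (namespace or ifname may be omitted) or a
// DNS-1123 label, mirroring the "matched or zero-length" logic in the source.
func validUnit(s string) bool {
	return s == "" || dns1123Label.MatchString(s)
}

func main() {
	for _, s := range []string{"macvlan-conf", "kube-system", "Bad_Name", ""} {
		fmt.Printf("%q -> %v\n", s, validUnit(s))
	}
}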
cli/pkg/credentialmanager/handler.go
|
package credentialmanager
import (
"bufio"
"fmt"
"log"
"net/url"
"os"
"path/filepath"
"strings"
"github.com/docker/docker-credential-helpers/credentials"
"github.com/keptn/keptn/cli/pkg/config"
"github.com/keptn/keptn/cli/pkg/file"
keptnutils "github.com/keptn/kubernetes-utils/pkg"
"gopkg.in/yaml.v2"
)
var testEndPoint = url.URL{Scheme: "https", Host: "my-endpoint"}
const testAPIToken = "super-secret"
const testNamespace = "keptn-test-namespace"
const credsLab = "keptn"
const serverURL = "https://keptn.sh"
const installCredsKey = "https://keptn-install.sh"
var MockAuthCreds bool
var MockKubeConfigCheck bool
// GlobalCheckForContextChange ... Since the credential manager is called multiple times, we don't want to check for a context switch in multiple places for a single command;
// it should be checked only the first time.
var GlobalCheckForContextChange bool
type keptnConfig struct {
APIToken string `yaml:"api_token"`
Endpoint string `yaml:"endpoint"`
ContextName string `yaml:"name"`
KeptnNamespace string `yaml:"namespace"`
}
type keptnConfigFile struct {
Contexts []keptnConfig `yaml:"contexts"`
}
type kubeConfigFileType struct {
CurrentContext string `yaml:"current-context"`
}
var kubeConfigFile kubeConfigFileType
var keptnContext string
func init() {
credentials.SetCredsLabel(credsLab)
}
func setInstallCreds(h credentials.Helper, creds string) error {
c := &credentials.Credentials{
ServerURL: installCredsKey,
Username: "creds",
Secret: creds,
}
return h.Add(c)
}
func getInstallCreds(h credentials.Helper) (string, error) {
_, creds, err := h.Get(installCredsKey)
if err != nil {
return "", err
}
return creds, err
}
func setCreds(h credentials.Helper, endPoint url.URL, apiToken string, namespace string) error {
if MockAuthCreds {
// Do nothing
return nil
}
customServerURL := serverURL + "/" + keptnContext + "/" + namespace
c := &credentials.Credentials{
ServerURL: customServerURL,
Username: url.QueryEscape(endPoint.String()),
Secret: apiToken,
}
return h.Add(c)
}
func getCreds(h credentials.Helper, namespace string) (url.URL, string, error) {
if MockAuthCreds {
return url.URL{}, "", nil
}
customServerURL := serverURL + "/" + keptnContext + "/" + namespace
// Check if creds file is specified in the 'KEPTNCONFIG' environment variable
if customCredsLocation, ok := os.LookupEnv("KEPTNCONFIG"); ok {
if customCredsLocation != "" {
endPoint, apiToken, err := handleCustomCreds(customCredsLocation, namespace)
// If credential is not found in KEPTNCONFIG, use fallback credential manager
if apiToken != "" || err != nil {
return endPoint, apiToken, err
}
}
}
endPointStr, apiToken, err := h.Get(customServerURL)
if err != nil {
return url.URL{}, "", err
}
outURL, _ := url.QueryUnescape(endPointStr)
url, err := url.Parse(outURL)
return *url, apiToken, err
}
func handleCustomCreds(configLocation string, namespace string) (url.URL, string, error) {
fileContent, err := file.ReadFile(configLocation)
if err != nil {
return url.URL{}, "", err
}
var keptnConfig keptnConfigFile
yaml.Unmarshal([]byte(fileContent), &keptnConfig)
for _, context := range keptnConfig.Contexts {
// Default the namespace to keptn
if context.KeptnNamespace == "" {
context.KeptnNamespace = "keptn"
}
if context.ContextName == keptnContext && context.KeptnNamespace == namespace {
parsedURL, err := url.Parse(context.Endpoint)
if err != nil {
return url.URL{}, "", err
}
return *parsedURL, context.APIToken, nil
}
}
return url.URL{}, "", nil
}
// initChecks needs to be run when credentialManager is called or initialized
func initChecks(autoApplyNewContext bool) {
if !GlobalCheckForContextChange {
cliConfigManager := config.NewCLIConfigManager()
err := getCurrentContextFromKubeConfig()
if err != nil {
log.Fatal(err)
}
checkForContextChange(cliConfigManager, autoApplyNewContext)
GlobalCheckForContextChange = true
}
}
func getCurrentContextFromKubeConfig() error {
if MockAuthCreds || MockKubeConfigCheck {
// Do nothing
kubeConfigFile.CurrentContext = ""
keptnContext = ""
return nil
}
var kubeconfig string
if os.Getenv("KUBECONFIG") != "" {
kubeconfig = keptnutils.ExpandTilde(os.Getenv("KUBECONFIG"))
} else {
kubeconfig = filepath.Join(
keptnutils.UserHomeDir(), ".kube", "config",
)
}
fileContent, err := file.ReadFile(kubeconfig)
if err != nil {
return err
}
err = yaml.Unmarshal([]byte(fileContent), &kubeConfigFile)
if err != nil {
log.Fatalf("Unmarshal: %v", err)
}
return nil
}
func checkForContextChange(cliConfigManager *config.CLIConfigManager, autoApplyNewContext bool) error {
if MockAuthCreds || MockKubeConfigCheck {
// Do nothing
return nil
}
cliConfig, err := cliConfigManager.LoadCLIConfig()
if err != nil {
log.Fatal(err)
}
// Setting keptnContext from ~/.keptn/config file
keptnContext = cliConfig.CurrentContext
if keptnContext != kubeConfigFile.CurrentContext {
fmt.Printf("Kube context has been changed to %s", kubeConfigFile.CurrentContext)
fmt.Println()
if !autoApplyNewContext && keptnContext != "" {
fmt.Println("Do you want to switch to the new Kube context with the Keptn running there? (y/n)")
reader := bufio.NewReader(os.Stdin)
in, err := reader.ReadString('\n')
if err != nil {
return err
}
in = strings.ToLower(strings.TrimSpace(in))
if !(in == "y" || in == "yes") {
return nil
}
}
cliConfig.CurrentContext = kubeConfigFile.CurrentContext
keptnContext = kubeConfigFile.CurrentContext
err = cliConfigManager.StoreCLIConfig(cliConfig)
if err != nil {
return err
}
}
return nil
}
|
[
"\"KUBECONFIG\"",
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
go
| 1 | 0 | |
pkg/daemon/ceph/config/config.go
|
/*
Copyright 2018 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package config provides methods for creating and formatting Ceph configuration files for daemons.
package config
import (
"fmt"
"net"
"os"
"path"
"strconv"
"strings"
"github.com/coreos/pkg/capnslog"
"github.com/go-ini/ini"
"github.com/pkg/errors"
"github.com/rook/rook/pkg/clusterd"
"github.com/rook/rook/pkg/daemon/ceph/client"
cephutil "github.com/rook/rook/pkg/daemon/ceph/util"
cephver "github.com/rook/rook/pkg/operator/ceph/version"
)
var logger = capnslog.NewPackageLogger("github.com/rook/rook", "cephconfig")
const (
// DefaultKeyringFile is the default name of the file where Ceph stores its keyring info
DefaultKeyringFile = "keyring"
// Msgr2port is the listening port of the messenger v2 protocol
Msgr2port = 3300
msgr1Prefix = "v1:"
msgr2Prefix = "v2:"
)
var (
// DefaultConfigDir is the default dir where Ceph stores its configs. Can be overridden for unit
// tests.
DefaultConfigDir = "/etc/ceph"
// DefaultConfigFile is the default name of the file where Ceph stores its configs. Can be
// overridden for unit tests.
DefaultConfigFile = "ceph.conf"
)
// GlobalConfig represents the [global] sections of Ceph's config file.
type GlobalConfig struct {
EnableExperimental string `ini:"enable experimental unrecoverable data corrupting features,omitempty"`
FSID string `ini:"fsid,omitempty"`
MonMembers string `ini:"mon initial members,omitempty"`
MonHost string `ini:"mon host"`
LogFile string `ini:"log file,omitempty"`
MonClusterLogFile string `ini:"mon cluster log file,omitempty"`
PublicAddr string `ini:"public addr,omitempty"`
PublicNetwork string `ini:"public network,omitempty"`
ClusterAddr string `ini:"cluster addr,omitempty"`
ClusterNetwork string `ini:"cluster network,omitempty"`
MonKeyValueDb string `ini:"mon keyvaluedb"`
MonAllowPoolDelete bool `ini:"mon_allow_pool_delete"`
MaxPgsPerOsd int `ini:"mon_max_pg_per_osd"`
DebugLogDefaultLevel int `ini:"debug default"`
DebugLogRadosLevel int `ini:"debug rados"`
DebugLogMonLevel int `ini:"debug mon"`
DebugLogOSDLevel int `ini:"debug osd"`
DebugLogBluestoreLevel int `ini:"debug bluestore"`
DebugLogFilestoreLevel int `ini:"debug filestore"`
DebugLogJournalLevel int `ini:"debug journal"`
DebugLogLevelDBLevel int `ini:"debug leveldb"`
FileStoreOmapBackend string `ini:"filestore_omap_backend"`
OsdPgBits int `ini:"osd pg bits,omitempty"`
OsdPgpBits int `ini:"osd pgp bits,omitempty"`
OsdPoolDefaultSize int `ini:"osd pool default size,omitempty"`
OsdPoolDefaultMinSize int `ini:"osd pool default min size,omitempty"`
OsdPoolDefaultPgNum int `ini:"osd pool default pg num,omitempty"`
OsdPoolDefaultPgpNum int `ini:"osd pool default pgp num,omitempty"`
OsdMaxObjectNameLen int `ini:"osd max object name len,omitempty"`
OsdMaxObjectNamespaceLen int `ini:"osd max object namespace len,omitempty"`
OsdObjectStore string `ini:"osd objectstore,omitempty"`
RbdDefaultFeatures int `ini:"rbd_default_features,omitempty"`
FatalSignalHandlers string `ini:"fatal signal handlers"`
}
// CephConfig represents an entire Ceph config including all sections.
type CephConfig struct {
*GlobalConfig `ini:"global,omitempty"`
}
// DefaultConfigFilePath returns the full path to Ceph's default config file
func DefaultConfigFilePath() string {
return path.Join(DefaultConfigDir, DefaultConfigFile)
}
// DefaultKeyringFilePath returns the full path to Ceph's default keyring file
func DefaultKeyringFilePath() string {
return path.Join(DefaultConfigDir, DefaultKeyringFile)
}
// GetConfFilePath gets the path of a given cluster's config file
func GetConfFilePath(root, clusterName string) string {
return fmt.Sprintf("%s/%s.config", root, clusterName)
}
// GenerateAdminConnectionConfig calls GenerateAdminConnectionConfigWithSettings with no settings
// overridden.
func GenerateAdminConnectionConfig(context *clusterd.Context, cluster *ClusterInfo) (string, error) {
return GenerateAdminConnectionConfigWithSettings(context, cluster, nil)
}
// GenerateAdminConnectionConfigWithSettings generates a Ceph config and keyring which will allow
// the daemon to connect as an admin. Default config file settings can be overridden by specifying
// some subset of settings.
func GenerateAdminConnectionConfigWithSettings(context *clusterd.Context, cluster *ClusterInfo, settings *CephConfig) (string, error) {
root := path.Join(context.ConfigDir, cluster.Name)
keyringPath := path.Join(root, fmt.Sprintf("%s.keyring", client.AdminUsername))
err := writeKeyring(AdminKeyring(cluster), keyringPath)
if err != nil {
return "", errors.Wrapf(err, "failed to write keyring to %s", root)
}
filePath, err := GenerateConfigFile(context, cluster, root, client.AdminUsername, keyringPath, settings, nil)
if err != nil {
return "", errors.Wrapf(err, "failed to write config to %s", root)
}
logger.Infof("generated admin config in %s", root)
return filePath, nil
}
// GenerateConfigFile generates and writes a config file to disk.
func GenerateConfigFile(context *clusterd.Context, cluster *ClusterInfo, pathRoot, user, keyringPath string, globalConfig *CephConfig, clientSettings map[string]string) (string, error) {
// create the config directory
if err := os.MkdirAll(pathRoot, 0744); err != nil {
logger.Warningf("failed to create config directory at %s: %+v", pathRoot, err)
}
configFile, err := createGlobalConfigFileSection(context, cluster, globalConfig)
if err != nil {
return "", errors.Wrapf(err, "failed to create global config section")
}
qualifiedUser := getQualifiedUser(user)
if err := addClientConfigFileSection(configFile, qualifiedUser, keyringPath, clientSettings); err != nil {
return "", errors.Wrapf(err, "failed to add admin client config section")
}
// write the entire config to disk
filePath := GetConfFilePath(pathRoot, cluster.Name)
logger.Infof("writing config file %s", filePath)
if err := configFile.SaveTo(filePath); err != nil {
return "", errors.Wrapf(err, "failed to save config file %s", filePath)
}
return filePath, nil
}
// prepends "client." if a user namespace is not already specified
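// e.g. "admin" becomes "client.admin", while an already qualified user such as "osd.0" is returned unchanged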
func getQualifiedUser(user string) string {
if !strings.Contains(user, ".") {
return fmt.Sprintf("client.%s", user)
}
return user
}
// CreateDefaultCephConfig creates a default ceph config file.
func CreateDefaultCephConfig(context *clusterd.Context, cluster *ClusterInfo) (*CephConfig, error) {
cephVersionEnv := os.Getenv("ROOK_CEPH_VERSION")
if cephVersionEnv != "" {
v, err := cephver.ExtractCephVersion(cephVersionEnv)
if err != nil {
return nil, errors.Wrapf(err, "failed to extract ceph version")
}
cluster.CephVersion = *v
}
// extract a list of just the monitor names, which will populate the "mon initial members"
// global config field
monMembers := make([]string, len(cluster.Monitors))
monHosts := make([]string, len(cluster.Monitors))
i := 0
for _, monitor := range cluster.Monitors {
monMembers[i] = monitor.Name
monIP := cephutil.GetIPFromEndpoint(monitor.Endpoint)
// This tries to detect the current port if the mon already exists
// This basically handles the transition between monitors running on 6790 to msgr2
// So whatever the previous monitor port was we keep it
currentMonPort := cephutil.GetPortFromEndpoint(monitor.Endpoint)
monPorts := [2]string{strconv.Itoa(int(Msgr2port)), strconv.Itoa(int(currentMonPort))}
msgr1Endpoint := net.JoinHostPort(monIP, monPorts[1])
// Mimic daemons like OSD won't be able to parse this, so only the operator should get this config
// they will fail with
// unable to parse addrs in 'v1:10.104.92.199:6790,v1:10.110.137.107:6790,v1:10.102.38.86:6790'
// server name not found: v1:10.104.92.199:6790 (Name or service not known)
// 2019-04-25 10:31:08.614 7f5971aae1c0 -1 monclient: get_monmap_and_config cannot identify monitors to contact
// 2019-04-25 10:31:08.614 7f5971aae1c0 -1 monclient: get_monmap_and_config cannot identify monitors to contact
// failed to fetch mon config (--no-mon-config to skip)
// The operator always fails this test since it does not have the env var 'ROOK_CEPH_VERSION'
podName := os.Getenv("POD_NAME")
if cluster.CephVersion.IsAtLeastNautilus() {
monHosts[i] = msgr1Prefix + msgr1Endpoint
} else if podName != "" && strings.Contains(podName, "operator") {
// This is an operator and its version is always based on Nautilus
// so it knows how to parse both msgr1 and msgr2 syntax
prefix := msgrPrefix(currentMonPort)
monHosts[i] = prefix + msgr1Endpoint
} else {
// This is not the operator, it's an OSD and its Ceph version is before Nautilus
monHosts[i] = msgr1Endpoint
}
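// Illustrative example: with a monitor endpoint of 10.0.0.1:6789, a Nautilus cluster records
// "v1:10.0.0.1:6789", the operator records the same address with a "v1:" or "v2:" prefix
// depending on the detected port, and a pre-Nautilus daemon records the bare "10.0.0.1:6789".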
i++
}
cephLogLevel := logLevelToCephLogLevel(context.LogLevel)
conf := &CephConfig{
GlobalConfig: &GlobalConfig{
FSID: cluster.FSID,
MonMembers: strings.Join(monMembers, " "),
MonHost: strings.Join(monHosts, ","),
PublicAddr: context.NetworkInfo.PublicAddr,
PublicNetwork: context.NetworkInfo.PublicNetwork,
ClusterAddr: context.NetworkInfo.ClusterAddr,
ClusterNetwork: context.NetworkInfo.ClusterNetwork,
MonKeyValueDb: "rocksdb",
MonAllowPoolDelete: true,
MaxPgsPerOsd: 1000,
DebugLogDefaultLevel: cephLogLevel,
DebugLogRadosLevel: cephLogLevel,
DebugLogMonLevel: cephLogLevel,
DebugLogOSDLevel: cephLogLevel,
DebugLogBluestoreLevel: cephLogLevel,
DebugLogFilestoreLevel: cephLogLevel,
DebugLogJournalLevel: cephLogLevel,
DebugLogLevelDBLevel: cephLogLevel,
FileStoreOmapBackend: "rocksdb",
OsdPgBits: 11,
OsdPgpBits: 11,
OsdPoolDefaultSize: 1,
OsdPoolDefaultMinSize: 1,
OsdPoolDefaultPgNum: 100,
OsdPoolDefaultPgpNum: 100,
RbdDefaultFeatures: 3,
FatalSignalHandlers: "false",
},
}
// For versions before 14.2.1, point the daemon and mon cluster logs at stderr via the config file.
// Newer versions manage file logging through the centralized config store, where it is
// disabled by default but can be activated at any time.
if !cluster.CephVersion.IsAtLeast(cephver.CephVersion{Major: 14, Minor: 2, Extra: 1}) {
conf.LogFile = "/dev/stderr"
conf.MonClusterLogFile = "/dev/stderr"
}
return conf, nil
}
// create a config file with global settings configured, and return an ini file
func createGlobalConfigFileSection(context *clusterd.Context, cluster *ClusterInfo, userConfig *CephConfig) (*ini.File, error) {
var ceph *CephConfig
if userConfig != nil {
// use the user config since it was provided
ceph = userConfig
} else {
var err error
ceph, err = CreateDefaultCephConfig(context, cluster)
if err != nil {
return nil, errors.Wrapf(err, "failed to create default ceph config")
}
}
configFile := ini.Empty()
err := ini.ReflectFrom(configFile, ceph)
return configFile, err
}
// add client config to the ini file
func addClientConfigFileSection(configFile *ini.File, clientName, keyringPath string, settings map[string]string) error {
s, err := configFile.NewSection(clientName)
if err != nil {
return err
}
if _, err := s.NewKey("keyring", keyringPath); err != nil {
return err
}
for key, val := range settings {
if _, err := s.NewKey(key, val); err != nil {
return errors.Wrapf(err, "failed to add key %s", key)
}
}
return nil
}
// convert a Rook log level to a corresponding Ceph log level
func logLevelToCephLogLevel(logLevel capnslog.LogLevel) int {
switch logLevel {
case capnslog.CRITICAL, capnslog.ERROR, capnslog.WARNING:
return -1
case capnslog.NOTICE, capnslog.INFO:
return 0
case capnslog.DEBUG:
return 10
case capnslog.TRACE:
return 100
}
return 0
}
func msgrPrefix(currentMonPort int32) string {
// Some installation might only be listening on v2, so let's set the prefix accordingly
if currentMonPort == Msgr2port {
return msgr2Prefix
}
return msgr1Prefix
}
|
[
"\"ROOK_CEPH_VERSION\"",
"\"POD_NAME\""
] |
[] |
[
"POD_NAME",
"ROOK_CEPH_VERSION"
] |
[]
|
["POD_NAME", "ROOK_CEPH_VERSION"]
|
go
| 2 | 0 | |
pkg/csi/service/wcpguest/controller_test.go
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package wcpguest
import (
"context"
"os"
"sync"
"testing"
"time"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
testclient "k8s.io/client-go/kubernetes/fake"
"github.com/container-storage-interface/spec/lib/go/csi"
"sigs.k8s.io/vsphere-csi-driver/pkg/common/config"
"sigs.k8s.io/vsphere-csi-driver/pkg/common/unittestcommon"
"sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/common"
"sigs.k8s.io/vsphere-csi-driver/pkg/csi/service/common/commonco"
k8s "sigs.k8s.io/vsphere-csi-driver/pkg/kubernetes"
)
const (
testVolumeName = "pvc-12345"
// The format of SupervisorPVCName is TanzuKubernetesClusterUID+"-"+ volumeUID.
// The TanzuKubernetesClusterUID is empty in the unit test.
testSupervisorPVCName = "-12345"
testNamespace = "test-namespace"
testStorageClass = "test-storageclass"
)
var (
ctx context.Context
isUnitTest bool
supervisorNamespace string
controllerTestInstance *controllerTest
onceForControllerTest sync.Once
)
type controllerTest struct {
controller *controller
}
func configFromSim() (clientset.Interface, error) {
isUnitTest = true
supervisorClient := testclient.NewSimpleClientset()
supervisorNamespace = testNamespace
return supervisorClient, nil
}
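// configFromEnvOrSim builds a supervisor cluster client from the guest cluster config in the
// environment, falling back to the fake clientset for unit tests when that config cannot be loaded.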
func configFromEnvOrSim(ctx context.Context) (clientset.Interface, error) {
cfg := &config.Config{}
if err := config.FromEnvToGC(ctx, cfg); err != nil {
return configFromSim()
}
isUnitTest = false
restClientConfig := k8s.GetRestClientConfigForSupervisor(ctx, cfg.GC.Endpoint, cfg.GC.Port)
supervisorClient, err := k8s.NewSupervisorClient(ctx, restClientConfig)
if err != nil {
return nil, err
}
return supervisorClient, nil
}
func getControllerTest(t *testing.T) *controllerTest {
onceForControllerTest.Do(func() {
// Create context.
ctx = context.Background()
supervisorClient, err := configFromEnvOrSim(ctx)
if err != nil {
t.Fatal(err)
}
c := &controller{
supervisorClient: supervisorClient,
supervisorNamespace: supervisorNamespace,
}
commonco.ContainerOrchestratorUtility, err =
unittestcommon.GetFakeContainerOrchestratorInterface(common.Kubernetes)
if err != nil {
t.Fatalf("Failed to create co agnostic interface. err=%v", err)
}
controllerTestInstance = &controllerTest{
controller: c,
}
})
return controllerTestInstance
}
func createVolume(ctx context.Context, ct *controllerTest, reqCreate *csi.CreateVolumeRequest,
response chan *csi.CreateVolumeResponse, errChan chan error) {
defer close(response)
defer close(errChan)
res, err := ct.controller.CreateVolume(ctx, reqCreate)
response <- res
errChan <- err
}
// TestGuestCreateVolume creates volume.
func TestGuestClusterControllerFlow(t *testing.T) {
ct := getControllerTest(t)
modes := []csi.VolumeCapability_AccessMode_Mode{
csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
}
for _, mode := range modes {
// Create.
params := make(map[string]string)
params[common.AttributeSupervisorStorageClass] = testStorageClass
if v := os.Getenv("SUPERVISOR_STORAGE_CLASS"); v != "" {
params[common.AttributeSupervisorStorageClass] = v
}
capabilities := []*csi.VolumeCapability{
{
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: mode,
},
},
}
reqCreate := &csi.CreateVolumeRequest{
Name: testVolumeName,
CapacityRange: &csi.CapacityRange{
RequiredBytes: 1 * common.GbInBytes,
},
Parameters: params,
VolumeCapabilities: capabilities,
}
var respCreate *csi.CreateVolumeResponse
var err error
if isUnitTest {
// Invoking CreateVolume in a separate thread and then setting the
// Status to Bound explicitly.
response := make(chan *csi.CreateVolumeResponse)
errChan := make(chan error)
go createVolume(ctx, ct, reqCreate, response, errChan)
time.Sleep(1 * time.Second)
pvc, _ := ct.controller.supervisorClient.CoreV1().PersistentVolumeClaims(
ct.controller.supervisorNamespace).Get(ctx, testSupervisorPVCName, metav1.GetOptions{})
pvc.Status.Phase = "Bound"
_, err = ct.controller.supervisorClient.CoreV1().PersistentVolumeClaims(
ct.controller.supervisorNamespace).Update(ctx, pvc, metav1.UpdateOptions{})
if err != nil {
t.Fatal(err)
}
respCreate, err = <-response, <-errChan
} else {
respCreate, err = ct.controller.CreateVolume(ctx, reqCreate)
// Wait for create volume finish.
time.Sleep(1 * time.Second)
}
if err != nil {
t.Fatal(err)
}
supervisorPVCName := respCreate.Volume.VolumeId
// Verify the pvc has been created.
_, err = ct.controller.supervisorClient.CoreV1().PersistentVolumeClaims(
ct.controller.supervisorNamespace).Get(ctx, supervisorPVCName, metav1.GetOptions{})
if err != nil {
t.Fatal(err)
}
// Delete.
reqDelete := &csi.DeleteVolumeRequest{
VolumeId: supervisorPVCName,
}
_, err = ct.controller.DeleteVolume(ctx, reqDelete)
if err != nil {
t.Fatal(err)
}
// Wait for delete volume finish.
time.Sleep(1 * time.Second)
// Verify the pvc has been deleted.
_, err = ct.controller.supervisorClient.CoreV1().PersistentVolumeClaims(
ct.controller.supervisorNamespace).Get(ctx, supervisorPVCName, metav1.GetOptions{})
if !errors.IsNotFound(err) {
t.Fatal(err)
}
}
}
|
[
"\"SUPERVISOR_STORAGE_CLASS\""
] |
[] |
[
"SUPERVISOR_STORAGE_CLASS"
] |
[]
|
["SUPERVISOR_STORAGE_CLASS"]
|
go
| 1 | 0 | |
perception/mil_vision/mil_vision_tools/image_mux.py
|
#!/usr/bin/env python
from __future__ import division
import numpy as np
import cv2
__author__ = 'Kevin Allen'
class ImageMux(object):
'''
Utility to create a customizable square grid of images, with labels and borders,
which will generate a single image from the grid at any time. Useful for
combining several debug images into one.
See bottom of this file for a usage example.
'''
def __init__(self, size=(480, 640), shape=(2, 2), labels=None, keep_ratio=True,
border_color=(255, 255, 255), border_thickness=1, text_color=(255, 255, 255),
text_font=cv2.FONT_HERSHEY_COMPLEX_SMALL, text_scale=1, text_thickness=2):
'''
Construct an ImageMux grid.
@param size: Tuple (rows, cols) representing the size of the grid image, in pixels
@param shape: Tuple (rows, cols) representing the number of smaller images in the grid
@param labels: List of strings of length shape[0] * shape[1]
@param keep_ratio: If True, do not stretch image to insert into grid pane
@param text_color: Tuple (Blue, Green, Red) color of label text
@param text_font: Integer, a valid OpenCV font to use in label text
@param text_scale: Scaling factor for label text, float
@param text_thickness: Thickness of label text, int
'''
self.size = np.array(size, dtype=np.uint)
self.shape = np.array(shape, dtype=np.uint)
self.keep_ratio = keep_ratio
self.pane_size = np.array(self.size / self.shape, dtype=np.int)
self.border_color = border_color
self.border_thickness = border_thickness
self.text_color = text_color
self.text_font = text_font
self.text_scale = text_scale
self.text_thickness = text_thickness
# If labels not specified, fill a list with None's
if labels is None:
self.labels = [None for _ in xrange(self.shape[0] * self.shape[1])]
else:
assert len(labels) == self.shape[0] * self.shape[1], 'not enough labels'
self.labels = labels
self._image = np.zeros((size[0], size[1], 3), dtype=np.uint8)
def _index_to_tuple(self, index):
'''
Internal helper function, returns row, col index
from a single index integer
'''
return (int(index / self.shape[1]), int(index % self.shape[1]))
def _apply_decorations(self):
'''
Internal helper function, adds border lines and label text to internal image.
'''
# Add border if thickness > 0
if self.border_thickness > 0:
for row in xrange(1, self.shape[0]): # Add horizontal line for rows 1 - m
y = int(self.pane_size[0] * row)
cv2.line(self._image, (0, y), (self.size[1], y), self.border_color, self.border_thickness)
for col in xrange(1, self.shape[1]): # Add vertical line for rows 1 - n
x = int(self.pane_size[1] * col)
cv2.line(self._image, (x, 0), (x, self.size[0]), self.border_color, self.border_thickness)
# Add label text for each pane if it is not None
for i, label in enumerate(self.labels):
if label is None:
continue
tup = self._index_to_tuple(i)
(text_width, text_height), _ = cv2.getTextSize(label, self.text_font, self.text_scale, self.text_thickness)
x = int(self.pane_size[1] * tup[1])
y = int(self.pane_size[0] * tup[0] + text_height)
# Adjust text position to not overlap border
if tup[0] != 0:
y += self.border_thickness
if tup[1] != 0:
x += self.border_thickness
cv2.putText(self._image, label, (x, y),
self.text_font, self.text_scale, self.text_color, self.text_thickness)
def set_image(self, key, img):
'''
Sets the content of one pane in the image grid.
@param key: The index of the pane to set to the data of img
If an integer -> sets pane at index, counting left to right, then top to bottom
If a tuple (row, col) -> set pane at specified (index 0) row and column
@param img: numpy array with shape (m, n, 3) or (m, n, 1) representing the image to insert
in the pane specified in key. If a one channel image, first convert grayscale to BGR.
If keep_ratio was True in contructor, will add black bars as necessary to fill pane.
Otherwise, use standard cv2.resize to fit img into pane.
@raise AssertionError: if key is the wrong type or out of bounds
'''
assert isinstance(img, np.ndarray), 'img must be numpy array'
# If image is grayscale, convert to 3 channel
if len(img.shape) == 2 or img.shape[2] == 1:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
if isinstance(key, int): # Accept a single index, ex 5 -> (2, 1)
key = self._index_to_tuple(key)
assert isinstance(key, tuple), 'must be tuple'
assert len(key) == 2, 'index best be 2D'
assert key[0] < self.shape[0] and key[1] < self.shape[1], 'out of bounds'
rows = slice(key[0] * self.pane_size[0], (key[0] + 1) * self.pane_size[0])
cols = slice(key[1] * self.pane_size[1], (key[1] + 1) * self.pane_size[1])
if self.keep_ratio:
row_count = rows.stop - rows.start
col_count = cols.stop - cols.start
ratio = np.array([img.shape[0] / row_count, img.shape[1] / col_count])
scale = 1 / np.max(ratio)
size = (int(img.shape[1] * scale), int(img.shape[0] * scale))
v_border = int((row_count - size[1]) / 2)
h_border = int((col_count - size[0]) / 2)
rows = slice(rows.start + v_border, rows.start + size[1] + v_border)
cols = slice(cols.start + h_border, cols.start + size[0] + h_border)
self._image[rows, cols] = cv2.resize(img, size)
else:
size = (self.pane_size[1], self.pane_size[0])
self._image[rows, cols] = cv2.resize(img, size)
__setitem__ = set_image # Overload index [] operator to set image
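# Worked example of the keep_ratio math above (assuming the 500x900 grid with shape (3, 2)
# from the demo below, so each pane is 166x450 pixels): a 200x2000 image has
# ratio = [200/166, 2000/450] ~= [1.20, 4.44], so scale = 1/4.44 ~= 0.225 and the image is
# resized to 450x45, then centered with about 60 rows of black padding above and below
# (v_border = int((166 - 45) / 2) = 60, h_border = 0).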
def get_image(self):
'''
Returns the image grid, with labels and borders
'''
self._apply_decorations()
return self._image
@property
def image(self): # accessing .image will draw decorations onto image then return the full image grid
return self.get_image()
__call__ = get_image # Overload () operator to access grid image
if __name__ == '__main__':
'''
ImageMux is intended to be used as a class, not an executable.
The following is an example of how to use it in a python program.
Creates a 3x2 grid of raccoon images and test patterns with labels, using some custom parameters.
To run this yourself, download two images and put them in $HOME/Pictures/1.jpg and $HOME/Pictures/2.jpg
'''
import os
labels = ['Chubby Racoon', 'Kiddo Racoons', 'wide', 'tall', 'big wide', 'big tall'] # Create strings for labels
images = [cv2.imread(os.path.join(os.environ['HOME'], 'Pictures', str(i + 1) + '.jpg')) for i in xrange(2)]
# Add strange ratio white blocks to test keep_ratio flag
images.append(255 * np.ones((20, 201, 3), dtype=np.uint8)) # A small, wide image
images.append(255 * np.ones((200, 20, 3), dtype=np.uint8)) # A small, tall image
images.append(255 * np.ones((200, 2000, 3), dtype=np.uint8)) # A large, wide image
images.append(255 * np.ones((2000, 200, 3), dtype=np.uint8)) # A large, tall image
t = ImageMux(size=(500, 900), border_color=(0, 0, 255), border_thickness=3, shape=(3, 2),
labels=labels, text_scale=1, keep_ratio=True)
for i in xrange(len(images)):
t[i] = np.array(images[i])
cv2.imshow('Grid', t.image)
cv2.imshow('Grid2', t())
print 'Press any key in GUI window to exit'
cv2.waitKey(0)
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
src/remote/ssh.go
|
package remote
import (
"bytes"
"crypto/x509"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"os"
"path"
"syscall"
"time"
"github.com/usi-lfkeitel/saturn/src/utils"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
"golang.org/x/crypto/ssh/terminal"
)
var sshClientConfig *ssh.ClientConfig
func LoadPrivateKey(config *utils.Config) error {
authMethods := make([]ssh.AuthMethod, 0, 1)
if config.SSH.PrivateKey != "" {
sshPrivateKey, err := ioutil.ReadFile(config.SSH.PrivateKey)
if err != nil {
return err
}
signer, err := ssh.ParsePrivateKeyWithPassphrase(sshPrivateKey, []byte{})
if err == x509.IncorrectPasswordError {
signer, err = loadPrivateKeyPromptPassphrase(sshPrivateKey)
}
if err != nil {
return err
}
authMethods = append(authMethods, ssh.PublicKeys(signer))
}
if config.SSH.Password != "" {
authMethods = append(authMethods, ssh.Password(config.SSH.Password))
}
if config.SSH.UseAgent {
if a := loadSSHAgent(); a != nil {
authMethods = append(authMethods, a)
}
}
if len(authMethods) == 0 {
authMethods = append(authMethods, ssh.Password(string(getPassword("SSH Password: "))))
}
t, _ := time.ParseDuration(config.SSH.Timeout)
sshClientConfig = &ssh.ClientConfig{
User: config.SSH.Username,
Auth: authMethods,
Timeout: t,
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}
return nil
}
func loadSSHAgent() ssh.AuthMethod {
if sshAgent, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK")); err == nil {
return ssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers)
}
return nil
}
func loadPrivateKeyPromptPassphrase(key []byte) (ssh.Signer, error) {
return ssh.ParsePrivateKeyWithPassphrase(key, getPassword("SSH Key Password: "))
}
func getPassword(prompt string) []byte {
fmt.Print(prompt)
bytePassword, err := terminal.ReadPassword(int(syscall.Stdin))
if err != nil {
log.Println(err.Error())
}
fmt.Println("")
return bytes.TrimSpace(bytePassword)
}
func UploadScript(config *utils.Config, hosts map[string]*utils.ConfigHost, genFilename string) error {
f, err := os.Open(genFilename)
if err != nil {
return err
}
defer f.Close()
s, err := f.Stat()
if err != nil {
return err
}
for _, host := range hosts {
if host.Disable {
continue
}
_, err = f.Seek(0, 0) // rewind file reader
if err != nil {
return err
}
if err := uploadRemoteScript(config, host, f, s); err != nil {
if config.Core.HaltOnError {
return err
}
log.Println(err.Error())
host.Disable = true
}
}
return nil
}
func uploadRemoteScript(config *utils.Config, host *utils.ConfigHost, f *os.File, s os.FileInfo) error {
if host.SSHConnection == nil {
if err := host.ConnectSSH(sshClientConfig); err != nil {
return err
}
}
session, err := host.SSHConnection.NewSession()
if err != nil {
return err
}
defer session.Close()
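// The goroutine below speaks the scp "sink" protocol over the session's stdin:
// "D0755 0 .saturn" creates the remote directory, "C<mode> <size> <name>" announces
// the file, the raw file bytes follow, and the trailing \x00 terminates the transfer.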
go func() {
w, _ := session.StdinPipe()
defer w.Close()
fmt.Fprintln(w, "D0755", 0, ".saturn") // mkdir
fmt.Fprintf(w, "C%#o %d %s\n", s.Mode().Perm(), s.Size(), path.Base(f.Name()))
io.Copy(w, f)
fmt.Fprint(w, "\x00")
}()
cmd := fmt.Sprintf("scp -rt %s", config.Core.RemoteBaseDir)
return session.Run(cmd)
}
func ExecuteScript(config *utils.Config, hosts map[string]*utils.ConfigHost, filename string) ([]*utils.HostResponse, error) {
filename = path.Base(filename)
responses := make([]*utils.HostResponse, 0, len(hosts))
for _, host := range hosts {
if host.Disable {
continue
}
if host.SSHConnection == nil {
if err := host.ConnectSSH(sshClientConfig); err != nil {
return nil, err
}
}
session, err := host.SSHConnection.NewSession()
if err != nil {
return nil, err
}
var stdoutBuf bytes.Buffer
var stderrBuf bytes.Buffer
session.Stdout = &stdoutBuf
session.Stderr = &stderrBuf
flags := "-d"
if config.Core.SpecialDebug {
flags = ""
}
cmd := fmt.Sprintf("/bin/bash %s/.saturn/%s %s", config.Core.RemoteBaseDir, filename, flags)
if err := session.Run(cmd); err != nil {
fmt.Println(err.Error())
fmt.Println(stderrBuf.String())
session.Close()
continue
}
session.Close()
if stderrBuf.Len() > 0 {
log.Println(stderrBuf.String())
}
if config.Core.Debug {
fmt.Println(stdoutBuf.String())
}
var response utils.HostResponse
if err := json.Unmarshal(stdoutBuf.Bytes(), &response); err != nil {
fmt.Println(stdoutBuf.String())
fmt.Println(err.Error())
continue
}
response.Host = host
responses = append(responses, &response)
}
return responses, nil
}
|
[
"\"SSH_AUTH_SOCK\""
] |
[] |
[
"SSH_AUTH_SOCK"
] |
[]
|
["SSH_AUTH_SOCK"]
|
go
| 1 | 0 | |
wrap/logrus/v1/log_test.go
|
// Copyright 2019 Cisco Systems, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1_test
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"os/exec"
"runtime"
"strings"
"testing"
pkg "github.com/parinithshekar/gitsink/pkg/v1"
logger "github.com/parinithshekar/gitsink/wrap/logrus/v1"
require "github.com/stretchr/testify/require"
)
// Enforce interface implementation.
func TestInterface(t *testing.T) {
var _ pkg.Logger = &logger.Logger{}
}
// Test Error without any fields
func TestLogError(t *testing.T) {
buf := &bytes.Buffer{}
log := customLogger(buf)
err := errors.New("This is an error message.")
log.Errorf("%s", err.Error())
// "time="2019-09-10T10:51:43+05:30" level=error msg="This is an error message." file="log_test.go:43"
expectedLog := fmt.Sprintf("{\"file\":\"%s:%d\",\"func\":\"%s\",\"level\":\"%s\",\"msg\":\"%s\",\"time\":\"", "log_test.go", getLineNumber()-2, getFuncName(), "error", "This is an error message.")
require.Contains(t, buf.String(), expectedLog)
}
// Test Warningf
func TestLogWarningf(t *testing.T) {
buf := &bytes.Buffer{}
log := customLogger(buf)
err := errors.New("This is an warning.")
log.Warningf("%s", err.Error())
// "time="2019-09-10T10:51:43+05:30" level=error msg="This is an error message." file="log_test.go:43"
expectedLog := fmt.Sprintf("{\"file\":\"%s:%d\",\"func\":\"%s\",\"level\":\"%s\",\"msg\":\"%s\",\"time\":\"", "log_test.go", getLineNumber()-2, getFuncName(), "warning", "This is an warning.")
require.Contains(t, buf.String(), expectedLog)
}
// Test logger with WithError method.
func TestWithError(t *testing.T) {
buf := &bytes.Buffer{}
log := customLogger(buf)
// Check Errorf().
checkErrorf(t, log, buf)
// This shouldn't have any effect on WithError.
log.AutoClearFields(false)
checkErrorf(t, log, buf)
}
func checkErrorf(t *testing.T, log pkg.Logger, buf *bytes.Buffer) {
err := errors.New("This is a custom error.")
log.WithError(err).Errorf("Encountered an error.")
// time="2019-09-10T10:56:17+05:30" level=error msg="Encountered an error." file="log_test.go:53" error="This is a custom error."
expectedLog := fmt.Sprintf("{\"error\":\"%s\",\"file\":\"%s:%d\",\"func\":\"%s\",\"level\":\"%s\",\"msg\":\"%s\",\"time\":\"", "This is a custom error.", "log_test.go", getLineNumber()-2, getFuncName(), "error", "Encountered an error.")
require.Contains(t, buf.String(), expectedLog)
// Check if error field is cleared.
log.Errorf("Encountered an error.")
// time="2019-09-10T10:56:17+05:30" level=error msg="Encountered an error." file="log_test.go:53" error="This is a custom error."
expectedLog = fmt.Sprintf("{\"file\":\"%s:%d\",\"func\":\"%s\",\"level\":\"%s\",\"msg\":\"%s\",\"time\":\"", "log_test.go", getLineNumber()-2, getFuncName(), "error", "Encountered an error.")
require.Contains(t, buf.String(), expectedLog)
}
// Test logger with WithField method.
func TestWithField(t *testing.T) {
buf := &bytes.Buffer{}
log := customLogger(buf)
log.WithField("key", "value").WithField("key2", "value2").Errorf("Errors with custom field.")
// time="2019-09-10T11:17:19+05:30" level=error msg="Errors with custom field." file="log_test.go:63" key=value key2=value2
expectedLog := fmt.Sprintf("{\"file\":\"%s:%d\",\"func\":\"%s\",\"key\":\"value\",\"key2\":\"value2\",\"level\":\"%s\",\"msg\":\"%s\",\"time\":\"", "log_test.go", getLineNumber()-2, getFuncName(), "error", "Errors with custom field.")
require.Contains(t, buf.String(), expectedLog)
log.WithField("key3", "value3").Errorf("Errors with custom field again.")
// time="2019-09-10T11:17:19+05:30" level=error msg="Errors with custom field." file="log_test.go:63" key=value key2=value2
expectedLog = fmt.Sprintf("{\"file\":\"%s:%d\",\"func\":\"%s\",\"key3\":\"value3\",\"level\":\"%s\",\"msg\":\"%s\",\"time\":\"", "log_test.go", getLineNumber()-2, getFuncName(), "error", "Errors with custom field again.")
require.Contains(t, buf.String(), expectedLog)
}
// Test concurrent modifications to fields.
func TestConcurrentMods(t *testing.T) {
buf := &bytes.Buffer{}
log := customLogger(buf)
log.SetLevel("debug")
for i := 0; i < 15000; i++ {
go func() {
log.WithError(pkg.ErrNoMatch).WithField("key", "value").Debugf("")
expectedLog := fmt.Sprintf("{\"error\":\"No alert matched in alert config.\",\"file\":\"%s:%d\",\"func\":\"%s\",\"key\":\"value\",\"level\":\"%s\",\"msg\":\"\",\"time\":\"", "log_test.go", getLineNumber()-1, getFuncName(), "debug")
require.Contains(t, buf.String(), expectedLog)
}()
}
}
// Test logger with AutoClear disabled.
func TestAutoClearFieldsDisabled(t *testing.T) {
buf := &bytes.Buffer{}
log := customLogger(buf)
log.AutoClearFields(false)
log.WithField("key", "value").WithField("key2", "value2").Errorf("Errors with custom field.")
// time="2019-09-10T11:17:19+05:30" level=error msg="Errors with custom field." file="log_test.go:63" key=value key2=value2
expectedLog := fmt.Sprintf("{\"file\":\"%s:%d\",\"func\":\"%s\",\"key\":\"value\",\"key2\":\"value2\",\"level\":\"%s\",\"msg\":\"%s\",\"time\":\"", "log_test.go", getLineNumber()-2, getFuncName(), "error", "Errors with custom field.")
require.Contains(t, buf.String(), expectedLog)
log.WithField("key3", "value3").Errorf("Errors with custom field again.")
expectedLog = fmt.Sprintf("{\"file\":\"%s:%d\",\"func\":\"%s\",\"key\":\"value\",\"key2\":\"value2\",\"key3\":\"value3\",\"level\":\"%s\",\"msg\":\"%s\",\"time\":\"", "log_test.go", getLineNumber()-1, getFuncName(), "error", "Errors with custom field again.")
require.Contains(t, buf.String(), expectedLog)
}
func TestWithFields(t *testing.T) {
buf := &bytes.Buffer{}
log := customLogger(buf)
log.SetLevel("debug")
log.WithFields(map[string]interface{}{
"key1": "val1",
"key2": "val2",
}).
WithField("key3", "val3").
Debugf("Errors with custom fields.")
// time="2019-09-10T11:21:40+05:30" level=debug msg="Errors with custom fields." file="log_test.go:77" key1=val1 key2=val2 key3=val3
expectedLog := fmt.Sprintf("{\"file\":\"%s:%d\",\"func\":\"%s\",\"key1\":\"val1\",\"key2\":\"val2\",\"key3\":\"val3\",\"level\":\"%s\",\"msg\":\"%s\",\"time\":\"", "log_test.go", getLineNumber()-2, getFuncName(), "debug", "Errors with custom fields.")
require.Contains(t, buf.String(), expectedLog)
}
// Test debug log level enabled
func TestWithDebugEnabled(t *testing.T) {
buf := &bytes.Buffer{}
log := customLogger(buf)
log.SetLevel("deBug")
log.Debugf("Debug log enabled.")
// time="2019-09-10T11:25:46+05:30" level=debug msg="Debug log enabled." file="log_test.go:88
expectedLog := fmt.Sprintf("{\"file\":\"%s:%d\",\"func\":\"%s\",\"level\":\"%s\",\"msg\":\"%s\",\"time\":\"", "log_test.go", getLineNumber()-2, getFuncName(), "debug", "Debug log enabled.")
require.Contains(t, buf.String(), expectedLog)
}
// Test debug log level disabled
func TestWithDebugDisabled(t *testing.T) {
buf := &bytes.Buffer{}
log := customLogger(buf)
log.SetLevel("info")
log.Debugf("Debug log enabled.")
// "time="2019-09-09 19:12:11" level="DEBUG" tag="test.with.fields" location="log_test.go:90" msg="Debug log enabled."
require.Contains(t, buf.String(), "")
}
// Custom logger that writes to a buffer for testing, instead of os.Stderr
func customLogger(output io.Writer) pkg.Logger {
log := logger.New()
log.SetOutput(output)
return log
}
// Test log.Infof without any fields
func TestLogInfo(t *testing.T) {
buf := &bytes.Buffer{}
log := customLogger(buf)
msg := "This is an info message."
log.Infof(msg)
// "time="2019-09-10T10:51:43+05:30" level=info msg="This is an info message." file="log_test.go:118"
expectedLog := fmt.Sprintf("{\"file\":\"%s:%d\",\"func\":\"%s\",\"level\":\"%s\",\"msg\":\"%s\",\"time\":\"", "log_test.go", getLineNumber()-2, getFuncName(), "info", msg)
require.Contains(t, buf.String(), expectedLog)
}
// Test log.Tracef without any fields
func TestLogTrace(t *testing.T) {
buf := &bytes.Buffer{}
log := customLogger(buf)
log.SetLevel("trace")
msg := "This is an trace message."
log.Tracef(msg)
// "time="2019-09-10T10:51:43+05:30" level=info msg="This is an info message." file="log_test.go:118"
expectedLog := fmt.Sprintf("{\"file\":\"%s:%d\",\"func\":\"%s\",\"level\":\"%s\",\"msg\":\"%s\",\"time\":\"", "log_test.go", getLineNumber()-2, getFuncName(), "trace", msg)
require.Contains(t, buf.String(), expectedLog)
}
// Test log.Debugf without any fields
func TestLogDebug(t *testing.T) {
buf := &bytes.Buffer{}
log := customLogger(buf)
log.SetLevel("debug")
msg := "This is an debug message."
log.Debugf(msg)
// "time="2019-09-10T10:51:43+05:30" level=info msg="This is an info message." file="log_test.go:118"
expectedLog := fmt.Sprintf("{\"file\":\"%s:%d\",\"func\":\"%s\",\"level\":\"%s\",\"msg\":\"%s\",\"time\":\"", "log_test.go", getLineNumber()-2, getFuncName(), "debug", msg)
require.Contains(t, buf.String(), expectedLog)
}
// Test log.Panicf without any fields
func TestLogPanic(t *testing.T) {
buf := &bytes.Buffer{}
log := customLogger(buf)
msg := "This is an panic message."
defer func() {
if r := recover(); r == nil {
t.Errorf("The code did not panic")
}
}()
log.Panicf(msg)
expectedLog := fmt.Sprintf("{\"file\":\"%s:%d\",\"func\":\"%s\",\"level\":\"%s\",\"msg\":\"%s\",\"time\":\"", "log_test.go", getLineNumber()-2, getFuncName(), "panic", msg)
require.Contains(t, buf.String(), expectedLog)
}
// Test log.Fatalf without any fields
func TestFatalF(t *testing.T) {
// log.Fatalf calls os.Exit, so executing it as another process.
msg := "This is an fatal message."
if os.Getenv("BE_CRASHER") == "1" {
log := logger.New()
log.Fatalf("This is an fatal message.")
return
}
cmd := exec.Command(os.Args[0], "-test.run=TestFatalF")
cmd.Env = append(os.Environ(), "BE_CRASHER=1")
output, err := cmd.CombinedOutput()
require.Error(t, err)
//time="2019-09-10T12:41:33+05:30" level=fatal msg="This is an fatal message." file="log_test.go:128"
expectedLog := fmt.Sprintf("{\"file\":\"%s:%d\",\"func\":\"%s\",\"level\":\"%s\",\"msg\":\"%s\",\"time\":\"", "log_test.go", getLineNumber()-9, getFuncName(), "fatal", msg)
require.Contains(t, string(output), expectedLog)
}
// Test log.Fatalf without any fields
func TestFatalFSkip(t *testing.T) {
// log.Fatalf calls os.Exit, so executing it as another process.
buf := &bytes.Buffer{}
log := customLogger(buf)
log.SetLevel("panic")
log.Fatalf("This is a fatal message.")
//time="2019-09-10T12:41:33+05:30" level=fatal msg="This is an fatal message." file="log_test.go:128"
require.Contains(t, buf.String(), "")
}
// Test setting log level.
func TestGetLevel(t *testing.T) {
// log.Fatalf calls os.Exit, so executing it as another process.
log := logger.New()
log.SetLevel("debug")
log.LogLevel()
require.EqualValues(t, "debug", log.LogLevel())
}
// Get line number of caller.
func getLineNumber() int {
_, _, line, _ := runtime.Caller(1)
return line
}
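// Get the unqualified name of the calling function.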
func getFuncName() string {
pc, _, _, _ := runtime.Caller(1)
function := runtime.FuncForPC(pc)
functionSplit := strings.Split(function.Name(), ".")
functionName := functionSplit[len(functionSplit)-1]
return functionName
}
|
[
"\"BE_CRASHER\""
] |
[] |
[
"BE_CRASHER"
] |
[]
|
["BE_CRASHER"]
|
go
| 1 | 0 | |
clib/mininet_test_base.py
|
#!/usr/bin/env python3
"""Base class for all FAUCET unit tests."""
# pylint: disable=missing-function-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-lines
from functools import partial
import collections
import copy
import glob
import ipaddress
import json
import os
import random
import re
import shutil
import string
import subprocess
import tempfile
import time
import unittest
import yaml
import netaddr
import requests
from ryu.ofproto import ofproto_v1_3 as ofp
from mininet.link import Intf as HWIntf # pylint: disable=import-error
from mininet.log import error, output # pylint: disable=import-error
from mininet.net import Mininet
from mininet.util import dumpNodeConnections, pmonitor # pylint: disable=import-error
from clib import mininet_test_util
from clib import mininet_test_topo
from clib.mininet_test_topo import FaucetLink
from clib.tcpdump_helper import TcpdumpHelper
MAX_TEST_VID = 512
OFPVID_PRESENT = 0x1000
MIN_FLAP_TIME = 1
PEER_BGP_AS = 2**16 + 1
IPV4_ETH = 0x0800
IPV6_ETH = 0x86dd
FPING_ARGS = '-s -T 1 -A'
class FaucetTestBase(unittest.TestCase):
"""Base class for all FAUCET unit tests."""
# Number of Faucet controllers to create
NUM_FAUCET_CONTROLLERS = 2
# Number of Gauge controllers to create
NUM_GAUGE_CONTROLLERS = 1
# List of switches (by switch index) to ignore (treating them as outside the Faucet network)
IGNORED_SWITCHES = []
CONTROLLER_CLASS = mininet_test_topo.FAUCET
DP_NAME = 'faucet-1'
_PROM_LINE_RE = re.compile(r'^(.+)\s+([0-9\.\-\+e]+)$')
ONE_GOOD_PING = '1 packets transmitted, 1 received, 0% packet loss'
FAUCET_VIPV4 = ipaddress.ip_interface('10.0.0.254/24')
FAUCET_VIPV4_2 = ipaddress.ip_interface('172.16.0.254/24')
FAUCET_VIPV6 = ipaddress.ip_interface('fc00::1:254/112')
FAUCET_VIPV6_2 = ipaddress.ip_interface('fc01::1:254/112')
OFCTL = 'ovs-ofctl -OOpenFlow13'
VSCTL = 'ovs-vsctl'
OVS_TYPE = 'kernel'
BOGUS_MAC = '01:02:03:04:05:06'
FAUCET_MAC = '0e:00:00:00:00:01'
LADVD = 'ladvd -e lo -t -f'
ONEMBPS = (1024 * 1024)
DB_TIMEOUT = 5
STAT_RELOAD = ''
EVENT_SOCK_HEARTBEAT = ''
CONFIG = ''
CONFIG_GLOBAL = ''
GAUGE_CONFIG_DBS = ''
LOG_LEVEL = 'INFO'
N_UNTAGGED = 0
N_TAGGED = 0
N_EXTENDED = 0
EXTENDED_CLS = None
NUM_DPS = 1
LINKS_PER_HOST = 1
SOFTWARE_ONLY = False
NETNS = False
EVENT_LOGGER_TIMEOUT = 120
FPING_ARGS = FPING_ARGS
FPING_ARGS_SHORT = ' '.join((FPING_ARGS, '-i10 -p100 -t100'))
FPINGS_ARGS_ONE = ' '.join(('fping', FPING_ARGS, '-t100 -c 1'))
REQUIRES_METERS = False
REQUIRES_METADATA = False
_PORT_ACL_TABLE = 0
_VLAN_TABLE = 1
_COPRO_TABLE = 2
_VLAN_ACL_TABLE = 3
_ETH_SRC_TABLE = 4
_IPV4_FIB_TABLE = 5
_IPV6_FIB_TABLE = 6
_VIP_TABLE = 7
_ETH_DST_HAIRPIN_TABLE = 8
_ETH_DST_TABLE = 9
_FLOOD_TABLE = 10
# Standard Gauge port counters.
PORT_VARS = {
'of_port_rx_bytes',
'of_port_tx_bytes',
'of_port_rx_packets',
'of_port_tx_packets',
}
faucet_controllers = None
faucet_of_ports = None
faucet_prom_ports = None
faucet_config_path = None
gauge_controllers = None
gauge_of_ports = None
gauge_controller = None
gauge_of_port = None
config = None
dpid = None
hw_dpid = None
hardware = 'Open vSwitch'
hw_switch = False
prom_port = None
net = None
of_port = None
ctl_privkey = None
ctl_cert = None
ca_certs = None
port_map = {}
switch_map = {}
tmpdir = None
net = None
topo = None
cpn_intf = None
cpn_ipv6 = False
config_ports = {}
event_sock_dir = None
event_socks = []
event_log = None
rand_dpids = set()
def __init__(self, name, config, root_tmpdir, ports_sock, max_test_load,
port_order=None, start_port=None):
super().__init__(name)
self.env = collections.defaultdict(dict)
self.faucet_controllers = []
self.faucet_of_ports = []
self.faucet_prom_ports = []
self.gauge_controllers = []
self.gauge_of_ports = []
self.config = config
self.root_tmpdir = root_tmpdir
self.ports_sock = ports_sock
self.max_test_load = max_test_load
self.port_order = port_order
self.start_port = start_port
self.start_time = None
self.dpid_names = None
self.event_log = None
self.prev_event_id = None
def hosts_name_ordered(self):
"""Return hosts in strict name only order."""
return sorted(self.net.hosts, key=lambda host: host.name)
def switches_name_ordered(self):
"""Return switches in strict name only order."""
return sorted(self.net.switches, key=lambda switch: switch.name)
def first_switch(self):
"""Return first switch by name order."""
if not self.switches_name_ordered():
return None
return self.switches_name_ordered()[0]
def rand_dpid(self):
"""Return a random unused DPID"""
reserved_range = 100
while True:
dpid = random.randint(1, (2**32 - reserved_range)) + reserved_range
if dpid not in self.rand_dpids:
self.rand_dpids.add(dpid)
return str(dpid)
def _set_var(self, controller, var, value):
"""Set controller environment variable to value"""
self.env[controller][var] = value
def _set_vars(self):
"""Set controller additional variables"""
for c_index in range(self.NUM_FAUCET_CONTROLLERS):
self._set_var('faucet-%s' % c_index, 'FAUCET_PROMETHEUS_PORT',
str(self.faucet_prom_ports[c_index]))
def _set_var_path(self, controller, var, path):
"""Update environment variable that is a file path to the correct tmpdir"""
self._set_var(controller, var, os.path.join(self.tmpdir, path))
def _set_static_vars(self):
"""Set static environment variables"""
if self.event_sock_dir and os.path.exists(self.event_sock_dir):
shutil.rmtree(self.event_sock_dir)
self.event_sock_dir = tempfile.mkdtemp()
self.event_socks = []
for c_index in range(self.NUM_FAUCET_CONTROLLERS):
event_sock = os.path.join(self.event_sock_dir, 'event-%s.sock' % c_index)
self.event_socks.append(event_sock)
self._set_var('faucet-%s' % c_index, 'FAUCET_LOG_LEVEL', str(self.LOG_LEVEL))
self._set_var('faucet-%s' % c_index, 'FAUCET_CONFIG_STAT_RELOAD', self.STAT_RELOAD)
self._set_var('faucet-%s' % c_index, 'FAUCET_EVENT_SOCK', event_sock)
self._set_var('faucet-%s' % c_index, 'FAUCET_EVENT_SOCK_HEARTBEAT',
self.EVENT_SOCK_HEARTBEAT)
self._set_var('faucet-%s' % c_index, 'FAUCET_PROMETHEUS_ADDR',
mininet_test_util.LOCALHOSTV6)
self._set_var_path('faucet-%s' % c_index, 'FAUCET_CONFIG', 'faucet.yaml')
self._set_var_path('faucet-%s' % c_index, 'FAUCET_LOG', 'faucet-%s.log' % c_index)
self._set_var_path('faucet-%s' % c_index, 'FAUCET_EXCEPTION_LOG',
'faucet-%s-exception.log' % c_index)
for c_index in range(self.NUM_GAUGE_CONTROLLERS):
self._set_var_path('gauge-%s' % c_index, 'GAUGE_CONFIG', 'gauge.yaml')
self._set_var_path('gauge-%s' % c_index, 'GAUGE_LOG', 'gauge-%s.log' % c_index)
self._set_var_path('gauge-%s' % c_index, 'GAUGE_EXCEPTION_LOG',
'gauge-%s-exception.log' % c_index)
self.faucet_config_path = self.env['faucet-0']['FAUCET_CONFIG']
self.gauge_config_path = self.env['gauge-0']['GAUGE_CONFIG']
self.debug_log_path = os.path.join(
self.tmpdir, 'ofchannel.txt')
self.monitor_stats_file = os.path.join(
self.tmpdir, 'gauge-ports.txt')
self.monitor_state_file = os.path.join(
self.tmpdir, 'gauge-state.txt')
self.monitor_flow_table_dir = os.path.join(
self.tmpdir, 'gauge-flow')
self.monitor_meter_stats_file = os.path.join(
self.tmpdir, 'gauge-meter.txt')
os.mkdir(self.monitor_flow_table_dir)
if self.config is not None:
if 'hw_switch' in self.config:
self.hw_switch = self.config['hw_switch']
if self.hw_switch:
self.dpid = self.config['dpid']
self.cpn_intf = self.config['cpn_intf']
if 'cpn_ipv6' in self.config:
self.cpn_ipv6 = self.config['cpn_ipv6']
self.hardware = self.config['hardware']
if 'ctl_privkey' in self.config:
self.ctl_privkey = self.config['ctl_privkey']
if 'ctl_cert' in self.config:
self.ctl_cert = self.config['ctl_cert']
if 'ca_certs' in self.config:
self.ca_certs = self.config['ca_certs']
dp_ports = self.config['dp_ports']
self.switch_map = dp_ports.copy()
def _enable_event_log(self, timeout=None):
"""Enable analsis of event log contents by copying events to a local log file"""
assert not self.event_log, 'event_log already enabled'
if not timeout:
timeout = self.EVENT_LOGGER_TIMEOUT
self.event_log = os.path.join(self.tmpdir, 'event.log')
self.prev_event_id = 0
controller = self._get_controller()
sock = self.env[self.faucet_controllers[0].name]['FAUCET_EVENT_SOCK']
# Relying on a timeout seems a bit brittle;
# as an alternative we might possibly use something like
# `with popen(cmd...) as proc` to clean up on exceptions
controller.cmd(mininet_test_util.timeout_cmd(
'nc -U %s > %s &' % (sock, self.event_log), timeout))
# pylint: disable=inconsistent-return-statements
def _wait_until_matching_event(self, match_func, timeout=30):
"""Return the next matching event from the event sock, else fail"""
assert timeout >= 1
assert self.event_log and os.path.exists(self.event_log)
for _ in range(timeout):
with open(self.event_log) as events:
for event_str in events:
event = json.loads(event_str)
event_id = event['event_id']
if event_id <= self.prev_event_id:
continue
self.prev_event_id = event_id
try:
if match_func(event):
return event
except KeyError:
pass # Allow for easy dict traversal.
time.sleep(1)
self.fail('matching event not found in event stream')
@staticmethod
def _read_yaml(yaml_path):
with open(yaml_path) as yaml_file:
content = yaml.safe_load(yaml_file.read())
return content
def _get_faucet_conf(self):
"""Return the yaml content from the config file"""
return self._read_yaml(self.faucet_config_path)
@staticmethod
def _annotate_interfaces_conf(yaml_conf):
"""Consistently name interface names/descriptions."""
if 'dps' not in yaml_conf:
return yaml_conf
yaml_conf_remap = copy.deepcopy(yaml_conf)
for dp_key, dp_yaml in yaml_conf['dps'].items():
interfaces_yaml = dp_yaml.get('interfaces', None)
if interfaces_yaml is not None:
remap_interfaces_yaml = {}
for intf_key, orig_intf_conf in interfaces_yaml.items():
intf_conf = copy.deepcopy(orig_intf_conf)
port_no = None
if isinstance(intf_key, int):
port_no = intf_key
number = intf_conf.get('number', port_no)
if isinstance(number, int):
port_no = number
assert isinstance(number, int), '%u %s' % (intf_key, orig_intf_conf)
intf_name = 'b%u' % port_no
intf_conf.update({'name': intf_name, 'description': intf_name})
remap_interfaces_yaml[intf_key] = intf_conf
yaml_conf_remap['dps'][dp_key]['interfaces'] = remap_interfaces_yaml
return yaml_conf_remap
@staticmethod
def _write_yaml_conf(yaml_path, yaml_conf):
assert isinstance(yaml_conf, dict)
new_conf_str = yaml.dump(yaml_conf).encode()
with tempfile.NamedTemporaryFile(
prefix=os.path.basename(yaml_path),
dir=os.path.dirname(yaml_path),
delete=False) as conf_file_tmp:
conf_file_tmp_name = conf_file_tmp.name
conf_file_tmp.write(new_conf_str)
with open(conf_file_tmp_name, 'rb') as conf_file_tmp:
conf_file_tmp_str = conf_file_tmp.read()
assert new_conf_str == conf_file_tmp_str
if os.path.exists(yaml_path):
shutil.copyfile(yaml_path, '%s.%f' % (yaml_path, time.time()))
os.rename(conf_file_tmp_name, yaml_path)
def _init_faucet_config(self):
faucet_config = '\n'.join((
self.get_config_header(
self.CONFIG_GLOBAL,
self.debug_log_path, self.dpid, self.hardware),
self.CONFIG))
config_vars = {}
for config_var in (self.config_ports, self.port_map):
config_vars.update(config_var)
faucet_config = faucet_config % config_vars
yaml_conf = self._annotate_interfaces_conf(yaml.safe_load(faucet_config))
self._write_yaml_conf(self.faucet_config_path, yaml_conf)
def _init_gauge_config(self):
gauge_config = self.get_gauge_config(
self.faucet_config_path,
self.monitor_stats_file,
self.monitor_state_file,
self.monitor_flow_table_dir)
if self.config_ports:
gauge_config = gauge_config % self.config_ports
self._write_yaml_conf(self.gauge_config_path, yaml.safe_load(gauge_config))
def _test_name(self):
return mininet_test_util.flat_test_name(self.id())
def _tmpdir_name(self):
tmpdir = os.path.join(self.root_tmpdir, self._test_name())
os.mkdir(tmpdir)
return tmpdir
def _wait_load(self, load_retries=10):
for _ in range(load_retries):
time.sleep(random.randint(1, 5))
load = os.getloadavg()[0]
if load < self.max_test_load:
return
output('load average too high %f, waiting' % load)
self.fail('load average %f consistently too high' % load)
def _allocate_config_ports(self):
for port_name in self.config_ports:
self.config_ports[port_name] = None
for config in (self.CONFIG, self.CONFIG_GLOBAL, self.GAUGE_CONFIG_DBS):
if re.search(port_name, config):
port = mininet_test_util.find_free_port(
self.ports_sock, self._test_name())
self.config_ports[port_name] = port
output('allocating port %u for %s' % (port, port_name))
def _allocate_faucet_ports(self):
for c_index in range(self.NUM_FAUCET_CONTROLLERS):
if self.hw_switch and c_index == 0:
of_port = self.config['of_port']
else:
of_port = mininet_test_util.find_free_port(
self.ports_sock, self._test_name())
prom_port = mininet_test_util.find_free_port(
self.ports_sock, self._test_name())
self.faucet_of_ports.append(of_port)
self.faucet_prom_ports.append(prom_port)
self.of_port = self.faucet_of_ports[0]
self.prom_port = self.faucet_prom_ports[0]
def _allocate_gauge_ports(self):
for c_index in range(self.NUM_GAUGE_CONTROLLERS):
if self.hw_switch and c_index == 0:
of_port = self.config['gauge_of_port']
else:
of_port = mininet_test_util.find_free_port(
self.ports_sock, self._test_name())
self.gauge_of_ports.append(of_port)
self.gauge_of_port = self.gauge_of_ports[0]
def _stop_net(self):
if self.net is not None:
for switch in self.net.switches:
switch.cmd(
self.VSCTL, 'del-controller', switch.name, '|| true')
self.net.stop()
def setUp(self):
if self.config and 'hw_switch' in self.config:
# Simulating/running hardware switches so only 1 controller configured
# TODO: Handle multiple controllers with hardware tests
self.NUM_FAUCET_CONTROLLERS = 1
self.start_time = time.time()
self.tmpdir = self._tmpdir_name()
self._set_static_vars()
self.topo_class = partial(
mininet_test_topo.FaucetSwitchTopo, port_order=self.port_order,
switch_map=self.switch_map, start_port=self.start_port)
if self.hw_switch:
self.hw_dpid = mininet_test_util.str_int_dpid(self.dpid)
self.dpid = self.hw_dpid
else:
self.dpid = self.rand_dpid()
@staticmethod
def hostns(host):
return '%s' % host.name
def dump_switch_flows(self, switch):
"""Dump switch information to tmpdir"""
for dump_cmd in (
'dump-flows', 'dump-groups', 'dump-meters',
'dump-group-stats', 'dump-ports', 'dump-ports-desc',
'meter-stats'):
switch_dump_name = os.path.join(self.tmpdir, '%s-%s.log' % (switch.name, dump_cmd))
# TODO: occasionally fails with socket error.
switch.cmd('%s %s %s > %s' % (self.OFCTL, dump_cmd, switch.name, switch_dump_name),
success=None)
for other_cmd in ('show', 'list controller', 'list manager'):
other_dump_name = os.path.join(self.tmpdir, '%s.log' % other_cmd.replace(' ', ''))
switch.cmd('%s %s > %s' % (self.VSCTL, other_cmd, other_dump_name))
# pylint: disable=arguments-differ
def tearDown(self, ignore_oferrors=False):
"""Clean up after a test.
ignore_oferrors: return OF errors rather than failing"""
if self.NETNS:
for host in self.hosts_name_ordered()[:1]:
if self.get_host_netns(host):
self.quiet_commands(host, ['ip netns del %s' % self.hostns(host)])
first_switch = self.first_switch()
if first_switch:
self.first_switch().cmd('ip link > %s' % os.path.join(self.tmpdir, 'ip-links.log'))
switch_names = []
for switch in self.net.switches:
switch_names.append(switch.name)
self.dump_switch_flows(switch)
switch.cmd('%s del-br %s' % (self.VSCTL, switch.name))
self._stop_net()
self.net = None
if self.event_sock_dir and os.path.exists(self.event_sock_dir):
shutil.rmtree(self.event_sock_dir)
mininet_test_util.return_free_ports(
self.ports_sock, self._test_name())
if 'OVS_LOGDIR' in os.environ:
ovs_log_dir = os.environ['OVS_LOGDIR']
if ovs_log_dir and os.path.exists(ovs_log_dir):
for ovs_log in glob.glob(os.path.join(ovs_log_dir, '*.log')):
lines = []
for name in switch_names:
lines.extend(self.matching_lines_from_file(name, ovs_log))
if lines:
switch_ovs_log_name = os.path.join(self.tmpdir, os.path.basename(ovs_log))
with open(switch_ovs_log_name, 'w') as switch_ovs_log:
switch_ovs_log.write('\n'.join(lines))
with open(os.path.join(self.tmpdir, 'test_duration_secs'), 'w') as duration_file:
duration_file.write(str(int(time.time() - self.start_time)))
# Must not be any controller exception.
for controller_env in self.env.values():
if 'FAUCET_EXCEPTION_LOG' in controller_env:
self.verify_no_exception(controller_env['FAUCET_EXCEPTION_LOG'])
if 'GAUGE_EXCEPTION_LOG' in controller_env:
self.verify_no_exception(controller_env['GAUGE_EXCEPTION_LOG'])
oferrors = ''
for controller_env in self.env.values():
if 'FAUCET_LOG' in controller_env:
logfile = controller_env['FAUCET_LOG']
elif 'GAUGE_LOG' in controller_env:
logfile = controller_env['GAUGE_LOG']
oldlogfile = '.'.join((logfile, 'old'))
if os.path.exists(oldlogfile):
logfile = oldlogfile
# Verify version is logged.
self.assertTrue(
self.matching_lines_from_file(r'^.+version\s+(\S+)$', logfile),
msg='no version logged in %s' % logfile)
# Verify no OFErrors.
oferrors += '\n\n'.join(self.matching_lines_from_file(r'^.+(OFError.+)$', logfile))
if not ignore_oferrors:
self.assertFalse(oferrors, msg=oferrors)
return oferrors
def _block_non_faucet_packets(self):
def _cmd(cmd):
with subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc:
stdout, stderr = proc.communicate()
self.assertFalse(stdout, msg='%s: %s' % (stdout, cmd))
self.assertFalse(stderr, msg='%s: %s' % (stderr, cmd))
_cmd('ebtables --f OUTPUT')
for phys_port in self.switch_map.values():
phys_mac = self.get_mac_of_intf(phys_port)
for cmd in (
'ip link set dev %s up' % phys_port,
'ip -4 addr flush dev %s' % phys_port,
'ip -6 addr flush dev %s' % phys_port,
'ebtables -A OUTPUT -s %s -o %s -j DROP' % (phys_mac, phys_port)):
_cmd(cmd)
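    # Illustrative effect of _block_non_faucet_packets() (assumed example values):
    # for a physical test port 'enp1s0f0' with MAC aa:bb:cc:dd:ee:ff, the commands
    # above bring the link up, flush its IPv4/IPv6 addresses, and append an ebtables
    # rule dropping frames sourced from that (test-host-local) MAC on that interface,
    # so host-side traffic never leaks onto the hardware dataplane under test.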
def _attach_physical_switch(self):
"""Bridge a physical switch into test topology.
We do this for now to enable us to reconnect
virtual ethernet interfaces which may already
exist on emulated hosts and other OVS instances.
(One alternative would be to create a Link() class
that uses the hardware interfaces directly.)
We repurpose the first OvS switch in the topology
as a patch panel that transparently connects the
hardware interfaces to the host/switch veth links."""
switch = self.first_switch()
if not switch:
return
# hw_names are the names of the server hardware interfaces
# that are cabled to the device under test, sorted by OF port number
hw_names = [self.switch_map[port] for port in sorted(self.switch_map)]
hw_macs = set()
# ovs_ports are the (sorted) OF port numbers of the OvS interfaces
# that are already attached to the emulated network.
# The actual tests reorder them according to port_map
ovs_ports = sorted(self.topo.switch_ports[switch.name])
        # Patch hardware interfaces through to OvS interfaces
for hw_name, ovs_port in zip(hw_names, ovs_ports):
# Note we've already removed any Linux IP addresses from hw_name
# and blocked traffic to/from its meaningless MAC
hw_mac = self.get_mac_of_intf(hw_name)
self.assertFalse(hw_mac in hw_macs,
'duplicate hardware MAC %s' % hw_mac)
hw_macs.add(hw_mac)
# Create mininet Intf and attach it to the switch
hw_intf = HWIntf(hw_name, node=switch)
switch.attach(hw_intf)
hw_port = switch.ports[hw_intf]
# Connect hw_port <-> ovs_port
src, dst = hw_port, ovs_port
for flow in (
# Drop anything to or from the meaningless hw_mac
'eth_src=%s,priority=2,actions=drop' % hw_mac,
'eth_dst=%s,priority=2,actions=drop' % hw_mac,
# Forward traffic bidirectionally src <-> dst
'in_port=%u,priority=1,actions=output:%u' % (src, dst),
'in_port=%u,priority=1,actions=output:%u' % (dst, src)):
switch.cmd(self.OFCTL, 'add-flow', switch, flow)
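    # Illustrative patch-panel flows (assumed port numbers): if a hardware interface
    # attaches as OF port 5 and its paired OvS veth is OF port 1, the flows added above
    # are 'in_port=5,priority=1,actions=output:1' and 'in_port=1,priority=1,actions=output:5',
    # plus priority-2 drops for frames to/from the hardware interface's own MAC.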
def create_port_map(self, dpid):
"""Return a port map {'port_1': port...} for a dpid in self.topo"""
ports = self.topo.dpid_ports(dpid)
port_map = {'port_%d' % i: port for i, port in enumerate(ports, start=1)}
return port_map
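    # Illustrative result (assumed topology): if self.topo reports ports [1, 2, 3]
    # for the dpid, create_port_map() returns {'port_1': 1, 'port_2': 2, 'port_3': 3};
    # tests then look up OF port numbers via these logical 'port_N' keys.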
def start_net(self):
"""Start Mininet network."""
controller_intf = 'lo'
controller_ipv6 = False
if self.hw_switch:
controller_intf = self.cpn_intf
controller_ipv6 = self.cpn_ipv6
if not self.port_map:
# Sometimes created in build_net for config purposes, sometimes not
self.port_map = self.create_port_map(self.dpid)
self._block_non_faucet_packets()
self._start_faucet(controller_intf, controller_ipv6)
if self.hw_switch:
self._attach_physical_switch()
self._wait_debug_log()
for port_no in self._dp_ports():
self.set_port_up(port_no, wait=False)
dumpNodeConnections(self.hosts_name_ordered())
self.reset_all_ipv4_prefix(prefix=24)
def _get_controller(self):
"""Return first available controller."""
for controller in self.net.controllers:
if isinstance(controller, mininet_test_topo.FAUCET):
return controller
return self.net.controllers[0]
@staticmethod
def _start_gauge_check():
return None
def _start_check(self):
# '_wait_controllers_connected' also checks the 'healthy' state
if not self._wait_controllers_connected():
return 'not all controllers connected to switch'
if not self._wait_ofctl_up():
return 'ofctl not up'
if not self.wait_dp_status(1):
return 'prometheus port not up'
if not self._wait_controllers_healthy():
return 'not all controllers healthy after initial switch connection'
if self.config_ports:
for port_name, port in self.config_ports.items():
if port is not None and not port_name.startswith('gauge'):
if not self._get_controller().listen_port(port):
return 'faucet not listening on %u (%s)' % (
port, port_name)
return self._start_gauge_check()
def _create_faucet_controller(self, index, intf, ipv6):
port = self.faucet_of_ports[index]
name = 'faucet-%s' % index
faucet_controller = self.CONTROLLER_CLASS(
name=name, tmpdir=self.tmpdir,
controller_intf=intf,
controller_ipv6=ipv6,
env=self.env[name],
ctl_privkey=self.ctl_privkey,
ctl_cert=self.ctl_cert,
ca_certs=self.ca_certs,
ports_sock=self.ports_sock,
prom_port=self.get_prom_port(name),
port=port,
test_name=self._test_name())
self.env[faucet_controller.name] = self.env.pop(name)
return faucet_controller
def _create_gauge_controller(self, index, intf, ipv6):
port = self.gauge_of_ports[index]
name = 'gauge-%s' % index
gauge_controller = mininet_test_topo.Gauge(
name=name, tmpdir=self.tmpdir,
env=self.env[name],
controller_intf=intf,
controller_ipv6=ipv6,
ctl_privkey=self.ctl_privkey,
ctl_cert=self.ctl_cert,
ca_certs=self.ca_certs,
port=port)
self.env[gauge_controller.name] = self.env.pop(name)
return gauge_controller
def _start_faucet(self, controller_intf, controller_ipv6):
        self.assertIsNone(self.net, 'Cannot invoke _start_faucet() multiple times')
self.assertTrue(self.NUM_FAUCET_CONTROLLERS > 0, 'Define at least 1 Faucet controller')
self.assertTrue(self.NUM_GAUGE_CONTROLLERS > 0, 'Define at least 1 Gauge controller')
for log in glob.glob(os.path.join(self.tmpdir, '*.log')):
os.remove(log)
# Setup all static configuration
self._allocate_config_ports()
self._allocate_faucet_ports()
self._allocate_gauge_ports()
self._set_vars()
self._init_faucet_config()
self._init_gauge_config()
# Create all the controller instances
self.faucet_controllers = []
for c_index in range(self.NUM_FAUCET_CONTROLLERS):
controller = self._create_faucet_controller(c_index,
controller_intf,
controller_ipv6)
self.faucet_controllers.append(controller)
self.gauge_controllers = []
for c_index in range(self.NUM_GAUGE_CONTROLLERS):
controller = self._create_gauge_controller(c_index,
controller_intf,
controller_ipv6)
self.gauge_controllers.append(controller)
# Use the first Gauge instance for Prometheus scraping
self.gauge_controller = self.gauge_controllers[0]
self._wait_load()
last_error_txt = None
for _ in range(3):
# Start Mininet, connected to the first controller
self.net = Mininet(
self.topo,
link=FaucetLink,
controller=self.faucet_controllers[0])
# Add all the remaining Faucet controllers
# and all the Gauge controllers to the network
for controller in self.faucet_controllers[1:]:
self.net.addController(controller)
for controller in self.gauge_controllers:
self.net.addController(controller)
# Now that all the controllers are running
# and connected, start the Mininet network
self.pre_start_net()
self.net.start()
self._wait_load()
last_error_txt = self._start_check()
if last_error_txt is None:
break
# Existing controllers will be reused on the next cycle
self._stop_net()
last_error_txt += '\n\n' + self._dump_controller_logs()
error('%s: %s' % (self._test_name(), last_error_txt))
time.sleep(mininet_test_util.MIN_PORT_AGE)
if last_error_txt is not None:
self.fail(last_error_txt)
# All controllers are OK, so prepare to keep running the test
self._config_tableids()
self._wait_load()
if self.NETNS:
# TODO: seemingly can't have more than one namespace.
for host in self.hosts_name_ordered()[:1]:
hostns = self.hostns(host)
if self.get_host_netns(host):
self.quiet_commands(host, ['ip netns del %s' % hostns])
self.quiet_commands(host, ['ip netns add %s' % hostns])
self.post_start_net()
def _ofctl_rest_url(self, req):
"""Return control URL for Ryu ofctl module."""
return 'http://[%s]:%u/%s' % (
mininet_test_util.LOCALHOSTV6, self._get_controller().ofctl_port, req)
@staticmethod
def _ofctl(req, params=None):
if params is None:
params = {}
try:
ofctl_result = requests.get(req, params=params).json()
except requests.exceptions.ConnectionError:
return None
return ofctl_result
def _ofctl_up(self):
switches = self._ofctl(self._ofctl_rest_url('stats/switches'))
return isinstance(switches, list) and switches
def _wait_ofctl_up(self, timeout=10):
for _ in range(timeout):
if self._ofctl_up():
return True
time.sleep(1)
return False
def _ofctl_post(self, int_dpid, req, timeout, params=None):
for _ in range(timeout):
try:
ofctl_result = requests.post(
self._ofctl_rest_url(req),
json=params).json()
return ofctl_result[int_dpid]
except (ValueError, TypeError, requests.exceptions.ConnectionError):
# Didn't get valid JSON, try again
time.sleep(1)
continue
return []
def _ofctl_get(self, int_dpid, req, timeout, params=None):
for _ in range(timeout):
ofctl_result = self._ofctl(self._ofctl_rest_url(req), params=params)
try:
return ofctl_result[int_dpid]
except (ValueError, TypeError):
# Didn't get valid JSON, try again
time.sleep(1)
continue
return []
def _portmod(self, int_dpid, port_no, config, mask):
result = requests.post(
self._ofctl_rest_url('stats/portdesc/modify'),
json={'dpid': str(int_dpid), 'port_no': str(port_no),
'config': str(config), 'mask': str(mask)})
# ofctl doesn't use barriers, so cause port_mod to be sent.
self.get_port_stats_from_dpid(int_dpid, port_no)
return result
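    # Illustrative request (assumed values): _portmod(1, 3, config=1, mask=1) POSTs
    # {'dpid': '1', 'port_no': '3', 'config': '1', 'mask': '1'} to the Ryu ofctl
    # 'stats/portdesc/modify' endpoint (1 being OFPPC_PORT_DOWN in OpenFlow 1.3),
    # then reads port stats to flush the port_mod out, since ofctl sends no barrier.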
@staticmethod
def _signal_proc_on_port(host, port, signal):
tcp_pattern = '%s/tcp' % port
fuser_out = host.cmd('fuser %s -k -%u' % (tcp_pattern, signal))
return re.search(r'%s:\s+\d+' % tcp_pattern, fuser_out)
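    # Illustrative example (assumed values): _signal_proc_on_port(host, 6653, 1) runs
    # 'fuser 6653/tcp -k -1' on the host, sending signal 1 (SIGHUP) to whatever
    # process listens on TCP 6653, and returns a regex match only if fuser reported
    # a PID for that port.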
def _get_ofchannel_logs(self):
ofchannel_logs = []
config = self._get_faucet_conf()
for dp_name, dp_config in config['dps'].items():
if 'ofchannel_log' in dp_config:
debug_log = dp_config['ofchannel_log']
ofchannel_logs.append((dp_name, debug_log))
return ofchannel_logs
def _dump_controller_logs(self):
dump_txt = ''
test_logs = glob.glob(os.path.join(self.tmpdir, '*.log'))
for controller in self.net.controllers:
for test_log_name in test_logs:
basename = os.path.basename(test_log_name)
if basename.startswith(controller.name):
with open(test_log_name) as test_log:
dump_txt += '\n'.join((
'',
basename,
'=' * len(basename),
'',
test_log.read()))
break
return dump_txt
def _controllers_healthy(self):
for controller in self.net.controllers:
if not controller.healthy():
return False
for c_index in range(self.NUM_FAUCET_CONTROLLERS):
event_sock = self.event_socks[c_index]
if event_sock and not os.path.exists(event_sock):
error('event socket %s not created\n' % event_sock)
return False
return True
def _controllers_connected(self):
for controller in self.net.controllers:
if not controller.connected():
return False
return True
def _wait_controllers_healthy(self, timeout=30):
for _ in range(timeout):
if self._controllers_healthy():
return True
time.sleep(1)
return False
def _wait_controllers_connected(self, timeout=30):
for _ in range(timeout):
if self._controllers_connected():
return True
time.sleep(1)
return False
def _wait_debug_log(self):
"""Require all switches to have exchanged flows with controller."""
ofchannel_logs = self._get_ofchannel_logs()
for _, debug_log in ofchannel_logs:
for _ in range(60):
if (os.path.exists(debug_log)
and os.path.getsize(debug_log) > 0):
return True
time.sleep(1)
return False
def verify_no_exception(self, exception_log_name):
if not os.path.exists(exception_log_name):
return
with open(exception_log_name) as exception_log:
exception_contents = exception_log.read()
self.assertEqual(
'',
exception_contents,
msg='%s log contains %s' % (
exception_log_name, exception_contents))
@staticmethod
def tcpdump_helper(*args, **kwargs):
return TcpdumpHelper(*args, **kwargs).execute()
@staticmethod
def scapy_template(packet, iface, count=1):
return ('python3 -c \"from scapy.all import * ; sendp(%s, iface=\'%s\', count=%u)"' % (
packet, iface, count))
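    # Illustrative rendering (assumed arguments): scapy_template("Ether()/IP()", 'eth0', 2)
    # returns the shell command
    #   python3 -c "from scapy.all import * ; sendp(Ether()/IP(), iface='eth0', count=2)"
    # which tests run on a Mininet host to inject packets on a given interface.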
def scapy_base_udp(self, mac, iface, src_ip, dst_ip, dport, sport, count=1, dst=None):
if dst is None:
dst = 'ff:ff:ff:ff:ff:ff'
return self.scapy_template(
('Ether(dst=\'%s\', src=\'%s\', type=%u) / '
'IP(src=\'%s\', dst=\'%s\') / UDP(dport=%s,sport=%s) ' % (
dst, mac, IPV4_ETH, src_ip, dst_ip, dport, sport)),
iface, count)
def scapy_dhcp(self, mac, iface, count=1, dst=None):
if dst is None:
dst = 'ff:ff:ff:ff:ff:ff'
return self.scapy_template(
('Ether(dst=\'%s\', src=\'%s\', type=%u) / '
'IP(src=\'0.0.0.0\', dst=\'255.255.255.255\') / UDP(dport=67,sport=68) / '
'BOOTP(op=1) / DHCP(options=[(\'message-type\', \'discover\'), (\'end\')])') % (
dst, mac, IPV4_ETH),
iface, count)
def scapy_icmp(self, mac, iface, src_ip, dst_ip, count=1, dst=None):
if dst is None:
dst = 'ff:ff:ff:ff:ff:ff'
return self.scapy_template(
('Ether(dst=\'%s\', src=\'%s\', type=%u) / '
'IP(src=\'%s\', dst=\'%s\') / ICMP()') % (
dst, mac, IPV4_ETH, src_ip, dst_ip),
iface, count)
def scapy_dscp(self, src_mac, dst_mac, dscp_value, iface, count=1):
# creates a packet with L2-L4 headers using scapy
return self.scapy_template(
('Ether(dst=\'%s\', src=\'%s\', type=%u) / '
'IP(src=\'0.0.0.0\', dst=\'255.255.255.255\', tos=%s) / UDP(dport=67,sport=68) / '
'BOOTP(op=1)') % (
dst_mac, src_mac, IPV4_ETH, dscp_value),
iface, count)
def scapy_bcast(self, host, count=1):
return self.scapy_dhcp(host.MAC(), host.defaultIntf(), count)
@staticmethod
def pre_start_net():
"""Hook called after Mininet initialization, before Mininet started."""
return
@staticmethod
def post_start_net():
"""Hook called after Mininet initialization, and after Mininet started."""
return
def get_config_header(self, config_global, debug_log, dpid, hardware):
"""Build v2 FAUCET config header."""
return """
%s
dps:
%s:
ofchannel_log: %s
dp_id: 0x%x
hardware: "%s"
cookie: %u
""" % (config_global, self.DP_NAME, debug_log,
int(dpid), hardware, random.randint(1, 2**64 - 1))
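    # Illustrative header (assumed values): with an empty config_global, DP_NAME
    # 'faucet-1', dpid 0x1 and hardware 'Open vSwitch', the returned YAML declares a
    # 'dps:' stanza for faucet-1 with its ofchannel_log path, dp_id 0x1, the quoted
    # hardware string and a random 64-bit cookie.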
def get_gauge_watcher_config(self):
return """
port_stats:
dps: ['%s']
type: 'port_stats'
interval: 5
db: 'stats_file'
port_state:
dps: ['%s']
type: 'port_state'
interval: 5
db: 'state_file'
flow_table:
dps: ['%s']
type: 'flow_table'
interval: 5
db: 'flow_dir'
""" % (self.DP_NAME, self.DP_NAME, self.DP_NAME)
def get_gauge_config(self, faucet_config_file,
monitor_stats_file,
monitor_state_file,
monitor_flow_table_dir):
"""Build Gauge config."""
return """
faucet_configs:
- %s
watchers:
%s
dbs:
stats_file:
type: 'text'
file: %s
state_file:
type: 'text'
file: %s
flow_dir:
type: 'text'
path: %s
%s
""" % (faucet_config_file,
self.get_gauge_watcher_config(),
monitor_stats_file,
monitor_state_file,
monitor_flow_table_dir,
self.GAUGE_CONFIG_DBS)
@staticmethod
def get_exabgp_conf(peer, peer_config=''):
return """
neighbor %s {
router-id 2.2.2.2;
local-address %s;
connect %s;
peer-as 1;
local-as %s;
%s
}
""" % (peer, peer, '%(bgp_port)d', PEER_BGP_AS, peer_config)
def get_all_groups_desc_from_dpid(self, dpid, timeout=2):
int_dpid = mininet_test_util.str_int_dpid(dpid)
return self._ofctl_get(
int_dpid, 'stats/groupdesc/%s' % int_dpid, timeout)
def get_all_flows_from_dpid(self, dpid, table_id, timeout=10, match=None):
"""Return all flows from DPID."""
int_dpid = mininet_test_util.str_int_dpid(dpid)
params = {}
params['table_id'] = table_id
if match is not None:
params['match'] = match
return self._ofctl_post(
int_dpid, 'stats/flow/%s' % int_dpid, timeout, params=params)
@staticmethod
def _port_stat(port_stats, port):
if port_stats:
for port_stat in port_stats:
if port_stat['port_no'] == port:
return port_stat
return None
def get_port_stats_from_dpid(self, dpid, port, timeout=2):
"""Return port stats for a port."""
int_dpid = mininet_test_util.str_int_dpid(dpid)
port_stats = self._ofctl_get(
int_dpid, 'stats/port/%s/%s' % (int_dpid, port), timeout)
return self._port_stat(port_stats, port)
def get_port_desc_from_dpid(self, dpid, port, timeout=2):
"""Return port desc for a port."""
int_dpid = mininet_test_util.str_int_dpid(dpid)
port_stats = self._ofctl_get(
int_dpid, 'stats/portdesc/%s/%s' % (int_dpid, port), timeout)
return self._port_stat(port_stats, port)
def get_all_meters_from_dpid(self, dpid):
"""Return all meters from DPID"""
int_dpid = mininet_test_util.str_int_dpid(dpid)
return self._ofctl_get(
int_dpid, 'stats/meterconfig/%s' % int_dpid, timeout=10)
def wait_matching_in_group_table(self, action, group_id, timeout=10):
groupdump = os.path.join(self.tmpdir, 'groupdump-%s.txt' % self.dpid)
for _ in range(timeout):
group_dump = self.get_all_groups_desc_from_dpid(self.dpid, 1)
with open(groupdump, 'w') as groupdump_file:
for group_dict in group_dump:
groupdump_file.write(str(group_dict) + '\n')
if group_dict['group_id'] == group_id:
actions = set(group_dict['buckets'][0]['actions'])
if set([action]).issubset(actions):
return True
time.sleep(1)
return False
    # TODO: Should this have meter_confs as well, or can we just match meter_ids?
def get_matching_meters_on_dpid(self, dpid):
meterdump = os.path.join(self.tmpdir, 'meterdump-%s.log' % dpid)
meter_dump = self.get_all_meters_from_dpid(dpid)
with open(meterdump, 'w') as meterdump_file:
meterdump_file.write(str(meter_dump))
return meterdump
def get_matching_flows_on_dpid(self, dpid, match, table_id, timeout=10,
actions=None, hard_timeout=0, cookie=None,
ofa_match=True):
# TODO: Ryu ofctl serializes to old matches.
def to_old_match(match):
old_matches = {
'tcp_dst': 'tp_dst',
'ip_proto': 'nw_proto',
'eth_dst': 'dl_dst',
'eth_type': 'dl_type',
}
if match is not None:
for new_match, old_match in old_matches.items():
if new_match in match:
match[old_match] = match[new_match]
del match[new_match]
return match
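        # Illustrative mapping (assumed input): to_old_match({'eth_dst': '0e:00:00:00:00:01'})
        # returns {'dl_dst': '0e:00:00:00:00:01'}, i.e. new-style OXM field names are
        # rewritten to the legacy names that Ryu ofctl serializes.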
flowdump = os.path.join(self.tmpdir, 'flowdump-%s.log' % dpid)
match = to_old_match(match)
match_set = None
exact_mask_match_set = None
if match:
            # Different OFAs handle matches with an exact mask differently.
# Most (including OVS) drop the redundant exact mask. But others
# include an exact mask. So we must handle both.
mac_exact = str(netaddr.EUI(2**48 - 1)).replace('-', ':').lower()
match_set = frozenset(match.items())
exact_mask_match = {}
for field, value in match.items():
if isinstance(value, str) and '/' not in value:
value_mac = None
value_ip = None
try:
value_mac = netaddr.EUI(value)
value_ip = ipaddress.ip_address(value)
except (ValueError, netaddr.core.AddrFormatError):
pass
if value_mac:
value = '/'.join((value, mac_exact))
elif value_ip:
ip_exact = str(ipaddress.ip_address(2**value_ip.max_prefixlen - 1))
value = '/'.join((value, ip_exact))
exact_mask_match[field] = value
exact_mask_match_set = frozenset(exact_mask_match.items())
actions_set = None
if actions:
actions_set = frozenset(actions)
for _ in range(timeout):
flow_dicts = []
if ofa_match:
flow_dump = self.get_all_flows_from_dpid(dpid, table_id, match=match)
else:
flow_dump = self.get_all_flows_from_dpid(dpid, table_id)
with open(flowdump, 'w') as flowdump_file:
flowdump_file.write(str(flow_dump))
for flow_dict in flow_dump:
if (cookie is not None
and cookie != flow_dict['cookie']):
continue
if hard_timeout:
if 'hard_timeout' not in flow_dict:
continue
if flow_dict['hard_timeout'] < hard_timeout:
continue
if actions is not None:
flow_actions_set = frozenset(flow_dict['actions'])
if actions:
if not actions_set.issubset( # pytype: disable=attribute-error
flow_actions_set):
continue
else:
if flow_dict['actions']:
continue
if not ofa_match and match is not None:
flow_match_set = frozenset(flow_dict['match'].items())
# pytype: disable=attribute-error
if not (match_set.issubset(flow_match_set)
or exact_mask_match_set.issubset(flow_match_set)):
continue
# pytype: enable=attribute-error
flow_dicts.append(flow_dict)
if flow_dicts:
return flow_dicts
time.sleep(1)
return flow_dicts
def get_matching_flow_on_dpid(self, dpid, match, table_id, timeout=10,
actions=None, hard_timeout=0, cookie=None,
ofa_match=True):
flow_dicts = self.get_matching_flows_on_dpid(
dpid, match, table_id, timeout=timeout,
actions=actions, hard_timeout=hard_timeout, cookie=cookie,
ofa_match=ofa_match)
if flow_dicts:
return flow_dicts[0]
return []
def get_matching_flow(self, match, table_id, timeout=10,
actions=None, hard_timeout=0,
cookie=None, ofa_match=True):
return self.get_matching_flow_on_dpid(
self.dpid, match, table_id, timeout=timeout,
actions=actions, hard_timeout=hard_timeout,
cookie=cookie, ofa_match=ofa_match)
def get_group_id_for_matching_flow(self, match, table_id, timeout=10):
for _ in range(timeout):
flow_dict = self.get_matching_flow(match, table_id, timeout=timeout)
if flow_dict:
for action in flow_dict['actions']:
if action.startswith('GROUP'):
_, group_id = action.split(':')
return int(group_id)
time.sleep(1)
return None
def matching_flow_present_on_dpid(self, dpid, match, table_id, timeout=10,
actions=None, hard_timeout=0, cookie=None,
ofa_match=True):
"""Return True if matching flow is present on a DPID."""
return self.get_matching_flow_on_dpid(
dpid, match, table_id, timeout=timeout,
actions=actions, hard_timeout=hard_timeout, cookie=cookie,
ofa_match=ofa_match)
def matching_flow_present(self, match, table_id, timeout=10,
actions=None, hard_timeout=0, cookie=None,
ofa_match=True):
"""Return True if matching flow is present on default DPID."""
return self.matching_flow_present_on_dpid(
self.dpid, match, table_id, timeout=timeout,
actions=actions, hard_timeout=hard_timeout, cookie=cookie,
ofa_match=ofa_match)
def wait_until_matching_flow(self, match, table_id, timeout=10,
actions=None, hard_timeout=0, cookie=None,
ofa_match=True, dpid=None):
"""Wait (require) for flow to be present on default DPID."""
if dpid is None:
dpid = self.dpid
self.assertTrue(
self.matching_flow_present_on_dpid(
dpid, match, table_id, timeout=timeout,
actions=actions, hard_timeout=hard_timeout, cookie=cookie,
ofa_match=ofa_match),
msg=('match: %s table_id: %u actions: %s' % (match, table_id, actions)))
def wait_until_no_matching_flow(self, match, table_id, timeout=10,
actions=None, hard_timeout=0, cookie=None,
ofa_match=True, dpid=None):
"""Wait for a flow not to be present."""
if dpid is None:
dpid = self.dpid
for _ in range(timeout):
matching_flow = self.matching_flow_present_on_dpid(
dpid, match, table_id, timeout=1,
actions=actions, hard_timeout=hard_timeout, cookie=cookie,
ofa_match=ofa_match)
if not matching_flow:
return
self.fail('%s present' % matching_flow)
def wait_until_controller_flow(self):
self.wait_until_matching_flow(
None, table_id=self._ETH_SRC_TABLE, actions=['OUTPUT:CONTROLLER'])
def mac_learned(self, mac, timeout=10, in_port=None, hard_timeout=1):
"""Return True if a MAC has been learned on default DPID."""
for eth_field, table_id in (
('dl_src', self._ETH_SRC_TABLE),
('dl_dst', self._ETH_DST_TABLE)):
match = {eth_field: '%s' % mac}
match_hard_timeout = 0
if table_id == self._ETH_SRC_TABLE:
if in_port is not None:
match['in_port'] = in_port
match_hard_timeout = hard_timeout
if not self.matching_flow_present(
match, table_id, timeout=timeout, hard_timeout=match_hard_timeout):
return False
return True
def scrape_port_counters(self, ports, port_vars):
"""Scrape Gauge for list of ports and list of variables."""
port_counters = {port: {} for port in ports}
for port in ports:
port_labels = self.port_labels(self.port_map[port])
for port_var in port_vars:
val = self.scrape_prometheus_var(
port_var, labels=port_labels,
controller=self.gauge_controller.name, dpid=True, retries=3)
self.assertIsNotNone(val, '%s missing for port %s' % (port_var, port))
port_counters[port][port_var] = val
# Require port to be up and reporting non-zero speed.
speed = self.scrape_prometheus_var(
'of_port_curr_speed', labels=port_labels,
controller=self.gauge_controller.name, retries=3)
self.assertTrue(speed and speed > 0, msg='%s %s: %s' % (
'of_port_curr_speed', port_labels, speed))
state = self.scrape_prometheus_var(
'of_port_state', labels=port_labels,
controller=self.gauge_controller.name, retries=3)
self.assertFalse(state & ofp.OFPPS_LINK_DOWN, msg='%s %s: %s' % (
'of_port_state', port_labels, state))
return port_counters
def wait_ports_updating(self, ports, port_vars, stimulate_counters_func=None):
"""Return True if list of ports have list of variables all updated."""
if stimulate_counters_func is None:
stimulate_counters_func = self.ping_all_when_learned
ports_not_updated = set(ports)
first_counters = self.scrape_port_counters(ports_not_updated, port_vars)
start_time = time.time()
for _ in range(self.DB_TIMEOUT * 3):
stimulate_counters_func()
now_counters = self.scrape_port_counters(ports_not_updated, port_vars)
updated_ports = set()
for port in ports_not_updated:
first = first_counters[port]
now = now_counters[port]
not_updated = [var for var, val in now.items() if val <= first[var]]
if not_updated:
break
updated_ports.add(port)
ports_not_updated -= updated_ports
if ports_not_updated:
time.sleep(1)
else:
break
end_time = time.time()
error('counter latency up to %u sec\n' % (end_time - start_time))
return not ports_not_updated
@staticmethod
def mac_as_int(mac):
return int(mac.replace(':', ''), 16)
@staticmethod
def mac_from_int(mac_int):
mac_int_str = '%012x' % int(mac_int)
return ':'.join(mac_int_str[i:i + 2] for i in range(0, len(mac_int_str), 2))
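    # Illustrative round trip (assumed MAC): mac_as_int('0e:00:00:00:00:99') ==
    # 0x0e0000000099 and mac_from_int(0x0e0000000099) == '0e:00:00:00:00:99',
    # mirroring how learned_macs values are exported by Prometheus as integers.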
def prom_macs_learned(self, port=None, vlan=None):
labels = {
'n': r'\d+',
'port': r'b\d+',
'vlan': r'\d+',
}
if port:
labels.update(self.port_labels(port))
if vlan:
labels['vlan'] = str(vlan)
port_learned_macs_prom = self.scrape_prometheus_var(
'learned_macs', labels=labels, default=[], multiple=True, dpid=True)
macs = [self.mac_from_int(mac_int) for _, mac_int in port_learned_macs_prom if mac_int]
return macs
def prom_mac_learned(self, mac, port=None, vlan=None):
return mac in self.prom_macs_learned(port=port, vlan=vlan)
def host_learned(self, host, timeout=10, in_port=None, hard_timeout=1):
"""Return True if a host has been learned on default DPID."""
return self.mac_learned(host.MAC(), timeout, in_port, hard_timeout=hard_timeout)
@staticmethod
def get_host_intf_mac(host, intf):
return host.cmd('cat /sys/class/net/%s/address' % intf).strip()
def get_host_netns(self, host):
hostns = self.hostns(host)
nses = [netns.split()[0] for netns in host.cmd('ip netns list').splitlines()]
return hostns in nses
@staticmethod
def host_ip(host, family, family_re):
host_ip_cmd = (
r'ip -o -f %s addr show %s|'
'grep -m 1 -Eo "%s %s"|cut -f2 -d " "' % (
family,
host.defaultIntf(),
family,
family_re))
return host.cmd(host_ip_cmd).strip()
def host_ipv4(self, host):
"""Return first IPv4/netmask for host's default interface."""
return self.host_ip(host, 'inet', r'[0-9\\.]+\/[0-9]+')
def host_ipv6(self, host):
"""Return first IPv6/netmask for host's default interface."""
return self.host_ip(host, 'inet6', r'[0-9a-f\:]+\/[0-9]+')
@staticmethod
def reset_ipv4_prefix(host, prefix=24):
host.setIP(host.IP(), prefixLen=prefix)
def reset_all_ipv4_prefix(self, prefix=24):
for host in self.hosts_name_ordered():
self.reset_ipv4_prefix(host, prefix)
def stimulate_host_learn(self, host):
unicast_learn_cli = self.scapy_dhcp(host.MAC(), host.defaultIntf(), dst=self.FAUCET_MAC)
bcast_learn_cli = self.scapy_dhcp(host.MAC(), host.defaultIntf())
results = []
for learn_cli in (unicast_learn_cli, bcast_learn_cli):
results.append(host.cmd(learn_cli))
return ' '.join(results)
def require_host_learned(self, host, retries=8, in_port=None, hard_timeout=1):
"""Require a host be learned on default DPID."""
for _ in range(retries):
if self.host_learned(host, timeout=1, in_port=in_port, hard_timeout=hard_timeout):
return
learn_result = self.stimulate_host_learn(host)
self.fail('Could not learn host %s (%s): %s' % (host, host.MAC(), learn_result))
def get_prom_port(self, controller=None):
if controller is None:
controller = self.faucet_controllers[0].name
return int(self.env[controller]['FAUCET_PROMETHEUS_PORT'])
def get_prom_addr(self, controller=None):
if controller is None:
controller = self.faucet_controllers[0].name
return self.env[controller]['FAUCET_PROMETHEUS_ADDR']
def _prometheus_url(self, controller):
if 'faucet' in controller:
return 'http://[%s]:%u' % (
self.get_prom_addr(), self.get_prom_port())
if 'gauge' in controller:
return 'http://[%s]:%u' % (
self.get_prom_addr(), self.config_ports['gauge_prom_port'])
raise NotImplementedError
def scrape_prometheus(self, controller=None, timeout=15, var=None, verify_consistent=False):
"""
Obtain prometheus statistics
Args:
            controller (str): name of the controller to scrape the Prometheus variable from
timeout (int): Timeout for scrape request
var (str): Variable to match on & return
            verify_consistent (bool): Verify that the values from all controllers are consistent
"""
all_prom_lines = []
if controller is None:
controller = self.faucet_controllers[0].name
controller_iter = []
if self.net.get(controller) in self.faucet_controllers:
controller_iter = self.faucet_controllers
else:
controller_iter = self.gauge_controllers
for cont in controller_iter:
controller_name = cont.name
url = self._prometheus_url(controller_name)
try:
prom_raw = requests.get(url, {}, timeout=timeout).text
except (requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout):
return []
with open(os.path.join(self.tmpdir, '%s-prometheus.log' % controller_name), 'w') as prom_log:
prom_log.write(prom_raw)
prom_lines = [
prom_line for prom_line in prom_raw.splitlines() if not prom_line.startswith('#')]
if var:
prom_lines = [
prom_line for prom_line in prom_lines if prom_line.startswith(var)]
all_prom_lines.append(prom_lines)
if verify_consistent:
self.verify_prom_var(all_prom_lines)
cont = self.net.get(controller)
index = controller_iter.index(cont)
return all_prom_lines[index]
def verify_prom_var(self, all_prom_lines):
"""
        Verifies that the lines scraped from Prometheus are consistent across controllers.
        NOTE: This does not work well for every variable, because different controllers
        report some naturally differing statistics, e.g. cold start time.
        So only set `verify_consistent` in `scrape_prometheus` for specific
        variables that you know should be consistent.
"""
for lines_a in all_prom_lines:
for lines_b in all_prom_lines:
# pylint: disable=consider-using-enumerate
self.assertEqual(len(lines_a), len(lines_b))
for i in range(len(lines_a)):
prom_line_a = lines_a[i]
prom_line_b = lines_b[i]
match_a = self._PROM_LINE_RE.match(prom_line_a)
match_b = self._PROM_LINE_RE.match(prom_line_b)
self.assertIsNotNone(match_a)
self.assertIsNotNone(match_b)
var_a = match_a.group(1)
var_b = match_b.group(1)
self.assertEqual(var_a, var_b)
val_a = int(float(match_a.group(2)))
val_b = int(float(match_b.group(2)))
self.assertEqual(val_a, val_b, msg='%s %s inconsistent' %
(prom_line_a, prom_line_b))
def parse_prom_var(self, prom_line):
"""Parse prometheus variable, return tuple of variable name, variable value"""
prom_line_match = self._PROM_LINE_RE.match(prom_line)
self.assertIsNotNone(
prom_line_match,
msg='Invalid prometheus line %s' % prom_line)
prom_var = prom_line_match.group(1)
prom_val = int(float(prom_line_match.group(2)))
return (prom_var, prom_val)
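    # Illustrative parse (assumed sample line): given a scraped line such as
    #   vlan_hosts_learned{dp_id="0x1",dp_name="faucet-1",vlan="100"} 2.0
    # parse_prom_var() returns the variable portion captured by _PROM_LINE_RE
    # together with the value truncated to an int, here 2.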
def wait_for_prometheus_var(self, var, result_wanted, labels=None,
any_labels=False, default=None, dpid=True,
multiple=False, controller=None, retries=3,
timeout=5, orgreater=False):
if controller is None:
controller = self.faucet_controllers[0].name
for _ in range(timeout):
result = self.scrape_prometheus_var(
var, labels=labels, any_labels=any_labels, default=default,
dpid=dpid, multiple=multiple, controller=controller, retries=retries)
if result == result_wanted:
return True
if orgreater and result > result_wanted:
return True
time.sleep(1)
return False
def scrape_prometheus_var(self, var, labels=None, any_labels=False, default=None,
dpid=True, multiple=False, controller=None, retries=3):
"""
        Return a parsed Prometheus variable
Args:
var (str): Prometheus variable to scrape for
labels (dict): Labels to apply for the variable search
any_labels (bool): Wildcard label match
default: Default value to return if nothing found
dpid (bool/int): Specific DPID or use default DPID in labels
multiple (bool): Return multiple instances of found matching variables
            controller (str): Name of the controller that owns the variable to search for
retries (int): Number of attempts to scrape a variable
"""
if controller is None:
controller = self.faucet_controllers[0].name
if dpid:
if dpid is True:
dpid = int(self.dpid)
else:
dpid = int(dpid)
if dpid and self.dpid_names:
dp_name = self.dpid_names[str(dpid)]
else:
dp_name = self.DP_NAME
label_values_re = r''
if any_labels:
label_values_re = r'\{[^\}]+\}'
else:
if labels is None:
labels = {}
if dpid:
labels.update({'dp_id': '0x%x' % dpid, 'dp_name': dp_name})
if labels:
label_values = []
for label, value in sorted(labels.items()):
label_values.append('%s="%s"' % (label, value))
label_values_re = r'\{%s\}' % r'\S+'.join(label_values)
var_re = re.compile(r'^%s%s$' % (var, label_values_re))
for i in range(retries):
results = []
prom_lines = self.scrape_prometheus(controller=controller, var=var)
for prom_line in prom_lines:
prom_var, prom_val = self.parse_prom_var(prom_line)
if var_re.match(prom_var):
results.append((var, prom_val))
if not multiple:
break
if results:
if multiple:
return results
return results[0][1]
if i < (retries - 1):
time.sleep(1)
return default
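    # Illustrative usage (assumed variable and labels):
    #   self.scrape_prometheus_var('vlan_hosts_learned', labels={'vlan': '100'})
    # returns the integer value of vlan_hosts_learned for the default DPID on the
    # first FAUCET controller, or the default (None) if no match appears within
    # the configured retries.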
def gauge_smoke_test(self):
watcher_files = set([
self.monitor_stats_file,
self.monitor_state_file,
])
found_watcher_files = set()
for _ in range(60):
for watcher_file in watcher_files:
if (os.path.exists(watcher_file)
and os.path.getsize(watcher_file)):
found_watcher_files.add(watcher_file)
if watcher_files == found_watcher_files \
and bool(os.listdir(self.monitor_flow_table_dir)):
break
self.verify_no_exception(self.env[self.gauge_controller.name]['GAUGE_EXCEPTION_LOG'])
time.sleep(1)
found_watcher_files = set()
missing_watcher_files = watcher_files - found_watcher_files
self.assertEqual(
missing_watcher_files, set(), msg='Gauge missing logs: %s' % missing_watcher_files)
self.hup_controller(self.gauge_controller.name)
self.verify_no_exception(self.env[self.faucet_controllers[0].name]['FAUCET_EXCEPTION_LOG'])
def prometheus_smoke_test(self):
prom_out = '\n'.join(self.scrape_prometheus())
for nonzero_var in (
r'of_packet_ins', r'of_flowmsgs_sent', r'of_dp_connections',
r'faucet_config\S+name=\"flood\"', r'faucet_pbr_version\S+version='):
self.assertTrue(
re.search(r'%s\S+\s+[1-9]+' % nonzero_var, prom_out),
msg='expected %s to be nonzero (%s)' % (nonzero_var, prom_out))
for zero_var in (
'of_errors', 'of_dp_disconnections'):
self.assertTrue(
re.search(r'%s\S+\s+0' % zero_var, prom_out),
msg='expected %s to be present and zero (%s)' % (zero_var, prom_out))
def get_configure_count(self, retries=5, controller=None):
"""Return the number of times FAUCET has processed a reload request."""
if controller is None:
controller = self.faucet_controllers[0].name
for _ in range(retries):
count = self.scrape_prometheus_var(
'faucet_config_reload_requests_total',
dpid=False, controller=controller)
if count:
break
time.sleep(1)
self.assertTrue(count, msg='configure count stayed zero')
return count
def hup_controller(self, controller=None):
"""Send a HUP signal to the controller."""
if controller is None:
controller = self.faucet_controllers[0].name
cont_obj = self.net.get(controller)
self.assertTrue(
self._signal_proc_on_port(cont_obj, int(cont_obj.port), 1))
def reload_conf(self, yaml_conf, conf_path, restart, cold_start,
change_expected=True, host_cache=None, hup=True, dpid=True):
def _update_conf(conf_path, yaml_conf):
if yaml_conf:
yaml_conf = self._annotate_interfaces_conf(yaml_conf)
self._write_yaml_conf(conf_path, yaml_conf)
update_conf_func = partial(_update_conf, conf_path, yaml_conf)
verify_faucet_reconf_func = partial(
self.verify_faucet_reconf,
cold_start=cold_start,
change_expected=change_expected,
reconf_funcs=[update_conf_func], hup=hup, dpid=dpid)
if restart:
if host_cache:
vlan_labels = dict(vlan=host_cache)
old_mac_table = sorted(self.scrape_prometheus_var(
'learned_macs', labels=vlan_labels, multiple=True, default=[], dpid=dpid))
verify_faucet_reconf_func()
new_mac_table = sorted(self.scrape_prometheus_var(
'learned_macs', labels=vlan_labels, multiple=True, default=[], dpid=dpid))
self.assertFalse(
cold_start, msg='host cache is not maintained with cold start')
self.assertTrue(
new_mac_table, msg='no host cache for VLAN %u' % host_cache)
self.assertEqual(
old_mac_table, new_mac_table,
msg='host cache for VLAN %u not same over reload (old %s, new %s)' % (
host_cache, old_mac_table, new_mac_table))
else:
verify_faucet_reconf_func()
return
update_conf_func()
def coldstart_conf(self, hup=True):
orig_conf = self._get_faucet_conf()
cold_start_conf = copy.deepcopy(orig_conf)
if 'routers' in cold_start_conf:
del cold_start_conf['routers']
used_vids = set()
for vlan_name, vlan_conf in cold_start_conf['vlans'].items():
used_vids.add(vlan_conf.get('vid', vlan_name))
unused_vids = list(set(range(2, max(used_vids))) - used_vids)
assert len(unused_vids) >= len(self.port_map)
# Ensure cold start by moving all ports to new, unused VLANs,
# then back again.
for dp_conf in cold_start_conf['dps'].values():
dp_conf['interfaces'] = {
self.port_map[port]: {'native_vlan': unused_vids[i]}
for i, port in enumerate(self.port_map.keys(), start=0)}
for conf in (cold_start_conf, orig_conf):
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=True, hup=hup)
def add_port_config(self, port, port_config, conf=None,
restart=True, cold_start=False,
hup=True):
if conf is None:
conf = self._get_faucet_conf()
conf['dps'][self.DP_NAME]['interfaces'][port] = port_config
self.reload_conf(
conf, self.faucet_config_path,
restart, cold_start, hup=hup)
def change_port_config(self, port, config_name, config_value,
conf=None, restart=True, cold_start=False,
hup=True, change_expected=True):
if conf is None:
conf = self._get_faucet_conf()
if config_name is None:
del conf['dps'][self.DP_NAME]['interfaces'][port]
else:
if config_value is None:
del conf['dps'][self.DP_NAME]['interfaces'][port][config_name]
else:
conf['dps'][self.DP_NAME]['interfaces'][port][config_name] = config_value
self.reload_conf(
conf, self.faucet_config_path,
restart, cold_start, hup=hup, change_expected=change_expected)
def change_vlan_config(self, vlan, config_name, config_value,
conf=None, restart=True, cold_start=False,
hup=True):
if conf is None:
conf = self._get_faucet_conf()
conf['vlans'][vlan][config_name] = config_value
self.reload_conf(
conf, self.faucet_config_path,
restart, cold_start, hup=hup)
def ipv4_vip_bcast(self):
return self.FAUCET_VIPV4.network.broadcast_address
def verify_traveling_dhcp_mac(self, retries=10):
mac = '0e:00:00:00:00:ff'
locations = set()
for host in self.hosts_name_ordered():
for _ in range(retries):
host.cmd(self.scapy_dhcp(mac, host.defaultIntf()))
new_locations = set()
for line in self.scrape_prometheus(var='learned_macs'):
location, mac_float = self.parse_prom_var(line)
if self.mac_from_int(int(float(mac_float))) == mac:
new_locations.add(location)
if locations != new_locations:
break
time.sleep(1)
# TODO: verify port/host association, not just that host moved.
self.assertNotEqual(locations, new_locations)
locations = new_locations
def _verify_xcast(self, received_expected, packets, tcpdump_filter, scapy_cmd, host_a, host_b):
received_packets = False
for _ in range(packets):
tcpdump_txt = self.tcpdump_helper(
host_b, tcpdump_filter,
[partial(host_a.cmd, scapy_cmd)],
packets=1, timeout=2)
msg = '%s (%s) -> %s (%s): %s' % (
host_a, host_a.MAC(), host_b, host_b.MAC(), tcpdump_txt)
received_no_packets = self.tcpdump_rx_packets(tcpdump_txt, packets=0)
received_packets = received_packets or not received_no_packets
if received_packets:
if received_expected is not False:
return True
self.assertTrue(received_expected, msg=msg)
time.sleep(1)
if received_expected is None:
return received_packets
self.assertEqual(received_expected, received_packets, msg=msg)
return None
def verify_broadcast(self, hosts=None, broadcast_expected=True, packets=3):
host_a = self.hosts_name_ordered()[0]
host_b = self.hosts_name_ordered()[-1]
if hosts is not None:
host_a, host_b = hosts
tcpdump_filter = ' and '.join((
'ether dst host ff:ff:ff:ff:ff:ff',
'ether src host %s' % host_a.MAC(),
'udp'))
scapy_cmd = self.scapy_bcast(host_a, count=packets)
return self._verify_xcast(broadcast_expected, packets, tcpdump_filter,
scapy_cmd, host_a, host_b)
def verify_unicast(self, hosts, unicast_expected=True, packets=3):
host_a = self.hosts_name_ordered()[0]
host_b = self.hosts_name_ordered()[-1]
if hosts is not None:
host_a, host_b = hosts
tcpdump_filter = ' and '.join((
'ether dst %s' % host_b.MAC(),
'ether src %s' % host_a.MAC(),
'udp'))
scapy_cmd = self.scapy_template(
('Ether(src=\'%s\', dst=\'%s\', type=%u) / '
'IP(src=\'%s\', dst=\'%s\') / UDP(dport=67,sport=68)') % (
host_a.MAC(), host_b.MAC(), IPV4_ETH,
host_a.IP(), host_b.IP()), host_a.defaultIntf(), count=packets)
return self._verify_xcast(unicast_expected, packets, tcpdump_filter,
scapy_cmd, host_a, host_b)
def verify_empty_caps(self, cap_files):
cap_file_cmds = [
'tcpdump -n -v -A -r %s 2> /dev/null' % cap_file for cap_file in cap_files]
self.quiet_commands(self.net.controllers[0], cap_file_cmds)
def verify_no_bcast_to_self(self, timeout=3):
bcast_cap_files = []
tcpdump_timeout = timeout * len(self.hosts_name_ordered()) * 2
for host in self.hosts_name_ordered():
tcpdump_filter = '-Q in ether src %s' % host.MAC()
bcast_cap_file = os.path.join(self.tmpdir, '%s-bcast.cap' % host)
bcast_cap_files.append(bcast_cap_file)
host.cmd(mininet_test_util.timeout_cmd(
'tcpdump -U -n -c 1 -i %s -w %s %s &' % (
host.defaultIntf(), bcast_cap_file, tcpdump_filter), tcpdump_timeout))
for host in self.hosts_name_ordered():
for bcast_cmd in (
('ndisc6 -w1 fe80::1 %s' % host.defaultIntf()),
('ping -b -i0.1 -c3 %s' % self.ipv4_vip_bcast())):
host.cmd(mininet_test_util.timeout_cmd(bcast_cmd, timeout))
self.verify_empty_caps(bcast_cap_files)
def verify_unicast_not_looped(self, packets=3):
unicast_mac1 = '0e:00:00:00:00:02'
unicast_mac2 = '0e:00:00:00:00:03'
hello_template = (
'Ether(src=\'%s\', dst=\'%s\')/'
'IP(src=\'10.0.0.100\', dst=\'10.0.0.255\')/'
'UDP(dport=9)/'
'b\'hello\'')
tcpdump_filter = '-Q in ether src %s' % unicast_mac1
for host in self.hosts_name_ordered():
host.cmd(
self.scapy_template(
hello_template % (unicast_mac1, 'ff:ff:ff:ff:ff:ff'),
host.defaultIntf()))
host.cmd(
self.scapy_template(
hello_template % (unicast_mac2, 'ff:ff:ff:ff:ff:ff'),
host.defaultIntf()))
tcpdump_txt = self.tcpdump_helper(
host, tcpdump_filter, [
partial(host.cmd, (
self.scapy_template(
hello_template % (unicast_mac1, unicast_mac2),
host.defaultIntf(),
count=packets)))],
timeout=(packets - 1), vflags='-vv', packets=1)
self.verify_no_packets(tcpdump_txt)
def verify_controller_fping(self, host, faucet_vip,
total_packets=100, packet_interval_ms=100, size=64):
fping_bin = 'fping'
if faucet_vip.version == 6:
fping_bin = 'fping6'
fping_cli = '%s %s -b %u -c %u -i %u %s' % (
fping_bin, self.FPING_ARGS_SHORT, size, total_packets,
packet_interval_ms, faucet_vip.ip)
timeout = int(((1000.0 / packet_interval_ms) * total_packets) * 1.5)
fping_out = host.cmd(mininet_test_util.timeout_cmd(
fping_cli, timeout))
error('%s: %s' % (self._test_name(), fping_out))
self.assertTrue(
re.search(r'\s+[1-9][0-9]* ICMP Echo Replies received', fping_out),
msg=fping_out)
def verify_learn_counters(self, vlan, ports, verify_neighbors=False):
# Need to synchronize with stats update thread.
for _ in range(7):
vlan_hosts_learned = self.scrape_prometheus_var(
'vlan_hosts_learned',
{'vlan': str(vlan)})
port_vlan_hosts_learned = 0
prom_macs_learned = 0
for port in ports:
port_no = self.port_map['port_%u' % port]
labels = {'vlan': str(vlan)}
labels.update(self.port_labels(port_no))
port_vlan_hosts_learned += self.scrape_prometheus_var(
'port_vlan_hosts_learned', labels, default=0)
prom_macs_learned += len(self.prom_macs_learned(
vlan=vlan, port=port_no))
if (vlan_hosts_learned == port_vlan_hosts_learned
and vlan_hosts_learned == prom_macs_learned):
break
time.sleep(1)
self.assertEqual(vlan_hosts_learned, port_vlan_hosts_learned)
self.assertEqual(vlan_hosts_learned, prom_macs_learned)
if verify_neighbors:
vlan_neighbors = self.scrape_prometheus_var(
'vlan_neighbors',
{'vlan': str(vlan)})
self.assertEqual(vlan_hosts_learned, vlan_neighbors)
return vlan_hosts_learned
def verify_learning(self, test_net, learn_ip, min_hosts, max_hosts, learn_pps=20):
# TODO: test environment is pretty hard on test host, with this many macvlans
def simplify_intf_conf(host, intf):
for conf_cmd in (
'echo 1 > /proc/sys/net/ipv6/conf/%s/disable_ipv6',
'echo 300 > /proc/sys/net/ipv4/neigh/%s/gc_stale_time',
'ip link set dev %s arp off',):
self.assertEqual('', host.cmd(conf_cmd % intf))
def generate_test_ipas():
test_ipas = []
for ipa in sorted(test_net.hosts()):
if str(ipa).endswith('.0'):
continue
if str(ipa).endswith('.255'):
continue
test_ipas.append(ipa)
if len(test_ipas) == max_hosts + len(self.hosts_name_ordered()):
break
base_ipas = test_ipas[-len(self.hosts_name_ordered()):]
return (base_ipas, test_ipas)
def generate_mac_intfs(test_ipas, other_hosts):
mac_intf_ipv4s = []
for i in range(0, max_hosts):
host = other_hosts[i % len(other_hosts)]
mac_intf = 'mac%u' % i
mac_ipv4 = str(test_ipas[i])
mac_intf_ipv4s.append((host, mac_intf, mac_ipv4))
return mac_intf_ipv4s
first_host = self.hosts_name_ordered()[0]
other_hosts = self.hosts_name_ordered()[1:]
base_ipas, test_ipas = generate_test_ipas()
mac_intf_ipv4s = generate_mac_intfs(test_ipas, other_hosts)
for i, host in enumerate(self.hosts_name_ordered()):
host.setIP(str(base_ipas[i]), prefixLen=test_net.prefixlen)
self.ping_all_when_learned()
learn_hosts = min_hosts
successful_learn_hosts = 0
fping_prefix = 'fping %s -q -c 1' % self.FPING_ARGS_SHORT
pps_ms = 1e3 / learn_pps
while learn_hosts <= max_hosts and successful_learn_hosts < max_hosts:
error('will learn %u hosts\n' % learn_hosts)
start_time = time.time()
learn_host_list = mac_intf_ipv4s[successful_learn_hosts:learn_hosts]
random.shuffle(learn_host_list)
# configure macvlan interfaces and stimulate learning
for host, mac_intf, mac_ipv4 in learn_host_list:
fping_conf_start = time.time()
self.add_macvlan(host, mac_intf, mac_ipv4, ipm=test_net.prefixlen)
simplify_intf_conf(host, mac_intf)
host.cmd('%s -I%s %s' % (fping_prefix, mac_intf, str(learn_ip)))
fping_ms = (time.time() - fping_conf_start) * 1e3
if fping_ms < pps_ms:
time.sleep((pps_ms - fping_ms) / 1e3)
def verify_connectivity(learn_hosts):
error('verifying connectivity')
all_unverified_ips = [str(ipa) for ipa in test_ipas[:learn_hosts]]
random.shuffle(all_unverified_ips)
loss_re = re.compile(
r'^(\S+) : xmt\/rcv\/\%loss = \d+\/\d+\/(\d+)\%.+')
while all_unverified_ips:
unverified_ips = set()
for _ in range(min(learn_pps, len(all_unverified_ips))):
unverified_ips.add(all_unverified_ips.pop())
for _ in range(10):
error('.')
random_unverified_ips = list(unverified_ips)
random.shuffle(random_unverified_ips)
fping_cmd = '%s %s' % (fping_prefix, ' '.join(random_unverified_ips))
fping_lines = first_host.cmd(fping_cmd).splitlines()
for fping_line in fping_lines:
loss_match = loss_re.match(fping_line)
if loss_match:
ipa = loss_match.group(1)
loss = int(loss_match.group(2))
if loss == 0:
unverified_ips.remove(ipa)
if unverified_ips:
time.sleep(0.1 * len(unverified_ips))
else:
break
if unverified_ips:
error('could not verify connectivity for all hosts: %s\n' % unverified_ips)
return False
return self.wait_for_prometheus_var(
'vlan_hosts_learned', learn_hosts, labels={'vlan': '100'},
timeout=15, orgreater=True)
if verify_connectivity(learn_hosts):
learn_time = time.time() - start_time
# dump_packet_counters()
error('verified %u hosts learned in %u sec\n' % (
learn_hosts, learn_time))
successful_learn_hosts = learn_hosts
learn_hosts = min(learn_hosts * 2, max_hosts)
else:
break
self.assertGreaterEqual(successful_learn_hosts, min_hosts)
def verify_vlan_flood_limited(self, vlan_first_host, vlan_second_host,
other_vlan_host):
"""Verify that flooding doesn't cross VLANs."""
for first_host, second_host in (
(vlan_first_host, vlan_second_host),
(vlan_second_host, vlan_first_host)):
tcpdump_filter = 'ether host %s or ether host %s' % (
first_host.MAC(), second_host.MAC())
tcpdump_txt = self.tcpdump_helper(
other_vlan_host, tcpdump_filter, [
partial(first_host.cmd, 'arp -d %s' % second_host.IP()),
partial(first_host.cmd, ' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
packets=1)
self.verify_no_packets(tcpdump_txt)
def verify_ping_mirrored(self, first_host, second_host, mirror_host, both_mirrored=False):
"""Verify that unicast traffic to and from a mirrored port is mirrored."""
self.ping((first_host, second_host))
for host in (first_host, second_host):
self.require_host_learned(host)
self.retry_net_ping(hosts=(first_host, second_host))
tcpdump_filter = (
'(ether src %s or ether src %s) and '
'(icmp[icmptype] == 8 or icmp[icmptype] == 0)') % (
first_host.MAC(), second_host.MAC())
first_ping_second = ' '.join((self.FPINGS_ARGS_ONE, second_host.IP()))
expected_pings = 2
max_expected_pings = 2
if both_mirrored:
max_expected_pings *= 2
tcpdump_txt = self.tcpdump_helper(
mirror_host, tcpdump_filter, [
partial(first_host.cmd, first_ping_second)], packets=(max_expected_pings + 1))
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt),
msg=tcpdump_txt)
self.assertTrue(re.search(
'%s: ICMP echo reply' % first_host.IP(), tcpdump_txt),
msg=tcpdump_txt)
received_pings = self.match_tcpdump_rx_packets(tcpdump_txt)
self.assertGreaterEqual(received_pings, expected_pings)
self.assertLessEqual(received_pings, max_expected_pings)
def verify_bcast_ping_mirrored(self, first_host, second_host, mirror_host,
tagged=False, require_learned=True):
"""Verify that broadcast to a mirrored port, is mirrored."""
if require_learned:
self.ping((first_host, second_host))
for host in (first_host, second_host):
self.require_host_learned(host)
self.retry_net_ping(hosts=(first_host, second_host))
tcpdump_filter = (
'ether src %s and ether dst ff:ff:ff:ff:ff:ff and '
'icmp[icmptype] == 8') % second_host.MAC()
if tagged:
tcpdump_filter = 'vlan and %s' % tcpdump_filter
else:
tcpdump_filter = '%s and not vlan' % tcpdump_filter
second_ping_bcast = 'ping -c3 -b %s' % self.ipv4_vip_bcast()
tcpdump_txt = self.tcpdump_helper(
mirror_host, tcpdump_filter, [
partial(second_host.cmd, second_ping_bcast)],
packets=1)
self.assertTrue(re.search(
'%s: ICMP echo request' % self.ipv4_vip_bcast(), tcpdump_txt),
msg=tcpdump_txt)
def verify_ping_mirrored_multi(self, ping_pairs, mirror_host, both_mirrored=False):
""" Verify that mirroring of multiple switchs works. Method
will both perform a one at a time ping mirror check and a
all at once test where all ping pairs are executed at the
same time.
Args:
ping_pairs (list of tuple): Hosts to ping for tests
in the format '[(host_a, host_b)]` where host_a
will ping host_bs IP.
mirror_host (FaucetHost): host to check mirroring
"""
# Verify individual ping works
for hosts in ping_pairs:
self.verify_ping_mirrored(hosts[0], hosts[1], mirror_host, both_mirrored=both_mirrored)
# Prepare our ping pairs
for hosts in ping_pairs:
self.ping(hosts)
for hosts in ping_pairs:
for host in hosts:
self.require_host_learned(host)
for hosts in ping_pairs:
self.retry_net_ping(hosts=hosts)
mirror_mac = mirror_host.MAC()
tcpdump_filter = (
'not ether src %s and '
'(icmp[icmptype] == 8 or icmp[icmptype] == 0)') % mirror_mac
        # Calculate the expected number of pings we need
# to capture to validate port mirroring
expected_pings = len(ping_pairs) * 2
max_expected_pings = expected_pings
if both_mirrored:
max_expected_pings *= 2
# Generate and run the mirror test pings
ping_commands = []
for hosts in ping_pairs:
ping_commands.append(
lambda hosts=hosts: hosts[0].cmd(' '.join((self.FPINGS_ARGS_ONE, hosts[1].IP()))))
tcpdump_txt = self.tcpdump_helper(
mirror_host, tcpdump_filter, ping_commands, packets=(max_expected_pings + 1))
for hosts in ping_pairs:
self.assertTrue(re.search(
'%s > %s: ICMP echo request' % (hosts[0].IP(), hosts[1].IP()), tcpdump_txt),
msg=tcpdump_txt)
self.assertTrue(re.search(
'%s > %s: ICMP echo reply' % (hosts[1].IP(), hosts[0].IP()), tcpdump_txt),
msg=tcpdump_txt)
received_pings = self.match_tcpdump_rx_packets(tcpdump_txt)
self.assertGreaterEqual(received_pings, expected_pings)
self.assertLessEqual(received_pings, max_expected_pings)
def match_tcpdump_rx_packets(self, tcpdump_txt):
match_re = re.compile(r'.*(\d+) packets* captured.*')
match = match_re.match(tcpdump_txt)
self.assertTrue(match, msg=tcpdump_txt)
packets = int(match.group(1))
return packets
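    # Illustrative example (assumed tcpdump output): output whose summary reads
    # '5 packets captured' makes match_tcpdump_rx_packets() return 5; the regex
    # also accepts the singular form '1 packet captured'.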
def tcpdump_rx_packets(self, tcpdump_txt, packets=0):
return self.match_tcpdump_rx_packets(tcpdump_txt) == packets
def verify_no_packets(self, tcpdump_txt):
self.assertTrue(self.tcpdump_rx_packets(tcpdump_txt, packets=0), msg=tcpdump_txt)
def verify_eapol_mirrored(self, first_host, second_host, mirror_host):
self.ping((first_host, second_host))
for host in (first_host, second_host):
self.require_host_learned(host)
self.retry_net_ping(hosts=(first_host, second_host))
mirror_mac = mirror_host.MAC()
tmp_eap_conf = os.path.join(self.tmpdir, 'eap.conf')
tcpdump_filter = (
'not ether src %s and ether proto 0x888e' % mirror_mac)
eap_conf_cmd = (
'echo "eapol_version=2\nap_scan=0\nnetwork={\n'
'key_mgmt=IEEE8021X\neap=MD5\nidentity=\\"login\\"\n'
'password=\\"password\\"\n}\n" > %s' % tmp_eap_conf)
wpa_supplicant_cmd = mininet_test_util.timeout_cmd(
'wpa_supplicant -c%s -Dwired -i%s -d' % (
tmp_eap_conf,
first_host.defaultIntf().name),
3)
tcpdump_txt = self.tcpdump_helper(
mirror_host, tcpdump_filter, [
partial(first_host.cmd, eap_conf_cmd),
partial(first_host.cmd, wpa_supplicant_cmd),
partial(first_host.cmd, wpa_supplicant_cmd),
partial(first_host.cmd, wpa_supplicant_cmd)],
timeout=20, packets=1)
self.assertTrue(
re.search('01:80:c2:00:00:03, ethertype EAPOL', tcpdump_txt),
msg=tcpdump_txt)
def bogus_mac_flooded_to_port1(self):
first_host, second_host, third_host = self.hosts_name_ordered()[0:3]
unicast_flood_filter = 'ether host %s' % self.BOGUS_MAC
static_bogus_arp = 'arp -s %s %s' % (first_host.IP(), self.BOGUS_MAC)
curl_first_host = 'curl -m 5 http://%s' % first_host.IP()
tcpdump_txt = self.tcpdump_helper(
first_host, unicast_flood_filter,
[lambda: second_host.cmd(static_bogus_arp),
lambda: second_host.cmd(curl_first_host),
lambda: self.ping(hosts=(second_host, third_host))])
return not self.tcpdump_rx_packets(tcpdump_txt, 0)
def ladvd_cmd(self, ladvd_args, repeats=1, timeout=3):
ladvd_mkdir = 'mkdir -p /var/run/ladvd'
ladvd_all_args = ['%s %s' % (
mininet_test_util.timeout_cmd(self.LADVD, timeout), ladvd_args)] * repeats
ladvd_cmd = ';'.join([ladvd_mkdir] + ladvd_all_args)
return ladvd_cmd
def ladvd_noisemaker(self, send_cmd, tcpdump_filter, hosts=None, timeout=3, repeats=3):
if hosts is None:
hosts = self.hosts_name_ordered()[:2]
first_host = hosts[0]
other_hosts = hosts[1:]
other_host_cmds = []
for other_host in other_hosts:
other_host_cmds.append(partial(other_host.cmd, self.ladvd_cmd(
send_cmd % other_host.defaultIntf(), repeats=3, timeout=timeout)))
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, other_host_cmds,
timeout=(timeout * repeats * len(hosts)), packets=1)
self.verify_no_packets(tcpdump_txt)
def verify_lldp_blocked(self, hosts=None, timeout=3):
self.ladvd_noisemaker(
'-L -o %s', 'ether proto 0x88cc',
hosts, timeout=timeout)
def verify_cdp_blocked(self, hosts=None, timeout=3):
self.ladvd_noisemaker(
'-C -o %s', 'ether dst host 01:00:0c:cc:cc:cc and ether[20:2]==0x2000',
hosts, timeout=timeout)
self.wait_nonzero_packet_count_flow(
{'dl_dst': '01:00:0c:cc:cc:cc'}, self._FLOOD_TABLE, actions=[], ofa_match=False)
def verify_faucet_reconf(self, timeout=20,
cold_start=True, change_expected=True,
hup=True, reconf_funcs=None, dpid=True):
"""HUP and verify the HUP was processed."""
if reconf_funcs is None:
reconf_funcs = []
if hup:
for controller in self.faucet_controllers:
reconf_funcs.append(partial(self.hup_controller, controller=controller.name))
var = 'faucet_config_reload_warm_total'
if cold_start:
var = 'faucet_config_reload_cold_total'
old_counts = []
start_configure_counts = []
for controller in self.faucet_controllers:
old_count = int(
self.scrape_prometheus_var(var, controller=controller.name, dpid=dpid, default=0))
old_counts.append(old_count)
start_configure_count = self.get_configure_count(controller=controller.name)
start_configure_counts.append(start_configure_count)
for reconf_func in reconf_funcs:
reconf_func()
for i, controller in enumerate(self.faucet_controllers):
cont_name = controller.name
start_configure_count = start_configure_counts[i]
for _ in range(timeout):
configure_count = self.get_configure_count(controller=cont_name)
if configure_count > start_configure_count:
break
time.sleep(1)
self.assertNotEqual(
start_configure_count, configure_count, 'FAUCET %s did not reconfigure' % cont_name)
if cold_start is not None:
old_count = old_counts[i]
if change_expected:
for _ in range(timeout):
new_count = int(
self.scrape_prometheus_var(var, controller=cont_name,
dpid=dpid, default=0))
if new_count > old_count:
break
time.sleep(1)
self.assertTrue(
new_count > old_count,
msg='FAUCET %s %s did not increment: %u' % (cont_name, var, new_count))
else:
new_count = int(
self.scrape_prometheus_var(var, controller=cont_name,
dpid=dpid, default=0))
self.assertEqual(
old_count, new_count,
msg='FAUCET %s %s incremented: %u' % (cont_name, var, new_count))
self.wait_for_prometheus_var('faucet_config_applied', 1,
controller=cont_name, dpid=None, timeout=30)
self.wait_dp_status(1, controller=cont_name)
def force_faucet_reload(self, new_config):
"""Force FAUCET to reload."""
with open(self.faucet_config_path, 'w') as config_file:
config_file.write(new_config)
self.verify_faucet_reconf(change_expected=False)
def get_host_port_stats(self, hosts_switch_ports):
port_stats = {}
for host, switch_port in hosts_switch_ports:
if host not in port_stats:
port_stats[host] = {}
port_stats[host].update(self.get_port_stats_from_dpid(
self.dpid, switch_port))
return port_stats
def wait_host_stats_updated(self, hosts_switch_ports, timeout, sync_counters_func=None):
first = self.get_host_port_stats(hosts_switch_ports)
for _ in range(timeout):
if sync_counters_func:
sync_counters_func()
if self.get_host_port_stats(hosts_switch_ports) != first:
return
time.sleep(1)
self.fail('port stats for %s never updated' % hosts_switch_ports)
def of_bytes_mbps(self, start_port_stats, end_port_stats, var, seconds):
return (end_port_stats[var] - start_port_stats[var]) * 8 / seconds / self.ONEMBPS
def verify_iperf_min(self, hosts_switch_ports, min_mbps, client_ip, server_ip,
seconds=5, prop=0.2, sync_counters_func=None):
"""Verify minimum performance and OF counters match iperf approximately."""
# Attempt loose counter sync before starting.
self.wait_host_stats_updated(
hosts_switch_ports, timeout=seconds * 2, sync_counters_func=sync_counters_func)
start_port_stats = self.get_host_port_stats(hosts_switch_ports)
hosts = [host for host, _ in hosts_switch_ports]
client_host, server_host = hosts
iperf_mbps = self.iperf(
client_host, client_ip, server_host, server_ip, seconds)
self.assertGreater(iperf_mbps, min_mbps)
# TODO: account for drops.
for _ in range(3):
end_port_stats = self.get_host_port_stats(hosts_switch_ports)
approx_match = True
for host in hosts:
of_rx_mbps = self.of_bytes_mbps(
start_port_stats[host], end_port_stats[host], 'rx_bytes', seconds)
of_tx_mbps = self.of_bytes_mbps(
start_port_stats[host], end_port_stats[host], 'tx_bytes', seconds)
output(of_rx_mbps, of_tx_mbps)
max_of_mbps = float(max(of_rx_mbps, of_tx_mbps))
iperf_to_max = 0
if max_of_mbps:
iperf_to_max = iperf_mbps / max_of_mbps
msg = 'iperf: %fmbps, of: %fmbps (%f)' % (
iperf_mbps, max_of_mbps, iperf_to_max)
error(msg)
if ((iperf_to_max < (1.0 - prop))
or (iperf_to_max > (1.0 + prop))):
approx_match = False
if approx_match:
return
time.sleep(1)
self.fail(msg=msg)
@staticmethod
def port_labels(port_no):
port_name = 'b%u' % port_no
return {'port': port_name, 'port_description': port_name}
def set_dpid_names(self, dpid_names):
self.dpid_names = copy.deepcopy(dpid_names)
def wait_port_status(self, dpid, port_no, status, expected_status, timeout=10):
for _ in range(timeout):
port_status = self.scrape_prometheus_var(
'port_status', self.port_labels(port_no), default=None, dpid=dpid)
if port_status is not None and port_status == expected_status:
return
self._portmod(dpid, port_no, status, ofp.OFPPC_PORT_DOWN)
time.sleep(1)
self.fail('dpid %x port %s status %s != expected %u' % (
dpid, port_no, port_status, expected_status))
def set_port_status(self, dpid, port_no, status, wait):
if dpid is None:
dpid = self.dpid
expected_status = 1
if status == ofp.OFPPC_PORT_DOWN:
expected_status = 0
self._portmod(dpid, port_no, status, ofp.OFPPC_PORT_DOWN)
if wait:
self.wait_port_status(int(dpid), port_no, status, expected_status)
def set_port_down(self, port_no, dpid=None, wait=True):
self.set_port_status(dpid, port_no, ofp.OFPPC_PORT_DOWN, wait)
def set_port_up(self, port_no, dpid=None, wait=True):
self.set_port_status(dpid, port_no, 0, wait)
def wait_dp_status(self, expected_status, controller=None, timeout=30):
if controller is None:
controller = self.faucet_controllers[0].name
return self.wait_for_prometheus_var(
'dp_status', expected_status, any_labels=True, controller=controller,
default=None, timeout=timeout)
def _get_tableid(self, name, retries, default):
return self.scrape_prometheus_var(
'faucet_config_table_names', {'table_name': name},
retries=retries, default=default)
def quiet_commands(self, host, commands):
for command in commands:
result = host.cmd(command)
self.assertEqual('', result, msg='%s: %s' % (command, result))
def _config_tableids(self):
# Wait for VLAN table to appear, rapidly scrape the rest.
self._VLAN_TABLE = self._get_tableid(
'vlan', 1, self._VLAN_TABLE)
self._COPRO_TABLE = self._get_tableid(
'copro', 1, self._COPRO_TABLE)
self._PORT_ACL_TABLE = self._get_tableid(
'port_acl', 1, self._PORT_ACL_TABLE)
self._VLAN_ACL_TABLE = self._get_tableid(
'vlan_acl', 1, self._VLAN_ACL_TABLE)
self._ETH_SRC_TABLE = self._get_tableid(
'eth_src', 1, self._ETH_SRC_TABLE)
self._IPV4_FIB_TABLE = self._get_tableid(
'ipv4_fib', 1, self._IPV4_FIB_TABLE)
self._IPV6_FIB_TABLE = self._get_tableid(
'ipv6_fib', 1, self._IPV6_FIB_TABLE)
self._VIP_TABLE = self._get_tableid(
'vip', 1, self._VIP_TABLE)
self._ETH_DST_HAIRPIN_TABLE = self._get_tableid(
'eth_dst_hairpin', 1, self._ETH_DST_HAIRPIN_TABLE)
self._ETH_DST_TABLE = self._get_tableid(
'eth_dst', 1, self._ETH_DST_TABLE)
self._FLOOD_TABLE = self._get_tableid(
'flood', 1, self._FLOOD_TABLE)
def _dp_ports(self):
return list(sorted(self.port_map.values()))
def flap_port(self, port_no, flap_time=MIN_FLAP_TIME):
self.set_port_down(port_no)
time.sleep(flap_time)
self.set_port_up(port_no)
def flap_all_switch_ports(self, flap_time=MIN_FLAP_TIME):
"""Flap all ports on switch."""
for port_no in self._dp_ports():
self.flap_port(port_no, flap_time=flap_time)
@staticmethod
def get_mac_of_intf(intf, host=None):
"""Get MAC address of a port."""
address_file_name = '/sys/class/net/%s/address' % intf
if host is None:
with open(address_file_name) as address_file:
address = address_file.read()
else:
address = host.cmd('cat %s' % address_file_name)
return address.strip().lower()
def add_macvlan(self, host, macvlan_intf, ipa=None, ipm=24, mac=None, mode='vepa'):
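# Create a macvlan sub-interface on the host's default interface, optionally setting its MAC,
# mode and an IPv4 address.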
if mac is None:
mac = ''
else:
mac = 'address %s' % mac
add_cmds = [
'ip link add %s link %s %s type macvlan mode %s' % (
macvlan_intf, host.defaultIntf(), mac, mode),
'ip link set dev %s up' % macvlan_intf]
if ipa:
add_cmds.append(
'ip address add %s/%s brd + dev %s' % (ipa, ipm, macvlan_intf))
self.quiet_commands(host, add_cmds)
def del_macvlan(self, host, macvlan_intf):
self.quiet_commands(host, [
host.cmd('ip link del link %s %s' % (
host.defaultIntf(), macvlan_intf))])
def add_host_ipv6_address(self, host, ip_v6, intf=None):
"""Add an IPv6 address to a Mininet host."""
if intf is None:
intf = host.intf()
self.quiet_commands(host, [
host.cmd('ip -6 addr add %s dev %s' % (ip_v6, intf))])
def add_host_route(self, host, ip_dst, ip_gw):
"""Add an IP route to a Mininet host."""
host.cmd('ip -%u route del %s' % (
ip_dst.version, ip_dst.network.with_prefixlen))
add_cmd = 'ip -%u route add %s via %s' % (
ip_dst.version, ip_dst.network.with_prefixlen, ip_gw)
self.quiet_commands(host, (add_cmd,))
def _ip_ping(self, host, dst, retries, timeout=500,
fping_bin='fping', intf=None, expected_result=True, count=1,
require_host_learned=require_host_learned):
"""Ping a destination from a host"""
if intf is None:
intf = host.defaultIntf()
good_ping = r'xmt/rcv/%%loss = %u/%u/0%%' % (count, count)
ping_cmd = '%s %s -c%u -I%s -t%u %s' % (
fping_bin, self.FPING_ARGS, count, intf, timeout, dst)
if require_host_learned:
self.require_host_learned(host)
pause = timeout / 1e3
for _ in range(retries):
ping_out = host.cmd(ping_cmd)
ping_result = bool(re.search(good_ping, ping_out))
if ping_result:
break
time.sleep(pause)
pause *= 2
self.assertEqual(ping_result, expected_result, msg='%s %s: %s' % (
ping_cmd, ping_result, ping_out))
def one_ipv4_ping(self, host, dst, retries=3, timeout=1000, intf=None,
require_host_learned=True, expected_result=True):
"""Ping an IPv4 destination from a host."""
return self._ip_ping(
host, dst, retries,
timeout=timeout, fping_bin='fping', intf=intf,
require_host_learned=require_host_learned,
expected_result=expected_result)
@staticmethod
def flush_arp_cache(host):
"""Flush the ARP cache for a host."""
host.cmd("ip -s neigh flush all")
def one_ipv4_controller_ping(self, host):
"""Ping the controller from a host with IPv4."""
self.flush_arp_cache(host)
self.one_ipv4_ping(host, self.FAUCET_VIPV4.ip)
self.verify_ipv4_host_learned_mac(
host, self.FAUCET_VIPV4.ip, self.FAUCET_MAC)
def one_ipv6_ping(self, host, dst, retries=5, timeout=1000, intf=None,
require_host_learned=True, expected_result=True):
"""Ping an IPv6 destination from a host."""
return self._ip_ping(
host, dst, retries,
timeout=timeout, fping_bin='fping6', intf=intf,
require_host_learned=require_host_learned,
expected_result=expected_result)
def one_ipv6_controller_ping(self, host):
"""Ping the controller from a host with IPv6."""
self.one_ipv6_ping(host, self.FAUCET_VIPV6.ip)
# TODO: VIP might not be in neighbor table if still tentative/ND used
# non VIP source address.
# Make test host source addresses consistent.
# self.verify_ipv6_host_learned_mac(
# host, self.FAUCET_VIPV6.ip, self.FAUCET_MAC)
def ping_all(self, timeout=3):
"""Provide reasonable timeout default to Mininet's pingAll()."""
return self.net.pingAll(timeout=timeout)
def ping(self, hosts, timeout=3):
"""Provide reasonable timeout default to Mininet's ping()."""
return self.net.ping(hosts, timeout=timeout)
def retry_net_ping(self, hosts=None, required_loss=0, retries=3, timeout=2):
loss = None
for _ in range(retries):
if hosts is None:
loss = self.ping_all(timeout=timeout)
else:
loss = self.net.ping(hosts, timeout=timeout)
if loss <= required_loss:
return
time.sleep(1)
self.fail('ping %f loss > required loss %f' % (loss, required_loss))
@staticmethod
def tcp_port_free(host, port, ipv=4):
listen_out = host.cmd(
mininet_test_util.tcp_listening_cmd(port, ipv))
if listen_out:
return listen_out
return None
def wait_for_tcp_free(self, host, port, timeout=10, ipv=4):
"""Wait for a host to start listening on a port."""
for _ in range(timeout):
listen_out = self.tcp_port_free(host, port, ipv)
if listen_out is None:
return
time.sleep(1)
self.fail('%s busy on port %u (%s)' % (host, port, listen_out))
def wait_for_tcp_listen(self, host, port, timeout=10, ipv=4):
"""Wait for a host to start listening on a port."""
for _ in range(timeout):
listen_out = self.tcp_port_free(host, port, ipv)
if listen_out is not None:
return
time.sleep(1)
self.fail('%s never listened on port %u' % (host, port))
def serve_str_on_tcp_port(self, host, port, serve_str='hello', timeout=20):
"""Serve str on a TCP port on a host."""
host.cmd(mininet_test_util.timeout_cmd(
'echo %s | nc -l %s %u &' % (serve_str, host.IP(), port), timeout))
self.wait_for_tcp_listen(host, port)
def wait_nonzero_packet_count_flow(self, match, table_id, timeout=15,
actions=None, dpid=None, ofa_match=True):
"""Wait for a flow to be present and have a non-zero packet_count."""
if dpid is None:
dpid = self.dpid
for _ in range(timeout):
flow = self.get_matching_flow_on_dpid(
dpid, match, table_id, timeout=1,
actions=actions, ofa_match=ofa_match)
if flow and flow['packet_count'] > 0:
return
time.sleep(1)
if flow:
self.fail('DPID %s flow %s matching %s table ID %s had zero packet count' %
(dpid, flow, match, table_id))
else:
self.fail('no flow matching %s table ID %s' % (match, table_id))
def verify_tp_dst_blocked(self, port, first_host, second_host, table_id=0, mask=None):
"""Verify that a TCP port on a host is blocked from another host."""
client_cmd = mininet_test_util.timeout_cmd('nc %s %u' % (second_host.IP(), port), 5)
self.serve_str_on_tcp_port(second_host, port)
self.quiet_commands(first_host, (client_cmd,))
if table_id is None:
return
match = {
'dl_type': IPV4_ETH, 'ip_proto': 6
}
match_port = int(port)
if mask is not None:
match_port = '/'.join((str(port), str(mask)))
match['tp_dst'] = match_port
self.wait_nonzero_packet_count_flow(match, table_id, ofa_match=False)
# cleanup listening nc (if any)
second_host.cmd(client_cmd)
def verify_tp_dst_notblocked(self, port, first_host, second_host, table_id=0):
"""Verify that a TCP port on a host is NOT blocked from another host."""
serve_str = ''.join(random.choice(string.ascii_letters) for i in range(8))
self.serve_str_on_tcp_port(second_host, port, serve_str=serve_str)
client_str = first_host.cmd('nc -w 10 %s %u' % (second_host.IP(), port)).strip()
self.assertEqual(serve_str, client_str)
if table_id is None:
return
self.wait_nonzero_packet_count_flow(
{'tp_dst': int(port), 'dl_type': IPV4_ETH, 'ip_proto': 6}, table_id)
def bcast_dst_blocked_helper(self, port, first_host, second_host, success_re, retries):
tcpdump_filter = 'udp and ether src %s and ether dst %s' % (
first_host.MAC(), "ff:ff:ff:ff:ff:ff")
target_addr = str(self.FAUCET_VIPV4.network.broadcast_address)
for _ in range(retries):
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
partial(first_host.cmd, (
'date | socat - udp-datagram:%s:%d,broadcast' % (
target_addr, port)))],
packets=1)
if re.search(success_re, tcpdump_txt):
return True
time.sleep(1)
return False
def verify_bcast_dst_blocked(self, port, first_host, second_host):
"""Verify that a UDP port on a host is blocked from broadcast."""
self.assertTrue(self.bcast_dst_blocked_helper(
port, first_host, second_host, r'0 packets received by filter', 1))
def verify_bcast_dst_notblocked(self, port, first_host, second_host):
"""Verify that a UDP port on a host is NOT blocked from broadcast."""
self.assertTrue(self.bcast_dst_blocked_helper(
port, first_host, second_host, r'1 packet received by filter', 3))
@staticmethod
def swap_host_macs(first_host, second_host):
"""Swap the MAC addresses of two Mininet hosts."""
first_host_mac = first_host.MAC()
second_host_mac = second_host.MAC()
first_host.setMAC(second_host_mac)
second_host.setMAC(first_host_mac)
def start_exabgp(self, exabgp_conf, timeout=30, log_prefix=''):
"""Start exabgp process on controller host."""
exabgp_conf_file_name = os.path.join(self.tmpdir, '%sexabgp.conf' % log_prefix)
exabgp_log = os.path.join(self.tmpdir, '%sexabgp.log' % log_prefix)
exabgp_out = os.path.join(self.tmpdir, '%sexabgp.out' % log_prefix)
exabgp_env = ' '.join((
'exabgp.daemon.user=root',
'exabgp.log.all=true',
'exabgp.log.level=DEBUG',
'exabgp.log.destination=%s' % exabgp_log,
))
bgp_port = self.config_ports['bgp_port']
exabgp_conf = exabgp_conf % {'bgp_port': bgp_port}
with open(exabgp_conf_file_name, 'w') as exabgp_conf_file:
exabgp_conf_file.write(exabgp_conf)
controller = self._get_controller()
# Ensure exabgp only attempts one connection.
exabgp_cmd = mininet_test_util.timeout_cmd(
'exabgp %s --once -d 2>&1 > %s &' % (
exabgp_conf_file_name, exabgp_out), 300)
exabgp_cli = 'env %s %s' % (exabgp_env, exabgp_cmd)
controller.cmd(exabgp_cli)
for _ in range(timeout):
if os.path.exists(exabgp_log):
break
time.sleep(1)
self.assertTrue(
os.path.exists(exabgp_log), msg='exabgp (%s) did not start' % exabgp_cli)
return (exabgp_log, exabgp_out)
def wait_bgp_up(self, neighbor, vlan, exabgp_log, exabgp_err):
"""Wait for BGP to come up."""
label_values = {
'neighbor': neighbor,
'vlan': vlan,
}
for _ in range(60):
uptime = self.scrape_prometheus_var(
'bgp_neighbor_uptime', label_values, default=0)
if uptime > 0:
return
time.sleep(1)
exabgp_log_content = []
for log_name in (exabgp_log, exabgp_err):
if os.path.exists(log_name):
with open(log_name) as log:
exabgp_log_content.append(log.read())
self.fail('exabgp did not peer with FAUCET: %s' % '\n'.join(exabgp_log_content))
@staticmethod
def matching_lines_from_file(exp, log_name):
exp_re = re.compile(exp)
with open(log_name) as log_file:
return [log_line for log_line in log_file if exp_re.match(log_line)]
def wait_until_matching_lines_from_file(self, exp, log_name, timeout=30, count=1):
"""Require (count) matching lines to be present in file."""
assert timeout >= 1
lines = []
for _ in range(timeout):
if os.path.exists(log_name):
lines = self.matching_lines_from_file(exp, log_name)
if len(lines) >= count:
return lines
time.sleep(1)
self.fail('%s not found in %s (%d/%d)' % (exp, log_name, len(lines), count))
def wait_until_no_matching_lines_from_file(self, exp, log_name, timeout=30, count=1):
"""Require (count) matching lines to be non-existent in file."""
assert timeout >= 1
lines = []
for _ in range(timeout):
if os.path.exists(log_name):
lines = self.matching_lines_from_file(exp, log_name)
if len(lines) >= count:
return self.fail('%s found in %s (%d/%d)' % (exp, log_name, len(lines), count))
time.sleep(1)
return lines
def wait_until_matching_lines_from_faucet_log_files(self, exp, timeout=30, count=1):
"""Require (count) matching lines to be present in file"""
for controller_env in self.env.values():
if 'FAUCET_LOG' in controller_env:
log_name = controller_env['FAUCET_LOG']
self.wait_until_matching_lines_from_file(exp, log_name, timeout, count)
def wait_until_matching_lines_from_gauge_log_files(self, exp, timeout=30, count=1):
"""Require (count) matching lines to be present in file"""
for controller_env in self.env.values():
if 'GAUGE_LOG' in controller_env:
log_name = controller_env['GAUGE_LOG']
self.wait_until_matching_lines_from_file(exp, log_name, timeout, count)
def exabgp_updates(self, exabgp_log, timeout=60):
"""Verify that exabgp process has received BGP updates."""
controller = self._get_controller()
updates = []
# exabgp should have received our BGP updates
for _ in range(timeout):
updates = controller.cmd(
r'grep UPDATE %s |grep -Eo "\S+ next-hop \S+"' % exabgp_log)
if updates:
break
time.sleep(1)
self.assertTrue(updates, 'exabgp did not receive BGP updates')
return updates
def wait_exabgp_sent_updates(self, exabgp_log_name):
"""Verify that exabgp process has sent BGP updates."""
self.wait_until_matching_lines_from_file(
r'.+>> [1-9]+[0-9]* UPDATE.+', exabgp_log_name, timeout=60)
def start_wpasupplicant(self, host, wpasupplicant_conf, timeout=10, log_prefix='',
wpa_ctrl_socket_path=''):
"""Start wpasupplicant process on Mininet host."""
wpasupplicant_conf_file_name = os.path.join(
self.tmpdir, '%swpasupplicant.conf' % log_prefix)
wpasupplicant_log = os.path.join(
self.tmpdir, '%swpasupplicant.log' % log_prefix)
with open(wpasupplicant_conf_file_name, 'w') as wpasupplicant_conf_file:
wpasupplicant_conf_file.write(wpasupplicant_conf)
wpa_ctrl_socket = ''
if wpa_ctrl_socket_path:
wpa_ctrl_socket = '-C %s' % wpa_ctrl_socket_path
wpasupplicant_cmd = mininet_test_util.timeout_cmd(
'wpa_supplicant -dd -t -c %s -i %s -D wired -f %s %s &' % (
wpasupplicant_conf_file_name, host.defaultIntf(), wpasupplicant_log,
wpa_ctrl_socket), 300)
host.cmd(wpasupplicant_cmd)
for _ in range(timeout):
if os.path.exists(wpasupplicant_log):
break
time.sleep(1)
self.assertTrue(
os.path.exists(wpasupplicant_log),
msg='wpasupplicant (%s) did not start' % wpasupplicant_cmd)
return wpasupplicant_log
def ping_all_when_learned(self, retries=3, hard_timeout=1):
"""Verify all hosts can ping each other once FAUCET has learned them all."""
# Cause hosts to send traffic that FAUCET can use to learn them.
for _ in range(retries):
loss = self.ping_all()
# we should have learned all hosts now, so should have no loss.
for host in self.hosts_name_ordered():
self.require_host_learned(host, hard_timeout=hard_timeout)
if loss == 0:
return
self.assertEqual(0, loss)
def match_table(self, prefix):
exp_prefix = '%s/%s' % (
prefix.network_address, prefix.netmask)
if prefix.version == 6:
nw_dst_match = {'ipv6_dst': exp_prefix, 'dl_type': IPV6_ETH}
table_id = self._IPV6_FIB_TABLE
else:
nw_dst_match = {'nw_dst': exp_prefix, 'dl_type': IPV4_ETH}
table_id = self._IPV4_FIB_TABLE
return (nw_dst_match, table_id)
def wait_for_route_as_flow(self, nexthop, prefix,
vlan_vid=None, timeout=30,
nonzero_packets=False):
"""Verify a route has been added as a flow."""
nw_dst_match, table_id = self.match_table(prefix)
nexthop_action = 'SET_FIELD: {eth_dst:%s}' % nexthop
if vlan_vid is not None:
nw_dst_match['dl_vlan'] = str(vlan_vid)
if nonzero_packets:
self.wait_nonzero_packet_count_flow(
nw_dst_match, table_id, timeout=timeout,
actions=[nexthop_action], ofa_match=False)
else:
self.wait_until_matching_flow(
nw_dst_match, table_id, timeout=timeout,
actions=[nexthop_action], ofa_match=False)
def host_ipv4_alias(self, host, alias_ip, intf=None):
"""Add an IPv4 alias address to a host."""
if intf is None:
intf = host.intf()
del_cmd = 'ip addr del %s dev %s' % (
alias_ip.with_prefixlen, intf)
add_cmd = 'ip addr add %s dev %s label %s:1' % (
alias_ip.with_prefixlen, intf, intf)
host.cmd(del_cmd)
self.quiet_commands(host, (add_cmd,))
@staticmethod
def _ip_neigh(host, ipa, ip_ver):
neighbors = host.cmd('ip -%u neighbor show %s' % (ip_ver, ipa))
neighbors_fields = neighbors.split()
if len(neighbors_fields) >= 5:
return neighbors_fields[4]
return None
def _verify_host_learned_mac(self, host, ipa, ip_ver, mac, retries):
for _ in range(retries):
if self._ip_neigh(host, ipa, ip_ver) == mac:
return
time.sleep(1)
self.fail(
'could not verify %s resolved to %s' % (ipa, mac))
def verify_ipv4_host_learned_mac(self, host, ipa, mac, retries=3):
self._verify_host_learned_mac(host, ipa, 4, mac, retries)
def verify_ipv4_host_learned_host(self, host, learned_host):
learned_ip = ipaddress.ip_interface(self.host_ipv4(learned_host))
self.verify_ipv4_host_learned_mac(host, learned_ip.ip, learned_host.MAC())
def verify_ipv6_host_learned_mac(self, host, ip6, mac, retries=3):
self._verify_host_learned_mac(host, ip6, 6, mac, retries)
def verify_ipv6_host_learned_host(self, host, learned_host):
learned_ip6 = ipaddress.ip_interface(self.host_ipv6(learned_host))
self.verify_ipv6_host_learned_mac(host, learned_ip6.ip, learned_host.MAC())
def iperf_client(self, client_host, iperf_client_cmd):
iperf_results = client_host.cmd(iperf_client_cmd)
iperf_csv = iperf_results.strip().split(',')
if len(iperf_csv) == 9:
return int(iperf_csv[-1]) / self.ONEMBPS
return -1
def iperf(self, client_host, client_ip, server_host, server_ip, seconds):
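# Launch an iperf server on server_host, wait for its listen banner, run the client and
# return the measured Mbps; retry up to three times, each time with a freshly allocated port.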
def run_iperf(iperf_server_cmd, server_host, server_start_exp, port):
server_out = server_host.popen(
iperf_server_cmd,
stdin=mininet_test_util.DEVNULL,
stderr=subprocess.STDOUT,
close_fds=True)
popens = {server_host: server_out}
for host, line in pmonitor(popens):
if host != server_host:
continue
if not re.search(server_start_exp, line):
continue
self.wait_for_tcp_listen(
server_host, port, ipv=server_ip.version)
iperf_mbps = self.iperf_client(
client_host, iperf_client_cmd)
self._signal_proc_on_port(server_host, port, 9)
return iperf_mbps
return None
timeout = (seconds * 3) + 5
for _ in range(3):
port = mininet_test_util.find_free_port(
self.ports_sock, self._test_name())
iperf_base_cmd = 'iperf -f M -p %u' % port
if server_ip.version == 6:
iperf_base_cmd += ' -V'
iperf_server_cmd = '%s -s -B %s' % (iperf_base_cmd, server_ip)
iperf_server_cmd = mininet_test_util.timeout_cmd(
iperf_server_cmd, timeout)
server_start_exp = r'Server listening on TCP port %u' % port
iperf_client_cmd = mininet_test_util.timeout_cmd(
'%s -y c -c %s -B %s -t %u' % (iperf_base_cmd, server_ip, client_ip, seconds),
timeout)
iperf_mbps = run_iperf(iperf_server_cmd, server_host, server_start_exp, port)
if iperf_mbps is not None and iperf_mbps > 0:
return iperf_mbps
time.sleep(1)
if iperf_mbps == -1:
self.fail('iperf client %s did not connect to server %s' % (
iperf_client_cmd, iperf_server_cmd))
self.fail('iperf server %s never started' % iperf_server_cmd)
def verify_ipv4_routing(self, first_host, first_host_routed_ip,
second_host, second_host_routed_ip):
"""Verify one host can IPV4 route to another via FAUCET."""
self.host_ipv4_alias(first_host, first_host_routed_ip)
self.host_ipv4_alias(second_host, second_host_routed_ip)
self.add_host_route(
first_host, second_host_routed_ip, self.FAUCET_VIPV4.ip)
self.add_host_route(
second_host, first_host_routed_ip, self.FAUCET_VIPV4.ip)
self.net.ping(hosts=(first_host, second_host))
self.wait_for_route_as_flow(
first_host.MAC(), first_host_routed_ip.network)
self.wait_for_route_as_flow(
second_host.MAC(), second_host_routed_ip.network)
self.one_ipv4_ping(first_host, second_host_routed_ip.ip)
self.one_ipv4_ping(second_host, first_host_routed_ip.ip)
self.verify_ipv4_host_learned_host(first_host, second_host)
self.verify_ipv4_host_learned_host(second_host, first_host)
# verify at least 1M iperf
for client_host, client_ip, server_host, server_ip in (
(first_host, first_host_routed_ip.ip,
second_host, second_host_routed_ip.ip),
(second_host, second_host_routed_ip.ip,
first_host, first_host_routed_ip.ip)):
iperf_mbps = self.iperf(
client_host, client_ip, server_host, server_ip, 5)
error('%s: %u mbps to %s\n' % (self._test_name(), iperf_mbps, server_ip))
self.assertGreater(iperf_mbps, 1)
# verify packets matched routing flows
self.wait_for_route_as_flow(
first_host.MAC(), first_host_routed_ip.network,
nonzero_packets=True)
self.wait_for_route_as_flow(
second_host.MAC(), second_host_routed_ip.network,
nonzero_packets=True)
def verify_ipv4_routing_mesh(self):
"""Verify hosts can route to each other via FAUCET."""
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_routed_ip = ipaddress.ip_interface('10.0.1.1/24')
second_host_routed_ip = ipaddress.ip_interface('10.0.2.1/24')
second_host_routed_ip2 = ipaddress.ip_interface('10.0.3.1/24')
self.verify_ipv4_routing(
first_host, first_host_routed_ip,
second_host, second_host_routed_ip)
self.verify_ipv4_routing(
first_host, first_host_routed_ip,
second_host, second_host_routed_ip2)
self.swap_host_macs(first_host, second_host)
self.verify_ipv4_routing(
first_host, first_host_routed_ip,
second_host, second_host_routed_ip)
self.verify_ipv4_routing(
first_host, first_host_routed_ip,
second_host, second_host_routed_ip2)
@staticmethod
def host_drop_all_ips(host):
for ipv in (4, 6):
host.cmd('ip -%u addr flush dev %s' % (ipv, host.defaultIntf()))
def setup_ipv6_hosts_addresses(self, first_host, first_host_ip,
first_host_routed_ip, second_host,
second_host_ip, second_host_routed_ip):
"""Configure host IPv6 addresses for testing."""
for host in first_host, second_host:
for intf in ('lo', host.intf()):
host.cmd('ip -6 addr flush dev %s' % intf)
self.add_host_ipv6_address(first_host, first_host_ip)
self.add_host_ipv6_address(second_host, second_host_ip)
self.add_host_ipv6_address(first_host, first_host_routed_ip, intf='lo')
self.add_host_ipv6_address(second_host, second_host_routed_ip, intf='lo')
for host in first_host, second_host:
self.require_host_learned(host)
def verify_ipv6_routing(self, first_host, first_host_ip,
first_host_routed_ip, second_host,
second_host_ip, second_host_routed_ip):
"""Verify one host can IPV6 route to another via FAUCET."""
self.one_ipv6_ping(first_host, second_host_ip.ip)
self.one_ipv6_ping(second_host, first_host_ip.ip)
self.add_host_route(
first_host, second_host_routed_ip, self.FAUCET_VIPV6.ip)
self.add_host_route(
second_host, first_host_routed_ip, self.FAUCET_VIPV6.ip)
self.wait_for_route_as_flow(
first_host.MAC(), first_host_routed_ip.network)
self.wait_for_route_as_flow(
second_host.MAC(), second_host_routed_ip.network)
self.one_ipv6_controller_ping(first_host)
self.one_ipv6_controller_ping(second_host)
self.one_ipv6_ping(first_host, second_host_routed_ip.ip)
# verify at least 1M iperf
for client_host, client_ip, server_host, server_ip in (
(first_host, first_host_routed_ip.ip,
second_host, second_host_routed_ip.ip),
(second_host, second_host_routed_ip.ip,
first_host, first_host_routed_ip.ip)):
iperf_mbps = self.iperf(
client_host, client_ip, server_host, server_ip, 5)
error('%s: %u mbps to %s\n' % (self._test_name(), iperf_mbps, server_ip))
self.assertGreater(iperf_mbps, 1)
self.one_ipv6_ping(first_host, second_host_ip.ip)
self.verify_ipv6_host_learned_mac(
first_host, second_host_ip.ip, second_host.MAC())
self.one_ipv6_ping(second_host, first_host_ip.ip)
self.verify_ipv6_host_learned_mac(
second_host, first_host_ip.ip, first_host.MAC())
def verify_ipv6_routing_pair(self, first_host, first_host_ip,
first_host_routed_ip, second_host,
second_host_ip, second_host_routed_ip):
"""Verify hosts can route IPv6 to each other via FAUCET."""
self.setup_ipv6_hosts_addresses(
first_host, first_host_ip, first_host_routed_ip,
second_host, second_host_ip, second_host_routed_ip)
self.verify_ipv6_routing(
first_host, first_host_ip, first_host_routed_ip,
second_host, second_host_ip, second_host_routed_ip)
def verify_ipv6_routing_mesh(self):
"""Verify IPv6 routing between hosts and multiple subnets."""
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_ip = ipaddress.ip_interface('fc00::1:1/112')
second_host_ip = ipaddress.ip_interface('fc00::1:2/112')
first_host_routed_ip = ipaddress.ip_interface('fc00::10:1/112')
second_host_routed_ip = ipaddress.ip_interface('fc00::20:1/112')
second_host_routed_ip2 = ipaddress.ip_interface('fc00::30:1/112')
self.verify_ipv6_routing_pair(
first_host, first_host_ip, first_host_routed_ip,
second_host, second_host_ip, second_host_routed_ip)
self.verify_ipv6_routing_pair(
first_host, first_host_ip, first_host_routed_ip,
second_host, second_host_ip, second_host_routed_ip2)
self.swap_host_macs(first_host, second_host)
self.verify_ipv6_routing_pair(
first_host, first_host_ip, first_host_routed_ip,
second_host, second_host_ip, second_host_routed_ip)
self.verify_ipv6_routing_pair(
first_host, first_host_ip, first_host_routed_ip,
second_host, second_host_ip, second_host_routed_ip2)
def verify_invalid_bgp_route(self, pattern):
"""Check if we see the pattern in Faucet's log."""
for cont_env in self.env.values():
if 'FAUCET_LOG' in cont_env:
lines = self.matching_lines_from_file(
pattern, cont_env['FAUCET_LOG'])
self.assertGreater(len(lines), 0, msg='%s not found in %s' %
(pattern, cont_env['FAUCET_LOG']))
|
[] |
[] |
[
"OVS_LOGDIR"
] |
[]
|
["OVS_LOGDIR"]
|
python
| 1 | 0 | |
contrib/terraform/terraform.py
|
#!/usr/bin/env python3
#
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# original: https://github.com/CiscoCloud/terraform.py
"""\
Dynamic inventory for Terraform - finds all `.tfstate` files below the working
directory and generates an inventory based on them.
"""
import argparse
from collections import defaultdict
import random
from functools import wraps
import json
import os
import re
VERSION = '0.4.0pre'
def tfstates(root=None):
root = root or os.getcwd()
for dirpath, _, filenames in os.walk(root):
for name in filenames:
if os.path.splitext(name)[-1] == '.tfstate':
yield os.path.join(dirpath, name)
def convert_to_v3_structure(attributes, prefix=''):
""" Convert the attributes from v4 to v3
Receives a dict and return a dictionary """
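# e.g. {'image': {'id': 'i-1', 'name': 'u'}} flattens to
# {'image.%': 2, 'image.id': 'i-1', 'image.name': 'u'}; lists become
# '<key>.#' (the length) plus one recursively-flattened entry per index.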
result = {}
if isinstance(attributes, str):
# In the case when we receive a string (e.g. values for security_groups)
return {'{}{}'.format(prefix, random.randint(1,10**10)): attributes}
for key, value in attributes.items():
if isinstance(value, list):
if len(value):
result['{}{}.#'.format(prefix, key)] = len(value)
for i, v in enumerate(value):
result.update(convert_to_v3_structure(v, '{}{}.{}.'.format(prefix, key, i)))
elif isinstance(value, dict):
result['{}{}.%'.format(prefix, key)] = len(value)
for k, v in value.items():
result['{}{}.{}'.format(prefix, key, k)] = v
else:
result['{}{}'.format(prefix, key)] = value
return result
def iterresources(filenames):
for filename in filenames:
with open(filename, 'r') as json_file:
state = json.load(json_file)
tf_version = state['version']
if tf_version == 3:
for module in state['modules']:
name = module['path'][-1]
for key, resource in module['resources'].items():
yield name, key, resource
elif tf_version == 4:
# In version 4 the structure changes so we need to iterate
# each instance inside the resource branch.
for resource in state['resources']:
name = resource['provider'].split('.')[-1]
for instance in resource['instances']:
key = "{}.{}".format(resource['type'], resource['name'])
if 'index_key' in instance:
key = "{}.{}".format(key, instance['index_key'])
data = {}
data['type'] = resource['type']
data['provider'] = resource['provider']
data['depends_on'] = instance.get('depends_on', [])
data['primary'] = {'attributes': convert_to_v3_structure(instance['attributes'])}
if 'id' in instance['attributes']:
data['primary']['id'] = instance['attributes']['id']
data['primary']['meta'] = instance['attributes'].get('meta',{})
yield name, key, data
else:
raise KeyError('tfstate version %d not supported' % tf_version)
## READ RESOURCES
PARSERS = {}
def _clean_dc(dcname):
# Consul DCs are strictly alphanumeric with underscores and hyphens -
# ensure that the consul_dc attribute meets these requirements.
return re.sub(r'[^\w_\-]', '-', dcname)
def iterhosts(resources):
'''yield host tuples of (name, attributes, groups)'''
for module_name, key, resource in resources:
resource_type, name = key.split('.', 1)
try:
parser = PARSERS[resource_type]
except KeyError:
continue
yield parser(resource, module_name)
def iterips(resources):
'''yield ip tuples of (instance_id, ip)'''
for module_name, key, resource in resources:
resource_type, name = key.split('.', 1)
if resource_type == 'openstack_compute_floatingip_associate_v2':
yield openstack_floating_ips(resource)
def parses(prefix):
def inner(func):
PARSERS[prefix] = func
return func
return inner
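# e.g. the @parses('openstack_compute_instance_v2') decorator below registers
# openstack_host() as the parser for that resource type.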
def calculate_mantl_vars(func):
"""calculate Mantl vars"""
@wraps(func)
def inner(*args, **kwargs):
name, attrs, groups = func(*args, **kwargs)
# attrs
if attrs.get('role', '') == 'control':
attrs['consul_is_server'] = True
else:
attrs['consul_is_server'] = False
# groups
if attrs.get('publicly_routable', False):
groups.append('publicly_routable')
return name, attrs, groups
return inner
def _parse_prefix(source, prefix, sep='.'):
for compkey, value in list(source.items()):
try:
curprefix, rest = compkey.split(sep, 1)
except ValueError:
continue
if curprefix != prefix or rest == '#':
continue
yield rest, value
def parse_attr_list(source, prefix, sep='.'):
attrs = defaultdict(dict)
for compkey, value in _parse_prefix(source, prefix, sep):
idx, key = compkey.split(sep, 1)
attrs[idx][key] = value
return list(attrs.values())
def parse_dict(source, prefix, sep='.'):
return dict(_parse_prefix(source, prefix, sep))
def parse_list(source, prefix, sep='.'):
return [value for _, value in _parse_prefix(source, prefix, sep)]
def parse_bool(string_form):
if type(string_form) is bool:
return string_form
token = string_form.lower()[0]
if token == 't':
return True
elif token == 'f':
return False
else:
raise ValueError('could not convert %r to a bool' % string_form)
@parses('packet_device')
def packet_device(resource, tfvars=None):
raw_attrs = resource['primary']['attributes']
name = raw_attrs['hostname']
groups = []
attrs = {
'id': raw_attrs['id'],
'facilities': parse_list(raw_attrs, 'facilities'),
'hostname': raw_attrs['hostname'],
'operating_system': raw_attrs['operating_system'],
'locked': parse_bool(raw_attrs['locked']),
'tags': parse_list(raw_attrs, 'tags'),
'plan': raw_attrs['plan'],
'project_id': raw_attrs['project_id'],
'state': raw_attrs['state'],
# ansible
'ansible_ssh_host': raw_attrs['network.0.address'],
'ansible_ssh_user': 'root', # Use root by default in packet
# generic
'ipv4_address': raw_attrs['network.0.address'],
'public_ipv4': raw_attrs['network.0.address'],
'ipv6_address': raw_attrs['network.1.address'],
'public_ipv6': raw_attrs['network.1.address'],
'private_ipv4': raw_attrs['network.2.address'],
'provider': 'packet',
}
if raw_attrs['operating_system'] == 'coreos_stable':
# For CoreOS set the ssh_user to core
attrs.update({'ansible_ssh_user': 'core'})
# add groups based on attrs
groups.append('packet_operating_system=' + attrs['operating_system'])
groups.append('packet_locked=%s' % attrs['locked'])
groups.append('packet_state=' + attrs['state'])
groups.append('packet_plan=' + attrs['plan'])
# groups specific to kubespray
groups = groups + attrs['tags']
return name, attrs, groups
def openstack_floating_ips(resource):
raw_attrs = resource['primary']['attributes']
return raw_attrs['instance_id'], raw_attrs['floating_ip']
@parses('openstack_compute_instance_v2')
@calculate_mantl_vars
def openstack_host(resource, module_name):
raw_attrs = resource['primary']['attributes']
name = raw_attrs['name']
groups = []
attrs = {
'access_ip_v4': raw_attrs['access_ip_v4'],
'access_ip_v6': raw_attrs['access_ip_v6'],
'access_ip': raw_attrs['access_ip_v4'],
'ip': raw_attrs['network.0.fixed_ip_v6'],
#'ip_log': raw_attrs['network.1.fixed_ip_v6'],
#'ip_sriov1': raw_attrs['network.2.fixed_ip_v6'],
#'ip_sriov2': raw_attrs['network.3.fixed_ip_v6'],
'flavor': parse_dict(raw_attrs, 'flavor',
sep='_'),
'id': raw_attrs['id'],
'image': parse_dict(raw_attrs, 'image',
sep='_'),
'key_pair': raw_attrs['key_pair'],
'metadata': parse_dict(raw_attrs, 'metadata'),
'network': parse_attr_list(raw_attrs, 'network'),
'region': raw_attrs.get('region', ''),
'security_groups': parse_list(raw_attrs, 'security_groups'),
# ansible
'ansible_ssh_port': 22,
# workaround for an OpenStack bug where hosts have a different domain
# after they're restarted
'host_domain': 'novalocal',
'use_host_domain': True,
# generic
'public_ipv4': raw_attrs['access_ip_v4'],
'private_ipv4': raw_attrs['access_ip_v4'],
'provider': 'openstack',
}
if 'floating_ip' in raw_attrs:
attrs['private_ipv4'] = raw_attrs['network.0.fixed_ip_v4']
try:
if 'metadata.prefer_ipv6' in raw_attrs and raw_attrs['metadata.prefer_ipv6'] == "1":
attrs.update({
'ansible_ssh_host': re.sub(r"[\[\]]", "", raw_attrs['access_ip_v6']),
'publicly_routable': True,
})
else:
attrs.update({
'ansible_ssh_host': raw_attrs['access_ip_v4'],
'publicly_routable': True,
})
except (KeyError, ValueError):
attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})
# Handling of floating IPs has changed: https://github.com/terraform-providers/terraform-provider-openstack/blob/master/CHANGELOG.md#010-june-21-2017
# attrs specific to Ansible
if 'metadata.ssh_user' in raw_attrs:
attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']
if 'volume.#' in list(raw_attrs.keys()) and int(raw_attrs['volume.#']) > 0:
device_index = 1
for key, value in list(raw_attrs.items()):
match = re.search("^volume.*.device$", key)
if match:
attrs['disk_volume_device_'+str(device_index)] = value
device_index += 1
# attrs specific to Mantl
attrs.update({
'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)),
'role': attrs['metadata'].get('role', 'none'),
'ansible_python_interpreter': attrs['metadata'].get('python_bin','python')
})
# add groups based on attrs
groups.append('os_image=' + attrs['image']['name'])
groups.append('os_flavor=' + attrs['flavor']['name'])
groups.extend('os_metadata_%s=%s' % item
for item in list(attrs['metadata'].items()))
groups.append('os_region=' + attrs['region'])
# groups specific to Mantl
groups.append('role=' + attrs['metadata'].get('role', 'none'))
groups.append('dc=' + attrs['consul_dc'])
# groups specific to kubespray
for group in attrs['metadata'].get('kubespray_groups', "").split(","):
groups.append(group)
return name, attrs, groups
def iter_host_ips(hosts, ips):
'''Update hosts that have an entry in the floating IP list'''
for host in hosts:
host_id = host[1]['id']
if host_id in ips:
ip = ips[host_id]
host[1].update({
'access_ip_v4': ip,
'access_ip': ip,
'public_ipv4': ip,
'ansible_ssh_host': ip,
})
if 'use_access_ip' in host[1]['metadata'] and host[1]['metadata']['use_access_ip'] == "0":
host[1].pop('access_ip')
yield host
## QUERY TYPES
def query_host(hosts, target):
for name, attrs, _ in hosts:
if name == target:
return attrs
return {}
def query_list(hosts):
groups = defaultdict(dict)
meta = {}
for name, attrs, hostgroups in hosts:
for group in set(hostgroups):
# Ansible 2.6.2 stopped supporting empty group names: https://github.com/ansible/ansible/pull/42584/commits/d4cd474b42ed23d8f8aabb2a7f84699673852eaf
# Empty group name defaults to "all" in Ansible < 2.6.2 so we alter empty group names to "all"
if not group: group = "all"
groups[group].setdefault('hosts', [])
groups[group]['hosts'].append(name)
meta[name] = attrs
groups['_meta'] = {'hostvars': meta}
return groups
def query_hostfile(hosts):
out = ['## begin hosts generated by terraform.py ##']
out.extend(
'{}\t{}'.format(attrs['ansible_ssh_host'].ljust(16), name)
for name, attrs, _ in hosts
)
out.append('## end hosts generated by terraform.py ##')
return '\n'.join(out)
def main():
parser = argparse.ArgumentParser(
__file__, __doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter, )
modes = parser.add_mutually_exclusive_group(required=True)
modes.add_argument('--list',
action='store_true',
help='list all variables')
modes.add_argument('--host', help='list variables for a single host')
modes.add_argument('--version',
action='store_true',
help='print version and exit')
modes.add_argument('--hostfile',
action='store_true',
help='print hosts as a /etc/hosts snippet')
parser.add_argument('--pretty',
action='store_true',
help='pretty-print output JSON')
parser.add_argument('--nometa',
action='store_true',
help='with --list, exclude hostvars')
default_root = os.environ.get('TERRAFORM_STATE_ROOT',
os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..', )))
parser.add_argument('--root',
default=default_root,
help='custom root to search for `.tfstate`s in')
args = parser.parse_args()
if args.version:
print('%s %s' % (__file__, VERSION))
parser.exit()
hosts = iterhosts(iterresources(tfstates(args.root)))
# Perform a second pass on the file to pick up floating_ip entries to update the ip address of referenced hosts
ips = dict(iterips(iterresources(tfstates(args.root))))
if ips:
hosts = iter_host_ips(hosts, ips)
if args.list:
output = query_list(hosts)
if args.nometa:
del output['_meta']
print(json.dumps(output, indent=4 if args.pretty else None))
elif args.host:
output = query_host(hosts, args.host)
print(json.dumps(output, indent=4 if args.pretty else None))
elif args.hostfile:
output = query_hostfile(hosts)
print(output)
parser.exit()
if __name__ == '__main__':
main()
|
[] |
[] |
[
"TERRAFORM_STATE_ROOT"
] |
[]
|
["TERRAFORM_STATE_ROOT"]
|
python
| 1 | 0 | |
Navigable/setup.py
|
# copy all files from this folder to ~/nable-bin (Linux) or C:\nable-bin\ (Windows)
# and add that folder to PATH
import sys
import os
from os import system
import shutil
_arguments = "$2 $3 $4 $5 $6 $7 $9"
_fname = "t-nable"
_commandpath = ""
_ptfrm = "w"
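# _arguments holds the positional-argument placeholders forwarded by the generated wrapper
# script ('$n' is rewritten to '%n' on Windows); _ptfrm records the target platform:
# "w" for Windows, "l" for Linux.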
def _fetch_essential():
global _arguments, _commandpath, _ptfrm, _fname
if sys.platform.startswith("win"):
_arguments = _arguments.replace("$", "%")
_commandpath = f"C:\\nable-bin\\"
_fname = "t-nable.bat"
print("Preparing for windows")
print(f"Environment to path;{_commandpath[1:]}")
system(f'setx /m path "%PATH%;{_commandpath[1:]}"')
else:
_ptfrm = "l"
_arguments += " ${10} ${11} ${12} ${13} ${14} ${15} ${16}"
_commandpath = f"{os.environ['HOME']}/nable-bin/"
__hasnbs(_commandpath)
with open(f"{_commandpath}aliases", "w+") as a:
a.writelines(
[
"alias uprc='source ~/.bashrc'",
]
)
print("Preparing for linux")
system(f"echo PATH=$PATH:{_commandpath}>>~/.bashrc")
system(f"echo source {_commandpath}aliases>>~/.bashrc")
def __hasnbs(nbloc):
if not os.path.exists(nbloc):
os.makedirs(nbloc)
def _fetch_file_to_dir():
_fetch_essential()
global _arguments, _commandpath, _ptfrm
fcon = os.listdir(".") # folder
# print(fcon)
# print(_commandpath)
__hasnbs(_commandpath)
for f in fcon:
if os.path.isfile(f):
shutil.copy(f, f"{_commandpath}{f}")
if _ptfrm == "l":
system(f"chmod +x {_commandpath}t-nable")
print(f"Installation Complete!\nDataDir: {_commandpath}")
if __name__ == "__main__":
_fetch_file_to_dir()
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
controller/rscfuzzer/fuzzer.py
|
import logging
import sys
import os
import signal
import shlex
import subprocess
import rscfuzzer.const as const
import time
import stat
import hashlib
import shutil
import json
import pickle
import psutil
import random
import copy
import resource
import mmh3
import magic
import re
import string
from enum import Enum
from rscfuzzer.target import targets
log = logging.getLogger(__name__)
hash_file_v = "hash_v.txt"
hash_file_f = "hash_f.txt"
coverage_file = "coverage.txt"
ld_cmd = "LD_LIBRARY_PATH=/home/gavin/libunwind/build/usr/local/lib"
invalid_list = ['MIN', 'MAX', 12580511, 'RANDOM', 'RANDOM', 'RANDOM']
class ValueMethod(Enum):
VALUE_ALL = 0
VALUE_RANDOM = 1
VALUE_INVALID = 2
class FieldMethod(Enum):
FIELD_ITER = 0
FIELD_RANDOM = 1
FIELD_ALL = 2
class OrderMethod(Enum):
ORDER_RECUR = 0
ORDER_SEP = 1
ORDER_ALL = 2
ORDER_ONE = 3
class SkipMethod(Enum):
SKIP_FAIL = 0
SKIP_ONE = 1
class FuzzResult(Enum):
FUZZ_SUCCESS = 0
FUZZ_SIGTIMEOUT = 1
FUZZ_ERROR = 2
FUZZ_EXECTIMEOUT = 3
FUZZ_RETNOTMATCH = 4
FUZZ_EXITB4POLL = 5
FUZZ_COREDUMP = 6
FUZZ_EXITB4CLIENT = 7
FUZZ_CLIENTFAIL = 8
class Fuzzer:
def __init__(self, config, target_name, start_skip=0):
self.config = config
# check if it is a valid target
if target_name not in targets:
sys.exit(f"{target_name} is not a valid target, "
f"you could add the target into target.py")
self.target_name = target_name
self.target = targets[target_name]
# check if strace dir is set
if "strace_dir" not in config:
sys.exit(f"strace_dir is not set in config")
self.strace_dir = config["strace_dir"]
self.server = self.target.get("server", False)
# get the polling syscall for server
if self.server:
log.info(f"target {target_name} is a server, looking for polling syscall...")
self.poll = self.target.get("poll")
if self.poll is None:
sys.exit("no polling syscall set for the server target")
log.info(f"polling syscall: {self.poll}")
# check if target need sudo
self.sudo = self.target.get("sudo", False)
# check if target have normal return code
self.retcode = self.target.get("retcode", None)
# initialize server process
self.srv_p = None
# setup environment variable
self.target_env = os.environ.copy()
self.setup_env_var()
self.cache_unwind = True
# check strace log file, target config has high priority
self.strace_log = self.target.get("strace_log", None)
if self.strace_log is None:
self.strace_log = self.config.get("strace_log", None)
if self.strace_log is None:
sys.exit("no strace log path set")
try:
self.strace_log_fd = open(self.strace_log, "w+")
except IOError as e:
sys.exit(f"unable to open strace_log file: {self.strace_log}: {e}")
else:
log.error(f"strace log will saved to {self.strace_log}")
try:
os.chmod(self.strace_log, stat.S_IWOTH | stat.S_IROTH)
except IOError as e:
log.info(f"Unable to change permission of strace log file: {self.strace_log}: {e}")
# check if target need to be run in specific directory
self.target_cwd = self.target.get("cwd", None)
# check if a non-server application needs input. TODO: handle the case where a server needs stdin
self.input = self.target.get("input", None)
# get process timeout, for non-server application
self.timeout = self.target.get("timeout", 3)
# get target command
self.command = self.target.get("command", None)
if self.command is None:
sys.exit(f"command not set for target: {self.target_name}")
self.syscall_config = self.config.get("syscall_config", None)
if self.syscall_config is None:
sys.exit("syscall config file not provided")
self.record_file = self.config.get("record_file", None)
self.count_file = self.config.get("count_file", "count.txt")
self.iteration = self.config.get("num_iteration", 9)
target_iteration = self.target.get("num_iteration")
if target_iteration is not None:
self.iteration = target_iteration
log.warning(f'number of iterations for each skip count set to {self.iteration}')
self.setup_func = self.target.get("setup_func", None)
self.core_dir = self.config.get("core_dir", "cores")
self.store_core_dir = self.config.get("store_core_dir", "stored_cores")
self.binary = self.command.split(' ')[0].split('/')[-1]
self.executable = os.path.abspath(self.command.split(' ')[0])
print(f"executable abs path is {self.executable}")
self.core_dir = '/shared/cores/'
signal.signal(const.ACCEPT_SIG, signal.SIG_IGN)
# mkdir if necessary
if not os.path.exists(self.core_dir):
os.makedirs(self.core_dir, mode=0o777)
os.chmod(self.core_dir, mode=0o777)
# modify core_dump file
core_command = f"sudo sysctl -w kernel.core_pattern={os.path.abspath(self.core_dir)}/core.%p"
args = shlex.split(core_command)
p = subprocess.Popen(args)
p.wait()
log.warn(f"core pattern command to {core_command}")
self.store_core_dir = os.path.join(self.store_core_dir, self.target_name)
log.info(f"core dump will be stored in {self.core_dir}, and moved to {self.store_core_dir}")
# mkdir if necessary
if not os.path.exists(self.store_core_dir):
os.makedirs(self.store_core_dir, mode=0o777)
os.chmod(self.store_core_dir, mode=0o777)
self.poll_time = self.target.get("poll_time", 3)
self.gdb_p = None
self.stack_set = set()
self.start_skip = start_skip
self.cov = self.target.get("cov", False)
self.cov_cwd = self.target.get("cov_cwd", None)
if self.cov and self.cov_cwd is None:
sys.exit(f"cov_cwd not set for cov target: {self.target_name}")
self.sc_cov = self.target.get("sc_cov", False)
self.hash_file = self.target.get("hash_file", None)
if self.hash_file is not None and self.target_cwd is not None:
self.hash_file = os.path.join(self.target_cwd, self.hash_file)
print(f'hash_file = {self.hash_file}')
self.a_cov = self.target.get('a_cov', False)
self.sysjson = self.target.get('syscall_json', None)
if self.a_cov and (self.hash_file is None or self.sysjson is None):
sys.exit(f"both sysjson and hash_file need to be set for r_cov")
if self.a_cov:
self.syscall_config = self.sysjson
# always use auto generated syscall_json file if provided
if self.sysjson is not None:
self.syscall_config = self.sysjson
if self.sc_cov and self.hash_file is None:
sys.exit(f"hash_file not set for sc_cov target: {self.target_name}")
self.fuzz_valid = self.target.get("fuzz_valid", False)
self.vanila_cov = {}
self.fuzz_cov = {}
self.start_time = time.time()
if os.path.isfile(hash_file_v):
try:
file = open(hash_file_v, 'rb')
self.vanila_cov = pickle.load(file)
file.close()
except:
pass
if os.path.isfile(hash_file_f):
try:
file = open(hash_file_f, 'rb')
self.fuzz_cov = pickle.load(file)
file.close()
except:
pass
json_dict = {}
with open(self.syscall_config) as f:
json_dict = json.load(f)
mismatch_syscalls = []
syscall_list = json_dict["syscalls"]
self.supported = []
for item in syscall_list:
if item["name"] in const.syscall_field_index.keys():
self.supported.append(item["name"])
else:
mismatch_syscalls.append(item["name"])
if len(mismatch_syscalls) > 0:
print("there are some syscalls in value dict but not in index dict!")
print(mismatch_syscalls)
print("supported syscalls: ")
print(self.supported)
self.value_dict = {}
# rearrange the json dict in a friendly way
for item in syscall_list:
append_dict = {}
syscall_name = item["name"]
for key,value in item.items():
if key != "name":
append_dict[key] = value
# add append_dict to value_dict
self.value_dict[syscall_name] = append_dict
print(self.value_dict)
# measurement option:
self.measurement = False
self.not_write = False
self.print_trace = False
self.accept_time = 0
self.client_time = 0
self.after_time = 0
self.accept_hash = self.target.get("accept_hash", -1)
# list that contains vanilla syscall list, only contain supported
self.vanilla_syscall_dict = {}
self.overall_set = set()
# coverage contain all the invocations's hash key = syscall+hash
self.coverage_dict = {}
# load coverage file if exist
if os.path.isfile(coverage_file):
try:
file = open(coverage_file, 'rb')
self.coverage_dict = pickle.load(file)
file.close()
except:
pass
# load overallset
for key in self.coverage_dict.keys():
split_list = key.split('@')
syscall = split_list[0]
if syscall in self.value_dict.keys():
self.overall_set.add(key)
print(f"size of loaded coverage is {len(self.coverage_dict)}, size of loaded overalset is {len(self.overall_set)}")
# syscall_set contain all syscalls in the application
self.unsupported_syscalls = set()
self.vanilla_list = None
# print unsupported syscalls
self.get_unsupported_syscalls()
self.max_depth = self.config.get('max_depth', 50)
self.reference_file = os.path.abspath(self.config.get('reference_file', "reference.txt"))
proc = psutil.Process()
log.warning(f'opened files: {proc.open_files()}')
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
log.warning(f'soft = {soft}, hard = {hard}')
resource.setrlimit(resource.RLIMIT_NOFILE, (1048576, 1048576))
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
log.warning(f'after set: soft = {soft}, hard = {hard}')
# get method to update value index
value_method_name = self.target.get("value_method", "VALUE_ALL")
if value_method_name not in ValueMethod.__members__:
log.error(f'{value_method_name} is not a valid value method')
self.value_method = ValueMethod.VALUE_ALL
else:
self.value_method = ValueMethod[value_method_name]
log.warning(f'value method set to {self.value_method}')
# get method to update field index
field_method_name = self.target.get("field_method", "FIELD_ITER")
if field_method_name not in FieldMethod.__members__:
log.error(f'{field_method_name} is not a valid field method')
self.field_method = FieldMethod.FIELD_ITER
else:
self.field_method = FieldMethod[field_method_name]
if self.field_method != FieldMethod.FIELD_ITER:
self.value_method = ValueMethod.VALUE_RANDOM
log.warning(f'value method reset to {self.value_method} because of field method')
log.warning(f'field method set to {self.field_method}')
order_method_name = self.target.get("order_method", "ORDER_RECUR")
if order_method_name not in OrderMethod.__members__:
log.error(f'{order_method_name} is not a valid order method')
self.order_method = OrderMethod.ORDER_RECUR
else:
self.order_method = OrderMethod[order_method_name]
log.warning(f'order method set to {self.order_method}')
skip_method_name = self.target.get("skip_method", "SKIP_FAIL")
if skip_method_name not in SkipMethod.__members__:
log.error(f'{skip_method_name} is not a valid skip method')
self.skip_method = SkipMethod.SKIP_FAIL
else:
self.skip_method = SkipMethod[skip_method_name]
log.warning(f'skip method set to {self.skip_method}')
# make a dir for storing random files
self.random_dir = self.config.get("random_dir", "/random")
# mkdir random dir necessary
if not os.path.exists(self.random_dir):
os.makedirs(self.random_dir, mode=0o666)
os.chmod(self.random_dir, mode=0o666)
self.clear_random_files()
# make a dir for storing error strace files
self.errorlog_dir = self.config.get("error_dir", "/errorlog")
# mkdir random dir necessary
if not os.path.exists(self.errorlog_dir):
os.makedirs(self.errorlog_dir, mode=0o666)
os.chmod(self.errorlog_dir, mode=0o666)
self.errorcount = 0
# a set to record fuzzed syscalls
self.fuzzed_set = set()
# -1 means the repeat count equals the number of fields;
# otherwise the configured value (default 3) is used
self.all_field_repeat = self.target.get('field_repeat', 3)
log.warning(f"field_repeat time set to {self.all_field_repeat}")
self.benchmark_cmd = None
self.overhead_test = False
self.iteration_count = 0
self.run_fuzz_function_time = 0
self.run_parse_function_time = 0
self.no_signal = False
def clear_time_measurement(self):
self.accept_time = 0
self.client_time = 0
self.after_time = 0
def setup_env_var(self):
# add libunwind library to LD_LIBRARY_PATH
self.target_env['LD_LIBRARY_PATH'] = '/home/gavin/libunwind/build/usr/local/lib'
env_dict = self.target.get("env")
if env_dict is not None:
for key, value in env_dict.items():
self.target_env[key] = value
log.info(f"env var: {key} -> {value}")
def clear_cores(self):
if not os.path.exists(self.core_dir):
os.makedirs(self.core_dir, mode=0o777)
os.chmod(self.core_dir, mode=0o777)
for f in os.listdir(self.core_dir):
try:
os.remove(os.path.join(self.core_dir, f))
except:
pass
def clear_random_files(self):
for f in os.listdir(self.random_dir):
try:
os.remove(os.path.join(self.random_dir, f))
except:
pass
def clear_hash(self):
if self.sc_cov:
try:
os.remove(self.hash_file)
except:
pass
def run_hundred_measurement(self, before_poll, client, indicator):
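"""Run the vanilla interceptor 100 times with the given client and print the total run time plus the accumulated after/accept timings; indicator only labels the printed line."""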
self.clear_time_measurement()
start = time.time()
for i in range(100):
self.run_interceptor_vanilla(before_poll, client)
print(self.retcode, end='', flush=True)
end = time.time()
print(f'\nrun time of vanilla {indicator}: {end - start}, after time: {self.after_time}, '
f'accept_time: {self.accept_time}')
def run_measurement(self):
# run the vanilla version first
self.sc_cov = False
if not self.server:
start = time.time()
for i in range(100):
self.run_interceptor_vanilla(True, None, True)
print(self.retcode, end='', flush=True)
end = time.time()
print(f'run time of origin: {end - start} ')
self.run_hundred_measurement(True, None, "no client strace")
self.sc_cov = True
self.not_write = True
self.cache_unwind = False
self.run_hundred_measurement(True, None, "no client trace stack (ori_unwind)")
self.sc_cov = True
self.not_write = True
self.cache_unwind = True
self.run_hundred_measurement(True, None, "no client record stack (cache unwind)")
self.print_trace = False
if self.server:
if self.accept_hash == -1:
# test with client
self.sc_cov = False
self.run_hundred_measurement(False, self.target.get("clients")[0], "vanilla client")
self.sc_cov = True
self.not_write = True
self.cache_unwind = False
self.run_hundred_measurement(False, self.target.get("clients")[0], "client trace stack(ori_unwind)")
self.sc_cov = True
self.not_write = True
self.print_trace = False
self.cache_unwind = True
self.run_hundred_measurement(False, self.target.get("clients")[0], "client record stack(cache_unwind)")
def parse_syscall_order(self, before=True):
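"""Parse self.hash_file into an ordered list of (syscall, hash, stack) tuples; with before=False only the invocations from the first poll syscall onwards are kept."""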
syscall_order = []
poll_found = False
# find continuous poll syscalls
poll_start = False
poll_count = 0
with open(self.hash_file) as fp:
lines = fp.readlines()
for line in lines:
syscall, hash, stack = self.parse_syscall_stack(line)
if before:
syscall_order.append((syscall, hash, stack))
else:
# only add syscall after poll found
if not poll_found:
if syscall == self.poll:
poll_found = True
poll_start = True
poll_count = 1
syscall_order.append((syscall, hash, stack))
else:
if poll_start:
# skip consecutive poll syscalls
if syscall != self.poll:
poll_count += 1
else:
poll_start = False
syscall_order.append((syscall, hash, stack))
else:
syscall_order.append((syscall, hash, stack))
return syscall_order
def parse_syscall_stack_string_hash(self, line):
temp = line.split(': ')
syscall = temp[0]
hash_str = temp[1]
stack = temp[2].replace('%', '\n')
return syscall, hash_str, stack
def parse_syscall_stack(self, line):
temp = line.split(': ')
syscall = temp[0]
hash_val = int(temp[1])
stack = temp[2].replace('%', '\n')
return syscall, hash_val, stack
def run_magic_test(self):
self.clear_hash()
# run the vanilla version first before poll
ret = self.run_interceptor_vanilla(True, None)
self.parse_supported_hash("before.txt")
if ret == 0:
log.info(f"vanilla cov run success, before_poll = true")
if self.server:
if "clients" not in self.target:
log.error(f"No client defiend for target {self.target_name}")
return
# test the part after polling separately for each client
for client in self.target.get("clients"):
ret = self.run_interceptor_vanilla(False, client)
self.parse_supported_hash("after.txt")
if ret == 0:
log.info(f"vanilla cov run success, before_poll = false")
def store_syscall_coverage(self):
file = open(coverage_file, 'wb+')
pickle.dump(self.coverage_dict, file)
file.close()
def parse_supported_hash(self, target_syscall=None, target_hash=None, vanilla=False):
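"""Parse self.hash_file, record every invocation into self.coverage_dict, and return a dict of newly found supported invocations keyed by 'syscall@hash'; if a target syscall/hash pair is given and does not appear in the trace, return None."""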
# return newly found supported syscall invocations; a dict preserves insertion order
support_new_syscall_dict = {}
unsupported_dict = {}
has_target = target_syscall is not None and target_hash is not None
# if no target is given, treat the target as always found (used for the vanilla run)
target_found = not has_target
if not os.path.isfile(self.hash_file):
log.error(f"{self.hash_file} does not exist after server")
return None
with open(self.hash_file) as fp:
lines = fp.readlines()
for line in lines:
syscall, hash_str, stack = self.parse_syscall_stack_string_hash(line)
# check if this invocation was already encountered in the overall set
str_key = f'{syscall}@{hash_str}'
if syscall in self.supported:
# check if syscall match target
if has_target and syscall == target_syscall and hash_str == target_hash:
target_found = True
# if not seen before (or this is a vanilla run), record it as new
if not str_key in self.overall_set or vanilla:
support_new_syscall_dict[str_key] = stack
elif str_key not in self.coverage_dict.keys():
unsupported_dict[str_key] = stack
# always record coverage
self.coverage_dict[str_key] = stack
# return the new invocations only if the target syscall was found, otherwise None
if target_found:
log.debug(f'{len(unsupported_dict)} new unsupported invocations found')
return support_new_syscall_dict
else:
return None
def parse_hash(self, vanilla=True):
# hardcode filename
with open(self.hash_file) as fp:
lines = fp.readlines()
cov_dict = self.vanila_cov
if not vanilla:
cov_dict = self.fuzz_cov
for line in lines:
syscall, hash_val, stack = self.parse_syscall_stack(line)
pair = cov_dict.get(hash_val)
if pair is None:
# if not vanilla:
# log.info(f'new syscall found: ({hash_val}, {syscall}): \n {stack}')
# print(f'new syscall found: ({hash_val}, {syscall}): \n {stack}')
# log.info(f'new count: {len(self.fuzz_cov) - len(self.vanila_cov)}/{len(self.vanila_cov)}')
# print(f'new count: {len(self.fuzz_cov) - len(self.vanila_cov)}/{len(self.vanila_cov)}')
cov_dict[hash_val] = (syscall, 1, stack)
else:
cov_dict[hash_val] = (syscall, pair[1]+1, stack)
if vanilla:
file = open(hash_file_v, 'wb+')
pickle.dump(cov_dict, file)
file.close()
else:
file = open(hash_file_f, 'wb+')
pickle.dump(cov_dict, file)
file.close()
def clear_cov(self):
if self.cov:
clear_cmd = "find . -name '*.gcda' -type f -delete"
args = shlex.split(clear_cmd)
p = subprocess.Popen(args, cwd=self.cov_cwd)
p.wait()
def store_cov_info(self, name):
if self.cov:
store_cmd = f"lcov -c --directory=./ -o {name}.info"
args = shlex.split(store_cmd)
p = subprocess.Popen(args, cwd=self.cov_cwd)
p.wait()
log.info(f"cov info stored to {name}.info")
def merge_cov_info(self, name1, name2, output):
if self.cov:
store_cmd = f"lcov -a {name1}.info -a {name2}.info -o {output}.info"
args = shlex.split(store_cmd)
p = subprocess.Popen(args, cwd=self.cov_cwd)
p.wait()
def clear_record(self):
if self.record_file is None:
return
try:
os.remove(self.record_file)
except FileNotFoundError:
pass
def clear_strace_log(self):
self.strace_log_fd.truncate(0)
self.strace_log_fd.seek(0, 0)
def kill_servers(self):
""" kill all running server to avoid port unavaliable """
if self.srv_p:
try:
os.killpg(os.getpgid(self.srv_p.pid), signal.SIGTERM)
except ProcessLookupError:
self.srv_p.kill()
# wait a few seconds for it to terminate
try:
ret = self.srv_p.wait(5) # wait until cov properly save the output
log.info(f"server terminate after sigint with return code {ret}")
except:
log.error("server terminate time out, force kill")
try:
os.killpg(os.getpgid(self.srv_p.pid), signal.SIGKILL)
except ProcessLookupError:
self.srv_p.kill()
self.srv_p = None
for proc in psutil.process_iter():
# check whether the process name matches
try:
if self.executable in proc.exe():
log.debug(f"found not killed process, kill it {self.executable}")
proc.kill()
except:
continue
if self.srv_p is not None:
self.srv_p.kill()
def kill_gdb(self):
if self.gdb_p:
self.gdb_p.kill()
def handle_core_dump_script(self, retcode=None, targets=None, skip_count=-1):
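"""Triage core dumps in self.core_dir: identify the crashing binary with the magic library, extract the stack via gdb driving const.gdb_script, deduplicate by stack hash, and store new cores together with the matching record and strace logs in self.store_core_dir. Returns the number of core files found."""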
core_list = []
# get all core dumps in the core_dir
for f in os.listdir(self.core_dir):
if 'core.' in f:
core_list.append(os.path.join(self.core_dir, f))
for file in core_list:
# get the file type of the core dump
try:
type_info = magic.from_file(file)
except Exception as e:
log.error(f'error read file type: {e}')
continue
m = re.search(r"execfn: \'(\S+)\'", type_info)
if m is None:
# not a recognized core dump file
log.error(f'core file not recognized: {file}, type info: {type_info}, target = {targets}')
dst = os.path.join(self.store_core_dir, f"core.empty")
shutil.copy(file, dst)
dst = os.path.join(self.store_core_dir, f"strace.empty.txt")
shutil.copy(self.strace_log, dst)
core_exec = self.executable
else:
# check if exec matches
core_exec = m.group(1)
if core_exec != self.executable:
log.error(f'core file does not belong to target binary: {core_exec}, target = {targets}')
# run gdb to get the stack string using a python script
# make sure the previous gdb session is killed
# remove the temp file if it exists
if os.path.exists(const.gdb_temp_file):
os.remove(const.gdb_temp_file)
core_binary = core_exec.split('/')[-1]
self.kill_gdb()
args_gdb = shlex.split(f'gdb {core_exec} {file} -q -x {const.gdb_script}')
try:
subprocess.run(args_gdb, timeout=5, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
except subprocess.TimeoutExpired as TE:
log.error(f'gdb subprocess timeout {TE}')
continue
except Exception as e:
log.error(f'gdb subprocess error: {e}')
continue
# parsing the output:
data = ''
if not os.path.isfile(const.gdb_temp_file):
log.error('gdb temp file read failed')
continue
with open(const.gdb_temp_file, 'r') as output_f:
data = output_f.read()
if len(data) <= 0:
log.error('gdb temp file read failed')
continue
# data should not contain error message:
if "gdb script error:" in data:
log.error(data)
continue
hash = mmh3.hash64(data, signed=False)[0]
if '__GI_abort' in data:
continue
if hash not in self.stack_set:
self.stack_set.add(hash)
log.info(f"new stack found:\n{data}")
log.debug(f'original core file = {file}')
hash_str = f'{hash}.{retcode}.{core_binary}'
# store the core with records
dst = os.path.join(self.store_core_dir, f"core.{hash_str}")
shutil.copy(file, dst)
log.info(f"core file stored to {dst}")
log.error(f"New Core Found: stored to {dst}, retcode=[{retcode}], targets=[{targets}], skip_count=[{skip_count}]")
# copy the record file as well
dst = os.path.join(self.store_core_dir, f"record.{hash_str}.txt")
shutil.copy(self.record_file, dst)
log.info(f"record file stored to {dst}")
# copy strace log as well
dst = os.path.join(self.store_core_dir, f"strace.{hash_str}.txt")
shutil.copy(self.strace_log, dst)
log.info(f"strace file stored to {dst}")
log.info("finish handle core dump")
return len(core_list)
def handle_core_dump(self, retcode=None, targets=None, skip_count=-1):
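"""Older core handler: drive an interactive gdb session to read the backtrace, hash it with sha1, and store new cores plus the record and strace logs in self.store_core_dir. Returns the number of core files found."""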
core_list = []
# log.info("handle core dump")
for f in os.listdir(self.core_dir):
if 'core.' in f:
core_list.append(os.path.join(self.core_dir, f))
for file in core_list:
self.kill_gdb()
self.gdb_p = subprocess.Popen(
["gdb", "-q", self.command.split(' ')[0]],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
close_fds=True,
env=self.target_env,
)
self.gdb_p.stdin.write(("core-file " + file).encode("utf-8") + b"\n")
self.gdb_p.stdin.flush()
pattern_not_found = False
data = ''
while True:
data = self.gdb_p.stdout.readline().decode("utf-8")
if const.top_stack_pattern.match(data):
break
if const.not_found_pattern.match(data) or const.gdb_not_found.match(data):
self.kill_gdb()
pattern_not_found = True
break
if pattern_not_found:
log.error(f"core anlyze failed: {data}, parh: {file}")
continue
self.gdb_p.stdin.write("bt".encode("utf-8") + b"\n")
self.gdb_p.stdin.flush()
self.gdb_p.stdin.write("p".encode("utf-8") + b"\n")
self.gdb_p.stdin.flush()
stack_string = ''
while True:
data = self.gdb_p.stdout.readline().decode("utf-8")
if " at " in data:
stack_string += data.split(' at ')[-1]
if const.empty_history_pattern.match(data):
break
self.kill_gdb()
# hash the string with sha1 to save memory
hash_object = hashlib.sha1(stack_string.encode())
hash_str = hash_object.hexdigest()
if hash_str not in self.stack_set:
self.stack_set.add(hash_str)
log.info(f"new stack found: {stack_string}")
hash_str = f'{hash_str}.{retcode}'
# store the core with records
dst = os.path.join(self.store_core_dir, f"core.{hash_str}")
shutil.copy(file, dst)
log.info(f"core file stored to {dst}")
log.error(f"New Core Found: stored to {dst}, retcode=[{retcode}], "
f"targets=[{targets}], skip_count=[{skip_count}]")
# copy the record file as well
dst = os.path.join(self.store_core_dir, f"record.{hash_str}.txt")
shutil.copy(self.record_file, dst)
log.info(f"record file stored to {dst}")
# copy strace log as well
dst = os.path.join(self.store_core_dir, f"strace.{hash_str}.txt")
shutil.copy(self.strace_log, dst)
log.info(f"strace file stored to {dst}")
# log.info("finish handle core dump")
return len(core_list)
def run_cov(self):
log.info(f"running cov test")
self.clear_cov()
# run the vanilla version first before poll
ret = self.run_interceptor_vanilla(True, None)
ret = self.run_interceptor_vanilla(True, None)
ret = self.run_interceptor_vanilla(True, None)
if ret == 0:
log.info(f"vanilla cov run success, before_poll = true")
# run the vanilla version with clients if possible
if self.server:
if "clients" not in self.target:
log.error(f"No client defiend for target {self.target_name}")
return
# test the part after polling separately for each client
for client in self.target.get("clients"):
ret = self.run_interceptor_vanilla(False, client)
ret = self.run_interceptor_vanilla(False, client)
ret = self.run_interceptor_vanilla(False, client)
if ret == 0:
log.info(f"vanilla cov run success, before_poll = false")
# store the cov file for vanilla runs
self.store_cov_info("vanilla")
self.clear_cov()
# run the test
self.run_interceptor_fuzz(True, None)
for client in self.target.get("clients"):
self.run_interceptor_fuzz(False, client)
# store the cov file for fuzz
self.store_cov_info("fuzz")
self.clear_cov()
def compare_syscall(self, one_syscall):
# compare with the first run value
standard = one_syscall[0]
for i in range(1, len(one_syscall)):
if one_syscall[i][0] != standard[0]:
return False
if one_syscall[i][1] != standard[1]:
return False
if one_syscall[i][2] != standard[2]:
return False
return True
def print_differ(self, order, differ, file_name):
with open(file_name, 'w+') as f:
f.write(f"syscall order after {differ}:\n")
for i in range(differ, len(order)):
f.write(f"num: {i}, syscall: {order[i][0]}, hash: {order[i][1]}\n")
f.write(f"{order[i][2]}")
def print_diff(self, diff_set, total_dict):
print('printing diffset: ')
for hash in diff_set:
print(total_dict[hash])
def check_new_syscall(self, orders):
total_dict = {}
sets = [set(), set(), set()]
for i in range(len(orders)):
for item in orders[i]:
sets[i].add(item[1])
total_dict[item[1]] = (item[0], item[2])
differ_01 = sets[0] - sets[1]
differ_10 = sets[1] - sets[0]
differ_02 = sets[0] - sets[2]
differ_20 = sets[2] - sets[0]
self.print_diff(differ_01, total_dict)
self.print_diff(differ_10, total_dict)
self.print_diff(differ_02, total_dict)
self.print_diff(differ_20, total_dict)
def compare_syscall_orders(self, orders, tag):
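"""Compare several recorded syscall orders, log the first position where they diverge (or where the lengths differ), and dump the diverging tails into the differ/ directory."""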
num_order = len(orders)
min_len = len(orders[0])
len_differ = False
for i in range(0, num_order):
length = len(orders[i])
log.info(f"iteration {i}: {length} syscalls")
if length != min_len:
len_differ = True
min_len = min(min_len, length)
differ = -1
for i in range(0, min_len):
one_syscall = []
for j in range(0, num_order):
one_syscall.append(orders[j][i])
equal = self.compare_syscall(one_syscall)
if not equal:
log.info(f'order differ from {i}th syscall')
differ = i
for j in range(0, num_order):
print(f'order {j}')
print(orders[j][i])
break
# print the following syscalls; also print if the lengths do not match
if differ < 0 and len_differ:
differ = min_len
print(f'no order difference before min_len, but lengths differ, min_len = {min_len}')
if differ > 0:
for i in range(num_order):
log.info(f'differ syscalls in iteration {i}')
self.print_differ(orders[i], differ, f'differ/{self.target_name}_{tag}_{i}')
def print_measuerment(self):
end_time = time.time()
total_time = end_time - self.start_time
if self.iteration_count != 0:
log.warning(f"total runtime is {total_time},\n "
f"totalnumber of it = {self.iteration_count}, \n"
f"average time is {total_time / self.iteration_count}, \n"
f"fuzzer functiontime = {self.run_fuzz_function_time}\n"
f"average fuzzer function time = {self.run_fuzz_function_time / self.iteration_count}")
else:
log.error("no iteration")
self.start_time = time.time()
self.run_fuzz_function_time = 0
self.iteration_count = 0
def clear_exit(self):
self.kill_servers()
self.kill_gdb()
self.store_syscall_coverage()
self.print_measuerment()
sys.exit(0)
def check_syscall_order(self):
log.info(f"check syscall order, run the vanila version three times")
syscall_orders = []
for i in range(0, 3):
log.info(f"syscall order before client")
self.clear_hash()
self.run_interceptor_vanilla(True, None)
syscall_orders.append(self.parse_syscall_order())
self.compare_syscall_orders(syscall_orders, 'v')
self.check_new_syscall(syscall_orders)
self.print_differ(syscall_orders[0], 0, f'differ/{self.target_name}_allv_0')
self.print_differ(syscall_orders[1], 0, f'differ/{self.target_name}_allv_1')
self.print_differ(syscall_orders[2], 0, f'differ/{self.target_name}_allv_2')
syscall_orders = []
for client in self.target.get("clients"):
log.info(f"syscall order after client")
for i in range(0, 3):
log.info(f"syscall order after client")
self.clear_hash()
self.run_interceptor_vanilla(False, client)
syscall_orders.append(self.parse_syscall_order(False))
self.compare_syscall_orders(syscall_orders, 'c')
self.check_new_syscall(syscall_orders)
self.print_differ(syscall_orders[0], 0, f'differ/{self.target_name}_allc_0')
self.print_differ(syscall_orders[1], 0, f'differ/{self.target_name}_allc_1')
self.print_differ(syscall_orders[2], 0, f'differ/{self.target_name}_allc_2')
'''extend value list with invalid values, min, max and rand'''
def extend_value_list(self, value_list):
# deep copy
new_value_list = copy.deepcopy(value_list)
new_value_list.extend(invalid_list)
return new_value_list
def get_random_int(self):
random_list = [random.randint(const.INT_MIN, const.INT_MAX),
random.randint(const.SHRT_MIN, const.SHRT_MAX),
random.randint(-sys.maxsize/2, sys.maxsize/2)]
return random.choice(random_list)
def remove_file(self, file):
try:
os.remove(file)
except Exception as e:
log.error(f"remove file failed {e}")
else:
log.debug(f"file removed: {file}")
def get_value_list(self, field_key, syscall_dict):
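"""Build the candidate value list for a field according to self.value_method: recorded valid values extended with invalid ones (VALUE_ALL), RANDOM placeholders (VALUE_RANDOM), or the invalid values only (VALUE_INVALID)."""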
value_list = None
if self.value_method == ValueMethod.VALUE_ALL:
if field_key is None or syscall_dict is None:
log.error(f'field_key and syscall_dict should not be none for value_all method')
self.clear_exit()
value_list = syscall_dict.get(field_key)
if value_list is None:
# does not have values in the valid set, create an empty one
value_list = []
value_list = self.extend_value_list(value_list)
elif self.value_method == ValueMethod.VALUE_RANDOM:
value_list = ['RANDOM'] * const.RANDOM_REPEAT
elif self.value_method == ValueMethod.VALUE_INVALID:
value_list = invalid_list
return value_list
def generate_random_file_name(self):
rand_str = ''.join(random.choices(string.ascii_letters + string.digits, k=10))
file_name = os.path.join(self.random_dir, f'RAND_{rand_str}')
return file_name
def extract_value_from_index(self, index_target):
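"""Translate an index target [syscall, hash, field_index, value_index] into the concrete value to inject; FIELD_ALL generates a random file name per field, and the RANDOM placeholder is replaced by a fresh random file name."""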
syscall_name = index_target[0]
field_index = index_target[2]
value_index = index_target[3]
syscall_field_list = const.syscall_field_index[syscall_name]
if self.field_method == FieldMethod.FIELD_ALL:
output_value_list = []
for i in range(0, len(syscall_field_list)):
output_value_list.append(self.generate_random_file_name())
return output_value_list
if field_index >= len(syscall_field_list):
log.error(f'field_index out of bound: {field_index}/{len(syscall_field_list)}')
self.clear_exit()
# append _v to the field name
field_key = f'{syscall_field_list[field_index]}_v'
# check if value index out of bound
syscall_dict = self.value_dict.get(syscall_name)
if syscall_dict is None:
log.error(f'syscall {syscall_name} not found in value dict')
self.clear_exit()
value_list = self.get_value_list(field_key, syscall_dict)
if value_index >= len(value_list):
log.error(f'value_index out of bound: {value_index}/{len(value_list)}')
self.clear_exit()
# check if value is RANDOM
if value_list[value_index] == "RANDOM":
return self.generate_random_file_name()
return value_list[value_index]
def remove_random_file(self, value):
# only one field, one value
if self.field_method == FieldMethod.FIELD_ITER:
if not isinstance(value, str) or "RAND" not in value:
return
self.remove_file(value)
# all fields, multiple files
elif self.field_method == FieldMethod.FIELD_ALL:
for item in value:
if not isinstance(item, str) or "RAND" not in item:
continue
self.remove_file(item)
'''try next value/field, return 0 if success, -1 if no more value/field to explore'''
def update_target(self, index_target, value_target):
syscall_name = index_target[0]
field_index = index_target[2]
value_index = index_target[3]
syscall_field_list = const.syscall_field_index[syscall_name]
max_field_index = len(syscall_field_list)
field_key = None
syscall_dict = None
if self.field_method == FieldMethod.FIELD_ITER:
max_field_index = len(syscall_field_list)
if field_index >= max_field_index:
log.error(f'field_index out of bound: {field_index}/{max_field_index}')
self.clear_exit()
# append _v to the field name
field_key = f'{syscall_field_list[field_index]}_v'
# check if value index out of bound
syscall_dict = self.value_dict.get(syscall_name)
if syscall_dict is None:
log.error(f'syscall {syscall_name} not found in value dict')
self.clear_exit()
elif self.field_method == FieldMethod.FIELD_RANDOM:
max_field_index = const.RANDOM_REPEAT
elif self.field_method == FieldMethod.FIELD_ALL:
max_field_index = self.all_field_repeat
if max_field_index == -1:
max_field_index = len(syscall_field_list)
value_list = self.get_value_list(field_key, syscall_dict)
# before increment, we need to remove the unused random file
self.remove_random_file(value_target[3])
# update value_index if possible
if value_index + 1 < len(value_list):
value_index += 1
# value index cannot increase further, increment field index
elif field_index + 1 < max_field_index:
field_index += 1
value_index = 0
else:
# both cannot increase further return -1
return -1
# update index target and value target
index_target[2] = field_index
index_target[3] = value_index
# update value target
if self.field_method == FieldMethod.FIELD_ITER:
value_target[2] = field_index
value_target[3] = self.extract_value_from_index(index_target)
elif self.field_method == FieldMethod.FIELD_RANDOM:
value_target[2] = random.randrange(max_field_index)
value_target[3] = value_list[value_index]
elif self.field_method == FieldMethod.FIELD_ALL:
value_target[2] = -1
value_target[3] = self.extract_value_from_index(index_target)
return 0
'''a recursive function that fuzzes a chain of targets'''
def fuzz_with_targets(self, index_targets, value_targets, depth, before_poll=True, client=None):
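"""Recursive fuzzing step: run the fuzzer with the current value targets, collect newly discovered supported invocations, descend into each of them (ORDER_RECUR) until max_depth is reached, and iterate over the current target's fields/values via update_target."""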
if depth >= self.max_depth:
log.info('depth reached maximum')
return
log.info(f'current depth = {depth}')
current_index_target = index_targets[depth]
current_value_target = value_targets[depth]
while True:
new_syscall_dict = None
new_unsupported_dict = None
# run the fuzzer; retry a few times if the target syscall does not appear
log.debug(value_targets)
for retry in range(0, const.INVOCATION_NOT_FOUND_RETRY):
fuzz_ret_code, retcode = self.run_fuzzer_with_targets(value_targets, before_poll, client)
log.debug(f'Fuzz return code is: {fuzz_ret_code}')
# get the new list
new_syscall_dict = self.parse_supported_hash(current_index_target[0], current_index_target[1])
# parse_supported_hash will return None if target not found
if new_syscall_dict is not None:
break
else:
log.debug(f'target not found retry: {retry}')
if new_syscall_dict is None:
log.info('target syscall not found')
stack_str = self.coverage_dict.get(f'{current_index_target[0]}@{current_index_target[1]}')
if stack_str is not None:
log.info(stack_str)
else:
log.info('stack string is None?')
# skip this target if still not found
return
if self.order_method == OrderMethod.ORDER_RECUR:
if depth + 1 < self.max_depth:
log.debug(f'{len(new_syscall_dict)} new invocations found!')
# update overall set and explore next depth
self.overall_set.update(new_syscall_dict.keys())
if len(new_syscall_dict) > 0:
log.info(f"number of overallset = {len(self.overall_set)}")
for i, str_key in enumerate(new_syscall_dict.keys()):
split_list = str_key.split('@')
stack_str = new_syscall_dict[str_key]
syscall = split_list[0]
hash_str = split_list[1]
# construct a target, syscall, hash_str, field index, field value
next_index_target = [syscall, hash_str, 0, 0]
next_value_target = [syscall, hash_str, 0, self.extract_value_from_index(next_index_target)]
if self.field_method == FieldMethod.FIELD_ALL:
next_value_target[2] = -1
# create a deepcopy of target list
next_index_targets = copy.deepcopy(index_targets)
next_value_targets = copy.deepcopy(value_targets)
next_index_targets.append(next_index_target)
next_value_targets.append(next_value_target)
log.info(f'recursive fuzz newly found syscall {str_key}:'
f' {i}/{len(new_syscall_dict)}, depth = {depth}, '
f'targets = {next_value_targets}')
log.info(stack_str)
# call the recursive function on the two new list
self.fuzz_with_targets(next_index_targets, next_value_targets, depth+1, before_poll, client)
else:
log.info('depth reached maximum')
elif self.order_method == OrderMethod.ORDER_ONE:
# do not go further, just update the overall set:
self.overall_set.update(new_syscall_dict.keys())
if len(new_syscall_dict) > 0:
log.info(f"number of overallset = {len(self.overall_set)}")
# try next value/field
ret = self.update_target(current_index_target, current_value_target)
if ret == -1:
break
def recursive_fuzz_main_loop(self, vanilla_list, before_poll=True, client=None):
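"""Entry point of the recursive strategy: start one recursion from every invocation observed in the vanilla run, skipping the first self.start_skip entries."""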
self.start_time = time.time()
# generate initial target reference
for i, str_key in enumerate(vanilla_list.keys()):
if i < self.start_skip:
continue
split_list = str_key.split('@')
log.warning(f'start recursive fuzz from vanilla_set '
f'{str_key}: {i}/{len(vanilla_list)}')
syscall = split_list[0]
hash_str = split_list[1]
# construct a target, syscall, hash_str, field index, field value
first_index_target = [syscall, hash_str, 0, 0]
first_value_target = [syscall, hash_str, 0, self.extract_value_from_index(first_index_target)]
if self.field_method == FieldMethod.FIELD_ALL:
first_value_target[2] = -1
index_targets = [first_index_target]
value_targets = [first_value_target]
self.store_syscall_coverage()
# call the recursive function on the two list, pass by value
self.fuzz_with_targets(copy.deepcopy(index_targets), copy.deepcopy(value_targets), 0, before_poll, client)
def parse_and_get_unsupported_set(self):
self.vanilla_list = []
with open(self.hash_file) as fp:
lines = fp.readlines()
for line in lines:
syscall, hash_str, stack = self.parse_syscall_stack_string_hash(line)
if syscall not in self.supported:
self.unsupported_syscalls.add(syscall)
else:
self.vanilla_list.append(syscall)
def get_unsupported_syscalls(self):
print('getting unsupported syscall set')
self.clear_hash()
ret = self.run_interceptor_vanilla(True, None)
if ret == 0:
log.info(f"vanilla cov run success (get unsupport list), before_poll = true")
self.parse_and_get_unsupported_set()
if self.server:
if "clients" not in self.target:
log.error(f"No client defiend for target {self.target_name}")
return
# test the part after polling separately for each client
for client in self.target.get("clients"):
self.clear_hash()
ret = self.run_interceptor_vanilla(False, client)
if ret == 0:
log.info(f"vanilla cov run success (get unsupport list), before_poll = false")
self.parse_and_get_unsupported_set()
log.warning(f'unsupported syscalls:\n{self.unsupported_syscalls}')
def run_recursive_fuzz(self):
log.info(f"running recursive fuzzer")
self.clear_hash()
# get the client
client = None
if self.server:
if "clients" not in self.target:
log.error(f"No client defiend for target {self.target_name}")
return
# test the part after polling separately for each client
client = self.target.get("clients")[0]
# run the vanilla version first, always use one run
ret = self.run_interceptor_vanilla(False, client)
if ret == 0:
log.info(f"vanilla cov run success ! ")
# generate vanilla syscall list
vanilla_list = self.parse_supported_hash(vanilla=True)
if vanilla_list is None:
log.error("failed to get vanilla list, terminate")
self.clear_exit()
# update overall set
self.overall_set.update(vanilla_list.keys())
print(f'size of vanilla_list is: {len(vanilla_list)}, size of overall set is {len(self.overall_set)}')
# store coverage
self.store_syscall_coverage()
# start fuzzing
if self.order_method == OrderMethod.ORDER_RECUR or self.order_method == OrderMethod.ORDER_ONE:
self.recursive_fuzz_main_loop(vanilla_list, False, client)
elif self.order_method == OrderMethod.ORDER_SEP:
self.sep_fuzz_main_loop(client)
elif self.order_method == OrderMethod.ORDER_ALL:
self.all_fuzz_main_loop(client)
def get_syscall_count(self):
try:
with open(self.count_file, 'r') as f:
data = f.read()
return int(data)
except Exception as e:
log.error(f"read count_file failed {e}")
return -2
def blind_fuzz_loop(self, client, target_syscall):
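"""Skip-count based fuzzing: repeatedly run the fuzzer while increasing skip_count (by one, or by jumping to the smallest observed syscall count, depending on self.skip_method) until runs stop failing or no new invocation is found."""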
# initialize the skip count
skip_count = 0
should_increase = True
# get the max invocation of target from vanilla call:
vanill_invocation = len(self.vanilla_list)
if target_syscall is not None:
vanill_invocation = 0
for syscall in self.vanilla_list:
if syscall == target_syscall:
vanill_invocation += 1
log.warning(f"max invocation for target{target_syscall} is {vanill_invocation}")
while should_increase:
log.warning(f'skip count = {skip_count}')
should_increase = False
# for each skip_count increase
result_list = []
num_new_invocation = 0
min_syscount = -1
for i in range(self.iteration):
# do the fuzzing
fuzz_ret_code, retcode = self.run_fuzzer_with_targets(None, False, client, target_syscall, skip_count)
log.debug(f'Fuzz return code is: {fuzz_ret_code}')
# parse newly found syscalls:
new_syscall_dict = self.parse_supported_hash()
if new_syscall_dict is None:
log.error('new_syscall_dict should not be none without target')
if len(new_syscall_dict) > 0:
# update overall set
self.overall_set.update(new_syscall_dict.keys())
log.debug(f"{len(new_syscall_dict)} new invocation found")
log.info(f"number of overallset = {len(self.overall_set)}")
num_new_invocation += len(new_syscall_dict)
result_list.append((fuzz_ret_code, retcode, len(new_syscall_dict)))
if fuzz_ret_code != FuzzResult.FUZZ_SUCCESS:
should_increase = True
# get the syscall count returned by strace
syscount = self.get_syscall_count()
if syscount == -1:
log.error(f'syscall {target_syscall} not found')
if min_syscount == -1:
min_syscount = syscount
min_syscount = min(syscount, min_syscount)
log.info(f"result list for {target_syscall}:{skip_count} is {result_list}")
if min_syscount < 0:
should_increase = False
# decide if we should increase and how to increase
# stop increasing if skip_count > 1.3 * the vanilla invocation count and no new invocation was found
if skip_count > vanill_invocation * 1.3 and num_new_invocation == 0 and should_increase:
should_increase = False
log.error(f'terminate because no new invocation found and skip count too large')
if should_increase:
# increase the skip count:
if self.skip_method == SkipMethod.SKIP_ONE:
skip_count = skip_count + 1
if skip_count == vanill_invocation:
should_increase = False
log.error(f"skip count increase to the last one in vanilla list: {skip_count}, stop")
elif self.skip_method == SkipMethod.SKIP_FAIL:
if min_syscount <= skip_count:
log.error(f'min_syscount {min_syscount} should not be smaller than skip_count {skip_count}')
skip_count = skip_count + 1
# if min_syscount increases too much, just add 1
elif min_syscount > (skip_count + (vanill_invocation * 0.2)):
log.error(f"syscall count increased too much ({min_syscount}), possible infinite loop, fall back to +1")
skip_count = skip_count + 1
else:
skip_count = min_syscount
self.store_syscall_coverage()
# main loop of fuzz each syscall separately
def sep_fuzz_main_loop(self, client):
# for each supported syscall in vanilla run, do the fuzz
# extract syscall_list
fuzz_syscall_list = []
for syscall in self.supported:
if syscall in self.vanilla_list:
fuzz_syscall_list.append(syscall)
for i in range(len(fuzz_syscall_list)):
target_syscall = fuzz_syscall_list[i]
log.warning(f'start fuzz syscall {target_syscall} ({i}/{len(fuzz_syscall_list)})')
self.blind_fuzz_loop(client, target_syscall)
# main loop of fuzz all syscall
def all_fuzz_main_loop(self, client):
self.blind_fuzz_loop(client, None)
def run_sc_cov(self):
log.info(f"running sc cov test")
self.clear_hash()
# run the vanilla version first before poll
ret = self.run_interceptor_vanilla(True, None)
self.parse_hash()
if ret == 0:
log.info(f"vanilla cov run success, before_poll = true")
if self.server:
if "clients" not in self.target:
log.error(f"No client defiend for target {self.target_name}")
return
# test the part after polling separately for each client
for client in self.target.get("clients"):
ret = self.run_interceptor_vanilla(False, client)
self.parse_hash()
if ret == 0:
log.info(f"vanilla cov run success, before_poll = false")
unsupported_set = set()
support_count = 0
ignore_count = 0
for key, value in self.vanila_cov.items():
if value[0] in self.supported or value[0] in const.will_do:
support_count += 1
elif value[0] in const.ignore_syscall:
ignore_count += 1
else:
unsupported_set.add(value[0])
log.info(f"support {support_count}/{len(self.vanila_cov)}, "
f"{float(support_count)/float(len(self.vanila_cov)) * 100.0}%")
log.info(f"support remove ignore {support_count}/{len(self.vanila_cov) - ignore_count}, "
f"{float(support_count) / float(len(self.vanila_cov) - ignore_count) * 100.0}%")
log.warning(f"usupported set: {unsupported_set}")
# run the test
# copy the vanilla_cov to fuzz_cov
for key, value in self.vanila_cov.items():
if key not in self.fuzz_cov.keys():
self.fuzz_cov[key] = value
self.run_interceptor_fuzz(True, None)
for client in self.target.get("clients"):
self.run_interceptor_fuzz(False, client)
new_count = 0
for key, value in self.fuzz_cov.items():
if key not in self.vanila_cov.keys():
new_count += 1
log.warning(f"newly added system calls: {new_count}/{len(self.vanila_cov)}, "
f"{float(new_count)/float(len(self.vanila_cov)) * 100.0}%")
def run(self):
if self.cov:
self.run_cov()
return
if self.sc_cov:
self.run_sc_cov()
return
# test the application or part before polling in a server
self.test_target(True)
# if target is a server, also fuzz the second part
if self.server:
if "clients" not in self.target:
log.error(f"No client defiend for target {self.target_name}")
return
# test the part after polling separately for each client
for client in self.target.get("clients"):
self.test_target(False, client)
def test_target(self, before_poll=True, client=None):
# run the vanilla version first
ret = self.run_interceptor_vanilla(before_poll, client)
if ret == 0:
log.info(f"vanilla run success, before_poll = {before_poll}")
# run the test version
self.run_interceptor_fuzz(before_poll, client)
def run_interceptor_vanilla(self, before_poll=True, client=None, origin=False):
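"""Run the target once under strace without fuzzing (with origin=True the raw command runs without strace). For servers, wait for the accept signal, optionally drive a client, and verify the expected return code. Returns 0 on success and exits the fuzzer on unexpected behaviour."""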
if self.setup_func is not None:
self.setup_func()
# construct the strace command
strace_cmd = f"{os.path.join(self.strace_dir, 'strace')} -ff"
if self.server:
cur_pid = os.getpid() # pass pid to the strace, it will send SIGUSR1 back
strace_cmd = f"{strace_cmd} -j {self.poll} -J {cur_pid}"
# unnecessary for vanilla run
# if not before_poll and client is not None:
# strace_cmd = f"{strace_cmd} -l"
if self.print_trace:
strace_cmd = f"{strace_cmd} -k"
elif self.sc_cov:
strace_cmd = f"{strace_cmd} -n {self.hash_file}"
if self.not_write:
strace_cmd = f"{strace_cmd} -N"
if self.accept_hash > 0:
strace_cmd = f"{strace_cmd} -Q {self.accept_hash}"
strace_cmd = f"{strace_cmd} {self.command}"
if origin:
strace_cmd = self.command
ld_path = ""
if self.cache_unwind:
ld_path = ld_cmd
if self.sudo:
strace_cmd = f"sudo -E {ld_path} {strace_cmd}"
# strace_cmd = f"sudo -E /home/gavin/strace/strace -ff -j epoll_wait -J {cur_pid} -G -B 644 -K /home/gavin/rsyscall_fuzzer/controller/syscall.json -L /home/gavin/rsyscall_fuzzer/controller/record.txt -n syscov_memcached.txt /home/gavin/memcached-1.5.20/memcached -p 11111 -U 11111 -u gavin"
# run the interceptor, make sure nothing else is running
self.kill_servers()
log.info(f"running vanilla target with command {strace_cmd}")
args = shlex.split(strace_cmd)
# Block signal until sigwait (if caught, it will become pending)
signal.pthread_sigmask(signal.SIG_BLOCK, [const.ACCEPT_SIG])
self.srv_p = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=self.strace_log_fd,
stderr=self.strace_log_fd,
preexec_fn=os.setsid,
close_fds=True,
cwd=self.target_cwd,
env=self.target_env)
# wait for accept signal if it is a server
if self.server:
# ignore signal
# signal.pthread_sigmask(signal.SIG_UNBLOCK, [const.ACCEPT_SIG])
# Wait for sigmax-7, or acknowledge if it is already pending
start = time.time()
# use polling instead of timeout
wait_start = time.time()
wait_end = time.time()
log.debug("wait for server's signal ...")
while wait_end - wait_start < self.poll_time:
ret = signal.sigtimedwait([const.ACCEPT_SIG], 0) # poll the signal
if ret is not None: # signal received
break
# check server state
retcode = self.srv_p.poll()
if retcode is not None:
log.debug(f'server terminated before reach accept, retcode = {retcode}')
break
wait_end = time.time()
signal.pthread_sigmask(signal.SIG_UNBLOCK, [const.ACCEPT_SIG])
if ret is None: # timeout
log.debug("signal timeout!")
retcode = self.srv_p.poll()
if retcode is not None:
log.debug(f'server terminated before reach accept, retcode = {retcode}')
self.kill_servers()
sys.exit("signal wait timeout during vanilla run, terminate the process")
end = time.time()
self.accept_time += (end - start)
if ret:
logging.debug(f"sig {const.ACCEPT_SIG} received!")
else:
sys.exit("signal wait timeout during vanilla run, terminate the process")
# check if this turn only test before poll:
if before_poll:
# check if the server crashes,
ret = self.srv_p.poll()
if ret is None: # terminate the server and return
os.killpg(os.getpgid(self.srv_p.pid), signal.SIGTERM)
start = time.time()
self.srv_p.wait() # wait until strace properly save the output
end = time.time()
self.after_time += (end-start)
log.info("vanilla test before polling success")
return 0
# server terminate before client, report error
else:
self.kill_servers()
sys.exit(f"server terminate before client, retcode = {ret}")
# after polling for server
if client is None:
self.kill_servers()
sys.exit("error: client not set when test after polling")
start = time.time()
for j in range(const.CLIENT_RETRY):
client_ret = client()
if client_ret == 0:
break
else:
print(f'retry: {j}')
end = time.time()
self.client_time += (end - start)
if client_ret != 0:
self.kill_servers()
sys.exit("error: client failed during vanilla run!")
else:
log.info("client success during vanilla run!")
# check if server terminated
if self.retcode is not None:
# wait for server to terminate
start = time.time()
try:
retcode = self.srv_p.wait(timeout=self.timeout) # wait for server to terminate after client
except (TimeoutError, subprocess.TimeoutExpired):
self.kill_servers()
sys.exit("server timeout after client (should terminate), kill the server")
else:
if self.retcode != retcode:
self.kill_servers()
sys.exit(f"server terminate after client, expect retcode:{self.retcode}, actual: {retcode}")
else:
return 0
end = time.time()
self.after_time += (end - start)
retcode = self.srv_p.poll()
if retcode is None:
os.killpg(os.getpgid(self.srv_p.pid), signal.SIGTERM)
start = time.time()
self.srv_p.wait() # wait until strace properly save the output
end = time.time()
self.after_time += (end-start)
log.info(f"server still running after client, terminate the server")
# for non-server target
else:
if self.input:
self.srv_p.communicate(self.input.encode("utf-8").decode('unicode_escape').encode("utf-8"))
try:
retcode = self.srv_p.wait(self.timeout) # wait up to self.timeout; if it does not return, something happened
except subprocess.TimeoutExpired:
# timeout, kill the program and do nothing
self.kill_servers()
sys.exit(f"application timeout")
else:
if self.retcode is None: # set the normal retcode to retcode
self.retcode = retcode
log.warning(f"normal retcode set to {retcode} for the non-server target")
elif self.retcode != retcode:
self.kill_servers()
sys.exit(f"application terminate with error in vanilla run, "
f"expect retcode:{self.retcode}, actual: {retcode}")
return 0
def run_interceptor_fuzz(self, before_poll=True, client=None):
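"""Skip-count fuzzing loop: for each skip_count, run the fuzzing strace self.iteration times, record failed iterations and core dumps, and increase skip_count while any iteration failed."""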
skip_count = self.start_skip
should_increase = True
while should_increase: # fuzzing loop, end until application terminate properly
should_increase = False
# construct strace command
strace_cmd = f"{os.path.join(self.strace_dir, 'strace')} -ff"
if self.server:
cur_pid = os.getpid() # pass pid to the strace, it will send SIGUSR1 back
strace_cmd = f"{strace_cmd} -j {self.poll} -J {cur_pid}"
if not before_poll and client is not None:
strace_cmd = f"{strace_cmd} -l"
# add skip count to the command '-G -B', add syscall config, -G means start fuzzing
strace_cmd = f"{strace_cmd} -G -B {skip_count} -K {os.path.abspath(self.syscall_config)}"
# add record file if set
if self.record_file is not None:
strace_cmd = f"{strace_cmd} -L {os.path.abspath(self.record_file)}"
# cov testing: -M adds cov support (do not fuzz cov syscalls); -m fuzzes only with valid values
if self.cov:
strace_cmd = f"{strace_cmd} -M"
if self.fuzz_valid:
strace_cmd = f"{strace_cmd} -m"
if self.sc_cov:
strace_cmd = f"{strace_cmd} -n {self.hash_file}"
if self.accept_hash > 0:
strace_cmd = f"{strace_cmd} -Q {self.accept_hash}"
strace_cmd = f"{strace_cmd} {self.command}"
ld_path = ""
if self.cache_unwind:
ld_path = ld_cmd
if self.sudo:
strace_cmd = f"sudo -E {ld_path} {strace_cmd}"
log.info(f"start fuzzing with command {strace_cmd}, "
f"num_iterations = {self.iteration}, skip_count={skip_count}")
args = shlex.split(strace_cmd)
failed_iters = []
for i in range(0, self.iteration):
# run the command multiple times
# clear core dumps
self.clear_cores()
self.clear_record()
self.clear_strace_log()
self.clear_hash()
# make sure no server is running
self.kill_servers()
# initialize the retcode with a magic number
retcode = 10086
if self.setup_func is not None:
self.setup_func()
log.debug(f"start iteration {i}")
# signal.signal(const.ACCEPT_SIG, signal.SIG_IGN)
# Block signal until sigwait (if caught, it will become pending)
signal.pthread_sigmask(signal.SIG_BLOCK, [const.ACCEPT_SIG])
# signal.pthread_sigmask(signal.SIG_UNBLOCK, [const.ACCEPT_SIG])
self.srv_p = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=self.strace_log_fd,
stderr=self.strace_log_fd,
preexec_fn=os.setsid,
cwd=self.target_cwd,
close_fds=True,
env=self.target_env)
if not self.server:
if self.input:
self.srv_p.communicate(self.input.encode("utf-8").decode('unicode_escape').encode("utf-8"))
try:
retcode = self.srv_p.wait(self.timeout) # wait up to self.timeout; if it does not return, something happened
except subprocess.TimeoutExpired:
# timeout, kill the program and record failure
self.kill_servers()
should_increase = True
failed_iters.append((i, 'timeout_n'))
else:
if self.retcode != retcode:
self.kill_servers()
# return code do not match
failed_iters.append((i, retcode))
should_increase = True
else: # handle servers
# check if server exist before wait for signal (save time)
# time.sleep(0.5)
retcode = self.srv_p.poll()
log.debug("check server exist before wait for signal")
if retcode is not None:
failed_iters.append((i, retcode))
should_increase = True
else:
# use polling instead of timeout
wait_start = time.time()
wait_end = time.time()
log.debug("wait for server's signal ...")
while wait_end - wait_start < self.poll_time:
ret = signal.sigtimedwait([const.ACCEPT_SIG], 0) # poll the signal
if ret is not None: # signal received
break
# check server state
retcode = self.srv_p.poll()
if retcode is not None:
log.debug(f'server terminated before reach accept, retcode = {retcode}')
failed_iters.append((i, retcode))
should_increase = True
break
wait_end = time.time()
signal.pthread_sigmask(signal.SIG_UNBLOCK, [const.ACCEPT_SIG])
if ret is None: # timeout
log.debug("signal timeout!")
failed_iters.append((i, 'timeout_p'))
should_increase = True
# check server state
retcode = self.srv_p.poll()
if retcode is not None:
failed_iters.append((i, retcode))
self.kill_servers()
# exit(0)
else:
log.debug("signal received!")
# check if this turn only test before poll:
if before_poll:
# check if the server crashes,
ret = self.srv_p.poll()
if ret is None: # terminate the server and return
os.killpg(os.getpgid(self.srv_p.pid), signal.SIGTERM)
log.debug("terminate the server, wait until it terminate..")
try:
self.srv_p.wait(5) # wait until strace properly save the output
except:
log.debug("server terminate timeout, force kill")
self.kill_servers()
log.debug("server terminated")
# server terminate before client, report error
else:
self.kill_servers()
failed_iters.append((i, 'exit_b'))
should_increase = True
else: # after polling, connect a client
log.debug("connecting client ...")
for j in range(const.CLIENT_RETRY):
client_ret = client()
if client_ret == 0:
break
log.debug(f"client ret code {client_ret}")
if client_ret != 0:
log.debug(f"client failed, kill server, wait ... ")
os.killpg(os.getpgid(self.srv_p.pid), signal.SIGTERM)
try:
self.srv_p.wait(5) # wait until strace properly save the output
except:
log.debug("server terminate timeout, force kill")
self.kill_servers()
log.debug(f"server terminated ... ")
failed_iters.append((i, 'client_f'))
should_increase = True
else: # client success, check state of server
try: # wait for server to terminate after client
retcode = self.srv_p.wait(timeout=self.timeout)
except (TimeoutError, subprocess.TimeoutExpired):
log.debug("server still exist after client, try to terminate it ...")
os.killpg(os.getpgid(self.srv_p.pid), signal.SIGTERM)
try:
self.srv_p.wait(5) # wait until cov properly save the output
except:
log.error("server terminate time out, force kill")
self.kill_servers()
log.debug("server terminated!")
if self.retcode is not None: # should exit
failed_iters.append((i, 'timeout_a'))
should_increase = True
else:
if retcode != self.retcode: # check if retcode match
self.kill_servers()
failed_iters.append((i, retcode))
should_increase = True
# handle core dumped
core_ret = self.handle_core_dump(skip_count=skip_count)
if core_ret is None:
print("are you kiddingme ? how could this be NOne?")
elif core_ret > 0:
self.kill_servers()
failed_iters.append((i, 'core'))
should_increase = True
# for iteration, code in failed_iters:
# if iteration == i:
# log.info(f"{iteration}: {code}")
self.parse_hash(False)
log.debug("finish parse hash")
# output list if necessary
log.info(failed_iters)
if should_increase:
skip_count = skip_count+1
def convert_value_target_to_string(self, value_target):
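"""Serialize a value target as 'syscall hash field_index value'; with FIELD_ALL the individual values are joined by '@'."""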
value_string = f'{value_target[3]}'
# the value string for FIELD_ALL will contain all the values joined with @
if self.field_method == FieldMethod.FIELD_ALL:
sep = '@'
value_list_string = map(str, value_target[3])
value_string = sep.join(value_list_string)
return f'{value_target[0]} {value_target[1]} {value_target[2]} {value_string}\n'
def run_100_benchmark(self, client, name, parse_hash=False, target=None):
start = time.time()
for i in range(100):
fuzz_ret_code, retcode = self.run_fuzzer_with_targets(target, False, client)
print(f'{fuzz_ret_code}:{retcode}', end=' ')
if parse_hash:
self.parse_supported_hash()
end = time.time()
total = (end - start)
print(f"\n{name} run time = {total}: {total / 100}")
def run_benchmark(self):
clients = self.target.get("clients")
if len(clients) > 0:
client = clients[0]
else:
client = None
ret = self.run_interceptor_vanilla(False, client)
if ret == 0:
log.info(f"vanilla cov run success ! ")
# generate vanilla syscall list
vanilla_list = self.parse_supported_hash(vanilla=True)
str_key = list(vanilla_list.keys())[0]
split_list = str_key.split('@')
syscall = split_list[0]
hash_str = split_list[1]
# construct a target, syscall, hash_str, field index, field value
first_index_target = [syscall, hash_str, 0, 0]
first_value_target = [syscall, hash_str, 0, self.extract_value_from_index(first_index_target)]
value_targets = [first_value_target]
self.no_signal = True
self.benchmark_cmd = f"{self.command}"
stored_record_file = self.record_file
self.record_file = None
# run the vanilla strace 100 times
self.run_100_benchmark(client, "vanilla application")
self.print_measuerment()
self.no_signal = False
# check for vanilla strace
strace_cmd = f"{os.path.join(self.strace_dir, 'strace')} -ff"
if self.server:
cur_pid = os.getpid() # pass pid to the strace, it will send SIGUSR1 back
strace_cmd = f"{strace_cmd} -j {self.poll} -J {cur_pid}"
self.benchmark_cmd = f"{strace_cmd} {self.command}"
# run the vanilla strace 100 times
self.run_100_benchmark(client, "vanilla strace")
self.print_measuerment()
# check for stack trace
strace_cmd = f"{os.path.join(self.strace_dir, 'strace')} -ff"
if self.server:
cur_pid = os.getpid() # pass pid to the strace, it will send SIGUSR1 back
strace_cmd = f"{strace_cmd} -j {self.poll} -J {cur_pid}"
strace_cmd = f"{strace_cmd} -n {self.hash_file}"
if self.accept_hash > 0:
strace_cmd = f"{strace_cmd} -Q {self.accept_hash}"
self.benchmark_cmd = f"{strace_cmd} {self.command}"
self.run_100_benchmark(client, "stack trace")
self.print_measuerment()
# check for add reference
self.record_file = stored_record_file
strace_cmd = f"{os.path.join(self.strace_dir, 'strace')} -ff"
if self.server:
cur_pid = os.getpid() # pass pid to the strace, it will send SIGUSR1 back
strace_cmd = f"{strace_cmd} -j {self.poll} -J {cur_pid}"
strace_cmd = f"{strace_cmd} -n {self.hash_file}"
if self.accept_hash > 0:
strace_cmd = f"{strace_cmd} -Q {self.accept_hash}"
# add fuzzing
strace_cmd = f"{strace_cmd} -G -a"
# -R means recursive fuzz, provide ref file
strace_cmd = f"{strace_cmd} -R {self.reference_file}"
# add record file if set
if self.record_file is not None:
strace_cmd = f"{strace_cmd} -L {os.path.abspath(self.record_file)}"
self.benchmark_cmd = f"{strace_cmd} {self.command}"
self.run_100_benchmark(client, "fuzz", target=value_targets)
self.print_measuerment()
self.run_100_benchmark(client, "parse hash", True, target=value_targets)
self.print_measuerment()
def parse_record_file(self):
newly_fuzzed = []
if not os.path.isfile(self.record_file):
log.error("record file not exist!")
return
with open(self.record_file) as fp:
lines = fp.readlines()
for line in lines:
if "syscall: " in line and "hash: " in line:
parts = line.split(", hash: ")
syscall = parts[0].split(": ")[-1]
hash_str = parts[1].strip()
str_key = f'{syscall}@{hash_str}'
if str_key not in self.fuzzed_set:
self.fuzzed_set.add(str_key)
newly_fuzzed.append(str_key)
if len(newly_fuzzed) > 0:
log.info(f"newly fuzzed {len(newly_fuzzed)} syscalls. total={len(self.fuzzed_set)}, {newly_fuzzed}")
def run_fuzzer_with_targets(self, value_targets, before_poll, client, target_syscall=None, skip_count=-1):
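"""Run one fuzzing iteration: write value_targets to the reference file, build and launch the strace command, handle the accept signal, client retries and core-dump triage, and return (FuzzResult, retcode)."""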
fuzzer_start_time = time.time()
retry_flag = False
if value_targets is None and self.order_method == OrderMethod.ORDER_RECUR and self.benchmark_cmd is None:
log.error('value_target should not be none for recursive fuzz')
self.clear_exit()
if value_targets is not None:
# write the fuzzing target into file
with open(self.reference_file, 'w+') as f:
for value_target in value_targets:
# syscall, hash, field_index, value
f.write(self.convert_value_target_to_string(value_target))
# construct strace command
strace_cmd = f"{os.path.join(self.strace_dir, 'strace')} -ff"
if self.server:
cur_pid = os.getpid() # pass pid to the strace, it will send SIGUSR1 back
strace_cmd = f"{strace_cmd} -j {self.poll} -J {cur_pid}"
# -G means start fuzzing
strace_cmd = f"{strace_cmd} -G"
if self.overhead_test:
strace_cmd = f"{strace_cmd} -a"
# -R means recursive fuzz, provide ref file
if value_targets is not None:
strace_cmd = f"{strace_cmd} -R {self.reference_file}"
# add record file if set
if self.record_file is not None:
strace_cmd = f"{strace_cmd} -L {os.path.abspath(self.record_file)}"
# always add hash_file
strace_cmd = f"{strace_cmd} -n {self.hash_file}"
if self.accept_hash > 0:
strace_cmd = f"{strace_cmd} -Q {self.accept_hash}"
# fuzz all or fuzz separately, need to provide a supported syscall list for fuzz all
if self.order_method == OrderMethod.ORDER_ALL:
strace_cmd = f"{strace_cmd} -H ALL -K {os.path.abspath(self.syscall_config)}"
elif self.order_method == OrderMethod.ORDER_SEP:
if target_syscall is None:
log.error('target sys call should not be none for ORDER_SEP')
self.clear_exit()
strace_cmd = f"{strace_cmd} -H {target_syscall}"
if self.order_method == OrderMethod.ORDER_ALL or self.order_method == OrderMethod.ORDER_SEP:
# those two methods always need a skip_count and a count_file
if skip_count == -1:
log.error(f'skip count must be set for order method: {self.order_method}')
self.clear_exit()
strace_cmd = f"{strace_cmd} -B {skip_count} -N {os.path.abspath(self.count_file)}"
strace_cmd = f"{strace_cmd} {self.command}"
if self.benchmark_cmd is not None:
strace_cmd = self.benchmark_cmd
ld_path = ""
if self.cache_unwind:
ld_path = ld_cmd
if self.sudo:
strace_cmd = f"sudo -E {ld_path} {strace_cmd}"
log.debug(f"start fuzzing with command {strace_cmd}")
args = shlex.split(strace_cmd)
# do some clean up before run
self.clear_cores()
self.clear_record()
self.clear_strace_log()
self.clear_hash()
# make sure no server is running
self.kill_servers()
# initialize the retcode with a magic number
retcode = 10086
fuzz_ret_code = FuzzResult.FUZZ_SUCCESS
if self.setup_func is not None:
self.setup_func()
# Block signal until sigwait (if caught, it will become pending)
signal.pthread_sigmask(signal.SIG_BLOCK, [const.ACCEPT_SIG])
# running...
try:
self.srv_p = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=self.strace_log_fd,
stderr=self.strace_log_fd,
preexec_fn=os.setsid,
cwd=self.target_cwd,
close_fds=True,
env=self.target_env)
except:
proc = psutil.Process()
log.error(f'opened files: {proc.open_files()}, too many open files?')
self.kill_servers()
fuzz_ret_code = FuzzResult.FUZZ_ERROR
return fuzz_ret_code, retcode
if not self.server:
if self.input:
self.srv_p.communicate(self.input.encode("utf-8").decode('unicode_escape').encode("utf-8"))
try:
retcode = self.srv_p.wait(self.timeout) # wait up to self.timeout; if it does not return, something happened
except subprocess.TimeoutExpired:
# timeout, kill the program
self.kill_servers()
fuzz_ret_code = FuzzResult.FUZZ_EXECTIMEOUT
else:
if self.retcode != retcode:
self.kill_servers()
fuzz_ret_code = FuzzResult.FUZZ_RETNOTMATCH
else: # handle servers
# check if server exist before wait for signal (save time)
# time.sleep(0.5)
retcode = self.srv_p.poll()
log.debug("check server exist before wait for signal")
if retcode is not None:
log.debug('server exit before signal')
fuzz_ret_code = FuzzResult.FUZZ_EXITB4POLL
else:
signal_received = False
client_retry = 1
if self.no_signal:
client_retry = 100
signal_received = True
else:
# use polling instead of timeout
wait_start = time.time()
wait_end = time.time()
log.debug("wait for server's signal ...")
while wait_end - wait_start < self.poll_time:
ret = signal.sigtimedwait([const.ACCEPT_SIG], 0) # poll the signal
if ret is not None: # signal received
break
# check server state
retcode = self.srv_p.poll()
if retcode is not None:
log.debug(f'server terminated before reach accept, retcode = {retcode}')
fuzz_ret_code = FuzzResult.FUZZ_EXITB4POLL
break
wait_end = time.time()
signal.pthread_sigmask(signal.SIG_UNBLOCK, [const.ACCEPT_SIG])
if ret is None: # timeout
log.debug("signal timeout!")
retcode = self.srv_p.poll()
fuzz_ret_code = FuzzResult.FUZZ_SIGTIMEOUT
if retcode is not None:
fuzz_ret_code = FuzzResult.FUZZ_EXITB4POLL
log.debug(f'server terminated before reach accept, retcode = {retcode}')
self.kill_servers()
else:
signal_received = True
log.debug("signal received!")
if signal_received:
# check if this turn only test before poll:
if before_poll:
# check if the server crashes,
retcode = self.srv_p.poll()
if retcode is None: # terminate the server and return
os.killpg(os.getpgid(self.srv_p.pid), signal.SIGTERM)
log.debug("terminate the server, wait until it terminate..")
try:
self.srv_p.wait(5) # wait until strace properly save the output
except:
log.debug("server terminate timeout, force kill")
self.kill_servers()
log.debug("server terminated")
# server terminated before the client, report an error
else:
self.kill_servers()
else: # after polling, connect a client
log.debug("connecting client ...")
retry_count = 0
for j in range(client_retry):
client_ret = client()
if client_ret == 0:
break
else:
retry_count += 1
retry_flag = True
log.debug(f"client ret code {client_ret}")
if client_ret != 0:
log.debug(f"client failed, kill server, wait ... ")
# if self.order_method != OrderMethod.ORDER_RECUR:
# try:
# self.srv_p.wait(5) # wait until strace properly save the output
# except:
# pass
fuzz_ret_code = FuzzResult.FUZZ_CLIENTFAIL
self.kill_servers()
log.debug(f"server terminated ... ")
else: # client success, check state of server
if self.retcode is not None: # server should exit
try: # wait for server to terminate after client
retcode = self.srv_p.wait(timeout=self.timeout)
except (TimeoutError, subprocess.TimeoutExpired):
log.debug("server still exist after client, try to terminate it ...")
os.kill(self.srv_p.pid, signal.SIGTERM)
fuzz_ret_code = FuzzResult.FUZZ_EXECTIMEOUT
try:
self.srv_p.wait(8) # wait until cov properly saves the output
except:
log.error("server termination timed out, force kill")
self.kill_servers()
else:
log.debug("server terminated!")
if retcode != self.retcode:
fuzz_ret_code = FuzzResult.FUZZ_RETNOTMATCH
# if the server is supposed to run indefinitely, just kill it
else:
self.kill_servers()
# handle core dumped
core_ret = self.handle_core_dump_script(retcode, value_targets, skip_count)
if core_ret is None:
log.error("are you kiddingme ? how could this be NOne?")
if retcode == -11 and core_ret == 0:
log.error(f'Retcode is -11 but no core found, target = {value_targets}')
elif core_ret > 0:
self.kill_servers()
fuzz_ret_code = FuzzResult.FUZZ_COREDUMP
# parse record file and check if new syscall get fuzzed
if self.record_file is not None:
self.parse_record_file()
if retcode == 244:
stored_error = os.path.join(self.errorlog_dir, f"err_{self.errorcount}.txt")
shutil.copy(self.strace_log, stored_error)
log.error(f"strace retcode is 1, store file to {stored_error}")
self.errorcount += 1
fuzzer_end_time = time.time()
if not retry_flag:
self.run_fuzz_function_time += (fuzzer_end_time-fuzzer_start_time)
self.iteration_count += 1
else:
self.run_fuzz_function_time += (fuzzer_end_time - fuzzer_start_time)
self.iteration_count += 1
# print(f"retry:{retry_count}", end=',')
return fuzz_ret_code, retcode
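# --- Editor's sketch (not part of the original fuzzer): the readiness-wait
# pattern used above, reduced to a self-contained helper. Assumptions: Linux
# (signal.sigtimedwait is Linux-only) and that the child signals readiness
# with SIGUSR1; both are placeholders, not taken from the source.
import signal
import subprocess
import time

READY_SIG = signal.SIGUSR1

def wait_for_ready(proc: subprocess.Popen, poll_time: float) -> str:
    """Return 'ready', 'exited' or 'timeout' for a child expected to signal us."""
    # Block the signal so it stays pending until sigtimedwait collects it.
    signal.pthread_sigmask(signal.SIG_BLOCK, [READY_SIG])
    deadline = time.time() + poll_time
    try:
        while time.time() < deadline:
            if signal.sigtimedwait([READY_SIG], 0) is not None:
                return "ready"   # signal received from the child
            if proc.poll() is not None:
                return "exited"  # child terminated before signalling
            time.sleep(0.01)     # avoid a busy loop while polling
        return "timeout"
    finally:
        signal.pthread_sigmask(signal.SIG_UNBLOCK, [READY_SIG])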
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
model_scheduled_report.go
|
/*
* Uptrends API v4
*
* This document describes Uptrends API version 4. This Swagger environment also lets you execute API methods directly. Please note that this is not a sandbox environment: these API methods operate directly on your actual Uptrends account. For more information, please visit https://www.uptrends.com/api.
*
* API version: 1.0.0
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package uptrends
type ScheduledReport struct {
ScheduledReportGuid string `json:"ScheduledReportGuid,omitempty"`
Hash string `json:"Hash,omitempty"`
DashboardGuid string `json:"DashboardGuid,omitempty"`
FileType *ScheduledReportFileType `json:"FileType,omitempty"`
IsActive bool `json:"IsActive,omitempty"`
Schedule *Schedule `json:"Schedule,omitempty"`
SelectedPeriod *PresetPeriodTypeWithExclusive `json:"SelectedPeriod,omitempty"`
InternalNotes string `json:"InternalNotes,omitempty"`
Notes string `json:"Notes,omitempty"`
Recipients *Recipients `json:"Recipients,omitempty"`
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
store/cloudflare/cloudflare.go
|
// Package cloudflare is a store implementation backed by cloudflare workers kv
// Note that the cloudflare workers KV API is eventually consistent.
package cloudflare
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"math"
"net/http"
"net/url"
"os"
"strconv"
"time"
"github.com/blastbao/go-micro/store"
"github.com/pkg/errors"
"github.com/ReneKroon/ttlcache"
)
const (
apiBaseURL = "https://api.cloudflare.com/client/v4/"
)
type workersKV struct {
options store.Options
// cf account id
account string
// cf api token
token string
// cf kv namespace
namespace string
// http client to use
httpClient *http.Client
// cache
cache *ttlcache.Cache
}
// apiResponse is a cloudflare v4 api response
type apiResponse struct {
Result []struct {
ID string `json:"id"`
Type string `json:"type"`
Name string `json:"name"`
Expiration string `json:"expiration"`
Content string `json:"content"`
Proxiable bool `json:"proxiable"`
Proxied bool `json:"proxied"`
TTL int `json:"ttl"`
Priority int `json:"priority"`
Locked bool `json:"locked"`
ZoneID string `json:"zone_id"`
ZoneName string `json:"zone_name"`
ModifiedOn time.Time `json:"modified_on"`
CreatedOn time.Time `json:"created_on"`
} `json:"result"`
Success bool `json:"success"`
Errors []apiMessage `json:"errors"`
// not sure Messages is ever populated?
Messages []apiMessage `json:"messages"`
ResultInfo struct {
Page int `json:"page"`
PerPage int `json:"per_page"`
Count int `json:"count"`
TotalCount int `json:"total_count"`
} `json:"result_info"`
}
// apiMessage is a Cloudflare v4 API Error
type apiMessage struct {
Code int `json:"code"`
Message string `json:"message"`
}
// getOptions returns account id, token and namespace
func getOptions() (string, string, string) {
accountID := os.Getenv("CF_ACCOUNT_ID")
apiToken := os.Getenv("CF_API_TOKEN")
namespace := os.Getenv("KV_NAMESPACE_ID")
return accountID, apiToken, namespace
}
func validateOptions(account, token, namespace string) {
if len(account) == 0 {
log.Fatal("Store: CF_ACCOUNT_ID is blank")
}
if len(token) == 0 {
log.Fatal("Store: CF_API_TOKEN is blank")
}
if len(namespace) == 0 {
log.Fatal("Store: KV_NAMESPACE_ID is blank")
}
}
func (w *workersKV) Init(opts ...store.Option) error {
for _, o := range opts {
o(&w.options)
}
if len(w.options.Namespace) > 0 {
w.namespace = w.options.Namespace
}
ttl := w.options.Context.Value("STORE_CACHE_TTL")
if ttl != nil {
ttlint64, ok := ttl.(int64)
if !ok {
log.Fatal("STORE_CACHE_TTL from context must be type int64")
}
w.cache = ttlcache.NewCache()
w.cache.SetTTL(time.Duration(ttlint64))
w.cache.SkipTtlExtensionOnHit(true)
}
return nil
}
func (w *workersKV) list(prefix string) ([]string, error) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
path := fmt.Sprintf("accounts/%s/storage/kv/namespaces/%s/keys", w.account, w.namespace)
body := make(map[string]string)
if len(prefix) > 0 {
body["prefix"] = prefix
}
response, _, _, err := w.request(ctx, http.MethodGet, path, body, make(http.Header))
if err != nil {
return nil, err
}
a := &apiResponse{}
if err := json.Unmarshal(response, a); err != nil {
return nil, err
}
if !a.Success {
messages := ""
for _, m := range a.Errors {
messages += strconv.Itoa(m.Code) + " " + m.Message + "\n"
}
return nil, errors.New(messages)
}
keys := make([]string, 0, len(a.Result))
for _, r := range a.Result {
keys = append(keys, r.Name)
}
return keys, nil
}
// In the cloudflare workers KV implementation, List() doesn't guarantee
// anything as the workers API is eventually consistent.
func (w *workersKV) List() ([]*store.Record, error) {
keys, err := w.list("")
if err != nil {
return nil, err
}
var gerr error
var records []*store.Record
for _, key := range keys {
r, err := w.Read(key)
if err != nil {
gerr = err
continue
}
records = append(records, r...)
}
return records, gerr
}
func (w *workersKV) Read(key string, opts ...store.ReadOption) ([]*store.Record, error) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
var options store.ReadOptions
for _, o := range opts {
o(&options)
}
keys := []string{key}
if options.Prefix {
k, err := w.list(key)
if err != nil {
return nil, err
}
keys = k
}
//nolint:prealloc
var records []*store.Record
for _, k := range keys {
if w.cache != nil {
if resp, hit := w.cache.Get(k); hit {
if record, ok := resp.(*store.Record); ok {
records = append(records, record)
continue
}
}
}
path := fmt.Sprintf("accounts/%s/storage/kv/namespaces/%s/values/%s", w.account, w.namespace, url.PathEscape(k))
response, headers, status, err := w.request(ctx, http.MethodGet, path, nil, make(http.Header))
if err != nil {
return records, err
}
if status < 200 || status >= 300 {
return records, errors.New("Received unexpected Status " + strconv.Itoa(status) + string(response))
}
record := &store.Record{
Key: k,
Value: response,
}
if expiry := headers.Get("Expiration"); len(expiry) != 0 {
expiryUnix, err := strconv.ParseInt(expiry, 10, 64)
if err != nil {
return records, err
}
record.Expiry = time.Until(time.Unix(expiryUnix, 0))
}
if w.cache != nil {
w.cache.Set(record.Key, record)
}
records = append(records, record)
}
return records, nil
}
func (w *workersKV) Write(r *store.Record) error {
// Set it in local cache, with the global TTL from options
if w.cache != nil {
w.cache.Set(r.Key, r)
}
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
path := fmt.Sprintf("accounts/%s/storage/kv/namespaces/%s/values/%s", w.account, w.namespace, url.PathEscape(r.Key))
if r.Expiry != 0 {
// Minimum cloudflare TTL is 60 Seconds
exp := int(math.Max(60, math.Round(r.Expiry.Seconds())))
path = path + "?expiration_ttl=" + strconv.Itoa(exp)
}
headers := make(http.Header)
resp, _, _, err := w.request(ctx, http.MethodPut, path, r.Value, headers)
if err != nil {
return err
}
a := &apiResponse{}
if err := json.Unmarshal(resp, a); err != nil {
return err
}
if !a.Success {
messages := ""
for _, m := range a.Errors {
messages += strconv.Itoa(m.Code) + " " + m.Message + "\n"
}
return errors.New(messages)
}
return nil
}
func (w *workersKV) Delete(key string) error {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
path := fmt.Sprintf("accounts/%s/storage/kv/namespaces/%s/values/%s", w.account, w.namespace, url.PathEscape(key))
resp, _, _, err := w.request(ctx, http.MethodDelete, path, nil, make(http.Header))
if err != nil {
return err
}
a := &apiResponse{}
if err := json.Unmarshal(resp, a); err != nil {
return err
}
if !a.Success {
messages := ""
for _, m := range a.Errors {
messages += strconv.Itoa(m.Code) + " " + m.Message + "\n"
}
return errors.New(messages)
}
return nil
}
func (w *workersKV) request(ctx context.Context, method, path string, body interface{}, headers http.Header) ([]byte, http.Header, int, error) {
var jsonBody []byte
var err error
if body != nil {
if paramBytes, ok := body.([]byte); ok {
jsonBody = paramBytes
} else {
jsonBody, err = json.Marshal(body)
if err != nil {
return nil, nil, 0, errors.Wrap(err, "error marshalling params to JSON")
}
}
} else {
jsonBody = nil
}
var reqBody io.Reader
if jsonBody != nil {
reqBody = bytes.NewReader(jsonBody)
}
req, err := http.NewRequestWithContext(ctx, method, apiBaseURL+path, reqBody)
if err != nil {
return nil, nil, 0, errors.Wrap(err, "error creating new request")
}
for key, value := range headers {
req.Header[key] = value
}
// set token if it exists
if len(w.token) > 0 {
req.Header.Set("Authorization", "Bearer "+w.token)
}
// set the user agent to micro
req.Header.Set("User-Agent", "micro/1.0 (https://micro.mu)")
// Official cloudflare client does exponential backoff here
// TODO: retry and use util/backoff
resp, err := w.httpClient.Do(req)
if err != nil {
return nil, nil, 0, err
}
defer resp.Body.Close()
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
return respBody, resp.Header, resp.StatusCode, err
}
return respBody, resp.Header, resp.StatusCode, nil
}
func (w *workersKV) String() string {
return "cloudflare"
}
// NewStore returns a cloudflare Store implementation.
// Account ID, Token and Namespace must either be passed as options or
// environment variables. If set as env vars we expect the following;
// CF_API_TOKEN to a cloudflare API token scoped to Workers KV.
// CF_ACCOUNT_ID to contain a string with your cloudflare account ID.
// KV_NAMESPACE_ID to contain the namespace UUID for your KV storage.
func NewStore(opts ...store.Option) store.Store {
var options store.Options
for _, o := range opts {
o(&options)
}
// get options from environment
account, token, namespace := getOptions()
if len(account) == 0 {
account = getAccount(options.Context)
}
if len(token) == 0 {
token = getToken(options.Context)
}
if len(namespace) == 0 {
namespace = options.Namespace
}
// validate options are not blank or log.Fatal
validateOptions(account, token, namespace)
return &workersKV{
account: account,
namespace: namespace,
token: token,
options: options,
httpClient: &http.Client{},
}
}
|
[
"\"CF_ACCOUNT_ID\"",
"\"CF_API_TOKEN\"",
"\"KV_NAMESPACE_ID\""
] |
[] |
[
"KV_NAMESPACE_ID",
"CF_API_TOKEN",
"CF_ACCOUNT_ID"
] |
[]
|
["KV_NAMESPACE_ID", "CF_API_TOKEN", "CF_ACCOUNT_ID"]
|
go
| 3 | 0 | |
test/functional/test_runner.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:PivxTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
BASE_SCRIPTS= [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'wallet_basic.py', # ~ 498 sec
'wallet_backup.py', # ~ 477 sec
# vv Tests less than 5m vv
'wallet_zapwallettxes.py', # ~ 300 sec
'p2p_time_offset.py', # ~ 267 sec
'rpc_fundrawtransaction.py', # ~ 260 sec
'mining_pos_coldStaking.py', # ~ 215 sec
'mining_pos_reorg.py', # ~ 212 sec
'wallet_abandonconflict.py', # ~ 212 sec
'wallet_hd.py', # ~ 210 sec
'wallet_zerocoin_publicspends.py', # ~ 202 sec
'feature_logging.py', # ~ 200 sec
'rpc_rawtransaction.py', # ~ 193 sec
'wallet_keypool_topup.py', # ~ 174 sec
'wallet_txn_doublespend.py --mineblock', # ~ 157 sec
'wallet_txn_clone.py --mineblock', # ~ 157 sec
'rpc_spork.py', # ~ 156 sec
'interface_rest.py', # ~ 154 sec
'feature_proxy.py', # ~ 143 sec
'feature_uacomment.py', # ~ 130 sec
'wallet_upgrade.py', # ~ 124 sec
'wallet_import_stakingaddress.py', # ~ 123 sec
# vv Tests less than 2m vv
'p2p_disconnect_ban.py', # ~ 118 sec
'wallet_listreceivedby.py', # ~ 117 sec
'mining_pos_fakestake.py', # ~ 113 sec
'feature_reindex.py', # ~ 110 sec
'interface_http.py', # ~ 105 sec
'wallet_listtransactions.py', # ~ 97 sec
'mempool_reorg.py', # ~ 92 sec
'sapling_wallet_persistence.py', # ~ 90 sec
'wallet_encryption.py', # ~ 89 sec
'wallet_keypool.py', # ~ 88 sec
'wallet_dump.py', # ~ 83 sec
'rpc_net.py', # ~ 83 sec
'rpc_bip38.py', # ~ 82 sec
'rpc_deprecated.py', # ~ 80 sec
'interface_bitcoin_cli.py', # ~ 80 sec
'mempool_packages.py', # ~ 63 sec
# vv Tests less than 60s vv
'wallet_labels.py', # ~ 57 sec
'rpc_signmessage.py', # ~ 54 sec
'mempool_resurrect.py', # ~ 51 sec
'rpc_budget.py', # ~ 50 sec
'mempool_spend_coinbase.py', # ~ 50 sec
'rpc_signrawtransaction.py', # ~ 50 sec
'rpc_decodescript.py', # ~ 50 sec
'rpc_blockchain.py', # ~ 50 sec
'wallet_disable.py', # ~ 50 sec
'mining_v5_upgrade.py', # ~ 48 sec
'feature_help.py', # ~ 30 sec
# Don't append tests at the end to avoid merge conflicts
# Put them in a random line within the section that fits their approximate run-time
# 'feature_block.py',
# 'wallet_importmulti.py',
# 'mempool_limit.py', # We currently don't limit our mempool_reorg
# 'interface_zmq.py',
# 'rpc_getchaintips.py',
# 'mempool_persist.py',
# 'rpc_users.py',
# 'p2p_mempool.py',
# 'mining_prioritisetransaction.py',
# 'p2p_invalid_block.py',
# 'p2p_invalid_tx.py',
# 'wallet_import_rescan.py',
# 'mining_basic.py',
# 'wallet_bumpfee.py',
# 'wallet_listsinceblock.py',
# 'p2p_leak.py',
# 'feature_cltv.py',
# 'feature_minchainwork.py',
# 'p2p_fingerprint.py',
# 'p2p_unrequested_blocks.py',
# 'feature_config_args.py',
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
# vv Tests less than 20m vv
'feature_fee_estimation.py', # ~ 360 sec
# vv Tests less than 5m vv
# vv Tests less than 2m vv
#'p2p_timeouts.py',
# vv Tests less than 60s vv
#'p2p_feefilter.py',
'rpc_bind.py',
# vv Tests less than 30s vv
#'example_test.py',
'feature_notifications.py',
'rpc_invalidateblock.py',
]
LEGACY_SKIP_TESTS = [
# These tests are not run when the flag --legacywallet is used
'feature_help.py',
'feature_logging.py',
'feature_reindex.py',
'feature_proxy.py',
'feature_uacomment.py',
'interface_bitcoin_cli.py',
'interface_http.py',
'interface_rest.py',
'mempool_reorg.py',
'mempool_resurrect.py',
'mempool_spend_coinbase.py',
'p2p_disconnect_ban.py',
'p2p_time_offset.py',
'rpc_bip38.py',
'rpc_blockchain.py',
'rpc_budget.py',
'rpc_decodescript.py',
'rpc_fundrawtransaction.py',
'rpc_net.py',
'rpc_signmessage.py',
'rpc_spork.py',
'sapling_wallet_persistence.py',
'wallet_hd.py', # no HD tests for pre-HD wallets
'wallet_upgrade.py', # can't upgrade to pre-HD wallet
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--combinedlogslen', '-c', type=int, default=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true', help='only print dots, results summary and failure logs')
parser.add_argument('--legacywallet', '-w', action='store_true', help='create pre-HD wallets only')
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile))
passon_args.append("--configfile=%s" % configfile)
if args.legacywallet:
passon_args.append("--legacywallet")
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = "%s/cryptoflow_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
os.makedirs(tmpdir)
logging.debug("Temporary test directory at %s" % tmpdir)
enable_wallet = config["components"].getboolean("ENABLE_WALLET")
enable_utils = config["components"].getboolean("ENABLE_UTILS")
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not (enable_wallet and enable_utils and enable_bitcoind):
print("No functional tests to run. Wallet, utils, and cryptoflowd must all be enabled")
print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
sys.exit(0)
# Build list of tests
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept the name with or without .py extension.
tests = [re.sub(r"\.py$", "", t) + ".py" for t in tests]
test_list = []
for t in tests:
if t in ALL_SCRIPTS:
test_list.append(t)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], t))
else:
# No individual tests have been specified.
# Run all base tests, and optionally run extended tests.
test_list = BASE_SCRIPTS
if args.extended:
# place the EXTENDED_SCRIPTS first since the three longest ones
# are there and the list is shorter
test_list = EXTENDED_SCRIPTS + test_list
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
tests_excl = [re.sub(r"\.py$", "", t) + ".py" for t in args.exclude.split(',')]
for exclude_test in tests_excl:
if exclude_test in test_list:
test_list.remove(exclude_test)
else:
print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
# If --legacywallet, remove extra test cases
if args.legacywallet:
test_list = [x for x in test_list if x not in LEGACY_SKIP_TESTS]
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script (with args removed) and exit.
parser.print_help()
subprocess.check_call([(config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0].split()[0])] + ['-h'])
sys.exit(0)
check_script_list(config["environment"]["SRCDIR"])
check_script_prefixes()
if not args.keepcache:
shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
run_tests(test_list,
config["environment"]["SRCDIR"],
config["environment"]["BUILDDIR"],
config["environment"]["EXEEXT"],
tmpdir,
args.jobs, args.coverage,
passon_args, args.combinedlogslen,
args.keepcache)
def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_coverage=False, args=[], combined_logs_len=0, keep_cache=False):
# Warn if cryptoflowd is already running (unix only)
try:
if subprocess.check_output(["pidof", "cryptoflowd"]) is not None:
print("%sWARNING!%s There is already a cryptoflowd process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = "%s/test/cache" % build_dir
if os.path.isdir(cache_dir):
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
#Set env vars
if "BITCOIND" not in os.environ:
os.environ["BITCOIND"] = build_dir + '/src/cryptoflowd' + exeext
os.environ["BITCOINCLI"] = build_dir + '/src/cryptoflow-cli' + exeext
tests_dir = src_dir + '/test/functional/'
flags = ["--srcdir={}/src".format(build_dir)] + args
flags.append("--cachedir=%s" % cache_dir)
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug("Initializing coverage directory at %s" % coverage.dir)
else:
coverage = None
if len(test_list) > 1 and jobs > 1:
# Populate cache
# Send a ping message every 5 minutes to not get stalled on Travis.
import threading
pingTime = 5 * 60
stopTimer = False
def pingTravis():
if stopTimer:
return
print("- Creating cache in progress...")
sys.stdout.flush()
threading.Timer(pingTime, pingTravis).start()
if not keep_cache:
pingTravis()
try:
subprocess.check_output([tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
except subprocess.CalledProcessError as e:
sys.stdout.buffer.write(e.output)
raise
finally:
stopTimer = True
#Run Tests
job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
time0 = time.time()
test_results = []
max_len_name = len(max(test_list, key=len))
test_count = len(test_list)
for i in range(test_count):
test_result, testdir, stdout, stderr = job_queue.get_next()
test_results.append(test_result)
done_str = "{}/{} - {}{}{}".format(i + 1, test_count, BOLD[1], test_result.name, BOLD[0])
if test_result.status == "Passed":
if stderr == "":
logging.debug("%s passed, Duration: %s s" % (done_str, test_result.time))
else:
logging.debug("%s passed (with warnings), Duration: %s s" % (done_str, test_result.time))
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
elif test_result.status == "Skipped":
logging.debug("%s skipped" % (done_str))
else:
print("%s failed, Duration: %s s\n" % (done_str, test_result.time))
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
if combined_logs_len and os.path.isdir(testdir):
# Print the final `combinedlogslen` lines of the combined logs
print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
print('\n============')
print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
print('============\n')
combined_logs, _ = subprocess.Popen([os.path.join(tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
print_results(test_results, max_len_name, (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=lambda result: result.name.lower())
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
results += "Runtime: %s s\n" % (runtime)
print(results)
class TestHandler:
"""
Trigger the test scripts passed in via the list.
"""
def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.test_list = test_list
self.flags = flags
self.num_running = 0
# In case there is a graveyard of zombie cryptoflowds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
portseed = len(self.test_list) + self.portseed_offset
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = t.split()
testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
tmpdir_arg = ["--tmpdir={}".format(testdir)]
self.jobs.append((t,
time.time(),
subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
testdir,
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
# Print remaining running jobs when all jobs have been started.
if not self.test_list:
print("Remaining jobs: [{}]".format(", ".join(j[0] for j in self.jobs)))
dot_count = 0
while True:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
(name, time0, proc, testdir, log_out, log_err) = j
if os.getenv('TRAVIS') == 'true' and int(time.time() - time0) > 20 * 60:
# In travis, timeout individual tests after 20 minutes (to stop tests hanging and not
# providing useful output).
proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED:
status = "Passed"
elif proc.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
self.num_running -= 1
self.jobs.remove(j)
clearline = '\r' + (' ' * dot_count) + '\r'
print(clearline, end='', flush=True)
dot_count = 0
return TestResult(name, status, int(time.time() - time0)), testdir, stdout, stderr
print('.', end='', flush=True)
dot_count += 1
class TestResult():
def __init__(self, name, status, time):
self.name = name
self.status = status
self.time = time
self.padding = 0
def __repr__(self):
if self.status == "Passed":
color = BLUE
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def check_script_prefixes():
"""Check that at most a handful of the
test scripts don't start with one of the allowed name prefixes."""
# LEEWAY is provided as a transition measure, so that pull-requests
# that introduce new tests that don't conform with the naming
# convention don't immediately cause the tests to fail.
LEEWAY = 10
good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet|zerocoin|sapling)_")
bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None]
if len(bad_script_names) > 0:
print("INFO: %d tests not meeting naming conventions:" % (len(bad_script_names)))
print(" %s" % ("\n ".join(sorted(bad_script_names))))
assert len(bad_script_names) <= LEEWAY, "Too many tests not following naming convention! (%d found, maximum: %d)" % (len(bad_script_names), LEEWAY)
def check_script_list(src_dir):
"""Check scripts directory.
Check that there are no scripts in the functional tests directory which are
not being run by test_runner.py."""
script_dir = src_dir + '/test/functional/'
python_files = set([t for t in os.listdir(script_dir) if t[-3:] == ".py"])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
if os.getenv('TRAVIS') == 'true':
# On travis this warning is an error to prevent merging incomplete commits into master
sys.exit(1)
class RPCCoverage():
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `cryptoflow-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `test/functional/test_framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
main()
|
[] |
[] |
[
"BITCOINCLI",
"TRAVIS",
"BITCOIND"
] |
[]
|
["BITCOINCLI", "TRAVIS", "BITCOIND"]
|
python
| 3 | 0 | |
plugins/govppmux/plugin_impl_govppmux.go
|
// Copyright (c) 2019 Cisco and/or its affiliates.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package govppmux
import (
"context"
"encoding/gob"
"fmt"
"os"
"sort"
"strings"
"sync"
"time"
"git.fd.io/govpp.git/adapter"
govppapi "git.fd.io/govpp.git/api"
govpp "git.fd.io/govpp.git/core"
"git.fd.io/govpp.git/proxy"
"github.com/ligato/cn-infra/datasync/resync"
"github.com/ligato/cn-infra/health/statuscheck"
"github.com/ligato/cn-infra/infra"
"github.com/ligato/cn-infra/logging"
"github.com/ligato/cn-infra/rpc/rest"
"github.com/pkg/errors"
"go.ligato.io/vpp-agent/v3/plugins/govppmux/vppcalls"
"go.ligato.io/vpp-agent/v3/plugins/vpp"
"go.ligato.io/vpp-agent/v3/plugins/vpp/binapi"
_ "go.ligato.io/vpp-agent/v3/plugins/govppmux/vppcalls/vpp1904"
_ "go.ligato.io/vpp-agent/v3/plugins/govppmux/vppcalls/vpp1908"
_ "go.ligato.io/vpp-agent/v3/plugins/govppmux/vppcalls/vpp2001"
_ "go.ligato.io/vpp-agent/v3/plugins/govppmux/vppcalls/vpp2001_324"
)
var (
disabledSocketClient = os.Getenv("GOVPPMUX_NOSOCK") != ""
)
// Plugin is the govppmux plugin implementation.
type Plugin struct {
Deps
config *Config
vpeHandler vppcalls.VppCoreAPI
binapiVersion vpp.Version
vppConn *govpp.Connection
vppConChan chan govpp.ConnectionEvent
lastConnErr error
vppapiChan govppapi.Channel
statsAdapter adapter.StatsAPI
statsConn *govpp.StatsConnection
proxy *proxy.Server
// infoMu synchronizes access to fields
// vppInfo and lastEvent
infoMu sync.Mutex
vppInfo VPPInfo
lastEvent govpp.ConnectionEvent
cancel context.CancelFunc
wg sync.WaitGroup
}
// Deps defines dependencies for the govppmux plugin.
type Deps struct {
infra.PluginDeps
HTTPHandlers rest.HTTPHandlers
StatusCheck statuscheck.PluginStatusWriter
Resync *resync.Plugin
}
// Init is the entry point called by Agent Core. A single binary-API connection to VPP is established.
func (p *Plugin) Init() (err error) {
if p.config, err = p.loadConfig(); err != nil {
return err
}
p.Log.Debugf("config: %+v", p.config)
// set GoVPP config
govpp.HealthCheckProbeInterval = p.config.HealthCheckProbeInterval
govpp.HealthCheckReplyTimeout = p.config.HealthCheckReplyTimeout
govpp.HealthCheckThreshold = p.config.HealthCheckThreshold
govpp.DefaultReplyTimeout = p.config.ReplyTimeout
// register REST API handlers
p.registerHandlers(p.HTTPHandlers)
var address string
useShm := disabledSocketClient || p.config.ConnectViaShm || p.config.ShmPrefix != ""
if useShm {
address = p.config.ShmPrefix
} else {
address = p.config.BinAPISocketPath
}
// TODO: Async connect & automatic reconnect support is not yet implemented in the agent,
// so synchronously wait until connected to VPP.
startTime := time.Now()
p.Log.Debugf("connecting to VPP..")
vppAdapter := NewVppAdapter(address, useShm)
p.vppConn, p.vppConChan, err = govpp.AsyncConnect(vppAdapter, p.config.RetryConnectCount, p.config.RetryConnectTimeout)
if err != nil {
return err
}
// wait for connection event
for {
event, ok := <-p.vppConChan
if !ok {
return errors.Errorf("VPP connection state channel closed")
}
if event.State == govpp.Connected {
break
} else if event.State == govpp.Failed || event.State == govpp.Disconnected {
return errors.Errorf("unable to establish connection to VPP (%v)", event.Error)
} else {
p.Log.Debugf("VPP connection state: %+v", event)
}
}
took := time.Since(startTime)
p.Log.Debugf("connection to VPP established (took %s)", took.Round(time.Millisecond))
if err := p.updateVPPInfo(); err != nil {
return errors.WithMessage(err, "retrieving VPP info failed")
}
// Connect to VPP status socket
var statsSocket string
if p.config.StatsSocketPath != "" {
statsSocket = p.config.StatsSocketPath
} else {
statsSocket = adapter.DefaultStatsSocket
}
statsAdapter := NewStatsAdapter(statsSocket)
if statsAdapter == nil {
p.Log.Warnf("Unable to connect to the VPP statistics socket, nil stats adapter", err)
} else if p.statsConn, err = govpp.ConnectStats(statsAdapter); err != nil {
p.Log.Warnf("Unable to connect to the VPP statistics socket, %v", err)
p.statsAdapter = nil
}
if p.config.ProxyEnabled {
// register binapi messages to gob package (required for proxy)
msgList := binapi.Versions[p.binapiVersion]
for _, msg := range msgList.AllMessages() {
gob.Register(msg)
}
err := p.startProxy(NewVppAdapter(address, useShm), NewStatsAdapter(statsSocket))
if err != nil {
return err
}
p.Log.Infof("VPP proxy ready")
}
return nil
}
// AfterInit reports status check.
func (p *Plugin) AfterInit() error {
// Register providing status reports (push mode)
p.StatusCheck.Register(p.PluginName, nil)
p.StatusCheck.ReportStateChange(p.PluginName, statuscheck.OK, nil)
var ctx context.Context
ctx, p.cancel = context.WithCancel(context.Background())
p.wg.Add(1)
go p.handleVPPConnectionEvents(ctx)
return nil
}
// Close cleans up the resources allocated by the govppmux plugin.
func (p *Plugin) Close() error {
p.cancel()
p.wg.Wait()
defer func() {
if p.vppConn != nil {
p.vppConn.Disconnect()
}
if p.statsAdapter != nil {
if err := p.statsAdapter.Disconnect(); err != nil {
p.Log.Errorf("VPP statistics socket adapter disconnect error: %v", err)
}
}
}()
if p.proxy != nil {
p.proxy.DisconnectBinapi()
p.proxy.DisconnectStats()
}
return nil
}
func (p *Plugin) Version() vpp.Version {
return p.binapiVersion
}
func (p *Plugin) CheckCompatiblity(msgs ...govppapi.Message) error {
p.infoMu.Lock()
defer p.infoMu.Unlock()
if p.vppapiChan == nil {
apiChan, err := p.vppConn.NewAPIChannel()
if err != nil {
return err
}
p.vppapiChan = apiChan
}
return p.vppapiChan.CheckCompatiblity(msgs...)
}
func (p *Plugin) Stats() govppapi.StatsProvider {
if p.statsConn == nil {
return nil
}
return p
}
func (p *Plugin) BinapiVersion() vpp.Version {
return p.binapiVersion
}
// VPPInfo returns information about VPP session.
func (p *Plugin) VPPInfo() VPPInfo {
p.infoMu.Lock()
defer p.infoMu.Unlock()
return p.vppInfo
}
// IsPluginLoaded returns true if plugin is loaded.
func (p *Plugin) IsPluginLoaded(plugin string) bool {
p.infoMu.Lock()
defer p.infoMu.Unlock()
for _, p := range p.vppInfo.Plugins {
if p.Name == plugin {
return true
}
}
return false
}
func (p *Plugin) updateVPPInfo() (err error) {
if p.vppConn == nil {
return fmt.Errorf("VPP connection is nil")
}
p.vppapiChan, err = p.vppConn.NewAPIChannel()
if err != nil {
return err
}
p.binapiVersion, err = binapi.CompatibleVersion(p.vppapiChan)
if err != nil {
return err
}
p.vpeHandler, err = vppcalls.NewHandler(p)
if err != nil {
return errors.New("no compatible VPP handler found")
}
ctx := context.TODO()
version, err := p.vpeHandler.RunCli(ctx, "show version verbose")
if err != nil {
p.Log.Warnf("RunCli error: %v", err)
} else {
p.Log.Debugf("vpp# show version verbose\n%s", version)
}
cmdline, err := p.vpeHandler.RunCli(ctx, "show version cmdline")
if err != nil {
p.Log.Warnf("RunCli error: %v", err)
} else {
out := strings.Replace(cmdline, "\n", "", -1)
p.Log.Debugf("vpp# show version cmdline:\n%s", out)
}
ver, err := p.vpeHandler.GetVersion(ctx)
if err != nil {
return err
}
session, err := p.vpeHandler.GetSession(ctx)
if err != nil {
return err
}
p.Log.WithFields(logging.Fields{
"PID": session.PID,
"ClientID": session.ClientIdx,
}).Infof("VPP version: %v", ver.Version)
modules, err := p.vpeHandler.GetModules(ctx)
if err != nil {
return err
}
p.Log.Debugf("VPP has %d core modules: %v", len(modules), modules)
plugins, err := p.vpeHandler.GetPlugins(ctx)
if err != nil {
return err
}
sort.Slice(plugins, func(i, j int) bool { return plugins[i].Name < plugins[j].Name })
p.Log.Debugf("VPP loaded %d plugins", len(plugins))
for _, plugin := range plugins {
p.Log.Debugf(" - plugin: %v", plugin)
}
p.infoMu.Lock()
p.vppInfo = VPPInfo{
Connected: true,
VersionInfo: *ver,
SessionInfo: *session,
Plugins: plugins,
}
p.infoMu.Unlock()
p.Log.Debugf("found %d registered VPP handlers", len(vpp.GetHandlers()))
for name, handler := range vpp.GetHandlers() {
versions := handler.Versions()
p.Log.Debugf("- handler: %-10s has %d versions: %v", name, len(versions), versions)
}
return nil
}
// handleVPPConnectionEvents handles VPP connection events.
func (p *Plugin) handleVPPConnectionEvents(ctx context.Context) {
defer p.wg.Done()
for {
select {
case event, ok := <-p.vppConChan:
if !ok {
p.lastConnErr = errors.Errorf("VPP connection state channel closed")
p.StatusCheck.ReportStateChange(p.PluginName, statuscheck.Error, p.lastConnErr)
return
}
if event.State == govpp.Connected {
if err := p.updateVPPInfo(); err != nil {
p.Log.Errorf("updating VPP info failed: %v", err)
}
if p.config.ReconnectResync && p.lastConnErr != nil {
p.Log.Info("Starting resync after VPP reconnect")
if p.Resync != nil {
p.Resync.DoResync()
p.lastConnErr = nil
} else {
p.Log.Warn("Expected resync after VPP reconnect could not start because of missing Resync plugin")
}
}
p.StatusCheck.ReportStateChange(p.PluginName, statuscheck.OK, nil)
} else if event.State == govpp.Failed || event.State == govpp.Disconnected {
p.infoMu.Lock()
p.vppInfo.Connected = false
p.infoMu.Unlock()
p.lastConnErr = errors.Errorf("VPP connection lost (event: %+v)", event)
p.StatusCheck.ReportStateChange(p.PluginName, statuscheck.Error, p.lastConnErr)
} else {
p.Log.Debugf("VPP connection state: %+v", event)
}
p.infoMu.Lock()
p.lastEvent = event
p.infoMu.Unlock()
case <-ctx.Done():
return
}
}
}
func (p *Plugin) startProxy(vppapi adapter.VppAPI, statsapi adapter.StatsAPI) (err error) {
p.Log.Infof("starting VPP proxy")
p.proxy, err = proxy.NewServer()
if err != nil {
return errors.WithMessage(err, "creating proxy failed")
}
if err = p.proxy.ConnectBinapi(vppapi); err != nil {
return errors.WithMessage(err, "connecting binapi for proxy failed")
}
if err = p.proxy.ConnectStats(statsapi); err != nil {
return errors.WithMessage(err, "connecting stats for proxy failed")
}
return nil
}
|
[
"\"GOVPPMUX_NOSOCK\""
] |
[] |
[
"GOVPPMUX_NOSOCK"
] |
[]
|
["GOVPPMUX_NOSOCK"]
|
go
| 1 | 0 | |
vkquick/bases/json_parser.py
|
from __future__ import annotations
import abc
import typing as ty
class JSONParser(abc.ABC):
"""
A namespace that combines JSON serialization and deserialization
methods into a single protocol. Implementations are used for
decoding/encoding the JSON responses from VK.
Implementations for some of the JSON libraries can be
found in [json_parsers.py](../json_parsers.py)
"""
@staticmethod
@abc.abstractmethod
def dumps(data: ty.Dict[str, ty.Any]) -> ty.Union[str, bytes]:
"""
Method that serializes JSON into a string
Args:
data: The value to serialize (only dicts are passed)
Returns:
A JSON string
"""
@staticmethod
@abc.abstractmethod
def loads(string: ty.Union[str, bytes]) -> ty.Dict[str, ty.Any]:
"""
Method that deserializes JSON from a string
Args:
string: A JSON string
Returns:
The dict represented by the JSON object in the string
"""
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
mesonbuild/programs.py
|
# Copyright 2013-2020 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Representations and logic for External and Internal Programs."""
import functools
import os
import shutil
import stat
import sys
import re
import typing as T
from pathlib import Path
from . import mesonlib
from . import mlog
from .mesonlib import MachineChoice
if T.TYPE_CHECKING:
from .environment import Environment
from .interpreter import Interpreter
class ExternalProgram(mesonlib.HoldableObject):
"""A program that is found on the system."""
windows_exts = ('exe', 'msc', 'com', 'bat', 'cmd')
for_machine = MachineChoice.BUILD
def __init__(self, name: str, command: T.Optional[T.List[str]] = None,
silent: bool = False, search_dir: T.Optional[str] = None,
extra_search_dirs: T.Optional[T.List[str]] = None):
self.name = name
self.path: T.Optional[str] = None
self.cached_version: T.Optional[str] = None
if command is not None:
self.command = mesonlib.listify(command)
if mesonlib.is_windows():
cmd = self.command[0]
args = self.command[1:]
# Check whether the specified cmd is a path to a script, in
# which case we need to insert the interpreter. If not, try to
# use it as-is.
ret = self._shebang_to_cmd(cmd)
if ret:
self.command = ret + args
else:
self.command = [cmd] + args
else:
all_search_dirs = [search_dir]
if extra_search_dirs:
all_search_dirs += extra_search_dirs
for d in all_search_dirs:
self.command = self._search(name, d)
if self.found():
break
if self.found():
# Set path to be the last item that is actually a file (in order to
# skip options in something like ['python', '-u', 'file.py']. If we
# can't find any components, default to the last component of the path.
for arg in reversed(self.command):
if arg is not None and os.path.isfile(arg):
self.path = arg
break
else:
self.path = self.command[-1]
if not silent:
# ignore the warning because derived classes never call this __init__
# method, and thus only the found() method of this class is ever executed
if self.found(): # lgtm [py/init-calls-subclass]
mlog.log('Program', mlog.bold(name), 'found:', mlog.green('YES'),
'(%s)' % ' '.join(self.command))
else:
mlog.log('Program', mlog.bold(name), 'found:', mlog.red('NO'))
def summary_value(self) -> T.Union[str, mlog.AnsiDecorator]:
if not self.found():
return mlog.red('NO')
return self.path
def __repr__(self) -> str:
r = '<{} {!r} -> {!r}>'
return r.format(self.__class__.__name__, self.name, self.command)
def description(self) -> str:
'''Human friendly description of the command'''
return ' '.join(self.command)
def get_version(self, interpreter: 'Interpreter') -> str:
if not self.cached_version:
raw_cmd = self.get_command() + ['--version']
cmd: T.List[T.Union[str, ExternalProgram]] = [self, '--version']
res = interpreter.run_command_impl(interpreter.current_node, cmd, {}, True)
if res.returncode != 0:
m = 'Running {!r} failed'
raise mesonlib.MesonException(m.format(raw_cmd))
output = res.stdout.strip()
if not output:
output = res.stderr.strip()
match = re.search(r'([0-9][0-9\.]+)', output)
if not match:
m = 'Could not find a version number in output of {!r}'
raise mesonlib.MesonException(m.format(raw_cmd))
self.cached_version = match.group(1)
return self.cached_version
@classmethod
def from_bin_list(cls, env: 'Environment', for_machine: MachineChoice, name: str) -> 'ExternalProgram':
# There is a static `for_machine` for this class because the binary
# always runs on the build platform. (Its host platform is our build
# platform.) But some external programs have a target platform, so this
# is what we are specifying here.
command = env.lookup_binary_entry(for_machine, name)
if command is None:
return NonExistingExternalProgram()
return cls.from_entry(name, command)
@staticmethod
@functools.lru_cache(maxsize=None)
def _windows_sanitize_path(path: str) -> str:
# Ensure that we use USERPROFILE even when inside MSYS, MSYS2, Cygwin, etc.
if 'USERPROFILE' not in os.environ:
return path
# The WindowsApps directory is a bit of a problem. It contains
# some zero-sized .exe files which have "reparse points", that
# might either launch an installed application, or might open
# a page in the Windows Store to download the application.
#
# To handle the case where the python interpreter we're
# running on came from the Windows Store, if we see the
# WindowsApps path in the search path, replace it with
# dirname(sys.executable).
appstore_dir = Path(os.environ['USERPROFILE']) / 'AppData' / 'Local' / 'Microsoft' / 'WindowsApps'
paths = []
for each in path.split(os.pathsep):
if Path(each) != appstore_dir:
paths.append(each)
elif 'WindowsApps' in sys.executable:
paths.append(os.path.dirname(sys.executable))
return os.pathsep.join(paths)
@staticmethod
def from_entry(name: str, command: T.Union[str, T.List[str]]) -> 'ExternalProgram':
if isinstance(command, list):
if len(command) == 1:
command = command[0]
# We cannot do any searching if the command is a list, and we don't
# need to search if the path is an absolute path.
if isinstance(command, list) or os.path.isabs(command):
if isinstance(command, str):
command = [command]
return ExternalProgram(name, command=command, silent=True)
assert isinstance(command, str)
# Search for the command using the specified string!
return ExternalProgram(command, silent=True)
@staticmethod
def _shebang_to_cmd(script: str) -> T.Optional[T.List[str]]:
"""
Check if the file has a shebang and manually parse it to figure out
the interpreter to use. This is useful if the script is not executable
or if we're on Windows (which does not understand shebangs).
"""
try:
with open(script, encoding='utf-8') as f:
first_line = f.readline().strip()
if first_line.startswith('#!'):
# In a shebang, everything before the first space is assumed to
# be the command to run and everything after the first space is
# the single argument to pass to that command. So we must split
# exactly once.
commands = first_line[2:].split('#')[0].strip().split(maxsplit=1)
if mesonlib.is_windows():
# Windows does not have UNIX paths so remove them,
# but don't remove Windows paths
if commands[0].startswith('/'):
commands[0] = commands[0].split('/')[-1]
if len(commands) > 0 and commands[0] == 'env':
commands = commands[1:]
# Windows does not ship python3.exe, but we know the path to it
if len(commands) > 0 and commands[0] == 'python3':
commands = mesonlib.python_command + commands[1:]
elif mesonlib.is_haiku():
# Haiku does not have /usr, but a lot of scripts assume that
# /usr/bin/env always exists. Detect that case and run the
# script with the interpreter after it.
if commands[0] == '/usr/bin/env':
commands = commands[1:]
# We know what python3 is, we're running on it
if len(commands) > 0 and commands[0] == 'python3':
commands = mesonlib.python_command + commands[1:]
else:
# Replace python3 with the actual python3 that we are using
if commands[0] == '/usr/bin/env' and commands[1] == 'python3':
commands = mesonlib.python_command + commands[2:]
elif commands[0].split('/')[-1] == 'python3':
commands = mesonlib.python_command + commands[1:]
return commands + [script]
except Exception as e:
mlog.debug(str(e))
mlog.debug(f'Unusable script {script!r}')
return None
def _is_executable(self, path: str) -> bool:
suffix = os.path.splitext(path)[-1].lower()[1:]
execmask = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
if mesonlib.is_windows():
if suffix in self.windows_exts:
return True
elif os.stat(path).st_mode & execmask:
return not os.path.isdir(path)
return False
def _search_dir(self, name: str, search_dir: T.Optional[str]) -> T.Optional[list]:
if search_dir is None:
return None
trial = os.path.join(search_dir, name)
if os.path.exists(trial):
if self._is_executable(trial):
return [trial]
# Now getting desperate. Maybe it is a script file that is
# a) not chmodded executable, or
# b) we are on windows so they can't be directly executed.
return self._shebang_to_cmd(trial)
else:
if mesonlib.is_windows():
for ext in self.windows_exts:
trial_ext = f'{trial}.{ext}'
if os.path.exists(trial_ext):
return [trial_ext]
return None
def _search_windows_special_cases(self, name: str, command: str) -> T.List[T.Optional[str]]:
'''
Lots of weird Windows quirks:
1. PATH search for @name returns files with extensions from PATHEXT,
but only self.windows_exts are executable without an interpreter.
2. @name might be an absolute path to an executable, but without the
extension. This works inside MinGW so people use it a lot.
3. The script is specified without an extension, in which case we have
to manually search in PATH.
4. More special-casing for the shebang inside the script.
'''
if command:
# On Windows, even if the PATH search returned a full path, we can't be
# sure that it can be run directly if it's not a native executable.
# For instance, interpreted scripts sometimes need to be run explicitly
# with an interpreter if the file association is not done properly.
name_ext = os.path.splitext(command)[1]
if name_ext[1:].lower() in self.windows_exts:
# Good, it can be directly executed
return [command]
# Try to extract the interpreter from the shebang
commands = self._shebang_to_cmd(command)
if commands:
return commands
return [None]
# Maybe the name is an absolute path to a native Windows
# executable, but without the extension. This is technically wrong,
# but many people do it because it works in the MinGW shell.
if os.path.isabs(name):
for ext in self.windows_exts:
command = f'{name}.{ext}'
if os.path.exists(command):
return [command]
# On Windows, interpreted scripts must have an extension otherwise they
# cannot be found by a standard PATH search. So we do a custom search
# where we manually search for a script with a shebang in PATH.
search_dirs = self._windows_sanitize_path(os.environ.get('PATH', '')).split(';')
for search_dir in search_dirs:
commands = self._search_dir(name, search_dir)
if commands:
return commands
return [None]
def _search(self, name: str, search_dir: T.Optional[str]) -> T.List[T.Optional[str]]:
'''
Search in the specified dir for the specified executable by name
and if not found search in PATH
'''
commands = self._search_dir(name, search_dir)
if commands:
return commands
# Do a standard search in PATH
path = os.environ.get('PATH', None)
if mesonlib.is_windows() and path:
path = self._windows_sanitize_path(path)
command = shutil.which(name, path=path)
if mesonlib.is_windows():
return self._search_windows_special_cases(name, command)
# On UNIX-like platforms, shutil.which() is enough to find
# all executables whether in PATH or with an absolute path
return [command]
def found(self) -> bool:
return self.command[0] is not None
def get_command(self) -> T.List[str]:
return self.command[:]
def get_path(self) -> T.Optional[str]:
return self.path
def get_name(self) -> str:
return self.name
class NonExistingExternalProgram(ExternalProgram): # lgtm [py/missing-call-to-init]
"A program that will never exist"
def __init__(self, name: str = 'nonexistingprogram') -> None:
self.name = name
self.command = [None]
self.path = None
def __repr__(self) -> str:
r = '<{} {!r} -> {!r}>'
return r.format(self.__class__.__name__, self.name, self.command)
def found(self) -> bool:
return False
class EmptyExternalProgram(ExternalProgram): # lgtm [py/missing-call-to-init]
'''
A program object that returns an empty list of commands. Used for cases
such as a cross file exe_wrapper to represent that it's not required.
'''
def __init__(self) -> None:
self.name = None
self.command = []
self.path = None
def __repr__(self) -> str:
r = '<{} {!r} -> {!r}>'
return r.format(self.__class__.__name__, self.name, self.command)
def found(self) -> bool:
return True
class OverrideProgram(ExternalProgram):
"""A script overriding a program."""
def find_external_program(env: 'Environment', for_machine: MachineChoice, name: str,
display_name: str, default_names: T.List[str],
allow_default_for_cross: bool = True) -> T.Generator['ExternalProgram', None, None]:
"""Find an external program, chcking the cross file plus any default options."""
# Lookup in cross or machine file.
potential_cmd = env.lookup_binary_entry(for_machine, name)
if potential_cmd is not None:
mlog.debug(f'{display_name} binary for {for_machine} specified from cross file, native file, '
f'or env var as {potential_cmd}')
yield ExternalProgram.from_entry(name, potential_cmd)
# We never fallback if the user-specified option is no good, so
# stop returning options.
return
mlog.debug(f'{display_name} binary missing from cross or native file, or env var undefined.')
# Fallback on hard-coded defaults, if a default binary is allowed for use
# with cross targets, or if this is not a cross target
if allow_default_for_cross or not (for_machine is MachineChoice.HOST and env.is_cross_build(for_machine)):
for potential_path in default_names:
mlog.debug(f'Trying a default {display_name} fallback at', potential_path)
yield ExternalProgram(potential_path, silent=True)
else:
mlog.debug('Default target is not allowed for cross use')
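# Illustrative usage sketch (not part of the original module): one way a caller
# might consume the find_external_program() generator above. The helper name is
# hypothetical; it simply returns the first candidate that reports found() and
# falls back to the NonExistingExternalProgram sentinel otherwise.
def _pick_first_found_program(env: 'Environment', for_machine: MachineChoice, name: str,
                              default_names: T.List[str]) -> ExternalProgram:
    # name doubles as the display name here purely for brevity
    for candidate in find_external_program(env, for_machine, name, name, default_names):
        if candidate.found():
            return candidate
    return NonExistingExternalProgram(name)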
|
[] |
[] |
[
"PATH",
"USERPROFILE"
] |
[]
|
["PATH", "USERPROFILE"]
|
python
| 2 | 0 | |
examples/lammps/melting/lammps-4nodes.py
|
# a small 4-node example
# input file
infile = 'in.melt'
# --- include the following 4 lines each time ---
import networkx as nx
import os
import imp
wf = imp.load_source('workflow', os.environ['DECAF_PREFIX'] + '/python/decaf.py')
# --- set your options here ---
# path to .so module for dataflow callback functions
mod_path = os.environ['DECAF_PREFIX'] + '/examples/lammps/melting/mod_lammps.so'
# define workflow graph
# 4-node workflow
#
# print (1 proc)
# /
# lammps (4 procs)
# \
# print2 (1 proc) - print (1 proc)
#
# entire workflow takes 10 procs (1 link proc between each producer consumer pair)
#
# --- Graph definition ---
lammps = wf.Node("lammps", start_proc=0, nprocs=4, func='lammps', cmdline='./lammps')
outPort0 = lammps.addOutputPort("out")
print1 = wf.Node("print1", start_proc=5, nprocs=1, func='print', cmdline='./lammps')
inPort1 = print1.addInputPort("in")
print2 = wf.Node("print2", start_proc=7, nprocs=1, func='print2', cmdline='./lammps')
inPort2 = print2.addInputPort("in")
outPort2 = print2.addOutputPort("out")
print3 = wf.Node("print3", start_proc=9, nprocs=1, func='print', cmdline='./lammps')
inPort3 = print3.addInputPort("in")
link1 = wf.Edge(lammps.getOutputPort("out"), print1.getInputPort("in"), start_proc=4, nprocs=1, func='dflow',
path=mod_path, prod_dflow_redist='count', dflow_con_redist='count', cmdline='./lammps')
link2 = wf.Edge(lammps.getOutputPort("out"), print2.getInputPort("in"), start_proc=6, nprocs=1, func='dflow',
path=mod_path, prod_dflow_redist='count', dflow_con_redist='count', cmdline='./lammps')
link3 = wf.Edge(print2.getOutputPort("out"), print3.getInputPort("in"), start_proc=8, nprocs=1, func='dflow',
path=mod_path, prod_dflow_redist='count', dflow_con_redist='count', cmdline='./lammps')
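# Worked process layout, read off the start_proc/nprocs arguments above
# (10 MPI ranks in total, matching the comment in the graph sketch):
#   ranks 0-3: lammps   rank 4: link1   rank 5: print1   rank 6: link2
#   rank 7:    print2   rank 8: link3   rank 9: print3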
# --- convert the nx graph into a workflow data structure and run the workflow ---
wf.processGraph("lammps",infile)
|
[] |
[] |
[
"DECAF_PREFIX"
] |
[]
|
["DECAF_PREFIX"]
|
python
| 1 | 0 | |
test/functional/test_framework/util.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import unhexlify
from decimal import Decimal, ROUND_DOWN
from subprocess import CalledProcessError
import inspect
import json
import logging
import os
import random
import re
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
from io import BytesIO
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_approx(v, vexp, vspan=0.00001):
"""Assert that `v` is within `vspan` of `vexp`"""
if v < vexp - vspan:
raise AssertionError("%s < [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
if v > vexp + vspan:
raise AssertionError("%s > [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = round(tx_size * fee_per_kB / 1000, 8)
if fee < target_fee:
raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
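# Worked example (illustrative numbers, not taken from any test): with
# tx_size=250 bytes and fee_per_kB=0.00001, the target fee is
# round(250 * 0.00001 / 1000, 8) = 0.0000025 and the accepted upper bound is
# (250 + 2) * 0.00001 / 1000 = 0.00000252, i.e. the wallet's estimate may
# overshoot by at most the cost of 2 extra bytes.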
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError(
"Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format(
message, e.error['message']))
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode (int): the process return code.
output (string): [a substring of] the process output.
fun (function): the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required.
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError(
"Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format(
message, e.error['message']))
return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e6)
if satoshis != 20000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def EncodeDecimal(o):
if isinstance(o, Decimal):
return str(o)
raise TypeError(repr(o) + " is not JSON serializable")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.000001'), rounding=ROUND_DOWN)
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
time_end = time.time() + timeout
while attempt < attempts and time.time() < time_end:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.05)
# Print the cause of the timeout
predicate_source = "''''\n" + inspect.getsource(predicate) + "'''"
logger.error("wait_until() failed. Predicate: {}".format(predicate_source))
if attempt >= attempts:
raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts))
elif time.time() >= time_end:
raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout))
raise RuntimeError('Unreachable')
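# Illustrative usage sketch (not part of this file): wait_until() is normally
# given a zero-argument predicate, e.g. waiting for a hypothetical test node's
# mempool to drain within 30 seconds:
#     wait_until(lambda: node.getmempoolinfo()['size'] == 0, timeout=30)
# If neither `attempts` nor `timeout` is supplied, the timeout defaults to 60s.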
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 12
# Don't assign rpc or p2p ports lower than this
PORT_MIN = int(os.getenv('TEST_RUNNER_PORT_MIN', default=11000))
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def get_rpc_proxy(url, node_number, *, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
coveragedir (str): Directory
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert n <= MAX_NODES
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
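# Worked example (assumed values): with PortSeed.n = 1, MAX_NODES = 12,
# PORT_MIN = 11000 and PORT_RANGE = 5000, the per-process offset is
# (12 * 1) % (5000 - 1 - 12) = 12, so p2p_port(0) = 11000 + 0 + 12 = 11012 and
# rpc_port(0) = 11000 + 5000 + 0 + 12 = 16012. Seeding PortSeed.n per test
# process keeps concurrently running tests from colliding on ports.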
def rpc_url(datadir, i, chain, rpchost):
rpc_u, rpc_p = get_auth_cookie(datadir, chain)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n, chain):
datadir = get_datadir_path(dirname, n)
if not os.path.isdir(datadir):
os.makedirs(datadir)
# Translate chain name to config name
if chain == 'testnet3':
chain_name_conf_arg = 'testnet'
chain_name_conf_section = 'test'
else:
chain_name_conf_arg = chain
chain_name_conf_section = chain
with open(os.path.join(datadir, "slimcoin.conf"), 'w', encoding='utf8') as f:
f.write("{}=1\n".format(chain_name_conf_arg))
f.write("[{}]\n".format(chain_name_conf_section))
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("fallbackfee=0.0002\n")
f.write("server=1\n")
f.write("keypool=1\n")
f.write("discover=0\n")
f.write("dnsseed=0\n")
f.write("listenonion=0\n")
f.write("printtoconsole=0\n")
f.write("upnp=0\n")
f.write("shrinkdebugfile=0\n")
os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True)
os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
return datadir
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def append_config(datadir, options):
with open(os.path.join(datadir, "bitcoin.conf"), 'a', encoding='utf8') as f:
for option in options:
f.write(option + "\n")
def get_auth_cookie(datadir, chain):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "slimcoin.conf")):
with open(os.path.join(datadir, "slimcoin.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
try:
with open(os.path.join(datadir, chain, ".cookie"), 'r', encoding="ascii") as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
except OSError:
pass
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir, chain):
if os.path.isfile(os.path.join(datadir, chain, ".cookie")):
logger.debug("Deleting leftover cookie file")
os.remove(os.path.join(datadir, chain, ".cookie"))
def softfork_active(node, key):
"""Return whether a softfork is active."""
return node.getblockchaininfo()['softforks'][key]['active']
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
try:
from_connection.disconnectnode(nodeid=peer_id)
except JSONRPCException as e:
# If this node is disconnected between calculating the peer id
# and issuing the disconnect, don't worry about it.
# This avoids a race condition if we're mass-disconnecting peers.
if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
raise
# wait to disconnect
wait_until(lambda: [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5)
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:" + str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
    sync_blocks needs to be called with an rpc_connections set that has at least
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
"""
stop_time = time.time() + timeout
while time.time() <= stop_time:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash.count(best_hash[0]) == len(rpc_connections):
return
# Check that each peer has at least one connection
assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
time.sleep(wait)
raise AssertionError("Block sync timed out:{}".format("".join("\n {!r}".format(b) for b in best_hash)))
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
"""
Wait until everybody has the same transactions in their memory
pools
"""
stop_time = time.time() + timeout
while time.time() <= stop_time:
pool = [set(r.getrawmempool()) for r in rpc_connections]
if pool.count(pool[0]) == len(rpc_connections):
if flush_scheduler:
for r in rpc_connections:
r.syncwithvalidationinterfacequeue()
return
# Check that each peer has at least one connection
assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
time.sleep(wait)
raise AssertionError("Mempool sync timed out:{}".format("".join("\n {!r}".format(m) for m in pool)))
# Transaction/Block functions
#############################
def find_output(node, txid, amount, *, blockhash=None):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1, blockhash)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert confirmations_required >= 0
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out + fee
change = amount_in - amount
if change > amount * 2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
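# Worked example (illustrative values): with amount_in=50, amount_out=10 and
# fee=0.01, amount = 10.01 and change = 39.99. Because 39.99 > 2 * 10.01 the
# change is split: the first change output gets 39.99 / 2 rounded down to six
# decimal places, i.e. 19.995, and the remainder 50 - 10.01 - 19.995 = 19.995
# goes to a second fresh address.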
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment * random.randint(0, fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount + fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransactionwithwallet(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], 0)
return (txid, signresult["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value / 2)
outputs[addr2] = satoshi_round(send_value / 2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert len(utxos) >= count
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for i in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = []
from .messages import CTxOut
txout = CTxOut()
txout.nValue = 0
txout.scriptPubKey = hex_str_to_bytes(script_pubkey)
for k in range(128):
txouts.append(txout)
return txouts
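# Size sanity check (derived from the constants above): each script is 4 bytes
# of OP_RETURN/OP_PUSH2 header plus 512 data bytes = 516 bytes, so each CTxOut
# serializes to roughly 8 (nValue) + 3 (script length) + 516 = 527 bytes, and
# splicing 128 of them into a transaction adds about 67 kB -- which is where
# the "66k transaction" figure in mine_large_block() below comes from.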
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
from .messages import CTransaction
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
tx.deserialize(BytesIO(hex_str_to_bytes(rawtx)))
for txout in txouts:
tx.vout.append(txout)
newtx = tx.serialize().hex()
signresult = node.signrawtransactionwithwallet(newtx, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], 0)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
# generate a 66k transaction,
    # and 14 of them are close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
def find_vout_for_address(node, txid, addr):
"""
Locate the vout index of the given transaction sending to the
given address. Raises runtime error exception if not found.
"""
tx = node.getrawtransaction(txid, True)
for i in range(len(tx["vout"])):
if any([addr == a for a in tx["vout"][i]["scriptPubKey"]["addresses"]]):
return i
raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))
|
[] |
[] |
[
"TEST_RUNNER_PORT_MIN"
] |
[]
|
["TEST_RUNNER_PORT_MIN"]
|
python
| 1 | 0 | |
comptests/docker.go
|
package comptests
import (
"log"
"net"
"os"
"os/exec"
"time"
"github.com/ONSdigital/dp-find-insights-poc-api/pkg/database"
"gorm.io/driver/postgres"
"gorm.io/gorm"
)
const DefaultDSN = "postgres://insights:insights@localhost:54322/censustest"
const DefaultPostgresPW = "mylocalsecret"
func SetupDockerDB(dsn string) {
log.SetFlags(log.LstdFlags | log.Lshortfile)
_, _, host, port, _ := database.ParseDSN(dsn)
user := "postgres"
db := "postgres"
// get password from env, or fall back to default
var pw string
envPW := os.Getenv("POSTGRES_PASSWORD")
if envPW != "" {
pw = envPW
} else {
pw = DefaultPostgresPW
os.Setenv("POSTGRES_PASSWORD", DefaultPostgresPW)
}
dsn = database.CreatDSN(user, pw, host, port, db)
// is docker postgres+postgis running?
_, err := net.DialTimeout("tcp", net.JoinHostPort("localhost", port), time.Second)
if err != nil {
log.Println("starting postgres docker")
go func() {
cmd := exec.Command("docker", "run", "--rm", "--name", "postgis", "--publish", port+":5432", "-e", "POSTGRES_PASSWORD="+pw, "postgis/postgis")
if err := cmd.Run(); err != nil {
log.Fatalf("is docker installed and running? %v", err)
}
}()
// poll for start up
for {
time.Sleep(time.Second)
_, err := gorm.Open(postgres.Open(dsn), &gorm.Config{})
if err == nil {
log.Printf("connected to %s", dsn)
break
}
log.Println("polling for started docker...")
}
}
log.Println("postgres docker running")
}
func KillDockerDB() {
cmd := exec.Command("docker", "container", "kill", "postgis")
if err := cmd.Run(); err != nil {
log.Print(err)
}
log.Fatal("exiting")
}
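// Illustrative usage sketch (not part of this package): a test binary would
// typically bring the container up once in TestMain and tear it down at the
// end. The testing import is an assumption for the example; note that
// KillDockerDB calls log.Fatal, so it terminates the process itself.
//
//	func TestMain(m *testing.M) {
//	    comptests.SetupDockerDB(comptests.DefaultDSN)
//	    m.Run()
//	    comptests.KillDockerDB() // exits via log.Fatal
//	}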
|
[
"\"POSTGRES_PASSWORD\""
] |
[] |
[
"POSTGRES_PASSWORD"
] |
[]
|
["POSTGRES_PASSWORD"]
|
go
| 1 | 0 | |
airflow/executors/celery_executor.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CeleryExecutor
.. seealso::
For more information on how the CeleryExecutor works, take a look at the guide:
:ref:`executor:CeleryExecutor`
"""
import datetime
import logging
import math
import operator
import os
import subprocess
import time
import traceback
from collections import OrderedDict
from multiprocessing import Pool, cpu_count
from typing import Any, Dict, List, Mapping, MutableMapping, Optional, Set, Tuple, Union
from celery import Celery, Task, states as celery_states
from celery.backends.base import BaseKeyValueStoreBackend
from celery.backends.database import DatabaseBackend, session_cleanup
from celery.result import AsyncResult
from celery.signals import import_modules as celery_import_modules
from setproctitle import setproctitle # pylint: disable=no-name-in-module
import airflow.settings as settings
from airflow.config_templates.default_celery import DEFAULT_CELERY_CONFIG
from airflow.configuration import conf
from airflow.exceptions import AirflowException, AirflowTaskTimeout
from airflow.executors.base_executor import BaseExecutor, CommandType, EventBufferValueType
from airflow.models.taskinstance import SimpleTaskInstance, TaskInstance, TaskInstanceKey
from airflow.stats import Stats
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.net import get_hostname
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from airflow.utils.timezone import utcnow
log = logging.getLogger(__name__)
# Make it constant for unit test.
CELERY_FETCH_ERR_MSG_HEADER = 'Error fetching Celery task state'
CELERY_SEND_ERR_MSG_HEADER = 'Error sending Celery task'
OPERATION_TIMEOUT = conf.getfloat('celery', 'operation_timeout', fallback=1.0)
'''
To start the celery worker, run the command:
airflow celery worker
'''
if conf.has_option('celery', 'celery_config_options'):
celery_configuration = conf.getimport('celery', 'celery_config_options')
else:
celery_configuration = DEFAULT_CELERY_CONFIG
app = Celery(conf.get('celery', 'CELERY_APP_NAME'), config_source=celery_configuration)
@app.task
def execute_command(command_to_exec: CommandType) -> None:
"""Executes command."""
BaseExecutor.validate_command(command_to_exec)
log.info("Executing command in Celery: %s", command_to_exec)
if settings.EXECUTE_TASKS_NEW_PYTHON_INTERPRETER:
_execute_in_subprocess(command_to_exec)
else:
_execute_in_fork(command_to_exec)
def _execute_in_fork(command_to_exec: CommandType) -> None:
pid = os.fork()
if pid:
# In parent, wait for the child
pid, ret = os.waitpid(pid, 0)
if ret == 0:
return
raise AirflowException('Celery command failed on host: ' + get_hostname())
from airflow.sentry import Sentry
ret = 1
try:
from airflow.cli.cli_parser import get_parser
settings.engine.pool.dispose()
settings.engine.dispose()
parser = get_parser()
# [1:] - remove "airflow" from the start of the command
args = parser.parse_args(command_to_exec[1:])
args.shut_down_logging = False
setproctitle(f"airflow task supervisor: {command_to_exec}")
args.func(args)
ret = 0
except Exception as e: # pylint: disable=broad-except
log.error("Failed to execute task %s.", str(e))
ret = 1
finally:
Sentry.flush()
logging.shutdown()
os._exit(ret) # pylint: disable=protected-access
def _execute_in_subprocess(command_to_exec: CommandType) -> None:
env = os.environ.copy()
try:
# pylint: disable=unexpected-keyword-arg
subprocess.check_output(command_to_exec, stderr=subprocess.STDOUT, close_fds=True, env=env)
# pylint: disable=unexpected-keyword-arg
except subprocess.CalledProcessError as e:
log.exception('execute_command encountered a CalledProcessError')
log.error(e.output)
msg = 'Celery command failed on host: ' + get_hostname()
raise AirflowException(msg)
class ExceptionWithTraceback:
"""
Wrapper class used to propagate exceptions to parent processes from subprocesses.
:param exception: The exception to wrap
:type exception: Exception
:param exception_traceback: The stacktrace to wrap
:type exception_traceback: str
"""
def __init__(self, exception: Exception, exception_traceback: str):
self.exception = exception
self.traceback = exception_traceback
# Task instance that is sent over Celery queues
# TaskInstanceKey, SimpleTaskInstance, Command, queue_name, CallableTask
TaskInstanceInCelery = Tuple[TaskInstanceKey, SimpleTaskInstance, CommandType, Optional[str], Task]
def send_task_to_executor(
task_tuple: TaskInstanceInCelery,
) -> Tuple[TaskInstanceKey, CommandType, Union[AsyncResult, ExceptionWithTraceback]]:
"""Sends task to executor."""
key, _, command, queue, task_to_run = task_tuple
try:
with timeout(seconds=OPERATION_TIMEOUT):
result = task_to_run.apply_async(args=[command], queue=queue)
except Exception as e: # pylint: disable=broad-except
exception_traceback = f"Celery Task ID: {key}\n{traceback.format_exc()}"
result = ExceptionWithTraceback(e, exception_traceback)
return key, command, result
# pylint: disable=unused-import
@celery_import_modules.connect
def on_celery_import_modules(*args, **kwargs):
"""
    Preload some "expensive" airflow modules so that every task process doesn't have to import them
    again and again.
Loading these for each task adds 0.3-0.5s *per task* before the task can run. For long running tasks this
doesn't matter, but for short tasks this starts to be a noticeable impact.
"""
import jinja2.ext # noqa: F401
import numpy # noqa: F401
import airflow.jobs.local_task_job
import airflow.macros
import airflow.operators.bash
import airflow.operators.python
import airflow.operators.subdag # noqa: F401
try:
import kubernetes.client # noqa: F401
except ImportError:
pass
# pylint: enable=unused-import
class CeleryExecutor(BaseExecutor):
"""
CeleryExecutor is recommended for production use of Airflow. It allows
distributing the execution of task instances to multiple worker nodes.
Celery is a simple, flexible and reliable distributed system to process
vast amounts of messages, while providing operations with the tools
required to maintain such a system.
"""
def __init__(self):
super().__init__()
# Celery doesn't support bulk sending the tasks (which can become a bottleneck on bigger clusters)
# so we use a multiprocessing pool to speed this up.
# How many worker processes are created for checking celery task state.
self._sync_parallelism = conf.getint('celery', 'SYNC_PARALLELISM')
if self._sync_parallelism == 0:
self._sync_parallelism = max(1, cpu_count() - 1)
self.bulk_state_fetcher = BulkStateFetcher(self._sync_parallelism)
self.tasks = {}
# Mapping of tasks we've adopted, ordered by the earliest date they timeout
self.adopted_task_timeouts: Dict[TaskInstanceKey, datetime.datetime] = OrderedDict()
self.task_adoption_timeout = datetime.timedelta(
seconds=conf.getint('celery', 'task_adoption_timeout', fallback=600)
)
self.task_publish_retries: Dict[TaskInstanceKey, int] = OrderedDict()
self.task_publish_max_retries = conf.getint('celery', 'task_publish_max_retries', fallback=3)
def start(self) -> None:
self.log.debug('Starting Celery Executor using %s processes for syncing', self._sync_parallelism)
def _num_tasks_per_send_process(self, to_send_count: int) -> int:
"""
How many Celery tasks should each worker process send.
:return: Number of tasks that should be sent per process
:rtype: int
"""
return max(1, int(math.ceil(1.0 * to_send_count / self._sync_parallelism)))
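    # Worked example (illustrative numbers): with to_send_count=10 and
    # _sync_parallelism=4, _num_tasks_per_send_process returns ceil(10 / 4) = 3,
    # so each of the four pool processes is handed at most three tasks to publish.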
def trigger_tasks(self, open_slots: int) -> None:
"""
Overwrite trigger_tasks function from BaseExecutor
:param open_slots: Number of open slots
:return:
"""
sorted_queue = self.order_queued_tasks_by_priority()
task_tuples_to_send: List[TaskInstanceInCelery] = []
for _ in range(min(open_slots, len(self.queued_tasks))):
key, (command, _, queue, simple_ti) = sorted_queue.pop(0)
task_tuple = (key, simple_ti, command, queue, execute_command)
task_tuples_to_send.append(task_tuple)
if key not in self.task_publish_retries:
self.task_publish_retries[key] = 1
if task_tuples_to_send:
self._process_tasks(task_tuples_to_send)
def _process_tasks(self, task_tuples_to_send: List[TaskInstanceInCelery]) -> None:
first_task = next(t[4] for t in task_tuples_to_send)
        # Celery state queries will get stuck if we do not use the same backend
        # for all tasks.
cached_celery_backend = first_task.backend
key_and_async_results = self._send_tasks_to_celery(task_tuples_to_send)
self.log.debug('Sent all tasks.')
for key, _, result in key_and_async_results:
if isinstance(result, ExceptionWithTraceback) and isinstance(
result.exception, AirflowTaskTimeout
):
if key in self.task_publish_retries and (
self.task_publish_retries.get(key) <= self.task_publish_max_retries
):
Stats.incr("celery.task_timeout_error")
self.log.info(
"[Try %s of %s] Task Timeout Error for Task: (%s).",
self.task_publish_retries[key],
self.task_publish_max_retries,
key,
)
self.task_publish_retries[key] += 1
continue
self.queued_tasks.pop(key)
self.task_publish_retries.pop(key)
if isinstance(result, ExceptionWithTraceback):
self.log.error( # pylint: disable=logging-not-lazy
CELERY_SEND_ERR_MSG_HEADER + ": %s\n%s\n", result.exception, result.traceback
)
self.event_buffer[key] = (State.FAILED, None)
elif result is not None:
result.backend = cached_celery_backend
self.running.add(key)
self.tasks[key] = result
# Store the Celery task_id in the event buffer. This will get "overwritten" if the task
# has another event, but that is fine, because the only other events are success/failed at
# which point we don't need the ID anymore anyway
self.event_buffer[key] = (State.QUEUED, result.task_id)
# If the task runs _really quickly_ we may already have a result!
self.update_task_state(key, result.state, getattr(result, 'info', None))
def _send_tasks_to_celery(self, task_tuples_to_send: List[TaskInstanceInCelery]):
if len(task_tuples_to_send) == 1 or self._sync_parallelism == 1:
# One tuple, or max one process -> send it in the main thread.
return list(map(send_task_to_executor, task_tuples_to_send))
# Use chunks instead of a work queue to reduce context switching
# since tasks are roughly uniform in size
chunksize = self._num_tasks_per_send_process(len(task_tuples_to_send))
num_processes = min(len(task_tuples_to_send), self._sync_parallelism)
def reset_signals():
            # Since we are run from inside the SchedulerJob, we don't want to
            # inherit the signal handlers that we registered there.
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGUSR2, signal.SIG_DFL)
with Pool(processes=num_processes, initializer=reset_signals) as send_pool:
key_and_async_results = send_pool.map(
send_task_to_executor, task_tuples_to_send, chunksize=chunksize
)
return key_and_async_results
def sync(self) -> None:
if not self.tasks:
self.log.debug("No task to query celery, skipping sync")
return
self.update_all_task_states()
if self.adopted_task_timeouts:
self._check_for_stalled_adopted_tasks()
def _check_for_stalled_adopted_tasks(self):
"""
See if any of the tasks we adopted from another Executor run have not
progressed after the configured timeout.
If they haven't, they likely never made it to Celery, and we should
just resend them. We do that by clearing the state and letting the
normal scheduler loop deal with that
"""
now = utcnow()
timedout_keys = []
for key, stalled_after in self.adopted_task_timeouts.items():
if stalled_after > now:
# Since items are stored sorted, if we get to a stalled_after
# in the future then we can stop
break
# If the task gets updated to STARTED (which Celery does) or has
# already finished, then it will be removed from this list -- so
            # the only time it's still in this list is when it a) never made it
            # to celery in the first place (i.e. a race condition somewhere in
            # the dying executor) or b) is stuck in a really long celery queue
            # and just hasn't started yet -- better to cancel it and let the
            # scheduler re-queue it rather than risk the task stalling forever
timedout_keys.append(key)
if timedout_keys:
self.log.error(
"Adopted tasks were still pending after %s, assuming they never made it to celery and "
"clearing:\n\t%s",
self.task_adoption_timeout,
"\n\t".join([repr(x) for x in timedout_keys]),
)
for key in timedout_keys:
self.event_buffer[key] = (State.FAILED, None)
del self.tasks[key]
del self.adopted_task_timeouts[key]
def debug_dump(self) -> None:
"""Called in response to SIGUSR2 by the scheduler"""
super().debug_dump()
self.log.info(
"executor.tasks (%d)\n\t%s", len(self.tasks), "\n\t".join(map(repr, self.tasks.items()))
)
self.log.info(
"executor.adopted_task_timeouts (%d)\n\t%s",
len(self.adopted_task_timeouts),
"\n\t".join(map(repr, self.adopted_task_timeouts.items())),
)
def update_all_task_states(self) -> None:
"""Updates states of the tasks."""
self.log.debug("Inquiring about %s celery task(s)", len(self.tasks))
state_and_info_by_celery_task_id = self.bulk_state_fetcher.get_many(self.tasks.values())
self.log.debug("Inquiries completed.")
for key, async_result in list(self.tasks.items()):
state, info = state_and_info_by_celery_task_id.get(async_result.task_id)
if state:
self.update_task_state(key, state, info)
def change_state(self, key: TaskInstanceKey, state: str, info=None) -> None:
super().change_state(key, state, info)
self.tasks.pop(key, None)
self.adopted_task_timeouts.pop(key, None)
def update_task_state(self, key: TaskInstanceKey, state: str, info: Any) -> None:
"""Updates state of a single task."""
try:
if state == celery_states.SUCCESS:
self.success(key, info)
elif state in (celery_states.FAILURE, celery_states.REVOKED):
self.fail(key, info)
elif state == celery_states.STARTED:
                # It's now actually running, so we know it made it to celery okay!
self.adopted_task_timeouts.pop(key, None)
elif state == celery_states.PENDING:
pass
else:
self.log.info("Unexpected state for %s: %s", key, state)
except Exception: # noqa pylint: disable=broad-except
self.log.exception("Error syncing the Celery executor, ignoring it.")
def end(self, synchronous: bool = False) -> None:
if synchronous:
while any(task.state not in celery_states.READY_STATES for task in self.tasks.values()):
time.sleep(5)
self.sync()
def execute_async(
self,
key: TaskInstanceKey,
command: CommandType,
queue: Optional[str] = None,
executor_config: Optional[Any] = None,
):
"""Do not allow async execution for Celery executor."""
raise AirflowException("No Async execution for Celery executor.")
def terminate(self):
pass
def try_adopt_task_instances(self, tis: List[TaskInstance]) -> List[TaskInstance]:
# See which of the TIs are still alive (or have finished even!)
#
# Since Celery doesn't store "SENT" state for queued commands (if we create an AsyncResult with a made
# up id it just returns PENDING state for it), we have to store Celery's task_id against the TI row to
# look at in future.
#
# This process is not perfect -- we could have sent the task to celery, and crashed before we were
# able to record the AsyncResult.task_id in the TaskInstance table, in which case we won't adopt the
# task (it'll either run and update the TI state, or the scheduler will clear and re-queue it. Either
# way it won't get executed more than once)
#
# (If we swapped it around, and generated a task_id for Celery, stored that in TI and enqueued that
# there is also still a race condition where we could generate and store the task_id, but die before
# we managed to enqueue the command. Since neither way is perfect we always have to deal with this
# process not being perfect.)
celery_tasks = {}
not_adopted_tis = []
for ti in tis:
if ti.external_executor_id is not None:
celery_tasks[ti.external_executor_id] = (AsyncResult(ti.external_executor_id), ti)
else:
not_adopted_tis.append(ti)
if not celery_tasks:
# Nothing to adopt
return tis
states_by_celery_task_id = self.bulk_state_fetcher.get_many(
map(operator.itemgetter(0), celery_tasks.values())
)
adopted = []
cached_celery_backend = next(iter(celery_tasks.values()))[0].backend
for celery_task_id, (state, info) in states_by_celery_task_id.items():
result, ti = celery_tasks[celery_task_id]
result.backend = cached_celery_backend
# Set the correct elements of the state dicts, then update this
# like we just queried it.
self.adopted_task_timeouts[ti.key] = ti.queued_dttm + self.task_adoption_timeout
self.tasks[ti.key] = result
self.running.add(ti.key)
self.update_task_state(ti.key, state, info)
adopted.append(f"{ti} in state {state}")
if adopted:
task_instance_str = '\n\t'.join(adopted)
self.log.info(
"Adopted the following %d tasks from a dead executor\n\t%s", len(adopted), task_instance_str
)
return not_adopted_tis
def fetch_celery_task_state(async_result: AsyncResult) -> Tuple[str, Union[str, ExceptionWithTraceback], Any]:
"""
Fetch and return the state of the given celery task. The scope of this function is
global so that it can be called by subprocesses in the pool.
    :param async_result: the Celery AsyncResult object used to fetch the state
        of the task
    :type async_result: celery.result.AsyncResult
    :return: a tuple of the Celery task id, the Celery state (or an
        ExceptionWithTraceback if fetching the state failed) and the celery info
        of the task
    :rtype: tuple[str, str | ExceptionWithTraceback, Any]
"""
try:
with timeout(seconds=OPERATION_TIMEOUT):
# Accessing state property of celery task will make actual network request
# to get the current state of the task
info = async_result.info if hasattr(async_result, 'info') else None
return async_result.task_id, async_result.state, info
except Exception as e: # pylint: disable=broad-except
exception_traceback = f"Celery Task ID: {async_result}\n{traceback.format_exc()}"
return async_result.task_id, ExceptionWithTraceback(e, exception_traceback), None
def _tasks_list_to_task_ids(async_tasks) -> Set[str]:
return {a.task_id for a in async_tasks}
class BulkStateFetcher(LoggingMixin):
"""
Gets status for many Celery tasks using the best method available
If BaseKeyValueStoreBackend is used as result backend, the mget method is used.
If DatabaseBackend is used as result backend, the SELECT ...WHERE task_id IN (...) query is used
Otherwise, multiprocessing.Pool will be used. Each task status will be downloaded individually.
"""
def __init__(self, sync_parallelism=None):
super().__init__()
self._sync_parallelism = sync_parallelism
def get_many(self, async_results) -> Mapping[str, EventBufferValueType]:
"""Gets status for many Celery tasks using the best method available."""
if isinstance(app.backend, BaseKeyValueStoreBackend):
result = self._get_many_from_kv_backend(async_results)
return result
if isinstance(app.backend, DatabaseBackend):
result = self._get_many_from_db_backend(async_results)
return result
result = self._get_many_using_multiprocessing(async_results)
self.log.debug("Fetched %d states for %d task", len(result), len(async_results))
return result
def _get_many_from_kv_backend(self, async_tasks) -> Mapping[str, EventBufferValueType]:
task_ids = _tasks_list_to_task_ids(async_tasks)
keys = [app.backend.get_key_for_task(k) for k in task_ids]
values = app.backend.mget(keys)
task_results = [app.backend.decode_result(v) for v in values if v]
task_results_by_task_id = {task_result["task_id"]: task_result for task_result in task_results}
return self._prepare_state_and_info_by_task_dict(task_ids, task_results_by_task_id)
def _get_many_from_db_backend(self, async_tasks) -> Mapping[str, EventBufferValueType]:
task_ids = _tasks_list_to_task_ids(async_tasks)
session = app.backend.ResultSession()
task_cls = app.backend.task_cls
with session_cleanup(session):
tasks = session.query(task_cls).filter(task_cls.task_id.in_(task_ids)).all()
task_results = [app.backend.meta_from_decoded(task.to_dict()) for task in tasks]
task_results_by_task_id = {task_result["task_id"]: task_result for task_result in task_results}
return self._prepare_state_and_info_by_task_dict(task_ids, task_results_by_task_id)
@staticmethod
def _prepare_state_and_info_by_task_dict(
task_ids, task_results_by_task_id
) -> Mapping[str, EventBufferValueType]:
state_info: MutableMapping[str, EventBufferValueType] = {}
for task_id in task_ids:
task_result = task_results_by_task_id.get(task_id)
if task_result:
state = task_result["status"]
info = None if not hasattr(task_result, "info") else task_result["info"]
else:
state = celery_states.PENDING
info = None
state_info[task_id] = state, info
return state_info
def _get_many_using_multiprocessing(self, async_results) -> Mapping[str, EventBufferValueType]:
num_process = min(len(async_results), self._sync_parallelism)
with Pool(processes=num_process) as sync_pool:
chunksize = max(1, math.floor(math.ceil(1.0 * len(async_results) / self._sync_parallelism)))
task_id_to_states_and_info = sync_pool.map(
fetch_celery_task_state, async_results, chunksize=chunksize
)
states_and_info_by_task_id: MutableMapping[str, EventBufferValueType] = {}
for task_id, state_or_exception, info in task_id_to_states_and_info:
if isinstance(state_or_exception, ExceptionWithTraceback):
self.log.error( # pylint: disable=logging-not-lazy
CELERY_FETCH_ERR_MSG_HEADER + ":%s\n%s\n",
state_or_exception.exception,
state_or_exception.traceback,
)
else:
states_and_info_by_task_id[task_id] = state_or_exception, info
return states_and_info_by_task_id
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
flask-sp/flask_sp/blueprints/books.py
|
import os
from urllib.parse import urlparse
from flask import Blueprint, redirect, request, url_for
from flask_sp.saml.auth import AuthenticationManager
blueprint = Blueprint('books', __name__, url_prefix='/books')
IDP_HOSTNAME = os.environ.get('IDP_HOSTNAME', 'idp.hilbertteam.net')
IDP_ENTITYID = os.environ.get('IDP_ENTITYID', 'http://idp.hilbertteam.net/idp/shibboleth')
@blueprint.route('/<book>', methods=('GET',))
def index(book):
auth_manager = AuthenticationManager(IDP_ENTITYID)
authentication_manager = AuthenticationManager()
authenticated = True
while True:
if request.referrer:
parse_result = urlparse(request.referrer)
if IDP_HOSTNAME in parse_result.netloc:
break
user = authentication_manager.finish_authentication()
if user:
break
authenticated = False
break
if authenticated:
return redirect(url_for('static', filename='books/' + book, _external=True))
return redirect(auth_manager.start_authentication(request.url))
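# Illustrative registration sketch (not part of this module): the blueprint is
# wired into the application elsewhere, roughly like
#     from flask import Flask
#     from flask_sp.blueprints import books
#     app = Flask(__name__)
#     app.register_blueprint(books.blueprint)  # serves GET /books/<book>
# The exact application factory used by flask_sp is an assumption here.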
|
[] |
[] |
[
"IDP_ENTITYID",
"IDP_HOSTNAME"
] |
[]
|
["IDP_ENTITYID", "IDP_HOSTNAME"]
|
python
| 2 | 0 | |
spring-boot-project/spring-boot-devtools/src/main/java/org/springframework/boot/devtools/env/DevToolsHomePropertiesPostProcessor.java
|
/*
* Copyright 2012-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.devtools.env;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.function.Function;
import java.util.function.Supplier;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.devtools.system.DevToolsEnablementDeducer;
import org.springframework.boot.env.EnvironmentPostProcessor;
import org.springframework.boot.env.PropertiesPropertySourceLoader;
import org.springframework.boot.env.PropertySourceLoader;
import org.springframework.boot.env.YamlPropertySourceLoader;
import org.springframework.core.env.ConfigurableEnvironment;
import org.springframework.core.env.PropertySource;
import org.springframework.core.io.FileSystemResource;
import org.springframework.util.ClassUtils;
import org.springframework.util.StringUtils;
/**
* {@link EnvironmentPostProcessor} to add devtools properties from the user's home
* directory.
*
* @author Phillip Webb
* @author Andy Wilkinson
* @author HaiTao Zhang
* @author Madhura Bhave
* @since 1.3.0
*/
public class DevToolsHomePropertiesPostProcessor implements EnvironmentPostProcessor {
private static final String LEGACY_FILE_NAME = ".spring-boot-devtools.properties";
private static final String[] FILE_NAMES = new String[] { "spring-boot-devtools.yml", "spring-boot-devtools.yaml",
"spring-boot-devtools.properties" };
private static final String CONFIG_PATH = "/.config/spring-boot/";
private static final Set<PropertySourceLoader> PROPERTY_SOURCE_LOADERS;
private final Properties systemProperties;
private final Map<String, String> environmentVariables;
static {
Set<PropertySourceLoader> propertySourceLoaders = new HashSet<>();
propertySourceLoaders.add(new PropertiesPropertySourceLoader());
if (ClassUtils.isPresent("org.yaml.snakeyaml.Yaml", null)) {
propertySourceLoaders.add(new YamlPropertySourceLoader());
}
PROPERTY_SOURCE_LOADERS = Collections.unmodifiableSet(propertySourceLoaders);
}
public DevToolsHomePropertiesPostProcessor() {
this(System.getenv(), System.getProperties());
}
DevToolsHomePropertiesPostProcessor(Map<String, String> environmentVariables, Properties systemProperties) {
this.environmentVariables = environmentVariables;
this.systemProperties = systemProperties;
}
@Override
public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
if (DevToolsEnablementDeducer.shouldEnable(Thread.currentThread())) {
List<PropertySource<?>> propertySources = getPropertySources();
if (propertySources.isEmpty()) {
addPropertySource(propertySources, LEGACY_FILE_NAME, (file) -> "devtools-local");
}
propertySources.forEach(environment.getPropertySources()::addFirst);
}
}
private List<PropertySource<?>> getPropertySources() {
List<PropertySource<?>> propertySources = new ArrayList<>();
for (String fileName : FILE_NAMES) {
addPropertySource(propertySources, CONFIG_PATH + fileName, this::getPropertySourceName);
}
return propertySources;
}
private String getPropertySourceName(File file) {
return "devtools-local: [" + file.toURI() + "]";
}
private void addPropertySource(List<PropertySource<?>> propertySources, String fileName,
Function<File, String> propertySourceNamer) {
File home = getHomeDirectory();
File file = (home != null) ? new File(home, fileName) : null;
FileSystemResource resource = (file != null) ? new FileSystemResource(file) : null;
if (resource != null && resource.exists() && resource.isFile()) {
addPropertySource(propertySources, resource, propertySourceNamer);
}
}
private void addPropertySource(List<PropertySource<?>> propertySources, FileSystemResource resource,
Function<File, String> propertySourceNamer) {
try {
String name = propertySourceNamer.apply(resource.getFile());
for (PropertySourceLoader loader : PROPERTY_SOURCE_LOADERS) {
if (canLoadFileExtension(loader, resource.getFilename())) {
propertySources.addAll(loader.load(name, resource));
}
}
}
catch (IOException ex) {
throw new IllegalStateException("Unable to load " + resource.getFilename(), ex);
}
}
private boolean canLoadFileExtension(PropertySourceLoader loader, String name) {
return Arrays.stream(loader.getFileExtensions())
.anyMatch((fileExtension) -> StringUtils.endsWithIgnoreCase(name, fileExtension));
}
protected File getHomeDirectory() {
return getHomeDirectory(() -> this.environmentVariables.get("SPRING_DEVTOOLS_HOME"),
() -> this.systemProperties.getProperty("spring.devtools.home"),
() -> this.systemProperties.getProperty("user.home"));
}
@SafeVarargs
private final File getHomeDirectory(Supplier<String>... pathSuppliers) {
for (Supplier<String> pathSupplier : pathSuppliers) {
String path = pathSupplier.get();
if (StringUtils.hasText(path)) {
return new File(path);
}
}
return null;
}
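	// Illustrative sketch (not part of this class): a test could point devtools at a
	// temporary home directory by overriding getHomeDirectory(), e.g.
	//
	//     DevToolsHomePropertiesPostProcessor postProcessor = new DevToolsHomePropertiesPostProcessor() {
	//         @Override
	//         protected File getHomeDirectory() {
	//             return tempDir; // hypothetical temporary directory
	//         }
	//     };
	//
	// so that postProcessEnvironment() reads the devtools config files from tempDir
	// instead of the real user home.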
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
Lib/test/test_site.py
|
"""Tests for 'site'.
Tests assume the initial paths in sys.path once the interpreter has begun
executing have not been removed.
"""
import unittest
import test.support
from test.support import captured_stderr, TESTFN, EnvironmentVarGuard
import builtins
import os
import sys
import re
import encodings
import urllib.request
import urllib.error
import subprocess
import sysconfig
from copy import copy
# These tests are not particularly useful if Python was invoked with -S.
# If you add tests that are useful under -S, this skip should be moved
# to the class level.
if sys.flags.no_site:
raise unittest.SkipTest("Python was invoked with -S")
import site
if site.ENABLE_USER_SITE and not os.path.isdir(site.USER_SITE):
# need to add user site directory for tests
os.makedirs(site.USER_SITE)
site.addsitedir(site.USER_SITE)
class HelperFunctionsTests(unittest.TestCase):
"""Tests for helper functions.
"""
def setUp(self):
"""Save a copy of sys.path"""
self.sys_path = sys.path[:]
self.old_base = site.USER_BASE
self.old_site = site.USER_SITE
self.old_prefixes = site.PREFIXES
self.original_vars = sysconfig._CONFIG_VARS
self.old_vars = copy(sysconfig._CONFIG_VARS)
def tearDown(self):
"""Restore sys.path"""
sys.path[:] = self.sys_path
site.USER_BASE = self.old_base
site.USER_SITE = self.old_site
site.PREFIXES = self.old_prefixes
sysconfig._CONFIG_VARS = self.original_vars
sysconfig._CONFIG_VARS.clear()
sysconfig._CONFIG_VARS.update(self.old_vars)
def test_makepath(self):
# Test that makepath() returns an absolute path for its first return value
# and a case-normalized version of the absolute path for its
# second value.
path_parts = ("Beginning", "End")
original_dir = os.path.join(*path_parts)
abs_dir, norm_dir = site.makepath(*path_parts)
self.assertEqual(os.path.abspath(original_dir), abs_dir)
if original_dir == os.path.normcase(original_dir):
self.assertEqual(abs_dir, norm_dir)
else:
self.assertEqual(os.path.normcase(abs_dir), norm_dir)
def test_init_pathinfo(self):
dir_set = site._init_pathinfo()
for entry in [site.makepath(path)[1] for path in sys.path
if path and os.path.isdir(path)]:
self.assertIn(entry, dir_set,
"%s from sys.path not found in set returned "
"by _init_pathinfo(): %s" % (entry, dir_set))
def pth_file_tests(self, pth_file):
"""Contain common code for testing results of reading a .pth file"""
self.assertIn(pth_file.imported, sys.modules,
"%s not in sys.modules" % pth_file.imported)
self.assertIn(site.makepath(pth_file.good_dir_path)[0], sys.path)
self.assertFalse(os.path.exists(pth_file.bad_dir_path))
def test_addpackage(self):
# Make sure addpackage() imports if the line starts with 'import',
# adds directories to sys.path for any line in the file that is not a
# comment or import that is a valid directory name for where the .pth
# file resides; invalid directories are not added
pth_file = PthFile()
pth_file.cleanup(prep=True) # to make sure that nothing is
# pre-existing that shouldn't be
try:
pth_file.create()
site.addpackage(pth_file.base_dir, pth_file.filename, set())
self.pth_file_tests(pth_file)
finally:
pth_file.cleanup()
def make_pth(self, contents, pth_dir='.', pth_name=TESTFN):
# Create a .pth file and return its (abspath, basename).
pth_dir = os.path.abspath(pth_dir)
pth_basename = pth_name + '.pth'
pth_fn = os.path.join(pth_dir, pth_basename)
pth_file = open(pth_fn, 'w', encoding='utf-8')
self.addCleanup(lambda: os.remove(pth_fn))
pth_file.write(contents)
pth_file.close()
return pth_dir, pth_basename
def test_addpackage_import_bad_syntax(self):
# Issue 10642
pth_dir, pth_fn = self.make_pth("import bad)syntax\n")
with captured_stderr() as err_out:
site.addpackage(pth_dir, pth_fn, set())
self.assertRegex(err_out.getvalue(), "line 1")
self.assertRegex(err_out.getvalue(),
re.escape(os.path.join(pth_dir, pth_fn)))
# XXX: the previous two should be independent checks so that the
# order doesn't matter. The next three could be a single check
# but my regex foo isn't good enough to write it.
self.assertRegex(err_out.getvalue(), 'Traceback')
self.assertRegex(err_out.getvalue(), r'import bad\)syntax')
self.assertRegex(err_out.getvalue(), 'SyntaxError')
def test_addpackage_import_bad_exec(self):
# Issue 10642
pth_dir, pth_fn = self.make_pth("randompath\nimport nosuchmodule\n")
with captured_stderr() as err_out:
site.addpackage(pth_dir, pth_fn, set())
self.assertRegex(err_out.getvalue(), "line 2")
self.assertRegex(err_out.getvalue(),
re.escape(os.path.join(pth_dir, pth_fn)))
# XXX: ditto previous XXX comment.
self.assertRegex(err_out.getvalue(), 'Traceback')
self.assertRegex(err_out.getvalue(), 'ImportError')
@unittest.skipIf(sys.platform == "win32", "Windows does not raise an "
"error for file paths containing null characters")
def test_addpackage_import_bad_pth_file(self):
# Issue 5258
pth_dir, pth_fn = self.make_pth("abc\x00def\n")
with captured_stderr() as err_out:
site.addpackage(pth_dir, pth_fn, set())
self.assertRegex(err_out.getvalue(), "line 1")
self.assertRegex(err_out.getvalue(),
re.escape(os.path.join(pth_dir, pth_fn)))
# XXX: ditto previous XXX comment.
self.assertRegex(err_out.getvalue(), 'Traceback')
self.assertRegex(err_out.getvalue(), 'TypeError')
def test_addsitedir(self):
# Same tests for test_addpackage since addsitedir() essentially just
# calls addpackage() for every .pth file in the directory
pth_file = PthFile()
pth_file.cleanup(prep=True) # Make sure that nothing is pre-existing
# that is tested for
try:
pth_file.create()
site.addsitedir(pth_file.base_dir, set())
self.pth_file_tests(pth_file)
finally:
pth_file.cleanup()
@unittest.skipUnless(site.ENABLE_USER_SITE, "requires access to PEP 370 "
"user-site (site.ENABLE_USER_SITE)")
def test_s_option(self):
usersite = site.USER_SITE
self.assertIn(usersite, sys.path)
env = os.environ.copy()
rc = subprocess.call([sys.executable, '-c',
'import sys; sys.exit(%r in sys.path)' % usersite],
env=env)
self.assertEqual(rc, 1)
env = os.environ.copy()
rc = subprocess.call([sys.executable, '-s', '-c',
'import sys; sys.exit(%r in sys.path)' % usersite],
env=env)
if usersite == site.getsitepackages()[0]:
self.assertEqual(rc, 1)
else:
self.assertEqual(rc, 0)
env = os.environ.copy()
env["PYTHONNOUSERSITE"] = "1"
rc = subprocess.call([sys.executable, '-c',
'import sys; sys.exit(%r in sys.path)' % usersite],
env=env)
if usersite == site.getsitepackages()[0]:
self.assertEqual(rc, 1)
else:
self.assertEqual(rc, 0)
env = os.environ.copy()
env["PYTHONUSERBASE"] = "/tmp"
rc = subprocess.call([sys.executable, '-c',
'import sys, site; sys.exit(site.USER_BASE.startswith("/tmp"))'],
env=env)
self.assertEqual(rc, 1)
def test_getuserbase(self):
site.USER_BASE = None
user_base = site.getuserbase()
# the call sets site.USER_BASE
self.assertEqual(site.USER_BASE, user_base)
# let's set PYTHONUSERBASE and see if it uses it
site.USER_BASE = None
import sysconfig
sysconfig._CONFIG_VARS = None
with EnvironmentVarGuard() as environ:
environ['PYTHONUSERBASE'] = 'xoxo'
self.assertTrue(site.getuserbase().startswith('xoxo'),
site.getuserbase())
def test_getusersitepackages(self):
site.USER_SITE = None
site.USER_BASE = None
user_site = site.getusersitepackages()
# the call sets USER_BASE *and* USER_SITE
self.assertEqual(site.USER_SITE, user_site)
self.assertTrue(user_site.startswith(site.USER_BASE), user_site)
def test_getsitepackages(self):
site.PREFIXES = ['xoxo']
dirs = site.getsitepackages()
if (sys.platform == "darwin" and
sysconfig.get_config_var("PYTHONFRAMEWORK")):
# OS X framework builds
site.PREFIXES = ['Python.framework']
dirs = site.getsitepackages()
self.assertEqual(len(dirs), 3)
wanted = os.path.join('/Library',
sysconfig.get_config_var("PYTHONFRAMEWORK"),
sys.version[:3],
'site-packages')
self.assertEqual(dirs[2], wanted)
elif os.sep == '/':
# OS X non-framework builds, Linux, FreeBSD, etc
self.assertEqual(len(dirs), 2)
wanted = os.path.join('xoxo', 'lib', 'python' + sys.version[:3],
'site-packages')
self.assertEqual(dirs[0], wanted)
wanted = os.path.join('xoxo', 'lib', 'site-python')
self.assertEqual(dirs[1], wanted)
else:
# other platforms
self.assertEqual(len(dirs), 2)
self.assertEqual(dirs[0], 'xoxo')
wanted = os.path.join('xoxo', 'lib', 'site-packages')
self.assertEqual(dirs[1], wanted)
class PthFile(object):
"""Helper class for handling testing of .pth files"""
def __init__(self, filename_base=TESTFN, imported="time",
good_dirname="__testdir__", bad_dirname="__bad"):
"""Initialize instance variables"""
self.filename = filename_base + ".pth"
self.base_dir = os.path.abspath('')
self.file_path = os.path.join(self.base_dir, self.filename)
self.imported = imported
self.good_dirname = good_dirname
self.bad_dirname = bad_dirname
self.good_dir_path = os.path.join(self.base_dir, self.good_dirname)
self.bad_dir_path = os.path.join(self.base_dir, self.bad_dirname)
def create(self):
"""Create a .pth file with a comment, blank lines, an ``import
<self.imported>``, a line with self.good_dirname, and a line with
self.bad_dirname.
Creation of the directory for self.good_dir_path (based off of
self.good_dirname) is also performed.
Make sure to call self.cleanup() to undo anything done by this method.
"""
FILE = open(self.file_path, 'w')
try:
print("#import @bad module name", file=FILE)
print("\n", file=FILE)
print("import %s" % self.imported, file=FILE)
print(self.good_dirname, file=FILE)
print(self.bad_dirname, file=FILE)
finally:
FILE.close()
os.mkdir(self.good_dir_path)
def cleanup(self, prep=False):
"""Make sure that the .pth file is deleted, self.imported is not in
sys.modules, and that both self.good_dirname and self.bad_dirname are
not existing directories."""
if os.path.exists(self.file_path):
os.remove(self.file_path)
if prep:
self.imported_module = sys.modules.get(self.imported)
if self.imported_module:
del sys.modules[self.imported]
else:
if self.imported_module:
sys.modules[self.imported] = self.imported_module
if os.path.exists(self.good_dir_path):
os.rmdir(self.good_dir_path)
if os.path.exists(self.bad_dir_path):
os.rmdir(self.bad_dir_path)
class ImportSideEffectTests(unittest.TestCase):
"""Test side-effects from importing 'site'."""
def setUp(self):
"""Make a copy of sys.path"""
self.sys_path = sys.path[:]
def tearDown(self):
"""Restore sys.path"""
sys.path[:] = self.sys_path
def test_abs_paths(self):
# Make sure all imported modules have their __file__ and __cached__
# attributes as absolute paths. Arranging to put the Lib directory on
# PYTHONPATH would cause the os module to have a relative path for
# __file__ if abs_paths() does not get run. sys and builtins (the
# only other modules imported before site.py runs) do not have
# __file__ or __cached__ because they are built-in.
parent = os.path.relpath(os.path.dirname(os.__file__))
env = os.environ.copy()
env['PYTHONPATH'] = parent
code = ('import os, sys',
# use ASCII to avoid locale issues with non-ASCII directories
'os_file = os.__file__.encode("ascii", "backslashreplace")',
r'sys.stdout.buffer.write(os_file + b"\n")',
'os_cached = os.__cached__.encode("ascii", "backslashreplace")',
r'sys.stdout.buffer.write(os_cached + b"\n")')
command = '\n'.join(code)
# First, prove that with -S (no 'import site'), the paths are
# relative.
proc = subprocess.Popen([sys.executable, '-S', '-c', command],
env=env,
stdout=subprocess.PIPE)
stdout, stderr = proc.communicate()
self.assertEqual(proc.returncode, 0)
os__file__, os__cached__ = stdout.splitlines()[:2]
self.assertFalse(os.path.isabs(os__file__))
self.assertFalse(os.path.isabs(os__cached__))
# Now, with 'import site', it works.
proc = subprocess.Popen([sys.executable, '-c', command],
env=env,
stdout=subprocess.PIPE)
stdout, stderr = proc.communicate()
self.assertEqual(proc.returncode, 0)
os__file__, os__cached__ = stdout.splitlines()[:2]
self.assertTrue(os.path.isabs(os__file__))
self.assertTrue(os.path.isabs(os__cached__))
def test_no_duplicate_paths(self):
# No duplicate paths should exist in sys.path
# Handled by removeduppaths()
site.removeduppaths()
seen_paths = set()
for path in sys.path:
self.assertNotIn(path, seen_paths)
seen_paths.add(path)
@unittest.skip('test not implemented')
def test_add_build_dir(self):
# Test that the build directory's Modules directory is used when it
# should be.
# XXX: implement
pass
def test_setting_quit(self):
# 'quit' and 'exit' should be injected into builtins
self.assertTrue(hasattr(builtins, "quit"))
self.assertTrue(hasattr(builtins, "exit"))
def test_setting_copyright(self):
# 'copyright', 'credits', and 'license' should be in builtins
self.assertTrue(hasattr(builtins, "copyright"))
self.assertTrue(hasattr(builtins, "credits"))
self.assertTrue(hasattr(builtins, "license"))
def test_setting_help(self):
# 'help' should be set in builtins
self.assertTrue(hasattr(builtins, "help"))
def test_aliasing_mbcs(self):
if sys.platform == "win32":
import locale
if locale.getdefaultlocale()[1].startswith('cp'):
for value in encodings.aliases.aliases.values():
if value == "mbcs":
break
else:
self.fail("did not alias mbcs")
def test_sitecustomize_executed(self):
# If sitecustomize is available, it should have been imported.
if "sitecustomize" not in sys.modules:
try:
import sitecustomize
except ImportError:
pass
else:
self.fail("sitecustomize not imported automatically")
@test.support.requires_resource('network')
@test.support.system_must_validate_cert
@unittest.skipUnless(sys.version_info[3] == 'final',
'only for released versions')
@unittest.skipUnless(hasattr(urllib.request, "HTTPSHandler"),
'need SSL support to download license')
def test_license_exists_at_url(self):
# This test is a bit fragile since it depends on the format of the
# string displayed by license in the absence of a LICENSE file.
url = license._Printer__data.split()[1]
req = urllib.request.Request(url, method='HEAD')
try:
with test.support.transient_internet(url):
with urllib.request.urlopen(req) as data:
code = data.getcode()
except urllib.error.HTTPError as e:
code = e.code
self.assertEqual(code, 200, msg="Can't find " + url)
class StartupImportTests(unittest.TestCase):
def test_startup_imports(self):
# This test checks which modules are loaded by Python when it
# initially starts.
popen = subprocess.Popen([sys.executable, '-I', '-v', '-c',
'import sys; print(set(sys.modules))'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = popen.communicate()
stdout = stdout.decode('utf-8')
stderr = stderr.decode('utf-8')
modules = eval(stdout)
self.assertIn('site', modules)
# http://bugs.python.org/issue19205
re_mods = {'re', '_sre', 'sre_compile', 'sre_constants', 'sre_parse'}
# _osx_support uses the re module in many places
if sys.platform != 'darwin':
self.assertFalse(modules.intersection(re_mods), stderr)
# http://bugs.python.org/issue9548
self.assertNotIn('locale', modules, stderr)
if sys.platform != 'darwin':
# http://bugs.python.org/issue19209
self.assertNotIn('copyreg', modules, stderr)
# http://bugs.python.org/issue19218
collection_mods = {'_collections', 'collections', 'functools',
'heapq', 'itertools', 'keyword', 'operator',
'reprlib', 'types', 'weakref'
}.difference(sys.builtin_module_names)
self.assertFalse(modules.intersection(collection_mods), stderr)
if __name__ == "__main__":
unittest.main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
java/testng/W3CChromeTest.java
|
import org.openqa.selenium.WebDriver;
import org.testng.ITestResult;
import org.testng.annotations.*;
import org.testng.Assert;
import org.openqa.selenium.MutableCapabilities;
import org.openqa.selenium.chrome.ChromeOptions;
import org.openqa.selenium.remote.DesiredCapabilities;
import org.openqa.selenium.remote.RemoteWebDriver;
import org.openqa.selenium.JavascriptExecutor;
import java.lang.reflect.Method;
import java.net.MalformedURLException;
import java.net.URL;
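/*
 * Note: this test reads Sauce Labs credentials from the environment via System.getenv() in setup().
 * A hypothetical way to provide them before running the suite (values are placeholders):
 *   export SAUCE_USERNAME="your-sauce-username"
 *   export SAUCE_ACCESS_KEY="your-sauce-access-key"
 */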
public class W3CChromeTest {
protected WebDriver driver;
/**
* @BeforeMethod is a TestNG annotation that defines specific prerequisite test method behaviors.
In the example below we:
- Define Environment Variables for Sauce Credentials ("SAUCE_USERNAME" and "SAUCE_ACCESS_KEY")
- Define Chrome Options such as W3C protocol
- Define the "sauce:options" capabilities, indicated by the "sauceOpts" MutableCapability object
- Define the WebDriver capabilities, indicated by the "caps" DesiredCapabilities object
- Define the service URL for communicating with SauceLabs.com indicated by "sauceURL" string
- Set the URL to sauceUrl
- Set the driver instance to a RemoteWebDriver
- Pass "url" and "caps" as parameters of the RemoteWebDriver
For more information visit the docs: http://static.javadoc.io/org.testng/testng/6.9.4/org/testng/annotations/BeforeMethod.html
*/
@BeforeMethod
public void setup(Method method) throws MalformedURLException {
String username = System.getenv("SAUCE_USERNAME");
String accessKey = System.getenv("SAUCE_ACCESS_KEY");
String methodName = method.getName();
/** ChromeOptions allows us to set browser-specific behavior such as profile settings, headless capabilities, insecure TLS certs,
and in this example--the W3C protocol
For more information see: https://seleniumhq.github.io/selenium/docs/api/java/org/openqa/selenium/chrome/ChromeOptions.html */
ChromeOptions chromeOpts = new ChromeOptions();
chromeOpts.setExperimentalOption("w3c", true);
/** The MutableCapabilities class came into existence with Selenium 3.6.0 and acts as the parent class for
all browser implementations--including the ChromeOptions class extension.
For more information see: https://seleniumhq.github.io/selenium/docs/api/java/org/openqa/selenium/MutableCapabilities.html */
MutableCapabilities sauceOpts = new MutableCapabilities();
sauceOpts.setCapability("name", methodName);
sauceOpts.setCapability("seleniumVersion", "3.141.59");
sauceOpts.setCapability("username", username);
sauceOpts.setCapability("accessKey", accessKey);
sauceOpts.setCapability("tags", "w3c-chrome-tests")
/** Below we see the use of our other capability objects, 'chromeOpts' and 'sauceOpts',
defined in ChromeOptions.CAPABILITY and sauce:options respectively.
*/
DesiredCapabilities caps = new DesiredCapabilities();
caps.setCapability(ChromeOptions.CAPABILITY, chromeOpts);
caps.setCapability("sauce:options", sauceOpts);
caps.setCapability("browserName", "googlechrome");
caps.setCapability("browserVersion", "71.0");
caps.setCapability("platformName", "windows 10");
/** Finally, we pass our DesiredCapabilities object 'caps' as a parameter of our RemoteWebDriver instance */
String sauceUrl = "https://ondemand.saucelabs.com:443/wd/hub";
URL url = new URL(sauceUrl);
driver = new RemoteWebDriver(url, caps);
}
/**
* @Test is a TestNG annotation that defines the actual test case, along with the test execution commands.
In the example below we:
- Navigate to our SUT (site under test), 'https://www.saucedemo.com'
- Store the current page title in a String called 'getTitle'
- Assert that the page title equals "Swag Labs"
For more information visit the docs: http://static.javadoc.io/org.testng/testng/6.9.4/org/testng/annotations/Test.html
*/
@Test
public void w3cChromeTest() throws AssertionError {
driver.navigate().to("https://www.saucedemo.com");
String getTitle = driver.getTitle();
Assert.assertEquals(getTitle, "Swag Labs");
}
/**
* @AfterMethod is a TestNG annotation that defines any post-test (teardown) tasks.
In the example below we:
- Pass the ITestResult class results to a parameter called 'result'
- Use the JavascriptExecutor class to send our test 'result' to Sauce Labs with a "passed" flag
if the test was successful, or a "failed" flag if the test was unsuccessful.
- Teardown the RemoteWebDriver session with a 'driver.quit()' command so that the test VM doesn't hang.
For more information visit the docs: http://static.javadoc.io/org.testng/testng/6.9.4/org/testng/annotations/AfterMethod.html
*/
@AfterMethod
public void teardown(ITestResult result) {
((JavascriptExecutor)driver).executeScript("sauce:job-result=" + (result.isSuccess() ? "passed" : "failed"));
driver.quit();
}
}
|
[
"\"SAUCE_USERNAME\"",
"\"SAUCE_ACCESS_KEY\""
] |
[] |
[
"SAUCE_USERNAME",
"SAUCE_ACCESS_KEY"
] |
[]
|
["SAUCE_USERNAME", "SAUCE_ACCESS_KEY"]
|
java
| 2 | 0 | |
tun/tun_darwin.go
|
/* SPDX-License-Identifier: MIT
*
* Copyright (C) 2017-2021 WireGuard LLC. All Rights Reserved.
*/
package tun
import (
"errors"
"fmt"
"net"
"os"
"sync"
"syscall"
"time"
"unsafe"
"golang.org/x/net/ipv6"
"golang.org/x/sys/unix"
)
const utunControlName = "com.apple.net.utun_control"
type NativeTun struct {
name string
tunFile *os.File
events chan Event
errors chan error
routeSocket int
closeOnce sync.Once
}
func retryInterfaceByIndex(index int) (iface *net.Interface, err error) {
for i := 0; i < 20; i++ {
iface, err = net.InterfaceByIndex(index)
if err != nil && errors.Is(err, syscall.ENOMEM) {
time.Sleep(time.Duration(i) * time.Second / 3)
continue
}
return iface, err
}
return nil, err
}
func (tun *NativeTun) routineRouteListener(tunIfindex int) {
var (
statusUp bool
statusMTU int
)
defer close(tun.events)
data := make([]byte, os.Getpagesize())
for {
retry:
n, err := unix.Read(tun.routeSocket, data)
if err != nil {
if errno, ok := err.(syscall.Errno); ok && errno == syscall.EINTR {
goto retry
}
tun.errors <- err
return
}
if n < 14 {
continue
}
if data[3 /* type */] != unix.RTM_IFINFO {
continue
}
ifindex := int(*(*uint16)(unsafe.Pointer(&data[12 /* ifindex */])))
if ifindex != tunIfindex {
continue
}
iface, err := retryInterfaceByIndex(ifindex)
if err != nil {
tun.errors <- err
return
}
// Up / Down event
up := (iface.Flags & net.FlagUp) != 0
if up != statusUp && up {
tun.events <- EventUp
}
if up != statusUp && !up {
tun.events <- EventDown
}
statusUp = up
// MTU changes
if iface.MTU != statusMTU {
tun.events <- EventMTUUpdate
}
statusMTU = iface.MTU
}
}
func CreateTUN(name string, mtu int, nopi bool) (Device, error) {
ifIndex := -1
if name != "utun" {
_, err := fmt.Sscanf(name, "utun%d", &ifIndex)
if err != nil || ifIndex < 0 {
return nil, fmt.Errorf("Interface name must be utun[0-9]*")
}
}
fd, err := unix.Socket(unix.AF_SYSTEM, unix.SOCK_DGRAM, 2)
if err != nil {
return nil, err
}
ctlInfo := &unix.CtlInfo{}
copy(ctlInfo.Name[:], []byte(utunControlName))
err = unix.IoctlCtlInfo(fd, ctlInfo)
if err != nil {
unix.Close(fd)
return nil, fmt.Errorf("IoctlGetCtlInfo: %w", err)
}
sc := &unix.SockaddrCtl{
ID: ctlInfo.Id,
Unit: uint32(ifIndex) + 1,
}
err = unix.Connect(fd, sc)
if err != nil {
unix.Close(fd)
return nil, err
}
err = unix.SetNonblock(fd, true)
if err != nil {
unix.Close(fd)
return nil, err
}
tun, err := CreateTUNFromFile(os.NewFile(uintptr(fd), ""), mtu, nopi)
if err == nil && name == "utun" {
fname := os.Getenv("WG_TUN_NAME_FILE")
if fname != "" {
os.WriteFile(fname, []byte(tun.(*NativeTun).name+"\n"), 0o400)
}
}
return tun, err
}
func CreateTUNFromFile(file *os.File, mtu int, nopi bool) (Device, error) {
tun := &NativeTun{
tunFile: file,
events: make(chan Event, 10),
errors: make(chan error, 5),
}
name, err := tun.Name()
if err != nil {
tun.tunFile.Close()
return nil, err
}
tunIfindex, err := func() (int, error) {
iface, err := net.InterfaceByName(name)
if err != nil {
return -1, err
}
return iface.Index, nil
}()
if err != nil {
tun.tunFile.Close()
return nil, err
}
tun.routeSocket, err = unix.Socket(unix.AF_ROUTE, unix.SOCK_RAW, unix.AF_UNSPEC)
if err != nil {
tun.tunFile.Close()
return nil, err
}
go tun.routineRouteListener(tunIfindex)
if mtu > 0 {
err = tun.setMTU(mtu)
if err != nil {
tun.Close()
return nil, err
}
}
return tun, nil
}
func (tun *NativeTun) Name() (string, error) {
var err error
tun.operateOnFd(func(fd uintptr) {
tun.name, err = unix.GetsockoptString(
int(fd),
2, /* #define SYSPROTO_CONTROL 2 */
2, /* #define UTUN_OPT_IFNAME 2 */
)
})
if err != nil {
return "", fmt.Errorf("GetSockoptString: %w", err)
}
return tun.name, nil
}
func (tun *NativeTun) File() *os.File {
return tun.tunFile
}
func (tun *NativeTun) Events() chan Event {
return tun.events
}
func (tun *NativeTun) Read(buff []byte, offset int) (int, error) {
select {
case err := <-tun.errors:
return 0, err
default:
mu := sync.Mutex{}
mu.Lock()
defer mu.Unlock()
bf := make([]byte, len(buff))
n, err := tun.tunFile.Read(bf[:])
if n < offset {
return 0, err
}
copy(buff, bf[offset:])
return n - offset, err
}
}
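// Write prepends the 4-byte utun packet-information header expected by the utun device:
// three zero bytes followed by the address family of the payload. As a rough illustration
// (Darwin constants, assumed for the example): an IPv4 packet would be framed as
// 00 00 00 02 (unix.AF_INET) and an IPv6 packet as 00 00 00 1e (unix.AF_INET6),
// followed by the raw IP packet bytes.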
func (tun *NativeTun) Write(buff []byte, offset int) (int, error) {
// reserve space for header
mu := sync.Mutex{}
mu.Lock()
defer mu.Unlock()
bf := make([]byte, len(buff)+4)
// add packet information header
bf[0] = 0x00
bf[1] = 0x00
bf[2] = 0x00
if buff[4]>>4 == ipv6.Version {
bf[3] = unix.AF_INET6
} else {
bf[3] = unix.AF_INET
}
copy(bf[4:], buff)
// write
return tun.tunFile.Write(bf)
}
func (tun *NativeTun) Flush() error {
// TODO: can flushing be implemented by buffering and using sendmmsg?
return nil
}
func (tun *NativeTun) Close() error {
var err1, err2 error
tun.closeOnce.Do(func() {
err1 = tun.tunFile.Close()
if tun.routeSocket != -1 {
unix.Shutdown(tun.routeSocket, unix.SHUT_RDWR)
err2 = unix.Close(tun.routeSocket)
} else if tun.events != nil {
close(tun.events)
}
})
if err1 != nil {
return err1
}
return err2
}
func (tun *NativeTun) setMTU(n int) error {
fd, err := unix.Socket(
unix.AF_INET,
unix.SOCK_DGRAM,
0,
)
if err != nil {
return err
}
defer unix.Close(fd)
var ifr unix.IfreqMTU
copy(ifr.Name[:], tun.name)
ifr.MTU = int32(n)
err = unix.IoctlSetIfreqMTU(fd, &ifr)
if err != nil {
return fmt.Errorf("failed to set MTU on %s: %w", tun.name, err)
}
return nil
}
func (tun *NativeTun) MTU() (int, error) {
fd, err := unix.Socket(
unix.AF_INET,
unix.SOCK_DGRAM,
0,
)
if err != nil {
return 0, err
}
defer unix.Close(fd)
ifr, err := unix.IoctlGetIfreqMTU(fd, tun.name)
if err != nil {
return 0, fmt.Errorf("failed to get MTU on %s: %w", tun.name, err)
}
return int(ifr.MTU), nil
}
|
[
"\"WG_TUN_NAME_FILE\""
] |
[] |
[
"WG_TUN_NAME_FILE"
] |
[]
|
["WG_TUN_NAME_FILE"]
|
go
| 1 | 0 | |
log/config_test.go
|
package log
import (
"os"
"strconv"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestNsqURL(t *testing.T) {
c := config{
NSQHost: "somehost",
NSQPort: 3333,
}
assert.Equal(t, c.nsqURL(), "somehost:3333")
}
func TestStopTimeoutDuration(t *testing.T) {
c := config{
StopTimeoutSeconds: 60,
}
assert.Equal(t, c.stopTimeoutDuration(), time.Duration(c.StopTimeoutSeconds)*time.Second)
}
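// TestParseConfig below relies on environment variables that the Deis platform would normally
// inject. A hypothetical local setup (placeholder values) would additionally need, for example,
// DEIS_NSQD_SERVICE_HOST=127.0.0.1 and DEIS_NSQD_SERVICE_PORT_TRANSPORT=4150 so that the host
// assertion and the strconv.Atoi conversion of the port succeed.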
func TestParseConfig(t *testing.T) {
os.Setenv("NSQ_TOPIC", "topic")
os.Setenv("NSQ_CHANNEL", "channel")
os.Setenv("NSQ_HANDLER_COUNT", "3")
os.Setenv("AGGREGATOR_STOP_TIMEOUT_SEC", "2")
port, err := strconv.Atoi(os.Getenv("DEIS_NSQD_SERVICE_PORT_TRANSPORT"))
assert.NoError(t, err)
c, err := parseConfig("foo")
assert.NoError(t, err)
assert.Equal(t, c.NSQHost, os.Getenv("DEIS_NSQD_SERVICE_HOST"))
assert.Equal(t, c.NSQPort, port)
assert.Equal(t, c.NSQTopic, "topic")
assert.Equal(t, c.NSQChannel, "channel")
assert.Equal(t, c.NSQHandlerCount, 3)
assert.Equal(t, c.StopTimeoutSeconds, 2)
}
|
[
"\"DEIS_NSQD_SERVICE_PORT_TRANSPORT\"",
"\"DEIS_NSQD_SERVICE_HOST\""
] |
[] |
[
"DEIS_NSQD_SERVICE_HOST",
"DEIS_NSQD_SERVICE_PORT_TRANSPORT"
] |
[]
|
["DEIS_NSQD_SERVICE_HOST", "DEIS_NSQD_SERVICE_PORT_TRANSPORT"]
|
go
| 2 | 0 | |
crawler.py
|
import re
import os
from TwitterAPI import TwitterAPI
from pymongo import MongoClient, GEOSPHERE
import datetime
twitter_access_token = os.getenv('TWITTER_ACCESS_TOKEN')
twitter_access_token_secret = os.getenv('TWITTER_ACCESS_TOKEN_SECRET')
twitter_consumer_key = os.getenv('TWITTER_CONSUMER_KEY')
twitter_consumer_secret = os.getenv('TWITTER_CONSUMER_SECRET')
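# Rough illustration of tweet_text_words() below (hypothetical input):
#   "Bonjour @friend! Check https://example.com #paris"
# splits into ["Bonjour", "@friend!", "Check", "https://example.com", "#paris"]; mentions, links
# and hashtags are blanked out, and the empty-word filter leaves ["Bonjour", "Check"].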
def tweet_text_words(tweet_text):
tweet_text = re.sub(r'[^\x00-\x7F]+',' ', tweet_text) # remove non-printable characters
tweet_words = re.split('\s', tweet_text) # split text into words
tweet_words = map(lambda w: re.sub('\#.*', '', w), tweet_words) # remove hashtags
tweet_words = map(lambda w: re.sub('\@.*', '', w), tweet_words) # remove user mentions
tweet_words = map(lambda w: re.sub('^.*http.*', '', w), tweet_words) # remove links
tweet_words = filter(lambda w: w != '', tweet_words) # filter empty words
    return list(tweet_words)  # materialize so callers can iterate the words more than once (map/filter are lazy in Python 3)
def tweet_get_geolocation(tweet):
if 'coordinates' in tweet:
geo = tweet['coordinates']
if geo is not None and 'type' in geo and geo['type'] == 'Point' and 'coordinates' in geo:
coordinates = geo['coordinates']
return {
'latitude': coordinates[1],
'longitude': coordinates[0]
}
return None
else:
return None
def tweet_process(tweet, stopwords, mongo_db):
tweet_text_dirty = tweet['text']
tweet_words = tweet_text_words(tweet_text_dirty)
tweet_text = ' '.join(tweet_words)
tweet_words_filtered = filter(lambda w: len(w) > 3 and w not in stopwords and w.isalnum(), map(lambda w: w.lower(), tweet_words))
tweet_geolocation = tweet_get_geolocation(tweet)
tweet_language = tweet['lang']
mongo_db_languages = mongo_db['languages0']
if tweet_language != 'und' and tweet_geolocation is not None:
mongo_db_languages.insert({
'language': tweet_language,
'location': [
tweet_geolocation['longitude'],
tweet_geolocation['latitude']
],
"time": datetime.datetime.utcnow()
})
mongo_db_words = mongo_db['words0']
if tweet_language != 'und' and tweet_geolocation is not None:
for word in tweet_words_filtered:
mongo_db_words.insert({
'word': word,
'location': [
tweet_geolocation['longitude'],
tweet_geolocation['latitude']
],
"time": datetime.datetime.utcnow()
})
print(tweet_text, tweet_geolocation, tweet_language, tweet_words_filtered)
def read_stopwords():
stopwords_file = open('stopwords')
lines = stopwords_file.readlines()
return [line.strip() for line in lines]
def read_countries():
countries_file = open('countries')
lines = countries_file.readlines()
countries = {}
for line in lines:
words = line.rstrip().split(' ')
countries[words[0]] = ','.join(words[1:])
return countries
if __name__ == '__main__':
mongo_url = os.getenv('MONGOLAB_URI')
mongo_client = MongoClient(mongo_url)
mongo_db = mongo_client.get_default_database()
stopwords = read_stopwords()
twitter_api = TwitterAPI(twitter_consumer_key, twitter_consumer_secret, twitter_access_token, twitter_access_token_secret)
countries = read_countries()
twitter_stream = twitter_api.request('statuses/filter', {'locations': countries['France'] })
for tweet in twitter_stream:
if 'text' in tweet:
tweet_process(tweet, stopwords, mongo_db)
|
[] |
[] |
[
"TWITTER_CONSUMER_KEY",
"TWITTER_CONSUMER_SECRET",
"MONGOLAB_URI",
"TWITTER_ACCESS_TOKEN_SECRET",
"TWITTER_ACCESS_TOKEN"
] |
[]
|
["TWITTER_CONSUMER_KEY", "TWITTER_CONSUMER_SECRET", "MONGOLAB_URI", "TWITTER_ACCESS_TOKEN_SECRET", "TWITTER_ACCESS_TOKEN"]
|
python
| 5 | 0 | |
controllers/tc000010_no_outputs_test.go
|
package controllers
import (
"context"
"os"
"testing"
"time"
. "github.com/onsi/gomega"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta1"
infrav1 "github.com/weaveworks/tf-controller/api/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
// +kubebuilder:docs-gen:collapse=Imports
func Test_000010_no_outputs_test(t *testing.T) {
Spec("This spec describes the behaviour of a Terraform resource with no backend, and `auto` approve.")
It("should be reconciled to have available outputs.")
const (
sourceName = "test-tf-controller-no-output"
terraformName = "helloworld-no-outputs"
)
g := NewWithT(t)
ctx := context.Background()
Given("a GitRepository")
By("defining a new GitRepository resource.")
testRepo := sourcev1.GitRepository{
ObjectMeta: metav1.ObjectMeta{
Name: sourceName,
Namespace: "flux-system",
},
Spec: sourcev1.GitRepositorySpec{
URL: "https://github.com/openshift-fluxv2-poc/podinfo",
Reference: &sourcev1.GitRepositoryRef{
Branch: "master",
},
Interval: metav1.Duration{Duration: time.Second * 30},
GitImplementation: "go-git",
},
}
By("creating the GitRepository resource in the cluster.")
It("should be created successfully.")
g.Expect(k8sClient.Create(ctx, &testRepo)).Should(Succeed())
defer func() { g.Expect(k8sClient.Delete(ctx, &testRepo)).Should(Succeed()) }()
Given("the GitRepository's reconciled status.")
By("setting the GitRepository's status, with the downloadable BLOB's URL, and the correct checksum.")
updatedTime := time.Now()
testRepo.Status = sourcev1.GitRepositoryStatus{
ObservedGeneration: int64(1),
Conditions: []metav1.Condition{
{
Type: "Ready",
Status: metav1.ConditionTrue,
LastTransitionTime: metav1.Time{Time: updatedTime},
Reason: "GitOperationSucceed",
Message: "Fetched revision: master/b8e362c206e3d0cbb7ed22ced771a0056455a2fb",
},
},
URL: server.URL() + "/file.tar.gz",
Artifact: &sourcev1.Artifact{
Path: "gitrepository/flux-system/test-tf-controller/b8e362c206e3d0cbb7ed22ced771a0056455a2fb.tar.gz",
URL: server.URL() + "/file.tar.gz",
Revision: "master/b8e362c206e3d0cbb7ed22ced771a0056455a2fb",
Checksum: "80ddfd18eb96f7d31cadc1a8a5171c6e2d95df3f6c23b0ed9cd8dddf6dba1406", // must be the real checksum value
LastUpdateTime: metav1.Time{Time: updatedTime},
},
}
It("should be updated successfully.")
g.Expect(k8sClient.Status().Update(ctx, &testRepo)).Should(Succeed())
Given("a Terraform resource with auto approve, attached to the given GitRepository resource.")
By("creating a new TF resource and attaching to the repo via `sourceRef`.")
helloWorldTF := infrav1.Terraform{
ObjectMeta: metav1.ObjectMeta{
Name: terraformName,
Namespace: "flux-system",
},
Spec: infrav1.TerraformSpec{
ApprovePlan: "auto",
Path: "./terraform-hello-world-example",
SourceRef: infrav1.CrossNamespaceSourceReference{
Kind: "GitRepository",
Name: sourceName,
Namespace: "flux-system",
},
Interval: metav1.Duration{Duration: time.Second * 10},
},
}
It("should be created and attached successfully.")
g.Expect(k8sClient.Create(ctx, &helloWorldTF)).Should(Succeed())
defer func() { g.Expect(k8sClient.Delete(ctx, &helloWorldTF)).Should(Succeed()) }()
By("checking that the TF resource existed inside the cluster.")
helloWorldTFKey := types.NamespacedName{Namespace: "flux-system", Name: terraformName}
createdHelloWorldTF := infrav1.Terraform{}
g.Eventually(func() bool {
err := k8sClient.Get(ctx, helloWorldTFKey, &createdHelloWorldTF)
if err != nil {
return false
}
return true
}, timeout, interval).Should(BeTrue())
It("should be reconciled and contain some status conditions.")
By("checking that the TF resource's status conditions has some elements.")
g.Eventually(func() int {
err := k8sClient.Get(ctx, helloWorldTFKey, &createdHelloWorldTF)
if err != nil {
return -1
}
return len(createdHelloWorldTF.Status.Conditions)
}, timeout, interval).ShouldNot(BeZero())
It("should be planned.")
By("checking that the Plan's reason of the TF resource become `TerraformPlannedWithChanges`.")
g.Eventually(func() interface{} {
err := k8sClient.Get(ctx, helloWorldTFKey, &createdHelloWorldTF)
if err != nil {
return nil
}
for _, c := range createdHelloWorldTF.Status.Conditions {
if c.Type == "Plan" {
return map[string]interface{}{
"Type": c.Type,
"Reason": c.Reason,
"Message": c.Message,
}
}
}
return createdHelloWorldTF.Status
}, timeout, interval).Should(Equal(map[string]interface{}{
"Type": "Plan",
"Reason": "TerraformPlannedWithChanges",
"Message": "Plan generated",
}))
It("should generate the Secret containing the plan named with branch and commit id.")
By("checking that the Secret contains plan-master-b8e362c206e3d0cbb7ed22ced771a0056455a2fb in its labels.")
tfplanKey := types.NamespacedName{Namespace: "flux-system", Name: "tfplan-default-" + terraformName}
tfplanSecret := corev1.Secret{}
g.Eventually(func() map[string]interface{} {
err := k8sClient.Get(ctx, tfplanKey, &tfplanSecret)
if err != nil {
return nil
}
return map[string]interface{}{
"SavedPlan": tfplanSecret.Labels["savedPlan"],
"Is TFPlan empty ?": string(tfplanSecret.Data["tfplan"]) == "",
"HasEncodingAnnotation": tfplanSecret.Annotations["encoding"] == "gzip",
}
}, timeout, interval).Should(Equal(map[string]interface{}{
"SavedPlan": "plan-master-b8e362c206e3d0cbb7ed22ced771a0056455a2fb",
"Is TFPlan empty ?": false,
"HasEncodingAnnotation": true,
}))
It("should contain an Apply condition saying that the plan were apply successfully.")
By("checking that the reason of the Apply condition is TerraformAppliedSucceed, and the LastAppliedPlan is the plan.")
g.Eventually(func() map[string]interface{} {
err := k8sClient.Get(ctx, helloWorldTFKey, &createdHelloWorldTF)
if err != nil {
return nil
}
for _, c := range createdHelloWorldTF.Status.Conditions {
if c.Type == "Apply" {
return map[string]interface{}{
"Type": c.Type,
"Reason": c.Reason,
"Message": c.Message,
"LastAppliedPlan": createdHelloWorldTF.Status.Plan.LastApplied,
}
}
}
return nil
}, timeout, interval).Should(Equal(map[string]interface{}{
"Type": "Apply",
"Reason": infrav1.TFExecApplySucceedReason,
"Message": "Applied successfully",
"LastAppliedPlan": "plan-master-b8e362c206e3d0cbb7ed22ced771a0056455a2fb",
}))
It("should have an available output.")
By("checking that the Terraform resource's .status.availableOutputs contains hello_world as an output name.")
g.Eventually(func() []string {
err := k8sClient.Get(ctx, helloWorldTFKey, &createdHelloWorldTF)
if err != nil {
return nil
}
return createdHelloWorldTF.Status.AvailableOutputs
}, timeout, interval).Should(Equal([]string{"hello_world"}))
if os.Getenv("DISABLE_TF_K8S_BACKEND") == "1" {
It("should not produce a Secret because the controller runs locally, outside Kubernetes.")
By("checking there are no secret generated by default.")
tfStateKey := types.NamespacedName{Namespace: "flux-system", Name: "tfstate-default-" + terraformName}
tfStateSecret := corev1.Secret{}
g.Eventually(func() string {
err := k8sClient.Get(ctx, tfStateKey, &tfStateSecret)
if err != nil {
return err.Error()
}
return tfStateSecret.Name
}, timeout, interval).Should(Equal("secrets \"tfstate-default-helloworld-no-outputs\" not found"))
} else {
// TODO there's must be the default tfstate secret
}
}
|
[
"\"DISABLE_TF_K8S_BACKEND\""
] |
[] |
[
"DISABLE_TF_K8S_BACKEND"
] |
[]
|
["DISABLE_TF_K8S_BACKEND"]
|
go
| 1 | 0 | |
util/plot.py
|
import matplotlib.pyplot as plt
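# Minimal usage sketch (hypothetical data): the dict must contain a 'key' entry holding the
# x-axis values; every other entry is plotted as one labelled line against that key.
#   plot({'key': [0.1, 0.2, 0.3], 'female': [10, 20, 30], 'male': [15, 25, 35]})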
def plot(data):
if 'key' not in data:
raise KeyError("x-axis label needs to be named as key in the data dict.")
key = data['key']
fig, ax1 = plt.subplots()
ax1.set_xlabel('Threshold of possibility to categorize')
ax1.set_ylabel('Percentage in total population')
# ax2 = ax1.twinx()
# ax2.set_ylable('ratio of male to female user')
for lines in data:
if lines == 'key':
continue
else :
line_name = lines
line_value = data[lines]
ax1.plot(key, line_value, '-o', label=line_name)
plt.legend(loc='best')
plt.show()
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
alphazero/mcts.py
|
import os
import sys
import math
import random
import numpy as np
from copy import deepcopy
sys.path.append(os.path.join(os.environ["HOME"], "AlphaTTT"))
from environment import Environment
from alphazero.database import prepare_state
np.random.seed(80085)
random.seed(80085)
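# PUCT_score() below combines the current value estimate with a prior-weighted exploration bonus.
# Worked example with made-up numbers: child_value=0.5, child_prior=0.2, parent_visit_count=100,
# child_visit_count=10, c_puct=1.5 gives 0.5 + 1.5 * 0.2 * sqrt(100) / (10 + 1) ~= 0.77.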
def PUCT_score(child_value, child_prior, parent_visit_count, child_visit_count, c_puct):
pb_c = child_prior * math.sqrt(parent_visit_count) / (child_visit_count + 1)
return child_value + c_puct * pb_c
class MCTS():
def __init__(self, model, root_state, args):
'''
model - class with predict method that returns a valid policy and value
root_state - board_len x board_len array with the initial state of the game
args:
num_simulations - number of leaf node expansions per search
alpha - mixing constant between policy and dirichlet noise
dirichlet_alpha - dirichlet constant for generating dirichlet distribution
c_puct - exploration constant in PUCT score
'''
self.model = model
self.root = deepcopy(root_state)
self.args = args
self.Qsa = {} # self.Qsa(s, a) = Q value for (s, a)
self.Nsa = {} # self.Nsa(s, a) = (s, a) visit count
self.Ns = {} # self.Ns(s) = s visit count
self.Ps = {} # self.Ps(s) = list of available actions in s and corresponding raw probabilities
self.Es = {} # terminal states, potentially going to do this if not too computationally expensive and dirty
# Add dirichlet noise to initial root node
self.add_dirichlet()
def add_dirichlet(self):
rs = self.root.tobytes()
if rs not in self.Ps:
self.find_leaf(deepcopy(self.root))
if self.Es[rs] == 10:
dirichlet = np.random.dirichlet([self.args["dirichlet_alpha"]]*len(self.Ps[rs]))
for i, (move, prob) in enumerate(self.Ps[rs]):
self.Ps[rs][i] = (move, (1 - self.args["alpha"]) * prob + dirichlet[i] * self.args["alpha"])
def search(self): # builds the search tree from the root node
for i in range(self.args["num_simulations"]):
self.find_leaf(deepcopy(self.root))
return
def find_leaf(self, state):
s = state.tobytes()
if s not in self.Es:
self.Es[s] = Environment.game_over(state)
if self.Es[s] != 10:
# terminal state
return -self.Es[s]
if s not in self.Ps: # expand leaf node
p, v = self.model.predict(prepare_state(state))
availability_mask = (state == 0)
p *= availability_mask
if np.sum(p) > 0.0:
p /= np.sum(p) # re-normalize
move_probs = []
for i, row in enumerate(p):
for j, prob in enumerate(row):
if state[i][j] == 0:
move_probs.append(((i, j), prob))
self.Ps[s] = move_probs
self.Ns[s] = 1
return -v
max_puct = -float('inf')
max_action = None
for move, prob in self.Ps[s]:
(Nc, Qc) = (self.Nsa[(s, move)], self.Qsa[(s, move)]) if (s, move) in self.Nsa else (0, 0.0)
puct = PUCT_score(Qc, prob, self.Ns[s], Nc, self.args["c_puct"])
if puct > max_puct:
max_puct = puct
max_action = move
a = max_action
state[a] = 1
state *= -1
v = self.find_leaf(state)
if (s, a) in self.Nsa:
self.Nsa[(s, a)] += 1
self.Qsa[(s, a)] = (self.Nsa[(s, a)] * self.Qsa[(s, a)] + v) / (self.Nsa[(s, a)] + 1)
else:
self.Nsa[(s, a)] = 1
self.Qsa[(s, a)] = v
self.Ns[s] += 1
return -v
def get_pi(self, tau=1.0, as_prob=True):
move_dist = np.zeros((len(self.root), len(self.root)))
rs = self.root.tobytes()
for move, _ in self.Ps[rs]:
move_dist[move] = self.Nsa[(rs, move)] if (rs, move) in self.Nsa else 0
if as_prob is True:
if tau < 0.1: # protecting from numerical overflow
z = np.zeros(move_dist.shape)
move = np.unravel_index(np.argmax(move_dist), move_dist.shape)
z[move[0]][move[1]] = 1.0
move_dist = z
else:
move_dist = np.power(move_dist, 1.0/tau)
if np.sum(move_dist) > 0.0:
move_dist /= np.sum(move_dist)
return move_dist
def select_move(self, tau=1.0, external_move=None):
if external_move is None:
probas = self.get_pi(tau)
selected_move = int(np.random.choice(len(probas.flatten()), 1, p=probas.flatten()))
selected_move = np.unravel_index(selected_move, probas.shape)
else:
selected_move = external_move
self.root[selected_move] = 1
self.root *= -1
# Add dirichlet noise to new root node:
self.add_dirichlet()
return selected_move
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
Tutoriales/gRPC/Server/server.go
|
// Main package; execution will start here
package main
// Import dependencies; note that we are in a module called grpctuiter
import (
"context"
"fmt"
"os"
"google.golang.org/grpc"
"log"
"net"
"tuiterserver/greet.pb"
)
// Define a struct that gRPC will later use to build a server
type server struct{}
// Function that will be called from the client
// We must pass it a context in which the function will run
// And use the classes that were generated from our proto file
// It will return a response as defined in our protofile, or an error
func (*server) Greet(ctx context.Context, req *greetpb.GreetRequest) (*greetpb.GreetResponse, error) {
fmt.Printf(">> SERVER: Función Greet llamada con éxito. Datos: %v\n", req)
// All the data can be obtained from req
// It will have the same structure we defined in the protofile
// For that, in this case, we use GetGreeting
firstName := req.GetGreeting().GetFirstName()
message := req.GetGreeting().GetMessage()
result := firstName + " dice " + message
fmt.Printf(">> SERVER: %s\n", result)
// Create a new GreetResponse object as defined in the protofile
res := &greetpb.GreetResponse{
Result: result,
}
return res, nil
}
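// Note: main() below reads the listen address from the HOST environment variable.
// A hypothetical value such as HOST=0.0.0.0:50051 would make net.Listen bind the gRPC
// server to port 50051 on all interfaces.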
// Main function
func main() {
// Read the host from the environment variables
host := os.Getenv("HOST")
fmt.Println(">> SERVER: Iniciando en ", host)
// First open a port so we can listen
// We open it on this arbitrary port
lis, err := net.Listen("tcp", host)
if err != nil {
log.Fatalf(">> SERVER: Error inicializando el servidor: %v", err)
}
fmt.Println(">> SERVER: Empezando server gRPC")
// Now we can start a gRPC server
s := grpc.NewServer()
// Register the service using the code generated from the protofile
greetpb.RegisterGreetServiceServer(s, &server{})
fmt.Println(">> SERVER: Escuchando servicio...")
// Start serving; exit if there is an error
if err := s.Serve(lis); err != nil {
log.Fatalf(">> SERVER: Error inicializando el listener: %v", err)
}
}
|
[
"\"HOST\""
] |
[] |
[
"HOST"
] |
[]
|
["HOST"]
|
go
| 1 | 0 | |
qa/pull-tester/rpc-tests.py
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
from tests_config import *
#If imported values are not defined then set to zero (or disabled)
if 'ENABLE_WALLET' not in vars():
ENABLE_WALLET=0
if 'ENABLE_BITCOIND' not in vars():
ENABLE_BITCOIND=0
if 'ENABLE_UTILS' not in vars():
ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
ENABLE_ZMQ=0
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passOn string
opts = set()
passOn = ""
p = re.compile("^--")
bold = ("","")
if (os.name == 'posix'):
bold = ('\033[0m', '\033[1m')
for arg in sys.argv[1:]:
if arg == '--coverage':
ENABLE_COVERAGE = 1
elif (p.match(arg) or arg == "-h"):
passOn += " " + arg
else:
opts.add(arg)
#Set env vars
buildDir = BUILDDIR
if "DASHD" not in os.environ:
os.environ["DASHD"] = buildDir + '/src/corvusd' + EXEEXT
if "DASHCLI" not in os.environ:
os.environ["DASHCLI"] = buildDir + '/src/corvus-cli' + EXEEXT
if EXEEXT == ".exe" and "-win" not in opts:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print "Win tests currently disabled by default. Use -win option to enable"
sys.exit(0)
if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
print "No rpc tests to run. Wallet, utils, and bitcoind must all be enabled"
sys.exit(0)
# python-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
try:
import zmq
except ImportError as e:
print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or " \
"to run zmq tests, see dependency info in /qa/README.md.")
raise e
#Tests
testScripts = [
'bip68-112-113-p2p.py',
'wallet.py',
'wallet-hd.py',
'listtransactions.py',
'receivedby.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rawtransactions.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
'mempool_limit.py',
'httpbasics.py',
'multi_rpc.py',
'zapwallettxes.py',
'proxy_test.py',
'merkle_blocks.py',
'fundrawtransaction.py',
'fundrawtransaction-hd.py',
'signrawtransactions.py',
'walletbackup.py',
'nodehandling.py',
'reindex.py',
'addressindex.py',
'timestampindex.py',
'spentindex.py',
'decodescript.py',
'p2p-fullblocktest.py', # NOTE: needs dash_hash to pass
'blockchain.py',
'disablewallet.py',
'sendheaders.py', # NOTE: needs dash_hash to pass
'keypool.py',
'keypool-hd.py',
'prioritise_transaction.py',
'invalidblockrequest.py', # NOTE: needs dash_hash to pass
'invalidtxrequest.py', # NOTE: needs dash_hash to pass
'abandonconflict.py',
'p2p-versionbits-warning.py',
]
if ENABLE_ZMQ:
testScripts.append('zmq_test.py')
testScriptsExt = [
'bip9-softforks.py',
'bip65-cltv.py',
'bip65-cltv-p2p.py', # NOTE: needs dash_hash to pass
'bip68-sequence.py',
'bipdersig-p2p.py', # NOTE: needs dash_hash to pass
'bipdersig.py',
'getblocktemplate_longpoll.py', # FIXME: "socket.error: [Errno 54] Connection reset by peer" on my Mac, same as https://github.com/bitcoin/bitcoin/issues/6651
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
# 'pruning.py', # Prune mode is incompatible with -txindex.
'forknotify.py',
'invalidateblock.py',
# 'rpcbind_test.py', #temporary, bug in libevent, see #6655
'smartfees.py',
'maxblocksinflight.py',
'p2p-acceptblock.py', # NOTE: needs dash_hash to pass
'mempool_packages.py',
'maxuploadtarget.py',
# 'replace-by-fee.py', # RBF is disabled in Corvus Glaive
]
def runtests():
coverage = None
if ENABLE_COVERAGE:
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
rpcTestDir = buildDir + '/qa/rpc-tests/'
run_extended = '-extended' in opts
cov_flag = coverage.flag if coverage else ''
flags = " --srcdir %s/src %s %s" % (buildDir, cov_flag, passOn)
#Run Tests
for i in range(len(testScripts)):
if (len(opts) == 0
or (len(opts) == 1 and "-win" in opts )
or run_extended
or testScripts[i] in opts
or re.sub(".py$", "", testScripts[i]) in opts ):
print("Running testscript %s%s%s ..." % (bold[1], testScripts[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScripts[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
# exit if help is called so we print just one set of
# instructions
p = re.compile(" -h| --help")
if p.match(passOn):
sys.exit(0)
# Run Extended Tests
for i in range(len(testScriptsExt)):
if (run_extended or testScriptsExt[i] in opts
or re.sub(".py$", "", testScriptsExt[i]) in opts):
print(
"Running 2nd level testscript "
+ "%s%s%s ..." % (bold[1], testScriptsExt[i], bold[0]))
time0 = time.time()
subprocess.check_call(
rpcTestDir + testScriptsExt[i] + flags, shell=True)
print("Duration: %s s\n" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir %s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `qa/rpc-tests/test-framework/coverage.py`
REFERENCE_FILENAME = 'rpc_interface.txt'
COVERAGE_FILE_PREFIX = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(COVERAGE_FILE_PREFIX):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
runtests()
|
[] |
[] |
[
"DASHD",
"DASHCLI"
] |
[]
|
["DASHD", "DASHCLI"]
|
python
| 2 | 0 | |
vendor/github.com/yvasiyarov/swagger/parser/parser.go
|
package parser
import (
"encoding/json"
"go/ast"
goparser "go/parser"
"go/token"
"log"
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
"fmt"
)
var vendoringPath string
type Parser struct {
Listing *ResourceListing
TopLevelApis map[string]*ApiDeclaration
PackagesCache map[string]map[string]*ast.Package
CurrentPackage string
TypeDefinitions map[string]map[string]*ast.TypeSpec
PackagePathCache map[string]string
PackageImports map[string]map[string][]string
BasePath, ControllerClass, Ignore string
IsController func(*ast.FuncDecl, string) bool
TypesImplementingMarshalInterface map[string]string
}
func NewParser() *Parser {
return &Parser{
Listing: &ResourceListing{
Infos: Infomation{},
Apis: make([]*ApiRef, 0),
},
PackagesCache: make(map[string]map[string]*ast.Package),
TopLevelApis: make(map[string]*ApiDeclaration),
TypeDefinitions: make(map[string]map[string]*ast.TypeSpec),
PackagePathCache: make(map[string]string),
PackageImports: make(map[string]map[string][]string),
TypesImplementingMarshalInterface: make(map[string]string),
}
}
func (parser *Parser) IsImplementMarshalInterface(typeName string) bool {
_, ok := parser.TypesImplementingMarshalInterface[typeName]
return ok
}
//Read web/main.go to get General info
func (parser *Parser) ParseGeneralApiInfo(mainApiFile string) {
fileSet := token.NewFileSet()
fileTree, err := goparser.ParseFile(fileSet, mainApiFile, nil, goparser.ParseComments)
if err != nil {
log.Fatalf("Can not parse general API information: %v\n", err)
}
parser.Listing.BasePath = "{{.}}"
parser.Listing.SwaggerVersion = SwaggerVersion
if fileTree.Comments != nil {
for _, comment := range fileTree.Comments {
for _, commentLine := range strings.Split(comment.Text(), "\n") {
attribute := strings.ToLower(strings.Split(commentLine, " ")[0])
switch attribute {
case "@apiversion":
parser.Listing.ApiVersion = strings.TrimSpace(commentLine[len(attribute):])
case "@apititle":
parser.Listing.Infos.Title = strings.TrimSpace(commentLine[len(attribute):])
case "@apidescription":
parser.Listing.Infos.Description = strings.TrimSpace(commentLine[len(attribute):])
case "@termsofserviceurl":
parser.Listing.Infos.TermsOfServiceUrl = strings.TrimSpace(commentLine[len(attribute):])
case "@contact":
parser.Listing.Infos.Contact = strings.TrimSpace(commentLine[len(attribute):])
case "@licenseurl":
parser.Listing.Infos.LicenseUrl = strings.TrimSpace(commentLine[len(attribute):])
case "@license":
parser.Listing.Infos.License = strings.TrimSpace(commentLine[len(attribute):])
case "@basepath":
parser.Listing.BasePath = strings.TrimSpace(commentLine[len(attribute):])
}
}
}
}
}
func (parser *Parser) GetResourceListingJson() []byte {
json, err := json.MarshalIndent(parser.Listing, "", " ")
if err != nil {
log.Fatalf("Can not serialise ResourceListing to JSON: %v\n", err)
}
return json
}
func (parser *Parser) GetApiDescriptionJson() []byte {
json, err := json.MarshalIndent(parser.TopLevelApis, "", " ")
if err != nil {
log.Fatalf("Can not serialise []ApiDescription to JSON: %v\n", err)
}
return json
}
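// CheckRealPackagePath resolves an import path to a directory on disk, trying (in order) the
// vendor directory (or the user-supplied vendoring path), each entry of GOPATH, and finally
// GOROOT (src, then src/pkg for older Go releases). Hypothetical example: with
// GOPATH=/home/dev/go, the import "github.com/foo/bar" would resolve to
// /home/dev/go/src/github.com/foo/bar if that directory exists.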
func (parser *Parser) CheckRealPackagePath(packagePath string) string {
packagePath = strings.Trim(packagePath, "\"")
if cachedResult, ok := parser.PackagePathCache[packagePath]; ok {
return cachedResult
}
// Hack vendoring of 'golang.org/x' by the standard library
if strings.HasPrefix(packagePath, "golang_org/x/") {
packagePath = filepath.Join("vendor", packagePath)
}
pkgRealpath := ""
goVersion := runtime.Version()
// check if vendor is enabled for version GO 1.5 or 1.6
vendorEnable := true
if goVersion == "go1.5" || goVersion == "go1.6" {
if os.Getenv("GO15VENDOREXPERIMENT") == "0" {
vendorEnable = false
}
}
// first check vendor folder, vendoring in GO 1.7 and greater is officially supported
// evaluate if the user specified a different vendor directory rather
// than using current working directory to find vendor
if vendorEnable {
var vendorPath string
if vendoringPath == "" {
vendorPath = filepath.Join("vendor", packagePath)
} else {
vendorPath = fmt.Sprintf("%s/%s", vendoringPath, packagePath)
}
if evalutedPath, err := filepath.EvalSymlinks(vendorPath); err == nil {
if _, err := os.Stat(evalutedPath); err == nil {
pkgRealpath = evalutedPath
}
}
}
// next, check GOPATH
if pkgRealpath == "" {
gopath := os.Getenv("GOPATH")
if gopath == "" {
log.Fatalf("Please, set $GOPATH environment variable\n")
}
gopathsList := filepath.SplitList(gopath)
for _, path := range gopathsList {
if evalutedPath, err := filepath.EvalSymlinks(filepath.Join(path, "src", packagePath)); err == nil {
if _, err := os.Stat(evalutedPath); err == nil {
pkgRealpath = evalutedPath
break
}
}
}
}
// next, check GOROOT (/src)
if pkgRealpath == "" {
goroot := filepath.Clean(runtime.GOROOT())
if goroot == "" {
log.Fatalf("Please, set $GOROOT environment variable\n")
}
if evalutedPath, err := filepath.EvalSymlinks(filepath.Join(goroot, "src", packagePath)); err == nil {
if _, err := os.Stat(evalutedPath); err == nil {
pkgRealpath = evalutedPath
}
}
// next, check GOROOT (/src/pkg) (for golang < v1.4)
if pkgRealpath == "" {
if evalutedPath, err := filepath.EvalSymlinks(filepath.Join(goroot, "src", "pkg", packagePath)); err == nil {
if _, err := os.Stat(evalutedPath); err == nil {
pkgRealpath = evalutedPath
}
}
}
}
parser.PackagePathCache[packagePath] = pkgRealpath
return pkgRealpath
}
func (parser *Parser) GetRealPackagePath(packagePath string) string {
pkgRealpath := parser.CheckRealPackagePath(packagePath)
if pkgRealpath == "" {
log.Fatalf("Can not find package %s \n", packagePath)
}
return pkgRealpath
}
func (parser *Parser) GetPackageAst(packagePath string) map[string]*ast.Package {
//log.Printf("Parse %s package\n", packagePath)
if cache, ok := parser.PackagesCache[packagePath]; ok {
return cache
} else {
fileSet := token.NewFileSet()
astPackages, err := goparser.ParseDir(fileSet, packagePath, ParserFileFilter, goparser.ParseComments)
if err != nil {
log.Fatalf("Parse of %s pkg cause error: %s\n", packagePath, err)
}
parser.PackagesCache[packagePath] = astPackages
return astPackages
}
}
func (parser *Parser) AddOperation(op *Operation) {
path := []string{}
for _, pathPart := range strings.Split(op.Path, "/") {
if pathPart = strings.TrimSpace(pathPart); pathPart != "" {
path = append(path, pathPart)
}
}
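	// NOTE: this assumes op.Path contains at least one non-empty segment; the
	// first segment becomes the top-level resource unless ForceResource overrides it.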
resource := path[0]
if op.ForceResource != "" {
resource = op.ForceResource
}
api, ok := parser.TopLevelApis[resource]
if !ok {
api = NewApiDeclaration()
api.ApiVersion = parser.Listing.ApiVersion
api.SwaggerVersion = SwaggerVersion
api.ResourcePath = "/" + resource
api.BasePath = parser.Listing.BasePath
parser.TopLevelApis[resource] = api
}
found := false
for _, apiRef := range parser.Listing.Apis {
if apiRef.Path == api.ResourcePath {
found = true
}
}
if !found {
apiRef := &ApiRef{
Path: api.ResourcePath,
Description: op.Summary,
}
parser.Listing.Apis = append(parser.Listing.Apis, apiRef)
}
api.AddOperation(op)
}
func (parser *Parser) ParseApi(packageNames, vendorPath string) {
vendoringPath = vendorPath
packages := parser.ScanPackages(strings.Split(packageNames, ","))
for _, packageName := range packages {
parser.ParseTypeDefinitions(packageName)
}
for _, packageName := range packages {
parser.ParseApiDescription(packageName)
}
}
func (parser *Parser) ScanPackages(packages []string) []string {
	res := make([]string, 0, len(packages))
	existsPackages := make(map[string]bool)
	for _, packageName := range packages {
		if !existsPackages[packageName] {
			// Add package
			existsPackages[packageName] = true
			res = append(res, packageName)
			// get its real path
pkgRealPath := parser.GetRealPackagePath(packageName)
// Then walk
var walker filepath.WalkFunc = func(path string, info os.FileInfo, err error) error {
// avoid listing hidden directories with initial "_" names and vendor dir
if info.IsDir() && !strings.Contains(path, "/_") && !strings.Contains(path, "/vendor") {
if idx := strings.Index(path, packageName); idx != -1 {
pack := path[idx:]
					if !existsPackages[pack] {
existsPackages[pack] = true
res = append(res, pack)
}
}
}
return nil
}
filepath.Walk(pkgRealPath, walker)
}
}
return res
}
func (parser *Parser) ParseTypeDefinitions(packageName string) {
parser.CurrentPackage = packageName
pkgRealPath := parser.GetRealPackagePath(packageName)
// log.Printf("Parse type definition of %#v\n", packageName)
if _, ok := parser.TypeDefinitions[pkgRealPath]; !ok {
parser.TypeDefinitions[pkgRealPath] = make(map[string]*ast.TypeSpec)
}
astPackages := parser.GetPackageAst(pkgRealPath)
for _, astPackage := range astPackages {
for _, astFile := range astPackage.Files {
for _, astDeclaration := range astFile.Decls {
if generalDeclaration, ok := astDeclaration.(*ast.GenDecl); ok && generalDeclaration.Tok == token.TYPE {
for _, astSpec := range generalDeclaration.Specs {
if typeSpec, ok := astSpec.(*ast.TypeSpec); ok {
parser.TypeDefinitions[pkgRealPath][typeSpec.Name.String()] = typeSpec
}
}
}
}
}
}
//log.Fatalf("Type definition parsed %#v\n", parser.ParseImportStatements(packageName))
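	// Recursively parse the type definitions of every (non-ignored) imported
	// package so that model references across packages can be resolved later.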
	for importedPackage := range parser.ParseImportStatements(packageName) {
//log.Printf("Import: %v, %v\n", importedPackage, v)
parser.ParseTypeDefinitions(importedPackage)
}
}
func (parser *Parser) ParseImportStatements(packageName string) map[string]bool {
parser.CurrentPackage = packageName
pkgRealPath := parser.GetRealPackagePath(packageName)
imports := make(map[string]bool)
astPackages := parser.GetPackageAst(pkgRealPath)
parser.PackageImports[pkgRealPath] = make(map[string][]string)
for _, astPackage := range astPackages {
for _, astFile := range astPackage.Files {
for _, astImport := range astFile.Imports {
importedPackageName := strings.Trim(astImport.Path.Value, "\"")
if !parser.isIgnoredPackage(importedPackageName) {
realPath := parser.GetRealPackagePath(importedPackageName)
//log.Printf("path: %#v, original path: %#v", realPath, astImport.Path.Value)
if _, ok := parser.TypeDefinitions[realPath]; !ok {
imports[importedPackageName] = true
//log.Printf("Parse %s, Add new import definition:%s\n", packageName, astImport.Path.Value)
}
var importedPackageAlias string
if astImport.Name != nil && astImport.Name.Name != "." && astImport.Name.Name != "_" {
importedPackageAlias = astImport.Name.Name
} else {
importPath := strings.Split(importedPackageName, "/")
importedPackageAlias = importPath[len(importPath)-1]
}
isExists := false
for _, v := range parser.PackageImports[pkgRealPath][importedPackageAlias] {
if v == importedPackageName {
isExists = true
}
}
if !isExists {
parser.PackageImports[pkgRealPath][importedPackageAlias] = append(parser.PackageImports[pkgRealPath][importedPackageAlias], importedPackageName)
}
}
}
}
}
return imports
}
func (parser *Parser) GetModelDefinition(model string, packageName string) *ast.TypeSpec {
pkgRealPath := parser.CheckRealPackagePath(packageName)
if pkgRealPath == "" {
return nil
}
packageModels, ok := parser.TypeDefinitions[pkgRealPath]
if !ok {
return nil
}
	astTypeSpec := packageModels[model]
return astTypeSpec
}
func (parser *Parser) FindModelDefinition(modelName string, currentPackage string) (*ast.TypeSpec, string) {
var model *ast.TypeSpec
var modelPackage string
modelNameParts := strings.Split(modelName, ".")
	// if there is no dot in the name, it can only be a model from the current package
if len(modelNameParts) == 1 {
modelPackage = currentPackage
if model = parser.GetModelDefinition(modelName, currentPackage); model == nil {
log.Fatalf("Can not find definition of %s model. Current package %s", modelName, currentPackage)
}
} else {
		// first, assume that the name is an absolute package path
absolutePackageName := strings.Join(modelNameParts[:len(modelNameParts)-1], "/")
modelNameFromPath := modelNameParts[len(modelNameParts)-1]
modelPackage = absolutePackageName
if model = parser.GetModelDefinition(modelNameFromPath, absolutePackageName); model == nil {
//can not get model by absolute name.
if len(modelNameParts) > 2 {
log.Fatalf("Can not find definition of %s model. Name looks like absolute, but model not found in %s package", modelNameFromPath, absolutePackageName)
}
// lets try to find it in imported packages
pkgRealPath := parser.CheckRealPackagePath(currentPackage)
if imports, ok := parser.PackageImports[pkgRealPath]; !ok {
log.Fatalf("Can not find definition of %s model. Package %s dont import anything", modelNameFromPath, pkgRealPath)
} else if relativePackage, ok := imports[modelNameParts[0]]; !ok {
log.Fatalf("Package %s is not imported to %s, Imported: %#v\n", modelNameParts[0], currentPackage, imports)
} else {
var modelFound bool
for _, packageName := range relativePackage {
if model = parser.GetModelDefinition(modelNameFromPath, packageName); model != nil {
modelPackage = packageName
modelFound = true
break
}
}
if !modelFound {
log.Fatalf("Can not find definition of %s model in package %s", modelNameFromPath, relativePackage)
}
}
}
}
return model, modelPackage
}
func (parser *Parser) ParseApiDescription(packageName string) {
parser.CurrentPackage = packageName
pkgRealPath := parser.GetRealPackagePath(packageName)
astPackages := parser.GetPackageAst(pkgRealPath)
for _, astPackage := range astPackages {
for _, astFile := range astPackage.Files {
for _, astDescription := range astFile.Decls {
switch astDeclaration := astDescription.(type) {
case *ast.FuncDecl:
if parser.IsController(astDeclaration, parser.ControllerClass) {
operation := NewOperation(parser, packageName)
if astDeclaration.Doc != nil && astDeclaration.Doc.List != nil {
for _, comment := range astDeclaration.Doc.List {
if err := operation.ParseComment(comment.Text); err != nil {
log.Printf("Can not parse comment for function: %v, package: %v, got error: %v\n", astDeclaration.Name.String(), packageName, err)
}
}
}
if operation.Path != "" {
parser.AddOperation(operation)
}
}
}
}
for _, astComment := range astFile.Comments {
for _, commentLine := range strings.Split(astComment.Text(), "\n") {
parser.ParseSubApiDescription(commentLine)
}
}
}
}
}
// Parse sub api declaration
// @SubApi Very fancy API [/fancy-api]
func (parser *Parser) ParseSubApiDescription(commentLine string) {
if !strings.HasPrefix(commentLine, "@SubApi") {
return
} else {
commentLine = strings.TrimSpace(commentLine[len("@SubApi"):])
}
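	// Capture group 1 is the human-readable description and group 2 is the API
	// path inside the square brackets, e.g. "Very fancy API [/fancy-api]".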
re := regexp.MustCompile(`([^\[]+)\[{1}([\w\_\-/]+)`)
if matches := re.FindStringSubmatch(commentLine); len(matches) != 3 {
log.Printf("Can not parse sub api description %s, skipped", commentLine)
} else {
found := false
for _, ref := range parser.Listing.Apis {
if ref.Path == matches[2] {
found = true
ref.Description = strings.TrimSpace(matches[1])
}
}
if !found {
subApi := &ApiRef{Path: matches[2],
Description: strings.TrimSpace(matches[1]),
}
parser.Listing.Apis = append(parser.Listing.Apis, subApi)
}
}
}
func (parser *Parser) isIgnoredPackage(packageName string) bool {
r, _ := regexp.Compile("appengine+")
matched, err := regexp.MatchString(parser.Ignore, packageName)
if err != nil {
log.Fatalf("The -ignore argument is not a valid regular expression: %v\n", err)
}
return packageName == "C" || r.MatchString(packageName) || matched
}
func ParserFileFilter(info os.FileInfo) bool {
name := info.Name()
return !info.IsDir() && !strings.HasPrefix(name, ".") && strings.HasSuffix(name, ".go") && !strings.HasSuffix(name, "_test.go")
}
|
[
"\"GO15VENDOREXPERIMENT\"",
"\"GOPATH\""
] |
[] |
[
"GOPATH",
"GO15VENDOREXPERIMENT"
] |
[]
|
["GOPATH", "GO15VENDOREXPERIMENT"]
|
go
| 2 | 0 | |
store/store_postgres.go
|
package store
import (
"os"
"time"
"github.com/jmoiron/sqlx"
// Import the PostgreSQL driver
_ "github.com/lib/pq"
"github.com/Bowbaq/sauron/model"
)
const schema = `
CREATE TABLE IF NOT EXISTS state (
owner varchar,
name varchar,
branch varchar DEFAULT '',
path varchar DEFAULT '',
timestamp timestamp,
sha varchar(40),
last_checked timestamp,
CONSTRAINT key PRIMARY KEY(owner, name, branch, path)
);
`
const dropSchema = `
DROP TABLE IF EXISTS state;
`
type postgresStore struct {
db *sqlx.DB
}
// NewPostgres instantiates a new concrete Store. NewPostgres panics if the dataSource is invalid
// or if the schema cannot be initialized.
func NewPostgres(dataSource string) Store {
ps := &postgresStore{
db: sqlx.MustConnect("postgres", dataSource),
}
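	// Setting DROP_TABLES to any non-empty value drops the existing state table
	// before the schema is (re)created (presumably for tests or local resets).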
if os.Getenv("DROP_TABLES") != "" {
ps.db.MustExec(dropSchema)
}
ps.db.MustExec(schema)
return ps
}
// GetLastUpdate returns the last time a repository was updated.
func (ps *postgresStore) GetLastUpdate(key WatchKey) (model.Update, error) {
rows, err := ps.db.NamedQuery(`
SELECT timestamp, sha FROM state
WHERE owner = :repository.owner AND name = :repository.name AND branch = :branch AND path = :path`,
key,
)
if err != nil {
return model.Update{}, err
}
var u model.Update
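	// If no row exists for this key, rows.Next() returns false and closes the
	// result set; StructScan then fails with "sql: Rows are closed", which is
	// treated below as "no update recorded yet".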
rows.Next()
err = rows.StructScan(&u)
if err != nil {
if err.Error() == "sql: Rows are closed" {
return u, nil
}
return model.Update{}, err
}
return u, nil
}
// RecordUpdate records the last update for a specific repository.
func (ps *postgresStore) RecordUpdate(key WatchKey, update model.Update) error {
_, err := ps.db.NamedExec(`
INSERT INTO state (
owner, name, branch, path, timestamp, sha, last_checked
)
VALUES (
:repository.owner, :repository.name, :branch, :path, :timestamp, :sha, :last_checked
)
ON CONFLICT (owner, name, branch, path) DO UPDATE SET
timestamp = :timestamp, sha = :sha, last_checked = :last_checked`,
struct {
WatchKey
RepoState
}{
WatchKey: key,
RepoState: RepoState{
Update: update,
LastChecked: time.Now().UTC(),
},
},
)
return err
}
// SetLastChecked records the last check time for a specific repository.
func (ps *postgresStore) SetLastChecked(key WatchKey) error {
_, err := ps.db.NamedExec(
`UPDATE state SET last_checked = :last_checked
WHERE owner = :repository.owner AND name = :repository.name AND branch = :branch AND path = :path`,
struct {
WatchKey
RepoState
}{
WatchKey: key,
RepoState: RepoState{
LastChecked: time.Now().UTC(),
},
},
)
return err
}
|
[
"\"DROP_TABLES\""
] |
[] |
[
"DROP_TABLES"
] |
[]
|
["DROP_TABLES"]
|
go
| 1 | 0 | |
tools/model_export/det_pytorch2onnx.py
|
import argparse
import numpy as np
import onnx
import onnxruntime as rt
import torch
import os
import os.path as osp
from functools import partial
from configs import cfg
from mvt.engines.predictor import get_detector
from mvt.utils.config_util import get_task_cfg
from mvt.utils.io_util import imread
from mvt.utils.geometric_util import imresize
from mvt.utils.photometric_util import imnormalize
from mvt.cores.ops import multiclass_nms
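# Work around "OMP: Error #15" crashes when multiple OpenMP runtimes get loaded
# (common when MKL-linked libraries such as PyTorch and onnxruntime coexist).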
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
def generate_inputs_and_wrap_model(config_path, checkpoint_path, input_config):
"""Prepare sample input and wrap model for ONNX export.
    The ONNX export API only accepts positional args, and all inputs should be
    torch.Tensor or corresponding types (such as a tuple of tensors).
So we should call this function before exporting. This function will:
1. generate corresponding inputs which are used to execute the model.
2. Wrap the model's forward function.
For example, the Det models' forward function has a parameter
    ``return_loss: bool``. Since we want to set it to False and the export API
    supports neither bool arguments nor kwargs, we have to replace the forward
    function like ``model.forward = partial(model.forward, return_loss=False)``.
Args:
config_path (str): the config for the model we want to export to ONNX
checkpoint_path (str): Path to the corresponding checkpoint
input_config (dict): the exactly data in this dict depends on the
framework. For MMSeg, we can just declare the input shape,
and generate the dummy data accordingly. However, for MMDet,
we may pass the real img path, or the NMS will return None
as there is no legal bbox.
Returns:
tuple: (model, tensor_data) wrapped model which can be called by \
model(*tensor_data) and a list of inputs which are used to execute \
the model while exporting.
"""
model = get_detector(cfg, checkpoint_path, device="cpu")
one_img, one_meta = preprocess_example_input(input_config)
tensor_data = [one_img]
model.forward = partial(model.forward, img_metas=[[one_meta]], return_loss=False)
return model, tensor_data
def preprocess_example_input(input_config):
"""Prepare an example input image for ``generate_inputs_and_wrap_model``.
Args:
input_config (dict): customized config describing the example input.
Returns:
tuple: (one_img, one_meta), tensor of the example input image and \
meta information for the example input image.
"""
input_path = input_config["input_path"]
input_shape = input_config["input_shape"]
one_img = imread(input_path)
if "normalize_cfg" in input_config.keys():
normalize_cfg = input_config["normalize_cfg"]
mean = np.array(normalize_cfg["mean"], dtype=np.float32)
std = np.array(normalize_cfg["std"], dtype=np.float32)
one_img = imnormalize(one_img, mean, std)
one_img = imresize(one_img, input_shape[2:][::-1]).transpose(2, 0, 1)
one_img = torch.from_numpy(one_img).unsqueeze(0).float().requires_grad_(True)
(_, C, H, W) = input_shape
one_meta = {
"img_shape": (H, W, C),
"ori_shape": (H, W, C),
"pad_shape": (H, W, C),
"filename": "<demo>.png",
"scale_factor": 1.0,
"flip": False,
}
return one_img, one_meta
def det_pth2onnx(
config_path,
checkpoint_path,
input_img,
input_shape,
opset_version=11,
show=False,
output_file="tmp.onnx",
verify=False,
normalize_cfg=None,
):
input_config = {
"input_shape": input_shape,
"input_path": input_img,
"normalize_cfg": normalize_cfg,
}
# prepare original model and meta for verifying the onnx model
# get config
get_task_cfg(cfg, config_path)
num_classes = cfg.MODEL.BBOX_HEAD[
"num_classes"
    ]  # two-stage models are not supported currently
cfg.MODEL.TRAIN_CFG = None
orig_model = get_detector(cfg, checkpoint_path, device="cpu")
one_img, one_meta = preprocess_example_input(input_config)
model, tensor_data = generate_inputs_and_wrap_model(
cfg, checkpoint_path, input_config
)
# roll implementation
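    # (assumption) torch.roll is not handled well by the ONNX exporter at this
    # opset, so it is monkey-patched with an index_select-based equivalent that
    # the tracer can export; the original torch.roll is not restored afterwards.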
def roll(input, shifts, dims):
if isinstance(shifts, int):
shifts = [shifts]
if isinstance(dims, int):
dims = [dims]
assert len(shifts) == len(dims)
for shift, dim in zip(shifts, dims):
dim_len = input.shape[dim]
shift = torch.tensor(shift)
if shift > 0:
shift = dim_len - shift % dim_len
else:
shift = -shift
inds = (torch.arange(dim_len) + shift) % dim_len
input = torch.index_select(input, dim, inds)
return input
torch.roll = roll
torch.onnx.export(
model,
tensor_data,
output_file,
export_params=True,
keep_initializers_as_inputs=True,
verbose=show,
opset_version=opset_version,
)
model.forward = orig_model.forward
print(f"Successfully exported ONNX model: {output_file}")
if verify:
# check by onnx
onnx_model = onnx.load(output_file)
onnx.checker.check_model(onnx_model)
# check the numerical value
# get pytorch output
pytorch_result = model(tensor_data, [[one_meta]], return_loss=False)
# get onnx output
input_all = [node.name for node in onnx_model.graph.input]
input_initializer = [node.name for node in onnx_model.graph.initializer]
net_feed_input = list(set(input_all) - set(input_initializer))
assert len(net_feed_input) == 1
sess = rt.InferenceSession(output_file)
from mvt.cores.bbox import bbox2result
out_list = sess.run(None, {net_feed_input[0]: one_img.detach().numpy()})
# det_bboxes, det_labels = out_list
ml_bboxes = out_list[0]
ml_cls_scores = out_list[1]
ml_conf_scores = out_list[2]
# # only compare a part of result
conf_thr = cfg.MODEL.TEST_CFG.get("conf_thr", -1)
conf_inds = np.where(ml_conf_scores > conf_thr)
ml_bboxes = ml_bboxes[conf_inds]
ml_cls_scores = ml_cls_scores[conf_inds]
ml_conf_scores = ml_conf_scores[conf_inds]
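        # The exported graph returns raw boxes and scores, so NMS is re-applied
        # here in Python so the ONNX outputs are comparable with the PyTorch result.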
det_bboxes, det_labels = multiclass_nms(
torch.from_numpy(ml_bboxes),
torch.from_numpy(ml_cls_scores),
cfg.MODEL.TEST_CFG["score_thr"],
cfg.MODEL.TEST_CFG["nms"],
cfg.MODEL.TEST_CFG["max_per_img"],
score_factors=ml_conf_scores,
)
onnx_results = bbox2result(det_bboxes, det_labels, num_classes)
print(pytorch_result)
print(onnx_results)
# assert np.allclose(
# pytorch_result[0][38][0][:4], onnx_results[38][0]
# [:4]), 'The outputs are different between Pytorch and ONNX'
# print('The numerical values are the same between Pytorch and ONNX')
def parse_args():
parser = argparse.ArgumentParser(description="Convert MTL models to ONNX")
parser.add_argument("config", help="test config file path")
parser.add_argument("checkpoint", help="checkpoint file")
parser.add_argument("--input-img", type=str, help="Images for input")
parser.add_argument("--show", action="store_true", help="show onnx graph")
parser.add_argument("--output-file", type=str, default="tmp.onnx")
parser.add_argument("--opset-version", type=int, default=11)
parser.add_argument(
"--verify",
action="store_true",
help="verify the onnx model output against pytorch output",
)
parser.add_argument(
"--shape", type=int, nargs="+", default=[416, 416], help="input image size"
)
parser.add_argument(
"--mean",
type=float,
nargs="+",
default=[0, 0, 0],
help="mean value used for preprocess input data",
)
parser.add_argument(
"--std",
type=float,
nargs="+",
default=[255, 255, 255],
help="variance value used for preprocess input data",
)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
# assert args.opset_version == 11, 'Only support opset 11 now'
if not args.input_img:
args.input_img = osp.join(
osp.dirname(__file__), "../meta/test_data/a0519qvbyom_001.jpg"
)
if len(args.shape) == 1:
input_shape = (1, 3, args.shape[0], args.shape[0])
elif len(args.shape) == 2:
input_shape = (1, 3) + tuple(args.shape)
else:
raise ValueError("invalid input shape")
assert len(args.mean) == 3
assert len(args.std) == 3
# print(args.shape, args.mean, args.std)
normalize_cfg = {"mean": args.mean, "std": args.std}
# convert model to onnx file
det_pth2onnx(
args.config,
args.checkpoint,
args.input_img,
input_shape,
opset_version=args.opset_version,
show=args.show,
output_file=args.output_file,
verify=args.verify,
normalize_cfg=normalize_cfg,
)
|
[] |
[] |
[
"KMP_DUPLICATE_LIB_OK"
] |
[]
|
["KMP_DUPLICATE_LIB_OK"]
|
python
| 1 | 0 | |
tests/sparseml/pytorch/datasets/classification/test_mnist.py
|
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from torch.utils.data import Dataset
from sparseml.pytorch.datasets import DatasetRegistry, MNISTDataset
def _validate_mnist(dataset: Dataset):
assert len(dataset) > 0
item = dataset[0]
assert isinstance(item, tuple)
assert item[0].shape[0] == 1
assert item[0].shape[1] == 28
assert item[0].shape[2] == 28
assert item[1] < 10
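# Note: os.getenv returns a string here, so any non-empty value for these skip
# variables (including "0" or "false") causes the tests to be skipped.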
@pytest.mark.skipif(
os.getenv("NM_ML_SKIP_PYTORCH_TESTS", False),
reason="Skipping pytorch tests",
)
@pytest.mark.skipif(
os.getenv("NM_ML_SKIP_DATASET_TESTS", False),
reason="Skipping dataset tests",
)
def test_mnist():
train_dataset = MNISTDataset(train=True)
_validate_mnist(train_dataset)
val_dataset = MNISTDataset(train=False)
_validate_mnist(val_dataset)
reg_dataset = DatasetRegistry.create("mnist", train=False)
_validate_mnist(reg_dataset)
|
[] |
[] |
[
"NM_ML_SKIP_DATASET_TESTS",
"NM_ML_SKIP_PYTORCH_TESTS"
] |
[]
|
["NM_ML_SKIP_DATASET_TESTS", "NM_ML_SKIP_PYTORCH_TESTS"]
|
python
| 2 | 0 |