filename (string, 4-198 chars) | content (string, 25-939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, 2-3.9k chars) | lang (string, 3 classes) | constargcount (float64, 0-129, ⌀ = null) | variableargcount (float64, 0-0, ⌀ = null) | sentence (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
external/heng/doodle/2018-10-10/common.py
|
from include import *
from utility.draw import *
from utility.file import *
from net.pytorchviz import *
from net.rate import *
from net.layer.function import *
from net.layer.sync_batchnorm import *
from net.center_loss import *
#--
from dataset.transform import *

# kaggle ---
# DATA_DIR = '/root/share/project/kaggle/google_doodle/data'
DATA_DIR = '/media/ssd/data/kaggle/google_doodle/data'

#---------------------------------------------------------------------------------
class Struct(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

#---------------------------------------------------------------------------------
print('@%s: ' % os.path.basename(__file__))

if 1:
    SEED = 35202  #123 #int(time.time())
    random.seed(SEED)
    np.random.seed(SEED)
    torch.manual_seed(SEED)
    torch.cuda.manual_seed_all(SEED)
    print('\tset random seed')
    print('\t\tSEED=%d' % SEED)

if 1:
    torch.backends.cudnn.benchmark = True  ## uses the built-in cudnn auto-tuner to find the fastest convolution algorithms
    torch.backends.cudnn.enabled = True
    print('\tset cuda environment')
    print('\t\ttorch.__version__ =', torch.__version__)
    print('\t\ttorch.version.cuda =', torch.version.cuda)
    print('\t\ttorch.backends.cudnn.version() =', torch.backends.cudnn.version())
    try:
        print('\t\tos[\'CUDA_VISIBLE_DEVICES\'] =', os.environ['CUDA_VISIBLE_DEVICES'])
        NUM_CUDA_DEVICES = len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))
    except Exception:
        print('\t\tos[\'CUDA_VISIBLE_DEVICES\'] =', 'None')
        NUM_CUDA_DEVICES = 1
    print('\t\ttorch.cuda.device_count() =', torch.cuda.device_count())
    #print('\t\ttorch.cuda.current_device() =', torch.cuda.current_device())

print('')
#---------------------------------------------------------------------------------
## useful : http://forums.fast.ai/t/model-visualization/12365/2
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
cni/network/plugin/main.go
|
// Copyright 2017 Microsoft. All rights reserved.
// MIT License
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"reflect"
"time"
"github.com/Azure/azure-container-networking/cni"
"github.com/Azure/azure-container-networking/cni/network"
"github.com/Azure/azure-container-networking/common"
acn "github.com/Azure/azure-container-networking/common"
"github.com/Azure/azure-container-networking/log"
"github.com/Azure/azure-container-networking/platform"
"github.com/Azure/azure-container-networking/telemetry"
"github.com/containernetworking/cni/pkg/skel"
)
const (
hostNetAgentURL = "http://168.63.129.16/machine/plugins?comp=netagent&type=cnireport"
ipamQueryURL = "http://168.63.129.16/machine/plugins?comp=nmagent&type=getinterfaceinfov1"
pluginName = "CNI"
telemetryNumRetries = 5
telemetryWaitTimeInMilliseconds = 200
name = "azure-vnet"
)
// Version is populated by make during build.
var version string
// Command line arguments for CNI plugin.
var args = acn.ArgumentList{
{
Name: acn.OptVersion,
Shorthand: acn.OptVersionAlias,
Description: "Print version information",
Type: "bool",
DefaultValue: false,
},
}
// Prints version information.
func printVersion() {
fmt.Printf("Azure CNI Version %v\n", version)
}
// reportPluginError sends an error report to the host net agent if CNI encounters any error.
func reportPluginError(reportManager *telemetry.ReportManager, tb *telemetry.TelemetryBuffer, err error) {
log.Printf("Report plugin error")
reflect.ValueOf(reportManager.Report).Elem().FieldByName("ErrorMessage").SetString(err.Error())
if err := reportManager.SendReport(tb); err != nil {
log.Errorf("SendReport failed due to %v", err)
}
}
func validateConfig(jsonBytes []byte) error {
var conf struct {
Name string `json:"name"`
}
if err := json.Unmarshal(jsonBytes, &conf); err != nil {
return fmt.Errorf("error reading network config: %s", err)
}
if conf.Name == "" {
return fmt.Errorf("missing network name")
}
return nil
}
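// Illustrative note (not part of the original file): validateConfig only
// checks the "name" field, so a minimal stdin payload such as the
// hypothetical one below would pass; every other field is ignored here.
//
//	{"cniVersion": "0.3.1", "name": "azure", "type": "azure-vnet"}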
func getCmdArgsFromEnv() (string, *skel.CmdArgs, error) {
log.Printf("Going to read from stdin")
stdinData, err := ioutil.ReadAll(os.Stdin)
if err != nil {
return "", nil, fmt.Errorf("error reading from stdin: %v", err)
}
cmdArgs := &skel.CmdArgs{
ContainerID: os.Getenv("CNI_CONTAINERID"),
Netns: os.Getenv("CNI_NETNS"),
IfName: os.Getenv("CNI_IFNAME"),
Args: os.Getenv("CNI_ARGS"),
Path: os.Getenv("CNI_PATH"),
StdinData: stdinData,
}
cmd := os.Getenv("CNI_COMMAND")
return cmd, cmdArgs, nil
}
func handleIfCniUpdate(update func(*skel.CmdArgs) error) (bool, error) {
isupdate := true
if os.Getenv("CNI_COMMAND") != cni.CmdUpdate {
return false, nil
}
log.Printf("CNI UPDATE received.")
_, cmdArgs, err := getCmdArgsFromEnv()
if err != nil {
log.Printf("Received error while retrieving cmds from environment: %+v", err)
return isupdate, err
}
log.Printf("Retrieved command args for update +%v", cmdArgs)
err = validateConfig(cmdArgs.StdinData)
if err != nil {
log.Printf("Failed to handle CNI UPDATE, err:%v.", err)
return isupdate, err
}
err = update(cmdArgs)
if err != nil {
log.Printf("Failed to handle CNI UPDATE, err:%v.", err)
return isupdate, err
}
return isupdate, nil
}
// Main is the entry point for CNI network plugin.
func main() {
// Initialize and parse command line arguments.
acn.ParseArgs(&args, printVersion)
vers := acn.GetArg(acn.OptVersion).(bool)
if vers {
printVersion()
os.Exit(0)
}
var (
config common.PluginConfig
err error
)
log.SetName(name)
log.SetLevel(log.LevelInfo)
if err = log.SetTarget(log.TargetLogfile); err != nil {
fmt.Printf("Failed to setup cni logging: %v\n", err)
return
}
defer log.Close()
config.Version = version
reportManager := &telemetry.ReportManager{
HostNetAgentURL: hostNetAgentURL,
ContentType: telemetry.ContentType,
Report: &telemetry.CNIReport{
Context: "AzureCNI",
SystemDetails: telemetry.SystemInfo{},
InterfaceDetails: telemetry.InterfaceInfo{},
BridgeDetails: telemetry.BridgeInfo{},
},
}
cniReport := reportManager.Report.(*telemetry.CNIReport)
upTime, err := platform.GetLastRebootTime()
if err == nil {
cniReport.VMUptime = upTime.Format("2006-01-02 15:04:05")
}
cniReport.GetReport(pluginName, version, ipamQueryURL)
startTime := time.Now().UnixNano() / int64(time.Millisecond)
netPlugin, err := network.NewPlugin(name, &config)
if err != nil {
log.Printf("Failed to create network plugin, err:%v.\n", err)
return
}
netPlugin.SetCNIReport(cniReport)
// CNI Acquires lock
if err = netPlugin.Plugin.InitializeKeyValueStore(&config); err != nil {
log.Errorf("Failed to initialize key-value store of network plugin, err:%v.\n", err)
tb := telemetry.NewTelemetryBuffer("")
if tberr := tb.Connect(); tberr == nil {
reportPluginError(reportManager, tb, err)
tb.Close()
}
return
}
// Start telemetry process if not already started. This should be done inside the lock;
// otherwise multiple processes creating/killing the telemetry process can leave it in an undesired state.
tb := telemetry.NewTelemetryBuffer("")
tb.ConnectToTelemetryService(telemetryNumRetries, telemetryWaitTimeInMilliseconds)
defer tb.Close()
t := time.Now()
cniReport.Timestamp = t.Format("2006-01-02 15:04:05")
defer func() {
if errUninit := netPlugin.Plugin.UninitializeKeyValueStore(); errUninit != nil {
log.Errorf("Failed to uninitialize key-value store of network plugin, err:%v.\n", errUninit)
}
if recover() != nil {
return
}
}()
if err = netPlugin.Start(&config); err != nil {
log.Errorf("Failed to start network plugin, err:%v.\n", err)
reportPluginError(reportManager, tb, err)
panic("network plugin start fatal error")
}
handled, err := handleIfCniUpdate(netPlugin.Update)
if handled {
log.Printf("CNI UPDATE finished.")
} else if err = netPlugin.Execute(cni.PluginApi(netPlugin)); err != nil {
log.Errorf("Failed to execute network plugin, err:%v.\n", err)
}
endTime := time.Now().UnixNano() / int64(time.Millisecond)
reflect.ValueOf(reportManager.Report).Elem().FieldByName("OperationDuration").SetInt(int64(endTime - startTime))
netPlugin.Stop()
// release cni lock
if errUninit := netPlugin.Plugin.UninitializeKeyValueStore(); errUninit != nil {
log.Errorf("Failed to uninitialize key-value store of network plugin, err:%v.\n", errUninit)
}
if err != nil {
reportPluginError(reportManager, tb, err)
panic("network plugin execute fatal error")
}
// Report CNI successfully finished execution.
reflect.ValueOf(reportManager.Report).Elem().FieldByName("CniSucceeded").SetBool(true)
if err = reportManager.SendReport(tb); err != nil {
log.Errorf("SendReport failed due to %v", err)
} else {
log.Printf("Sending report succeeded")
}
}
| ["\"CNI_CONTAINERID\"", "\"CNI_NETNS\"", "\"CNI_IFNAME\"", "\"CNI_ARGS\"", "\"CNI_PATH\"", "\"CNI_COMMAND\"", "\"CNI_COMMAND\""] | [] | ["CNI_ARGS", "CNI_IFNAME", "CNI_PATH", "CNI_CONTAINERID", "CNI_COMMAND", "CNI_NETNS"] | [] | ["CNI_ARGS", "CNI_IFNAME", "CNI_PATH", "CNI_CONTAINERID", "CNI_COMMAND", "CNI_NETNS"] | go | 6 | 0 | |
internal/breaking/check_service_methods_not_deleted.go
|
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package breaking
import (
"github.com/gongxulei/prototool/internal/extract"
"github.com/gongxulei/prototool/internal/text"
)
func checkServiceMethodsNotDeleted(addFailure func(*text.Failure), from *extract.PackageSet, to *extract.PackageSet) error {
return forEachServicePair(addFailure, from, to, checkServiceMethodsNotDeletedService)
}
func checkServiceMethodsNotDeletedService(addFailure func(*text.Failure), from *extract.Service, to *extract.Service) error {
fromMethodNameToMethod := from.MethodNameToMethod()
toMethodNameToMethod := to.MethodNameToMethod()
for methodName := range fromMethodNameToMethod {
if _, ok := toMethodNameToMethod[methodName]; !ok {
addFailure(newServiceMethodsNotDeletedFailure(from.FullyQualifiedName(), methodName))
}
}
return nil
}
func newServiceMethodsNotDeletedFailure(serviceName string, methodName string) *text.Failure {
return newTextFailuref(`Service method %q on service %q was deleted.`, methodName, serviceName)
}
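// Illustrative note (not part of the original file): given the format string
// above, a hypothetical service "foo.BarService" that dropped method "Baz"
// would produce the failure text:
//
//	Service method "Baz" on service "foo.BarService" was deleted.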
| [] | [] | [] | [] | [] | go | null | null | null |
viper.go
|
// Copyright © 2014 Steve Francia <[email protected]>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
// Viper is an application configuration system.
// It believes that applications can be configured in a variety of ways
// via flags, ENVIRONMENT variables, configuration files retrieved
// from the file system, or a remote key/value store.
// Each item takes precedence over the item below it:
// overrides
// flag
// env
// config
// key/value store
// default
package viper
import (
"bytes"
"encoding/csv"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"os"
"path/filepath"
"reflect"
"strings"
"sync"
"time"
"github.com/fsnotify/fsnotify"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/printer"
"github.com/magiconair/properties"
"github.com/mitchellh/mapstructure"
"github.com/pelletier/go-toml"
"github.com/spf13/afero"
"github.com/spf13/cast"
jww "github.com/spf13/jwalterweatherman"
"github.com/spf13/pflag"
"github.com/subosito/gotenv"
"gopkg.in/ini.v1"
"gopkg.in/yaml.v2"
)
// ConfigMarshalError happens when failing to marshal the configuration.
type ConfigMarshalError struct {
err error
}
// Error returns the formatted configuration error.
func (e ConfigMarshalError) Error() string {
return fmt.Sprintf("While marshaling config: %s", e.err.Error())
}
var v *Viper
type RemoteResponse struct {
Value []byte
Error error
}
func init() {
v = New()
}
type remoteConfigFactory interface {
Get(rp RemoteProvider) (io.Reader, error)
Watch(rp RemoteProvider) (io.Reader, error)
WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool)
}
// RemoteConfig is optional, see the remote package
var RemoteConfig remoteConfigFactory
// UnsupportedConfigError denotes encountering an unsupported
// configuration filetype.
type UnsupportedConfigError string
// Error returns the formatted configuration error.
func (str UnsupportedConfigError) Error() string {
return fmt.Sprintf("Unsupported Config Type %q", string(str))
}
// UnsupportedRemoteProviderError denotes encountering an unsupported remote
// provider. Currently etcd, Consul and Firestore are supported.
type UnsupportedRemoteProviderError string
// Error returns the formatted remote provider error.
func (str UnsupportedRemoteProviderError) Error() string {
return fmt.Sprintf("Unsupported Remote Provider Type %q", string(str))
}
// RemoteConfigError denotes encountering an error while trying to
// pull the configuration from the remote provider.
type RemoteConfigError string
// Error returns the formatted remote provider error
func (rce RemoteConfigError) Error() string {
return fmt.Sprintf("Remote Configurations Error: %s", string(rce))
}
// ConfigFileNotFoundError denotes failing to find configuration file.
type ConfigFileNotFoundError struct {
name, locations string
}
// Error returns the formatted configuration error.
func (fnfe ConfigFileNotFoundError) Error() string {
return fmt.Sprintf("Config File %q Not Found in %q", fnfe.name, fnfe.locations)
}
// ConfigFileAlreadyExistsError denotes failure to write new configuration file.
type ConfigFileAlreadyExistsError string
// Error returns the formatted error when configuration already exists.
func (faee ConfigFileAlreadyExistsError) Error() string {
return fmt.Sprintf("Config File %q Already Exists", string(faee))
}
// A DecoderConfigOption can be passed to viper.Unmarshal to configure
// mapstructure.DecoderConfig options
type DecoderConfigOption func(*mapstructure.DecoderConfig)
// DecodeHook returns a DecoderConfigOption which overrides the default
// DecoderConfig.DecodeHook value, the default is:
//
// mapstructure.ComposeDecodeHookFunc(
// mapstructure.StringToTimeDurationHookFunc(),
// mapstructure.StringToSliceHookFunc(","),
// )
func DecodeHook(hook mapstructure.DecodeHookFunc) DecoderConfigOption {
return func(c *mapstructure.DecoderConfig) {
c.DecodeHook = hook
}
}
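// Example (a hedged sketch, not part of the original file): replacing the
// default hook so that only duration strings are converted. The serverConfig
// struct and "timeout" key below are hypothetical.
//
//	type serverConfig struct {
//	    Timeout time.Duration `mapstructure:"timeout"`
//	}
//	var sc serverConfig
//	err := Unmarshal(&sc, DecodeHook(
//	    mapstructure.StringToTimeDurationHookFunc(),
//	))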
// Viper is a prioritized configuration registry. It
// maintains a set of configuration sources, fetches
// values to populate those, and provides them according
// to the source's priority.
// The priority of the sources is the following:
// 1. overrides
// 2. flags
// 3. env. variables
// 4. config file
// 5. key/value store
// 6. defaults
//
// For example, if values from the following sources were loaded:
//
// Defaults : {
// "secret": "",
// "user": "default",
// "endpoint": "https://localhost"
// }
// Config : {
// "user": "root"
// "secret": "defaultsecret"
// }
// Env : {
// "secret": "somesecretkey"
// }
//
// The resulting config will have the following values:
//
// {
// "secret": "somesecretkey",
// "user": "root",
// "endpoint": "https://localhost"
// }
type Viper struct {
// Delimiter that separates a list of keys
// used to access a nested value in one go
keyDelim string
// A set of paths to look for the config file in
configPaths []string
// The filesystem to read config from.
fs afero.Fs
// A set of remote providers to search for the configuration
remoteProviders []*defaultRemoteProvider
// Name of file to look for inside the path
configName string
configFile string
configType string
configPermissions os.FileMode
envPrefix string
automaticEnvApplied bool
envKeyReplacer StringReplacer
allowEmptyEnv bool
config map[string]interface{}
override map[string]interface{}
defaults map[string]interface{}
kvstore map[string]interface{}
pflags map[string]FlagValue
env map[string]string
aliases map[string]string
typeByDefValue bool
// Store read properties on the object so that we can write back in order with comments.
// This will only be used if the configuration read is a properties file.
properties *properties.Properties
onConfigChange func(fsnotify.Event)
onGetCallMetric func(key string, value interface{})
}
// New returns an initialized Viper instance.
func New() *Viper {
v := new(Viper)
v.keyDelim = "."
v.configName = "config"
v.configPermissions = os.FileMode(0644)
v.fs = afero.NewOsFs()
v.config = make(map[string]interface{})
v.override = make(map[string]interface{})
v.defaults = make(map[string]interface{})
v.kvstore = make(map[string]interface{})
v.pflags = make(map[string]FlagValue)
v.env = make(map[string]string)
v.aliases = make(map[string]string)
v.typeByDefValue = false
return v
}
// Option configures Viper using the functional options paradigm popularized by Rob Pike and Dave Cheney.
// If you're unfamiliar with this style,
// see https://commandcenter.blogspot.com/2014/01/self-referential-functions-and-design.html and
// https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis.
type Option interface {
apply(v *Viper)
}
type optionFunc func(v *Viper)
func (fn optionFunc) apply(v *Viper) {
fn(v)
}
// KeyDelimiter sets the delimiter used for determining key parts.
// By default its value is ".".
func KeyDelimiter(d string) Option {
return optionFunc(func(v *Viper) {
v.keyDelim = d
})
}
// StringReplacer applies a set of replacements to a string.
type StringReplacer interface {
// Replace returns a copy of s with all replacements performed.
Replace(s string) string
}
// EnvKeyReplacer sets a replacer used for mapping environment variables to internal keys.
func EnvKeyReplacer(r StringReplacer) Option {
return optionFunc(func(v *Viper) {
v.envKeyReplacer = r
})
}
// NewWithOptions creates a new Viper instance.
func NewWithOptions(opts ...Option) *Viper {
v := New()
for _, opt := range opts {
opt.apply(v)
}
return v
}
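// Example (a hedged sketch, not part of the original file): building an
// isolated instance with the functional options defined above; the "::"
// delimiter and replacer are arbitrary choices.
//
//	v := NewWithOptions(
//	    KeyDelimiter("::"),
//	    EnvKeyReplacer(strings.NewReplacer(".", "_")),
//	)
//	v.SetDefault("server::port", 8080)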
// Reset is intended for testing; it resets everything to default settings.
// It is part of the public interface of the viper package so applications
// can use it in their testing as well.
func Reset() {
v = New()
SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "dotenv", "env", "ini"}
SupportedRemoteProviders = []string{"etcd", "consul", "firestore"}
}
type defaultRemoteProvider struct {
provider string
endpoint string
path string
secretKeyring string
}
func (rp defaultRemoteProvider) Provider() string {
return rp.provider
}
func (rp defaultRemoteProvider) Endpoint() string {
return rp.endpoint
}
func (rp defaultRemoteProvider) Path() string {
return rp.path
}
func (rp defaultRemoteProvider) SecretKeyring() string {
return rp.secretKeyring
}
// RemoteProvider stores the configuration necessary
// to connect to a remote key/value store.
// Optional secretKeyring to unencrypt encrypted values
// can be provided.
type RemoteProvider interface {
Provider() string
Endpoint() string
Path() string
SecretKeyring() string
}
// SupportedExts are universally supported extensions.
var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl", "dotenv", "env", "ini"}
// SupportedRemoteProviders are universally supported remote providers.
var SupportedRemoteProviders = []string{"etcd", "consul", "firestore"}
func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) }
func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) {
v.onConfigChange = run
}
func WatchConfig() { v.WatchConfig() }
func (v *Viper) WatchConfig() {
initWG := sync.WaitGroup{}
initWG.Add(1)
go func() {
watcher, err := fsnotify.NewWatcher()
if err != nil {
log.Fatal(err)
}
defer watcher.Close()
// we have to watch the entire directory to pick up renames/atomic saves in a cross-platform way
filename, err := v.getConfigFile()
if err != nil {
log.Printf("error: %v\n", err)
initWG.Done()
return
}
configFile := filepath.Clean(filename)
configDir, _ := filepath.Split(configFile)
realConfigFile, _ := filepath.EvalSymlinks(filename)
eventsWG := sync.WaitGroup{}
eventsWG.Add(1)
go func() {
for {
select {
case event, ok := <-watcher.Events:
if !ok { // 'Events' channel is closed
eventsWG.Done()
return
}
currentConfigFile, _ := filepath.EvalSymlinks(filename)
// we only care about the config file with the following cases:
// 1 - if the config file was modified or created
// 2 - if the real path to the config file changed (eg: k8s ConfigMap replacement)
const writeOrCreateMask = fsnotify.Write | fsnotify.Create
if (filepath.Clean(event.Name) == configFile &&
event.Op&writeOrCreateMask != 0) ||
(currentConfigFile != "" && currentConfigFile != realConfigFile) {
realConfigFile = currentConfigFile
err := v.ReadInConfig()
if err != nil {
log.Printf("error reading config file: %v\n", err)
}
if v.onConfigChange != nil {
v.onConfigChange(event)
}
} else if filepath.Clean(event.Name) == configFile &&
event.Op&fsnotify.Remove != 0 {
eventsWG.Done()
return
}
case err, ok := <-watcher.Errors:
if ok { // 'Errors' channel is not closed
log.Printf("watcher error: %v\n", err)
}
eventsWG.Done()
return
}
}
}()
watcher.Add(configDir)
initWG.Done() // done initializing the watch in this go routine, so the parent routine can move on...
eventsWG.Wait() // now, wait for event loop to end in this go-routine...
}()
initWG.Wait() // make sure that the go routine above fully ended before returning
}
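// Example (a hedged sketch, not part of the original file): WatchConfig is
// typically paired with OnConfigChange, defined above, after the config has
// been read once.
//
//	OnConfigChange(func(e fsnotify.Event) {
//	    log.Printf("config file changed: %s", e.Name)
//	})
//	WatchConfig()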
// SetConfigFile explicitly defines the path, name and extension of the config file.
// Viper will use this and not check any of the config paths.
func SetConfigFile(in string) { v.SetConfigFile(in) }
func (v *Viper) SetConfigFile(in string) {
if in != "" {
v.configFile = in
}
}
// SetEnvPrefix defines a prefix that ENVIRONMENT variables will use.
// E.g. if your prefix is "spf", the env registry will look for env
// variables that start with "SPF_".
func SetEnvPrefix(in string) { v.SetEnvPrefix(in) }
func (v *Viper) SetEnvPrefix(in string) {
if in != "" {
v.envPrefix = in
}
}
func (v *Viper) mergeWithEnvPrefix(in string) string {
if v.envPrefix != "" {
return strings.ToUpper(v.envPrefix + "_" + in)
}
return strings.ToUpper(in)
}
// AllowEmptyEnv tells Viper to consider set,
// but empty environment variables as valid values instead of falling back.
// For backward compatibility reasons this is false by default.
func AllowEmptyEnv(allowEmptyEnv bool) { v.AllowEmptyEnv(allowEmptyEnv) }
func (v *Viper) AllowEmptyEnv(allowEmptyEnv bool) {
v.allowEmptyEnv = allowEmptyEnv
}
// TODO: should getEnv logic be moved into find()? Rewriting keys could be
// generalized to many things, e.g. Get('someKey') -> some_key
// (camel case to snake case for JSON keys perhaps).
// getEnv is a wrapper around os.Getenv which replaces characters in the original
// key. This allows env vars which have different keys than the config object
// keys.
func (v *Viper) getEnv(key string) (string, bool) {
if v.envKeyReplacer != nil {
key = v.envKeyReplacer.Replace(key)
}
val, ok := os.LookupEnv(key)
return val, ok && (v.allowEmptyEnv || val != "")
}
// ConfigFileUsed returns the file used to populate the config registry.
func ConfigFileUsed() string { return v.ConfigFileUsed() }
func (v *Viper) ConfigFileUsed() string { return v.configFile }
// AddConfigPath adds a path for Viper to search for the config file in.
// Can be called multiple times to define multiple search paths.
func AddConfigPath(in string) { v.AddConfigPath(in) }
func (v *Viper) AddConfigPath(in string) {
if in != "" {
absin := absPathify(in)
jww.INFO.Println("adding", absin, "to paths to search")
if !stringInSlice(absin, v.configPaths) {
v.configPaths = append(v.configPaths, absin)
}
}
}
// AddRemoteProvider adds a remote configuration source.
// Remote Providers are searched in the order they are added.
// provider is a string value: "etcd", "consul" or "firestore" are currently supported.
// endpoint is the url. etcd requires http://ip:port consul requires ip:port
// path is the path in the k/v store to retrieve configuration
// To retrieve a config file called myapp.json from /configs/myapp.json
// you should set path to /configs and set config name (SetConfigName()) to
// "myapp"
func AddRemoteProvider(provider, endpoint, path string) error {
return v.AddRemoteProvider(provider, endpoint, path)
}
func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error {
if !stringInSlice(provider, SupportedRemoteProviders) {
return UnsupportedRemoteProviderError(provider)
}
if provider != "" && endpoint != "" {
jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint)
rp := &defaultRemoteProvider{
endpoint: endpoint,
provider: provider,
path: path,
}
if !v.providerPathExists(rp) {
v.remoteProviders = append(v.remoteProviders, rp)
}
}
return nil
}
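// Example (a hedged sketch, not part of the original file): registering an
// etcd source and then reading it; the endpoint and path are hypothetical.
//
//	err := AddRemoteProvider("etcd", "http://127.0.0.1:4001", "/config/myapp.json")
//	SetConfigType("json") // remote payloads need an explicit type
//	err = ReadRemoteConfig()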
// AddSecureRemoteProvider adds a remote configuration source.
// Secure Remote Providers are searched in the order they are added.
// provider is a string value: "etcd", "consul" or "firestore" are currently supported.
// endpoint is the url. etcd requires http://ip:port consul requires ip:port
// secretkeyring is the filepath to your openpgp secret keyring. e.g. /etc/secrets/myring.gpg
// path is the path in the k/v store to retrieve configuration
// To retrieve a config file called myapp.json from /configs/myapp.json
// you should set path to /configs and set config name (SetConfigName()) to
// "myapp"
// Secure Remote Providers are implemented with github.com/bketelsen/crypt
func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error {
return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring)
}
func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error {
if !stringInSlice(provider, SupportedRemoteProviders) {
return UnsupportedRemoteProviderError(provider)
}
if provider != "" && endpoint != "" {
jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint)
rp := &defaultRemoteProvider{
endpoint: endpoint,
provider: provider,
path: path,
secretKeyring: secretkeyring,
}
if !v.providerPathExists(rp) {
v.remoteProviders = append(v.remoteProviders, rp)
}
}
return nil
}
func AddGetMetric(getCallMetric func(key string, val interface{})) { v.AddGetMetric(getCallMetric) }
func (v *Viper) AddGetMetric(getCallMetric func(key string, val interface{})) {
v.onGetCallMetric = getCallMetric
}
func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool {
for _, y := range v.remoteProviders {
if reflect.DeepEqual(y, p) {
return true
}
}
return false
}
// searchMap recursively searches for a value for path in source map.
// Returns nil if not found.
// Note: This assumes that the path entries and map keys are lower cased.
func (v *Viper) searchMap(source map[string]interface{}, path []string) interface{} {
if len(path) == 0 {
return source
}
next, ok := source[path[0]]
if ok {
// Fast path
if len(path) == 1 {
return next
}
// Nested case
switch next.(type) {
case map[interface{}]interface{}:
return v.searchMap(cast.ToStringMap(next), path[1:])
case map[string]interface{}:
// Type assertion is safe here since it is only reached
// if the type of `next` is the same as the type being asserted
return v.searchMap(next.(map[string]interface{}), path[1:])
default:
// got a value but nested key expected, return "nil" for not found
return nil
}
}
return nil
}
// searchMapWithPathPrefixes recursively searches for a value for path in source map.
//
// While searchMap() considers each path element as a single map key, this
// function searches for, and prioritizes, merged path elements.
// e.g., if in the source, "foo" is defined with a sub-key "bar", and "foo.bar"
// is also defined, this latter value is returned for path ["foo", "bar"].
//
// This should be useful only at config level (other maps may not contain dots
// in their keys).
//
// Note: This assumes that the path entries and map keys are lower cased.
func (v *Viper) searchMapWithPathPrefixes(source map[string]interface{}, path []string) interface{} {
if len(path) == 0 {
return source
}
// search for path prefixes, starting from the longest one
for i := len(path); i > 0; i-- {
prefixKey := strings.ToLower(strings.Join(path[0:i], v.keyDelim))
next, ok := source[prefixKey]
if ok {
// Fast path
if i == len(path) {
return next
}
// Nested case
var val interface{}
switch next.(type) {
case map[interface{}]interface{}:
val = v.searchMapWithPathPrefixes(cast.ToStringMap(next), path[i:])
case map[string]interface{}:
// Type assertion is safe here since it is only reached
// if the type of `next` is the same as the type being asserted
val = v.searchMapWithPathPrefixes(next.(map[string]interface{}), path[i:])
default:
// got a value but nested key expected, do nothing and look for next prefix
}
if val != nil {
return val
}
}
}
// not found
return nil
}
// isPathShadowedInDeepMap makes sure the given path is not shadowed somewhere
// on its path in the map.
// e.g., if "foo.bar" has a value in the given map, it “shadows”
// "foo.bar.baz" in a lower-priority map
func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]interface{}) string {
var parentVal interface{}
for i := 1; i < len(path); i++ {
parentVal = v.searchMap(m, path[0:i])
if parentVal == nil {
// not found, no need to add more path elements
return ""
}
switch parentVal.(type) {
case map[interface{}]interface{}:
continue
case map[string]interface{}:
continue
default:
// parentVal is a regular value which shadows "path"
return strings.Join(path[0:i], v.keyDelim)
}
}
return ""
}
// isPathShadowedInFlatMap makes sure the given path is not shadowed somewhere
// in a sub-path of the map.
// e.g., if "foo.bar" has a value in the given map, it “shadows”
// "foo.bar.baz" in a lower-priority map
func (v *Viper) isPathShadowedInFlatMap(path []string, mi interface{}) string {
// unify input map
var m map[string]interface{}
switch mi.(type) {
case map[string]string, map[string]FlagValue:
m = cast.ToStringMap(mi)
default:
return ""
}
// scan paths
var parentKey string
for i := 1; i < len(path); i++ {
parentKey = strings.Join(path[0:i], v.keyDelim)
if _, ok := m[parentKey]; ok {
return parentKey
}
}
return ""
}
// isPathShadowedInAutoEnv makes sure the given path is not shadowed somewhere
// in the environment, when automatic env is on.
// e.g., if "foo.bar" has a value in the environment, it “shadows”
// "foo.bar.baz" in a lower-priority map
func (v *Viper) isPathShadowedInAutoEnv(path []string) string {
var parentKey string
for i := 1; i < len(path); i++ {
parentKey = strings.Join(path[0:i], v.keyDelim)
if _, ok := v.getEnv(v.mergeWithEnvPrefix(parentKey)); ok {
return parentKey
}
}
return ""
}
// SetTypeByDefaultValue enables or disables the inference of a key value's
// type when the Get function is used based upon a key's default value as
// opposed to the value returned based on the normal fetch logic.
//
// For example, if a key has a default value of []string{} and the same key
// is set via an environment variable to "a b c", a call to the Get function
// would return a string slice for the key if the key's type is inferred by
// the default value and the Get function would return:
//
// []string {"a", "b", "c"}
//
// Otherwise the Get function would return:
//
// "a b c"
func SetTypeByDefaultValue(enable bool) { v.SetTypeByDefaultValue(enable) }
func (v *Viper) SetTypeByDefaultValue(enable bool) {
v.typeByDefValue = enable
}
// GetViper gets the global Viper instance.
func GetViper() *Viper {
return v
}
// Get can retrieve any value given the key to use.
// Get is case-insensitive for a key.
// Get has the behavior of returning the value associated with the first
// place from where it is set. Viper will check in the following order:
// override, flag, env, config file, key/value store, default
//
// Get returns an interface. For a specific value use one of the Get____ methods.
func Get(key string) interface{} { return v.Get(key) }
func (v *Viper) Get(key string) interface{} {
lcaseKey := strings.ToLower(key)
val := v.find(lcaseKey, true)
if v.onGetCallMetric != nil {
v.onGetCallMetric(key, val)
}
if val == nil {
return nil
}
if v.typeByDefValue {
// TODO(bep) this branch isn't covered by a single test.
valType := val
path := strings.Split(lcaseKey, v.keyDelim)
defVal := v.searchMap(v.defaults, path)
if defVal != nil {
valType = defVal
}
switch valType.(type) {
case bool:
return cast.ToBool(val)
case string:
return cast.ToString(val)
case int32, int16, int8, int:
return cast.ToInt(val)
case uint:
return cast.ToUint(val)
case uint32:
return cast.ToUint32(val)
case uint64:
return cast.ToUint64(val)
case int64:
return cast.ToInt64(val)
case float64, float32:
return cast.ToFloat64(val)
case time.Time:
return cast.ToTime(val)
case time.Duration:
return cast.ToDuration(val)
case []string:
return cast.ToStringSlice(val)
case []int:
return cast.ToIntSlice(val)
}
}
return val
}
// Sub returns a new Viper instance representing a subtree of this instance.
// Sub is case-insensitive for a key.
func Sub(key string) *Viper { return v.Sub(key) }
func (v *Viper) Sub(key string) *Viper {
subv := New()
data := v.Get(key)
if data == nil {
return nil
}
if reflect.TypeOf(data).Kind() == reflect.Map {
subv.config = cast.ToStringMap(data)
return subv
}
return nil
}
// GetString returns the value associated with the key as a string.
func GetString(key string) string { return v.GetString(key) }
func (v *Viper) GetString(key string) string {
return cast.ToString(v.Get(key))
}
// GetBool returns the value associated with the key as a boolean.
func GetBool(key string) bool { return v.GetBool(key) }
func (v *Viper) GetBool(key string) bool {
return cast.ToBool(v.Get(key))
}
// GetInt returns the value associated with the key as an integer.
func GetInt(key string) int { return v.GetInt(key) }
func (v *Viper) GetInt(key string) int {
return cast.ToInt(v.Get(key))
}
// GetInt32 returns the value associated with the key as an integer.
func GetInt32(key string) int32 { return v.GetInt32(key) }
func (v *Viper) GetInt32(key string) int32 {
return cast.ToInt32(v.Get(key))
}
// GetInt64 returns the value associated with the key as an integer.
func GetInt64(key string) int64 { return v.GetInt64(key) }
func (v *Viper) GetInt64(key string) int64 {
return cast.ToInt64(v.Get(key))
}
// GetUint returns the value associated with the key as an unsigned integer.
func GetUint(key string) uint { return v.GetUint(key) }
func (v *Viper) GetUint(key string) uint {
return cast.ToUint(v.Get(key))
}
// GetUint32 returns the value associated with the key as an unsigned integer.
func GetUint32(key string) uint32 { return v.GetUint32(key) }
func (v *Viper) GetUint32(key string) uint32 {
return cast.ToUint32(v.Get(key))
}
// GetUint64 returns the value associated with the key as an unsigned integer.
func GetUint64(key string) uint64 { return v.GetUint64(key) }
func (v *Viper) GetUint64(key string) uint64 {
return cast.ToUint64(v.Get(key))
}
// GetFloat64 returns the value associated with the key as a float64.
func GetFloat64(key string) float64 { return v.GetFloat64(key) }
func (v *Viper) GetFloat64(key string) float64 {
return cast.ToFloat64(v.Get(key))
}
// GetTime returns the value associated with the key as time.
func GetTime(key string) time.Time { return v.GetTime(key) }
func (v *Viper) GetTime(key string) time.Time {
return cast.ToTime(v.Get(key))
}
// GetDuration returns the value associated with the key as a duration.
func GetDuration(key string) time.Duration { return v.GetDuration(key) }
func (v *Viper) GetDuration(key string) time.Duration {
return cast.ToDuration(v.Get(key))
}
// GetIntSlice returns the value associated with the key as a slice of int values.
func GetIntSlice(key string) []int { return v.GetIntSlice(key) }
func (v *Viper) GetIntSlice(key string) []int {
return cast.ToIntSlice(v.Get(key))
}
// GetStringSlice returns the value associated with the key as a slice of strings.
func GetStringSlice(key string) []string { return v.GetStringSlice(key) }
func (v *Viper) GetStringSlice(key string) []string {
return cast.ToStringSlice(v.Get(key))
}
// GetStringMap returns the value associated with the key as a map of interfaces.
func GetStringMap(key string) map[string]interface{} { return v.GetStringMap(key) }
func (v *Viper) GetStringMap(key string) map[string]interface{} {
return cast.ToStringMap(v.Get(key))
}
// GetStringMapString returns the value associated with the key as a map of strings.
func GetStringMapString(key string) map[string]string { return v.GetStringMapString(key) }
func (v *Viper) GetStringMapString(key string) map[string]string {
return cast.ToStringMapString(v.Get(key))
}
// GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings.
func GetStringMapStringSlice(key string) map[string][]string { return v.GetStringMapStringSlice(key) }
func (v *Viper) GetStringMapStringSlice(key string) map[string][]string {
return cast.ToStringMapStringSlice(v.Get(key))
}
// GetSizeInBytes returns the size of the value associated with the given key
// in bytes.
func GetSizeInBytes(key string) uint { return v.GetSizeInBytes(key) }
func (v *Viper) GetSizeInBytes(key string) uint {
sizeStr := cast.ToString(v.Get(key))
return parseSizeInBytes(sizeStr)
}
// UnmarshalKey takes a single key and unmarshals it into a Struct.
func UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error {
return v.UnmarshalKey(key, rawVal, opts...)
}
func (v *Viper) UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error {
err := decode(v.Get(key), defaultDecoderConfig(rawVal, opts...))
if err != nil {
return err
}
return nil
}
// Unmarshal unmarshals the config into a Struct. Make sure that the tags
// on the fields of the structure are properly set.
func Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error {
return v.Unmarshal(rawVal, opts...)
}
func (v *Viper) Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error {
err := decode(v.AllSettings(), defaultDecoderConfig(rawVal, opts...))
if err != nil {
return err
}
return nil
}
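// Example (a hedged sketch, not part of the original file): unmarshaling all
// settings into a hypothetical struct; field names map case-insensitively
// unless mapstructure tags say otherwise.
//
//	type appConfig struct {
//	    Port    int
//	    Verbose bool
//	}
//	var c appConfig
//	if err := Unmarshal(&c); err != nil {
//	    log.Fatalf("unable to decode config: %v", err)
//	}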
// defaultDecoderConfig returns the default mapstructure.DecoderConfig with support
// for time.Duration values & string slices
func defaultDecoderConfig(output interface{}, opts ...DecoderConfigOption) *mapstructure.DecoderConfig {
c := &mapstructure.DecoderConfig{
Metadata: nil,
Result: output,
WeaklyTypedInput: true,
DecodeHook: mapstructure.ComposeDecodeHookFunc(
mapstructure.StringToTimeDurationHookFunc(),
mapstructure.StringToSliceHookFunc(","),
),
}
for _, opt := range opts {
opt(c)
}
return c
}
// A wrapper around mapstructure.Decode that mimics the WeakDecode functionality
func decode(input interface{}, config *mapstructure.DecoderConfig) error {
decoder, err := mapstructure.NewDecoder(config)
if err != nil {
return err
}
return decoder.Decode(input)
}
// UnmarshalExact unmarshals the config into a Struct, erroring if a field is nonexistent
// in the destination struct.
func UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption) error {
return v.UnmarshalExact(rawVal, opts...)
}
func (v *Viper) UnmarshalExact(rawVal interface{}, opts ...DecoderConfigOption) error {
config := defaultDecoderConfig(rawVal, opts...)
config.ErrorUnused = true
err := decode(v.AllSettings(), config)
if err != nil {
return err
}
return nil
}
// BindPFlags binds a full flag set to the configuration, using each flag's long
// name as the config key.
func BindPFlags(flags *pflag.FlagSet) error { return v.BindPFlags(flags) }
func (v *Viper) BindPFlags(flags *pflag.FlagSet) error {
return v.BindFlagValues(pflagValueSet{flags})
}
// BindPFlag binds a specific key to a pflag (as used by cobra).
// Example (where serverCmd is a Cobra instance):
//
// serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
// Viper.BindPFlag("port", serverCmd.Flags().Lookup("port"))
//
func BindPFlag(key string, flag *pflag.Flag) error { return v.BindPFlag(key, flag) }
func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error {
return v.BindFlagValue(key, pflagValue{flag})
}
// BindFlagValues binds a full FlagValue set to the configuration, using each flag's long
// name as the config key.
func BindFlagValues(flags FlagValueSet) error { return v.BindFlagValues(flags) }
func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) {
flags.VisitAll(func(flag FlagValue) {
if err = v.BindFlagValue(flag.Name(), flag); err != nil {
return
}
})
return nil
}
// BindFlagValue binds a specific key to a FlagValue.
func BindFlagValue(key string, flag FlagValue) error { return v.BindFlagValue(key, flag) }
func (v *Viper) BindFlagValue(key string, flag FlagValue) error {
if flag == nil {
return fmt.Errorf("flag for %q is nil", key)
}
v.pflags[strings.ToLower(key)] = flag
return nil
}
// BindEnv binds a Viper key to an ENV variable.
// ENV variables are case sensitive.
// If only a key is provided, it will use the env key matching the key, uppercased.
// When set, EnvPrefix will be used if an env name is not provided.
func BindEnv(input ...string) error { return v.BindEnv(input...) }
func (v *Viper) BindEnv(input ...string) error {
var key, envkey string
if len(input) == 0 {
return fmt.Errorf("missing key to bind to")
}
key = strings.ToLower(input[0])
if len(input) == 1 {
envkey = v.mergeWithEnvPrefix(key)
} else {
envkey = input[1]
}
v.env[key] = envkey
return nil
}
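// Example (a hedged sketch, not part of the original file): with a prefix
// set, a bare key is uppercased and prefixed; an explicit second argument is
// used verbatim, without the prefix.
//
//	SetEnvPrefix("spf")
//	BindEnv("id")              // reads SPF_ID
//	BindEnv("port", "MY_PORT") // reads MY_PORT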
// Given a key, find the value.
//
// Viper will check to see if an alias exists first.
// Viper will then check in the following order:
// flag, env, config file, key/value store.
// Lastly, if no value was found and flagDefault is true, and if the key
// corresponds to a flag, the flag's default value is returned.
//
// Note: this assumes a lower-cased key given.
func (v *Viper) find(lcaseKey string, flagDefault bool) interface{} {
var (
val interface{}
exists bool
path = strings.Split(lcaseKey, v.keyDelim)
nested = len(path) > 1
)
// compute the path through the nested maps to the nested value
if nested && v.isPathShadowedInDeepMap(path, castMapStringToMapInterface(v.aliases)) != "" {
return nil
}
// if the requested key is an alias, then return the proper key
lcaseKey = v.realKey(lcaseKey)
path = strings.Split(lcaseKey, v.keyDelim)
nested = len(path) > 1
// Set() override first
val = v.searchMap(v.override, path)
if val != nil {
return val
}
if nested && v.isPathShadowedInDeepMap(path, v.override) != "" {
return nil
}
// PFlag override next
flag, exists := v.pflags[lcaseKey]
if exists && flag.HasChanged() {
switch flag.ValueType() {
case "int", "int8", "int16", "int32", "int64":
return cast.ToInt(flag.ValueString())
case "bool":
return cast.ToBool(flag.ValueString())
case "stringSlice":
s := strings.TrimPrefix(flag.ValueString(), "[")
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return res
case "intSlice":
s := strings.TrimPrefix(flag.ValueString(), "[")
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return cast.ToIntSlice(res)
default:
return flag.ValueString()
}
}
if nested && v.isPathShadowedInFlatMap(path, v.pflags) != "" {
return nil
}
// Env override next
if v.automaticEnvApplied {
// even if it hasn't been registered, if automaticEnv is used,
// check any Get request
if val, ok := v.getEnv(v.mergeWithEnvPrefix(lcaseKey)); ok {
return val
}
if nested && v.isPathShadowedInAutoEnv(path) != "" {
return nil
}
}
envkey, exists := v.env[lcaseKey]
if exists {
if val, ok := v.getEnv(envkey); ok {
return val
}
}
if nested && v.isPathShadowedInFlatMap(path, v.env) != "" {
return nil
}
// Config file next
val = v.searchMapWithPathPrefixes(v.config, path)
if val != nil {
return val
}
if nested && v.isPathShadowedInDeepMap(path, v.config) != "" {
return nil
}
// K/V store next
val = v.searchMap(v.kvstore, path)
if val != nil {
return val
}
if nested && v.isPathShadowedInDeepMap(path, v.kvstore) != "" {
return nil
}
// Default next
val = v.searchMap(v.defaults, path)
if val != nil {
return val
}
if nested && v.isPathShadowedInDeepMap(path, v.defaults) != "" {
return nil
}
if flagDefault {
// last chance: if no value is found and a flag does exist for the key,
// get the flag's default value even if the flag's value has not been set.
if flag, exists := v.pflags[lcaseKey]; exists {
switch flag.ValueType() {
case "int", "int8", "int16", "int32", "int64":
return cast.ToInt(flag.ValueString())
case "bool":
return cast.ToBool(flag.ValueString())
case "stringSlice":
s := strings.TrimPrefix(flag.ValueString(), "[")
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return res
case "intSlice":
s := strings.TrimPrefix(flag.ValueString(), "[")
s = strings.TrimSuffix(s, "]")
res, _ := readAsCSV(s)
return cast.ToIntSlice(res)
default:
return flag.ValueString()
}
}
// last item, no need to check shadowing
}
return nil
}
func readAsCSV(val string) ([]string, error) {
if val == "" {
return []string{}, nil
}
stringReader := strings.NewReader(val)
csvReader := csv.NewReader(stringReader)
return csvReader.Read()
}
// IsSet checks to see if the key has been set in any of the data locations.
// IsSet is case-insensitive for a key.
func IsSet(key string) bool { return v.IsSet(key) }
func (v *Viper) IsSet(key string) bool {
lcaseKey := strings.ToLower(key)
val := v.find(lcaseKey, false)
return val != nil
}
// AutomaticEnv has Viper check ENV variables for all
// keys set in config, defaults & flags.
func AutomaticEnv() { v.AutomaticEnv() }
func (v *Viper) AutomaticEnv() {
v.automaticEnvApplied = true
}
// SetEnvKeyReplacer sets the strings.Replacer on the viper object
// Useful for mapping an environmental variable to a key that does
// not match it.
func SetEnvKeyReplacer(r *strings.Replacer) { v.SetEnvKeyReplacer(r) }
func (v *Viper) SetEnvKeyReplacer(r *strings.Replacer) {
v.envKeyReplacer = r
}
// RegisterAlias creates an alias that provides another accessor for the same key.
// This enables one to change a name without breaking the application.
func RegisterAlias(alias string, key string) { v.RegisterAlias(alias, key) }
func (v *Viper) RegisterAlias(alias string, key string) {
v.registerAlias(alias, strings.ToLower(key))
}
func (v *Viper) registerAlias(alias string, key string) {
alias = strings.ToLower(alias)
if alias != key && alias != v.realKey(key) {
_, exists := v.aliases[alias]
if !exists {
// if we alias something that exists in one of the maps to another
// name, we'll never be able to get that value using the original
// name, so move the config value to the new realkey.
if val, ok := v.config[alias]; ok {
delete(v.config, alias)
v.config[key] = val
}
if val, ok := v.kvstore[alias]; ok {
delete(v.kvstore, alias)
v.kvstore[key] = val
}
if val, ok := v.defaults[alias]; ok {
delete(v.defaults, alias)
v.defaults[key] = val
}
if val, ok := v.override[alias]; ok {
delete(v.override, alias)
v.override[key] = val
}
v.aliases[alias] = key
}
} else {
jww.WARN.Println("Creating circular reference alias", alias, key, v.realKey(key))
}
}
func (v *Viper) realKey(key string) string {
newkey, exists := v.aliases[key]
if exists {
jww.DEBUG.Println("Alias", key, "to", newkey)
return v.realKey(newkey)
}
return key
}
// InConfig checks to see if the given key (or an alias) is in the config file.
func InConfig(key string) bool { return v.InConfig(key) }
func (v *Viper) InConfig(key string) bool {
// if the requested key is an alias, then return the proper key
key = v.realKey(key)
_, exists := v.config[key]
return exists
}
// SetDefault sets the default value for this key.
// SetDefault is case-insensitive for a key.
// The default is only used when no value is provided by the user via flag, config or ENV.
func SetDefault(key string, value interface{}) { v.SetDefault(key, value) }
func (v *Viper) SetDefault(key string, value interface{}) {
// If alias passed in, then set the proper default
key = v.realKey(strings.ToLower(key))
value = toCaseInsensitiveValue(value)
path := strings.Split(key, v.keyDelim)
lastKey := strings.ToLower(path[len(path)-1])
deepestMap := deepSearch(v.defaults, path[0:len(path)-1])
// set innermost value
deepestMap[lastKey] = value
}
// Set sets the value for the key in the override register.
// Set is case-insensitive for a key.
// Will be used instead of values obtained via
// flags, config file, ENV, default, or key/value store.
func Set(key string, value interface{}) { v.Set(key, value) }
func (v *Viper) Set(key string, value interface{}) {
// If alias passed in, then set the proper override
key = v.realKey(strings.ToLower(key))
value = toCaseInsensitiveValue(value)
path := strings.Split(key, v.keyDelim)
lastKey := strings.ToLower(path[len(path)-1])
deepestMap := deepSearch(v.override, path[0:len(path)-1])
// set innermost value
deepestMap[lastKey] = value
}
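// Example (a hedged sketch, not part of the original file): Set sits at the
// top of the precedence order documented on the Viper type, so it wins over
// a default for the same key.
//
//	SetDefault("verbose", false)
//	Set("verbose", true)
//	GetBool("verbose") // true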
// ReadInConfig will discover and load the configuration file from disk
// and key/value stores, searching in one of the defined paths.
func ReadInConfig() error { return v.ReadInConfig() }
func (v *Viper) ReadInConfig() error {
jww.INFO.Println("Attempting to read in config file")
filename, err := v.getConfigFile()
if err != nil {
return err
}
if !stringInSlice(v.getConfigType(), SupportedExts) {
return UnsupportedConfigError(v.getConfigType())
}
jww.DEBUG.Println("Reading file: ", filename)
file, err := afero.ReadFile(v.fs, filename)
if err != nil {
return err
}
config := make(map[string]interface{})
err = v.unmarshalReader(bytes.NewReader(file), config)
if err != nil {
return err
}
v.config = config
return nil
}
// MergeInConfig merges a new configuration with an existing config.
func MergeInConfig() error { return v.MergeInConfig() }
func (v *Viper) MergeInConfig() error {
jww.INFO.Println("Attempting to merge in config file")
filename, err := v.getConfigFile()
if err != nil {
return err
}
if !stringInSlice(v.getConfigType(), SupportedExts) {
return UnsupportedConfigError(v.getConfigType())
}
file, err := afero.ReadFile(v.fs, filename)
if err != nil {
return err
}
return v.MergeConfig(bytes.NewReader(file))
}
// ReadConfig reads a new configuration from the given io.Reader, replacing the
// existing config entirely; keys not present in the new input are dropped.
func ReadConfig(in io.Reader) error { return v.ReadConfig(in) }
func (v *Viper) ReadConfig(in io.Reader) error {
v.config = make(map[string]interface{})
return v.unmarshalReader(in, v.config)
}
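// Example (a hedged sketch, not part of the original file): reading config
// from any io.Reader; the type must be set first since there is no file
// extension to infer it from.
//
//	SetConfigType("yaml")
//	if err := ReadConfig(bytes.NewBufferString("hello: world")); err != nil {
//	    log.Fatalf("read config: %v", err)
//	}
//	Get("hello") // "world"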
// MergeConfig merges a new configuration with an existing config.
func MergeConfig(in io.Reader) error { return v.MergeConfig(in) }
func (v *Viper) MergeConfig(in io.Reader) error {
cfg := make(map[string]interface{})
if err := v.unmarshalReader(in, cfg); err != nil {
return err
}
return v.MergeConfigMap(cfg)
}
// MergeConfigMap merges the configuration from the map given with an existing config.
// Note that the map given may be modified.
func MergeConfigMap(cfg map[string]interface{}) error { return v.MergeConfigMap(cfg) }
func (v *Viper) MergeConfigMap(cfg map[string]interface{}) error {
if v.config == nil {
v.config = make(map[string]interface{})
}
insensitiviseMap(cfg)
mergeMaps(cfg, v.config, nil)
return nil
}
// WriteConfig writes the current configuration to a file.
func WriteConfig() error { return v.WriteConfig() }
func (v *Viper) WriteConfig() error {
filename, err := v.getConfigFile()
if err != nil {
return err
}
return v.writeConfig(filename, true)
}
// SafeWriteConfig writes current configuration to file only if the file does not exist.
func SafeWriteConfig() error { return v.SafeWriteConfig() }
func (v *Viper) SafeWriteConfig() error {
if len(v.configPaths) < 1 {
return errors.New("missing configuration for 'configPath'")
}
return v.SafeWriteConfigAs(filepath.Join(v.configPaths[0], v.configName+"."+v.configType))
}
// WriteConfigAs writes current configuration to a given filename.
func WriteConfigAs(filename string) error { return v.WriteConfigAs(filename) }
func (v *Viper) WriteConfigAs(filename string) error {
return v.writeConfig(filename, true)
}
// SafeWriteConfigAs writes current configuration to a given filename if it does not exist.
func SafeWriteConfigAs(filename string) error { return v.SafeWriteConfigAs(filename) }
func (v *Viper) SafeWriteConfigAs(filename string) error {
alreadyExists, err := afero.Exists(v.fs, filename)
if alreadyExists && err == nil {
return ConfigFileAlreadyExistsError(filename)
}
return v.writeConfig(filename, false)
}
func (v *Viper) writeConfig(filename string, force bool) error {
jww.INFO.Println("Attempting to write configuration to file.")
var configType string
ext := filepath.Ext(filename)
if ext != "" {
configType = ext[1:]
} else {
configType = v.configType
}
if configType == "" {
return fmt.Errorf("config type could not be determined for %s", filename)
}
if !stringInSlice(configType, SupportedExts) {
return UnsupportedConfigError(configType)
}
if v.config == nil {
v.config = make(map[string]interface{})
}
flags := os.O_CREATE | os.O_TRUNC | os.O_WRONLY
if !force {
flags |= os.O_EXCL
}
f, err := v.fs.OpenFile(filename, flags, v.configPermissions)
if err != nil {
return err
}
defer f.Close()
if err := v.marshalWriter(f, configType); err != nil {
return err
}
return f.Sync()
}
// Unmarshal a Reader into a map.
// Should probably be an unexported function.
func unmarshalReader(in io.Reader, c map[string]interface{}) error {
return v.unmarshalReader(in, c)
}
func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error {
buf := new(bytes.Buffer)
buf.ReadFrom(in)
switch strings.ToLower(v.getConfigType()) {
case "yaml", "yml":
if err := yaml.Unmarshal(buf.Bytes(), &c); err != nil {
return ConfigParseError{err}
}
case "json":
if err := json.Unmarshal(buf.Bytes(), &c); err != nil {
return ConfigParseError{err}
}
case "hcl":
obj, err := hcl.Parse(buf.String())
if err != nil {
return ConfigParseError{err}
}
if err = hcl.DecodeObject(&c, obj); err != nil {
return ConfigParseError{err}
}
case "toml":
tree, err := toml.LoadReader(buf)
if err != nil {
return ConfigParseError{err}
}
tmap := tree.ToMap()
for k, v := range tmap {
c[k] = v
}
case "dotenv", "env":
env, err := gotenv.StrictParse(buf)
if err != nil {
return ConfigParseError{err}
}
for k, v := range env {
c[k] = v
}
case "properties", "props", "prop":
v.properties = properties.NewProperties()
var err error
if v.properties, err = properties.Load(buf.Bytes(), properties.UTF8); err != nil {
return ConfigParseError{err}
}
for _, key := range v.properties.Keys() {
value, _ := v.properties.Get(key)
// recursively build nested maps
path := strings.Split(key, ".")
lastKey := strings.ToLower(path[len(path)-1])
deepestMap := deepSearch(c, path[0:len(path)-1])
// set innermost value
deepestMap[lastKey] = value
}
case "ini":
cfg := ini.Empty()
err := cfg.Append(buf.Bytes())
if err != nil {
return ConfigParseError{err}
}
sections := cfg.Sections()
for i := 0; i < len(sections); i++ {
section := sections[i]
keys := section.Keys()
for j := 0; j < len(keys); j++ {
key := keys[j]
value := cfg.Section(section.Name()).Key(key.Name()).String()
c[section.Name()+"."+key.Name()] = value
}
}
}
insensitiviseMap(c)
return nil
}
// Marshal a map into Writer.
func (v *Viper) marshalWriter(f afero.File, configType string) error {
c := v.AllSettings()
switch configType {
case "json":
b, err := json.MarshalIndent(c, "", " ")
if err != nil {
return ConfigMarshalError{err}
}
_, err = f.WriteString(string(b))
if err != nil {
return ConfigMarshalError{err}
}
case "hcl":
b, err := json.Marshal(c)
if err != nil {
return ConfigMarshalError{err}
}
ast, err := hcl.Parse(string(b))
if err != nil {
return ConfigMarshalError{err}
}
err = printer.Fprint(f, ast.Node)
if err != nil {
return ConfigMarshalError{err}
}
case "prop", "props", "properties":
if v.properties == nil {
v.properties = properties.NewProperties()
}
p := v.properties
for _, key := range v.AllKeys() {
_, _, err := p.Set(key, v.GetString(key))
if err != nil {
return ConfigMarshalError{err}
}
}
_, err := p.WriteComment(f, "#", properties.UTF8)
if err != nil {
return ConfigMarshalError{err}
}
case "dotenv", "env":
lines := []string{}
for _, key := range v.AllKeys() {
envName := strings.ToUpper(strings.Replace(key, ".", "_", -1))
val := v.Get(key)
lines = append(lines, fmt.Sprintf("%v=%v", envName, val))
}
s := strings.Join(lines, "\n")
if _, err := f.WriteString(s); err != nil {
return ConfigMarshalError{err}
}
case "toml":
t, err := toml.TreeFromMap(c)
if err != nil {
return ConfigMarshalError{err}
}
s := t.String()
if _, err := f.WriteString(s); err != nil {
return ConfigMarshalError{err}
}
case "yaml", "yml":
b, err := yaml.Marshal(c)
if err != nil {
return ConfigMarshalError{err}
}
if _, err = f.WriteString(string(b)); err != nil {
return ConfigMarshalError{err}
}
case "ini":
keys := v.AllKeys()
cfg := ini.Empty()
ini.PrettyFormat = false
for i := 0; i < len(keys); i++ {
key := keys[i]
lastSep := strings.LastIndex(key, ".")
sectionName, keyName := "", key
if lastSep >= 0 {
sectionName = key[:lastSep]
keyName = key[lastSep+1:]
}
if sectionName == "default" {
sectionName = ""
}
cfg.Section(sectionName).Key(keyName).SetValue(v.GetString(key))
}
if _, err := cfg.WriteTo(f); err != nil {
return ConfigMarshalError{err}
}
}
return nil
}
func keyExists(k string, m map[string]interface{}) string {
lk := strings.ToLower(k)
for mk := range m {
lmk := strings.ToLower(mk)
if lmk == lk {
return mk
}
}
return ""
}
func castToMapStringInterface(
src map[interface{}]interface{}) map[string]interface{} {
tgt := map[string]interface{}{}
for k, v := range src {
tgt[fmt.Sprintf("%v", k)] = v
}
return tgt
}
func castMapStringToMapInterface(src map[string]string) map[string]interface{} {
tgt := map[string]interface{}{}
for k, v := range src {
tgt[k] = v
}
return tgt
}
func castMapFlagToMapInterface(src map[string]FlagValue) map[string]interface{} {
tgt := map[string]interface{}{}
for k, v := range src {
tgt[k] = v
}
return tgt
}
// mergeMaps merges two maps. The `itgt` parameter is for handling go-yaml's
// insistence on parsing nested structures as `map[interface{}]interface{}`
// instead of using a `string` as the key for nested structures beyond one level
// deep. Both map types are supported as there is a go-yaml fork that uses
// `map[string]interface{}` instead.
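// For illustration (hypothetical values, not from the source): merging
// src = {"a": {"b": 2}} into tgt = {"a": {"b": 1, "c": 3}} yields
// tgt = {"a": {"b": 2, "c": 3}}; matching scalar keys take the value from
// src, while keys only present in tgt survive.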
func mergeMaps(
src, tgt map[string]interface{}, itgt map[interface{}]interface{}) {
for sk, sv := range src {
tk := keyExists(sk, tgt)
if tk == "" {
jww.TRACE.Printf("tk=\"\", tgt[%s]=%v", sk, sv)
tgt[sk] = sv
if itgt != nil {
itgt[sk] = sv
}
continue
}
tv, ok := tgt[tk]
if !ok {
jww.TRACE.Printf("tgt[%s] != ok, tgt[%s]=%v", tk, sk, sv)
tgt[sk] = sv
if itgt != nil {
itgt[sk] = sv
}
continue
}
svType := reflect.TypeOf(sv)
tvType := reflect.TypeOf(tv)
if svType != tvType {
jww.ERROR.Printf(
"svType != tvType; key=%s, st=%v, tt=%v, sv=%v, tv=%v",
sk, svType, tvType, sv, tv)
continue
}
jww.TRACE.Printf("processing key=%s, st=%v, tt=%v, sv=%v, tv=%v",
sk, svType, tvType, sv, tv)
switch ttv := tv.(type) {
case map[interface{}]interface{}:
jww.TRACE.Printf("merging maps (must convert)")
tsv := sv.(map[interface{}]interface{})
ssv := castToMapStringInterface(tsv)
stv := castToMapStringInterface(ttv)
mergeMaps(ssv, stv, ttv)
case map[string]interface{}:
jww.TRACE.Printf("merging maps")
mergeMaps(sv.(map[string]interface{}), ttv, nil)
default:
jww.TRACE.Printf("setting value")
tgt[tk] = sv
if itgt != nil {
itgt[tk] = sv
}
}
}
}
// ReadRemoteConfig attempts to get configuration from a remote source
// and read it in the remote configuration registry.
func ReadRemoteConfig() error { return v.ReadRemoteConfig() }
func (v *Viper) ReadRemoteConfig() error {
return v.getKeyValueConfig()
}
func WatchRemoteConfig() error { return v.WatchRemoteConfig() }
func (v *Viper) WatchRemoteConfig() error {
return v.watchKeyValueConfig()
}
func (v *Viper) WatchRemoteConfigOnChannel() error {
return v.watchKeyValueConfigOnChannel()
}
// Retrieve the first found remote configuration.
func (v *Viper) getKeyValueConfig() error {
if RemoteConfig == nil {
return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'")
}
for _, rp := range v.remoteProviders {
val, err := v.getRemoteConfig(rp)
if err != nil {
continue
}
v.kvstore = val
return nil
}
return RemoteConfigError("No Files Found")
}
func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) {
reader, err := RemoteConfig.Get(provider)
if err != nil {
return nil, err
}
err = v.unmarshalReader(reader, v.kvstore)
return v.kvstore, err
}
// Watch the first found remote configuration for changes, pushing updates
// over a channel.
func (v *Viper) watchKeyValueConfigOnChannel() error {
for _, rp := range v.remoteProviders {
respc, _ := RemoteConfig.WatchChannel(rp)
// Todo: Add quit channel
go func(rc <-chan *RemoteResponse) {
for {
b := <-rc
reader := bytes.NewReader(b.Value)
v.unmarshalReader(reader, v.kvstore)
}
}(respc)
return nil
}
return RemoteConfigError("No Files Found")
}
// Watch the first found remote configuration.
func (v *Viper) watchKeyValueConfig() error {
for _, rp := range v.remoteProviders {
val, err := v.watchRemoteConfig(rp)
if err != nil {
continue
}
v.kvstore = val
return nil
}
return RemoteConfigError("No Files Found")
}
func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) {
reader, err := RemoteConfig.Watch(provider)
if err != nil {
return nil, err
}
err = v.unmarshalReader(reader, v.kvstore)
return v.kvstore, err
}
// AllKeys returns all keys holding a value, regardless of where they are set.
// Nested keys are returned with a v.keyDelim separator
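// For example (illustrative, assuming the default "." delimiter): a config of
// {"server": {"host": "x", "port": 1}} is reported as the keys
// "server.host" and "server.port".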
func AllKeys() []string { return v.AllKeys() }
func (v *Viper) AllKeys() []string {
m := map[string]bool{}
// add all paths, by order of descending priority to ensure correct shadowing
m = v.flattenAndMergeMap(m, castMapStringToMapInterface(v.aliases), "")
m = v.flattenAndMergeMap(m, v.override, "")
m = v.mergeFlatMap(m, castMapFlagToMapInterface(v.pflags))
m = v.mergeFlatMap(m, castMapStringToMapInterface(v.env))
m = v.flattenAndMergeMap(m, v.config, "")
m = v.flattenAndMergeMap(m, v.kvstore, "")
m = v.flattenAndMergeMap(m, v.defaults, "")
// convert set of paths to list
a := make([]string, 0, len(m))
for x := range m {
a = append(a, x)
}
return a
}
// flattenAndMergeMap recursively flattens the given map into a map[string]bool
// of key paths (used as a set, easier to manipulate than a []string):
// - each path is merged into a single key string, delimited with v.keyDelim
// - if a path is shadowed by an earlier value in the initial shadow map,
// it is skipped.
// The resulting set of paths is merged to the given shadow set at the same time.
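// Illustrative sketch (hypothetical input): flattening m = {"a": {"b": 1}, "c": 2}
// with an empty prefix yields the shadow set {"a.b": true, "c": true}; note that
// the intermediate key "a" itself is not included.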
func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]interface{}, prefix string) map[string]bool {
if shadow != nil && prefix != "" && shadow[prefix] {
// prefix is shadowed => nothing more to flatten
return shadow
}
if shadow == nil {
shadow = make(map[string]bool)
}
var m2 map[string]interface{}
if prefix != "" {
prefix += v.keyDelim
}
for k, val := range m {
fullKey := prefix + k
switch val.(type) {
case map[string]interface{}:
m2 = val.(map[string]interface{})
case map[interface{}]interface{}:
m2 = cast.ToStringMap(val)
default:
// immediate value
shadow[strings.ToLower(fullKey)] = true
continue
}
// recursively merge to shadow map
shadow = v.flattenAndMergeMap(shadow, m2, fullKey)
}
return shadow
}
// mergeFlatMap merges the given maps, excluding values of the second map
// shadowed by values from the first map.
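// Illustrative example (hypothetical keys): if shadow already contains "db"
// as a scalar, a flat key "db.host" in m is skipped, because its intermediate
// path "db" is shadowed by the higher-priority value.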
func (v *Viper) mergeFlatMap(shadow map[string]bool, m map[string]interface{}) map[string]bool {
// scan keys
outer:
for k := range m {
path := strings.Split(k, v.keyDelim)
// scan intermediate paths
var parentKey string
for i := 1; i < len(path); i++ {
parentKey = strings.Join(path[0:i], v.keyDelim)
if shadow[parentKey] {
// path is shadowed, continue
continue outer
}
}
// add key
shadow[strings.ToLower(k)] = true
}
return shadow
}
// AllSettings merges all settings and returns them as a map[string]interface{}.
func AllSettings() map[string]interface{} { return v.AllSettings() }
func (v *Viper) AllSettings() map[string]interface{} {
m := map[string]interface{}{}
// start from the list of keys, and construct the map one value at a time
for _, k := range v.AllKeys() {
value := v.Get(k)
if value == nil {
// should not happen, since AllKeys() returns only keys holding a value,
// check just in case anything changes
continue
}
path := strings.Split(k, v.keyDelim)
lastKey := strings.ToLower(path[len(path)-1])
deepestMap := deepSearch(m, path[0:len(path)-1])
// set innermost value
deepestMap[lastKey] = value
}
return m
}
// SetFs sets the filesystem to use to read configuration.
func SetFs(fs afero.Fs) { v.SetFs(fs) }
func (v *Viper) SetFs(fs afero.Fs) {
v.fs = fs
}
// SetConfigName sets name for the config file.
// Does not include extension.
func SetConfigName(in string) { v.SetConfigName(in) }
func (v *Viper) SetConfigName(in string) {
if in != "" {
v.configName = in
v.configFile = ""
}
}
// SetConfigType sets the type of the configuration returned by the
// remote source, e.g. "json".
func SetConfigType(in string) { v.SetConfigType(in) }
func (v *Viper) SetConfigType(in string) {
if in != "" {
v.configType = in
}
}
// SetConfigPermissions sets the permissions for the config file.
func SetConfigPermissions(perm os.FileMode) { v.SetConfigPermissions(perm) }
func (v *Viper) SetConfigPermissions(perm os.FileMode) {
v.configPermissions = perm.Perm()
}
func (v *Viper) getConfigType() string {
if v.configType != "" {
return v.configType
}
cf, err := v.getConfigFile()
if err != nil {
return ""
}
ext := filepath.Ext(cf)
if len(ext) > 1 {
return ext[1:]
}
return ""
}
func (v *Viper) getConfigFile() (string, error) {
if v.configFile == "" {
cf, err := v.findConfigFile()
if err != nil {
return "", err
}
v.configFile = cf
}
return v.configFile, nil
}
func (v *Viper) searchInPath(in string) (filename string) {
jww.DEBUG.Println("Searching for config in ", in)
for _, ext := range SupportedExts {
jww.DEBUG.Println("Checking for", filepath.Join(in, v.configName+"."+ext))
if b, _ := exists(v.fs, filepath.Join(in, v.configName+"."+ext)); b {
jww.DEBUG.Println("Found: ", filepath.Join(in, v.configName+"."+ext))
return filepath.Join(in, v.configName+"."+ext)
}
}
if v.configType != "" {
if b, _ := exists(v.fs, filepath.Join(in, v.configName)); b {
return filepath.Join(in, v.configName)
}
}
return ""
}
// Search all configPaths for any config file.
// Returns the first path that exists (and is a config file).
func (v *Viper) findConfigFile() (string, error) {
jww.INFO.Println("Searching for config in ", v.configPaths)
for _, cp := range v.configPaths {
file := v.searchInPath(cp)
if file != "" {
return file, nil
}
}
return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)}
}
// Debug prints all configuration registries for debugging
// purposes.
func Debug() { v.Debug() }
func (v *Viper) Debug() {
fmt.Printf("Aliases:\n%#v\n", v.aliases)
fmt.Printf("Override:\n%#v\n", v.override)
fmt.Printf("PFlags:\n%#v\n", v.pflags)
fmt.Printf("Env:\n%#v\n", v.env)
fmt.Printf("Key/Value Store:\n%#v\n", v.kvstore)
fmt.Printf("Config:\n%#v\n", v.config)
fmt.Printf("Defaults:\n%#v\n", v.defaults)
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| 0 | 0 | |
_content/talks/2017/state-of-go/stdlib/sort/sort_test.go
|
// +build go1.8
package main
import (
"fmt"
"math/rand"
"os"
"sort"
"strconv"
"testing"
)
type Person struct {
Name string
AgeYears int
SSN int64
}
type byName []Person
func (b byName) Len() int { return len(b) }
func (b byName) Less(i, j int) bool { return b[i].Name < b[j].Name }
func (b byName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
type byAge []Person
func (b byAge) Len() int { return len(b) }
func (b byAge) Less(i, j int) bool { return b[i].AgeYears < b[j].AgeYears }
func (b byAge) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
type bySSN []Person
func (b bySSN) Len() int { return len(b) }
func (b bySSN) Less(i, j int) bool { return b[i].SSN < b[j].SSN }
func (b bySSN) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func BenchmarkSortSort(b *testing.B) {
p := manyPeople()
for i := 0; i < b.N; i++ {
sort.Sort(byName(p))
sort.Sort(byAge(p))
sort.Sort(bySSN(p))
}
}
func BenchmarkSortSlice(b *testing.B) {
p := manyPeople()
for i := 0; i < b.N; i++ {
sort.Slice(p, func(i, j int) bool { return p[i].Name < p[j].Name })
sort.Slice(p, func(i, j int) bool { return p[i].AgeYears < p[j].AgeYears })
sort.Slice(p, func(i, j int) bool { return p[i].SSN < p[j].SSN })
}
}
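// manyPeople builds the benchmark input: a slice of N randomly generated
// people, where N is read from the N environment variable (e.g. run with
// N=100000). It panics if N is unset or not an integer.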
func manyPeople() []Person {
n, err := strconv.Atoi(os.Getenv("N"))
if err != nil {
panic(err)
}
p := make([]Person, n)
for i := range p {
p[i].AgeYears = rand.Intn(100)
p[i].SSN = rand.Int63n(1000000000)
p[i].Name = fmt.Sprintf("Mr or Ms %d", p[i].AgeYears)
}
return p
}
|
[
"\"N\""
] |
[] |
[
"N"
] |
[]
|
["N"]
|
go
| 1 | 0 | |
projects/adaptive_learning/train.py
|
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""Train model for ppl metric with pre-selected parameters.
These parameters have some variance in their final perplexity, but they were
used to achieve the pre-trained model.
"""
import os
from parlai.scripts.train_model import TrainLoop, setup_args
from parlai.agents.hy_lib.common_utils import override_opt
from projects.adaptive_learning.utils import set_teacher_args
from projects.adaptive_learning.utils import TENSORBOARD_METRICS
PARLAI_HOME = os.getenv('PARLAI_HOME')
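# Note: PARLAI_HOME must be set in the environment, otherwise os.path.join
# below fails with a TypeError. OVERRIDE holds the pre-selected
# hyper-parameters; they are applied both as argparse defaults and, after
# parsing, as hard overrides via override_opt (see __main__ below).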
OVERRIDE = {
"datatype": 'train',
"max_train_time": -1,
"batchsize": 4,
"learningrate": 5e-4,
"dropout": 0.2,
"gradient_clip": 0.1,
"batch_sort": True,
"validation_every_n_secs": 30,
"validation_every_n_epochs": -1,
"validation_metric": 'ppl',
"validation_metric_mode": 'min',
"validation_patience": 12,
"log_every_n_secs": 1,
"shuffle": False,
"numworkers": 40,
"multigpu": False,
"num_epochs": 20,
"display_examples": False,
"history_size": -1,
"text_truncate": 128,
"label_truncate": 128,
"truncate": 128,
"gpu": 0,
"batch_sort_field": 'label',
"pytorch_teacher_batch_sort": False,
"pace_by": 'sample',
"T": 3000,
"c0": 0.01,
"p": 2,
"beam_size": 1,
}
if __name__ == '__main__':
parser = setup_args()
parser = set_teacher_args(parser)
parser.set_defaults(
task='adaptive_learning:personachat_h3_sparse',
subtasks='avg_nidf:intrep_word:lastuttsim:loss_of_seq2seq:post_sim',
model='parlai.agents.adaptive_learning.seq2seq:AdaSeq2seqAgent',
model_file=os.path.join(PARLAI_HOME, 'models/adaptive_learning/personachat_h3_sparse'),
dict_lower=True,
dict_minfreq=-1,
hiddensize=512,
embeddingsize=300,
attention='general',
attention_time='post',
numlayers=2,
rnn_class='lstm',
lookuptable='enc_dec',
optimizer='adam',
weight_decay=0,
embedding_type='glove',
momentum=0.95,
bidirectional=True,
numsoftmax=1,
no_cuda=False,
dict_maxtokens=20000,
dict_tokenizer='split',
lr_scheduler='invsqrt',
warmup_updates=2000,
split_lines=True,
delimiter='__EOT__',
tensorboard_log=True,
tensorboard_log_teacher=True,
tensorboard_metrics=TENSORBOARD_METRICS,
reward_metric='total_metric',
reward_metric_mode='max',
save_after_valid=False,
)
parser.set_defaults(**OVERRIDE)
opt = parser.parse_args()
opt = override_opt(opt, OVERRIDE)
TrainLoop(opt).train()
|
[] |
[] |
[
"PARLAI_HOME"
] |
[]
|
["PARLAI_HOME"]
|
python
| 1 | 0 | |
pkg/object/ceph.go
|
//go:build ceph
// +build ceph
/*
* JuiceFS, Copyright 2020 Juicedata, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package object
import (
"bytes"
"errors"
"fmt"
"io"
"net/url"
"os"
"reflect"
"sort"
"strings"
"sync"
"github.com/ceph/go-ceph/rados"
)
type ceph struct {
DefaultObjectStorage
name string
conn *rados.Conn
free chan *rados.IOContext
}
func (c *ceph) String() string {
return fmt.Sprintf("ceph://%s/", c.name)
}
func (c *ceph) Create() error {
names, err := c.conn.ListPools()
if err != nil {
return err
}
for _, name := range names {
if name == c.name {
return nil
}
}
return c.conn.MakePool(c.name)
}
func (c *ceph) newContext() (*rados.IOContext, error) {
select {
case ctx := <-c.free:
return ctx, nil
default:
return c.conn.OpenIOContext(c.name)
}
}
func (c *ceph) release(ctx *rados.IOContext) {
select {
case c.free <- ctx:
default:
ctx.Destroy()
}
}
func (c *ceph) do(f func(ctx *rados.IOContext) error) (err error) {
ctx, err := c.newContext()
if err != nil {
return err
}
err = f(ctx)
if err != nil {
ctx.Destroy()
} else {
c.release(ctx)
}
return
}
type cephReader struct {
c *ceph
ctx *rados.IOContext
key string
off int64
limit int64
}
func (r *cephReader) Read(buf []byte) (n int, err error) {
if r.limit > 0 && int64(len(buf)) > r.limit {
buf = buf[:r.limit]
}
n, err = r.ctx.Read(r.key, buf, uint64(r.off))
r.off += int64(n)
if r.limit > 0 {
r.limit -= int64(n)
if r.limit == 0 && err == nil {
// the requested range is fully consumed; report EOF so callers
// that keep reading do not run past the limit into the object
err = io.EOF
}
}
if err == nil && n < len(buf) {
err = io.EOF
}
return
}
func (r *cephReader) Close() error {
if r.ctx != nil {
r.c.release(r.ctx)
r.ctx = nil
}
return nil
}
func (c *ceph) Get(key string, off, limit int64) (io.ReadCloser, error) {
ctx, err := c.newContext()
if err != nil {
return nil, err
}
return &cephReader{c, ctx, key, off, limit}, nil
}
var cephPool = sync.Pool{
New: func() interface{} {
return make([]byte, 1<<20)
},
}
func (c *ceph) Put(key string, in io.Reader) error {
return c.do(func(ctx *rados.IOContext) error {
if b, ok := in.(*bytes.Reader); ok {
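// Fast path: recover the backing byte slice of the bytes.Reader via
// reflection to avoid an extra copy. This relies on bytes.Reader's first
// field being its data slice (an unexported implementation detail) and
// assumes the reader has not been partially consumed.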
v := reflect.ValueOf(b)
data := v.Elem().Field(0).Bytes()
return ctx.WriteFull(key, data)
}
buf := cephPool.Get().([]byte)
defer cephPool.Put(buf)
var off uint64
for {
n, err := in.Read(buf)
if n > 0 {
if err = ctx.Write(key, buf[:n], off); err != nil {
return err
}
off += uint64(n)
} else {
if err == io.EOF {
return nil
}
return err
}
}
})
}
func (c *ceph) Delete(key string) error {
return c.do(func(ctx *rados.IOContext) error {
return ctx.Delete(key)
})
}
func (c *ceph) Head(key string) (Object, error) {
var o *obj
err := c.do(func(ctx *rados.IOContext) error {
stat, err := ctx.Stat(key)
if err != nil {
return err
}
o = &obj{key, int64(stat.Size), stat.ModTime, strings.HasSuffix(key, "/")}
return nil
})
if err == rados.ErrNotFound {
err = os.ErrNotExist
}
return o, err
}
func (c *ceph) ListAll(prefix, marker string) (<-chan Object, error) {
var objs = make(chan Object, 1000)
err := c.do(func(ctx *rados.IOContext) error {
iter, err := ctx.Iter()
if err != nil {
close(objs)
return err
}
defer iter.Close()
// FIXME: this will be really slow for many objects
keys := make([]string, 0, 1000)
for iter.Next() {
key := iter.Value()
if key <= marker || !strings.HasPrefix(key, prefix) {
continue
}
keys = append(keys, key)
}
// the keys are not ordered, sort them first
sort.Strings(keys)
// TODO: parallel
go func() {
defer close(objs)
for _, key := range keys {
st, err := ctx.Stat(key)
if err != nil {
if errors.Is(err, rados.ErrNotFound) {
logger.Warnf("Skip non-existent key: %s", key)
continue
}
objs <- nil
logger.Errorf("Stat key %s: %s", key, err)
return
}
objs <- &obj{key, int64(st.Size), st.ModTime, strings.HasSuffix(key, "/")}
}
}()
return nil
})
return objs, err
}
func newCeph(endpoint, cluster, user string) (ObjectStorage, error) {
if !strings.Contains(endpoint, "://") {
endpoint = fmt.Sprintf("ceph://%s", endpoint)
}
uri, err := url.ParseRequestURI(endpoint)
if err != nil {
return nil, fmt.Errorf("Invalid endpoint %s: %s", endpoint, err)
}
name := uri.Host
conn, err := rados.NewConnWithClusterAndUser(cluster, user)
if err != nil {
return nil, fmt.Errorf("Can't create connection to cluster %s for user %s: %s", cluster, user, err)
}
if os.Getenv("JFS_NO_CHECK_OBJECT_STORAGE") == "" {
if err := conn.ReadDefaultConfigFile(); err != nil {
return nil, fmt.Errorf("Can't read default config file: %s", err)
}
if err := conn.Connect(); err != nil {
return nil, fmt.Errorf("Can't connect to cluster %s: %s", cluster, err)
}
}
return &ceph{
name: name,
conn: conn,
free: make(chan *rados.IOContext, 50),
}, nil
}
func init() {
Register("ceph", newCeph)
}
|
[
"\"JFS_NO_CHECK_OBJECT_STORAGE\""
] |
[] |
[
"JFS_NO_CHECK_OBJECT_STORAGE"
] |
[]
|
["JFS_NO_CHECK_OBJECT_STORAGE"]
|
go
| 1 | 0 | |
theano/pylearn2_benchmark.py
|
import os
import sys
import numpy as np
import math
import theano
if not theano.config.device.startswith('gpu'):
import theano.sandbox.cuda
theano.sandbox.cuda.use('gpu')
theano.config.floatX = 'float32'
try:
import theano.misc.pycuda_init
import pycuda.driver
except ImportError:
print "Note: pycuda not available, no timing via CUDA events possible"
import time
pycuda = None
try:
import theano.sandbox.cuda.dnn
if not theano.sandbox.cuda.dnn.dnn_available():
del theano.sandbox.cuda.dnn
raise ImportError
except (ImportError, NameError):
print "Note: cuDNN not available"
try:
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
except ImportError:
FilterActs = None
print "Note: pylearn2's cuda-convnet wrapper not available"
else:
from theano.sandbox.cuda.basic_ops import gpu_contiguous
number = 10 # nb of steps in loop to average over
repeat = 1 # nb of trials to pick the minimum of
runs = [
{
'ni': 3,
'no': 96,
'kw': 11,
'kh': 11,
'iw': 128,
'ih': 128,
'bs': 128,
'dw': 1,
'dh': 1,
},
{
'ni': 64,
'no': 128,
'kw': 9,
'kh': 9,
'iw': 64,
'ih': 64,
'bs': 128,
'dw': 1,
'dh': 1,
},
{
'ni': 128,
'no': 128,
'kw': 9,
'kh': 9,
'iw': 32,
'ih': 32,
'bs': 128,
'dw': 1,
'dh': 1,
},
{
'ni': 128,
'no': 128,
'kw': 7,
'kh': 7,
'iw': 16,
'ih': 16,
'bs': 128,
'dw': 1,
'dh': 1,
},
{
'ni': 384,
'no': 384,
'kw': 3,
'kh': 3,
'iw': 13,
'ih': 13,
'bs': 128,
'dw': 1,
'dh': 1,
}
]
def time_run(fn):
times = []
fn() # warm-up call, not timed
if pycuda:
theano.sandbox.cuda.synchronize()
start = pycuda.driver.Event()
end = pycuda.driver.Event()
for _ in range(repeat):
start.record()
for _ in range(number):
fn()
end.record()
end.synchronize()
times.append(start.time_till(end) / 1e3 / number)
else:
for _ in range(repeat):
theano.sandbox.cuda.synchronize()
start = time.time()
for _ in range(number):
fn()
theano.sandbox.cuda.synchronize()
times.append((time.time() - start) / number)
return min(times)
def print_graph(fn):
if int(os.environ.get('PRINT_GRAPH', 0)):
# debugprint of graph (in blue text)
print '\033[1;34m'
theano.printing.debugprint(fn)
print '\033[1;m'
def benchmark_three_ways(name, sharedX, sharedY, sharedW, X, Y, gW, gX, mode=None):
# benchmark fprop
try:
fprop = theano.function([], [],
givens=[(X, sharedX)],
updates=[(sharedY, Y)],
mode=mode,
name=name + " fprop")
tm = time_run(fprop)
print '{: <50} ==> {: <13} ==> {: >7}'.format(name, 'fprop', int(tm*1000))
print_graph(fprop)
del fprop
except Exception, e:
print name, 'fprop: FAILED', str(e).split('\n', 1)[0]
# benchmark bprop wrt input
try:
bprop = theano.function([], [],
# the NVIDIA wrapper needs this (in fact, it could be optional for subsample == (1, 1))
givens=[(X, sharedX)],
updates=[(sharedX, gX)],
mode=mode,
name=name + " bprop inputs")
tm = time_run(bprop)
print '{: <50} ==> {: <13} ==> {: >7}'.format(name, 'bprop inputs', int(tm*1000))
print_graph(bprop)
del bprop
except Exception, e:
print name, 'bprop inputs: FAILED', str(e).split('\n', 1)[0]
# benchmark bprop wrt weights
try:
bprop = theano.function([], [],
givens=[(X, sharedX)],
updates=[(sharedW, gW)],
mode=mode,
name=name + " bprop weights")
tm = time_run(bprop)
print '{: <50} ==> {: <13} ==> {: >7}'.format(name, 'bprop weights', int(tm*1000))
print_graph(bprop)
del bprop
except Exception, e:
print name, 'bprop weights: FAILED', str(e).split('\n', 1)[0]
print ''
def parse_custom_config(s):
# parses a custom configuration string of the format:
# iAxBxC,kDxExF,bG,sHxJ where A: input channels, B: input width, C: input height,
# D: output channels, E: kernel width, F: kernel height, G: batchsize,
# H: horizontal stride, J: vertical stride (with G, H, J being optional)
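# e.g. parse_custom_config("i3x80x15,k32x3x7,b256") returns
# {'ni': 3, 'iw': 80, 'ih': 15, 'no': 32, 'kw': 3, 'kh': 7,
# 'bs': 256, 'dw': 1, 'dh': 1}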
run = {'bs': 128, 'dw': 1, 'dh': 1}
defs = {'i': ['ni', 'iw', 'ih'],
'k': ['no', 'kw', 'kh'],
'b': ['bs'],
's': ['dw', 'dh']}
for part in s.split(','):
p, args = part[0], map(int, part[1:].split('x'))
run.update(zip(defs[p], args))
return run
if len(sys.argv) > 1:
# allow specifying the runs on command line, 1-indexed (i.e., 1 2 5)
runs = [runs[int(r) - 1] for r in sys.argv[1:] if r[0] != 'i']
# allow specifying custom configurations on command line (e.g., i3x80x15,k32x3x7,b256)
runs.extend([parse_custom_config(r) for r in sys.argv[1:] if r[0] == 'i'])
# allow specifying benchmarks to skip via a SKIP environment variable
skip_tests = os.environ.get("SKIP", '').lower().split(',')
for run in runs:
# params for run:
# (input channels, output channels, kernel width, kernel height, batchsize, image width, image height, horizontal stride, vertical stride)
ni, no, kw, kh, bs, iw, ih, dw, dh = run['ni'], run['no'], run['kw'], run['kh'], run['bs'], run['iw'], run['ih'], run['dw'], run['dh']
print ''
print 'CONFIG: input =', ni, 'x', iw, 'x', ih, '* ker =', ni, 'x', no, 'x', kw, 'x', kh, '( bs =', bs, ', stride =', dw, ')'
ops = 2 # ops per point
mode = theano.compile.get_default_mode()
# benchmark Theano legacy convolution
# Mimic THEANO_FLAGS=optimizer_excluding=conv_gemm:conv_dnn
input_shape = (bs, ni, ih, iw)
filter_shape = (no, ni, kh, kw)
try:
sharedX = theano.shared(np.random.randn(*input_shape).astype('float32'), name='sharedX')
sharedY = theano.shared(np.random.randn(bs, no, (ih-kh)/dh+1, (iw-kw)/dw+1).astype('float32'), name='sharedY')
sharedW = theano.shared(np.random.randn(*filter_shape).astype('float32'), name='sharedW')
except MemoryError, e:
print "SKIPPING config due to the memory error below"
print e
continue
X = theano.tensor.tensor4('X')
Y = theano.tensor.nnet.conv.conv2d(X, sharedW, input_shape, filter_shape, subsample=(dh,dw))
gW = theano.grad(None, wrt=sharedW, known_grads={Y: sharedY})
gX = theano.grad(None, wrt=X, known_grads={Y: sharedY})
if 'legacy' not in skip_tests:
benchmark_three_ways('theano.tensor.nnet.conv.conv2d',
sharedX, sharedY, sharedW, X, Y, gW, gX,
mode.excluding('conv_gemm', 'conv_dnn'))
# benchmark Theano meta-optimizer
# Mimic THEANO_FLAGS=optimizer_including=conv_meta
if 'meta' not in skip_tests:
benchmark_three_ways('(experimental) meta-optimizer',
sharedX, sharedY, sharedW, X, Y, gW, gX,
mode.including('conv_meta'))
# benchmark Theano FFT convolution
# Mimic THEANO_FLAGS=optimizer_including=conv_fft
if 'fft' not in skip_tests:
benchmark_three_ways('theano.sandbox.cuda.fftconv.conv2d_fft',
sharedX, sharedY, sharedW, X, Y, gW, gX,
mode.including('conv_fft'))
# benchmark cudnn, convolution with kernel flipping
if hasattr(theano.sandbox.cuda, 'dnn') and 'dnn' not in skip_tests:
benchmark_three_ways('(auto) theano.sandbox.cuda.dnn.GpuDnnConv',
sharedX, sharedY, sharedW, X, Y, gW, gX,
mode.including('conv_dnn'))
# benchmark caffe-like gemm convolution
# Mimic THEANO_FLAGS=optimizer_excluding=conv_dnn
if 'gemm' not in skip_tests and 'caffe' not in skip_tests:
benchmark_three_ways('(auto) theano.sandbox.cuda.blas.GpuCorrMM',
sharedX, sharedY, sharedW, X, Y, gW, gX,
mode.excluding('conv_dnn'))
# benchmark caffe-like gemm convolution again, directly, w/o kernel flipping
Y = theano.sandbox.cuda.blas.GpuCorrMM(subsample=(dh, dw))(
gpu_contiguous(X), gpu_contiguous(sharedW))
gW = theano.grad(None, wrt=sharedW, known_grads={Y: sharedY})
gX = theano.grad(None, wrt=X, known_grads={Y: sharedY})
benchmark_three_ways('(manual) theano.sandbox.cuda.blas.GpuCorrMM',
sharedX, sharedY, sharedW, X, Y, gW, gX)
# benchmark nvidia convolution directly
if hasattr(theano.sandbox.cuda, 'dnn') and 'dnn' not in skip_tests:
Y = theano.sandbox.cuda.dnn.dnn_conv(X, sharedW, 'valid',
subsample=(dh, dw))
gW = theano.grad(None, wrt=sharedW, known_grads={Y: sharedY})
gX = theano.grad(None, wrt=X, known_grads={Y: sharedY})
benchmark_three_ways(
'(manual conv) theano.sandbox.cuda.dnn.GpuDnnConv',
sharedX, sharedY, sharedW, X, Y, gW, gX)
if int(os.environ.get('DNN_CORR', 0)):
# without flipping (just as fast as manual conv; set DNN_CORR=1 to run)
Y = theano.sandbox.cuda.dnn.dnn_conv(X, sharedW, 'valid',
subsample=(dh, dw),
conv_mode='cross')
gW = theano.grad(None, wrt=sharedW, known_grads={Y: sharedY})
gX = theano.grad(None, wrt=X, known_grads={Y: sharedY})
benchmark_three_ways(
'(manual corr) theano.sandbox.cuda.dnn.GpuDnnConv',
sharedX, sharedY, sharedW, X, Y, gW, gX)
del sharedX
del sharedY
del sharedW
# benchmark cuda-convnet convolution
# we use the pylearn2 wrapper for cuda-convnet (http://benanne.github.io/2014/04/03/faster-convolutions-in-theano.html)
if (FilterActs is None) or ('convnet' in skip_tests):
continue # skip cuda-convnet if pylearn2 wrapper is not available
#(channels, rows, columns, batch_size)
inputBatch = np.random.randn(ni, ih, iw, bs)
sharedX = theano.shared(inputBatch.astype('float32'))
sharedY = theano.shared(np.random.randn(no, (ih-kh)/dh+1, (iw-kw)/dw+1, bs).astype('float32'))
# (channels, rows, columns, number of filters)
sharedW = theano.shared(np.random.randn(ni, kh, kw, no).astype('float32'))
contiguous_input = gpu_contiguous(sharedX)
contiguous_filters = gpu_contiguous(sharedW)
for partial_sum in (None, 1):
Y = FilterActs(partial_sum=partial_sum)(contiguous_input, contiguous_filters)
gW = theano.grad(None, wrt=sharedW, known_grads={Y: sharedY})
gX = theano.grad(None, wrt=sharedX, known_grads={Y: sharedY})
benchmark_three_ways('pylearn2.sandbox.cuda_convnet(partial_sum=%r)' % partial_sum,
sharedX, sharedY, sharedW, X, Y, gW, gX)
del sharedX
del sharedY
del sharedW
|
[] |
[] |
[
"SKIP",
"DNN_CORR",
"PRINT_GRAPH"
] |
[]
|
["SKIP", "DNN_CORR", "PRINT_GRAPH"]
|
python
| 3 | 0 | |
_/Chapter 12/educa/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "educa.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
tests/integration/integration_test.go
|
// Copyright 2016-2018, Pulumi Corporation. All rights reserved.
package ints
import (
"bytes"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
"github.com/pulumi/pulumi/sdk/go/common/util/contract"
"github.com/stretchr/testify/assert"
"github.com/pulumi/pulumi/pkg/resource/deploy/providers"
"github.com/pulumi/pulumi/pkg/secrets/cloud"
"github.com/pulumi/pulumi/pkg/testing/integration"
"github.com/pulumi/pulumi/sdk/go/common/apitype"
"github.com/pulumi/pulumi/sdk/go/common/resource"
"github.com/pulumi/pulumi/sdk/go/common/resource/config"
ptesting "github.com/pulumi/pulumi/sdk/go/common/testing"
"github.com/pulumi/pulumi/sdk/go/common/workspace"
)
const WindowsOS = "windows"
// assertPerfBenchmark implements the integration.TestStatsReporter interface, and reports test
// failures when a scenario exceeds the provided threshold.
type assertPerfBenchmark struct {
T *testing.T
MaxPreviewDuration time.Duration
MaxUpdateDuration time.Duration
}
func (t assertPerfBenchmark) ReportCommand(stats integration.TestCommandStats) {
var maxDuration *time.Duration
if strings.HasPrefix(stats.StepName, "pulumi-preview") {
maxDuration = &t.MaxPreviewDuration
}
if strings.HasPrefix(stats.StepName, "pulumi-update") {
maxDuration = &t.MaxUpdateDuration
}
if maxDuration != nil && *maxDuration != 0 {
if stats.ElapsedSeconds < maxDuration.Seconds() {
t.T.Logf(
"Test step %q was under threshold. %.2fs (max %.2fs)",
stats.StepName, stats.ElapsedSeconds, maxDuration.Seconds())
} else {
t.T.Errorf(
"Test step %q took longer than expected. %.2fs vs. max %.2fs",
stats.StepName, stats.ElapsedSeconds, maxDuration.Seconds())
}
}
}
// TestEmptyNodeJS simply tests that we can run an empty NodeJS project.
func TestEmptyNodeJS(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("empty", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
})
}
// TestEmptyPython simply tests that we can run an empty Python project.
func TestEmptyPython(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("empty", "python"),
Dependencies: []string{
filepath.Join("..", "..", "sdk", "python", "env", "src"),
},
Quick: true,
})
}
// TestEmptyGo simply tests that we can build and run an empty Go project.
func TestEmptyGo(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("empty", "go"),
Dependencies: []string{
"github.com/pulumi/pulumi/sdk",
},
Quick: true,
})
}
// TestEmptyGoRun exercises the 'go run' invocation path that doesn't require an explicit build step.
func TestEmptyGoRun(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("empty", "gorun"),
Dependencies: []string{
"github.com/pulumi/pulumi/sdk",
},
Quick: true,
})
}
// TestEmptyGoRunMain exercises the 'go run' invocation path with a 'main' entrypoint specified in Pulumi.yaml
func TestEmptyGoRunMain(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("empty", "gorun_main"),
Dependencies: []string{
"github.com/pulumi/pulumi/sdk",
},
Quick: true,
})
}
// TestEmptyDotNet simply tests that we can run an empty .NET project.
func TestEmptyDotNet(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("empty", "dotnet"),
Dependencies: []string{"Pulumi"},
Quick: true,
})
}
// TestEngineEventPerf tests that emitting many engine events doesn't result in a performance problem.
func TestEngineEventPerf(t *testing.T) {
// Prior to pulumi/pulumi#2303, a preview or update would take ~40s.
// Since then, it should now be down to ~4s, with additional padding,
// since some Travis machines (especially the macOS ones) seem quite slow
// to begin with.
benchmarkEnforcer := &assertPerfBenchmark{
T: t,
MaxPreviewDuration: 8 * time.Second,
MaxUpdateDuration: 8 * time.Second,
}
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "ee_perf",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
ReportStats: benchmarkEnforcer,
// Don't run in parallel since it is sensitive to system resources.
NoParallel: true,
})
}
// TestEngineEvents ensures that the test framework properly records and reads engine events.
func TestEngineEvents(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "single_resource",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
// Ensure that we have a non-empty list of events.
assert.NotEmpty(t, stackInfo.Events)
// Ensure that we have two "ResourcePre" events: one for the stack and one for our resource.
preEventResourceTypes := []string{}
for _, e := range stackInfo.Events {
if e.ResourcePreEvent != nil {
preEventResourceTypes = append(preEventResourceTypes, e.ResourcePreEvent.Metadata.Type)
}
}
assert.Equal(t, 2, len(preEventResourceTypes))
assert.Contains(t, preEventResourceTypes, "pulumi:pulumi:Stack")
assert.Contains(t, preEventResourceTypes, "pulumi-nodejs:dynamic:Resource")
},
})
}
// TestProjectMain tests out the ability to override the main entrypoint.
func TestProjectMain(t *testing.T) {
test := integration.ProgramTestOptions{
Dir: "project_main",
Dependencies: []string{"@pulumi/pulumi"},
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
// Simple runtime validation that just ensures the checkpoint was written and read.
assert.NotNil(t, stackInfo.Deployment)
},
}
integration.ProgramTest(t, &test)
t.Run("Error_AbsolutePath", func(t *testing.T) {
e := ptesting.NewEnvironment(t)
defer func() {
if !t.Failed() {
e.DeleteEnvironment()
}
}()
e.ImportDirectory("project_main_abs")
e.RunCommand("pulumi", "login", "--cloud-url", e.LocalURL())
e.RunCommand("pulumi", "stack", "init", "main-abs")
stdout, stderr := e.RunCommandExpectError("pulumi", "up", "--non-interactive", "--skip-preview")
assert.Equal(t, "Updating (main-abs):\n \n", stdout)
assert.Contains(t, stderr, "project 'main' must be a relative path")
e.RunCommand("pulumi", "stack", "rm", "--yes")
})
t.Run("Error_ParentFolder", func(t *testing.T) {
e := ptesting.NewEnvironment(t)
defer func() {
if !t.Failed() {
e.DeleteEnvironment()
}
}()
e.ImportDirectory("project_main_parent")
e.RunCommand("pulumi", "login", "--cloud-url", e.LocalURL())
e.RunCommand("pulumi", "stack", "init", "main-parent")
stdout, stderr := e.RunCommandExpectError("pulumi", "up", "--non-interactive", "--skip-preview")
assert.Equal(t, "Updating (main-parent):\n \n", stdout)
assert.Contains(t, stderr, "project 'main' must be a subfolder")
e.RunCommand("pulumi", "stack", "rm", "--yes")
})
}
// TestStackProjectName ensures we can read the Pulumi stack and project name from within the program.
func TestStackProjectName(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "stack_project_name",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
})
}
// TestStackTagValidation verifies various error scenarios related to stack names and tags.
func TestStackTagValidation(t *testing.T) {
t.Run("Error_StackName", func(t *testing.T) {
e := ptesting.NewEnvironment(t)
defer func() {
if !t.Failed() {
e.DeleteEnvironment()
}
}()
e.RunCommand("git", "init")
e.ImportDirectory("stack_project_name")
e.RunCommand("pulumi", "login", "--cloud-url", e.LocalURL())
stdout, stderr := e.RunCommandExpectError("pulumi", "stack", "init", "invalid name (spaces, parens, etc.)")
assert.Equal(t, "", stdout)
assert.Contains(t, stderr, "stack names may only contain alphanumeric, hyphens, underscores, or periods")
})
t.Run("Error_DescriptionLength", func(t *testing.T) {
e := ptesting.NewEnvironment(t)
defer func() {
if !t.Failed() {
e.DeleteEnvironment()
}
}()
e.RunCommand("git", "init")
e.ImportDirectory("stack_project_name")
e.RunCommand("pulumi", "login", "--cloud-url", e.LocalURL())
prefix := "lorem ipsum dolor sit amet" // 26
prefix = prefix + prefix + prefix + prefix // 104
prefix = prefix + prefix + prefix + prefix // 416 + the current Pulumi.yaml's description
// Change the contents of the Description property of Pulumi.yaml.
yamlPath := filepath.Join(e.CWD, "Pulumi.yaml")
err := integration.ReplaceInFile("description: ", "description: "+prefix, yamlPath)
assert.NoError(t, err)
stdout, stderr := e.RunCommandExpectError("pulumi", "stack", "init", "valid-name")
assert.Equal(t, "", stdout)
assert.Contains(t, stderr, "error: could not create stack:")
assert.Contains(t, stderr, "validating stack properties:")
assert.Contains(t, stderr, "stack tag \"pulumi:description\" value is too long (max length 256 characters)")
})
}
func TestRemoveWithResourcesBlocked(t *testing.T) {
if os.Getenv("PULUMI_ACCESS_TOKEN") == "" {
t.Skipf("Skipping: PULUMI_ACCESS_TOKEN is not set")
}
e := ptesting.NewEnvironment(t)
defer func() {
if !t.Failed() {
e.DeleteEnvironment()
}
}()
stackName, err := resource.NewUniqueHex("rm-test-", 8, -1)
contract.AssertNoErrorf(err, "resource.NewUniqueHex should not fail when no maximum length is set")
e.ImportDirectory("single_resource")
e.RunCommand("pulumi", "stack", "init", stackName)
e.RunCommand("yarn", "link", "@pulumi/pulumi")
e.RunCommand("pulumi", "up", "--non-interactive", "--skip-preview")
_, stderr := e.RunCommandExpectError("pulumi", "stack", "rm", "--yes")
assert.Contains(t, stderr, "--force")
e.RunCommand("pulumi", "destroy", "--skip-preview", "--non-interactive", "--yes")
e.RunCommand("pulumi", "stack", "rm", "--yes")
}
// TestStackOutputsNodeJS (and the Python/.NET variants below) ensures we can
// export variables from a stack and have them get recorded as outputs.
func TestStackOutputsNodeJS(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("stack_outputs", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
// Ensure the checkpoint contains a single resource, the Stack, with two outputs.
fmt.Printf("Deployment: %v", stackInfo.Deployment)
assert.NotNil(t, stackInfo.Deployment)
if assert.Equal(t, 1, len(stackInfo.Deployment.Resources)) {
stackRes := stackInfo.Deployment.Resources[0]
assert.NotNil(t, stackRes)
assert.Equal(t, resource.RootStackType, stackRes.URN.Type())
assert.Equal(t, 0, len(stackRes.Inputs))
assert.Equal(t, 2, len(stackRes.Outputs))
assert.Equal(t, "ABC", stackRes.Outputs["xyz"])
assert.Equal(t, float64(42), stackRes.Outputs["foo"])
}
},
})
}
func TestStackOutputsPython(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("stack_outputs", "python"),
Dependencies: []string{
filepath.Join("..", "..", "sdk", "python", "env", "src"),
},
Quick: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
// Ensure the checkpoint contains a single resource, the Stack, with two outputs.
fmt.Printf("Deployment: %v", stackInfo.Deployment)
assert.NotNil(t, stackInfo.Deployment)
if assert.Equal(t, 1, len(stackInfo.Deployment.Resources)) {
stackRes := stackInfo.Deployment.Resources[0]
assert.NotNil(t, stackRes)
assert.Equal(t, resource.RootStackType, stackRes.URN.Type())
assert.Equal(t, 0, len(stackRes.Inputs))
assert.Equal(t, 2, len(stackRes.Outputs))
assert.Equal(t, "ABC", stackRes.Outputs["xyz"])
assert.Equal(t, float64(42), stackRes.Outputs["foo"])
}
},
})
}
func TestStackOutputsDotNet(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("stack_outputs", "dotnet"),
Dependencies: []string{"Pulumi"},
Quick: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
// Ensure the checkpoint contains a single resource, the Stack, with two outputs.
fmt.Printf("Deployment: %v", stackInfo.Deployment)
assert.NotNil(t, stackInfo.Deployment)
if assert.Equal(t, 1, len(stackInfo.Deployment.Resources)) {
stackRes := stackInfo.Deployment.Resources[0]
assert.NotNil(t, stackRes)
assert.Equal(t, resource.RootStackType, stackRes.URN.Type())
assert.Equal(t, 0, len(stackRes.Inputs))
assert.Equal(t, 2, len(stackRes.Outputs))
assert.Equal(t, "ABC", stackRes.Outputs["xyz"])
assert.Equal(t, float64(42), stackRes.Outputs["foo"])
}
},
})
}
// TestStackOutputsJSON ensures the CLI properly formats stack outputs as JSON when requested.
func TestStackOutputsJSON(t *testing.T) {
e := ptesting.NewEnvironment(t)
defer func() {
if !t.Failed() {
e.DeleteEnvironment()
}
}()
e.ImportDirectory(filepath.Join("stack_outputs", "nodejs"))
e.RunCommand("yarn", "link", "@pulumi/pulumi")
e.RunCommand("pulumi", "login", "--cloud-url", e.LocalURL())
e.RunCommand("pulumi", "stack", "init", "stack-outs")
e.RunCommand("pulumi", "up", "--non-interactive", "--skip-preview")
stdout, _ := e.RunCommand("pulumi", "stack", "output", "--json")
assert.Equal(t, `{
"foo": 42,
"xyz": "ABC"
}
`, stdout)
}
// TestStackOutputsDisplayed ensures that outputs are printed at the end of an update
func TestStackOutputsDisplayed(t *testing.T) {
stdout := &bytes.Buffer{}
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("stack_outputs", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: false,
Verbose: true,
Stdout: stdout,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
output := stdout.String()
// ensure we get the outputs info both for the normal update, and for the no-change update.
assert.Contains(t, output, "Outputs:\n foo: 42\n xyz: \"ABC\"\n\nResources:\n + 1 created")
assert.Contains(t, output, "Outputs:\n foo: 42\n xyz: \"ABC\"\n\nResources:\n 1 unchanged")
},
})
}
// TestStackOutputsSuppressed ensures that outputs whose values are intentionally suppressed don't show.
func TestStackOutputsSuppressed(t *testing.T) {
stdout := &bytes.Buffer{}
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("stack_outputs", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: false,
Verbose: true,
Stdout: stdout,
UpdateCommandlineFlags: []string{"--suppress-outputs"},
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
output := stdout.String()
assert.NotContains(t, output, "Outputs:\n    foo: 42\n    xyz: \"ABC\"\n")
},
})
}
// TestStackParenting tests out that stacks and components are parented correctly.
func TestStackParenting(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "stack_parenting",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
// Ensure the checkpoint contains resources parented correctly. This should look like this:
//
// A F
// / \ \
// B C G
// / \
// D E
//
// with the caveat, of course, that A and F will share a common parent, the implicit stack.
assert.NotNil(t, stackInfo.Deployment)
if assert.Equal(t, 9, len(stackInfo.Deployment.Resources)) {
stackRes := stackInfo.Deployment.Resources[0]
assert.NotNil(t, stackRes)
assert.Equal(t, resource.RootStackType, stackRes.Type)
assert.Equal(t, "", string(stackRes.Parent))
urns := make(map[string]resource.URN)
for _, res := range stackInfo.Deployment.Resources[1:] {
assert.NotNil(t, res)
urns[string(res.URN.Name())] = res.URN
switch res.URN.Name() {
case "a", "f":
assert.NotEqual(t, "", res.Parent)
assert.Equal(t, stackRes.URN, res.Parent)
case "b", "c":
assert.Equal(t, urns["a"], res.Parent)
case "d", "e":
assert.Equal(t, urns["c"], res.Parent)
case "g":
assert.Equal(t, urns["f"], res.Parent)
case "default":
// Default providers are not parented.
assert.Equal(t, "", string(res.Parent))
default:
t.Fatalf("unexpected name %s", res.URN.Name())
}
}
}
},
})
}
func TestStackBadParenting(t *testing.T) {
if runtime.GOOS == WindowsOS {
t.Skip("Temporarily skipping test on Windows - pulumi/pulumi#3811")
}
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "stack_bad_parenting",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
ExpectFailure: true,
})
}
// TestStackDependencyGraph tests that the dependency graph of a stack is saved
// in the checkpoint file.
func TestStackDependencyGraph(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "stack_dependencies",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
assert.NotNil(t, stackInfo.Deployment)
latest := stackInfo.Deployment
assert.True(t, len(latest.Resources) >= 2)
sawFirst := false
sawSecond := false
for _, res := range latest.Resources {
urn := string(res.URN)
if strings.Contains(urn, "dynamic:Resource::first") {
// The first resource doesn't depend on anything.
assert.Equal(t, 0, len(res.Dependencies))
sawFirst = true
} else if strings.Contains(urn, "dynamic:Resource::second") {
// The second resource uses an Output property of the first resource, so it
// depends directly on first.
assert.Equal(t, 1, len(res.Dependencies))
assert.True(t, strings.Contains(string(res.Dependencies[0]), "dynamic:Resource::first"))
sawSecond = true
}
}
assert.True(t, sawFirst && sawSecond)
},
})
}
// TestStackComponentDotNet tests the programming model of defining a stack as an explicit top-level component.
func TestStackComponentDotNet(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("stack_component", "dotnet"),
Dependencies: []string{"Pulumi"},
Quick: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
// Ensure the checkpoint contains a single resource, the Stack, with two outputs.
fmt.Printf("Deployment: %v", stackInfo.Deployment)
assert.NotNil(t, stackInfo.Deployment)
if assert.Equal(t, 1, len(stackInfo.Deployment.Resources)) {
stackRes := stackInfo.Deployment.Resources[0]
assert.NotNil(t, stackRes)
assert.Equal(t, resource.RootStackType, stackRes.URN.Type())
assert.Equal(t, 0, len(stackRes.Inputs))
assert.Equal(t, 2, len(stackRes.Outputs))
assert.Equal(t, "ABC", stackRes.Outputs["abc"])
assert.Equal(t, float64(42), stackRes.Outputs["Foo"])
}
},
})
}
// TestConfigSave ensures that config commands in the Pulumi CLI work as expected.
func TestConfigSave(t *testing.T) {
e := ptesting.NewEnvironment(t)
defer func() {
if !t.Failed() {
e.DeleteEnvironment()
}
}()
// Initialize an empty stack.
path := filepath.Join(e.RootPath, "Pulumi.yaml")
err := (&workspace.Project{
Name: "testing-config",
Runtime: workspace.NewProjectRuntimeInfo("nodejs", nil),
}).Save(path)
assert.NoError(t, err)
e.RunCommand("pulumi", "login", "--cloud-url", e.LocalURL())
e.RunCommand("pulumi", "stack", "init", "testing-2")
e.RunCommand("pulumi", "stack", "init", "testing-1")
// Now configure and save a few different things:
e.RunCommand("pulumi", "config", "set", "configA", "value1")
e.RunCommand("pulumi", "config", "set", "configB", "value2", "--stack", "testing-2")
e.RunCommand("pulumi", "stack", "select", "testing-2")
e.RunCommand("pulumi", "config", "set", "configD", "value4")
e.RunCommand("pulumi", "config", "set", "configC", "value3", "--stack", "testing-1")
// Now read back the config using the CLI:
{
stdout, _ := e.RunCommand("pulumi", "config", "get", "configB")
assert.Equal(t, "value2\n", stdout)
}
{
// the config is in a different stack, so this should error.
stdout, stderr := e.RunCommandExpectError("pulumi", "config", "get", "configA")
assert.Equal(t, "", stdout)
assert.NotEqual(t, "", stderr)
}
{
// but selecting the stack should let you see it
stdout, _ := e.RunCommand("pulumi", "config", "get", "configA", "--stack", "testing-1")
assert.Equal(t, "value1\n", stdout)
}
// Finally, check that the stack file contains what we expected.
validate := func(k string, v string, cfg config.Map) {
key, err := config.ParseKey("testing-config:config:" + k)
assert.NoError(t, err)
d, ok := cfg[key]
assert.True(t, ok, "config key %v should be set", k)
dv, err := d.Value(nil)
assert.NoError(t, err)
assert.Equal(t, v, dv)
}
testStack1, err := workspace.LoadProjectStack(filepath.Join(e.CWD, "Pulumi.testing-1.yaml"))
assert.NoError(t, err)
testStack2, err := workspace.LoadProjectStack(filepath.Join(e.CWD, "Pulumi.testing-2.yaml"))
assert.NoError(t, err)
assert.Equal(t, 2, len(testStack1.Config))
assert.Equal(t, 2, len(testStack2.Config))
validate("configA", "value1", testStack1.Config)
validate("configC", "value3", testStack1.Config)
validate("configB", "value2", testStack2.Config)
validate("configD", "value4", testStack2.Config)
e.RunCommand("pulumi", "stack", "rm", "--yes")
}
// TestConfigPaths ensures that config commands with paths work as expected.
func TestConfigPaths(t *testing.T) {
e := ptesting.NewEnvironment(t)
defer func() {
if !t.Failed() {
e.DeleteEnvironment()
}
}()
// Initialize an empty stack.
path := filepath.Join(e.RootPath, "Pulumi.yaml")
err := (&workspace.Project{
Name: "testing-config",
Runtime: workspace.NewProjectRuntimeInfo("nodejs", nil),
}).Save(path)
assert.NoError(t, err)
e.RunCommand("pulumi", "login", "--cloud-url", e.LocalURL())
e.RunCommand("pulumi", "stack", "init", "testing")
namespaces := []string{"", "my:"}
tests := []struct {
Key string
Value string
Secret bool
Path bool
TopLevelKey string
TopLevelExpectedValue string
}{
{
Key: "aConfigValue",
Value: "this value is a value",
TopLevelKey: "aConfigValue",
TopLevelExpectedValue: "this value is a value",
},
{
Key: "anotherConfigValue",
Value: "this value is another value",
TopLevelKey: "anotherConfigValue",
TopLevelExpectedValue: "this value is another value",
},
{
Key: "bEncryptedSecret",
Value: "this super secret is encrypted",
Secret: true,
TopLevelKey: "bEncryptedSecret",
TopLevelExpectedValue: "this super secret is encrypted",
},
{
Key: "anotherEncryptedSecret",
Value: "another encrypted secret",
Secret: true,
TopLevelKey: "anotherEncryptedSecret",
TopLevelExpectedValue: "another encrypted secret",
},
{
Key: "[]",
Value: "square brackets value",
TopLevelKey: "[]",
TopLevelExpectedValue: "square brackets value",
},
{
Key: "x.y",
Value: "x.y value",
TopLevelKey: "x.y",
TopLevelExpectedValue: "x.y value",
},
{
Key: "0",
Value: "0 value",
Path: true,
TopLevelKey: "0",
TopLevelExpectedValue: "0 value",
},
{
Key: "true",
Value: "value",
Path: true,
TopLevelKey: "true",
TopLevelExpectedValue: "value",
},
{
Key: `["test.Key"]`,
Value: "test key value",
Path: true,
TopLevelKey: "test.Key",
TopLevelExpectedValue: "test key value",
},
{
Key: `nested["test.Key"]`,
Value: "nested test key value",
Path: true,
TopLevelKey: "nested",
TopLevelExpectedValue: `{"test.Key":"nested test key value"}`,
},
{
Key: "outer.inner",
Value: "value",
Path: true,
TopLevelKey: "outer",
TopLevelExpectedValue: `{"inner":"value"}`,
},
{
Key: "names[0]",
Value: "a",
Path: true,
TopLevelKey: "names",
TopLevelExpectedValue: `["a"]`,
},
{
Key: "names[1]",
Value: "b",
Path: true,
TopLevelKey: "names",
TopLevelExpectedValue: `["a","b"]`,
},
{
Key: "names[2]",
Value: "c",
Path: true,
TopLevelKey: "names",
TopLevelExpectedValue: `["a","b","c"]`,
},
{
Key: "names[3]",
Value: "super secret name",
Path: true,
Secret: true,
TopLevelKey: "names",
TopLevelExpectedValue: `["a","b","c","super secret name"]`,
},
{
Key: "servers[0].port",
Value: "80",
Path: true,
TopLevelKey: "servers",
TopLevelExpectedValue: `[{"port":80}]`,
},
{
Key: "servers[0].host",
Value: "example",
Path: true,
TopLevelKey: "servers",
TopLevelExpectedValue: `[{"host":"example","port":80}]`,
},
{
Key: "a.b[0].c",
Value: "true",
Path: true,
TopLevelKey: "a",
TopLevelExpectedValue: `{"b":[{"c":true}]}`,
},
{
Key: "a.b[1].c",
Value: "false",
Path: true,
TopLevelKey: "a",
TopLevelExpectedValue: `{"b":[{"c":true},{"c":false}]}`,
},
{
Key: "tokens[0]",
Value: "shh",
Path: true,
Secret: true,
TopLevelKey: "tokens",
TopLevelExpectedValue: `["shh"]`,
},
{
Key: "foo.bar",
Value: "don't tell",
Path: true,
Secret: true,
TopLevelKey: "foo",
TopLevelExpectedValue: `{"bar":"don't tell"}`,
},
{
Key: "semiInner.a.b.c.d",
Value: "1",
Path: true,
TopLevelKey: "semiInner",
TopLevelExpectedValue: `{"a":{"b":{"c":{"d":1}}}}`,
},
{
Key: "wayInner.a.b.c.d.e.f.g.h.i.j.k",
Value: "false",
Path: true,
TopLevelKey: "wayInner",
TopLevelExpectedValue: `{"a":{"b":{"c":{"d":{"e":{"f":{"g":{"h":{"i":{"j":{"k":false}}}}}}}}}}}`,
},
// Overwriting a top-level string value is allowed.
{
Key: "aConfigValue.inner",
Value: "new value",
Path: true,
TopLevelKey: "aConfigValue",
TopLevelExpectedValue: `{"inner":"new value"}`,
},
{
Key: "anotherConfigValue[0]",
Value: "new value",
Path: true,
TopLevelKey: "anotherConfigValue",
TopLevelExpectedValue: `["new value"]`,
},
{
Key: "bEncryptedSecret.inner",
Value: "new value",
Path: true,
TopLevelKey: "bEncryptedSecret",
TopLevelExpectedValue: `{"inner":"new value"}`,
},
{
Key: "anotherEncryptedSecret[0]",
Value: "new value",
Path: true,
TopLevelKey: "anotherEncryptedSecret",
TopLevelExpectedValue: `["new value"]`,
},
}
validateConfigGet := func(key string, value string, path bool) {
args := []string{"config", "get", key}
if path {
args = append(args, "--path")
}
stdout, stderr := e.RunCommand("pulumi", args...)
assert.Equal(t, fmt.Sprintf("%s\n", value), stdout)
assert.Equal(t, "", stderr)
}
for _, ns := range namespaces {
for _, test := range tests {
key := fmt.Sprintf("%s%s", ns, test.Key)
topLevelKey := fmt.Sprintf("%s%s", ns, test.TopLevelKey)
// Set the value.
args := []string{"config", "set"}
if test.Secret {
args = append(args, "--secret")
}
if test.Path {
args = append(args, "--path")
}
args = append(args, key, test.Value)
stdout, stderr := e.RunCommand("pulumi", args...)
assert.Equal(t, "", stdout)
assert.Equal(t, "", stderr)
// Get the value and validate it.
validateConfigGet(key, test.Value, test.Path)
// Get the top-level value and validate it.
validateConfigGet(topLevelKey, test.TopLevelExpectedValue, false /*path*/)
}
}
badKeys := []string{
// Syntax errors.
"root[",
`root["nested]`,
"root.array[abc]",
"root.[1]",
// First path segment must be a non-empty string.
`[""]`,
"[0]",
// Index out of range.
"names[-1]",
"names[5]",
// A "secure" key that is a map with a single string value is reserved by the system.
"key.secure",
"super.nested.map.secure",
// Type mismatch.
"outer[0]",
"names.nested",
"outer.inner.nested",
"outer.inner[0]",
}
for _, ns := range namespaces {
for _, badKey := range badKeys {
key := fmt.Sprintf("%s%s", ns, badKey)
stdout, stderr := e.RunCommandExpectError("pulumi", "config", "set", "--path", key, "value")
assert.Equal(t, "", stdout)
assert.NotEqual(t, "", stderr)
}
}
e.RunCommand("pulumi", "stack", "rm", "--yes")
}
// Tests basic configuration from the perspective of a Pulumi program.
func TestConfigBasicNodeJS(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("config_basic", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
Config: map[string]string{
"aConfigValue": "this value is a value",
},
Secrets: map[string]string{
"bEncryptedSecret": "this super secret is encrypted",
},
OrderedConfig: []integration.ConfigValue{
{Key: "outer.inner", Value: "value", Path: true},
{Key: "names[0]", Value: "a", Path: true},
{Key: "names[1]", Value: "b", Path: true},
{Key: "names[2]", Value: "c", Path: true},
{Key: "names[3]", Value: "super secret name", Path: true, Secret: true},
{Key: "servers[0].port", Value: "80", Path: true},
{Key: "servers[0].host", Value: "example", Path: true},
{Key: "a.b[0].c", Value: "true", Path: true},
{Key: "a.b[1].c", Value: "false", Path: true},
{Key: "tokens[0]", Value: "shh", Path: true, Secret: true},
{Key: "foo.bar", Value: "don't tell", Path: true, Secret: true},
},
})
}
func TestConfigCaptureNodeJS(t *testing.T) {
if runtime.GOOS == WindowsOS {
t.Skip("Temporarily skipping test on Windows - pulumi/pulumi#3811")
}
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("config_capture_e2e", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
Config: map[string]string{
"value": "it works",
},
})
}
func TestInvalidVersionInPackageJson(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("invalid_package_json"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
Config: map[string]string{},
})
}
// Tests basic configuration from the perspective of a Pulumi Python program.
func TestConfigBasicPython(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("config_basic", "python"),
Dependencies: []string{
filepath.Join("..", "..", "sdk", "python", "env", "src"),
},
Quick: true,
Config: map[string]string{
"aConfigValue": "this value is a Pythonic value",
},
Secrets: map[string]string{
"bEncryptedSecret": "this super Pythonic secret is encrypted",
},
OrderedConfig: []integration.ConfigValue{
{Key: "outer.inner", Value: "value", Path: true},
{Key: "names[0]", Value: "a", Path: true},
{Key: "names[1]", Value: "b", Path: true},
{Key: "names[2]", Value: "c", Path: true},
{Key: "names[3]", Value: "super secret name", Path: true, Secret: true},
{Key: "servers[0].port", Value: "80", Path: true},
{Key: "servers[0].host", Value: "example", Path: true},
{Key: "a.b[0].c", Value: "true", Path: true},
{Key: "a.b[1].c", Value: "false", Path: true},
{Key: "tokens[0]", Value: "shh", Path: true, Secret: true},
{Key: "foo.bar", Value: "don't tell", Path: true, Secret: true},
},
})
}
// Tests basic configuration from the perspective of a Pulumi Go program.
func TestConfigBasicGo(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("config_basic", "go"),
Dependencies: []string{
"github.com/pulumi/pulumi/sdk",
},
Quick: true,
Config: map[string]string{
"aConfigValue": "this value is a value",
},
Secrets: map[string]string{
"bEncryptedSecret": "this super secret is encrypted",
},
OrderedConfig: []integration.ConfigValue{
{Key: "outer.inner", Value: "value", Path: true},
{Key: "names[0]", Value: "a", Path: true},
{Key: "names[1]", Value: "b", Path: true},
{Key: "names[2]", Value: "c", Path: true},
{Key: "names[3]", Value: "super secret name", Path: true, Secret: true},
{Key: "servers[0].port", Value: "80", Path: true},
{Key: "servers[0].host", Value: "example", Path: true},
{Key: "a.b[0].c", Value: "true", Path: true},
{Key: "a.b[1].c", Value: "false", Path: true},
{Key: "tokens[0]", Value: "shh", Path: true, Secret: true},
{Key: "foo.bar", Value: "don't tell", Path: true, Secret: true},
},
})
}
// Tests basic configuration from the perspective of a Pulumi .NET program.
func TestConfigBasicDotNet(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("config_basic", "dotnet"),
Dependencies: []string{"Pulumi"},
Quick: true,
Config: map[string]string{
"aConfigValue": "this value is a value",
},
Secrets: map[string]string{
"bEncryptedSecret": "this super secret is encrypted",
},
OrderedConfig: []integration.ConfigValue{
{Key: "outer.inner", Value: "value", Path: true},
{Key: "names[0]", Value: "a", Path: true},
{Key: "names[1]", Value: "b", Path: true},
{Key: "names[2]", Value: "c", Path: true},
{Key: "names[3]", Value: "super secret name", Path: true, Secret: true},
{Key: "servers[0].port", Value: "80", Path: true},
{Key: "servers[0].host", Value: "example", Path: true},
{Key: "a.b[0].c", Value: "true", Path: true},
{Key: "a.b[1].c", Value: "false", Path: true},
{Key: "tokens[0]", Value: "shh", Path: true, Secret: true},
{Key: "foo.bar", Value: "don't tell", Path: true, Secret: true},
},
})
}
// Tests an explicit provider instance.
func TestExplicitProvider(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "explicit_provider",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
assert.NotNil(t, stackInfo.Deployment)
latest := stackInfo.Deployment
// Expect one stack resource, two provider resources, and two custom resources.
assert.True(t, len(latest.Resources) == 5)
var defaultProvider *apitype.ResourceV3
var explicitProvider *apitype.ResourceV3
for _, res := range latest.Resources {
urn := res.URN
switch urn.Name() {
case "default":
assert.True(t, providers.IsProviderType(res.Type))
assert.Nil(t, defaultProvider)
prov := res
defaultProvider = &prov
case "p":
assert.True(t, providers.IsProviderType(res.Type))
assert.Nil(t, explicitProvider)
prov := res
explicitProvider = &prov
case "a":
prov, err := providers.ParseReference(res.Provider)
assert.NoError(t, err)
assert.NotNil(t, defaultProvider)
defaultRef, err := providers.NewReference(defaultProvider.URN, defaultProvider.ID)
assert.NoError(t, err)
assert.Equal(t, defaultRef.String(), prov.String())
case "b":
prov, err := providers.ParseReference(res.Provider)
assert.NoError(t, err)
assert.NotNil(t, explicitProvider)
explicitRef, err := providers.NewReference(explicitProvider.URN, explicitProvider.ID)
assert.NoError(t, err)
assert.Equal(t, explicitRef.String(), prov.String())
}
}
assert.NotNil(t, defaultProvider)
assert.NotNil(t, explicitProvider)
},
})
}
// Tests that reads of unknown IDs do not fail.
func TestGetCreated(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "get_created",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
})
}
// Tests that stack references work in Node.
func TestStackReferenceNodeJS(t *testing.T) {
if runtime.GOOS == WindowsOS {
t.Skip("Temporarily skipping test on Windows - pulumi/pulumi#3811")
}
if owner := os.Getenv("PULUMI_TEST_OWNER"); owner == "" {
t.Skipf("Skipping: PULUMI_TEST_OWNER is not set")
}
opts := &integration.ProgramTestOptions{
Dir: filepath.Join("stack_reference", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
Config: map[string]string{
"org": os.Getenv("PULUMI_TEST_OWNER"),
},
EditDirs: []integration.EditDir{
{
Dir: "step1",
Additive: true,
},
{
Dir: "step2",
Additive: true,
},
},
}
integration.ProgramTest(t, opts)
}
func TestStackReferencePython(t *testing.T) {
if runtime.GOOS == WindowsOS {
t.Skip("Temporarily skipping test on Windows - pulumi/pulumi#3811")
}
if owner := os.Getenv("PULUMI_TEST_OWNER"); owner == "" {
t.Skipf("Skipping: PULUMI_TEST_OWNER is not set")
}
opts := &integration.ProgramTestOptions{
Dir: filepath.Join("stack_reference", "python"),
Dependencies: []string{
filepath.Join("..", "..", "sdk", "python", "env", "src"),
},
Quick: true,
Config: map[string]string{
"org": os.Getenv("PULUMI_TEST_OWNER"),
},
EditDirs: []integration.EditDir{
{
Dir: "step1",
Additive: true,
},
{
Dir: "step2",
Additive: true,
},
},
}
integration.ProgramTest(t, opts)
}
func TestMultiStackReferencePython(t *testing.T) {
if runtime.GOOS == WindowsOS {
t.Skip("Temporarily skipping test on Windows - pulumi/pulumi#3811")
}
if owner := os.Getenv("PULUMI_TEST_OWNER"); owner == "" {
t.Skipf("Skipping: PULUMI_TEST_OWNER is not set")
}
// build a stack with an export
exporterOpts := &integration.ProgramTestOptions{
Dir: filepath.Join("stack_reference_multi", "python", "exporter"),
Dependencies: []string{
filepath.Join("..", "..", "sdk", "python", "env", "src"),
},
Quick: true,
Config: map[string]string{
"org": os.Getenv("PULUMI_TEST_OWNER"),
},
NoParallel: true,
}
// we're going to manually initialize and then defer the deletion of this stack
exporterPt := integration.ProgramTestManualLifeCycle(t, exporterOpts)
exporterPt.TestFinished = false
err := exporterPt.TestLifeCyclePrepare()
assert.NoError(t, err)
err = exporterPt.TestLifeCycleInitialize()
assert.NoError(t, err)
defer func() {
destroyErr := exporterPt.TestLifeCycleDestroy()
assert.NoError(t, destroyErr)
exporterPt.TestFinished = true
exporterPt.TestCleanUp()
}()
err = exporterPt.TestPreviewUpdateAndEdits()
assert.NoError(t, err)
exporterStackName := exporterOpts.GetStackName().String()
importerOpts := &integration.ProgramTestOptions{
Dir: filepath.Join("stack_reference_multi", "python", "importer"),
Dependencies: []string{
filepath.Join("..", "..", "sdk", "python", "env", "src"),
},
Quick: true,
Config: map[string]string{
"org": os.Getenv("PULUMI_TEST_OWNER"),
"exporter_stack_name": exporterStackName,
},
NoParallel: true,
}
integration.ProgramTest(t, importerOpts)
}
// Tests that stack references work in .NET.
func TestStackReferenceDotnet(t *testing.T) {
if runtime.GOOS == WindowsOS {
t.Skip("Temporarily skipping test on Windows - pulumi/pulumi#3811")
}
if owner := os.Getenv("PULUMI_TEST_OWNER"); owner == "" {
t.Skipf("Skipping: PULUMI_TEST_OWNER is not set")
}
opts := &integration.ProgramTestOptions{
Dir: filepath.Join("stack_reference", "dotnet"),
Dependencies: []string{"Pulumi"},
Quick: true,
Config: map[string]string{
"org": os.Getenv("PULUMI_TEST_OWNER"),
},
EditDirs: []integration.EditDir{
{
Dir: "step1",
Additive: true,
},
{
Dir: "step2",
Additive: true,
},
},
}
integration.ProgramTest(t, opts)
}
// Tests that stack references work in Go.
func TestStackReferenceGo(t *testing.T) {
if runtime.GOOS == WindowsOS {
t.Skip("Temporarily skipping test on Windows - pulumi/pulumi#3811")
}
if owner := os.Getenv("PULUMI_TEST_OWNER"); owner == "" {
t.Skipf("Skipping: PULUMI_TEST_OWNER is not set")
}
opts := &integration.ProgramTestOptions{
Dir: filepath.Join("stack_reference", "go"),
Dependencies: []string{
"github.com/pulumi/pulumi/sdk",
},
Quick: true,
Config: map[string]string{
"org": os.Getenv("PULUMI_TEST_OWNER"),
},
EditDirs: []integration.EditDir{
{
Dir: "step1",
Additive: true,
},
{
Dir: "step2",
Additive: true,
},
},
}
integration.ProgramTest(t, opts)
}
// Tests that we issue an error if we fail to locate the Python command when running
// a Python example.
func TestPython3NotInstalled(t *testing.T) {
stderr := &bytes.Buffer{}
badPython := "python3000"
expectedError := fmt.Sprintf(
"error: Failed to locate any of %q on your PATH. Have you installed Python 3.6 or greater?",
[]string{badPython})
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("empty", "python"),
Dependencies: []string{
filepath.Join("..", "..", "sdk", "python", "env", "src"),
},
Quick: true,
Env: []string{
// Note: we use PULUMI_PYTHON_CMD to override the default behavior of searching
// for Python 3, since anyone running tests surely already has Python 3 installed on their
// machine. The code paths are functionally the same.
fmt.Sprintf("PULUMI_PYTHON_CMD=%s", badPython),
},
ExpectFailure: true,
Stderr: stderr,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
output := stderr.String()
assert.Contains(t, output, expectedError)
},
})
}
// TestProviderSecretConfig that a first class provider can be created when it has secrets as part of its config.
func TestProviderSecretConfig(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "provider_secret_config",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
})
}
// Tests dynamic provider in Python.
func TestDynamicPython(t *testing.T) {
var randomVal string
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("dynamic", "python"),
Dependencies: []string{
filepath.Join("..", "..", "sdk", "python", "env", "src"),
},
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
randomVal = stack.Outputs["random_val"].(string)
},
EditDirs: []integration.EditDir{{
Dir: "step1",
Additive: true,
ExtraRuntimeValidation: func(t *testing.T, stack integration.RuntimeValidationStackInfo) {
assert.Equal(t, randomVal, stack.Outputs["random_val"].(string))
},
}},
})
}
func TestResourceWithSecretSerialization(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: "secret_outputs",
Dependencies: []string{"@pulumi/pulumi"},
Quick: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
// The program exports two resources, one named `withSecret` whose prefix property should be secret
// and one named `withoutSecret` which should not. We serialize both of these as POJO objects, so
// they appear as maps in the output.
withSecretProps, ok := stackInfo.Outputs["withSecret"].(map[string]interface{})
assert.Truef(t, ok, "POJO output was not serialized as a map")
withoutSecretProps, ok := stackInfo.Outputs["withoutSecret"].(map[string]interface{})
assert.Truef(t, ok, "POJO output was not serialized as a map")
// The secret prop should have been serialized as a secret
secretPropValue, ok := withSecretProps["prefix"].(map[string]interface{})
assert.Truef(t, ok, "secret output was not serialized as a secret")
assert.Equal(t, resource.SecretSig, secretPropValue[resource.SigKey].(string))
// And here, the prop was not set, it should just be a string value
_, isString := withoutSecretProps["prefix"].(string)
assert.Truef(t, isString, "non-secret output was not a string")
},
})
}
func TestStackReferenceSecretsNodejs(t *testing.T) {
if runtime.GOOS == WindowsOS {
t.Skip("Temporarily skipping test on Windows - pulumi/pulumi#3811")
}
owner := os.Getenv("PULUMI_TEST_OWNER")
if owner == "" {
t.Skipf("Skipping: PULUMI_TEST_OWNER is not set")
}
d := "stack_reference_secrets"
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join(d, "nodejs", "step1"),
Dependencies: []string{"@pulumi/pulumi"},
Config: map[string]string{
"org": owner,
},
Quick: true,
EditDirs: []integration.EditDir{
{
Dir: filepath.Join(d, "nodejs", "step2"),
Additive: true,
ExpectNoChanges: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
_, isString := stackInfo.Outputs["refNormal"].(string)
assert.Truef(t, isString, "referenced non-secret output was not a string")
secretPropValue, ok := stackInfo.Outputs["refSecret"].(map[string]interface{})
assert.Truef(t, ok, "secret output was not serialized as a secret")
assert.Equal(t, resource.SecretSig, secretPropValue[resource.SigKey].(string))
},
},
},
})
}
func TestStackReferenceSecretsDotnet(t *testing.T) {
if runtime.GOOS == WindowsOS {
t.Skip("Temporarily skipping test on Windows - pulumi/pulumi#3811")
}
owner := os.Getenv("PULUMI_TEST_OWNER")
if owner == "" {
t.Skipf("Skipping: PULUMI_TEST_OWNER is not set")
}
d := "stack_reference_secrets"
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join(d, "dotnet", "step1"),
Dependencies: []string{"Pulumi"},
Config: map[string]string{
"org": owner,
},
Quick: true,
EditDirs: []integration.EditDir{
{
Dir: filepath.Join(d, "dotnet", "step2"),
Additive: true,
ExpectNoChanges: true,
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
_, isString := stackInfo.Outputs["refNormal"].(string)
assert.Truef(t, isString, "referenced non-secret output was not a string")
secretPropValue, ok := stackInfo.Outputs["refSecret"].(map[string]interface{})
assert.Truef(t, ok, "secret output was not serialized as a secret")
assert.Equal(t, resource.SecretSig, secretPropValue[resource.SigKey].(string))
},
},
},
})
}
func TestCloudSecretProvider(t *testing.T) {
kmsKeyAlias := os.Getenv("PULUMI_TEST_KMS_KEY_ALIAS")
if kmsKeyAlias == "" {
t.Skipf("Skipping: PULUMI_TEST_KMS_KEY_ALIAS is not set")
}
testOptions := integration.ProgramTestOptions{
Dir: "cloud_secrets_provider",
Dependencies: []string{"@pulumi/pulumi"},
SecretsProvider: fmt.Sprintf("awskms://alias/%s", kmsKeyAlias),
Secrets: map[string]string{
"mysecret": "THISISASECRET",
},
ExtraRuntimeValidation: func(t *testing.T, stackInfo integration.RuntimeValidationStackInfo) {
secretsProvider := stackInfo.Deployment.SecretsProviders
assert.NotNil(t, secretsProvider)
assert.Equal(t, secretsProvider.Type, "cloud")
_, err := cloud.NewCloudSecretsManagerFromState(secretsProvider.State)
assert.NoError(t, err)
out, ok := stackInfo.Outputs["out"].(map[string]interface{})
assert.True(t, ok)
_, ok = out["ciphertext"]
assert.True(t, ok)
},
}
localTestOptions := testOptions.With(integration.ProgramTestOptions{
CloudURL: "file://~",
})
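// "file://~" selects the local filesystem backend, storing state under the home directory.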
// Run with default Pulumi service backend
t.Run("service", func(t *testing.T) { integration.ProgramTest(t, &testOptions) })
// Also run with local backend
t.Run("local", func(t *testing.T) { integration.ProgramTest(t, &localTestOptions) })
}
func TestPartialValuesNode(t *testing.T) {
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("partial_values", "nodejs"),
Dependencies: []string{"@pulumi/pulumi"},
AllowEmptyPreviewChanges: true,
})
}
func TestPartialValuesPython(t *testing.T) {
if runtime.GOOS == WindowsOS {
t.Skip("Temporarily skipping test on Windows - pulumi/pulumi#3811")
}
integration.ProgramTest(t, &integration.ProgramTestOptions{
Dir: filepath.Join("partial_values", "python"),
Dependencies: []string{
filepath.Join("..", "..", "sdk", "python", "env", "src"),
},
AllowEmptyPreviewChanges: true,
})
}
|
[
"\"PULUMI_ACCESS_TOKEN\"",
"\"PULUMI_TEST_OWNER\"",
"\"PULUMI_TEST_OWNER\"",
"\"PULUMI_TEST_OWNER\"",
"\"PULUMI_TEST_OWNER\"",
"\"PULUMI_TEST_OWNER\"",
"\"PULUMI_TEST_OWNER\"",
"\"PULUMI_TEST_OWNER\"",
"\"PULUMI_TEST_OWNER\"",
"\"PULUMI_TEST_OWNER\"",
"\"PULUMI_TEST_OWNER\"",
"\"PULUMI_TEST_OWNER\"",
"\"PULUMI_TEST_OWNER\"",
"\"PULUMI_TEST_OWNER\"",
"\"PULUMI_TEST_KMS_KEY_ALIAS\""
] |
[] |
[
"PULUMI_ACCESS_TOKEN",
"PULUMI_TEST_OWNER",
"PULUMI_TEST_KMS_KEY_ALIAS"
] |
[]
|
["PULUMI_ACCESS_TOKEN", "PULUMI_TEST_OWNER", "PULUMI_TEST_KMS_KEY_ALIAS"]
|
go
| 3 | 0 | |
sd/etcdv3/integration_test.go
|
// +build flaky_integration
package etcdv3
import (
"context"
"io"
"os"
"testing"
"time"
"github.com/jjggzz/kit/endpoint"
"github.com/jjggzz/kit/log"
"github.com/jjggzz/kit/sd"
)
func runIntegration(settings integrationSettings, client Client, service Service, t *testing.T) {
// Verify test data is initially empty.
entries, err := client.GetEntries(settings.key)
if err != nil {
t.Fatalf("GetEntries(%q): expected no error, got one: %v", settings.key, err)
}
if len(entries) > 0 {
t.Fatalf("GetEntries(%q): expected no instance entries, got %d", settings.key, len(entries))
}
t.Logf("GetEntries(%q): %v (OK)", settings.key, entries)
// Instantiate a new Registrar, passing in test data.
registrar := NewRegistrar(
client,
service,
log.With(log.NewLogfmtLogger(os.Stderr), "component", "registrar"),
)
// Register our instance.
registrar.Register()
t.Log("Registered")
// Retrieve entries from etcd manually.
entries, err = client.GetEntries(settings.key)
if err != nil {
t.Fatalf("client.GetEntries(%q): %v", settings.key, err)
}
if want, have := 1, len(entries); want != have {
t.Fatalf("client.GetEntries(%q): want %d, have %d", settings.key, want, have)
}
if want, have := settings.value, entries[0]; want != have {
t.Fatalf("want %q, have %q", want, have)
}
instancer, err := NewInstancer(
client,
settings.prefix,
log.With(log.NewLogfmtLogger(os.Stderr), "component", "instancer"),
)
if err != nil {
t.Fatalf("NewInstancer: %v", err)
}
t.Log("Constructed Instancer OK")
defer instancer.Stop()
endpointer := sd.NewEndpointer(
instancer,
func(string) (endpoint.Endpoint, io.Closer, error) { return endpoint.Nop, nil, nil },
log.With(log.NewLogfmtLogger(os.Stderr), "component", "instancer"),
)
t.Log("Constructed Endpointer OK")
defer endpointer.Close()
if !within(time.Second, func() bool {
endpoints, err := endpointer.Endpoints()
return err == nil && len(endpoints) == 1
}) {
t.Fatal("Endpointer didn't see Register in time")
}
t.Log("Endpointer saw Register OK")
// Deregister first instance of test data.
registrar.Deregister()
t.Log("Deregistered")
// Check it was deregistered.
if !within(time.Second, func() bool {
endpoints, err := endpointer.Endpoints()
t.Logf("Checking Deregister: len(endpoints) = %d, err = %v", len(endpoints), err)
return err == nil && len(endpoints) == 0
}) {
t.Fatalf("Endpointer didn't see Deregister in time")
}
// Verify test data no longer exists in etcd.
entries, err = client.GetEntries(settings.key)
if err != nil {
t.Fatalf("GetEntries(%q): expected no error, got one: %v", settings.key, err)
}
if len(entries) > 0 {
t.Fatalf("GetEntries(%q): expected no entries, got %v", settings.key, entries)
}
t.Logf("GetEntries(%q): %v (OK)", settings.key, entries)
}
type integrationSettings struct {
addr string
prefix string
instance string
key string
value string
}
func testIntegrationSettings(t *testing.T) integrationSettings {
var settings integrationSettings
settings.addr = os.Getenv("ETCD_ADDR")
if settings.addr == "" {
t.Skip("ETCD_ADDR not set; skipping integration test")
}
settings.prefix = "/services/foosvc/" // known at compile time
settings.instance = "1.2.3.4:8080" // taken from runtime or platform, somehow
settings.key = settings.prefix + settings.instance
settings.value = "http://" + settings.instance // based on our transport
return settings
}
// Package sd/etcdv3 provides a wrapper around the etcd key/value store. This
// example assumes the user has an instance of etcd installed and running
// locally on port 2379.
func TestIntegration(t *testing.T) {
settings := testIntegrationSettings(t)
client, err := NewClient(context.Background(), []string{settings.addr}, ClientOptions{
DialTimeout: 2 * time.Second,
DialKeepAlive: 2 * time.Second,
})
if err != nil {
t.Fatalf("NewClient(%q): %v", settings.addr, err)
}
service := Service{
Key: settings.key,
Value: settings.value,
}
runIntegration(settings, client, service, t)
}
func TestIntegrationTTL(t *testing.T) {
settings := testIntegrationSettings(t)
client, err := NewClient(context.Background(), []string{settings.addr}, ClientOptions{
DialTimeout: 2 * time.Second,
DialKeepAlive: 2 * time.Second,
})
if err != nil {
t.Fatalf("NewClient(%q): %v", settings.addr, err)
}
service := Service{
Key: settings.key,
Value: settings.value,
TTL: NewTTLOption(time.Second*3, time.Second*10),
}
defer client.Deregister(service)
runIntegration(settings, client, service, t)
}
func TestIntegrationRegistrarOnly(t *testing.T) {
settings := testIntegrationSettings(t)
client, err := NewClient(context.Background(), []string{settings.addr}, ClientOptions{
DialTimeout: 2 * time.Second,
DialKeepAlive: 2 * time.Second,
})
if err != nil {
t.Fatalf("NewClient(%q): %v", settings.addr, err)
}
service := Service{
Key: settings.key,
Value: settings.value,
TTL: NewTTLOption(time.Second*3, time.Second*10),
}
defer client.Deregister(service)
// Verify test data is initially empty.
entries, err := client.GetEntries(settings.key)
if err != nil {
t.Fatalf("GetEntries(%q): expected no error, got one: %v", settings.key, err)
}
if len(entries) > 0 {
t.Fatalf("GetEntries(%q): expected no instance entries, got %d", settings.key, len(entries))
}
t.Logf("GetEntries(%q): %v (OK)", settings.key, entries)
// Instantiate a new Registrar, passing in test data.
registrar := NewRegistrar(
client,
service,
log.With(log.NewLogfmtLogger(os.Stderr), "component", "registrar"),
)
// Register our instance.
registrar.Register()
t.Log("Registered")
// Deregister our instance. (so we test registrar only scenario)
registrar.Deregister()
t.Log("Deregistered")
}
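// within polls f every d/10 until the deadline d elapses, returning true as
// soon as f succeeds and false if time runs out first.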
func within(d time.Duration, f func() bool) bool {
deadline := time.Now().Add(d)
for time.Now().Before(deadline) {
if f() {
return true
}
time.Sleep(d / 10)
}
return false
}
|
[
"\"ETCD_ADDR\""
] |
[] |
[
"ETCD_ADDR"
] |
[]
|
["ETCD_ADDR"]
|
go
| 1 | 0 | |
demo/goroutine_channel_demo/06/main.go
|
package main
import (
"fmt"
"time"
)
// Handling a panic that occurs inside a goroutine.
func main() {
go func1()
go func2() // if one goroutine panics without being recovered, the whole program crashes and the other goroutines stop too
// Fix: use defer + recover inside the goroutine to catch the panic.
time.Sleep(time.Second)
}
func func1() {
for i := 0; i < 5; i++ {
time.Sleep(time.Millisecond * 50)
fmt.Println("hello word")
}
}
func func2() {
defer func() {
if err := recover(); err != nil {
fmt.Println("func2 recovered from error:", err)
}
}()
// map1 := make(map[string]string)
var map1 map[string]string // deliberately left nil so the write below panics
map1["Li Si"] = "drinks" // writing to a nil map panics at runtime
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
test_functools.py
|
import os
import itertools
import time
import copy
import random
import functools
import platform
from unittest import mock
import pytest
from jaraco.classes import properties
from jaraco.functools import Throttler, method_cache, retry_call, retry
class TestThrottler:
@pytest.mark.xfail(
os.environ.get('GITHUB_ACTIONS') # type: ignore
and platform.system() == 'Darwin',
reason="Performance is heavily throttled on Github Actions Mac runs",
)
def test_function_throttled(self):
"""
Ensure the throttler actually throttles calls.
"""
# set up a function to be called
counter = itertools.count()
# set up a version of `next` that is only called 30 times per second
limited_next = Throttler(next, 30)
# for one second, call next as fast as possible
deadline = time.time() + 1
while time.time() < deadline:
limited_next(counter)
# ensure the counter was advanced about 30 times
assert 28 <= next(counter) <= 32
# ensure that another burst of calls after some idle period will also
# get throttled
time.sleep(1)
deadline = time.time() + 1
counter = itertools.count()
while time.time() < deadline:
limited_next(counter)
assert 28 <= next(counter) <= 32
def test_reconstruct_unwraps(self):
"""
The throttler should be re-usable - if one wants to throttle a
function that's aready throttled, the original function should be
used.
"""
wrapped = Throttler(next, 30)
wrapped_again = Throttler(wrapped, 60)
assert wrapped_again.func is next
assert wrapped_again.max_rate == 60
def test_throttled_method(self):
class ThrottledMethodClass:
@Throttler
def echo(self, arg):
return arg
tmc = ThrottledMethodClass()
assert tmc.echo('foo') == 'foo'
class TestMethodCache:
bad_vers = '(3, 5, 0) <= sys.version_info < (3, 5, 2)'
@pytest.mark.skipif(bad_vers, reason="https://bugs.python.org/issue25447")
def test_deepcopy(self):
"""
A deepcopy of an object with a method cache should still
succeed.
"""
class ClassUnderTest:
calls = 0
@method_cache
def method(self, value):
self.calls += 1
return value
ob = ClassUnderTest()
copy.deepcopy(ob)
ob.method(1)
copy.deepcopy(ob)
def test_special_methods(self):
"""
Test method_cache with __getitem__ and __getattr__.
"""
class ClassUnderTest:
getitem_calls = 0
getattr_calls = 0
@method_cache
def __getitem__(self, item):
self.getitem_calls += 1
return item
@method_cache
def __getattr__(self, name):
self.getattr_calls += 1
return name
ob = ClassUnderTest()
# __getitem__
ob[1] + ob[1]
assert ob.getitem_calls == 1
# __getattr__
ob.one + ob.one
assert ob.getattr_calls == 1
@pytest.mark.xfail(reason="can't replace property with cache; #6")
def test_property(self):
"""
Can a method_cache decorated method also be a property?
"""
class ClassUnderTest:
@property
@method_cache
def mything(self): # pragma: nocover
return random.random()
ob = ClassUnderTest()
assert ob.mything == ob.mything
@pytest.mark.xfail(reason="can't replace property with cache; #6")
def test_non_data_property(self):
"""
A non-data property also does not work because the property
gets replaced with a method.
"""
class ClassUnderTest:
@properties.NonDataProperty
@method_cache
def mything(self):
return random.random()
ob = ClassUnderTest()
assert ob.mything == ob.mything
class TestRetry:
def attempt(self, arg=None):
if next(self.fails_left):
raise ValueError("Failed!")
if arg:
arg.touch()
return "Success"
def set_to_fail(self, times):
self.fails_left = itertools.count(times, -1)
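# counts down: times, times-1, ..., 1, 0, ... so exactly `times`
# calls of attempt() see a truthy value and raise before one succeeds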
def test_set_to_fail(self):
"""
Test this test's internal failure mechanism.
"""
self.set_to_fail(times=2)
with pytest.raises(ValueError):
self.attempt()
with pytest.raises(ValueError):
self.attempt()
assert self.attempt() == 'Success'
def test_retry_call_succeeds(self):
self.set_to_fail(times=2)
res = retry_call(self.attempt, retries=2, trap=ValueError)
assert res == "Success"
def test_retry_call_fails(self):
"""
Failing more than the number of retries should
raise the underlying error.
"""
self.set_to_fail(times=3)
with pytest.raises(ValueError) as res:
retry_call(self.attempt, retries=2, trap=ValueError)
assert str(res.value) == 'Failed!'
def test_retry_multiple_exceptions(self):
self.set_to_fail(times=2)
errors = ValueError, NameError
res = retry_call(self.attempt, retries=2, trap=errors)
assert res == "Success"
def test_retry_exception_superclass(self):
self.set_to_fail(times=2)
res = retry_call(self.attempt, retries=2, trap=Exception)
assert res == "Success"
def test_default_traps_nothing(self):
self.set_to_fail(times=1)
with pytest.raises(ValueError):
retry_call(self.attempt, retries=1)
def test_default_does_not_retry(self):
self.set_to_fail(times=1)
with pytest.raises(ValueError):
retry_call(self.attempt, trap=Exception)
def test_cleanup_called_on_exception(self):
calls = random.randint(1, 10)
cleanup = mock.Mock()
self.set_to_fail(times=calls)
retry_call(self.attempt, retries=calls, cleanup=cleanup, trap=Exception)
assert cleanup.call_count == calls
cleanup.assert_called_with()
def test_infinite_retries(self):
self.set_to_fail(times=999)
cleanup = mock.Mock()
retry_call(self.attempt, retries=float('inf'), cleanup=cleanup, trap=Exception)
assert cleanup.call_count == 999
def test_with_arg(self):
self.set_to_fail(times=0)
arg = mock.Mock()
bound = functools.partial(self.attempt, arg)
res = retry_call(bound)
assert res == 'Success'
assert arg.touch.called
def test_decorator(self):
self.set_to_fail(times=1)
attempt = retry(retries=1, trap=Exception)(self.attempt)
res = attempt()
assert res == "Success"
def test_decorator_with_arg(self):
self.set_to_fail(times=0)
attempt = retry()(self.attempt)
arg = mock.Mock()
res = attempt(arg)
assert res == 'Success'
assert arg.touch.called
|
[] |
[] |
[
"GITHUB_ACTIONS"
] |
[]
|
["GITHUB_ACTIONS"]
|
python
| 1 | 0 | |
backend/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_34001.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
examples/helloworld/src/main/java/example/HelloWorld.java
|
/*
* Copyright 2018 Google LLC. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package example;
import com.google.common.io.Resources;
import java.io.IOException;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
public class HelloWorld {
public static void main(String[] args) throws URISyntaxException, IOException {
Path worldFile = Paths.get(Resources.getResource("world").toURI());
String world = new String(Files.readAllBytes(worldFile), StandardCharsets.UTF_8);
System.out.println("Hello " + world);
System.out.println(System.getenv());
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
examples/whitelabel/whitelabel.go
|
package main
import (
"fmt"
"log"
"os"
"github.com/sendgrid/sendgrid-go/v3"
)
// Createadomainwhitelabel : Create a domain whitelabel.
// POST /whitelabel/domains
func Createadomainwhitelabel() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains", host)
request.Method = "POST"
request.Body = []byte(` {
"automatic_security": false,
"custom_spf": true,
"default": true,
"domain": "example.com",
"ips": [
"192.168.1.1",
"192.168.1.2"
],
"subdomain": "news",
"username": "[email protected]"
}`)
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Listalldomainwhitelabels : List all domain whitelabels.
// GET /whitelabel/domains
func Listalldomainwhitelabels() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains", host)
request.Method = "GET"
queryParams := make(map[string]string)
queryParams["username"] = "test_string"
queryParams["domain"] = "test_string"
queryParams["exclude_subusers"] = "true"
queryParams["limit"] = "1"
queryParams["offset"] = "1"
request.QueryParams = queryParams
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Getthedefaultdomainwhitelabel : Get the default domain whitelabel.
// GET /whitelabel/domains/default
func Getthedefaultdomainwhitelabel() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/default", host)
request.Method = "GET"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Listthedomainwhitelabelassociatedwiththegivenuser : List the domain whitelabel associated with the given user.
// GET /whitelabel/domains/subuser
func Listthedomainwhitelabelassociatedwiththegivenuser() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/subuser", host)
request.Method = "GET"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Disassociateadomainwhitelabelfromagivenuser : Disassociate a domain whitelabel from a given user.
// DELETE /whitelabel/domains/subuser
func Disassociateadomainwhitelabelfromagivenuser() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/subuser", host)
request.Method = "DELETE"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Updateadomainwhitelabel : Update a domain whitelabel.
// PATCH /whitelabel/domains/{domain_id}
func Updateadomainwhitelabel() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/{domain_id}", host)
request.Method = "PATCH"
request.Body = []byte(` {
"custom_spf": true,
"default": false
}`)
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Retrieveadomainwhitelabel : Retrieve a domain whitelabel.
// GET /whitelabel/domains/{domain_id}
func Retrieveadomainwhitelabel() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/{domain_id}", host)
request.Method = "GET"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Deleteadomainwhitelabel : Delete a domain whitelabel.
// DELETE /whitelabel/domains/{domain_id}
func Deleteadomainwhitelabel() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/{domain_id}", host)
request.Method = "DELETE"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Associateadomainwhitelabelwithagivenuser : Associate a domain whitelabel with a given user.
// POST /whitelabel/domains/{domain_id}/subuser
func Associateadomainwhitelabelwithagivenuser() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/{domain_id}/subuser", host)
request.Method = "POST"
request.Body = []byte(` {
"username": "[email protected]"
}`)
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// AddanIPtoadomainwhitelabel : Add an IP to a domain whitelabel.
// POST /whitelabel/domains/{id}/ips
func AddanIPtoadomainwhitelabel() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/{id}/ips", host)
request.Method = "POST"
request.Body = []byte(` {
"ip": "192.168.0.1"
}`)
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// RemoveanIPfromadomainwhitelabel : Remove an IP from a domain whitelabel.
// DELETE /whitelabel/domains/{id}/ips/{ip}
func RemoveanIPfromadomainwhitelabel() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/{id}/ips/{ip}", host)
request.Method = "DELETE"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Validateadomainwhitelabel : Validate a domain whitelabel.
// POST /whitelabel/domains/{id}/validate
func Validateadomainwhitelabel() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/domains/{id}/validate", host)
request.Method = "POST"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// CreateanIPwhitelabel : Create an IP whitelabel
// POST /whitelabel/ips
func CreateanIPwhitelabel() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/ips", host)
request.Method = "POST"
request.Body = []byte(` {
"domain": "example.com",
"ip": "192.168.1.1",
"subdomain": "email"
}`)
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// RetrieveallIPwhitelabels : Retrieve all IP whitelabels
// GET /whitelabel/ips
func RetrieveallIPwhitelabels() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/ips", host)
request.Method = "GET"
queryParams := make(map[string]string)
queryParams["ip"] = "test_string"
queryParams["limit"] = "1"
queryParams["offset"] = "1"
request.QueryParams = queryParams
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// RetrieveanIPwhitelabel : Retrieve an IP whitelabel
// GET /whitelabel/ips/{id}
func RetrieveanIPwhitelabel() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/ips/{id}", host)
request.Method = "GET"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// DeleteanIPwhitelabel : Delete an IP whitelabel
// DELETE /whitelabel/ips/{id}
func DeleteanIPwhitelabel() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/ips/{id}", host)
request.Method = "DELETE"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// ValidateanIPwhitelabel : Validate an IP whitelabel
// POST /whitelabel/ips/{id}/validate
func ValidateanIPwhitelabel() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/ips/{id}/validate", host)
request.Method = "POST"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// CreateaLinkWhitelabel : Create a Link Whitelabel
// POST /whitelabel/links
func CreateaLinkWhitelabel() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links", host)
request.Method = "POST"
request.Body = []byte(` {
"default": true,
"domain": "example.com",
"subdomain": "mail"
}`)
queryParams := make(map[string]string)
queryParams["limit"] = "1"
queryParams["offset"] = "1"
request.QueryParams = queryParams
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// Retrievealllinkwhitelabels : Retrieve all link whitelabels
// GET /whitelabel/links
func Retrievealllinkwhitelabels() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links", host)
request.Method = "GET"
queryParams := make(map[string]string)
queryParams["limit"] = "1"
request.QueryParams = queryParams
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// RetrieveaDefaultLinkWhitelabel : Retrieve a Default Link Whitelabel
// GET /whitelabel/links/default
func RetrieveaDefaultLinkWhitelabel() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/default", host)
request.Method = "GET"
queryParams := make(map[string]string)
queryParams["domain"] = "test_string"
request.QueryParams = queryParams
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// RetrieveAssociatedLinkWhitelabel : Retrieve Associated Link Whitelabel
// GET /whitelabel/links/subuser
func RetrieveAssociatedLinkWhitelabel() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/subuser", host)
request.Method = "GET"
queryParams := make(map[string]string)
queryParams["username"] = "test_string"
request.QueryParams = queryParams
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// DisassociateaLinkWhitelabel : Disassociate a Link Whitelabel
// DELETE /whitelabel/links/subuser
func DisassociateaLinkWhitelabel() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/subuser", host)
request.Method = "DELETE"
queryParams := make(map[string]string)
queryParams["username"] = "test_string"
request.QueryParams = queryParams
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// UpdateaLinkWhitelabel : Update a Link Whitelabel
// PATCH /whitelabel/links/{id}
func UpdateaLinkWhitelabel() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/{id}", host)
request.Method = "PATCH"
request.Body = []byte(` {
"default": true
}`)
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// RetrieveaLinkWhitelabel : Retrieve a Link Whitelabel
// GET /whitelabel/links/{id}
func RetrieveaLinkWhitelabel() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/{id}", host)
request.Method = "GET"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// DeleteaLinkWhitelabel : Delete a Link Whitelabel
// DELETE /whitelabel/links/{id}
func DeleteaLinkWhitelabel() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/{id}", host)
request.Method = "DELETE"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// ValidateaLinkWhitelabel : Validate a Link Whitelabel
// POST /whitelabel/links/{id}/validate
func ValidateaLinkWhitelabel() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/{id}/validate", host)
request.Method = "POST"
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
// AssociateaLinkWhitelabel : Associate a Link Whitelabel
// POST /whitelabel/links/{link_id}/subuser
func AssociateaLinkWhitelabel() {
apiKey := os.Getenv("SENDGRID_API_KEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/whitelabel/links/{link_id}/subuser", host)
request.Method = "POST"
request.Body = []byte(` {
"username": "[email protected]"
}`)
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
func main() {
// add your function calls here
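// For example (assumes SENDGRID_API_KEY is set in the environment):
// Listalldomainwhitelabels()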
}
|
[
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\"",
"\"SENDGRID_API_KEY\""
] |
[] |
[
"SENDGRID_API_KEY"
] |
[]
|
["SENDGRID_API_KEY"]
|
go
| 1 | 0 | |
Instagram/wsgi.py
|
"""
WSGI config for Instagram project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Instagram.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
web/impact/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE',
'impact.settings')
from configurations.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
qa/rpc-tests/util.py
|
# Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Dash developers
# Copyright (c) 2019 The Vzuh developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
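# Derive per-process port bases so parallel test runs don't collide.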
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(1)
def sync_mempools(rpc_connections):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(1)
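# map of node index -> Popen handle for each daemon we launch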
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "vzuh.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(p2p_port(n))+"\n");
f.write("rpcport="+str(rpc_port(n))+"\n");
return datadir
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
vzuhd and vzuh-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run vzuhd:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("BITCOIND", "vzuhd"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
subprocess.check_call([ os.getenv("BITCOINCLI", "vzuh-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:[email protected]:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except Exception:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 10 minutes apart, starting
# at 1 Jan 2014
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].setgenerate(True, 1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in vzuh.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match(r'(\[[0-9a-fA-F:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None):
"""
Start a vzuhd and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
args = [ os.getenv("BITCOIND", "vzuhd"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
subprocess.check_call([ os.getenv("BITCOINCLI", "vzuh-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
proxy = AuthServiceProxy(url)
proxy.url = url # store URL on proxy for info
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
"""
Start multiple vzuhds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
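# Illustrative example (hypothetical amounts): make_change(node, Decimal("10"), Decimal("1"), Decimal("0.01"))
# leaves 8.99 change, which exceeds 2*(1+0.01), so it is split across two fresh addresses.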
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
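# Illustrative usage (hypothetical amounts):
# txid, txhex = send_zeropri_transaction(nodes[0], nodes[1], Decimal("1.0"), Decimal("0.001"))
# Note the fee is paid twice (once per hop), hence gather_inputs(amount + fee*2) above.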
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
|
[] |
[] |
[
"BITCOINCLI",
"BITCOIND"
] |
[]
|
["BITCOINCLI", "BITCOIND"]
|
python
| 2 | 0 | |
BioClients/fda/aer/Client.py
|
#!/usr/bin/env python3
"""
OpenFDA Adverse Event Reports REST API client.
https://open.fda.gov/apis/
"""
###
import sys,os,re,json,argparse,time,yaml,logging
from ... import fda
#
#############################################################################
def ReadParamFile(fparam):
params={};
with open(fparam, 'r') as fh:
for param in yaml.load_all(fh, Loader=yaml.BaseLoader):
for k,v in param.items():
params[k] = v
return params
##############################################################################
if __name__=='__main__':
NMAX=100;
epilog='''\
Example UNII: 786Z46389E
'''
parser = argparse.ArgumentParser(description='OpenFDA Adverse Event Reports client', epilog=epilog)
ops = ['search', 'counts', 'info', 'showfields']
parser.add_argument("op", choices=ops, help='operation')
parser.add_argument("--o", dest="ofile", help="output (TSV)")
parser.add_argument("--drug_class", help="EPC pharmacologic class")
parser.add_argument("--drug_ind", help="drug indication")
parser.add_argument("--drug_unii", help="drug ID UNII")
parser.add_argument("--drug_ndc", help="drug ID NDC")
parser.add_argument("--drug_spl", help="drug ID SPL")
parser.add_argument("--serious", type=bool, help="serious adverse events")
parser.add_argument("--fatal", type=bool, help="fatal adverse events (seriousnessdeath)")
parser.add_argument("--tfrom", help="time-from (received by FDA) (YYYYMMDD)")
parser.add_argument("--tto", default=time.strftime('%Y%m%d',time.localtime()), help="time-to (received by FDA) (YYYYMMDD)")
parser.add_argument("--rawquery")
parser.add_argument("--nmax", type=int, default=NMAX, help="max returned records")
parser.add_argument("--api_host", default=fda.aer.API_HOST)
parser.add_argument("--api_base_path", default=fda.aer.API_BASE_PATH)
parser.add_argument("--param_file", default=os.environ['HOME']+"/.fda.yaml")
parser.add_argument("--api_key")
parser.add_argument("-v", "--verbose", default=0, action="count")
args = parser.parse_args()
logging.basicConfig(format='%(levelname)s:%(message)s', level=(logging.DEBUG if args.verbose>1 else logging.INFO))
api_base_url = 'https://'+args.api_host+args.api_base_path
fout = open(args.ofile, "w+") if args.ofile else sys.stdout
params = ReadParamFile(args.param_file)
if args.api_key: params['API_KEY'] = args.api_key
if not params.get('API_KEY'):
parser.error('Please specify valid API_KEY via --api_key or --param_file')
t0=time.time()
if args.op == "search":
fda.aer.Utils.Search(args.drug_class, args.drug_ind, args.drug_unii, args.drug_ndc, args.drug_spl, args.tfrom, args.tto, args.serious, args.fatal, args.rawquery, args.nmax, params['API_KEY'], api_base_url, fout)
elif args.op == "info":
rval = fda.aer.Utils.Info(api_base_url)
for field in rval.keys():
if field=='results': continue
print(f"{field:16s}: {rval[field]}")
elif args.op == "counts":
print(fda.aer.Utils.GetCounts(args.tfrom, args.tto, api_base_url))
elif args.op == "showfields":
fields = fda.aer.Utils.GetFields(api_base_url)
for field in fields:
print(f"\t{field}")
else:
parser.error(f"Invalid operation: {args.op}")
logging.info(f"Elapsed time: {time.strftime('%Hh:%Mm:%Ss',time.gmtime(time.time()-t0))}")
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
src/tools/fuse/src/elektra_fuse/elektra_util.py
|
from pathlib import Path
import os, re, errno, sys, subprocess
import kdb
elektra_namespaces = ["user:", "system:", "dir:", "spec:", "cascading:", "proc:"]
parent_key = None # not yet set
dir_file_special_name = "®elektra.value"
xattr_kdb_file = "®elektra.file"
#translates filesystem paths (below the "pid" level) to elektra paths (e.g. '/user:/dir/@elektra.value' -> 'user:/dir', '/cascading:/key' -> '/key', assuming parent_key == "/")
def os_path_to_elektra_path(os_path):
#inject parent_key after namespace into os_path
namespace, *rest = Path(os_path).parts[1:]
os_path = str(Path(namespace, parent_key.name[1:], *rest))
elektra_path = os_path
if Path(elektra_path).name == dir_file_special_name:
elektra_path = str(Path(elektra_path).parent).strip("/")
if re.match("^cascading:|^/cascading:", elektra_path):
elektra_path = re.sub("^cascading:|^/cascading:", "", elektra_path)
if elektra_path == "":
elektra_path = "/"
else:
elektra_path = elektra_path.strip("/") #remove slashes ('/' is reserved for the cascading namespace)
if elektra_path.endswith(":"):
elektra_path = elektra_path + "/" #special case intruced around elektra v5 (the root of a namespace needs a trailing slash)
return elektra_path
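# Illustrative mappings (assuming parent_key == "/"): a namespace root such as
# "/system:" maps to "system:/" (the trailing-slash special case above), while
# "/cascading:/key" maps to "/key".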
#returns a kdb instance (with mocked argv, envp)
def _get_kdb_instance():
config = kdb.KeySet(0)
contract = kdb.KeySet(0)
custom_envp = [ "%s=%s" % (k, v) for (k, v) in os.environ.items() ]
kdb.goptsContract (contract, sys.argv, custom_envp, parent_key, config)
db = kdb.KDB(contract)
#monkey patch db.get as
#- proc:/ keys are only available through a cascading lookup (See manpage elektra-namespaces: "Keys in the namespace proc ... are ignored by kdbGet ... ")
#- we don't want spec: keys to appear in the cascading namespace
orig_get = db.get
def patched_get(ks, orig_root):
justified_root = re.sub("^proc:/", "/", str(orig_root))
status = orig_get(ks, justified_root)
if kdb.Key(orig_root).isCascading():
for key_to_remove in ks.filter(lambda key: key.isSpec()):
ks.remove(key_to_remove)
return status
db.get = patched_get
return db
def size_of_file(os_path):
return len(file_contents(os_path))
def is_directory_empty(os_path):
dirs, files = ls(os_path)
return not bool(dirs) and not bool(files)
#performs function of the "kdb file" command
def get_kdb_file(os_path):
elektra_path = os_path_to_elektra_path(os_path)
resolved_file_path = subprocess.check_output(["kdb", "file", elektra_path]).decode().strip()
return resolved_file_path
def update_key_value(os_path: str, new_value: bytes):
# kdb.kdb.KDBException, may be thrown
# validation => whole key needs to be written at once
with _get_kdb_instance() as db:
path = os_path_to_elektra_path(os_path)
ks = kdb.KeySet()
db.get(ks, path)
key = ks[path]
#try to save new_value as UTF-8 string in case it can be decoded as such
try:
new_value_as_string = new_value.decode(encoding="utf-8", errors="strict")
key.value = new_value_as_string
except UnicodeDecodeError:
raise OSError(errno.ENOTSUP) #general binary meta-keys are not supported
db.set(ks, path) #using key instead of path here deleted the key
#may throw KeyError
def file_contents(os_path):
key, _ = get_key_and_keyset(os_path)
if key.isString():
return key.value.encode(encoding='UTF-8') #return bytes in all cases
elif key.isBinary():
return key.value
else:
raise ValueError("Unsupported key type")
#creates key, or, if key already exists, does nothing
def create_key(os_path):
path = os_path_to_elektra_path(os_path)
with _get_kdb_instance() as db:
ks = kdb.KeySet()
db.get(ks, path)
if not path in ks:
key = kdb.Key(path)
ks.append(key)
keys_modified = db.set(ks, path)
if keys_modified != 1:
raise OSError(errno.EIO)
#could also be an attempt to create an already existing key. in this rare case the error code does not fit.
def get_meta_map(os_path):
key, _ = get_key_and_keyset(os_path)
return { meta.name:meta.value for meta in key.getMeta() }
def has_meta(os_path, name):
try:
meta_map = get_meta_map(os_path)
return name in meta_map
except KeyError:
return False
#get_meta, set_meta may throw KeyError
def get_meta(os_path, name):
return get_meta_map(os_path)[name]
def set_meta(os_path, name, value):
meta_map = get_meta_map(os_path)
meta_map[name] = value
update_meta_map(os_path, meta_map)
def update_meta_map(os_path, new_meta_map):
path = os_path_to_elektra_path(os_path)
with _get_kdb_instance() as db:
ks = kdb.KeySet()
db.get(ks, path)
key = ks[path]
#delete old meta keys
for meta_key in key.getMeta():
key.delMeta(meta_key.name)
#insert new meta keys
for keyname in new_meta_map.keys():
key.setMeta(keyname, new_meta_map[keyname])
db.set(ks, path)
#may throw KeyError
def get_key_and_keyset(os_path):
path = os_path_to_elektra_path(os_path)
with _get_kdb_instance() as db:
ks = kdb.KeySet()
db.get(ks, path)
key = ks[path]
return (key, ks)
#returns tuple indicating if path is a dir, is a file
def key_type(os_path):
if os_path in [".", "..", "/", "/user:", "/system:", "/spec:", "/dir:", "/cascading:", "/proc:"]:
return (True, False)
dir_listing, file_listing = ls(os_path)
return (bool(dir_listing), bool(file_listing))
def is_list_prefix(prefix, list_):
if len(prefix) > len(list_):
return False
for (i, item) in enumerate(prefix):
if list_[i] != item:
return False
return True
def is_path_prefix(prefix, path):
#remove (potential) trailing / to cope with special case introduced in os_path_to_elektra_path
prefix = re.sub("/$", "", prefix)
return is_list_prefix(prefix.split("/"), path.split("/"))
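# Illustrative example: is_path_prefix("/a", "/a/b") is True, but is_path_prefix("/a", "/ab")
# is False, because comparison happens per path component rather than per character.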
def _remove_namespace_prefix(elektra_path):
return re.sub("^.*:", "", elektra_path)
#returns tuple of dirs, files of given path (does not include '.', '..')
def ls(os_path):
path = os_path_to_elektra_path(os_path)
root = kdb.Key(path)
is_root_level = len(path) > 1 and path.endswith("/") # special case
with _get_kdb_instance() as db:
ks = kdb.KeySet()
db.get(ks, root)
#only retain keys that are below the root (kdb.get does not guarantee this property)
ks_filtered = kdb.KeySet()
for key in ks:
if key.isBelowOrSame(root):
ks_filtered.append(key)
path_without_namespace = _remove_namespace_prefix(path)
result_keys_without_namespace = map(_remove_namespace_prefix, ks_filtered.unpack_names())
below = {name.split(path_without_namespace)[1] for name in result_keys_without_namespace if is_path_prefix(path_without_namespace, name)}
dirs = {name.split("/")[0 if is_root_level else 1] for name in below if "/" in name}
files = {name for name in below if not "/" in name}.difference(dirs)
if '' in files:
files.remove('')
files.add(dir_file_special_name)
return (dirs, files)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
bnpy/viz/PlotTrace.py
|
'''
PlotTrace.py
Executable for plotting trace stats of learning algorithm progress, including
* objective function (ELBO) vs laps thru data
* number of active components vs laps thru data
* hamming distance vs laps thru data
Usage (command-line)
-------
python -m bnpy.viz.PlotTrace dataName jobpattern [kwargs]
'''
import numpy as np
import argparse
import glob
import os
import scipy.io
from viz.PlotUtil import pylab
from ioutil import BNPYArgParser
from ioutil.CountReader import loadKeffForTask
from viz.JobFilter import filterJobs
taskidsHelpMsg = "ids of trials/runs to plot from given job." + \
" Example: '4' or '1,2,3' or '2-6'."
Colors = [(0, 0, 0), # black
(0, 0, 1), # blue
(1, 0, 0), # red
(0, 1, 0.25), # green (darker)
(1, 0, 1), # magenta
(0, 1, 1), # cyan
(1, 0.6, 0), # orange
]
LabelMap = dict(laps='num pass thru data',
iters='num alg steps',
times='elapsed time (sec)',
K='num topics K',
evidence='train objective',
)
LabelMap['laps-saved-params'] = 'num pass thru data'
LabelMap['hamming-distance'] = 'Hamming dist.'
LabelMap['Keff'] = 'num topics K'
def plotJobsThatMatchKeywords(jpathPattern='/tmp/', **kwargs):
''' Create line plots for jobs matching pattern and provided kwargs
'''
if not jpathPattern.startswith(os.path.sep):
jpathPattern = os.path.join(os.environ['BNPYOUTDIR'], jpathPattern)
jpaths, legNames = filterJobs(jpathPattern, **kwargs)
plotJobs(jpaths, legNames, **kwargs)
def plotJobs(jpaths, legNames, styles=None, density=2,
xvar='laps', yvar='evidence', loc='upper right',
xmin=None, xmax=None,
taskids=None, savefilename=None, tickfontsize=None,
bbox_to_anchor=None, **kwargs):
''' Create line plots for provided jobs.
'''
nLines = len(jpaths)
if nLines == 0:
raise ValueError('Empty job list. Nothing to plot.')
nLeg = len(legNames)
for lineID in range(nLines):
if styles is None:
curStyle = dict(colorID=lineID)
else:
curStyle = styles[lineID]
task_kwargs = dict(**kwargs)
task_kwargs.update(curStyle)
plot_all_tasks_for_job(jpaths[lineID], legNames[lineID],
xvar=xvar, yvar=yvar,
taskids=taskids, density=density, **task_kwargs)
# Y-axis limit determination
# If we have "enough" data about the run beyond two full passes of dataset,
# we zoom in on the region of data beyond lap 2
if xvar == 'laps' and yvar == 'evidence':
xmax = 0
ymin = np.inf
ymin2 = np.inf
ymax = -np.inf
allRunsHaveXBeyond1 = True
for line in pylab.gca().get_lines():
xd = line.get_xdata()
yd = line.get_ydata()
if xd.size < 3:
allRunsHaveXBeyond1 = False
continue
posLap1 = np.searchsorted(xd, 1.0)
posLap2 = np.searchsorted(xd, 2.0)
if posLap1 < xd.size:
ymin = np.minimum(ymin, yd[posLap1])
ymax = np.maximum(ymax, yd[posLap1:].max())
if posLap2 < xd.size:
ymin2 = np.minimum(ymin2, yd[posLap2])
xmax = np.maximum(xmax, xd.max())
if xd.max() <= 1:
allRunsHaveXBeyond1 = False
if allRunsHaveXBeyond1 and xmax > 1.5:
# If all relevant curves extend beyond x=1, only show that part
xmin = 1.0 - 1e-5
else:
xmin = 0
if allRunsHaveXBeyond1 and ymin2 < ymax:
range1 = ymax - ymin
range2 = ymax - ymin2
if 10 * range2 < range1:
# Y values jump from lap1 to lap2 is enormous,
# so let's just show y values from lap2 onward...
ymin = ymin2
if (not np.allclose(ymax, ymin)) and allRunsHaveXBeyond1:
pylab.ylim([ymin, ymax + 0.1 * (ymax - ymin)])
pylab.xlim([xmin, xmax + .05 * (xmax - xmin)])
if loc is not None and len(jpaths) > 1:
pylab.legend(loc=loc, bbox_to_anchor=bbox_to_anchor)
if tickfontsize is not None:
pylab.tick_params(axis='both', which='major', labelsize=tickfontsize)
if savefilename is not None:
try:
pylab.show(block=False)
except TypeError:
pass # when using IPython notebook
pylab.savefig(savefilename, bbox_inches='tight', pad_inches=0)
else:
try:
pylab.show(block=True)
except TypeError:
pass # when using IPython notebook
def plot_all_tasks_for_job(jobpath, label, taskids=None,
color=None,
colorID=0,
density=2,
yvar='evidence',
markersize=10,
linewidth=2,
linestyle='-',
drawLineToXMax=None,
showOnlyAfterLap=0,
xvar='laps',
**kwargs):
''' Create line plot in current figure for each task/run of jobpath
'''
if not os.path.exists(jobpath):
if not jobpath.startswith(os.path.sep):
jobpath_tmp = os.path.join(os.environ['BNPYOUTDIR'], jobpath)
if not os.path.exists(jobpath_tmp):
raise ValueError("PATH NOT FOUND: %s" % (jobpath))
jobpath = jobpath_tmp
if color is None:
color = Colors[colorID % len(Colors)]
taskids = BNPYArgParser.parse_task_ids(jobpath, taskids)
if yvar == 'hamming-distance':
yspfile = os.path.join(jobpath, taskids[0], yvar + '-saved-params.txt')
if xvar == 'laps' and os.path.isfile(yspfile):
xvar = 'laps-saved-params'
for tt, taskid in enumerate(taskids):
xs = None
ys = None
laps = None
try:
var_ext = ''
ytxtfile = os.path.join(jobpath, taskid, yvar + '.txt')
if not os.path.isfile(ytxtfile):
var_ext = '-saved-params'
ytxtfile = os.path.join(
jobpath, taskid, yvar + var_ext + '.txt')
ys = np.loadtxt(ytxtfile)
if ytxtfile.count('saved-params'):
laptxtfile = os.path.join(jobpath, taskid, 'laps-saved-params.txt')
else:
laptxtfile = os.path.join(jobpath, taskid, 'laps.txt')
except IOError as e:
# TODO: when is this code needed?
# xs, ys = loadXYFromTopicModelFiles(jobpath, taskid)
try:
if isinstance(xs, np.ndarray) and yvar.count('Keff'):
ys = loadKeffForTask(
os.path.join(jobpath, taskid), **kwargs)
assert xs.size == ys.size
else:
# Heldout metrics
xs, ys = loadXYFromTopicModelSummaryFiles(
jobpath, taskid, xvar=xvar, yvar=yvar)
if showOnlyAfterLap and showOnlyAfterLap > 0:
laps, _ = loadXYFromTopicModelSummaryFiles(
jobpath, taskid, xvar='laps', yvar=yvar)
except ValueError:
try:
xs, ys = loadXYFromTopicModelSummaryFiles(jobpath, taskid)
except ValueError:
raise e
if yvar == 'hamming-distance' or yvar == 'Keff':
if xvar == 'laps-saved-params':
# fix off-by-one error, if we save an extra dist on final lap
if xs.size == ys.size - 1:
ys = ys[:-1]
elif ys.size == xs.size - 1:
xs = xs[:-1] # fix off-by-one error, if we quit early
elif xs.size != ys.size:
# Try to subsample both time series at laps where they
# intersect
laps_x = np.loadtxt(os.path.join(jobpath, taskid, 'laps.txt'))
laps_y = np.loadtxt(os.path.join(jobpath, taskid,
'laps-saved-params.txt'))
assert xs.size == laps_x.size
if ys.size == laps_y.size - 1:
laps_y = laps_y[:-1]
xs = xs[np.in1d(laps_x, laps_y)]
ys = ys[np.in1d(laps_y, laps_x)]
if xs.size != ys.size:
raise ValueError('Dimension mismatch. len(xs)=%d, len(ys)=%d'
% (xs.size, ys.size))
# Cleanup laps data. Verify that it is sorted, with no collisions.
if xvar == 'laps':
diff = xs[1:] - xs[:-1]
goodIDs = np.flatnonzero(diff >= 0)
if len(goodIDs) < xs.size - 1:
print( 'WARNING: looks like multiple runs writing to this file!')
print( jobpath)
print( 'Task: ', taskid)
print( len(goodIDs), xs.size - 1)
xs = np.hstack([xs[goodIDs], xs[-1]])
ys = np.hstack([ys[goodIDs], ys[-1]])
if xvar == 'laps' and yvar == 'evidence':
mask = xs >= 1.0
xs = xs[mask]
ys = ys[mask]
elif showOnlyAfterLap:
# print "Filtering for data recorded at lap >= %s" % (
# showOnlyAfterLap)
if laps is None:
laps = np.loadtxt(laptxtfile)
mask = laps >= showOnlyAfterLap
xs = xs[mask]
ys = ys[mask]
# Force plot density (data points per lap) to desired specification
# This avoids making plots that have huge file sizes,
# due to too much content in the given display space
if xvar == 'laps' and xs.size > 20 and np.sum(xs > 5) > 10:
if (xs[-1] - xs[9]) != 0:
curDensity = (xs.size - 10) / (xs[-1] - xs[9])
else:
curDensity = density
while curDensity > density and xs.size > 11:
# Thin xs and ys data by a factor of 2
# while preserving the first 10 data points
xs = np.hstack([xs[:10], xs[10::2]])
ys = np.hstack([ys[:10], ys[10::2]])
curDensity = (xs.size - 10) / (xs[-1] - xs[9])
plotargs = dict(
markersize=markersize,
linewidth=linewidth,
linestyle=linestyle,
label=None,
color=color, markeredgecolor=color)
for key in kwargs:
if key in plotargs:
plotargs[key] = kwargs[key]
if tt == 0:
plotargs['label'] = label
pylab.plot(xs, ys, **plotargs)
if drawLineToXMax:
xs_dashed = np.asarray([xs[-1], drawLineToXMax])
ys_dashed = np.asarray([ys[-1], ys[-1]])
plotargs['label'] = None
pylab.plot(xs_dashed, ys_dashed, '--', **plotargs)
pylab.xlabel(LabelMap[xvar])
if yvar in LabelMap:
yLabelStr = LabelMap[yvar]
if yvar == 'Keff' and 'effCountThr' in kwargs:
effCountThr = float(kwargs['effCountThr'])
yLabelStr = yLabelStr + ' > %s' % (str(effCountThr))
pylab.ylabel(yLabelStr)
def loadXYFromTopicModelSummaryFiles(jobpath, taskid, xvar='laps', yvar='K'):
''' Load x and y variables for line plots from TopicModel files
'''
ypath = os.path.join(jobpath, taskid, 'predlik-' + yvar + '.txt')
if not os.path.exists(ypath):
raise ValueError('Summary text file not found: ' + ypath)
if xvar.count('lap'):
xpath = os.path.join(jobpath, taskid, 'predlik-lapTrain.txt')
elif xvar.count('time'):
xpath = os.path.join(jobpath, taskid, 'predlik-timeTrain.txt')
else:
xpath = os.path.join(jobpath, taskid, 'predlik-' + xvar + '.txt')
xs = np.loadtxt(xpath)
ys = np.loadtxt(ypath)
# HACK!
##if yvar.count('Lik') and jobpath.count('Berk') and np.max(ys) > 100:
## ys /= 64
return xs, ys
def loadXYFromTopicModelFiles(jobpath, taskid, xvar='laps', yvar='K'):
''' Load x and y variables for line plots from TopicModel files
'''
tmpathList = glob.glob(os.path.join(jobpath, taskid, 'Lap*TopicModel.mat'))
if len(tmpathList) < 1:
raise ValueError('No TopicModel.mat files found')
tmpathList.sort() # ascending, from lap 0 to lap 1 to lap 100 to ...
basenames = [x.split(os.path.sep)[-1] for x in tmpathList]
laps = np.asarray([float(x[3:11]) for x in basenames])
Ks = np.zeros_like(laps)
for tt, tmpath in enumerate(tmpathList):
if yvar == 'K':
Q = scipy.io.loadmat(tmpath, variable_names=['K', 'probs'])
try:
Ks[tt] = Q['K']
except KeyError:
Ks[tt] = Q['probs'].size
else:
raise ValueError('Unknown yvar type for topic model: ' + yvar)
return laps, Ks
def parse_args(xvar='laps', yvar='evidence'):
''' Returns Namespace of parsed arguments retrieved from command line
'''
parser = argparse.ArgumentParser()
parser.add_argument('dataName', type=str, default='AsteriskK8')
parser.add_argument('jpath', type=str, default='demo*')
parser.add_argument('--xvar', type=str, default=xvar,
choices=LabelMap.keys(),
help="name of x axis variable to plot.")
parser.add_argument('--yvar', type=str, default=yvar,
#choices=LabelMap.keys(),
help="name of y axis variable to plot.")
helpMsg = "ids of trials/runs to plot from given job." + \
" Example: '4' or '1,2,3' or '2-6'."
parser.add_argument(
'--taskids', type=str, default=None, help=helpMsg)
parser.add_argument(
'--savefilename', type=str, default=None,
help="location where to save figure (absolute path directory)")
args, unkList = parser.parse_known_args()
argDict = BNPYArgParser.arglist_to_kwargs(unkList, doConvertFromStr=False)
argDict.update(args.__dict__)
argDict['jpathPattern'] = os.path.join(os.environ['BNPYOUTDIR'],
args.dataName,
args.jpath)
del argDict['dataName']
del argDict['jpath']
return argDict
plotJobsThatMatch = plotJobsThatMatchKeywords
if __name__ == "__main__":
argDict = parse_args('laps', 'evidence')
plotJobsThatMatchKeywords(**argDict)
|
[] |
[] |
[
"BNPYOUTDIR"
] |
[]
|
["BNPYOUTDIR"]
|
python
| 1 | 0 | |
templates/charger-wbec.go
|
package templates
import (
"github.com/evcc-io/config/registry"
)
func init() {
template := registry.Template{
Class: "charger",
Type: "custom",
Name: "wbec",
Sample: `status:
source: mqtt
topic: wbec/lp/1/status
enabled:
source: mqtt
topic: wbec/lp/1/enabled
enable:
source: mqtt
topic: wbec/lp/1/enable
maxcurrent:
source: mqtt
topic: wbec/lp/1/maxcurrent`,
}
registry.Add(template)
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
x/crisis/genesis.go
|
package crisis
import (
sdk "github.com/deep2chain/sscq/types"
)
// GenesisState - crisis genesis state
type GenesisState struct {
ConstantFee sdk.Coin `json:"constant_fee"`
}
// NewGenesisState creates a new GenesisState object
func NewGenesisState(constantFee sdk.Coin) GenesisState {
return GenesisState{
ConstantFee: constantFee,
}
}
// DefaultGenesisState creates a default GenesisState object
func DefaultGenesisState() GenesisState {
return GenesisState{
ConstantFee: sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(1000)),
}
}
// new crisis genesis
func InitGenesis(ctx sdk.Context, keeper Keeper, data GenesisState) {
keeper.SetConstantFee(ctx, data.ConstantFee)
}
// ExportGenesis returns a GenesisState for a given context and keeper.
func ExportGenesis(ctx sdk.Context, keeper Keeper) GenesisState {
constantFee := keeper.GetConstantFee(ctx)
return NewGenesisState(constantFee)
}
// ValidateGenesis - placeholder function
func ValidateGenesis(data GenesisState) error {
return nil
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
docs/internal/investigations/runtime-governor-poc/runtime/component/cmd/main.go
|
package main
import (
"errors"
"fmt"
"log"
"time"
"github.com/kyma-project/control-plane/docs/internal/investigations/runtime-governor-poc/runtime/component/internal/dapr"
"github.com/kyma-project/control-plane/docs/internal/investigations/runtime-governor-poc/runtime/component/internal/kcp"
"github.com/dapr/dapr/pkg/apis/components/v1alpha1"
"github.com/vrischmann/envconfig"
)
const (
selectorMetadataName = "selector"
)
type Config struct {
Interval time.Duration `envconfig:"default=1s,optional"`
KubeconfigPath string `envconfig:"optional"`
RuntimeID string `envconfig:"default=1"`
URL string `envconfig:"default=https://runtime-governor.kyma.local"`
}
func main() {
cfg := Config{}
err := envconfig.InitWithPrefix(&cfg, "APP")
if err != nil {
panic(err)
}
kcpCli := kcp.NewClient(cfg.URL)
daprCli := dapr.NewClientOrDie(cfg.KubeconfigPath)
for {
time.Sleep(cfg.Interval)
resource, err := kcpCli.Fetch(cfg.RuntimeID)
if err != nil {
log.Println("Error when fetching the configuration data")
log.Println(err.Error())
continue
}
reload, err := daprCli.UpsertComponent(resource, resource.Namespace)
if err != nil {
log.Println("Error when upserting the dapr Component")
log.Println(err.Error())
continue
}
if reload {
selector, err := getSelectorFromMetadata(resource.Spec.Metadata)
if err != nil {
log.Println("Error when parsing the selector from resource metadata")
log.Println(err.Error())
continue
}
if err := daprCli.DeletePodsForSelector(selector, resource.Namespace); err != nil {
log.Printf("Error when deleting pods for given selector (%s)\n", selector)
log.Println(err.Error())
continue
}
log.Println("Successfully reloaded configuration")
}
}
fmt.Println("Finished successfully!")
}
func getSelectorFromMetadata(metadata []v1alpha1.MetadataItem) (string, error) {
for _, item := range metadata {
if item.Name == selectorMetadataName {
return item.Value, nil
}
}
return "", errors.New("selector not found")
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
src/hello/wsgi.py
|
"""
WSGI config for hello project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hello.settings")
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
typeidea/typeidea/wsgi.py
|
"""
WSGI config for typeidea project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "typeidea.settings")
profile = os.environ.get('TYPEIDEA_PROFILE', 'line')
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"typeidea.settings.{}".format(profile))
application = get_wsgi_application()
|
[] |
[] |
[
"TYPEIDEA_PROFILE"
] |
[]
|
["TYPEIDEA_PROFILE"]
|
python
| 1 | 0 | |
datastore/datastore.go
|
package datastore
import (
"github.com/HouzuoGuo/tiedot/db"
"time"
"log"
"io"
"encoding/json"
"github.com/fatih/structs"
"os"
)
type Member struct {
Id int `json:"id"`
Type string `json:"type"`
Degrees float64 `json:"degrees"`
Size float64 `json:"size"`
Color string `json:"color"`
E float64 `json:"e"`
F float64 `json:"f"`
}
type Composite struct {
Id int
CreatedAt time.Time `json:",omitempty"`
Members []Member `json:"members"`
Name string `json:"name"`
Layers []int `json:"layers"`
}
func (composite Composite) toMap() map[string]interface{} {
return structs.Map(composite)
}
func DecodeComposite(r io.ReadCloser) (composite *Composite, err error) {
composite = new(Composite)
err = json.NewDecoder(r).Decode(composite)
return
}
func CreateComposite(r io.ReadCloser) (composite *Composite, err error) {
composite, err = DecodeComposite(r)
if err != nil {
return
}
composites := dbConn.Use("Composites")
composite.CreatedAt = time.Now()
id, err := composites.Insert(composite.toMap())
if err != nil {
panic(err)
}
composite.Id = id
return
}
func GetAllComposites() []Composite {
collection := dbConn.Use("Composites")
var composites []Composite
collection.ForEachDoc(func(id int, docContent []byte) bool {
composite := new(Composite)
json.Unmarshal(docContent, &composite)
composite.Id = id
composites = append(composites, *composite)
return true
})
return composites
}
func DeleteComposite(id int) error {
collection := dbConn.Use("Composites")
return collection.Delete(id)
}
var dbConn *db.DB
var err error
func init() {
dbDir := os.Getenv("TANDY_TIEDOT_DIR")
if dbDir == "" {
log.Fatal("Please specify location of database in TANDY_TIEDOT_DIR environment variable")
}
log.Println("Spinning up tiedot database at", dbDir)
dbConn, err = db.OpenDB(dbDir)
if err != nil {
panic(err)
}
setupCollections := []string{"Composites"}
// Create collections that don't exist yet
existingCollections := dbConn.AllCols()
for _, collection := range setupCollections {
log.Println("Checking for collection", collection, "...")
if stringInSlice(collection, existingCollections) {
log.Println("...exists")
} else {
log.Println("...creating it")
if err := dbConn.Create(collection); err != nil {
panic(err)
}
}
}
// TODO: could provide some startup stats -- number of records in table, or something
}
// stringInSlice checks for presence of a string in a slice
func stringInSlice(str string, list []string) bool {
for _, v := range list {
if v == str {
return true
}
}
return false
}
|
[
"\"TANDY_TIEDOT_DIR\""
] |
[] |
[
"TANDY_TIEDOT_DIR"
] |
[]
|
["TANDY_TIEDOT_DIR"]
|
go
| 1 | 0 | |
app/service/bbq/video/model/util.go
|
package model
import (
"fmt"
"reflect"
"strings"
)
// Implode function like php
func Implode(list interface{}, seq string) string {
listValue := reflect.Indirect(reflect.ValueOf(list))
if listValue.Kind() != reflect.Slice {
return ""
}
count := listValue.Len()
listStr := make([]string, 0, count)
for i := 0; i < count; i++ {
v := listValue.Index(i)
if str, err := getValue(v); err == nil {
listStr = append(listStr, str)
}
}
return strings.Join(listStr, seq)
}
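// Illustrative example: Implode([]int{1, 2, 3}, ",") returns "1,2,3";
// a non-slice argument such as Implode(42, ",") returns "".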
func getValue(value reflect.Value) (res string, err error) {
switch value.Kind() {
case reflect.Ptr:
res, err = getValue(value.Elem())
default:
res = fmt.Sprint(value.Interface())
}
return
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
gfauto/gfauto/fuzz.py
|
# -*- coding: utf-8 -*-
# Copyright 2019 The GraphicsFuzz Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fuzzing module.
The main entry point to GraphicsFuzz Auto.
"""
import argparse
import enum
import os
import random
import secrets
import shutil
import sys
from pathlib import Path
from typing import List, Optional
from gfauto import (
artifact_util,
binaries_util,
devices_util,
fuzz_glsl_test,
fuzz_spirv_test,
gflogging,
interrupt_util,
settings_util,
shader_job_util,
test_util,
util,
)
from gfauto.device_pb2 import Device, DevicePreprocess
from gfauto.gflogging import log
from gfauto.util import check_dir_exists
# Root:
# - donors/ (contains GLSL shader jobs)
# - temp/ (contains staging directories with random names)
# - reports/ (contains reports)
# Staging directory.
# - source_template/ (source directory but with no test metadata yet)
# - test_1/, test_2/, etc. (test directories)
# Test directory:
# - source/ (source directory, with test.json and other files)
# - results/ (results)
# Report: a test directory with a reduction for a specific device.
# E.g. v signature v device name added
# reports/crashes/Null_point/123_opt_pixel/
# - source/
# - results/
# - laptop/
# - reference/ variant/
# - ... (see below for a more detailed example)
# - pixel/
# - reference/ variant/
# - ... (see below for a more detailed example)
# - reductions/ (since this is a report for a pixel device, we have reductions)
# - ... (see below for a more detailed example)
# - temp/123/ (a staging directory; not a proper test_dir, as it only has "source_template", not "source".)
# - source_template/
# - --test.json-- this will NOT be present because this is just a source template directory.
# - reference/ variant/
# - shader.json, shader.{comp,frag}
# - 123_no_opt/ 123_opt_O/ 123_opt_Os/ 123_opt_rand_1/ etc. (Proper test_dirs, as they have "source". These may be
# copied to become a report if a bug is found.)
# - source/ (same as source_template, but with test.json)
# - results/
# - pixel/ other_phone/ laptop/ etc.
# - reference/ variant/
# - test.amber
# - image.png
# - STATUS
# - log.txt
# - (all other result files and intermediate files for running the shader on the device)
# - reductions/ (reductions are only added once the staging directory is copied to the reports directory)
# - reduction_1/ reduction_blah/ etc. (reduction name; also a test_dir)
# - source/ (same as other source dirs, but with the final reduced shader source)
# - reduction_work/
# - reference/ variant/
# - shader.json, shader_reduction_001_success.json,
# shader_reduction_002_failed.json, etc., shader_reduced_final.json
# - shader/ shader_reduction_001/
# (these are the result directories for each step, containing STATUS, etc.)
#
DONORS_DIR = "donors"
REFERENCES_DIR = "references"
REFERENCE_IMAGE_FILE_NAME = "reference.png"
VARIANT_IMAGE_FILE_NAME = "variant.png"
BUFFER_FILE_NAME = "buffer.bin"
BEST_REDUCTION_NAME = "best"
AMBER_RUN_TIME_LIMIT = 30
STATUS_TOOL_CRASH = "TOOL_CRASH"
STATUS_CRASH = "CRASH"
STATUS_UNRESPONSIVE = "UNRESPONSIVE"
STATUS_TOOL_TIMEOUT = "TOOL_TIMEOUT"
STATUS_TIMEOUT = "TIMEOUT"
STATUS_SUCCESS = "SUCCESS"
# Number of bits for seeding the RNG.
# Python normally uses 256 bits internally when seeding its RNG, hence this choice.
ITERATION_SEED_BITS = 256
FUZZ_FAILURES_DIR_NAME = "fuzz_failures"
class FuzzingTool(enum.Enum):
GLSL_FUZZ = "GLSL_FUZZ"
SPIRV_FUZZ = "SPIRV_FUZZ"
def get_random_name() -> str:
# TODO: could change to human-readable random name or the date.
return util.get_random_name()
def get_fuzzing_tool_pattern(
glsl_fuzz_iterations: int, spirv_fuzz_iterations: int
) -> List[FuzzingTool]:
fuzzing_tool_pattern = [FuzzingTool.GLSL_FUZZ] * glsl_fuzz_iterations
fuzzing_tool_pattern += [FuzzingTool.SPIRV_FUZZ] * spirv_fuzz_iterations
# If empty, we default to just running GLSL_FUZZ repeatedly.
if not fuzzing_tool_pattern:
fuzzing_tool_pattern = [FuzzingTool.GLSL_FUZZ]
return fuzzing_tool_pattern
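# Illustrative example: get_fuzzing_tool_pattern(glsl_fuzz_iterations=2, spirv_fuzz_iterations=1)
# yields [GLSL_FUZZ, GLSL_FUZZ, SPIRV_FUZZ]; the fuzzing loop in main_helper cycles through it.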
def main() -> None:
parser = argparse.ArgumentParser(
description="Fuzz devices using glsl-fuzz and/or spirv-fuzz to generate tests. "
"By default, repeatedly generates tests using glsl-fuzz. "
"You can instead specify the number of times each tool will run; "
"glsl-fuzz runs G times, then spirv-fuzz runs S times, then the pattern repeats. "
"By default, G=0 and S=0, in which case glsl-fuzz is hardcoded to run. "
'Each run of glsl-fuzz/spirv-fuzz uses a random "iteration seed", which can be used to replay the invocation of the tool and the steps that follow. '
)
parser.add_argument(
"--settings",
help="Path to the settings JSON file for this fuzzing instance.",
default=str(settings_util.DEFAULT_SETTINGS_FILE_PATH),
)
parser.add_argument(
"--iteration_seed",
help="The seed to use for one fuzzing iteration (useful for reproducing an issue).",
)
parser.add_argument(
"--glsl_fuzz_iterations",
metavar="G",
help="Run glsl-fuzz G times to generate some tests, before moving on to the next tool.",
action="store",
default=0,
type=int,
)
parser.add_argument(
"--spirv_fuzz_iterations",
metavar="S",
help="Run spirv-fuzz S times to generate some tests, before moving on to the next tool.",
action="store",
default=0,
type=int,
)
parser.add_argument(
"--allow_no_stack_traces",
help="Continue even if we cannot get stack traces (using catchsegv or cdb).",
action="store_true",
)
parser.add_argument(
"--active_device",
help="Add an active device name, overriding those in the settings.json file. "
"Can be used multiple times to add multiple devices. "
"E.g. --active_device host --active_device host_with_alternative_icd. "
"This allows sharing a single settings.json file between multiple instances of gfauto_fuzz. "
"Note that a host_preprocessor device will automatically be added as the first active device, if it is missing. ",
action="append",
)
parsed_args = parser.parse_args(sys.argv[1:])
settings_path = Path(parsed_args.settings)
iteration_seed: Optional[int] = None if parsed_args.iteration_seed is None else int(
parsed_args.iteration_seed
)
glsl_fuzz_iterations: int = parsed_args.glsl_fuzz_iterations
spirv_fuzz_iterations: int = parsed_args.spirv_fuzz_iterations
allow_no_stack_traces: bool = parsed_args.allow_no_stack_traces
active_device_names: Optional[List[str]] = parsed_args.active_device
# E.g. [GLSL_FUZZ, GLSL_FUZZ, SPIRV_FUZZ] will run glsl-fuzz twice, then spirv-fuzz once, then repeat.
fuzzing_tool_pattern = get_fuzzing_tool_pattern(
glsl_fuzz_iterations=glsl_fuzz_iterations,
spirv_fuzz_iterations=spirv_fuzz_iterations,
)
with util.file_open_text(Path(f"log_{get_random_name()}.txt"), "w") as log_file:
gflogging.push_stream_for_logging(log_file)
try:
main_helper(
settings_path,
iteration_seed,
fuzzing_tool_pattern,
allow_no_stack_traces,
active_device_names=active_device_names,
)
except settings_util.NoSettingsFile as exception:
log(str(exception))
finally:
gflogging.pop_stream_for_logging()
def try_get_root_file() -> Path:
try:
return artifact_util.artifact_path_get_root()
except FileNotFoundError:
log(
"Could not find ROOT file (in the current directory or above) to mark where binaries should be stored. "
"Creating a ROOT file in the current directory."
)
return util.file_write_text(Path(artifact_util.ARTIFACT_ROOT_FILE_NAME), "")
def main_helper( # pylint: disable=too-many-locals, too-many-branches, too-many-statements;
settings_path: Path,
iteration_seed_override: Optional[int] = None,
fuzzing_tool_pattern: Optional[List[FuzzingTool]] = None,
allow_no_stack_traces: bool = False,
override_sigint: bool = True,
use_amber_vulkan_loader: bool = False,
active_device_names: Optional[List[str]] = None,
) -> None:
if not fuzzing_tool_pattern:
fuzzing_tool_pattern = [FuzzingTool.GLSL_FUZZ]
util.update_gcov_environment_variable_if_needed()
if override_sigint:
interrupt_util.override_sigint()
try_get_root_file()
settings = settings_util.read_or_create(settings_path)
active_devices = devices_util.get_active_devices(
settings.device_list, active_device_names=active_device_names
)
# Add host_preprocessor device if it is missing.
if not active_devices[0].HasField("preprocess"):
active_devices.insert(
0, Device(name="host_preprocessor", preprocess=DevicePreprocess())
)
reports_dir = Path() / "reports"
fuzz_failures_dir = reports_dir / FUZZ_FAILURES_DIR_NAME
temp_dir = Path() / "temp"
references_dir = Path() / REFERENCES_DIR
donors_dir = Path() / DONORS_DIR
spirv_fuzz_shaders_dir = Path() / "spirv_fuzz_shaders"
# Log a warning if there is no tool on the PATH for printing stack traces.
prepended = util.prepend_catchsegv_if_available([], log_warning=True)
if not allow_no_stack_traces and not prepended:
raise AssertionError("Stopping because we cannot get stack traces.")
spirv_fuzz_shaders: List[Path] = []
references: List[Path] = []
if FuzzingTool.SPIRV_FUZZ in fuzzing_tool_pattern:
check_dir_exists(spirv_fuzz_shaders_dir)
spirv_fuzz_shaders = sorted(spirv_fuzz_shaders_dir.rglob("*.json"))
if FuzzingTool.GLSL_FUZZ in fuzzing_tool_pattern:
check_dir_exists(references_dir)
check_dir_exists(donors_dir)
# TODO: make GraphicsFuzz find donors recursively.
references = sorted(references_dir.rglob("*.json"))
# Filter to only include .json files that have at least one shader (.frag, .vert, .comp) file.
references = [
ref for ref in references if shader_job_util.get_related_files(ref)
]
binary_manager = binaries_util.get_default_binary_manager(
settings=settings
).get_child_binary_manager(list(settings.custom_binaries), prepend=True)
if use_amber_vulkan_loader:
library_path = binary_manager.get_binary_path_by_name(
binaries_util.AMBER_VULKAN_LOADER_NAME
).path.parent
util.add_library_paths_to_environ([library_path], os.environ)
fuzzing_tool_index = 0
while True:
interrupt_util.interrupt_if_needed()
# We have to use "is not None" because the seed could be 0.
if iteration_seed_override is not None:
iteration_seed = iteration_seed_override
else:
iteration_seed = secrets.randbits(ITERATION_SEED_BITS)
log(f"Iteration seed: {iteration_seed}")
random.seed(iteration_seed)
staging_name = get_random_name()[:8]
staging_dir = temp_dir / staging_name
try:
util.mkdir_p_new(staging_dir)
except FileExistsError:
if iteration_seed_override is not None:
raise
log(f"Staging directory already exists: {str(staging_dir)}")
log(f"Starting new iteration.")
continue
# Pseudocode:
# - Create test_dir(s) in staging directory.
# - Run test_dir(s) on all active devices (stop early if appropriate).
# - For each test failure on each device, copy the test to reports_dir, adding the device and crash signature.
# - Reduce each report (on the given device).
# - Produce a summary for each report.
fuzzing_tool = fuzzing_tool_pattern[fuzzing_tool_index]
fuzzing_tool_index = (fuzzing_tool_index + 1) % len(fuzzing_tool_pattern)
if fuzzing_tool == FuzzingTool.SPIRV_FUZZ:
fuzz_spirv_test.fuzz_spirv(
staging_dir,
reports_dir,
fuzz_failures_dir,
active_devices,
spirv_fuzz_shaders,
settings,
binary_manager,
)
elif fuzzing_tool == FuzzingTool.GLSL_FUZZ:
fuzz_glsl_test.fuzz_glsl(
staging_dir,
reports_dir,
fuzz_failures_dir,
active_devices,
references,
donors_dir,
settings,
binary_manager,
)
else:
raise AssertionError(f"Unknown fuzzing tool: {fuzzing_tool}")
if iteration_seed_override is not None:
log("Stopping due to iteration_seed")
break
shutil.rmtree(staging_dir)
def create_summary_and_reproduce(
test_dir: Path, binary_manager: binaries_util.BinaryManager
) -> None:
util.mkdirs_p(test_dir / "summary")
test_metadata = test_util.metadata_read(test_dir)
# noinspection PyTypeChecker
if test_metadata.HasField("glsl") or test_metadata.HasField("spirv_fuzz"):
fuzz_glsl_test.create_summary_and_reproduce(test_dir, binary_manager)
else:
raise AssertionError("Unrecognized test type")
if __name__ == "__main__":
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
scripts/jenkins/build.py
|
#!/usr/bin/env python
import argparse
import logging
import os
import random
import signal
import subprocess
import sys
from Queue import Queue
logging.basicConfig(level="INFO")
log = logging.getLogger(__name__)
PROJECT_NAME = "cla_frontend"
background_processes = Queue()
def parse_args():
parser = argparse.ArgumentParser(description="Build project ready for testing by Jenkins.")
parser.add_argument("envname", help="e.g. integration, production, etc.")
parser.add_argument(
"--backend-hash",
default="",
help="cla_backend *commit hash* to run tests against; " "defaults to latest develop branch commit",
)
parser.add_argument("--skip-tests", nargs="*", choices=("django", "karma"), help="skip tests: django, karma")
return parser.parse_args()
def run(command, background=False, **kwargs):
if "shell" not in kwargs:
kwargs["shell"] = True
log.info("Running {command}".format(command=command))
if background:
process = subprocess.Popen(command, **kwargs)
background_processes.put(process)
return process
return_code = subprocess.call(command, **kwargs)
if return_code:
sys.exit(return_code)
def make_virtualenv(env):
venv_path = "/tmp/jenkins/envs/{project}-{env}".format(project=PROJECT_NAME, env=env)
if not os.path.isdir(venv_path):
run("/usr/local/bin/virtualenv {path}".format(path=venv_path))
return venv_path
def install_dependencies(venv_path):
run("{venv}/bin/pip install -U setuptools pip wheel".format(venv=venv_path))
run("{venv}/bin/pip install -r requirements/jenkins.txt".format(venv=venv_path))
def clean_pyc():
run("find . -name '*.pyc' -delete")
def wait_until_available(url):
wget = run(
("wget {url} -O/dev/null -t 20 --retry-connrefused --waitretry=2 " "-T 60").format(url=url), background=True
)
wget.wait()
def remove_old_static_assets():
run("rm -rf cla_frontend/assets-src/vendor")
# run('rm -rf cla_frontend/assets')
def update_static_assets(venv_path):
run("%s/bin/python manage.py builddata constants_json" % venv_path)
bundle = run("bundle install", background=True)
npm_prune = run("npm prune", background=True)
bower_prune = run("bower prune", background=True)
npm_prune.wait()
npm = run("npm install", background=True)
bower_prune.wait()
bower = run("bower install", background=True)
npm.wait()
npm_update = run("npm update", background=True)
npm_update.wait()
bower.wait()
bundle.wait()
gulp = run("gulp build", background=True)
gulp.wait()
def run_python_tests(venv_path):
return run(
("%s/bin/python manage.py jenkins --coverage-rcfile=.coveragerc " "--settings=cla_frontend.settings.jenkins")
% venv_path,
background=True,
)
def _port(start_from=8100, up_to=8299):
port = random.randint(start_from, up_to)
while True:
yield port
port += 1
gen_port = _port()
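# Illustrative usage: the first next(gen_port) yields a random port in [8100, 8299],
# e.g. 8154; each later call yields the next consecutive port (8155, 8156, ...).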
def run_server(env, backend_hash, jenkins_build_path):
venv = "/tmp/jenkins/envs/cla_backend-%s" % env
project_dir = "/srv/jenkins/shared-backend/%s-%s" % (PROJECT_NAME, env)
if not os.path.isdir(project_dir):
os.makedirs(project_dir)
if not os.path.isdir(os.path.join(project_dir, ".git")):
run(
"cd {project_dir} && git clone https://github.com/ministryofjustice/cla_backend.git .".format(
project_dir=project_dir
)
)
if backend_hash:
run(
"cd {project_dir} && git fetch --prune && git checkout -f {backend_hash}".format(
project_dir=project_dir, backend_hash=backend_hash
)
)
else:
run(
"cd {project_dir} && git fetch --prune && git checkout develop && git pull".format(project_dir=project_dir)
)
backend_port = next(gen_port)
os.environ["CLA_BACKEND_PORT"] = str(backend_port)
os.environ["BACKEND_TEST_DB_SUFFIX"] = "4%s" % PROJECT_NAME
fixtures = (
"initial_groups.json",
"kb_from_knowledgebase.json",
"initial_category.json",
"test_provider.json",
"initial_mattertype.json",
"test_auth_clients.json",
"initial_media_codes.json",
"test_rotas.json",
"test_casearchived.json",
"test_providercases.json",
"test_provider_allocations.json",
"initial_complaint_categories",
)
log_stdout = os.path.join(jenkins_build_path, "cla_backend.stdout.log")
log_stderr = os.path.join(jenkins_build_path, "cla_backend.stderr.log")
run(
(
"cd {project_dir} && "
"{venv}/bin/python manage.py testserver {fixtures} "
"--addrport {port} --noinput "
"--settings=cla_backend.settings.jenkins "
"1> {log_stdout} "
"2> {log_stderr}"
).format(
project_dir=project_dir,
venv=venv,
fixtures=" ".join(fixtures),
port=backend_port,
log_stdout=log_stdout,
log_stderr=log_stderr,
),
background=True,
)
def run_integration_tests(venv_path, jenkins_build_path, skip_tests):
run_karma = "karma" not in skip_tests
wait_until_available("http://localhost:{port}/admin/".format(port=os.environ.get("CLA_BACKEND_PORT")))
frontend_port = next(gen_port)
os.environ["CLA_FRONTEND_PORT"] = str(frontend_port)
os.path.join(jenkins_build_path, "cla_frontend.stdout.log")
os.path.join(jenkins_build_path, "cla_frontend.stderr.log")
if run_karma:
run("npm run test-single-run", background=True)
def kill_child_processes(pid, sig=signal.SIGTERM):
ps_cmd = subprocess.Popen("ps -o pid --ppid {0} --noheaders".format(pid), shell=True, stdout=subprocess.PIPE)
ps_out = ps_cmd.stdout.read()
ps_cmd.wait()
for pid_str in ps_out.split("\n")[:-1]:
os.kill(int(pid_str), sig)
def kill_all_background_processes():
while not background_processes.empty():
process = background_processes.get()
try:
kill_child_processes(process.pid)
process.kill()
except OSError:
pass
def main():
try:
jenkins_workspace_path = os.environ["WORKSPACE"]
jenkins_build_path = os.path.join(jenkins_workspace_path, "..", "builds", os.environ["BUILD_NUMBER"])
jenkins_build_path = os.path.abspath(jenkins_build_path)
args = parse_args()
skip_tests = set(args.skip_tests or [])
venv_path = make_virtualenv(args.envname)
install_dependencies(venv_path)
remove_old_static_assets()
update_static_assets(venv_path)
clean_pyc()
python_tests = None
if "django" not in skip_tests:
python_tests = run_python_tests(venv_path)
if {"karma"} - skip_tests:
run_server(args.envname, args.backend_hash, jenkins_build_path)
if python_tests:
python_tests.wait()
run_integration_tests(venv_path, jenkins_build_path, skip_tests)
finally:
kill_all_background_processes()
if __name__ == "__main__":
main()
|
[] |
[] |
[
"CLA_FRONTEND_PORT",
"WORKSPACE",
"BUILD_NUMBER",
"CLA_BACKEND_PORT",
"BACKEND_TEST_DB_SUFFIX"
] |
[]
|
["CLA_FRONTEND_PORT", "WORKSPACE", "BUILD_NUMBER", "CLA_BACKEND_PORT", "BACKEND_TEST_DB_SUFFIX"]
|
python
| 5 | 0 | |
src/test/java/com/github/mstavares/jkyc/logic/ptcc/KYCTests.java
|
package com.github.mstavares.jkyc.logic.ptcc;
import com.github.mstavares.jkyc.gateways.ptcc.config.ConfigurationModule;
import com.github.mstavares.jkyc.gateways.ptcc.logic.PortugueseCC;
import com.github.mstavares.jkyc.logic.ptcc.verifiers.sod.exceptions.SODVerifierException;
import com.github.mstavares.jkyc.logic.ptcc.verifiers.wallet.exceptions.WalletAddressVerifierException;
import com.github.mstavares.jkyc.logic.ptcc.verifiers.x509certificate.exceptions.CertificateChainException;
import com.github.mstavares.jkyc.logic.ptcc.verifiers.x509certificate.exceptions.CertificateRevokedException;
import com.github.mstavares.jkyc.models.ptcc.DataIDRoot;
import com.google.gson.Gson;
import com.google.gson.JsonObject;
import com.google.inject.Guice;
import com.google.inject.Injector;
import org.bouncycastle.jce.provider.BouncyCastleProvider;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.contrib.java.lang.system.EnvironmentVariables;
import java.io.FileReader;
import java.io.IOException;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.security.Security;
import java.security.cert.CertificateException;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
public class KYCTests {
@Rule
public final EnvironmentVariables environmentVariables = new EnvironmentVariables();
private PortugueseCC portugueseCC;
@Before
public void setup() throws IOException {
Security.addProvider(new BouncyCastleProvider());
environmentVariables.set("KYC_CONFIG", "test.properties");
Injector injector = Guice.createInjector(new ConfigurationModule(System.getenv("KYC_CONFIG")));
portugueseCC = injector.getInstance(PortugueseCC.class);
}
@Test
public void testOK() throws IOException, CertificateException, WalletAddressVerifierException, CertificateRevokedException, NoSuchAlgorithmException, SODVerifierException, CertificateChainException, KeyStoreException {
JsonObject jsonObject = new Gson().fromJson(new FileReader(KYCTests.class.getClassLoader().getResource("DataId_Test_Card_OK.json").getFile()), JsonObject.class);
DataIDRoot data = new Gson().fromJson(jsonObject.toString(), DataIDRoot.class);
assertThat(portugueseCC.verify(data), is(true));
}
@Test(expected = WalletAddressVerifierException.class)
public void testWalletSignatureFailure() throws IOException, CertificateException, WalletAddressVerifierException, CertificateRevokedException, NoSuchAlgorithmException, SODVerifierException, CertificateChainException, KeyStoreException {
JsonObject jsonObject = new Gson().fromJson(new FileReader(KYCTests.class.getClassLoader().getResource("DataId_Test_Card_WA.json").getFile()), JsonObject.class);
DataIDRoot data = new Gson().fromJson(jsonObject.toString(), DataIDRoot.class);
portugueseCC.verify(data);
}
@Test(expected = SODVerifierException.class)
public void testSODIdentityAttributesFailure() throws IOException, CertificateException, WalletAddressVerifierException, CertificateRevokedException, NoSuchAlgorithmException, SODVerifierException, CertificateChainException, KeyStoreException {
JsonObject jsonObject = new Gson().fromJson(new FileReader(KYCTests.class.getClassLoader().getResource("DataId_Test_Card_SOD_ID.json").getFile()), JsonObject.class);
DataIDRoot data = new Gson().fromJson(jsonObject.toString(), DataIDRoot.class);
portugueseCC.verify(data);
}
@Test(expected = SODVerifierException.class)
public void testSODAddressAttributesFailure() throws IOException, CertificateException, WalletAddressVerifierException, CertificateRevokedException, NoSuchAlgorithmException, SODVerifierException, CertificateChainException, KeyStoreException {
JsonObject jsonObject = new Gson().fromJson(new FileReader(KYCTests.class.getClassLoader().getResource("DataId_Test_Card_SOD_ADD.json").getFile()), JsonObject.class);
DataIDRoot data = new Gson().fromJson(jsonObject.toString(), DataIDRoot.class);
portugueseCC.verify(data);
}
}
|
[
"\"KYC_CONFIG\""
] |
[] |
[
"KYC_CONFIG"
] |
[]
|
["KYC_CONFIG"]
|
java
| 1 | 0 | |
tests/test_jinja2.py
|
# -*- coding: utf-8 -*-
import os
import six
from opinionated_configparser import OpinionatedConfigParser
if six.PY2:
UNICODE = u"ééé"
else:
UNICODE = "ééé"
TEST_DICT1 = {
"section1": {
"key1": "value1{{ENV_VAR}}",
"key2": "{{UNICODE_ENV_VAR}}"
}
}
def test_env1():
if "ENV_VAR" in os.environ:
del os.environ["ENV_VAR"]
x = OpinionatedConfigParser()
x.read_dict(TEST_DICT1)
assert x.get("section1", "key1") == "value1"
def test_env2():
os.environ["ENV_VAR"] = "foo"
x = OpinionatedConfigParser()
x.read_dict(TEST_DICT1)
assert x.get("section1", "key1") == "value1foo"
del os.environ["ENV_VAR"]
def test_env3():
if six.PY2:
os.environ["UNICODE_ENV_VAR"] = UNICODE.encode("utf8")
else:
os.environ["UNICODE_ENV_VAR"] = UNICODE
x = OpinionatedConfigParser()
x.read_dict(TEST_DICT1)
assert x.get("section1", "key2") == UNICODE
del os.environ["UNICODE_ENV_VAR"]
|
[] |
[] |
[
"UNICODE_ENV_VAR",
"ENV_VAR"
] |
[]
|
["UNICODE_ENV_VAR", "ENV_VAR"]
|
python
| 2 | 0 | |
src/test/java/nitezh/ministock/dataaccess/IexStockQuoteRepositoryTests.java
|
/*
The MIT License
Copyright (c) 2013 Nitesh Patel http://niteshpatel.github.io/ministocks
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
package nitezh.ministock.dataaccess;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import nitezh.ministock.domain.StockQuote;
import nitezh.ministock.mocks.MockCache;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
public class IexStockQuoteRepositoryTests {
private IexStockQuoteRepository quoteRepository;
@Before
public void setUp() {
FxChangeRepository fxRepository = new FxChangeRepository();
quoteRepository = new IexStockQuoteRepository(fxRepository);
}
@Test
public void retrieveQuotesAsJson() {
// Skipif
Assume.assumeTrue(System.getenv("CIRCLECI") == null);
// Arrange
List<String> symbols = Arrays.asList("AAPL", "GOOG");
JSONArray json = null;
// Act
try {
json = this.quoteRepository.retrieveQuotesAsJson(new MockCache(), symbols);
} catch (JSONException ignored) {
}
// Assert
assertNotNull(json);
assertEquals(2, json.length());
JSONObject aaplJson = json.optJSONObject(0);
assertEquals("AAPL", aaplJson.optString("symbol"));
assertTrue(Arrays.asList("NasdaqNM", "NMS", "Nasdaq Global Select").contains(aaplJson.optString("exchange")));
assertEquals("Apple Inc.", aaplJson.optString("name"));
JSONObject googJson = json.optJSONObject(1);
assertEquals("GOOG", googJson.optString("symbol"));
assertTrue(Arrays.asList("NasdaqNM", "NMS", "Nasdaq Global Select").contains(googJson.optString("exchange")));
assertEquals("Alphabet Inc.", googJson.optString("name"));
}
@Test
public void getQuotes() {
// Skipif
Assume.assumeTrue(System.getenv("CIRCLECI") == null);
// Arrange
List<String> symbols = Arrays.asList("AAPL", "GOOG");
// Act
HashMap<String, StockQuote> stockQuotes = quoteRepository.getQuotes(new MockCache(), symbols);
// Assert
assertEquals(2, stockQuotes.size());
StockQuote aaplQuote = stockQuotes.get("AAPL");
assertEquals("AAPL", aaplQuote.getSymbol());
assertTrue(Arrays.asList("NasdaqNM", "NMS", "Nasdaq Global Select").contains(aaplQuote.getExchange()));
assertEquals("Apple Inc.", aaplQuote.getName());
StockQuote googQuote = stockQuotes.get("GOOG");
assertEquals("GOOG", googQuote.getSymbol());
assertTrue(Arrays.asList("NasdaqNM", "NMS", "Nasdaq Global Select").contains(googQuote.getExchange()));
assertEquals("Alphabet Inc.", googQuote.getName());
}
}
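// Illustrative note (not part of the original source): both tests above call
// the live IEX service, so they self-skip whenever the CIRCLECI environment
// variable is set, as it is on CircleCI builds. To force the same skip
// locally, export CIRCLECI=1 first; only the variable's presence is checked,
// not its value.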
| ["\"CIRCLECI\"", "\"CIRCLECI\""] | [] | ["CIRCLECI"] | [] | ["CIRCLECI"] | java | 1 | 0 | |
cmd/meta.go
|
package cmd
import (
"bytes"
"context"
"fmt"
"log"
"os"
"path/filepath"
"sort"
"strings"
"github.com/AlecAivazis/survey/v2"
"github.com/b4b4r07/afx/pkg/config"
"github.com/b4b4r07/afx/pkg/env"
"github.com/b4b4r07/afx/pkg/errors"
"github.com/b4b4r07/afx/pkg/github"
"github.com/b4b4r07/afx/pkg/helpers/shell"
"github.com/b4b4r07/afx/pkg/printers"
"github.com/b4b4r07/afx/pkg/state"
"github.com/b4b4r07/afx/pkg/update"
"github.com/fatih/color"
"github.com/mattn/go-shellwords"
)
type metaCmd struct {
env *env.Config
packages []config.Package
main *config.Main
state *state.State
configs map[string]config.Config
updateMessageChan chan *update.ReleaseInfo
}
func (m *metaCmd) init() error {
m.updateMessageChan = make(chan *update.ReleaseInfo)
go func() {
log.Printf("[DEBUG] (goroutine): checking new updates...")
release, err := checkForUpdate(Version)
if err != nil {
log.Printf("[ERROR] (goroutine): cannot check for new updates: %s", err)
}
m.updateMessageChan <- release
}()
root := filepath.Join(os.Getenv("HOME"), ".afx")
cfgRoot := filepath.Join(os.Getenv("HOME"), ".config", "afx")
cache := filepath.Join(root, "cache.json")
files, err := config.WalkDir(cfgRoot)
if err != nil {
return errors.Wrapf(err, "%s: failed to walk dir", cfgRoot)
}
var pkgs []config.Package
app := &config.DefaultMain
m.configs = map[string]config.Config{}
for _, file := range files {
cfg, err := config.Read(file)
if err != nil {
return errors.Wrapf(err, "%s: failed to read config", file)
}
parsed, err := cfg.Parse()
if err != nil {
return errors.Wrapf(err, "%s: failed to parse config", file)
}
pkgs = append(pkgs, parsed...)
// Append config to one struct
m.configs[file] = cfg
if cfg.Main != nil {
app = cfg.Main
}
}
m.main = app
if err := config.Validate(pkgs); err != nil {
return errors.Wrap(err, "failed to validate packages")
}
pkgs, err = config.Sort(pkgs)
if err != nil {
return errors.Wrap(err, "failed to resolve dependencies between packages")
}
m.packages = pkgs
m.env = env.New(cache)
m.env.Add(env.Variables{
"AFX_CONFIG_PATH": env.Variable{Value: cfgRoot},
"AFX_LOG": env.Variable{},
"AFX_LOG_PATH": env.Variable{},
"AFX_COMMAND_PATH": env.Variable{Default: filepath.Join(os.Getenv("HOME"), "bin")},
"AFX_SHELL": env.Variable{Default: m.main.Shell},
"AFX_SUDO_PASSWORD": env.Variable{
Input: env.Input{
When: config.HasSudoInCommandBuildSteps(m.packages),
Message: "Please enter sudo command password",
Help: "Some packages build steps requires sudo command",
},
},
"GITHUB_TOKEN": env.Variable{
Input: env.Input{
When: config.HasGitHubReleaseBlock(m.packages),
Message: "Please type your GITHUB_TOKEN",
Help: "To fetch GitHub Releases, GitHub token is required",
},
},
"AFX_NO_UPDATE_NOTIFIER": env.Variable{},
})
for k, v := range m.main.Env {
log.Printf("[DEBUG] main: set env: %s=%s", k, v)
os.Setenv(k, v)
}
log.Printf("[DEBUG] mkdir %s\n", root)
os.MkdirAll(root, os.ModePerm)
log.Printf("[DEBUG] mkdir %s\n", os.Getenv("AFX_COMMAND_PATH"))
os.MkdirAll(os.Getenv("AFX_COMMAND_PATH"), os.ModePerm)
resourcers := make([]state.Resourcer, len(m.packages))
for i, pkg := range m.packages {
resourcers[i] = pkg
}
s, err := state.Open(filepath.Join(root, "state.json"), resourcers)
if err != nil {
return errors.Wrap(err, "failed to open state file")
}
m.state = s
log.Printf("[INFO] state additions: (%d) %#v", len(s.Additions), state.Keys(s.Additions))
log.Printf("[INFO] state deletions: (%d) %#v", len(s.Deletions), state.Keys(s.Deletions))
log.Printf("[INFO] state changes: (%d) %#v", len(s.Changes), state.Keys(s.Changes))
log.Printf("[INFO] state unchanges: (%d) []string{...skip...}", len(s.NoChanges))
return nil
}
func printForUpdate(uriCh chan *update.ReleaseInfo) {
switch Version {
case "unset":
return
}
log.Printf("[DEBUG] checking updates on afx repo...")
newRelease := <-uriCh
if newRelease != nil {
fmt.Fprintf(os.Stdout, "\n\n%s %s -> %s\n",
color.YellowString("A new release of afx is available:"),
color.CyanString("v"+Version),
color.CyanString(newRelease.Version))
fmt.Fprintf(os.Stdout, "%s\n\n", color.YellowString(newRelease.URL))
fmt.Fprintf(os.Stdout, "To upgrade, run: afx self-update\n")
}
}
func (m *metaCmd) printForUpdate() error {
if m.updateMessageChan == nil {
return errors.New("update message chan is not set")
}
printForUpdate(m.updateMessageChan)
return nil
}
func (m *metaCmd) prompt() (config.Package, error) {
if m.main.FilterCmd == "" {
return nil, errors.New("filter_command is not set")
}
var stdin, stdout bytes.Buffer
p := shellwords.NewParser()
p.ParseEnv = true
p.ParseBacktick = true
args, err := p.Parse(m.main.FilterCmd)
if err != nil {
return nil, errors.New("failed to parse filter command in main config")
}
cmd := shell.Shell{
Stdin: &stdin,
Stdout: &stdout,
Stderr: os.Stderr,
Command: args[0],
Args: args[1:],
}
for _, pkg := range m.packages {
fmt.Fprintln(&stdin, pkg.GetName())
}
if err := cmd.Run(context.Background()); err != nil {
return nil, err
}
search := func(name string) config.Package {
for _, pkg := range m.packages {
if pkg.GetName() == name {
return pkg
}
}
return nil
}
for _, line := range strings.Split(stdout.String(), "\n") {
if pkg := search(line); pkg != nil {
return pkg, nil
}
}
return nil, errors.New("pkg not found")
}
func (m *metaCmd) askRunCommand(op interface{}, pkgs []string) (bool, error) {
var do string
switch op.(type) {
case installCmd:
do = "install"
case uninstallCmd:
do = "uninstall"
case updateCmd:
do = "update"
case checkCmd:
do = "check"
default:
return false, errors.New("unsupported command type")
}
length := 3
target := strings.Join(pkgs, ", ")
if len(pkgs) > length {
target = fmt.Sprintf("%s, ... (%d packages)", strings.Join(pkgs[:length], ", "), len(pkgs))
}
yes := false
confirm := survey.Confirm{
Message: fmt.Sprintf("OK to %s these packages? %s", do, color.YellowString(target)),
}
if len(pkgs) > length {
helpMessage := "\n"
sort.Strings(pkgs)
for _, pkg := range pkgs {
helpMessage += fmt.Sprintf("- %s\n", pkg)
}
confirm.Help = helpMessage
}
if err := survey.AskOne(&confirm, &yes); err != nil {
return false, errors.Wrap(err, "failed to get input from console")
}
return yes, nil
}
func shouldCheckForUpdate() bool {
if os.Getenv("AFX_NO_UPDATE_NOTIFIER") != "" {
return false
}
return !isCI() && printers.IsTerminal(os.Stdout) && printers.IsTerminal(os.Stderr)
}
// based on https://github.com/watson/ci-info/blob/HEAD/index.js
func isCI() bool {
return os.Getenv("CI") != "" || // GitHub Actions, Travis CI, CircleCI, Cirrus CI, GitLab CI, AppVeyor, CodeShip, dsari
os.Getenv("BUILD_NUMBER") != "" || // Jenkins, TeamCity
os.Getenv("RUN_ID") != "" // TaskCluster, dsari
}
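// Illustrative values (matching the checks above): CI=true covers GitHub
// Actions, Travis CI, CircleCI and friends; BUILD_NUMBER=42 covers Jenkins
// and TeamCity; RUN_ID=7 covers TaskCluster and dsari. With none of them
// set, isCI() returns false and the update notifier may run.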
func checkForUpdate(currentVersion string) (*update.ReleaseInfo, error) {
if !shouldCheckForUpdate() {
return nil, nil
}
client := github.NewClient()
stateFilePath := filepath.Join(os.Getenv("HOME"), ".afx", "version.json")
return update.CheckForUpdate(client, stateFilePath, Repository, Version)
}
func (m metaCmd) GetPackage(resource state.Resource) config.Package {
for _, pkg := range m.packages {
if pkg.GetName() == resource.Name {
return pkg
}
}
return nil
}
func (m metaCmd) GetPackages(resources []state.Resource) []config.Package {
var pkgs []config.Package
for _, resource := range resources {
pkgs = append(pkgs, m.GetPackage(resource))
}
return pkgs
}
func (m metaCmd) GetConfig() config.Config {
var all config.Config
for _, config := range m.configs {
if config.Main != nil {
all.Main = config.Main
}
all.GitHub = append(all.GitHub, config.GitHub...)
all.Gist = append(all.Gist, config.Gist...)
all.HTTP = append(all.HTTP, config.HTTP...)
all.Local = append(all.Local, config.Local...)
}
return all
}
| ["\"HOME\"", "\"HOME\"", "\"HOME\"", "\"AFX_COMMAND_PATH\"", "\"AFX_COMMAND_PATH\"", "\"AFX_NO_UPDATE_NOTIFIER\"", "\"CI\"", "\"BUILD_NUMBER\"", "\"RUN_ID\"", "\"HOME\""] | [] | ["AFX_NO_UPDATE_NOTIFIER", "AFX_COMMAND_PATH", "CI", "BUILD_NUMBER", "RUN_ID", "HOME"] | [] | ["AFX_NO_UPDATE_NOTIFIER", "AFX_COMMAND_PATH", "CI", "BUILD_NUMBER", "RUN_ID", "HOME"] | go | 6 | 0 | |
src/tools/nuscenes-devkit/eval/detection/tests/test_evaluate.py
|
# nuScenes dev-kit.
# Code written by Oscar Beijbom, 2019.
import json
import os
import random
import shutil
import unittest
from typing import Dict
import numpy as np
from tqdm import tqdm
from nuscenes import NuScenes
from nuscenes.eval.common.config import config_factory
from nuscenes.eval.detection.constants import DETECTION_NAMES
from nuscenes.eval.detection.evaluate import DetectionEval
from nuscenes.eval.detection.utils import category_to_detection_name, detection_name_to_rel_attributes
from nuscenes.utils.splits import create_splits_scenes
class TestMain(unittest.TestCase):
res_mockup = 'nusc_eval.json'
res_eval_folder = 'tmp'
def tearDown(self):
if os.path.exists(self.res_mockup):
os.remove(self.res_mockup)
if os.path.exists(self.res_eval_folder):
shutil.rmtree(self.res_eval_folder)
@staticmethod
def _mock_submission(nusc: NuScenes, split: str) -> Dict[str, dict]:
"""
Creates "reasonable" submission (results and metadata) by looping through the mini-val set, adding 1 GT
prediction per sample. Predictions will be permuted randomly along all axes.
"""
def random_class(category_name: str) -> str:
# Alter 10% of the valid labels.
class_names = sorted(DETECTION_NAMES)
tmp = category_to_detection_name(category_name)
if tmp is not None and np.random.rand() < .9:
return tmp
else:
return class_names[np.random.randint(0, len(class_names) - 1)]
def random_attr(name: str) -> str:
"""
This is the most straightforward way to generate a random attribute.
Not currently used b/c we want the test fixture to be backwards compatible.
"""
# Get relevant attributes.
rel_attributes = detection_name_to_rel_attributes(name)
if len(rel_attributes) == 0:
# Empty string for classes without attributes.
return ''
else:
# Pick a random attribute otherwise.
return rel_attributes[np.random.randint(0, len(rel_attributes))]
mock_meta = {
'use_camera': False,
'use_lidar': True,
'use_radar': False,
'use_map': False,
'use_external': False,
}
mock_results = {}
splits = create_splits_scenes()
val_samples = []
for sample in nusc.sample:
if nusc.get('scene', sample['scene_token'])['name'] in splits[split]:
val_samples.append(sample)
for sample in tqdm(val_samples, leave=False):
sample_res = []
for ann_token in sample['anns']:
ann = nusc.get('sample_annotation', ann_token)
detection_name = random_class(ann['category_name'])
sample_res.append(
{
'sample_token': sample['token'],
'translation': list(np.array(ann['translation']) + 5 * (np.random.rand(3) - 0.5)),
'size': list(np.array(ann['size']) * 2 * (np.random.rand(3) + 0.5)),
'rotation': list(np.array(ann['rotation']) + ((np.random.rand(4) - 0.5) * .1)),
'velocity': list(nusc.box_velocity(ann_token)[:2] * (np.random.rand(3)[:2] + 0.5)),
'detection_name': detection_name,
'detection_score': random.random(),
'attribute_name': random_attr(detection_name)
})
mock_results[sample['token']] = sample_res
mock_submission = {
'meta': mock_meta,
'results': mock_results
}
return mock_submission
def test_delta(self):
"""
This tests runs the evaluation for an arbitrary random set of predictions.
This score is then captured in this very test such that if we change the eval code,
this test will trigger if the results changed.
"""
random.seed(42)
np.random.seed(42)
assert 'NUSCENES' in os.environ, 'Set NUSCENES env. variable to enable tests.'
nusc = NuScenes(version='v1.0-mini', dataroot=os.environ['NUSCENES'], verbose=False)
with open(self.res_mockup, 'w') as f:
json.dump(self._mock_submission(nusc, 'mini_val'), f, indent=2)
cfg = config_factory('detection_cvpr_2019')
nusc_eval = DetectionEval(nusc, cfg, self.res_mockup, eval_set='mini_val', output_dir=self.res_eval_folder,
verbose=False)
metrics, md_list = nusc_eval.evaluate()
# 1. Score = 0.22082865720221012. Measured on the branch "release_v0.2" on March 7 2019.
# 2. Score = 0.2199307290627096. Changed to measure center distance from the ego-vehicle.
# 3. Score = 0.24954451673961747. Changed to 1.0-mini and cleaned up build script.
# 4. Score = 0.20478832626986893. Updated treatment of cones, barriers, and other algo tunings.
# 5. Score = 0.2043569666105005. AP calculation area is changed from >=min_recall to >min_recall.
# 6. Score = 0.20636954644294506. After bike-rack filtering.
# 7. Score = 0.20237925145690996. After TP reversion bug.
# 8. Score = 0.24047129251302665. After bike racks bug.
# 9. Score = 0.24104572227466886. After bug fix in calc_tp. Include the max recall and exclude the min recall.
# 10. Score = 0.19449091580477748. Changed to use v1.0 mini_val split.
self.assertAlmostEqual(metrics.nd_score, 0.19449091580477748)
if __name__ == '__main__':
unittest.main()
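# Illustrative invocation (an assumption, not part of the original source):
#   export NUSCENES=/data/sets/nuscenes   # hypothetical dataroot for v1.0-mini
#   python -m unittest test_evaluate
# Only the NUSCENES variable name is taken from the assertion above; the path
# and module name are placeholders.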
| [] | [] | ["NUSCENES"] | [] | ["NUSCENES"] | python | 1 | 0 | |
index.py
|
import json
import os
import smtplib
import time
from email.mime.text import MIMEText
from email.utils import formataddr
def main(event, context):
send_key = os.environ.get('send_key') # send_key configured in the environment
core_content = event.get('body') # main mail parameters from the request body
core_body = json.loads(core_content)
get_send_key = core_body.get("send_key") # send_key received in the request
mail_acct = core_body.get("mail_acct") # sender email account
mail_paswd = core_body.get("mail_paswd") # sender email password
mail_to = core_body.get("mail_to") # recipient email account
smtp_server = core_body.get("smtp_server") # configured SMTP server
smtp_port = core_body.get("smtp_port") # configured SMTP port
subject = core_body.get("subject") # mail subject
content = core_body.get("content") # mail body
from_nikename = core_body.get("from_nikename") # display name shown for the sender
to_nikename = core_body.get("to_nikename") # display name shown for the recipient
send_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
def mail():
ret = True
try:
msg = MIMEText(content, 'plain', 'utf-8')
msg['From'] = formataddr([from_nikename, mail_acct]) # sender nickname and sender account
msg['To'] = formataddr([to_nikename, mail_to]) # recipient nickname and recipient account
msg['Subject'] = subject # subject (title) of the mail
server = smtplib.SMTP_SSL(smtp_server, smtp_port) # SMTP server and port of the sender's mailbox
server.login(mail_acct, mail_paswd) # sender email account and password
server.sendmail(mail_acct, [mail_to, ], msg.as_string()) # sender account, recipient account, message to send
server.quit() # close the connection
except Exception: # if anything in the try block fails, fall back to ret = False
ret = False
return ret
if get_send_key == send_key:
ret = mail()
else:
ret = False
print("send_key ERROR!")
if ret:
data = { '响应' : "邮件发送成功", '发送时间':send_time, '邮件主题' : subject, '邮件正文' : content}
body = json.dumps(data, sort_keys=True, indent=2, separators=(',', ': '), ensure_ascii=False)
print("Sms send success!")
resp = {
"isBase64Encoded": False,
"send_date": send_time,
"statusCode": 200,
"headers": {"Content-Type":"application/json; charset=UTF-8"},
"body": body
}
return(resp)
else:
data = { '响应' : "邮件发送失败", '发送时间':send_time, '邮件主题' : subject, '邮件正文' : content}
body = json.dumps(data, sort_keys=True, indent=2, separators=(',', ': '), ensure_ascii=False)
resp = {
"isBase64Encoded": False,
"send_date": send_time,
"statusCode": 300,
"headers": {"Content-Type":"application/json; charset=UTF-8"},
"body": body
}
return(resp)
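# Illustrative request sketch (an assumption, not part of the original
# source): the handler expects event["body"] to be a JSON string shaped like
# {
#   "send_key": "<must equal the send_key environment variable>",
#   "mail_acct": "sender@example.com",
#   "mail_paswd": "app-password",
#   "mail_to": "recipient@example.com",
#   "smtp_server": "smtp.example.com",
#   "smtp_port": 465,
#   "subject": "hello",
#   "content": "body text",
#   "from_nikename": "Sender",
#   "to_nikename": "Recipient"
# }
# All field names come from the core_body.get(...) calls above; the concrete
# values are hypothetical.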
| [] | [] | ["send_key"] | [] | ["send_key"] | python | 1 | 0 | |
logstash.go
|
package logstash
import (
"encoding/json"
"errors"
"log"
"net"
"os"
"strings"
"time"
"github.com/fsouza/go-dockerclient"
"github.com/gliderlabs/logspout/router"
)
func init() {
router.AdapterFactories.Register(NewLogstashAdapter, "logstash")
}
// LogstashAdapter is an adapter that streams UDP JSON to Logstash.
type LogstashAdapter struct {
conn net.Conn
route *router.Route
containerTags map[string][]string
logstashFields map[string]map[string]string
decodeJsonLogs map[string]bool
}
// NewLogstashAdapter creates a LogstashAdapter with UDP as the default transport.
func NewLogstashAdapter(route *router.Route) (router.LogAdapter, error) {
transport, found := router.AdapterTransports.Lookup(route.AdapterTransport("udp"))
if !found {
return nil, errors.New("unable to find adapter: " + route.Adapter)
}
for {
conn, err := transport.Dial(route.Address, route.Options)
if err == nil {
return &LogstashAdapter{
route: route,
conn: conn,
containerTags: make(map[string][]string),
logstashFields: make(map[string]map[string]string),
decodeJsonLogs: make(map[string]bool),
}, nil
}
if os.Getenv("RETRY_STARTUP") == "" {
return nil, err
}
log.Println("Retrying:", err)
time.Sleep(2 * time.Second)
}
}
// Get container tags configured with the environment variable LOGSTASH_TAGS
func GetContainerTags(c *docker.Container, a *LogstashAdapter) []string {
if tags, ok := a.containerTags[c.ID]; ok {
return tags
}
tags := []string{}
tagsStr := os.Getenv("LOGSTASH_TAGS")
for _, e := range c.Config.Env {
if strings.HasPrefix(e, "LOGSTASH_TAGS=") {
tagsStr = strings.TrimPrefix(e, "LOGSTASH_TAGS=")
break
}
}
if len(tagsStr) > 0 {
tags = strings.Split(tagsStr, ",")
}
a.containerTags[c.ID] = tags
return tags
}
// Get logstash fields configured with the environment variable LOGSTASH_FIELDS
func GetLogstashFields(c *docker.Container, a *LogstashAdapter) map[string]string {
if fields, ok := a.logstashFields[c.ID]; ok {
return fields
}
fieldsStr := os.Getenv("LOGSTASH_FIELDS")
fields := map[string]string{}
for _, e := range c.Config.Env {
if strings.HasPrefix(e, "LOGSTASH_FIELDS=") {
fieldsStr = strings.TrimPrefix(e, "LOGSTASH_FIELDS=")
}
}
if len(fieldsStr) > 0 {
for _, f := range strings.Split(fieldsStr, ",") {
sp := strings.Split(f, "=")
k, v := sp[0], sp[1]
fields[k] = v
}
}
a.logstashFields[c.ID] = fields
return fields
}
// Get boolean indicating whether json logs should be decoded (or added as message),
// configured with the environment variable DECODE_JSON_LOGS
func IsDecodeJsonLogs(c *docker.Container, a *LogstashAdapter) bool {
if decodeJsonLogs, ok := a.decodeJsonLogs[c.ID]; ok {
return decodeJsonLogs
}
decodeJsonLogsStr := os.Getenv("DECODE_JSON_LOGS")
for _, e := range c.Config.Env {
if strings.HasPrefix(e, "DECODE_JSON_LOGS=") {
decodeJsonLogsStr = strings.TrimPrefix(e, "DECODE_JSON_LOGS=")
}
}
decodeJsonLogs := decodeJsonLogsStr != "false"
a.decodeJsonLogs[c.ID] = decodeJsonLogs
return decodeJsonLogs
}
// Stream implements the router.LogAdapter interface.
func (a *LogstashAdapter) Stream(logstream chan *router.Message) {
for m := range logstream {
dockerInfo := DockerInfo{
Name: m.Container.Name,
ID: m.Container.ID,
Image: m.Container.Config.Image,
Hostname: m.Container.Config.Hostname,
Service: m.Container.Config.Labels["com.docker.compose.service"], // Config.Labels is a map[string]string, so the compose service label is read by key
}
if os.Getenv("DOCKER_LABELS") != "" {
dockerInfo.Labels = make(map[string]string)
for label, value := range m.Container.Config.Labels {
dockerInfo.Labels[strings.Replace(label, ".", "_", -1)] = value
}
}
tags := GetContainerTags(m.Container, a)
fields := GetLogstashFields(m.Container, a)
var js []byte
var data map[string]interface{}
var err error
// Try to parse JSON-encoded m.Data. If it wasn't JSON, create an empty object
// and use the original data as the message.
if IsDecodeJsonLogs(m.Container, a) {
err = json.Unmarshal([]byte(m.Data), &data)
}
if err != nil || data == nil {
data = make(map[string]interface{})
data["message"] = m.Data
}
for k, v := range fields {
data[k] = v
}
data["docker"] = dockerInfo
data["stream"] = m.Source
data["tags"] = tags
// Return the JSON encoding
if js, err = json.Marshal(data); err != nil {
// Log error message and continue parsing next line, if marshalling fails
log.Println("logstash: could not marshal JSON:", err)
continue
}
// To work with tls and tcp transports via json_lines codec
js = append(js, byte('\n'))
for {
_, err := a.conn.Write(js)
if err == nil {
break
}
if os.Getenv("RETRY_SEND") == "" {
log.Fatal("logstash: could not write:", err)
} else {
time.Sleep(2 * time.Second)
}
}
}
}
type DockerInfo struct {
Name string `json:"name"`
ID string `json:"id"`
Image string `json:"image"`
Hostname string `json:"hostname"`
Labels map[string]string `json:"labels"`
Service string `json:"com.docker.compose.service"`
}
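// Illustrative configuration sketch (derived from the parsing logic above,
// not part of the original source):
//   LOGSTASH_TAGS=prod,web           -> "tags": ["prod", "web"]
//   LOGSTASH_FIELDS=team=core,dc=eu  -> adds {"team": "core", "dc": "eu"}
//   DECODE_JSON_LOGS=false           -> log line kept verbatim under "message"
//   DOCKER_LABELS=1                  -> container labels copied with '.' -> '_'
//   RETRY_STARTUP=1 / RETRY_SEND=1   -> retry dial/write every 2s instead of failing
// Each option can also be overridden per container via its Docker environment.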
| ["\"RETRY_STARTUP\"", "\"LOGSTASH_TAGS\"", "\"LOGSTASH_FIELDS\"", "\"DECODE_JSON_LOGS\"", "\"DOCKER_LABELS\"", "\"RETRY_SEND\""] | [] | ["DECODE_JSON_LOGS", "DOCKER_LABELS", "LOGSTASH_TAGS", "RETRY_STARTUP", "LOGSTASH_FIELDS", "RETRY_SEND"] | [] | ["DECODE_JSON_LOGS", "DOCKER_LABELS", "LOGSTASH_TAGS", "RETRY_STARTUP", "LOGSTASH_FIELDS", "RETRY_SEND"] | go | 6 | 0 | |
backend/cabrenter/main.py
|
# File containing the API endpoints.
import os
import dotenv
import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from cabrenter.models.request_models import CabConfiguration
from cabrenter.repositories.cab_finder_repo import MostSuitableCabCosmos
from cabrenter.repositories.driver_workspace_repo import DriverWorkspaceCosmos
from cabrenter.use_cases.driver_workspace import DriverWorkspace
from cabrenter.use_cases.find_optimal_cab import FindMostSuitableCabs
dotenv.load_dotenv()
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# note: creating a second FastAPI() instance here would discard the CORS middleware configured above, so the first instance is kept
@app.get("/")
def welcome():
return "Welcome To The Clean Architecture Workshop"
#--------------------#
# Use Case 1 #
#--------------------#
@app.get("/api/cabs")
def get_cabs_for_user(city: str, max_price: int):
repo = MostSuitableCabCosmos(
connection_string=os.getenv("CONNECTION_STRING")
)
response = FindMostSuitableCabs(repo=repo).get(
city=city,
max_price=max_price
)
return response
#--------------------#
# Use Case 2 #
#--------------------#
@app.post("/api/driver/{driver_id}/cabs")
def create_cab(driver_id: int, cab: CabConfiguration):
CosmosRepo = DriverWorkspaceCosmos(
connection_string=os.getenv("CONNECTION_STRING")
)
usecase = DriverWorkspace(repo=CosmosRepo)
response = usecase.add_cab(
driver_id=driver_id,
city=cab.city,
brand=cab.brand,
hourly_price=cab.hourly_price
)
return response
@app.get("/api/driver/{driver_id}/cabs")
def get_cabs(driver_id: int):
CosmosRepo = DriverWorkspaceCosmos(
connection_string=os.getenv("CONNECTION_STRING")
)
usecase = DriverWorkspace(repo=CosmosRepo)
response = usecase.get_cabs(
driver_id=driver_id,
)
return response
@app.get("/api/driver/{driver_id}/cabs/{cab_id}")
def get_cab(driver_id: int, cab_id: str):
CosmosRepo = DriverWorkspaceCosmos(
connection_string=os.getenv("CONNECTION_STRING")
)
usecase = DriverWorkspace(repo=CosmosRepo)
response = usecase.get_cab(
driver_id, cab_id
)
return response
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000)
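# Illustrative calls (an assumption, not part of the original source; the
# host/port follow uvicorn.run() above and the JSON fields follow the
# CabConfiguration usage in create_cab):
#   curl "http://localhost:8000/api/cabs?city=Berlin&max_price=50"
#   curl -X POST "http://localhost:8000/api/driver/1/cabs" \
#        -H "Content-Type: application/json" \
#        -d '{"city": "Berlin", "brand": "VW", "hourly_price": 25}'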
| [] | [] | ["CONNECTION_STRING"] | [] | ["CONNECTION_STRING"] | python | 1 | 0 | |
cmd/buildkitd/main.go
|
package main
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net"
"os"
"os/user"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/pkg/seed"
"github.com/containerd/containerd/pkg/userns"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/remotes/docker"
"github.com/containerd/containerd/sys"
sddaemon "github.com/coreos/go-systemd/v22/daemon"
"github.com/docker/docker/pkg/reexec"
"github.com/docker/go-connections/sockets"
"github.com/gofrs/flock"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
"github.com/moby/buildkit/cache/remotecache"
"github.com/moby/buildkit/cache/remotecache/gha"
inlineremotecache "github.com/moby/buildkit/cache/remotecache/inline"
localremotecache "github.com/moby/buildkit/cache/remotecache/local"
registryremotecache "github.com/moby/buildkit/cache/remotecache/registry"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/cmd/buildkitd/config"
"github.com/moby/buildkit/control"
"github.com/moby/buildkit/executor/oci"
"github.com/moby/buildkit/frontend"
dockerfile "github.com/moby/buildkit/frontend/dockerfile/builder"
"github.com/moby/buildkit/frontend/gateway"
"github.com/moby/buildkit/frontend/gateway/forwarder"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/solver/bboltcachestorage"
"github.com/moby/buildkit/util/apicaps"
"github.com/moby/buildkit/util/appcontext"
"github.com/moby/buildkit/util/appdefaults"
"github.com/moby/buildkit/util/archutil"
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/moby/buildkit/util/profiler"
"github.com/moby/buildkit/util/resolver"
"github.com/moby/buildkit/util/stack"
"github.com/moby/buildkit/util/tracing/detect"
_ "github.com/moby/buildkit/util/tracing/detect/jaeger"
_ "github.com/moby/buildkit/util/tracing/env"
"github.com/moby/buildkit/util/tracing/transform"
"github.com/moby/buildkit/version"
"github.com/moby/buildkit/worker"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"go.opentelemetry.io/otel/propagation"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
"go.opentelemetry.io/otel/trace"
tracev1 "go.opentelemetry.io/proto/otlp/collector/trace/v1"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
)
func init() {
apicaps.ExportedProduct = "buildkit"
stack.SetVersionInfo(version.Version, version.Revision)
seed.WithTimeAndRand()
reexec.Init()
// overwrites containerd/log.G
log.G = bklog.GetLogger
}
var propagators = propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{})
type workerInitializerOpt struct {
config *config.Config
sessionManager *session.Manager
traceSocket string
}
type workerInitializer struct {
fn func(c *cli.Context, common workerInitializerOpt) ([]worker.Worker, error)
// less priority number, more preferred
priority int
}
var (
appFlags []cli.Flag
workerInitializers []workerInitializer
)
func registerWorkerInitializer(wi workerInitializer, flags ...cli.Flag) {
workerInitializers = append(workerInitializers, wi)
sort.Slice(workerInitializers,
func(i, j int) bool {
return workerInitializers[i].priority < workerInitializers[j].priority
})
appFlags = append(appFlags, flags...)
}
func main() {
cli.VersionPrinter = func(c *cli.Context) {
fmt.Println(c.App.Name, version.Package, c.App.Version, version.Revision)
}
app := cli.NewApp()
app.Name = "buildkitd"
app.Usage = "build daemon"
app.Version = version.Version
defaultConf, err := defaultConf()
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
os.Exit(1)
}
rootlessUsage := "set all the default options to be compatible with rootless containers"
if userns.RunningInUserNS() {
app.Flags = append(app.Flags, cli.BoolTFlag{
Name: "rootless",
Usage: rootlessUsage + " (default: true)",
})
} else {
app.Flags = append(app.Flags, cli.BoolFlag{
Name: "rootless",
Usage: rootlessUsage,
})
}
groupValue := func(gid *int) string {
if gid == nil {
return ""
}
return strconv.Itoa(*gid)
}
app.Flags = append(app.Flags,
cli.StringFlag{
Name: "config",
Usage: "path to config file",
Value: defaultConfigPath(),
},
cli.BoolFlag{
Name: "debug",
Usage: "enable debug output in logs",
},
cli.StringFlag{
Name: "root",
Usage: "path to state directory",
Value: defaultConf.Root,
},
cli.StringSliceFlag{
Name: "addr",
Usage: "listening address (socket or tcp)",
Value: &cli.StringSlice{defaultConf.GRPC.Address[0]},
},
cli.StringFlag{
Name: "group",
Usage: "group (name or gid) which will own all Unix socket listening addresses",
Value: groupValue(defaultConf.GRPC.GID),
},
cli.StringFlag{
Name: "debugaddr",
Usage: "debugging address (eg. 0.0.0.0:6060)",
Value: defaultConf.GRPC.DebugAddress,
},
cli.StringFlag{
Name: "tlscert",
Usage: "certificate file to use",
Value: defaultConf.GRPC.TLS.Cert,
},
cli.StringFlag{
Name: "tlskey",
Usage: "key file to use",
Value: defaultConf.GRPC.TLS.Key,
},
cli.StringFlag{
Name: "tlscacert",
Usage: "ca certificate to verify clients",
Value: defaultConf.GRPC.TLS.CA,
},
cli.StringSliceFlag{
Name: "allow-insecure-entitlement",
Usage: "allows insecure entitlements e.g. network.host, security.insecure",
},
)
app.Flags = append(app.Flags, appFlags...)
app.Action = func(c *cli.Context) error {
// TODO: On Windows this always returns -1. The actual "are you admin" check is very Windows-specific.
// See https://github.com/golang/go/issues/28804#issuecomment-505326268 for the "short" version.
if os.Geteuid() > 0 {
return errors.New("rootless mode requires to be executed as the mapped root in a user namespace; you may use RootlessKit for setting up the namespace")
}
ctx, cancel := context.WithCancel(appcontext.Context())
defer cancel()
cfg, err := LoadFile(c.GlobalString("config"))
if err != nil {
return err
}
setDefaultConfig(&cfg)
if err := applyMainFlags(c, &cfg); err != nil {
return err
}
logrus.SetFormatter(&logrus.TextFormatter{FullTimestamp: true})
if cfg.Debug {
logrus.SetLevel(logrus.DebugLevel)
}
if cfg.GRPC.DebugAddress != "" {
if err := setupDebugHandlers(cfg.GRPC.DebugAddress); err != nil {
return err
}
}
tp, err := detect.TracerProvider()
if err != nil {
return err
}
streamTracer := otelgrpc.StreamServerInterceptor(otelgrpc.WithTracerProvider(tp), otelgrpc.WithPropagators(propagators))
unary := grpc_middleware.ChainUnaryServer(unaryInterceptor(ctx, tp), grpcerrors.UnaryServerInterceptor)
stream := grpc_middleware.ChainStreamServer(streamTracer, grpcerrors.StreamServerInterceptor)
opts := []grpc.ServerOption{grpc.UnaryInterceptor(unary), grpc.StreamInterceptor(stream)}
server := grpc.NewServer(opts...)
// relative path does not work with nightlyone/lockfile
root, err := filepath.Abs(cfg.Root)
if err != nil {
return err
}
cfg.Root = root
if err := os.MkdirAll(root, 0700); err != nil {
return errors.Wrapf(err, "failed to create %s", root)
}
lockPath := filepath.Join(root, "buildkitd.lock")
lock := flock.New(lockPath)
locked, err := lock.TryLock()
if err != nil {
return errors.Wrapf(err, "could not lock %s", lockPath)
}
if !locked {
return errors.Errorf("could not lock %s, another instance running?", lockPath)
}
defer func() {
lock.Unlock()
os.RemoveAll(lockPath)
}()
controller, err := newController(c, &cfg)
if err != nil {
return err
}
controller.Register(server)
ents := c.GlobalStringSlice("allow-insecure-entitlement")
if len(ents) > 0 {
cfg.Entitlements = []string{}
for _, e := range ents {
switch e {
case "security.insecure":
cfg.Entitlements = append(cfg.Entitlements, e)
case "network.host":
cfg.Entitlements = append(cfg.Entitlements, e)
default:
return fmt.Errorf("invalid entitlement : %v", e)
}
}
}
errCh := make(chan error, 1)
if err := serveGRPC(cfg.GRPC, server, errCh); err != nil {
return err
}
select {
case serverErr := <-errCh:
err = serverErr
cancel()
case <-ctx.Done():
err = ctx.Err()
}
bklog.G(ctx).Infof("stopping server")
if os.Getenv("NOTIFY_SOCKET") != "" {
notified, notifyErr := sddaemon.SdNotify(false, sddaemon.SdNotifyStopping)
bklog.G(ctx).Debugf("SdNotifyStopping notified=%v, err=%v", notified, notifyErr)
}
server.GracefulStop()
return err
}
app.After = func(_ *cli.Context) error {
return detect.Shutdown(context.TODO())
}
profiler.Attach(app)
if err := app.Run(os.Args); err != nil {
fmt.Fprintf(os.Stderr, "buildkitd: %+v\n", err)
os.Exit(1)
}
}
func serveGRPC(cfg config.GRPCConfig, server *grpc.Server, errCh chan error) error {
addrs := cfg.Address
if len(addrs) == 0 {
return errors.New("--addr cannot be empty")
}
tlsConfig, err := serverCredentials(cfg.TLS)
if err != nil {
return err
}
eg, _ := errgroup.WithContext(context.Background())
listeners := make([]net.Listener, 0, len(addrs))
for _, addr := range addrs {
l, err := getListener(addr, *cfg.UID, *cfg.GID, tlsConfig)
if err != nil {
for _, l := range listeners {
l.Close()
}
return err
}
listeners = append(listeners, l)
}
if os.Getenv("NOTIFY_SOCKET") != "" {
notified, notifyErr := sddaemon.SdNotify(false, sddaemon.SdNotifyReady)
logrus.Debugf("SdNotifyReady notified=%v, err=%v", notified, notifyErr)
}
for _, l := range listeners {
func(l net.Listener) {
eg.Go(func() error {
defer l.Close()
logrus.Infof("running server on %s", l.Addr())
return server.Serve(l)
})
}(l)
}
go func() {
errCh <- eg.Wait()
}()
return nil
}
func defaultConfigPath() string {
if userns.RunningInUserNS() {
return filepath.Join(appdefaults.UserConfigDir(), "buildkitd.toml")
}
return filepath.Join(appdefaults.ConfigDir, "buildkitd.toml")
}
func defaultConf() (config.Config, error) {
cfg, err := LoadFile(defaultConfigPath())
if err != nil {
var pe *os.PathError
if !errors.As(err, &pe) {
return config.Config{}, err
}
return cfg, nil
}
setDefaultConfig(&cfg)
return cfg, nil
}
func setDefaultNetworkConfig(nc config.NetworkConfig) config.NetworkConfig {
if nc.Mode == "" {
nc.Mode = "auto"
}
if nc.CNIConfigPath == "" {
nc.CNIConfigPath = "/etc/buildkit/cni.json"
}
if nc.CNIBinaryPath == "" {
nc.CNIBinaryPath = "/opt/cni/bin"
}
return nc
}
func setDefaultConfig(cfg *config.Config) {
orig := *cfg
if cfg.Root == "" {
cfg.Root = appdefaults.Root
}
if len(cfg.GRPC.Address) == 0 {
cfg.GRPC.Address = []string{appdefaults.Address}
}
if cfg.Workers.OCI.Platforms == nil {
cfg.Workers.OCI.Platforms = archutil.SupportedPlatforms(false)
}
if cfg.Workers.Containerd.Platforms == nil {
cfg.Workers.Containerd.Platforms = archutil.SupportedPlatforms(false)
}
cfg.Workers.OCI.NetworkConfig = setDefaultNetworkConfig(cfg.Workers.OCI.NetworkConfig)
cfg.Workers.Containerd.NetworkConfig = setDefaultNetworkConfig(cfg.Workers.Containerd.NetworkConfig)
if userns.RunningInUserNS() {
// if buildkitd is being executed as the mapped-root (not only EUID==0 but also $USER==root)
// in a user namespace, we need to enable the rootless mode but
// we don't want to honor $HOME for setting up default paths.
if u := os.Getenv("USER"); u != "" && u != "root" {
if orig.Root == "" {
cfg.Root = appdefaults.UserRoot()
}
if len(orig.GRPC.Address) == 0 {
cfg.GRPC.Address = []string{appdefaults.UserAddress()}
}
appdefaults.EnsureUserAddressDir()
}
}
}
func applyMainFlags(c *cli.Context, cfg *config.Config) error {
if c.IsSet("debug") {
cfg.Debug = c.Bool("debug")
}
if c.IsSet("root") {
cfg.Root = c.String("root")
}
if c.IsSet("addr") || len(cfg.GRPC.Address) == 0 {
addrs := c.StringSlice("addr")
if len(addrs) > 1 {
addrs = addrs[1:] // https://github.com/urfave/cli/issues/160
}
cfg.GRPC.Address = make([]string, 0, len(addrs))
for _, v := range addrs {
cfg.GRPC.Address = append(cfg.GRPC.Address, v)
}
}
if c.IsSet("allow-insecure-entitlement") {
// override values from config
cfg.Entitlements = c.StringSlice("allow-insecure-entitlement")
}
if c.IsSet("debugaddr") {
cfg.GRPC.DebugAddress = c.String("debugaddr")
}
if cfg.GRPC.UID == nil {
uid := os.Getuid()
cfg.GRPC.UID = &uid
}
if cfg.GRPC.GID == nil {
gid := os.Getgid()
cfg.GRPC.GID = &gid
}
if group := c.String("group"); group != "" {
gid, err := groupToGid(group)
if err != nil {
return err
}
cfg.GRPC.GID = &gid
}
if tlscert := c.String("tlscert"); tlscert != "" {
cfg.GRPC.TLS.Cert = tlscert
}
if tlskey := c.String("tlskey"); tlskey != "" {
cfg.GRPC.TLS.Key = tlskey
}
if tlsca := c.String("tlscacert"); tlsca != "" {
cfg.GRPC.TLS.CA = tlsca
}
return nil
}
// Convert a string containing either a group name or a stringified gid into a numeric id)
func groupToGid(group string) (int, error) {
if group == "" {
return os.Getgid(), nil
}
var (
err error
id int
)
// Try and parse as a number, if the error is ErrSyntax
// (i.e. its not a number) then we carry on and try it as a
// name.
if id, err = strconv.Atoi(group); err == nil {
return id, nil
} else if err.(*strconv.NumError).Err != strconv.ErrSyntax {
return 0, err
}
ginfo, err := user.LookupGroup(group)
if err != nil {
return 0, err
}
group = ginfo.Gid
if id, err = strconv.Atoi(group); err != nil {
return 0, err
}
return id, nil
}
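// Illustrative behaviour (derived from the logic above):
//   groupToGid("")       -> gid of the current process
//   groupToGid("1000")   -> 1000, parsed directly as a number
//   groupToGid("docker") -> the gid looked up from the local group database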
func getListener(addr string, uid, gid int, tlsConfig *tls.Config) (net.Listener, error) {
addrSlice := strings.SplitN(addr, "://", 2)
if len(addrSlice) < 2 {
return nil, errors.Errorf("address %s does not contain proto, you meant unix://%s ?",
addr, addr)
}
proto := addrSlice[0]
listenAddr := addrSlice[1]
switch proto {
case "unix", "npipe":
if tlsConfig != nil {
logrus.Warnf("TLS is disabled for %s", addr)
}
return sys.GetLocalListener(listenAddr, uid, gid)
case "fd":
return listenFD(listenAddr, tlsConfig)
case "tcp":
if tlsConfig == nil {
logrus.Warnf("TLS is not enabled for %s. enabling mutual TLS authentication is highly recommended", addr)
}
return sockets.NewTCPSocket(listenAddr, tlsConfig)
default:
return nil, errors.Errorf("addr %s not supported", addr)
}
}
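// Illustrative address forms (matching the switch above; the unix socket
// path shown is the conventional buildkit default and is an assumption here):
//   unix:///run/buildkit/buildkitd.sock
//   tcp://0.0.0.0:1234   (pair with --tlscert/--tlskey/--tlscacert)
//   fd://3               (listen on an inherited file descriptor)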
func unaryInterceptor(globalCtx context.Context, tp trace.TracerProvider) grpc.UnaryServerInterceptor {
withTrace := otelgrpc.UnaryServerInterceptor(otelgrpc.WithTracerProvider(tp), otelgrpc.WithPropagators(propagators))
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go func() {
select {
case <-ctx.Done():
case <-globalCtx.Done():
cancel()
}
}()
if strings.HasSuffix(info.FullMethod, "opentelemetry.proto.collector.trace.v1.TraceService/Export") {
return handler(ctx, req)
}
resp, err = withTrace(ctx, req, info, handler)
if err != nil {
logrus.Errorf("%s returned error: %+v", info.FullMethod, stack.Formatter(err))
}
return
}
}
func serverCredentials(cfg config.TLSConfig) (*tls.Config, error) {
certFile := cfg.Cert
keyFile := cfg.Key
caFile := cfg.CA
if certFile == "" && keyFile == "" {
return nil, nil
}
err := errors.New("you must specify key and cert file if one is specified")
if certFile == "" {
return nil, err
}
if keyFile == "" {
return nil, err
}
certificate, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return nil, errors.Wrap(err, "could not load server key pair")
}
tlsConf := &tls.Config{
Certificates: []tls.Certificate{certificate},
}
if caFile != "" {
certPool := x509.NewCertPool()
ca, err := ioutil.ReadFile(caFile)
if err != nil {
return nil, errors.Wrap(err, "could not read ca certificate")
}
// Append the client certificates from the CA
if ok := certPool.AppendCertsFromPEM(ca); !ok {
return nil, errors.New("failed to append ca cert")
}
tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
tlsConf.ClientCAs = certPool
}
return tlsConf, nil
}
func newController(c *cli.Context, cfg *config.Config) (*control.Controller, error) {
sessionManager, err := session.NewManager()
if err != nil {
return nil, err
}
tc, err := detect.Exporter()
if err != nil {
return nil, err
}
var traceSocket string
if tc != nil {
traceSocket = filepath.Join(cfg.Root, "otel-grpc.sock")
if err := runTraceController(traceSocket, tc); err != nil {
return nil, err
}
}
wc, err := newWorkerController(c, workerInitializerOpt{
config: cfg,
sessionManager: sessionManager,
traceSocket: traceSocket,
})
if err != nil {
return nil, err
}
frontends := map[string]frontend.Frontend{}
frontends["dockerfile.v0"] = forwarder.NewGatewayForwarder(wc, dockerfile.Build)
frontends["gateway.v0"] = gateway.NewGatewayFrontend(wc)
cacheStorage, err := bboltcachestorage.NewStore(filepath.Join(cfg.Root, "cache.db"))
if err != nil {
return nil, err
}
resolverFn := resolverFunc(cfg)
w, err := wc.GetDefault()
if err != nil {
return nil, err
}
remoteCacheExporterFuncs := map[string]remotecache.ResolveCacheExporterFunc{
"registry": registryremotecache.ResolveCacheExporterFunc(sessionManager, resolverFn),
"local": localremotecache.ResolveCacheExporterFunc(sessionManager),
"inline": inlineremotecache.ResolveCacheExporterFunc(),
"gha": gha.ResolveCacheExporterFunc(),
}
remoteCacheImporterFuncs := map[string]remotecache.ResolveCacheImporterFunc{
"registry": registryremotecache.ResolveCacheImporterFunc(sessionManager, w.ContentStore(), resolverFn),
"local": localremotecache.ResolveCacheImporterFunc(sessionManager),
"gha": gha.ResolveCacheImporterFunc(),
}
return control.NewController(control.Opt{
SessionManager: sessionManager,
WorkerController: wc,
Frontends: frontends,
ResolveCacheExporterFuncs: remoteCacheExporterFuncs,
ResolveCacheImporterFuncs: remoteCacheImporterFuncs,
CacheKeyStorage: cacheStorage,
Entitlements: cfg.Entitlements,
TraceCollector: tc,
})
}
func resolverFunc(cfg *config.Config) docker.RegistryHosts {
return resolver.NewRegistryConfig(cfg.Registries)
}
func newWorkerController(c *cli.Context, wiOpt workerInitializerOpt) (*worker.Controller, error) {
wc := &worker.Controller{}
nWorkers := 0
for _, wi := range workerInitializers {
ws, err := wi.fn(c, wiOpt)
if err != nil {
return nil, err
}
for _, w := range ws {
p := formatPlatforms(w.Platforms(false))
logrus.Infof("found worker %q, labels=%v, platforms=%v", w.ID(), w.Labels(), p)
archutil.WarnIfUnsupported(p)
if err = wc.Add(w); err != nil {
return nil, err
}
nWorkers++
}
}
if nWorkers == 0 {
return nil, errors.New("no worker found, rebuild the buildkit daemon?")
}
defaultWorker, err := wc.GetDefault()
if err != nil {
return nil, err
}
logrus.Infof("found %d workers, default=%q", nWorkers, defaultWorker.ID())
logrus.Warn("currently, only the default worker can be used.")
return wc, nil
}
func attrMap(sl []string) (map[string]string, error) {
m := map[string]string{}
for _, v := range sl {
parts := strings.SplitN(v, "=", 2)
if len(parts) != 2 {
return nil, errors.Errorf("invalid value %s", v)
}
m[parts[0]] = parts[1]
}
return m, nil
}
func formatPlatforms(p []ocispecs.Platform) []string {
str := make([]string, 0, len(p))
for _, pp := range p {
str = append(str, platforms.Format(platforms.Normalize(pp)))
}
return str
}
func parsePlatforms(platformsStr []string) ([]ocispecs.Platform, error) {
out := make([]ocispecs.Platform, 0, len(platformsStr))
for _, s := range platformsStr {
p, err := platforms.Parse(s)
if err != nil {
return nil, err
}
out = append(out, platforms.Normalize(p))
}
return out, nil
}
func getGCPolicy(cfg config.GCConfig, root string) []client.PruneInfo {
if cfg.GC != nil && !*cfg.GC {
return nil
}
if len(cfg.GCPolicy) == 0 {
cfg.GCPolicy = config.DefaultGCPolicy(root, cfg.GCKeepStorage)
}
out := make([]client.PruneInfo, 0, len(cfg.GCPolicy))
for _, rule := range cfg.GCPolicy {
out = append(out, client.PruneInfo{
Filter: rule.Filters,
All: rule.All,
KeepBytes: rule.KeepBytes,
KeepDuration: time.Duration(rule.KeepDuration) * time.Second,
})
}
return out
}
func getDNSConfig(cfg *config.DNSConfig) *oci.DNSConfig {
var dns *oci.DNSConfig
if cfg != nil {
dns = &oci.DNSConfig{
Nameservers: cfg.Nameservers,
Options: cfg.Options,
SearchDomains: cfg.SearchDomains,
}
}
return dns
}
func runTraceController(p string, exp sdktrace.SpanExporter) error {
server := grpc.NewServer()
tracev1.RegisterTraceServiceServer(server, &traceCollector{exporter: exp})
uid := os.Getuid()
l, err := sys.GetLocalListener(p, uid, uid)
if err != nil {
return err
}
if err := os.Chmod(p, 0666); err != nil {
l.Close()
return err
}
go server.Serve(l)
return nil
}
type traceCollector struct {
*tracev1.UnimplementedTraceServiceServer
exporter sdktrace.SpanExporter
}
func (t *traceCollector) Export(ctx context.Context, req *tracev1.ExportTraceServiceRequest) (*tracev1.ExportTraceServiceResponse, error) {
err := t.exporter.ExportSpans(ctx, transform.Spans(req.GetResourceSpans()))
if err != nil {
return nil, err
}
return &tracev1.ExportTraceServiceResponse{}, nil
}
| ["\"NOTIFY_SOCKET\"", "\"NOTIFY_SOCKET\"", "\"USER\""] | [] | ["USER", "NOTIFY_SOCKET"] | [] | ["USER", "NOTIFY_SOCKET"] | go | 2 | 0 | |
consumer/main.go
|
package main
import (
"io/ioutil"
"log"
"net/http"
"os"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sqs"
"github.com/go-chi/chi"
"github.com/go-chi/chi/middleware"
"github.com/opentracing/opentracing-go"
zipkinot "github.com/openzipkin-contrib/zipkin-go-opentracing"
"github.com/openzipkin/zipkin-go"
zipkinhttp "github.com/openzipkin/zipkin-go/reporter/http"
)
func setupGlobalTracer() {
// zipkin / opentracing specific stuff
// set up a span reporter
var reporterOpts []zipkinhttp.ReporterOption
reporterOpts = append(reporterOpts, zipkinhttp.Logger(log.New(ioutil.Discard, "", log.LstdFlags)))
reporter := zipkinhttp.NewReporter("http://zipkin:9411/api/v2/spans", reporterOpts...)
// create our local service endpoint
endpoint, err := zipkin.NewEndpoint("consumer", "localhost:3001")
if err != nil {
log.Fatalf("unable to create local endpoint: %+v\n", err)
}
// initialize our tracer
nativeTracer, err := zipkin.NewTracer(reporter, zipkin.WithLocalEndpoint(endpoint))
if err != nil {
log.Fatalf("unable to create tracer: %+v\n", err)
}
// use zipkin-go-opentracing to wrap our tracer
tracer := zipkinot.Wrap(nativeTracer)
// optionally set as Global OpenTracing tracer instance
log.Println(tracer)
opentracing.SetGlobalTracer(tracer)
}
func main() {
setupGlobalTracer()
r := chi.NewRouter()
svc := sqs.New(session.New(), &aws.Config{
Endpoint: aws.String(os.Getenv("SQS_SERVER")),
Region: aws.String("us-east-1"),
Credentials: credentials.NewStaticCredentials(
"id",
"secret",
"token",
)})
r.Use(middleware.Logger)
go func() {
for {
queueURL := os.Getenv("SQS_SERVER") + "/queue/" + os.Getenv("QUEUE_URL")
result, err := svc.ReceiveMessage(&sqs.ReceiveMessageInput{
AttributeNames: []*string{
aws.String(sqs.MessageSystemAttributeNameSentTimestamp),
},
MessageAttributeNames: []*string{
aws.String(sqs.QueueAttributeNameAll),
},
QueueUrl: aws.String(queueURL),
VisibilityTimeout: aws.Int64(20),
WaitTimeSeconds: aws.Int64(10),
})
if err != nil {
log.Println("Fail to receive message: ", err)
} else if len(result.Messages) > 0 {
for _, m := range result.Messages {
span := opentracing.StartSpan("Message received")
log.Println("Message received: ", m)
_, err := svc.DeleteMessage(&sqs.DeleteMessageInput{
QueueUrl: aws.String(queueURL),
ReceiptHandle: m.ReceiptHandle,
})
if err != nil {
log.Println(err.Error())
}
span.Finish()
}
}
time.Sleep(time.Second)
}
}()
r.Get("/send", func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("consumer working"))
})
log.Println("Consumer running...")
http.ListenAndServe(":3001", r)
}
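// Illustrative environment (an assumption, not part of the original source):
//   SQS_SERVER=http://localstack:4566  QUEUE_URL=orders
// makes the loop above poll http://localstack:4566/queue/orders, per the
// queueURL concatenation; the endpoint and queue name are hypothetical.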
| ["\"SQS_SERVER\"", "\"SQS_SERVER\"", "\"QUEUE_URL\""] | [] | ["QUEUE_URL", "SQS_SERVER"] | [] | ["QUEUE_URL", "SQS_SERVER"] | go | 2 | 0 | |
knative-operator/pkg/controller/knativekafka/knativekafka_controller.go
|
package knativekafka
import (
"context"
"fmt"
"os"
mfc "github.com/manifestival/controller-runtime-client"
mf "github.com/manifestival/manifestival"
operatorv1alpha1 "github.com/openshift-knative/serverless-operator/knative-operator/pkg/apis/operator/v1alpha1"
"github.com/openshift-knative/serverless-operator/knative-operator/pkg/common"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
const (
// DO NOT change to something else in the future!
// This needs to remain "knative-kafka-openshift" to be compatible with earlier versions in the future versions.
finalizerName = "knative-kafka-openshift"
)
var (
log = logf.Log.WithName("controller_knativekafka")
role = mf.Any(mf.ByKind("ClusterRole"), mf.ByKind("Role"))
rolebinding = mf.Any(mf.ByKind("ClusterRoleBinding"), mf.ByKind("RoleBinding"))
roleOrRoleBinding = mf.Any(role, rolebinding)
)
type stage func(*mf.Manifest, *operatorv1alpha1.KnativeKafka) error
// Add creates a new KnativeKafka Controller and adds it to the Manager. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager) error {
reconciler, err := newReconciler(mgr)
if err != nil {
return err
}
return add(mgr, reconciler)
}
// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager) (*ReconcileKnativeKafka, error) {
kafkaChannelManifest, err := mf.ManifestFrom(mf.Path(os.Getenv("KAFKACHANNEL_MANIFEST_PATH")))
if err != nil {
return nil, fmt.Errorf("failed to load KafkaChannel manifest: %w", err)
}
kafkaSourceManifest, err := mf.ManifestFrom(mf.Path(os.Getenv("KAFKASOURCE_MANIFEST_PATH")))
if err != nil {
return nil, fmt.Errorf("failed to load KafkaSource manifest: %w", err)
}
reconcileKnativeKafka := ReconcileKnativeKafka{
client: mgr.GetClient(),
scheme: mgr.GetScheme(),
rawKafkaChannelManifest: kafkaChannelManifest,
rawKafkaSourceManifest: kafkaSourceManifest,
}
return &reconcileKnativeKafka, nil
}
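// Illustrative wiring (an assumption, not part of the original source): the
// operator is started with the two manifest locations in its environment,
// e.g. hypothetical values such as
//   KAFKACHANNEL_MANIFEST_PATH=/deploy/resources/kafkachannel
//   KAFKASOURCE_MANIFEST_PATH=/deploy/resources/kafkasource
// pointing at paths that manifestival's mf.Path() can read.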
// add adds a new Controller to mgr with r as the reconcile.Reconciler
func add(mgr manager.Manager, r *ReconcileKnativeKafka) error {
// Create a new controller
c, err := controller.New("knativekafka-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return err
}
// Watch for changes to primary resource KnativeKafka
err = c.Watch(&source.Kind{Type: &operatorv1alpha1.KnativeKafka{}}, &handler.EnqueueRequestForObject{})
if err != nil {
return err
}
gvkToResource := common.BuildGVKToResourceMap(r.rawKafkaChannelManifest, r.rawKafkaSourceManifest)
for _, t := range gvkToResource {
err = c.Watch(&source.Kind{Type: t}, common.EnqueueRequestByOwnerAnnotations(common.KafkaOwnerName, common.KafkaOwnerNamespace))
if err != nil {
return err
}
}
return nil
}
// blank assignment to verify that ReconcileKnativeKafka implements reconcile.Reconciler
var _ reconcile.Reconciler = &ReconcileKnativeKafka{}
// ReconcileKnativeKafka reconciles a KnativeKafka object
type ReconcileKnativeKafka struct {
// This client, initialized using mgr.Client() above, is a split client
// that reads objects from the cache and writes to the apiserver
client client.Client
scheme *runtime.Scheme
rawKafkaChannelManifest mf.Manifest
rawKafkaSourceManifest mf.Manifest
}
// Reconcile reads that state of the cluster for a KnativeKafka object and makes changes based on the state read
// and what is in the KnativeKafka.Spec
func (r *ReconcileKnativeKafka) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
reqLogger.Info("Reconciling KnativeKafka")
// Fetch the KnativeKafka instance
original := &operatorv1alpha1.KnativeKafka{}
err := r.client.Get(context.TODO(), request.NamespacedName, original)
if err != nil {
if errors.IsNotFound(err) {
// Request object not found, could have been deleted after reconcile request.
// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
// Return and don't requeue
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
return reconcile.Result{}, err
}
// check for deletion
if original.GetDeletionTimestamp() != nil {
return reconcile.Result{}, r.delete(original)
}
instance := original.DeepCopy()
reconcileErr := r.reconcileKnativeKafka(instance)
if !equality.Semantic.DeepEqual(original.Status, instance.Status) {
if err := r.client.Status().Update(context.TODO(), instance); err != nil {
return reconcile.Result{}, fmt.Errorf("failed to update status: %w", err)
}
}
common.KnativeKafkaUpG = common.KnativeUp.WithLabelValues("kafka_status")
if instance.Status.IsReady() {
common.KnativeKafkaUpG.Set(1)
} else {
common.KnativeKafkaUpG.Set(0)
}
return reconcile.Result{}, reconcileErr
}
func (r *ReconcileKnativeKafka) reconcileKnativeKafka(instance *operatorv1alpha1.KnativeKafka) error {
instance.Status.InitializeConditions()
// install the components that are enabled
if err := r.executeInstallStages(instance); err != nil {
return err
}
// delete the components that are disabled
if err := r.executeDeleteStages(instance); err != nil {
return err
}
return nil
}
func (r *ReconcileKnativeKafka) executeInstallStages(instance *operatorv1alpha1.KnativeKafka) error {
manifest, err := r.buildManifest(instance, manifestBuildEnabledOnly)
if err != nil {
return fmt.Errorf("failed to load and build manifest: %w", err)
}
stages := []stage{
r.ensureFinalizers,
r.transform,
r.apply,
r.checkDeployments,
}
return executeStages(instance, manifest, stages)
}
func (r *ReconcileKnativeKafka) executeDeleteStages(instance *operatorv1alpha1.KnativeKafka) error {
manifest, err := r.buildManifest(instance, manifestBuildDisabledOnly)
if err != nil {
return fmt.Errorf("failed to load and build manifest: %w", err)
}
stages := []stage{
r.transform,
r.deleteResources,
}
return executeStages(instance, manifest, stages)
}
// set a finalizer to clean up cluster-scoped resources and resources from other namespaces
func (r *ReconcileKnativeKafka) ensureFinalizers(_ *mf.Manifest, instance *operatorv1alpha1.KnativeKafka) error {
for _, finalizer := range instance.GetFinalizers() {
if finalizer == finalizerName {
return nil
}
}
log.Info("Adding finalizer")
instance.SetFinalizers(append(instance.GetFinalizers(), finalizerName))
return r.client.Update(context.TODO(), instance)
}
func (r *ReconcileKnativeKafka) transform(manifest *mf.Manifest, instance *operatorv1alpha1.KnativeKafka) error {
log.Info("Transforming manifest")
m, err := manifest.Transform(
mf.InjectOwner(instance),
common.SetAnnotations(map[string]string{
common.KafkaOwnerName: instance.Name,
common.KafkaOwnerNamespace: instance.Namespace,
}),
setBootstrapServers(instance.Spec.Channel.BootstrapServers),
setAuthSecret(instance.Spec.Channel.AuthSecretNamespace, instance.Spec.Channel.AuthSecretName),
ImageTransform(common.BuildImageOverrideMapFromEnviron(os.Environ(), "KAFKA_IMAGE_"), log),
)
if err != nil {
return fmt.Errorf("failed to transform manifest: %w", err)
}
*manifest = m
return nil
}
// Install Knative Kafka components
func (r *ReconcileKnativeKafka) apply(manifest *mf.Manifest, instance *operatorv1alpha1.KnativeKafka) error {
log.Info("Installing manifest")
// The Operator needs a higher level of permissions if it 'bind's non-existent roles.
// To avoid this, we strictly order the manifest application as (Cluster)Roles, then
// (Cluster)RoleBindings, then the rest of the manifest.
if err := manifest.Filter(role).Apply(); err != nil {
instance.Status.MarkInstallFailed(err.Error())
return fmt.Errorf("failed to apply (cluster)roles in manifest: %w", err)
}
if err := manifest.Filter(rolebinding).Apply(); err != nil {
instance.Status.MarkInstallFailed(err.Error())
return fmt.Errorf("failed to apply (cluster)rolebindings in manifest: %w", err)
}
if err := manifest.Filter(not(roleOrRoleBinding)).Apply(); err != nil {
instance.Status.MarkInstallFailed(err.Error())
return fmt.Errorf("failed to apply non rbac manifest: %w", err)
}
instance.Status.MarkInstallSucceeded()
return nil
}
func (r *ReconcileKnativeKafka) checkDeployments(manifest *mf.Manifest, instance *operatorv1alpha1.KnativeKafka) error {
log.Info("Checking deployments")
for _, u := range manifest.Filter(mf.ByKind("Deployment")).Resources() {
u := u // To avoid memory aliasing
resource, err := manifest.Client.Get(&u)
if err != nil {
instance.Status.MarkDeploymentsNotReady()
if errors.IsNotFound(err) {
return nil
}
return err
}
deployment := &appsv1.Deployment{}
if err := scheme.Scheme.Convert(resource, deployment, nil); err != nil {
return err
}
if !isDeploymentAvailable(deployment) {
instance.Status.MarkDeploymentsNotReady()
return nil
}
}
instance.Status.MarkDeploymentsAvailable()
return nil
}
// Delete Knative Kafka resources
func (r *ReconcileKnativeKafka) deleteResources(manifest *mf.Manifest, instance *operatorv1alpha1.KnativeKafka) error {
if len(manifest.Resources()) <= 0 {
return nil
}
log.Info("Deleting resources in manifest")
if err := manifest.Filter(mf.NoCRDs, not(roleOrRoleBinding)).Delete(); err != nil {
return fmt.Errorf("failed to remove non-crd/non-rbac resources: %w", err)
}
// Delete Roles last, as they may be useful for human operators to clean up.
if err := manifest.Filter(roleOrRoleBinding).Delete(); err != nil {
return fmt.Errorf("failed to remove rbac: %w", err)
}
return nil
}
func isDeploymentAvailable(d *appsv1.Deployment) bool {
for _, c := range d.Status.Conditions {
if c.Type == appsv1.DeploymentAvailable && c.Status == corev1.ConditionTrue {
return true
}
}
return false
}
// general clean-up. required for the resources that cannot be garbage collected with the owner reference mechanism
func (r *ReconcileKnativeKafka) delete(instance *operatorv1alpha1.KnativeKafka) error {
defer common.KnativeUp.DeleteLabelValues("kafka_status")
finalizers := sets.NewString(instance.GetFinalizers()...)
if !finalizers.Has(finalizerName) {
log.Info("Finalizer has already been removed, nothing to do")
return nil
}
log.Info("Running cleanup logic")
log.Info("Deleting KnativeKafka")
if err := r.deleteKnativeKafka(instance); err != nil {
return fmt.Errorf("failed to delete KnativeKafka: %w", err)
}
// The above might take a while, so we refetch the resource again in case it has changed.
refetched := &operatorv1alpha1.KnativeKafka{}
if err := r.client.Get(context.TODO(), types.NamespacedName{Namespace: instance.Namespace, Name: instance.Name}, refetched); err != nil {
return fmt.Errorf("failed to refetch KnativeKafka: %w", err)
}
// Update the refetched finalizer list.
finalizers = sets.NewString(refetched.GetFinalizers()...)
finalizers.Delete(finalizerName)
refetched.SetFinalizers(finalizers.List())
if err := r.client.Update(context.TODO(), refetched); err != nil {
return fmt.Errorf("failed to update KnativeKafka with removed finalizer: %w", err)
}
return nil
}
func (r *ReconcileKnativeKafka) deleteKnativeKafka(instance *operatorv1alpha1.KnativeKafka) error {
manifest, err := r.buildManifest(instance, manifestBuildAll)
if err != nil {
return fmt.Errorf("failed to build manifest: %w", err)
}
stages := []stage{
r.transform,
r.deleteResources,
}
return executeStages(instance, manifest, stages)
}
type manifestBuild int
const (
manifestBuildEnabledOnly manifestBuild = iota
manifestBuildDisabledOnly
manifestBuildAll
)
func (r *ReconcileKnativeKafka) buildManifest(instance *operatorv1alpha1.KnativeKafka, build manifestBuild) (*mf.Manifest, error) {
var resources []unstructured.Unstructured
if build == manifestBuildAll || (build == manifestBuildEnabledOnly && instance.Spec.Channel.Enabled) || (build == manifestBuildDisabledOnly && !instance.Spec.Channel.Enabled) {
resources = append(resources, r.rawKafkaChannelManifest.Resources()...)
}
if build == manifestBuildAll || (build == manifestBuildEnabledOnly && instance.Spec.Source.Enabled) || (build == manifestBuildDisabledOnly && !instance.Spec.Source.Enabled) {
resources = append(resources, r.rawKafkaSourceManifest.Resources()...)
}
manifest, err := mf.ManifestFrom(
mf.Slice(resources),
mf.UseClient(mfc.NewClient(r.client)),
mf.UseLogger(log.WithName("mf")))
if err != nil {
return nil, fmt.Errorf("failed to build Kafka manifest: %w", err)
}
return &manifest, nil
}
// setBootstrapServers sets Kafka bootstrapServers value in config-kafka
func setBootstrapServers(bootstrapServers string) mf.Transformer {
return func(u *unstructured.Unstructured) error {
if u.GetKind() == "ConfigMap" && u.GetName() == "config-kafka" {
log.Info("Found ConfigMap config-kafka, updating it with bootstrapServers from spec")
if err := unstructured.SetNestedField(u.Object, bootstrapServers, "data", "bootstrapServers"); err != nil {
return err
}
}
return nil
}
}
// setAuthSecret sets Kafka auth secret namespace and name value in config-kafka
func setAuthSecret(secretNamespace, secretName string) mf.Transformer {
return func(u *unstructured.Unstructured) error {
if u.GetKind() == "ConfigMap" && u.GetName() == "config-kafka" {
log.Info("Found ConfigMap config-kafka, updating it with authSecretName and authSecretNamespace from spec")
if err := unstructured.SetNestedField(u.Object, secretNamespace, "data", "authSecretNamespace"); err != nil {
return err
}
if err := unstructured.SetNestedField(u.Object, secretName, "data", "authSecretName"); err != nil {
return err
}
}
return nil
}
}
func executeStages(instance *operatorv1alpha1.KnativeKafka, manifest *mf.Manifest, stages []stage) error {
// Execute each stage in sequence until one returns an error
for _, stage := range stages {
if err := stage(manifest, instance); err != nil {
return err
}
}
return nil
}
// TODO: get rid of this when we update to Manifestival version that has this function
var not = func(pred mf.Predicate) mf.Predicate {
return func(u *unstructured.Unstructured) bool {
return !pred(u)
}
}
|
[
"\"KAFKACHANNEL_MANIFEST_PATH\"",
"\"KAFKASOURCE_MANIFEST_PATH\""
] |
[] |
[
"KAFKASOURCE_MANIFEST_PATH",
"KAFKACHANNEL_MANIFEST_PATH"
] |
[]
|
["KAFKASOURCE_MANIFEST_PATH", "KAFKACHANNEL_MANIFEST_PATH"]
|
go
| 2 | 0 | |
delivery-platform/resources/repos/app-templates/java/src/main/java/com/example/simple/web/HelloController.java
|
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.example.simple.web;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.util.StringUtils;
import org.springframework.web.bind.annotation.RequestMapping;
@RestController
public class HelloController {
@RequestMapping("/")
public String index() {
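// ENVIRONMENT is read from the process environment; when set, it is echoed into the page below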
String environment = System.getenv("ENVIRONMENT");
String response = "<html><head><title>SimpleApp</title></head><body><h1>Super Simple Java App</h1>";
if (!StringUtils.isEmpty(environment)) {
response += "\n\n<h2>Environment: " + environment + "</h2>";
}
response += "</body></html>";
return response;
}
}
|
[
"\"ENVIRONMENT\""
] |
[] |
[
"ENVIRONMENT"
] |
[]
|
["ENVIRONMENT"]
|
java
| 1 | 0 | |
core/build.py
|
#!/usr/bin/env python3
# pylint: disable=invalid-name, too-many-branches, too-many-statements, broad-except, too-many-arguments, too-many-instance-attributes, line-too-long
'''
this script replaces build.sh, coz bash/sed/awk is driving me insane
'''
import argparse
import atexit
import glob
import json
import os
import random
import readline
import shutil
import subprocess
import sys
import tempfile
import traceback
import uuid
class GoBuild:
'''
all-in-one builder
'''
def __init__(self, target="cc",
cc_indicator="cc_indicator", cc_ip="[cc_ipaddr]", cc_other_names=""):
self.target = target
self.GOOS = os.getenv("GOOS")
self.GOARCH = os.getenv("GOARCH")
if self.GOOS is None:
self.GOOS = "linux"
if self.target == "agentw":
self.GOOS = "windows"
if self.GOARCH is None:
self.GOARCH = "amd64"
# CA
self.CA = ""
# tags
self.CCIP = cc_ip
self.CC_OTHER_NAMES = cc_other_names
self.INDICATOR = cc_indicator
self.UUID = str(uuid.uuid1())
self.VERSION = get_version()
# webroot
if 'webroot' in CACHED_CONF:
self.WebRoot = CACHED_CONF['webroot']
else:
self.WebRoot = str(uuid.uuid1())
CACHED_CONF['webroot'] = self.WebRoot
# OpSep
if 'opsep' in CACHED_CONF:
self.OpSep = CACHED_CONF['opsep']
else:
self.OpSep = str(uuid.uuid1())
CACHED_CONF['opsep'] = self.OpSep
# pid file name
if 'pid_file' in CACHED_CONF:
self.PIDFile = CACHED_CONF['pid_file']
else:
self.PIDFile = rand_str(random.randint(3, 10))
CACHED_CONF['pid_file'] = self.PIDFile
# util path name
if 'utils_path' in CACHED_CONF:
self.UtilsPath = CACHED_CONF['utils_path']
else:
self.UtilsPath = rand_str(random.randint(3, 10))
CACHED_CONF['utils_path'] = self.UtilsPath
# socket name
if 'socket' in CACHED_CONF:
self.Socket = CACHED_CONF['socket']
else:
self.Socket = rand_str(random.randint(3, 10))
CACHED_CONF['socket'] = self.Socket
# indicator text
if 'indicator_text' in CACHED_CONF:
self.INDICATOR_TEXT = CACHED_CONF['indicator_text']
else:
self.INDICATOR_TEXT = "emp3r0r"
CACHED_CONF['indicator_text'] = self.INDICATOR_TEXT
# agent root directory
if "agent_root" in CACHED_CONF:
self.AgentRoot = CACHED_CONF['agent_root']
else:
# by default mkdir in current directory
self.AgentRoot = f"{rand_str(random.randint(5, 10))}"
CACHED_CONF['agent_root'] = self.AgentRoot
# DoH
if "doh_server" not in CACHED_CONF:
CACHED_CONF['doh_server'] = ""
# agent proxy
if "agent_proxy" not in CACHED_CONF:
CACHED_CONF['agent_proxy'] = ""
# cdn proxy
if "cdn_proxy" not in CACHED_CONF:
CACHED_CONF['cdn_proxy'] = ""
def build(self):
'''
cd to cmd and run go build
'''
self.gen_certs()
# CA
if 'ca' in CACHED_CONF:
log_warn(
f"Using cached CA cert ({CACHED_CONF['ca']}),\nmake sure you have the coresponding keypair signed by it")
self.CA = CACHED_CONF['ca']
else:
f = open("./tls/rootCA.crt")
self.CA = f.read()
f.close()
# cache CA, too
CACHED_CONF['ca'] = self.CA
# cache version
CACHED_CONF['version'] = self.VERSION
# write cached configs
json_file = open(BUILD_JSON, "w+")
json.dump(CACHED_CONF, json_file, indent=4)
json_file.close()
self.set_tags()
# copy the server/cc keypair to ./build for later use
if os.path.isdir("./tls"):
log_warn("[*] Copying CC keypair to ./build")
for f in glob.glob("./tls/emp3r0r-*pem"):
print(f" Copy {f} to ./build")
shutil.copy(f, "./build")
try:
os.chdir(f"./cmd/{self.target}")
except BaseException:
log_error(f"Cannot cd to cmd/{self.target}")
return
log_warn("GO BUILD starts...")
build_target = f"../../build/{self.target}"
if self.target == "agent":
build_target = f"../../build/{self.target}-{self.UUID}"
elif self.target == "agentw":
build_target = f"../../build/{self.target}-{self.UUID}.exe"
# go mod
os.system('go mod tidy')
cmd = f'''GOOS={self.GOOS} GOARCH={self.GOARCH} CGO_ENABLED=0''' + \
f""" go build -o {build_target} -ldflags='-s -w' -trimpath"""
# garble
if shutil.which("garble") and self.target != "cc" and args.garble:
cmd = f'''GOOS={self.GOOS} GOARCH={self.GOARCH} CGO_ENABLED=0 GOPRIVATE=''' + \
f''' garble -literals -tiny build -o {build_target} -ldflags="-v" -trimpath .'''
os.system(cmd)
log_warn("GO BUILD ends...")
os.chdir("../../")
self.unset_tags()
targetFile = f"./build/{build_target.split('/')[-1]}"
if os.path.exists(targetFile):
log_warn(f"{targetFile} generated")
else:
log_error("go build failed")
sys.exit(1)
# pack agent binary with packer
if self.target == "agent" and args.pack:
shutil.copy(targetFile, "../packer/agent")
os.chdir("../packer")
os.system("bash ./build.sh")
os.system("CGO_ENABLED=0 ./cryptor.exe")
shutil.move("agent.packed.exe", f"../core/{targetFile}")
os.chdir("../core")
os.chmod(targetFile, 0o755)
log_warn(f"{targetFile} packed")
def gen_certs(self):
'''
generate server cert/key, and CA if necessary
'''
if "ccip" in CACHED_CONF:
if self.CCIP == CACHED_CONF['ccip'] and os.path.exists("./build/emp3r0r-key.pem"):
return
log_warn("[!] Generating new certs...")
try:
os.chdir("./tls")
os.system(
f"bash ./genkey-with-ip-san.sh {self.UUID} {self.UUID}.com {self.CCIP} {self.CC_OTHER_NAMES}")
os.rename(f"./{self.UUID}-cert.pem", "./emp3r0r-cert.pem")
os.rename(f"./{self.UUID}-key.pem", "./emp3r0r-key.pem")
os.chdir("..")
except BaseException as exc:
log_error(
f"[-] Something went wrong, see above for details: {exc}")
sys.exit(1)
def set_tags(self):
'''
modify some tags in the source
'''
# backup source file
try:
shutil.copy("./lib/tun/tls.go", "/tmp/tls.go")
shutil.copy("./lib/tun/api.go", "/tmp/api.go")
shutil.copy("./lib/data/def.go", "/tmp/def.go")
except BaseException:
log_error(f"Failed to backup source files:\n{traceback.format_exc()}")
sys.exit(1)
# version
sed("./lib/data/def.go",
'''Version = "[version_string]"''', f'''Version = "{self.VERSION}"''')
if self.target == "agent":
# guardian shellcode
sed("./lib/data/def.go",
"[persistence_shellcode]", CACHED_CONF['guardian_shellcode'])
sed("./lib/data/def.go",
"[persistence_agent_path]", CACHED_CONF['guardian_agent_path'])
# CA
sed("./lib/tun/tls.go", "[emp3r0r_ca]", self.CA)
# webroot
sed("./lib/tun/api.go", 'WebRoot = "emp3r0r"', f'WebRoot = "{self.WebRoot}"')
# opsep
sed("./lib/data/def.go",
'''OpSep = "cb433bd1-354c-4802-a4fa-ece518f3ded1"''',
f'''OpSep = "{self.OpSep}"''')
# Socket name
sed("./lib/data/def.go",
'''SocketName = AgentRoot + "/.socket"''',
f'''SocketName = AgentRoot + "/{self.Socket}"''')
# utils path
sed("./lib/data/def.go",
'''UtilsPath = AgentRoot + "/bin"''',
f'''UtilsPath = AgentRoot + "/{self.UtilsPath}"''')
# PID file name
sed("./lib/data/def.go",
'''PIDFile = AgentRoot + "/.pid"''',
f'''PIDFile = AgentRoot + "/{self.PIDFile}"''')
# CC IP
sed("./lib/data/def.go",
"CCAddress = \"https://[cc_ipaddr]\"", f"CCAddress = \"https://{self.CCIP}\"")
# agent root path
sed("./lib/data/def.go",
"AgentRoot = \"[agent_root]\"", f"AgentRoot = \"{self.AgentRoot}\"")
# indicator
sed("./lib/data/def.go",
"CCIndicator = \"[cc_indicator]\"", f"CCIndicator = \"{self.INDICATOR}\"")
# indicator wait
if 'indicator_wait_min' in CACHED_CONF:
sed("./lib/data/def.go",
"IndicatorWaitMin = 30", f"IndicatorWaitMin = {CACHED_CONF['indicator_wait_min']}")
if 'indicator_wait_max' in CACHED_CONF:
sed("./lib/data/def.go",
"IndicatorWaitMax = 120", f"IndicatorWaitMax = {CACHED_CONF['indicator_wait_max']}")
# broadcast_interval
if 'broadcast_interval_min' in CACHED_CONF:
sed("./lib/data/def.go",
"BroadcastIntervalMin = 30", f"BroadcastIntervalMin = {CACHED_CONF['broadcast_interval_min']}")
if 'broadcast_interval_max' in CACHED_CONF:
sed("./lib/data/def.go",
"BroadcastIntervalMax = 120", f"BroadcastIntervalMax = {CACHED_CONF['broadcast_interval_max']}")
# cc indicator text
sed("./lib/data/def.go",
"CCIndicatorText = \"[indicator_text]\"", f"CCIndicatorText = \"{self.INDICATOR_TEXT}\"")
# agent UUID
sed("./lib/data/def.go",
"AgentUUID = \"[agent_uuid]\"", f"AgentUUID = \"{self.UUID}\"")
# DoH
sed("./lib/data/def.go",
"DoHServer = \"\"", f"DoHServer = \"{CACHED_CONF['doh_server']}\"")
# CDN
sed("./lib/data/def.go",
"CDNProxy = \"\"", f"CDNProxy = \"{CACHED_CONF['cdn_proxy']}\"")
# Agent Proxy
sed("./lib/data/def.go",
"AgentProxy = \"\"", f"AgentProxy = \"{CACHED_CONF['agent_proxy']}\"")
# ports
sed("./lib/data/def.go",
"CCPort = \"[cc_port]\"", f"CCPort = \"{CACHED_CONF['cc_port']}\"")
sed("./lib/data/def.go",
"SSHDPort = \"[sshd_port]\"", f"SSHDPort = \"{CACHED_CONF['sshd_port']}\"")
sed("./lib/data/def.go",
"ProxyPort = \"[proxy_port]\"", f"ProxyPort = \"{CACHED_CONF['proxy_port']}\"")
sed("./lib/data/def.go",
"BroadcastPort = \"[broadcast_port]\"", f"BroadcastPort = \"{CACHED_CONF['broadcast_port']}\"")
def unset_tags(self):
# restore source files
try:
shutil.move("/tmp/def.go", "./lib/data/def.go")
shutil.move("/tmp/tls.go", "./lib/tun/tls.go")
shutil.move("/tmp/api.go", "./lib/tun/api.go")
except BaseException:
log_error(traceback.format_exc())
def clean():
'''
clean build output
'''
to_rm = glob.glob("./tls/emp3r0r*") + glob.glob("./tls/openssl-*") + \
glob.glob("./build/*") + glob.glob("./tls/*.csr")
for f in to_rm:
try:
# remove directories too
if os.path.isdir(f):
os.system(f"rm -rf {f}")
else:
# we don't need to delete the config file
if f.endswith("build.json"):
continue
os.remove(f)
print(" Deleted "+f)
except BaseException:
log_error(traceback.format_exc())
def sed(path, old, new):
'''
works like `sed -i s/old/new/g file`
'''
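# e.g. sed("./lib/data/def.go", "[cc_ipaddr]", "1.2.3.4") rewrites every occurrence in place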
rf = open(path)
text = rf.read()
to_write = text.replace(old, new)
rf.close()
f = open(path, "w")
f.write(to_write)
f.close()
def yes_no(prompt):
'''
y/n?
'''
if yes_to_all:
log_warn(f"Choosing 'yes' for '{prompt}'")
return True
answ = input(prompt + " [Y/n] ").lower().strip()
if answ in ["n", "no", "nah", "nay"]:
return False
return True
def rand_str(length):
'''
random string
'''
uuidstr = str(uuid.uuid4()).replace('-', '')
# we don't want the string to be long
if length >= len(uuidstr):
return uuidstr
return uuidstr[:length]
def main(target):
'''
main main main
'''
ccip = ""
indicator = ""
use_cached = False
if target == "clean":
clean()
return
# cc IP
if "ccip" in CACHED_CONF:
ccip = CACHED_CONF['ccip']
use_cached = yes_no(f"Use cached CC address ({ccip})?")
if not use_cached:
if yes_no("Clean everything and start over?"):
clean()
ccip = input(
"CC server address (domain name or ip address, can be more than one, separate with space):\n> ").strip()
CACHED_CONF['ccip'] = ccip
if len(ccip.split()) > 1:
CACHED_CONF['ccip'] = ccip.split()[0]
if target == "cc":
cc_other = ""
if len(ccip.split()) > 1:
cc_other = ' '.join(ccip.split()[1:])
gobuild = GoBuild(target="cc", cc_ip=ccip, cc_other_names=cc_other)
gobuild.build()
return
if target not in ("agent", "agentw"):
print("Unknown target")
return
# indicator
use_cached = False
if "cc_indicator" in CACHED_CONF:
indicator = CACHED_CONF['cc_indicator']
use_cached = yes_no(f"Use cached CC indicator ({indicator})?")
if not use_cached:
indicator = input(
"CC status indicator URL (leave empty to disable): ").strip()
CACHED_CONF['cc_indicator'] = indicator
if CACHED_CONF['cc_indicator'] != "":
# indicator text
use_cached = False
if "indicator_text" in CACHED_CONF:
use_cached = yes_no(
f"Use cached CC indicator text ({CACHED_CONF['indicator_text']})?")
if not use_cached:
indicator_text = input(
"CC status indicator text (leave empty to disable): ").strip()
CACHED_CONF['indicator_text'] = indicator_text
# Agent proxy
use_cached = False
if "agent_proxy" in CACHED_CONF:
use_cached = yes_no(
f"Use cached agent proxy ({CACHED_CONF['agent_proxy']})?")
if not use_cached:
agentproxy = input(
"Proxy server for agent (leave empty to disable): ").strip()
CACHED_CONF['agent_proxy'] = agentproxy
# CDN
use_cached = False
if "cdn_proxy" in CACHED_CONF:
use_cached = yes_no(
f"Use cached CDN server ({CACHED_CONF['cdn_proxy']})?")
if not use_cached:
cdn = input("CDN websocket server (leave empty to disable): ").strip()
CACHED_CONF['cdn_proxy'] = cdn
# DoH
use_cached = False
if "doh_server" in CACHED_CONF:
use_cached = yes_no(
f"Use cached DoH server ({CACHED_CONF['doh_server']})?")
if not use_cached:
doh = input("DNS over HTTP server (leave empty to disable): ").strip()
CACHED_CONF['doh_server'] = doh
# guardian shellcode
path = f"/tmp/{next(tempfile._get_candidate_names())}"
CACHED_CONF['guardian_shellcode'] = gen_guardian_shellcode(path)
CACHED_CONF['guardian_agent_path'] = path
# option to disable autoproxy and broadcasting
if not yes_no("Use autoproxy (will enable UDP broadcasting)"):
CACHED_CONF['broadcast_interval_max'] = 0
gobuild = GoBuild(target=target, cc_indicator=indicator, cc_ip=ccip)
gobuild.build()
def log_error(msg):
'''
print in red
'''
print("\u001b[31m"+msg+"\u001b[0m")
def log_warn(msg):
'''
print in yellow
'''
print("\u001b[33m"+msg+"\u001b[0m")
def save(prev_h_len, hfile):
'''
append to histfile
'''
new_h_len = readline.get_current_history_length()
readline.set_history_length(1000)
readline.append_history_file(new_h_len - prev_h_len, hfile)
# JSON config file, cache some user data
BUILD_JSON = "./build/build.json"
CACHED_CONF = {}
if os.path.exists(BUILD_JSON):
try:
jsonf = open(BUILD_JSON)
CACHED_CONF = json.load(jsonf)
jsonf.close()
except BaseException:
log_warn(traceback.format_exc())
def rand_port():
'''
returns a random port number, as a string, between 1025 and 65534
'''
return str(random.randint(1025, 65534))
def randomize_ports():
'''
randomize every port used by emp3r0r agent,
cache them in build.json
'''
if 'cc_port' not in CACHED_CONF:
CACHED_CONF['cc_port'] = rand_port()
if 'sshd_port' not in CACHED_CONF:
CACHED_CONF['sshd_port'] = rand_port()
if 'proxy_port' not in CACHED_CONF:
CACHED_CONF['proxy_port'] = rand_port()
if 'broadcast_port' not in CACHED_CONF:
CACHED_CONF['broadcast_port'] = rand_port()
def gen_guardian_shellcode(path):
'''
../shellcode/gen.py
'''
if not shutil.which("nasm"):
log_error("nasm not found")
try:
pwd = os.getcwd()
os.chdir("../shellcode")
out = subprocess.check_output(["python3", "gen.py", path])
os.chdir(pwd)
shellcode = out.decode('utf-8')
if "\\x48" not in shellcode:
log_error("Failed to generate shellcode: "+out)
return "N/A"
except BaseException:
log_error(traceback.format_exc())
return "N/A"
return shellcode
def get_version():
'''
print current version
'''
try:
check = "git describe --tags"
out = subprocess.check_output(
["/bin/sh", "-c", check],
stderr=subprocess.STDOUT, timeout=3)
except KeyboardInterrupt:
return "Unknown"
except BaseException:
check = "git describe --always"
try:
out = subprocess.check_output(
["/bin/sh", "-c", check],
stderr=subprocess.STDOUT, timeout=3)
except BaseException:
try:
versionf = open(".version")
version = versionf.read().strip()
versionf.close()
return version
except BaseException:
return "Unknown"
return out.decode("utf-8").strip()
# command line args
yes_to_all = False
parser = argparse.ArgumentParser(description="Build emp3r0r CC/Agent binaries")
parser.add_argument('--target', type=str, required=True,
help='Build target, can be cc/agent/agentw')
parser.add_argument('--pack', action="store_true", required=False,
help='Pack agent binary, only available under Linux, do not use with --dll')
parser.add_argument('--dll', action="store_true", required=False,
help='Load agent binary into other processes via shared library injection')
parser.add_argument('--garble', action="store_true", required=False,
help='Obfuscate agent binary with garble')
parser.add_argument('--yes', action="store_true", required=False,
help='Do not ask questions, take default answers')
args = parser.parse_args()
if args.yes:
yes_to_all = True
try:
randomize_ports()
if not os.path.exists("./build"):
os.mkdir("./build")
# support GNU readline interface, command history
histfile = "./build/.build_py_history"
try:
readline.read_history_file(histfile)
h_len = readline.get_current_history_length()
except FileNotFoundError:
open(histfile, 'wb').close()
h_len = 0
atexit.register(save, h_len, histfile)
main(args.target)
except (KeyboardInterrupt, EOFError, SystemExit):
sys.exit(0)
except BaseException:
log_error(f"[!] Exception:\n{traceback.format_exc()}")
|
[] |
[] |
[
"GOARCH",
"GOOS"
] |
[]
|
["GOARCH", "GOOS"]
|
python
| 2 | 0 | |
pkg/config/derive.go
|
package config
import (
"os"
"strings"
"github.com/ghodss/yaml"
"github.com/openshift/openshift-azure/pkg/api"
)
type derived struct{}
var Derived derived
func (derived) SystemReserved(cs *api.OpenShiftManagedCluster, role api.AgentPoolProfileRole) string {
for _, pool := range cs.Properties.AgentPoolProfiles {
if pool.Role != role {
continue
}
return api.DefaultVMSizeKubeArguments[pool.VMSize][role][api.SystemReserved]
}
return ""
}
func (derived) KubeReserved(cs *api.OpenShiftManagedCluster, role api.AgentPoolProfileRole) string {
for _, pool := range cs.Properties.AgentPoolProfiles {
if pool.Role != role {
continue
}
return api.DefaultVMSizeKubeArguments[pool.VMSize][role][api.KubeReserved]
}
return ""
}
func (derived) PublicHostname(cs *api.OpenShiftManagedCluster) string {
if cs.Properties.PublicHostname != "" {
return cs.Properties.PublicHostname
}
return cs.Properties.FQDN
}
func (derived) RouterLBCNamePrefix(cs *api.OpenShiftManagedCluster) string {
return strings.Split(cs.Properties.RouterProfiles[0].FQDN, ".")[0]
}
func (derived) MasterLBCNamePrefix(cs *api.OpenShiftManagedCluster) string {
return strings.Split(cs.Properties.FQDN, ".")[0]
}
func (derived) CloudProviderConf(cs *api.OpenShiftManagedCluster) ([]byte, error) {
return yaml.Marshal(map[string]string{
"tenantId": cs.Properties.AzProfile.TenantID,
"subscriptionId": cs.Properties.AzProfile.SubscriptionID,
"aadClientId": cs.Properties.ServicePrincipalProfile.ClientID,
"aadClientSecret": cs.Properties.ServicePrincipalProfile.Secret,
"resourceGroup": cs.Properties.AzProfile.ResourceGroup,
"location": cs.Location,
"securityGroupName": "nsg-compute",
"primaryScaleSetName": "ss-compute",
"vmType": "vmss",
})
}
func (derived) RunningUnderTest() bool {
return os.Getenv("RUNNING_UNDER_TEST") != ""
}
func (derived) ImageResourceGroup() string {
return os.Getenv("IMAGE_RESOURCEGROUP")
}
func (derived) ImageResourceName() string {
return os.Getenv("IMAGE_RESOURCENAME")
}
|
[
"\"RUNNING_UNDER_TEST\"",
"\"IMAGE_RESOURCEGROUP\"",
"\"IMAGE_RESOURCENAME\""
] |
[] |
[
"IMAGE_RESOURCENAME",
"RUNNING_UNDER_TEST",
"IMAGE_RESOURCEGROUP"
] |
[]
|
["IMAGE_RESOURCENAME", "RUNNING_UNDER_TEST", "IMAGE_RESOURCEGROUP"]
|
go
| 3 | 0 | |
train_script.py
|
import hparams
from model.wavenet_model import *
from data.dataset import TimbreDataset
from model.timbre_training import *
import atexit
import os
from model_logging import *
from scipy.io import wavfile
#os.environ["CUDA_VISIBLE_DEVICES"] = "0"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = WaveNetModel(hparams.create_harmonic_hparams(), device).to(device)
print('model: ', model)
print('receptive field: ', model.receptive_field)
print('parameter count: ', model.parameter_count())
trainer = ModelTrainer(model=model,
data_folder='data/timbre_model',
lr=0.0001,
weight_decay=0.0,
snapshot_path='./snapshots/harmonic',
snapshot_name='chaconne_model',
snapshot_interval=2000,
device=device,
temperature=0.05)
def exit_handler():
trainer.save_model()
print("exit from keyboard")
#atexit.register(exit_handler)
#epoch = trainer.load_checkpoint('/home/sean/pythonProj/torch_npss/snapshots/harmonic/chaconne_model_930_2019-03-26_06-18-49')
print('start training...')
trainer.train(batch_size=128,
epochs=1650)
# model = WaveNetModel(hparams.create_aperiodic_hparams(), device).to(device)
#
# print('model: ', model)
# print('receptive field: ', model.receptive_field)
# print('parameter count: ', model.parameter_count())
#
# data = TimbreDataset(data_folder='data/timbre_model', receptive_field=model.receptive_field, type=1)
#
# print('the dataset has ' + str(len(data)) + ' items')
#
#
#
# trainer = TimbreTrainer(model=model,
# dataset=data,
# lr=0.0005,
# weight_decay=0.0,
# snapshot_path='./snapshots/aperiodic',
# snapshot_name='chaconne_model',
# snapshot_interval=50000,
# device=device)
#
# print('start training...')
# trainer.train(batch_size=32,
# epochs=420)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
cmd/server/main.go
|
// Default package
package main
//
// Kubeview API scraping service and client host
// Ben Coleman, July 2019, v1
//
import (
"fmt"
"log"
"net/http"
"os"
"path/filepath"
"strings"
"github.com/benc-uk/go-starter/pkg/envhelper"
"github.com/gorilla/mux"
_ "github.com/joho/godotenv/autoload" // Autoloads .env file if it exists
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
var (
healthy = true // Simple health flag
version = "0.0.0" // App version number, set at build time with -ldflags "-X main.version=1.2.3"
buildInfo = "No build details" // Build details, set at build time with -ldflags "-X main.buildInfo='Foo bar'"
clientset *kubernetes.Clientset // Clientset is global because I don't care
)
//
// Main entry point, will start HTTP service
//
func main() {
log.SetOutput(os.Stdout) // Personal preference on log output
log.Printf("### Kubeview v%v starting...", version)
// Port to listen on, change the default as you see fit
serverPort := envhelper.GetEnvInt("PORT", 8000)
inCluster := envhelper.GetEnvBool("IN_CLUSTER", false)
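// PORT and IN_CLUSTER come from the environment; a local .env file is auto-loaded by godotenv (see imports)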
log.Println("### Connecting to Kubernetes...")
var kubeConfig *rest.Config
var err error
// In cluster connect using in-cluster "magic", else build config from .kube/config file
if inCluster {
log.Println("### Creating client in cluster mode")
kubeConfig, err = rest.InClusterConfig()
} else {
var kubeconfigFile = filepath.Join(os.Getenv("HOME"), ".kube", "config")
log.Println("### Creating client with config file:", kubeconfigFile)
kubeConfig, err = clientcmd.BuildConfigFromFlags("", kubeconfigFile)
}
// We have to give up if we can't connect to Kubernetes
if err != nil {
panic(err.Error())
}
log.Println("### Connected to:", kubeConfig.Host)
// Create the clientset, which is our main interface to the Kubernetes API
clientset, err = kubernetes.NewForConfig(kubeConfig)
if err != nil {
panic(err.Error())
}
// Use gorilla/mux for routing
router := mux.NewRouter()
// Add middleware for logging and CORS
router.Use(starterMiddleware)
// Application API routes here
router.HandleFunc("/healthz", routeHealthCheck)
router.HandleFunc("/api/status", routeStatus)
router.HandleFunc("/api/namespaces", routeGetNamespaces)
router.HandleFunc("/api/scrape/{ns}", routeScrapeData)
router.HandleFunc("/api/config", routeConfig)
// Serve the frontend Vue.js SPA
staticDirectory := envhelper.GetEnvString("STATIC_DIR", "./frontend")
fileServer := http.FileServer(http.Dir(staticDirectory))
router.PathPrefix("/js").Handler(http.StripPrefix("/", fileServer))
router.PathPrefix("/css").Handler(http.StripPrefix("/", fileServer))
router.PathPrefix("/img").Handler(http.StripPrefix("/", fileServer))
router.PathPrefix("/favicon.png").Handler(http.StripPrefix("/", fileServer))
// EVERYTHING else redirect to index.html
router.NotFoundHandler = http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
http.ServeFile(resp, req, staticDirectory+"/index.html")
})
log.Printf("### Serving static content from '%v'\n", staticDirectory)
// Start server
log.Printf("### Server listening on %v\n", serverPort)
err = http.ListenAndServe(fmt.Sprintf(":%d", serverPort), router)
if err != nil {
panic(err.Error())
}
}
//
// Log all HTTP requests with client address, method and request URI
// Plus a cheap and dirty CORS enabler
//
func starterMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
resp.Header().Set("Access-Control-Allow-Origin", "*")
log.Println("###", strings.Split(req.RemoteAddr, ":")[0], req.Method, req.RequestURI)
next.ServeHTTP(resp, req)
})
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
packages/seacas/libraries/ioss/src/visualization/catalyst/phactori/Operation/test_PhactoriCameraBlock.py
|
# Copyright(C) 1999-2020 National Technology & Engineering Solutions
# of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
# NTESS, the U.S. Government retains certain rights in this software.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of NTESS nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
global RollUpAllJsonForRunningDuringSetUpClass
RollUpAllJsonForRunningDuringSetUpClass = True
#from phactori import *
import unittest
##from paraview.simple import *
import os
import subprocess
import json
import importlib
import time
#exodus2catalyst_import = importlib.reload("exodus2catalyst4test")
#import exodus2catalyst4test
class TestPhactoriCameraBlock(unittest.TestCase):
def MakeTestJsonSet1(self, testCam1Json, testImageBasename):
#add camera type
testCam1Json["camera type"] = "camera"
#make this version of boilerplate json for test
testJsonSetup1 = {
"camera blocks":{
"testcam1":testCam1Json
},
"representation blocks":{},
"imageset blocks":{
"imageset1":{
"camera":"testcam1",
"image basename":testImageBasename
}
},
"operation blocks":{}
}
return testJsonSetup1
@staticmethod
def runit1(testJson1, testName = "testcatalystscript1"):
#print("runit entered")
#print("testName:")
#print(testName)
#print("testJson1:")
#print(str(testJson1))
testJsonFilename = testName + ".json"
os.environ["PHACTORI_TEST_CAMERA_MODE"] = "on"
os.environ["SNL_CATALYST_SIERRA_USAGE_LOG_FLAG"] = "disable"
ff = open(testJsonFilename, "w")
json.dump(testJson1, ff)
ff.close()
myenv = os.environ
#"/home/jamauld/vbworkdisk1/paraview/paraview_install_2020Aug26/ParaView-5.8.0-MPI-Linux-Python3.7-64bit/bin/pvbatch",
runresult1 = subprocess.run(["pvbatch",
"exodus2catalyst4test.py", "-pj", testJsonFilename,
"TestData/SimpleExodusForTest1.e"], env=myenv)
#print("runit calling import_module\n")
#exodus2catalyst_import = importlib.import_module("exodus2catalyst4test")
##exodus2catalyst_import = importlib.reload("exodus2catalyst4test")
#print("runit returned from import_module, calling main2\n")
#exodus2catalyst_import.main2(testJsonFilename, "SimpleExodusForTest1.e")
#print("runit returned calling main2\n")
#print("runit calling main2\n")
#exodus2catalyst4test.main2(testJsonFilename, "SimpleExodusForTest1.e")
#print("runit returned from calling main2\n")
#print("runit returning")
subprocess.run(["rm", testJsonFilename])
@classmethod
def setUpClass(cls):
global RollUpAllJsonForRunningDuringSetUpClass
if RollUpAllJsonForRunningDuringSetUpClass:
testJson1 = cls.GetBaseTestJson1()
testname, thisTestSubJson1 = cls.getjson_CameraLookDirectionWithDefaultLookAtPointZMinus1()
cls.MergeTestJson1(testJson1, thisTestSubJson1)
testname, thisTestSubJson1 = cls.getjson_CameraLookDirectionWithDefaultLookAtPointZPlus1()
cls.MergeTestJson1(testJson1, thisTestSubJson1)
testname, thisTestSubJson1 = cls.getjson_CameraLookDirectionWithDefaultLookAtPointXYZMinus1()
cls.MergeTestJson1(testJson1, thisTestSubJson1)
testname, thisTestSubJson1 = cls.getjson_CameraLookAtAbsolutePoint1()
cls.MergeTestJson1(testJson1, thisTestSubJson1)
testname, thisTestSubJson1 = cls.getjson_CameraLookAtRelativePoint1()
cls.MergeTestJson1(testJson1, thisTestSubJson1)
testname, thisTestSubJson1 = cls.getjson_CameraLookAtElement1()
cls.MergeTestJson1(testJson1, thisTestSubJson1)
testname, thisTestSubJson1 = cls.getjson_CameraLookAtNode1()
cls.MergeTestJson1(testJson1, thisTestSubJson1)
testname, thisTestSubJson1 = cls.getjson_CameraLookDirection()
cls.MergeTestJson1(testJson1, thisTestSubJson1)
testname, thisTestSubJson1 = cls.getjson_CameraLookAtRelativeDistance()
cls.MergeTestJson1(testJson1, thisTestSubJson1)
testname, thisTestSubJson1 = cls.getjson_CameraLookAtAbsoluteDistance()
cls.MergeTestJson1(testJson1, thisTestSubJson1)
testname, thisTestSubJson1 = cls.getjson_CameraAtAbsolutePoint()
cls.MergeTestJson1(testJson1, thisTestSubJson1)
testname, thisTestSubJson1 = cls.getjson_CameraAtRelativePoint()
cls.MergeTestJson1(testJson1, thisTestSubJson1)
testname, thisTestSubJson1 = cls.getjson_CameraAtElement()
cls.MergeTestJson1(testJson1, thisTestSubJson1)
testname, thisTestSubJson1 = cls.getjson_CameraAtElementDisplaced()
cls.MergeTestJson1(testJson1, thisTestSubJson1)
testname, thisTestSubJson1 = cls.getjson_CameraAtNodeDisplaced()
cls.MergeTestJson1(testJson1, thisTestSubJson1)
testname, thisTestSubJson1 = cls.getjson_CameraUpVector()
cls.MergeTestJson1(testJson1, thisTestSubJson1)
testname, thisTestSubJson1 = cls.getjson_CameraFOV()
cls.MergeTestJson1(testJson1, thisTestSubJson1)
testname, thisTestSubJson1 = cls.getjson_CameraParallelProjection1()
cls.MergeTestJson1(testJson1, thisTestSubJson1)
testname, thisTestSubJson1 = cls.getjson_CameraImageNameAddon()
cls.MergeTestJson1(testJson1, thisTestSubJson1)
TestPhactoriCameraBlock.runit1(testJson1, "test_PhactoriCameraBlockRolledUpTests")
def CheckVec3AlmostEqual(self, vec1, vec2):
for ii in range (0,3):
self.assertAlmostEqual(vec1[ii], vec2[ii])
def CheckCameraPostionAndFocalPoint(self, testOutputJsonFileName,
baselineCamPos = None, baselineCamFocalPt = None,
baselineCamViewUp = None, baselineCamViewAngle = None,
baselineCamParallelProjection = None, baselineCamParallelScale = None):
ff = open(testOutputJsonFileName,"r")
testJson = json.load(ff)
ff.close()
camPos = testJson["CameraPosition"]
camFocalPt = testJson["CameraFocalPoint"]
camViewUp = testJson["CameraViewUp"]
camViewAngle = testJson["CameraViewAngle"]
camParallelProjection = testJson["CameraParallelProjection"]
camParallelScale = testJson["CameraParallelScale"]
if baselineCamPos != None:
self.CheckVec3AlmostEqual(camPos, baselineCamPos)
if baselineCamFocalPt != None:
self.CheckVec3AlmostEqual(camFocalPt, baselineCamFocalPt)
if baselineCamViewUp != None:
self.CheckVec3AlmostEqual(baselineCamViewUp, camViewUp)
if baselineCamViewAngle != None:
self.assertEqual(baselineCamViewAngle, camViewAngle)
if baselineCamParallelProjection != None:
self.assertEqual(baselineCamParallelProjection, camParallelProjection)
if baselineCamParallelScale != None:
self.assertAlmostEqual(baselineCamParallelScale, camParallelScale)
def RemoveTestOutputFiles2(self, testname):
cmditems = []
cmditems.append("rm")
doRemoveImages = False
for ii in range(0,3):
cmditems.append(testname + ".000" + str(ii) + ".png.test.camera.txt")
if doRemoveImages:
cmditems.append(testname + ".000" + str(ii) + ".png")
subprocess.run(cmditems)
@staticmethod
def GetBaseTestJson1():
testJsonSetup1 = {
"camera blocks":{},
"representation blocks":{},
"imageset blocks":{},
"operation blocks":{}
}
return testJsonSetup1
@staticmethod
def MergeTestJson1(allTestsJson, testJsonToMerge):
#print("MergeTestJson1 entered")
#print("allTestsJson:")
#print(str(allTestsJson))
#print("testJsonToMerge:")
#print(str(testJsonToMerge))
cbjson = testJsonToMerge["camera blocks"]
for cameraNameKey, cameraJsonValue in cbjson.items():
allTestsJson["camera blocks"][cameraNameKey] = cameraJsonValue
isjson = testJsonToMerge["imageset blocks"]
for imagesetBlockNameKey, imagesetBlockJsonValue in isjson.items():
allTestsJson["imageset blocks"][imagesetBlockNameKey] = imagesetBlockJsonValue
#print("allTestsJson after merge:")
#print(str(allTestsJson))
#print("MergeTestJson1 returning")
@staticmethod
def getjson_ForTestHelper1(testname, thisTestJson):
cameraName = testname + "_cam"
imagesetName = testname + "_imageset"
imageBasename = testname + "."
myjson = {
"camera blocks":{
cameraName:thisTestJson
},
"imageset blocks":{
imagesetName:{
"image size":[800,450],
"camera":cameraName,
"image basename":imageBasename
}
}
}
return testname, myjson
@staticmethod
def getjson_CameraLookDirectionWithDefaultLookAtPointZMinus1():
testname = "CameraLookDirectionWithDefaultLookAtPointZMinus1"
thisTestJson = {"look direction":[0.0,0.0,-1.0],"camera type":"camera"}
return TestPhactoriCameraBlock.getjson_ForTestHelper1(testname, thisTestJson)
def test_CameraLookDirectionWithDefaultLookAtPointZMinus1(self):
testname, thisTestSubJson1 = self.getjson_CameraLookDirectionWithDefaultLookAtPointZMinus1()
testOutFileEnding = ".png.test.camera.txt"
testOutFile = testname + ".0002" + testOutFileEnding
testJson1 = self.GetBaseTestJson1()
self.MergeTestJson1(testJson1, thisTestSubJson1)
#testCam1 = {"look direction":[0.0,0.0,-1.0]}
#testJson1 = self.MakeTestJsonSet1(testCam1, testImageBasename)
global RollUpAllJsonForRunningDuringSetUpClass
if RollUpAllJsonForRunningDuringSetUpClass == False:
self.runit1(testJson1, testname)
baselineCamPos = [5.0, -0.001999974250793457, 11.69152674091426]
baselineCamFocalPt = [5.0, -0.001999974250793457, -0.625]
self.CheckCameraPostionAndFocalPoint(testOutFile, baselineCamPos, baselineCamFocalPt)
self.RemoveTestOutputFiles2(testname)
@staticmethod
def getjson_CameraLookDirectionWithDefaultLookAtPointZPlus1():
testname = "CameraLookDirectionWithDefaultLookAtPointZPlus1"
thisTestJson = {"look direction":[0.0,0.0,1.0],"camera type":"camera"}
return TestPhactoriCameraBlock.getjson_ForTestHelper1(testname, thisTestJson)
def test_CameraLookDirectionWithDefaultLookAtPointZPlus1(self):
testname, thisTestSubJson1 = self.getjson_CameraLookDirectionWithDefaultLookAtPointZPlus1()
testOutFileEnding = ".png.test.camera.txt"
testOutFile = testname + ".0002" + testOutFileEnding
testJson1 = self.GetBaseTestJson1()
self.MergeTestJson1(testJson1, thisTestSubJson1)
global RollUpAllJsonForRunningDuringSetUpClass
if RollUpAllJsonForRunningDuringSetUpClass == False:
self.runit1(testJson1, testname)
baselineCamPos = [5.0, -0.001999974250793457, -12.94152674091426]
baselineCamFocalPt = [5.0, -0.001999974250793457, -0.625]
self.CheckCameraPostionAndFocalPoint(testOutFile, baselineCamPos, baselineCamFocalPt)
self.RemoveTestOutputFiles2(testname)
@staticmethod
def getjson_CameraLookDirectionWithDefaultLookAtPointXYZMinus1():
testname = "CameraLookDirectionWithDefaultLookAtPointXYZMinus1"
thisTestJson = {"look direction":[-1.0,-1.0,-1.0],"camera type":"camera"}
return TestPhactoriCameraBlock.getjson_ForTestHelper1(testname, thisTestJson)
def test_CameraLookDirectionWithDefaultLookAtPointXYZMinus1(self):
testname, thisTestSubJson1 = self.getjson_CameraLookDirectionWithDefaultLookAtPointXYZMinus1()
testOutFileEnding = ".png.test.camera.txt"
testOutFile = testname + ".0002" + testOutFileEnding
testJson1 = self.GetBaseTestJson1()
self.MergeTestJson1(testJson1, thisTestSubJson1)
global RollUpAllJsonForRunningDuringSetUpClass
if RollUpAllJsonForRunningDuringSetUpClass == False:
self.runit1(testJson1, testname)
baselineCamPos = [15.955234275935393, 10.9532343016846, 10.330234275935393]
baselineCamFocalPt = [5.0, -0.001999974250793457, -0.625]
self.CheckCameraPostionAndFocalPoint(testOutFile, baselineCamPos, baselineCamFocalPt)
self.RemoveTestOutputFiles2(testname)
@staticmethod
def getjson_CameraLookAtAbsolutePoint1():
testname = "CameraLookAtAbsolutePoint1"
thisTestJson = {"look at absolute point":[1.1, 2, 3e-8],"camera type":"camera"}
return TestPhactoriCameraBlock.getjson_ForTestHelper1(testname, thisTestJson)
def test_CameraLookAtAbsolutePoint1(self):
testname, thisTestSubJson1 = self.getjson_CameraLookAtAbsolutePoint1()
testOutFileEnding = ".png.test.camera.txt"
testOutFile = testname + ".0002" + testOutFileEnding
testJson1 = self.GetBaseTestJson1()
self.MergeTestJson1(testJson1, thisTestSubJson1)
global RollUpAllJsonForRunningDuringSetUpClass
if RollUpAllJsonForRunningDuringSetUpClass == False:
self.runit1(testJson1, testname)
baselineCamPos = [20.04062128767651, 20.94062128767651, 18.94062131767651]
baselineCamFocalPt = [1.1, 2.0, 3e-08]
self.CheckCameraPostionAndFocalPoint(testOutFile, baselineCamPos, baselineCamFocalPt)
self.RemoveTestOutputFiles2(testname)
@staticmethod
def getjson_CameraLookAtRelativePoint1():
testname = "CameraLookAtRelativePoint1"
thisTestJson = {"look at relative point":[1.5, 0.5, 0.5],"camera type":"camera"}
return TestPhactoriCameraBlock.getjson_ForTestHelper1(testname, thisTestJson)
def test_CameraLookAtRelativePoint1(self):
testname, thisTestSubJson1 = self.getjson_CameraLookAtRelativePoint1()
testOutFileEnding = ".png.test.camera.txt"
testOutFile = testname + ".0002" + testOutFileEnding
testJson1 = self.GetBaseTestJson1()
self.MergeTestJson1(testJson1, thisTestSubJson1)
global RollUpAllJsonForRunningDuringSetUpClass
if RollUpAllJsonForRunningDuringSetUpClass == False:
self.runit1(testJson1, testname)
baselineCamPos = [36.283676885212415, 18.279676936710832, 16.28367688521242]
baselineCamFocalPt = [20.0, 1.996000051498413, 0.0]
self.CheckCameraPostionAndFocalPoint(testOutFile, baselineCamPos, baselineCamFocalPt)
self.RemoveTestOutputFiles2(testname)
@staticmethod
def getjson_CameraLookAtElement1():
testname = "CameraLookAtElement1"
thisTestJson = {"look at element":17,"camera type":"camera"}
return TestPhactoriCameraBlock.getjson_ForTestHelper1(testname, thisTestJson)
def test_CameraLookAtElement1(self):
testname, thisTestSubJson1 = self.getjson_CameraLookAtElement1()
testOutFileEnding = ".png.test.camera.txt"
testOutFile = testname + ".0002" + testOutFileEnding
testJson1 = self.GetBaseTestJson1()
self.MergeTestJson1(testJson1, thisTestSubJson1)
global RollUpAllJsonForRunningDuringSetUpClass
if RollUpAllJsonForRunningDuringSetUpClass == False:
self.runit1(testJson1, testname)
baselineCamPos = [17.57686849224281, 13.693803655979154, 12.070801931497572]
baselineCamFocalPt = [4.881066560745239, 0.9980017244815826, -0.625]
self.CheckCameraPostionAndFocalPoint(testOutFile, baselineCamPos, baselineCamFocalPt)
self.RemoveTestOutputFiles2(testname)
@staticmethod
def getjson_CameraLookAtNode1():
testname = "CameraLookAtNode1"
thisTestJson = {"look at node":20,"camera type":"camera"}
return TestPhactoriCameraBlock.getjson_ForTestHelper1(testname, thisTestJson)
def test_CameraLookAtNode1(self):
testname, thisTestSubJson1 = self.getjson_CameraLookAtNode1()
testOutFileEnding = ".png.test.camera.txt"
testOutFile = testname + ".0002" + testOutFileEnding
testJson1 = self.GetBaseTestJson1()
self.MergeTestJson1(testJson1, thisTestSubJson1)
global RollUpAllJsonForRunningDuringSetUpClass
if RollUpAllJsonForRunningDuringSetUpClass == False:
self.runit1(testJson1, testname)
baselineCamPos = [16.37710985121342, 9.618462409349407, 8.368462409349407]
baselineCamFocalPt = [6.758647441864014, 0.0, -1.25]
self.CheckCameraPostionAndFocalPoint(testOutFile, baselineCamPos, baselineCamFocalPt)
self.RemoveTestOutputFiles2(testname)
@staticmethod
def getjson_CameraLookDirection():
testname = "CameraLookDirection"
thisTestJson = {"look direction": [1, 2, 3],"camera type":"camera"}
return TestPhactoriCameraBlock.getjson_ForTestHelper1(testname, thisTestJson)
def test_CameraLookDirection(self):
testname, thisTestSubJson1 = self.getjson_CameraLookDirection()
testOutFileEnding = ".png.test.camera.txt"
testOutFile = testname + ".0002" + testOutFileEnding
testJson1 = self.GetBaseTestJson1()
self.MergeTestJson1(testJson1, thisTestSubJson1)
global RollUpAllJsonForRunningDuringSetUpClass
if RollUpAllJsonForRunningDuringSetUpClass == False:
self.runit1(testJson1, testname)
baselineCamPos = [1.3139980597431027, -7.374003854764588, -11.683005820770692]
baselineCamFocalPt = [5.0, -0.001999974250793457, -0.625]
self.CheckCameraPostionAndFocalPoint(testOutFile, baselineCamPos, baselineCamFocalPt)
self.RemoveTestOutputFiles2(testname)
@staticmethod
def getjson_CameraLookAtRelativeDistance():
testname = "CameraLookAtRelativeDistance"
thisTestJson = {"look at relative distance": 2.0,"camera type":"camera"}
return TestPhactoriCameraBlock.getjson_ForTestHelper1(testname, thisTestJson)
def test_CameraLookAtRelativeDistance(self):
testname, thisTestSubJson1 = self.getjson_CameraLookAtRelativeDistance()
testOutFileEnding = ".png.test.camera.txt"
testOutFile = testname + ".0002" + testOutFileEnding
testJson1 = self.GetBaseTestJson1()
self.MergeTestJson1(testJson1, thisTestSubJson1)
global RollUpAllJsonForRunningDuringSetUpClass
if RollUpAllJsonForRunningDuringSetUpClass == False:
self.runit1(testJson1, testname)
baselineCamPos = [26.910468551870785, 21.908468577619992, 21.285468551870785]
baselineCamFocalPt = [5.0, -0.001999974250793457, -0.625]
self.CheckCameraPostionAndFocalPoint(testOutFile, baselineCamPos, baselineCamFocalPt)
self.RemoveTestOutputFiles2(testname)
@staticmethod
def getjson_CameraLookAtAbsoluteDistance():
testname = "CameraLookAtAbsoluteDistance"
thisTestJson = {"look at absolute distance": 15.0,"camera type":"camera"}
return TestPhactoriCameraBlock.getjson_ForTestHelper1(testname, thisTestJson)
def test_CameraLookAtAbsoluteDistance(self):
testname, thisTestSubJson1 = self.getjson_CameraLookAtAbsoluteDistance()
testOutFileEnding = ".png.test.camera.txt"
testOutFile = testname + ".0002" + testOutFileEnding
testJson1 = self.GetBaseTestJson1()
self.MergeTestJson1(testJson1, thisTestSubJson1)
global RollUpAllJsonForRunningDuringSetUpClass
if RollUpAllJsonForRunningDuringSetUpClass == False:
self.runit1(testJson1, testname)
baselineCamPos = [13.660254037844387, 8.658254063593594, 8.035254037844387]
baselineCamFocalPt = [5.0, -0.001999974250793457, -0.625]
self.CheckCameraPostionAndFocalPoint(testOutFile, baselineCamPos, baselineCamFocalPt)
self.RemoveTestOutputFiles2(testname)
@staticmethod
def getjson_CameraAtAbsolutePoint():
testname = "CameraAtAbsolutePoint"
thisTestJson = {"camera at absolute point": [-2.0, 3.0, 30.0], "camera type":"camera"}
return TestPhactoriCameraBlock.getjson_ForTestHelper1(testname, thisTestJson)
def test_CameraAtAbsolutePoint(self):
testname, thisTestSubJson1 = self.getjson_CameraAtAbsolutePoint()
testOutFileEnding = ".png.test.camera.txt"
testOutFile = testname + ".0002" + testOutFileEnding
testJson1 = self.GetBaseTestJson1()
self.MergeTestJson1(testJson1, thisTestSubJson1)
global RollUpAllJsonForRunningDuringSetUpClass
if RollUpAllJsonForRunningDuringSetUpClass == False:
self.runit1(testJson1, testname)
baselineCamPos = [-2.0, 3.0, 30.0]
baselineCamFocalPt = [5.0, -0.001999974250793457, -0.625]
self.CheckCameraPostionAndFocalPoint(testOutFile, baselineCamPos, baselineCamFocalPt)
self.RemoveTestOutputFiles2(testname)
@staticmethod
def getjson_CameraAtRelativePoint():
testname = "CameraAtRelativePoint"
thisTestJson = {"camera at relative point": [-0.5, 1.5, 20.0], "camera type":"camera"}
return TestPhactoriCameraBlock.getjson_ForTestHelper1(testname, thisTestJson)
def test_CameraAtRelativePoint(self):
testname, thisTestSubJson1 = self.getjson_CameraAtRelativePoint()
testOutFileEnding = ".png.test.camera.txt"
testOutFile = testname + ".0002" + testOutFileEnding
testJson1 = self.GetBaseTestJson1()
self.MergeTestJson1(testJson1, thisTestSubJson1)
global RollUpAllJsonForRunningDuringSetUpClass
if RollUpAllJsonForRunningDuringSetUpClass == False:
self.runit1(testJson1, testname)
baselineCamPos = [0.0, 5.992000102996826, 24.375]
baselineCamFocalPt = [5.0, -0.001999974250793457, -0.625]
self.CheckCameraPostionAndFocalPoint(testOutFile, baselineCamPos, baselineCamFocalPt)
self.RemoveTestOutputFiles2(testname)
@staticmethod
def getjson_CameraAtElement():
testname = "CameraAtElement"
thisTestJson = {"camera at element": 1, "camera type":"camera"}
return TestPhactoriCameraBlock.getjson_ForTestHelper1(testname, thisTestJson)
def test_CameraAtElement(self):
testname, thisTestSubJson1 = self.getjson_CameraAtElement()
testOutFileEnding = ".png.test.camera.txt"
testOutFile = testname + ".0002" + testOutFileEnding
testJson1 = self.GetBaseTestJson1()
self.MergeTestJson1(testJson1, thisTestSubJson1)
global RollUpAllJsonForRunningDuringSetUpClass
if RollUpAllJsonForRunningDuringSetUpClass == False:
self.runit1(testJson1, testname)
baselineCamPos = [0.37538909912109375, 0.3326896131038666, -0.625]
baselineCamFocalPt = [5.0, -0.001999974250793457, -0.625]
self.CheckCameraPostionAndFocalPoint(testOutFile, baselineCamPos, baselineCamFocalPt)
self.RemoveTestOutputFiles2(testname)
@staticmethod
def getjson_CameraAtElementDisplaced():
testname = "CameraAtElementDisplaced"
thisTestJson = {"camera at element displaced": [1,-3.0,10.0,20.0], "look at element": 1, "camera type":"camera"}
return TestPhactoriCameraBlock.getjson_ForTestHelper1(testname, thisTestJson)
def test_CameraAtElementDisplaced(self):
testname, thisTestSubJson1 = self.getjson_CameraAtElementDisplaced()
testOutFileEnding = ".png.test.camera.txt"
testOutFile = testname + ".0002" + testOutFileEnding
testJson1 = self.GetBaseTestJson1()
self.MergeTestJson1(testJson1, thisTestSubJson1)
global RollUpAllJsonForRunningDuringSetUpClass
if RollUpAllJsonForRunningDuringSetUpClass == False:
self.runit1(testJson1, testname)
baselineCamPos = [-2.6246109008789062, 10.332689613103867, 19.375]
baselineCamFocalPt = [0.37538909912109375, 0.3326896131038666, -0.625]
self.CheckCameraPostionAndFocalPoint(testOutFile, baselineCamPos, baselineCamFocalPt)
self.RemoveTestOutputFiles2(testname)
@staticmethod
def getjson_CameraAtNodeDisplaced():
testname = "CameraAtNodeDisplaced"
thisTestJson = {"camera at node displaced": [1,-3.0,10.0,20.0], "look at node": 1, "camera type":"camera"}
return TestPhactoriCameraBlock.getjson_ForTestHelper1(testname, thisTestJson)
def test_CameraAtNodeDisplaced(self):
testname, thisTestSubJson1 = self.getjson_CameraAtNodeDisplaced()
testOutFileEnding = ".png.test.camera.txt"
testOutFile = testname + ".0002" + testOutFileEnding
testJson1 = self.GetBaseTestJson1()
self.MergeTestJson1(testJson1, thisTestSubJson1)
global RollUpAllJsonForRunningDuringSetUpClass
if RollUpAllJsonForRunningDuringSetUpClass == False:
self.runit1(testJson1, testname)
baselineCamPos = [-3.0, 10.0, 20.0]
baselineCamFocalPt = [0.0, 0.0, 0.0]
self.CheckCameraPostionAndFocalPoint(testOutFile, baselineCamPos, baselineCamFocalPt)
self.RemoveTestOutputFiles2(testname)
@staticmethod
def getjson_CameraUpVector():
testname = "CameraUpVector"
thisTestJson = {"up vector": [0,1,2], "camera type":"camera"}
return TestPhactoriCameraBlock.getjson_ForTestHelper1(testname, thisTestJson)
def test_CameraUpVector(self):
testname, thisTestSubJson1 = self.getjson_CameraUpVector()
testOutFileEnding = ".png.test.camera.txt"
testOutFile = testname + ".0002" + testOutFileEnding
testJson1 = self.GetBaseTestJson1()
self.MergeTestJson1(testJson1, thisTestSubJson1)
global RollUpAllJsonForRunningDuringSetUpClass
if RollUpAllJsonForRunningDuringSetUpClass == False:
self.runit1(testJson1, testname)
baselineCamPos = [16.67302481767549, 11.671024843424698, 11.048024817675492]
baselineCamFocalPt = [5.0, -0.001999974250793457, -0.625]
baselineCamViewUp = [0.0, 1.0, 2.0]
self.CheckCameraPostionAndFocalPoint(testOutFile, baselineCamPos, baselineCamFocalPt, baselineCamViewUp)
self.RemoveTestOutputFiles2(testname)
@staticmethod
def getjson_CameraFOV():
testname = "CameraFOV"
thisTestJson = {"camera fov": 45,
"look at relative point": [0.0, 0.0, 0.0],
"look at absolute distance": 10.0,
"camera type":"camera"}
return TestPhactoriCameraBlock.getjson_ForTestHelper1(testname, thisTestJson)
def test_CameraFOV(self):
testname, thisTestSubJson1 = self.getjson_CameraFOV()
testOutFileEnding = ".png.test.camera.txt"
testOutFile = testname + ".0002" + testOutFileEnding
testJson1 = self.GetBaseTestJson1()
self.MergeTestJson1(testJson1, thisTestSubJson1)
global RollUpAllJsonForRunningDuringSetUpClass
if RollUpAllJsonForRunningDuringSetUpClass == False:
self.runit1(testJson1, testname)
baselineCamPos = [10.773502691896258, 5.771502717645465, 5.148502691896258]
baselineCamFocalPt = [5.0, -0.001999974250793457, -0.625]
baselineCamAngle = 45.0
self.CheckCameraPostionAndFocalPoint(testOutFile, baselineCamPos, baselineCamFocalPt, None, baselineCamAngle)
self.RemoveTestOutputFiles2(testname)
@staticmethod
def getjson_CameraParallelProjection1():
testname = "CameraParallelProjection1"
thisTestJson = {"projection type": "parallel", "look direction": [-5,-1,-1], "camera type":"camera"}
return TestPhactoriCameraBlock.getjson_ForTestHelper1(testname, thisTestJson)
def test_CameraParallelProjection1(self):
testname, thisTestSubJson1 = self.getjson_CameraParallelProjection1()
testOutFileEnding = ".png.test.camera.txt"
testOutFile = testname + ".0002" + testOutFileEnding
testJson1 = self.GetBaseTestJson1()
self.MergeTestJson1(testJson1, thisTestSubJson1)
global RollUpAllJsonForRunningDuringSetUpClass
if RollUpAllJsonForRunningDuringSetUpClass == False:
self.runit1(testJson1, testname)
baselineCamPos = [21.08689812331651, 3.2153796504125087, 2.592379624663302]
baselineCamFocalPt = [5.0, -0.001999974250793457, -0.625]
baselineCamParallelProjection = 1
baselineCamParallelScale = 5.69160334053072
self.CheckCameraPostionAndFocalPoint(testOutFile, baselineCamPos,
baselineCamFocalPt, None, None,
baselineCamParallelProjection, baselineCamParallelScale)
self.RemoveTestOutputFiles2(testname)
@staticmethod
def getjson_CameraImageNameAddon():
testname = "CameraImageNameAddon"
thisTestJson = {"image name addon": "_foo_", "camera type":"camera"}
return TestPhactoriCameraBlock.getjson_ForTestHelper1(testname, thisTestJson)
def test_CameraImageNameAddon(self):
testname, thisTestSubJson1 = self.getjson_CameraImageNameAddon()
testOutFileEnding = ".png.test.camera.txt"
testOutFile = testname + ".0002" + testOutFileEnding
testJson1 = self.GetBaseTestJson1()
self.MergeTestJson1(testJson1, thisTestSubJson1)
global RollUpAllJsonForRunningDuringSetUpClass
if RollUpAllJsonForRunningDuringSetUpClass == False:
self.runit1(testJson1, testname)
        # name of the image file should include the addon string, so test for that.
imageNameGotAddon = os.path.exists("CameraImageNameAddon._foo_0002.png.test.camera.txt")
self.assertTrue(imageNameGotAddon)
subprocess.run(["rm",
"CameraImageNameAddon._foo_0000.png.test.camera.txt",
"CameraImageNameAddon._foo_0001.png.test.camera.txt",
"CameraImageNameAddon._foo_0002.png.test.camera.txt",])
if __name__ == '__main__':
    # Create a trivial ParaView source and show it so that a render view
    # exists before the camera tests run.
    cc = Cone()
    rr = Show()
    unittest.main()
|
[] |
[] |
[
"PHACTORI_TEST_CAMERA_MODE",
"SNL_CATALYST_SIERRA_USAGE_LOG_FLAG"
] |
[]
|
["PHACTORI_TEST_CAMERA_MODE", "SNL_CATALYST_SIERRA_USAGE_LOG_FLAG"]
|
python
| 2 | 0 | |
unstable/unstable.go
|
package main
import (
	"fmt"
	"math/rand"
	"os"

	"github.com/SebastiaanPasterkamp/gobernate"
	ge "github.com/SebastiaanPasterkamp/gonyexpress"
	pl "github.com/SebastiaanPasterkamp/gonyexpress/payload"
	log "github.com/sirupsen/logrus"

	v "unstable/version"
)
func main() {
log.Info("Unstable")
port := os.Getenv("PORT")
if port == "" {
log.Fatal("PORT is not set.")
}
rabbitmq := os.Getenv("RABBITMQ")
if rabbitmq == "" {
log.Fatal("RABBITMQ is not set.")
}
g := gobernate.New(port, v.Name, v.Release, v.Commit, v.BuildTime)
shutdown := g.Launch()
defer g.Shutdown()
c := ge.NewConsumer(rabbitmq, "unstable", 4, randomlyFail)
err := c.Run()
if err != nil {
log.Fatal(err)
}
defer c.Shutdown()
g.Ready()
<-shutdown
}
func randomlyFail(
_ string, _ pl.MetaData, args pl.Arguments, _ pl.Documents,
) (*pl.Documents, *pl.MetaData, error) {
rate, ok := args["error_rate"]
if !ok {
return nil, nil, fmt.Errorf("missing 'error_rate' in arguments")
}
	// Guard the type assertion: a non-numeric error_rate would otherwise panic.
	errorRate, ok := rate.(float64)
	if !ok {
		return nil, nil, fmt.Errorf("'error_rate' must be a number")
	}
	if rand.Float64() <= errorRate {
		return nil, nil, fmt.Errorf("randomly failed for no reason")
	}
return nil, nil, nil
}
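
// Illustrative sketch (not part of the original service): invoking randomlyFail
// directly with a hypothetical error_rate of 0.5. The pl.Arguments literal
// below is an assumption for demonstration only.
//
//	args := pl.Arguments{"error_rate": 0.5}
//	if _, _, err := randomlyFail("", pl.MetaData{}, args, pl.Documents{}); err != nil {
//		log.Warnf("task rejected: %v", err)
//	}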
|
[
"\"PORT\"",
"\"RABBITMQ\""
] |
[] |
[
"PORT",
"RABBITMQ"
] |
[]
|
["PORT", "RABBITMQ"]
|
go
| 2 | 0 | |
delphi/icm_api/config.py
|
import os
from pathlib import Path

# Statement for enabling the development environment
DEBUG = True

# Define the application directory
BASE_DIR = Path(__file__).parent

# Define the database we are working with;
# SQLite is used by default for this example
SQLALCHEMY_DATABASE_URI = os.environ.get(
"SQLALCHEMY_DATABASE_URI", f"sqlite:///{BASE_DIR}/delphi.db"
)
SQLALCHEMY_TRACK_MODIFICATIONS = False
DATABASE_CONNECT_OPTIONS = {}
# Application threads. A common rule of thumb is 2 per available
# processor core: one to handle incoming requests and one to perform
# background operations.
THREADS_PER_PAGE = 2
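
# Example (illustrative): override the database before the app imports this
# module, e.g.
#
#   export SQLALCHEMY_DATABASE_URI="postgresql://user:pass@localhost/delphi"
#
# With the variable unset, the SQLite file next to this module is used.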
|
[] |
[] |
[
"SQLALCHEMY_DATABASE_URI"
] |
[]
|
["SQLALCHEMY_DATABASE_URI"]
|
python
| 1 | 0 | |
vendor/github.com/k0kubun/pp/v3/pp.go
|
package pp
import (
"errors"
"fmt"
"io"
"os"
"runtime"
"sync"
"github.com/mattn/go-colorable"
)
var (
defaultOut = colorable.NewColorableStdout()
defaultWithLineInfo = false
defaultPrettyPrinter = newPrettyPrinter(3) // pp.* => PrettyPrinter.* => formatAll
)
type PrettyPrinter struct {
// WithLineInfo adds file name and line information to output.
	// Enable it with care, because getting the stack has a performance penalty.
WithLineInfo bool
// To support WithLineInfo, we need to know which frame we should look at.
// Thus callerLevel sets the number of frames it needs to skip.
callerLevel int
out io.Writer
currentScheme ColorScheme
outLock sync.Mutex
maxDepth int
coloringEnabled bool
decimalUint bool
thousandsSeparator bool
// This skips unexported fields of structs.
exportedOnly bool
}
// New creates a new PrettyPrinter that can be used to pretty print values
func New() *PrettyPrinter {
return newPrettyPrinter(2) // PrettyPrinter.* => formatAll
}
func newPrettyPrinter(callerLevel int) *PrettyPrinter {
return &PrettyPrinter{
WithLineInfo: defaultWithLineInfo,
callerLevel: callerLevel,
out: defaultOut,
currentScheme: defaultScheme,
maxDepth: -1,
coloringEnabled: true,
decimalUint: true,
exportedOnly: false,
}
}
// Print prints given arguments.
func (pp *PrettyPrinter) Print(a ...interface{}) (n int, err error) {
return fmt.Fprint(pp.out, pp.formatAll(a)...)
}
// Printf prints a given format.
func (pp *PrettyPrinter) Printf(format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(pp.out, format, pp.formatAll(a)...)
}
// Println prints given arguments with newline.
func (pp *PrettyPrinter) Println(a ...interface{}) (n int, err error) {
return fmt.Fprintln(pp.out, pp.formatAll(a)...)
}
// Sprint formats given arguments and returns the result as a string.
func (pp *PrettyPrinter) Sprint(a ...interface{}) string {
return fmt.Sprint(pp.formatAll(a)...)
}
// Sprintf formats with pretty print and returns the result as string.
func (pp *PrettyPrinter) Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, pp.formatAll(a)...)
}
// Sprintln formats given arguments with a newline and returns the result as a string.
func (pp *PrettyPrinter) Sprintln(a ...interface{}) string {
return fmt.Sprintln(pp.formatAll(a)...)
}
// Fprint prints given arguments to a given writer.
func (pp *PrettyPrinter) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprint(w, pp.formatAll(a)...)
}
// Fprintf prints format to a given writer.
func (pp *PrettyPrinter) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(w, format, pp.formatAll(a)...)
}
// Fprintln prints given arguments to a given writer with newline.
func (pp *PrettyPrinter) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprintln(w, pp.formatAll(a)...)
}
// Errorf formats given arguments and returns it as error type.
func (pp *PrettyPrinter) Errorf(format string, a ...interface{}) error {
return errors.New(pp.Sprintf(format, a...))
}
// Fatal prints given arguments and finishes execution with exit status 1.
func (pp *PrettyPrinter) Fatal(a ...interface{}) {
fmt.Fprint(pp.out, pp.formatAll(a)...)
os.Exit(1)
}
// Fatalf prints a given format and finishes execution with exit status 1.
func (pp *PrettyPrinter) Fatalf(format string, a ...interface{}) {
fmt.Fprintf(pp.out, format, pp.formatAll(a)...)
os.Exit(1)
}
// Fatalln prints given arguments with newline and finishes execution with exit status 1.
func (pp *PrettyPrinter) Fatalln(a ...interface{}) {
fmt.Fprintln(pp.out, pp.formatAll(a)...)
os.Exit(1)
}
func (pp *PrettyPrinter) SetColoringEnabled(enabled bool) {
pp.coloringEnabled = enabled
}
func (pp *PrettyPrinter) SetDecimalUint(enabled bool) {
pp.decimalUint = enabled
}
func (pp *PrettyPrinter) SetExportedOnly(enabled bool) {
pp.exportedOnly = enabled
}
func (pp *PrettyPrinter) SetThousandsSeparator(enabled bool) {
pp.thousandsSeparator = enabled
}
// SetOutput sets pp's output
func (pp *PrettyPrinter) SetOutput(o io.Writer) {
pp.outLock.Lock()
defer pp.outLock.Unlock()
pp.out = o
}
// GetOutput returns pp's output.
func (pp *PrettyPrinter) GetOutput() io.Writer {
return pp.out
}
// ResetOutput sets pp's output back to the default output
func (pp *PrettyPrinter) ResetOutput() {
pp.outLock.Lock()
defer pp.outLock.Unlock()
pp.out = defaultOut
}
// SetColorScheme takes a colorscheme used by all future Print calls.
func (pp *PrettyPrinter) SetColorScheme(scheme ColorScheme) {
scheme.fixColors()
pp.currentScheme = scheme
}
// ResetColorScheme resets colorscheme to default.
func (pp *PrettyPrinter) ResetColorScheme() {
pp.currentScheme = defaultScheme
}
func (pp *PrettyPrinter) formatAll(objects []interface{}) []interface{} {
results := []interface{}{}
	// fix for backwards compatibility
withLineInfo := pp.WithLineInfo
if pp == defaultPrettyPrinter {
withLineInfo = WithLineInfo
}
if withLineInfo {
_, fn, line, _ := runtime.Caller(pp.callerLevel)
results = append(results, fmt.Sprintf("%s:%d\n", fn, line))
}
for _, object := range objects {
results = append(results, pp.format(object))
}
return results
}
// Print prints given arguments.
func Print(a ...interface{}) (n int, err error) {
return defaultPrettyPrinter.Print(a...)
}
// Printf prints a given format.
func Printf(format string, a ...interface{}) (n int, err error) {
return defaultPrettyPrinter.Printf(format, a...)
}
// Println prints given arguments with newline.
func Println(a ...interface{}) (n int, err error) {
return defaultPrettyPrinter.Println(a...)
}
// Sprint formats given arguments and returns the result as a string.
func Sprint(a ...interface{}) string {
return defaultPrettyPrinter.Sprint(a...)
}
// Sprintf formats with pretty print and returns the result as string.
func Sprintf(format string, a ...interface{}) string {
return defaultPrettyPrinter.Sprintf(format, a...)
}
// Sprintln formats given arguments with a newline and returns the result as a string.
func Sprintln(a ...interface{}) string {
return defaultPrettyPrinter.Sprintln(a...)
}
// Fprint prints given arguments to a given writer.
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
return defaultPrettyPrinter.Fprint(w, a...)
}
// Fprintf prints format to a given writer.
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
return defaultPrettyPrinter.Fprintf(w, format, a...)
}
// Fprintln prints given arguments to a given writer with newline.
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
return defaultPrettyPrinter.Fprintln(w, a...)
}
// Errorf formats given arguments and returns it as error type.
func Errorf(format string, a ...interface{}) error {
return defaultPrettyPrinter.Errorf(format, a...)
}
// Fatal prints given arguments and finishes execution with exit status 1.
func Fatal(a ...interface{}) {
defaultPrettyPrinter.Fatal(a...)
}
// Fatalf prints a given format and finishes execution with exit status 1.
func Fatalf(format string, a ...interface{}) {
defaultPrettyPrinter.Fatalf(format, a...)
}
// Fatalln prints given arguments with newline and finishes execution with exit status 1.
func Fatalln(a ...interface{}) {
defaultPrettyPrinter.Fatalln(a...)
}
// SetDefaultOutput changes Print* functions' output to a given writer.
// For example, you can limit output by ENV.
//
// func init() {
// if os.Getenv("DEBUG") == "" {
// pp.SetDefaultOutput(ioutil.Discard)
// }
// }
func SetDefaultOutput(o io.Writer) {
defaultPrettyPrinter.SetOutput(o)
}
// GetDefaultOutput returns pp's default output.
func GetDefaultOutput() io.Writer {
return defaultPrettyPrinter.GetOutput()
}
// ResetDefaultOutput changes Print* functions' output back to the default one.
func ResetDefaultOutput() {
defaultPrettyPrinter.ResetOutput()
}
// SetColorScheme takes a colorscheme used by all future Print calls.
func SetColorScheme(scheme ColorScheme) {
defaultPrettyPrinter.SetColorScheme(scheme)
}
// ResetColorScheme resets colorscheme to default.
func ResetColorScheme() {
defaultPrettyPrinter.ResetColorScheme()
}
// SetDefaultMaxDepth sets the default printer's maximum depth; -1 prints all levels.
func SetDefaultMaxDepth(v int) {
defaultPrettyPrinter.maxDepth = v
}
// WithLineInfo makes the default printer add file name and line information
// to output. Enable it with care, because getting the stack has a
// performance penalty.
var WithLineInfo bool
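
// Usage sketch (illustrative), using only the helpers defined above:
//
//	pp.Println(value)            // pretty-print to the default output
//	p := pp.New()                // an independent printer instance
//	p.SetColoringEnabled(false)  // e.g. when writing to a plain log file
//	p.Fprintln(os.Stderr, value)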
|
[
"\"DEBUG\""
] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
go
| 1 | 0 | |
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import ast
import codecs
import os.path
import re
import subprocess
import sys
from codecs import open
from distutils import log
from distutils.errors import DistutilsError
from setuptools import find_packages, setup
from setuptools.command.install import install
from setuptools.command.sdist import sdist as BaseSDistCommand
ROOT = os.path.realpath(os.path.dirname(__file__))
init = os.path.join(ROOT, 'src', 'etools_permissions', '__init__.py')
_version_re = re.compile(r'__version__\s+=\s+(.*)')
_name_re = re.compile(r'NAME\s+=\s+(.*)')
sys.path.insert(0, os.path.join(ROOT, 'src'))
with open(init, 'rb') as f:
content = f.read().decode('utf-8')
VERSION = str(ast.literal_eval(_version_re.search(content).group(1)))
NAME = str(ast.literal_eval(_name_re.search(content).group(1)))
def read(*files):
content = []
for f in files:
content.extend(codecs.open(os.path.join(ROOT, 'src', 'requirements', f), 'r').readlines())
return "\n".join(filter(lambda l:not l.startswith('-'), content))
def check(cmd, filename):
out = subprocess.run(cmd, stdout=subprocess.PIPE)
f = os.path.join('src', 'requirements', filename)
reqs = codecs.open(os.path.join(ROOT, f), 'r').readlines()
    existing = {re.split("(==|>=|<=|>|<)", name[:-1])[0] for name in reqs}
    declared = {re.split("(==|>=|<=|>|<)", name)[0] for name in out.stdout.decode('utf8').split("\n") if name and not name.startswith('-')}
if existing != declared:
msg = """Requirements file not updated.
Run 'make requiremets'
""".format(' '.join(cmd), f)
raise DistutilsError(msg)
class SDistCommand(BaseSDistCommand):
def run(self):
checks = {'install.pip': ['pipenv', 'lock', '--requirements'],
'testing.pip': ['pipenv', 'lock', '-d', '--requirements']}
for filename, cmd in checks.items():
            check(cmd, filename)
super().run()
class VerifyTagVersion(install):
"""Verify that the git tag matches version"""
def run(self):
tag = os.getenv("CIRCLE_TAG")
if tag != VERSION:
info = "Git tag: {} does not match the version of this app: {}".format(
tag,
VERSION
)
sys.exit(info)
setup(name=NAME,
version=VERSION,
url='https://github.com/unicef/etools-permissions',
author='UNICEF',
author_email='[email protected]',
license="Apache 2 License",
description='Django package that handles permissions',
long_description=codecs.open('README.rst').read(),
package_dir={'': 'src'},
packages=find_packages(where='src'),
include_package_data=True,
install_requires=read('install.pip'),
extras_require={
'test': read('install.pip', 'testing.pip'),
},
platforms=['any'],
classifiers=[
'Environment :: Web Environment',
'Programming Language :: Python :: 3.6',
'Framework :: Django',
'Intended Audience :: Developers'],
scripts=[],
cmdclass={
'sdist': SDistCommand,
"verify": VerifyTagVersion,
}
)
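
# Example (illustrative): the "verify" command is meant for CI, where CircleCI
# exports CIRCLE_TAG, e.g.
#
#   CIRCLE_TAG=1.2.3 python setup.py verify
#
# which aborts the build unless the tag equals the version declared in
# src/etools_permissions/__init__.py.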
|
[] |
[] |
[
"CIRCLE_TAG"
] |
[]
|
["CIRCLE_TAG"]
|
python
| 1 | 0 | |
providers/ibm/database_etcd.go
|
// Copyright 2019 The Terraformer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ibm
import (
"os"
"github.com/GoogleCloudPlatform/terraformer/terraformutils"
bluemix "github.com/IBM-Cloud/bluemix-go"
"github.com/IBM-Cloud/bluemix-go/api/resource/resourcev1/catalog"
"github.com/IBM-Cloud/bluemix-go/api/resource/resourcev2/controllerv2"
"github.com/IBM-Cloud/bluemix-go/session"
)
// DatabaseETCDGenerator generates Terraform resources for IBM Cloud
// "Databases for etcd" instances.
type DatabaseETCDGenerator struct {
IBMService
}
// loadETCDDB wraps a single etcd database instance as an ibm_database resource.
func (g DatabaseETCDGenerator) loadETCDDB(dbID string, dbName string) terraformutils.Resource {
resources := terraformutils.NewSimpleResource(
dbID,
normalizeResourceName(dbName, false),
"ibm_database",
"ibm",
[]string{})
return resources
}
// InitResources lists all "Databases for etcd" service instances in the
// configured region and registers them as resources.
func (g *DatabaseETCDGenerator) InitResources() error {
region := os.Getenv("IC_REGION")
bmxConfig := &bluemix.Config{
BluemixAPIKey: os.Getenv("IC_API_KEY"),
Region: region,
}
sess, err := session.New(bmxConfig)
if err != nil {
return err
}
catalogClient, err := catalog.New(sess)
if err != nil {
return err
}
controllerClient, err := controllerv2.New(sess)
if err != nil {
return err
}
serviceID, err := catalogClient.ResourceCatalog().FindByName("databases-for-etcd", true)
if err != nil {
return err
}
query := controllerv2.ServiceInstanceQuery{
ServiceID: serviceID[0].ID,
}
etcdInstances, err := controllerClient.ResourceServiceInstanceV2().ListInstances(query)
if err != nil {
return err
}
for _, db := range etcdInstances {
if db.RegionID == region {
g.Resources = append(g.Resources, g.loadETCDDB(db.ID, db.Name))
}
}
return nil
}
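
// Illustrative note: InitResources reads IC_API_KEY and IC_REGION from the
// environment, so a typical run is preceded by (shell, shown for
// demonstration only):
//
//	export IC_API_KEY=<your IBM Cloud API key>
//	export IC_REGION=us-south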
|
[
"\"IC_REGION\"",
"\"IC_API_KEY\""
] |
[] |
[
"IC_API_KEY",
"IC_REGION"
] |
[]
|
["IC_API_KEY", "IC_REGION"]
|
go
| 2 | 0 | |
ucloud-sdk-java-udisk/src/test/java/cn/ucloud/udisk/client/SetUDiskUDataArkModeTest.java
|
package cn.ucloud.udisk.client;
import cn.ucloud.common.handler.UcloudHandler;
import cn.ucloud.common.pojo.Account;
import cn.ucloud.udisk.model.SetUDiskUDataArkModeParam;
import cn.ucloud.udisk.model.SetUDiskUDataArkModeResult;
import cn.ucloud.udisk.pojo.UdiskConfig;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.assertNull;
/**
 * @description: Tests setting the UDataArk mode of a UDisk.
* @author: joshua
* @E-mail: [email protected]
* @date: 2018/9/26 18:01
*/
public class SetUDiskUDataArkModeTest {
private UdiskClient client;
private SetUDiskUDataArkModeParam param;
@Before
public void initData() {
client = new DefaultUdiskClient(new UdiskConfig(
new Account(System.getenv("UCloudPrivateKey"),
System.getenv("UCloudPublicKey"))));
param = new SetUDiskUDataArkModeParam("cn-sh2", "cn-sh2-01", "bs-4qfrwv","org-4nfe1i",
"Yes");
}
@Test
public void setUDiskUDataArkMode() {
try {
SetUDiskUDataArkModeResult result = client.setUDiskUDataArkMode(param);
JSONComparator.jsonComparator(result);
} catch (Exception e) {
e.printStackTrace();
}
}
@Ignore
@Test
public void setUDiskUDataArkModeCallback() {
client.setUDiskUDataArkMode(param, new UcloudHandler<SetUDiskUDataArkModeResult>() {
@Override
public Object success(SetUDiskUDataArkModeResult result) {
JSONComparator.jsonComparator(result);
return null;
}
@Override
public Object failed(SetUDiskUDataArkModeResult result) {
JSONComparator.jsonComparator(result);
return null;
}
@Override
public Object error(Exception e) {
assertNull(e);
return null;
}
}, false);
}
}
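
// Illustrative note: the Account above is built from environment variables,
// so the test expects something like
//   export UCloudPublicKey=<public key>
//   export UCloudPrivateKey=<private key>
// to be set before it runs.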
|
[
"\"UCloudPrivateKey\"",
"\"UCloudPublicKey\""
] |
[] |
[
"UCloudPrivateKey",
"UCloudPublicKey"
] |
[]
|
["UCloudPrivateKey", "UCloudPublicKey"]
|
java
| 2 | 0 | |
src/main/java/com/verizon/dispatch/route/web/controllers/DispatchController.java
|
package com.verizon.dispatch.route.web.controllers;
import java.io.IOException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.cloud.Cloud;
import org.springframework.data.mongodb.core.geo.Distance;
import org.springframework.data.mongodb.core.geo.Metrics;
import org.springframework.data.mongodb.core.geo.Point;
import org.springframework.stereotype.Controller;
import org.springframework.ui.Model;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RequestParam;
import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.verizon.dispatch.route.domain.User;
import com.verizon.dispatch.route.domain.TechRoute;
import com.verizon.dispatch.route.repositories.mongodb.DispatchRepository;
import com.verizon.dispatch.route.repositories.mongodb.TechRouteRepository;
import com.verizon.dispatch.route.util.EmailTemplate;
import com.verizon.dispatch.route.util.CommonUtil;
/**
 * Controller for the dispatch/carpool routing application - Spring MVC version
 * (derived from the Cloud Foundry workshop sample).
*
*/
@Controller
public class DispatchController {
private static final Logger logger = LoggerFactory.getLogger(DispatchController.class);
@Autowired
private DispatchRepository dispatchRepository;
@Autowired
private TechRouteRepository techRouteRepository;
@Autowired (required=false) Cloud cloud;
/**
* Gets basic environment information. This is the application's
* default action.
* @param model The model for this action.
* @return The path to the view.
* @throws IOException
* @throws JsonMappingException
* @throws JsonParseException
*/
@RequestMapping(value = "/", method = RequestMethod.GET)
public String index(Model model) throws Exception {
Date date = new Date();
DateFormat dateFormat = new SimpleDateFormat("MM/dd/yyyy h:mm a");
String serverTime = dateFormat.format(date);
model.addAttribute("serverTime", serverTime);
String port = System.getenv("PORT");
model.addAttribute("port", port);
String vcapServices = System.getenv("VCAP_SERVICES");
model.addAttribute("vcapServices", vcapServices);
		if (cloud == null) {
model.addAttribute("isCloudEnvironment",false);
} else {
model.addAttribute("isCloudEnvironment",true);
model.addAttribute("vcapApplication", cloud.getApplicationInstanceInfo().getProperties());
logger.info("VCAP_SERVICES [{}] ", vcapServices);
logger.info("VCAP_APPLICATION [{}] ", System.getenv("VCAP_APPLICATION"));
}
logger.info("Current date and time = [{}], port = [{}].", serverTime, port);
User update = new User();
update.setId("2548579");
update.setEmail("[email protected]");
update.setFirstName("Pavan");
update.setLastName("Kumar");
update.setPhoneNumber("8332898007");
update.setZipCode(500050);
update.setPoolMode("P");
update.setVehicleType("4 Wheeler");
update.setVehicleCapacity(4);
update.setIsEnrolled("Y");
		double[] plocation = {78.340129, 17.493686}; // [longitude, latitude] order for MongoDB
update.setLocation(plocation);
// update.setStartDateTime("30-07-2015");
dispatchRepository.save(update);
/*
update = new User();
update.setId("2548580");
update.setEmail("[email protected]");
update.setFirstName("Satya");
update.setLastName("Pavan");
update.setPhoneNumber("121313123");
update.setZipCode("500050");
update.setPoolMode("N");
String slocation[] = {"78.360294","17.484168"};
update.setLocation(slocation);
update.setStartDateTime("07/30/2015");
dispatchRepository.save(update);
update = new User();
update.setId("2548581");
update.setEmail("[email protected]");
update.setFirstName("Surendra");
update.setLastName("Ganti");
update.setPhoneNumber("8332898007");
update.setZipCode("500050");
update.setPoolMode("N");
String glocation[] = {"78.533762","17.449104"};
update.setLocation(glocation);
update.setStartDateTime("07/30/2015");
dispatchRepository.save(update); */
return "index";
}
@RequestMapping(value = "/faq", method = RequestMethod.GET)
public String faq(@RequestParam("username") String userId, Model model) {
model.addAttribute("username", userId);
return "faq";
}
@RequestMapping(value = "/login", method = { RequestMethod.GET, RequestMethod.POST })
public String login(@RequestParam("username") String userId, Model model) {
User user = dispatchRepository.findOne(userId);
if(user!=null && !user.getId().equals(""))
{
model.addAttribute("empid", userId);
model.addAttribute("firstname", user.getFirstName());
model.addAttribute("lastname", user.getLastName());
model.addAttribute("email", user.getEmail());
model.addAttribute("zipcode", String.valueOf(user.getZipCode()));
model.addAttribute("status", user.getIsEnrolled());
Date startDate = user.getStartDate();
if(startDate!=null)
{
SimpleDateFormat sdf = new SimpleDateFormat("dd-MM-yyyy~HH:mm");
String sd = sdf.format(startDate);
model.addAttribute("startDate", sd.split("~")[0]);
model.addAttribute("startTimeHr", sd.split("~")[1].split(":")[0]);
model.addAttribute("startTimeMin", sd.split("~")[1].split(":")[1]);
}
model.addAttribute("addressDesc", user.getAddressDesc());
model.addAttribute("poolType", user.getPoolMode());
model.addAttribute("vehicleType", user.getVehicleType());
model.addAttribute("capacity", String.valueOf(user.getVehicleCapacity()));
if(user.getPoolMode()!=null && user.getPoolMode().equals("P"))
{
List<User> tUsers = dispatchRepository.findByProviderUserId(userId);
if(tUsers!=null)
{
StringBuilder takers = new StringBuilder();
StringBuilder currentPool = new StringBuilder();
takers.append("[");
for(User u: tUsers)
{
if(u.getIsEnrolled().equals("Y") && u.getPoolMode().equals("N") && u.getLocation()!=null && u.getLocation().length == 2)
{
if(takers.toString().length() > 1)
takers.append(",");
takers.append("['").append(u.getFirstName()).append(" ").append(u.getLastName()).append("',")
.append(u.getLocation()[1]).append(",").append(u.getLocation()[0]).append(",'").append("N").append("']");
if(currentPool.toString().length()>0)
currentPool.append("<br>");
currentPool.append(u.getFirstName()).append(" ").append(u.getLastName()).append(" | ").append(u.getPhoneNumber()).append(" | ").append(u.getEmail());
}
}
takers.append("]");
model.addAttribute("others", takers.toString());
model.addAttribute("currentPool",currentPool.toString());
}
}
else if(user.getPoolMode()!=null && user.getPoolMode().equals("N"))
{
if(user.getProviderUserId()!=null)
{
StringBuilder providers = new StringBuilder();
providers.append("[");
User u = dispatchRepository.findOne(user.getProviderUserId());
if(u.getIsEnrolled().equals("Y") && u.getLocation()!=null && u.getLocation().length == 2)
{
providers.append("['").append(u.getFirstName()).append(" ").append(u.getLastName()).append("',")
.append(u.getLocation()[1]).append(",").append(u.getLocation()[0]).append(",'").append("P").append("']");
String currentPool = u.getFirstName()+ " "+u.getLastName()+" | "+u.getPhoneNumber()+" | "+u.getEmail();
model.addAttribute("currentPool",currentPool);
}
providers.append("]");
model.addAttribute("others", providers.toString());
}
}
double[] geoData = user.getLocation();
if(geoData!=null && geoData.length == 2)
{
model.addAttribute("location", (String.valueOf(geoData[1])+","+String.valueOf(geoData[0])));
// set near users
}
return "poolingRequest";
}
else
{
model.addAttribute("message","Login failed. If you are a new user please register first.");
return "index";
}
}
@RequestMapping(value = "/report", method = RequestMethod.GET)
public String report(@RequestParam("username") String userId, Model model) {
if(userId!=null)
{
List<User> users = dispatchRepository.findAll();
StringBuilder providers = new StringBuilder();
StringBuilder takers = new StringBuilder();
StringBuilder mappedusers = new StringBuilder();
providers.append("["); takers.append("[");
mappedusers.append("[");
Set<String> usersChecked = new HashSet<String>();
for(User u: users)
{
if(u.getIsEnrolled()!=null && u.getIsEnrolled().equals("Y") && u.getLocation()!=null && u.getLocation().length == 2)
{
if(u.getProviderUserId()!=null && !usersChecked.contains(u.getId()))
{
usersChecked.add(u.getId());
usersChecked.add(u.getProviderUserId());
if(mappedusers.toString().length() > 1)
mappedusers.append(",");
mappedusers.append("['").append(u.getFirstName()).append(" ").append(u.getLastName()).append("<br>")
.append(u.getId()).append("<br>").append(u.getPhoneNumber()).append("<br>").append(u.getEmail())
.append("',")
.append(u.getLocation()[1]).append(",").append(u.getLocation()[0]).append(",'").append("M").append("']");
u = dispatchRepository.findOne(u.getProviderUserId());
mappedusers.append(",['").append(u.getFirstName()).append(" ").append(u.getLastName()).append("<br>")
.append(u.getId()).append("<br>").append(u.getPhoneNumber()).append("<br>").append(u.getEmail())
.append("',")
.append(u.getLocation()[1]).append(",").append(u.getLocation()[0]).append(",'").append("M").append("']");
}
else if(u.getPoolMode().equals("P") && !usersChecked.contains(u.getId()))
{
List<User> pu = dispatchRepository.findByProviderUserId(u.getId());
if(!(pu!=null && pu.size()>0))
{
usersChecked.add(u.getId());
if(providers.toString().length() > 1)
providers.append(",");
providers.append("['").append(u.getFirstName()).append(" ").append(u.getLastName()).append("<br>")
.append(u.getId()).append("<br>").append(u.getPhoneNumber()).append("<br>").append(u.getEmail())
.append("',")
.append(u.getLocation()[1]).append(",").append(u.getLocation()[0]).append(",'").append("P").append("']");
}
}
else if(u.getPoolMode().equals("N") && !usersChecked.contains(u.getId()))
{
usersChecked.add(u.getId());
if(takers.toString().length() > 1)
takers.append(",");
takers.append("['").append(u.getFirstName()).append(" ").append(u.getLastName()).append("<br>")
.append(u.getId()).append("<br>").append(u.getPhoneNumber()).append("<br>").append(u.getEmail())
.append("',")
.append(u.getLocation()[1]).append(",").append(u.getLocation()[0]).append(",'").append("N").append("']");
}
}
}
providers.append("]"); takers.append("]");mappedusers.append("]");
model.addAttribute("providers", providers.toString());
model.addAttribute("takers", takers.toString());
model.addAttribute("mappedusers", mappedusers.toString());
model.addAttribute("username", userId);
return "reports";
}
else
{
model.addAttribute("message", "Session expired!! please login again");
return "index";
}
}
// @RequestMapping(value = "/submitRequest", method = RequestMethod.POST)
// public String submitRequest(@RequestParam("userId") String userId,
// @RequestParam("avlVehicleChk") String avlVehicleChk,
// Model model) {
//
// if(userId!=null)
// {
// User user = dispatchRepository.findOne(userId);
// String prevProviderUserId = user.getProviderUserId();
// if(prevProviderUserId!=null && !prevProviderUserId.equals(avlVehicleChk))
// {
// User prevProvider = dispatchRepository.findOne(user.getProviderUserId());
// prevProvider.setPickCount(prevProvider.getPickCount()>0?prevProvider.getPickCount()-1:0);
// prevProvider.setAvailableCount(prevProvider.getVehicleCapacity()-prevProvider.getPickCount());
// dispatchRepository.save(prevProvider);
// // send email to old provider that user de-tagged from his pool
// String emailBody = EmailTemplate.TEXT_CAR_POOL_DROPPED_PROVIDER.replace(EmailTemplate.RECEIPIENT, prevProvider.getFirstName())
// .replace(EmailTemplate.FIRST_NAME, user.getFirstName()).replace(EmailTemplate.LAST_NAME, user.getLastName())
// .replace(EmailTemplate.MOBILE, user.getPhoneNumber()).replace(EmailTemplate.EMAIL, user.getEmail())
// .replace(EmailTemplate.ADDRESS, user.getAddressDesc());
// logger.info("SENDING EMAIL");
// CommonUtil.sendEmail(prevProvider.getEmail(),EmailTemplate.SUB_CAR_POOL_DROPPED_PROVIDER, emailBody);
// // send email to the user that he is de-tagged from his current pool
// emailBody = EmailTemplate.TEXT_CAR_POOL_DROPPED_USER.replace(EmailTemplate.RECEIPIENT, user.getFirstName())
// .replace(EmailTemplate.FIRST_NAME, prevProvider.getFirstName()).replace(EmailTemplate.LAST_NAME, prevProvider.getLastName())
// .replace(EmailTemplate.MOBILE, prevProvider.getPhoneNumber()).replace(EmailTemplate.EMAIL, prevProvider.getEmail())
// .replace(EmailTemplate.ADDRESS, prevProvider.getAddressDesc());
// logger.info("SENDING EMAIL");
// CommonUtil.sendEmail(user.getEmail(),EmailTemplate.SUB_CAR_POOL_DROPPED_USER, emailBody);
// }
// user.setProviderUserId(avlVehicleChk);
// dispatchRepository.save(user);
// model.addAttribute("empid", userId);
// model.addAttribute("firstname", user.getFirstName());
// model.addAttribute("lastname", user.getLastName());
// model.addAttribute("email", user.getEmail());
// model.addAttribute("zipcode", String.valueOf(user.getZipCode()));
// model.addAttribute("status", user.getIsEnrolled());
// Date startDate = user.getStartDate();
// if(startDate!=null)
// {
// SimpleDateFormat sdf = new SimpleDateFormat("dd-MM-yyyy~HH:mm");
// String sd = sdf.format(startDate);
// model.addAttribute("startDate", sd.split("~")[0]);
// model.addAttribute("startTimeHr", sd.split("~")[1].split(":")[0]);
// model.addAttribute("startTimeMin", sd.split("~")[1].split(":")[1]);
// }
// model.addAttribute("addressDesc", user.getAddressDesc());
// model.addAttribute("poolType", user.getPoolMode());
// model.addAttribute("vehicleType", user.getVehicleType());
// model.addAttribute("capacity", String.valueOf(user.getVehicleCapacity()));
// if(user.getProviderUserId()!=null)
// {
// User providerUser = dispatchRepository.findOne(user.getProviderUserId());
// if(prevProviderUserId ==null)
// prevProviderUserId = "";
// if(!prevProviderUserId.equals(avlVehicleChk))
// {
// providerUser.setPickCount(providerUser.getPickCount()+1);
// providerUser.setAvailableCount(providerUser.getVehicleCapacity()-providerUser.getPickCount());
// dispatchRepository.save(providerUser);
// // send email to new provider that user tagged to his pool
// String emailBody = EmailTemplate.TEXT_CAR_POOL_ENROLLED_PROVIDER.replace(EmailTemplate.RECEIPIENT, providerUser.getFirstName())
// .replace(EmailTemplate.FIRST_NAME, user.getFirstName()).replace(EmailTemplate.LAST_NAME, user.getLastName())
// .replace(EmailTemplate.MOBILE, user.getPhoneNumber()).replace(EmailTemplate.EMAIL, user.getEmail())
// .replace(EmailTemplate.ADDRESS, user.getAddressDesc());
// logger.info("SENDING EMAIL");
// CommonUtil.sendEmail(providerUser.getEmail(),EmailTemplate.SUB_CAR_POOL_ENROLLED_PROVIDER, emailBody);
// // send email to the user that he is de-tagged from his current pool
// emailBody = EmailTemplate.TEXT_CAR_POOL_ENROLLED_USER.replace(EmailTemplate.RECEIPIENT, user.getFirstName())
// .replace(EmailTemplate.FIRST_NAME, providerUser.getFirstName()).replace(EmailTemplate.LAST_NAME, providerUser.getLastName())
// .replace(EmailTemplate.MOBILE, providerUser.getPhoneNumber()).replace(EmailTemplate.EMAIL, providerUser.getEmail())
// .replace(EmailTemplate.ADDRESS, providerUser.getAddressDesc());
// CommonUtil.sendEmail(user.getEmail(),EmailTemplate.SUB_CAR_POOL_ENROLLED_USER, emailBody);
// logger.info("SENDING EMAIL");
// }
// StringBuilder providers = new StringBuilder();
// providers.append("[");
// providers.append("['").append(providerUser.getFirstName()).append(" ").append(providerUser.getLastName()).append("',")
// .append(providerUser.getLocation()[1]).append(",").append(providerUser.getLocation()[0]).append(",'").append("P").append("']");
// providers.append("]");
// model.addAttribute("others", providers.toString());
// String currentPool = providerUser.getFirstName()+ " "+providerUser.getLastName()+" | "+providerUser.getPhoneNumber()+" | "+providerUser.getEmail();
// model.addAttribute("currentPool",currentPool);
// }
// double[] geoData = user.getLocation();
// if(geoData!=null && geoData.length == 2)
// model.addAttribute("location", (String.valueOf(geoData[1])+","+String.valueOf(geoData[0])));
// return "poolingRequest";
// }
// else
// {
// model.addAttribute("message", "Session expired!! please login again");
// return "index";
// }
// }
@RequestMapping(value = "/update", method = RequestMethod.POST)
public String update(@RequestParam("username") String userId,
@RequestParam("userLocation") String location,
@RequestParam("status") String enrolledStatus,
@RequestParam("poolType") String poolMode,
@RequestParam("capacity") String vehicleCapacity,
@RequestParam("startDate") String startDate,
@RequestParam("startTimeHr") String startTimeHr,
@RequestParam("startTimeMin") String startTimeMin,
@RequestParam("addressDesc") String addressDesc,
@RequestParam("vehicleType") String vehicleType,
Model model) {
User user = dispatchRepository.findOne(userId);
if(user!=null && !user.getId().equals(""))
{
user.setAddressDesc(addressDesc);
if(poolMode.equals("P") && vehicleCapacity!=null && !vehicleCapacity.trim().equals(""))
user.setVehicleCapacity(Integer.parseInt(vehicleCapacity));
if(poolMode.equals("P") && vehicleType!=null && !vehicleType.trim().equals(""))
user.setVehicleType(vehicleType);
if(user.getPickCount()==0 && poolMode.equals("P"))
{
user.setPickCount(0);
}
user.setAvailableCount(user.getVehicleCapacity()-user.getPickCount());
logger.info("update Data:: "+location+","+enrolledStatus+","+poolMode+","+vehicleCapacity+","+startDate+","+startTimeHr+","+startTimeMin+","+addressDesc+","+vehicleType);
boolean isLocUpdate = false;
boolean isStatusUpdate = false;
boolean isPoolModeUpdate = false;
boolean isStartDateUpdate = false;
boolean isSentEmail = false;
isStatusUpdate = !(user.getIsEnrolled()!= null && enrolledStatus.equals(user.getIsEnrolled()));
isPoolModeUpdate = !(user.getPoolMode()!= null && poolMode.equals(user.getPoolMode()));
if(location!=null && location.contains(","))
{
if(location.contains("("))
location = location.substring(1, location.length()-1);
				// flip lat and long: MongoDB expects [longitude, latitude] order
double [] loc = new double[2];
loc[0] = Double.parseDouble(location.split(",")[1].trim());
loc[1] = Double.parseDouble(location.split(",")[0].trim());
double [] prevLoc = user.getLocation();
if(!(prevLoc!=null && prevLoc[0] == loc[0] && prevLoc[1] == loc[1]))
{
user.setLocation(loc);
isLocUpdate = true;
}
}
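			// Example (illustrative): a browser value of "17.493686,78.340129"
			// (latitude,longitude) is stored as [78.340129, 17.493686], because
			// MongoDB geo queries expect [longitude, latitude] order.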
Date stDate = user.getStartDate();
String sDate=null, sHr=null, sMin=null;
if(stDate!=null)
{
SimpleDateFormat sdf = new SimpleDateFormat("dd-MM-yyyy~HH:mm");
String sd = sdf.format(stDate);
sDate = sd.split("~")[0];
sHr = sd.split("~")[1].split(":")[0];
sMin = sd.split("~")[1].split(":")[1];
}
isStartDateUpdate = !(sDate!= null && startDate.equals(sDate) && sHr!= null && startTimeHr.equals(sHr)
&& sMin!= null && startTimeMin.equals(sMin));
if(isStartDateUpdate)
{
try{
SimpleDateFormat sdf = new SimpleDateFormat("dd-MM-yyyy HH:mm");
String sttDate = startDate+" "+startTimeHr+":"+startTimeMin;
user.setStartDate(sdf.parse(sttDate));
}
catch(Exception e)
{
logger.error("issue parsing date field");
e.printStackTrace();
}
}
if((isStatusUpdate || isLocUpdate || isStartDateUpdate) && user.getIsEnrolled()!= null)
{
if(user.getIsEnrolled().equals("N") && (isLocUpdate || isStartDateUpdate))
{
if(user.getPoolMode().equals("N"))
{
if(user.getProviderUserId()!=null)
{
User pUser = dispatchRepository.findOne(user.getProviderUserId());
pUser.setPickCount(pUser.getPickCount()>0?pUser.getPickCount()-1:0);
pUser.setAvailableCount(pUser.getVehicleCapacity() - pUser.getPickCount());
dispatchRepository.save(pUser);
user.setProviderUserId(null);
String emailBody = EmailTemplate.TEXT_CAR_POOL_DROPPED_PROVIDER.replace(EmailTemplate.RECEIPIENT, pUser.getFirstName())
.replace(EmailTemplate.FIRST_NAME, user.getFirstName()).replace(EmailTemplate.LAST_NAME, user.getLastName())
.replace(EmailTemplate.MOBILE, user.getPhoneNumber()).replace(EmailTemplate.EMAIL, user.getEmail())
.replace(EmailTemplate.ADDRESS, user.getAddressDesc());
CommonUtil.sendEmail(pUser.getEmail(),EmailTemplate.SUB_CAR_POOL_DROPPED_PROVIDER, emailBody);
}
isSentEmail = true;
}
else
{
List<User> pUsers = dispatchRepository.findByProviderUserId(userId);
for(User u:pUsers)
{
u.setProviderUserId(null);
dispatchRepository.save(u);
String emailBody = EmailTemplate.TEXT_CAR_POOL_DROPPED_USER.replace(EmailTemplate.RECEIPIENT, u.getFirstName())
.replace(EmailTemplate.FIRST_NAME, user.getFirstName()).replace(EmailTemplate.LAST_NAME, user.getLastName())
.replace(EmailTemplate.MOBILE, user.getPhoneNumber()).replace(EmailTemplate.EMAIL, user.getEmail())
.replace(EmailTemplate.ADDRESS, user.getAddressDesc());
CommonUtil.sendEmail(user.getEmail(),EmailTemplate.SUB_CAR_POOL_DROPPED_USER, emailBody);
isSentEmail = true;
}
user.setPickCount(0);
user.setAvailableCount(user.getVehicleCapacity());
}
}
}
if(isPoolModeUpdate && user.getPoolMode()!= null)
{
if(user.getPoolMode().equals("N"))
{
if(!isSentEmail)
{
if(user.getProviderUserId()!=null)
{
User pUser = dispatchRepository.findOne(user.getProviderUserId());
pUser.setPickCount(pUser.getPickCount()>0?pUser.getPickCount()-1:0);
pUser.setAvailableCount(pUser.getVehicleCapacity() - pUser.getPickCount());
dispatchRepository.save(pUser);
user.setProviderUserId(null);
String emailBody = EmailTemplate.TEXT_CAR_POOL_DROPPED_PROVIDER.replace(EmailTemplate.RECEIPIENT, pUser.getFirstName())
.replace(EmailTemplate.FIRST_NAME, user.getFirstName()).replace(EmailTemplate.LAST_NAME, user.getLastName())
.replace(EmailTemplate.MOBILE, user.getPhoneNumber()).replace(EmailTemplate.EMAIL, user.getEmail())
.replace(EmailTemplate.ADDRESS, user.getAddressDesc());
CommonUtil.sendEmail(pUser.getEmail(),EmailTemplate.SUB_CAR_POOL_DROPPED_PROVIDER, emailBody);
isSentEmail = true;
}
}
user.setPickCount(0);
user.setAvailableCount(user.getVehicleCapacity());
}
else
{
if(!isSentEmail)
{
List<User> pUsers = dispatchRepository.findByProviderUserId(userId);
for(User u:pUsers)
{
u.setProviderUserId(null);
dispatchRepository.save(u);
String emailBody = EmailTemplate.TEXT_CAR_POOL_DROPPED_USER.replace(EmailTemplate.RECEIPIENT, u.getFirstName())
.replace(EmailTemplate.FIRST_NAME, user.getFirstName()).replace(EmailTemplate.LAST_NAME, user.getLastName())
.replace(EmailTemplate.MOBILE, user.getPhoneNumber()).replace(EmailTemplate.EMAIL, user.getEmail())
.replace(EmailTemplate.ADDRESS, user.getAddressDesc());
CommonUtil.sendEmail(user.getEmail(),EmailTemplate.SUB_CAR_POOL_DROPPED_USER, emailBody);
isSentEmail = true;
}
}
user.setPickCount(0);
user.setAvailableCount(user.getVehicleCapacity());
}
}
if(poolMode!=null && poolMode.equals("P"))
{
List<User> tUsers = dispatchRepository.findByProviderUserId(userId);
if(isLocUpdate)
{
List<TechRoute> deleteur = techRouteRepository.findByUserId(user.getId());
techRouteRepository.delete(deleteur);
logger.info("Route point deleted: "+(deleteur!=null?deleteur.size():0));
TechRoute[] routepoints = CommonUtil.getRoutePoints(user.getLocation());
if(routepoints!=null)
{
for(TechRoute ur: routepoints)
{
ur.setUserId(user.getId());
techRouteRepository.save(ur);
}
}
}
if(tUsers!=null)
{
StringBuilder takers = new StringBuilder();
StringBuilder currentPool = new StringBuilder();
takers.append("[");
for(User u: tUsers)
{
if(u.getIsEnrolled().equals("Y") && u.getPoolMode().equals("N") && u.getLocation()!=null && u.getLocation().length == 2)
{
if(takers.toString().length() > 1)
takers.append(",");
takers.append("['").append(u.getFirstName()).append(" ").append(u.getLastName()).append("',")
.append(u.getLocation()[1]).append(",").append(u.getLocation()[0]).append(",'").append("N").append("']");
if(currentPool.toString().length()>0)
currentPool.append("<br>");
currentPool.append(u.getFirstName()).append(" ").append(u.getLastName()).append(" | ").append(u.getPhoneNumber()).append(" | ").append(u.getEmail());
}
}
takers.append("]");
model.addAttribute("others", takers.toString());
model.addAttribute("currentPool",currentPool.toString());
}
}
else if(poolMode!=null && poolMode.equals("N"))
{
if(user.getProviderUserId()!=null)
{
StringBuilder providers = new StringBuilder();
providers.append("[");
User u = dispatchRepository.findOne(user.getProviderUserId());
if(u.getIsEnrolled().equals("Y") && u.getLocation()!=null && u.getLocation().length == 2)
{
providers.append("['").append(u.getFirstName()).append(" ").append(u.getLastName()).append("',")
.append(u.getLocation()[1]).append(",").append(u.getLocation()[0]).append(",'").append("P").append("']");
String currentPool = u.getFirstName()+ " "+u.getLastName()+" | "+u.getPhoneNumber()+" | "+u.getEmail();
model.addAttribute("currentPool",currentPool);
}
providers.append("]");
model.addAttribute("others", providers.toString());
}
}
user.setIsEnrolled(enrolledStatus);
user.setPoolMode(poolMode);
dispatchRepository.save(user);
model.addAttribute("empid", userId);
model.addAttribute("firstname", user.getFirstName());
model.addAttribute("lastname", user.getLastName());
model.addAttribute("email", user.getEmail());
model.addAttribute("zipcode", String.valueOf(user.getZipCode()));
model.addAttribute("status", user.getIsEnrolled());
Date attStDate = user.getStartDate();
if(attStDate!=null)
{
SimpleDateFormat sdf = new SimpleDateFormat("dd-MM-yyyy~HH:mm");
String sd = sdf.format(attStDate);
model.addAttribute("startDate", sd.split("~")[0]);
model.addAttribute("startTimeHr", sd.split("~")[1].split(":")[0]);
model.addAttribute("startTimeMin", sd.split("~")[1].split(":")[1]);
}
model.addAttribute("addressDesc", user.getAddressDesc());
model.addAttribute("poolType", user.getPoolMode());
model.addAttribute("vehicleType", user.getVehicleType());
model.addAttribute("capacity", String.valueOf(user.getVehicleCapacity()));
double[] geoData = user.getLocation();
if(geoData!=null && geoData.length == 2)
{
model.addAttribute("location", (String.valueOf(geoData[1])+","+String.valueOf(geoData[0])));
}
if(user.getPoolMode().equals("P"))
return "poolingRequest";
else
{
List<User> providerList = new ArrayList<User>();
			// Get all users who are enrolled as providers (type P), whose start time is
			// within one hour of this user's start time, whose pick count is below their
			// vehicle capacity, and whose route passes near this user's location.
List<String> userIdList = dispatchRepository.getProvidersList(); // to change
if(userIdList!=null && userIdList.size()>0)
{
Point point = new Point(user.getLocation()[0], user.getLocation()[1]);
Distance distance = new Distance(1, Metrics.KILOMETERS);
List<TechRoute> ur = techRouteRepository.findByLocationNear(point, distance);
logger.info("user routes found:"+(ur!=null?ur.size():0));
if(ur!= null && ur.size()>0)
{
Set<String> avUserIdSet = new HashSet<String>();
for(TechRoute usr: ur)
{
avUserIdSet.add(usr.getUserId());
}
if(user.getProviderUserId()!=null)
avUserIdSet.remove(user.getProviderUserId());
logger.info("near by providers identified :"+avUserIdSet.size());
if(avUserIdSet.size()>0)
{
List<User> users = dispatchRepository.getAvUsersList(avUserIdSet);
logger.info("user list :"+(users!=null?users.size():0));
logger.info("users size provider:: "+users.size());
StringBuilder providers = new StringBuilder();
providers.append("[");
if(users!=null)
for(User u: users)
{
if(providers.toString().length() > 1)
providers.append(",");
providers.append("['").append(u.getFirstName()).append(" ").append(u.getLastName()).append("',")
.append(u.getLocation()[1]).append(",").append(u.getLocation()[0]).append(",'").append("P").append("']");
providerList.add(u);
}
providers.append("]");
logger.info("provider json:"+providers.toString());
model.addAttribute("providers", providers.toString());
model.addAttribute("providerList",providerList);
}
}
}
return "availableVehicleDetails";
}
}
else
{
model.addAttribute("message", "Session expired!! please login again");
return "index";
}
}
}
|
[
"\"PORT\"",
"\"VCAP_SERVICES\"",
"\"VCAP_APPLICATION\""
] |
[] |
[
"PORT",
"VCAP_APPLICATION",
"VCAP_SERVICES"
] |
[]
|
["PORT", "VCAP_APPLICATION", "VCAP_SERVICES"]
|
java
| 3 | 0 | |
autoload/leetcode.py
|
import json
import logging
import re
import time
import os
from threading import Semaphore, Thread, current_thread
try:
from bs4 import BeautifulSoup
import requests
inited = 1
except ImportError:
inited = 0
try:
import vim
except ImportError:
vim = None
LC_BASE = os.environ['LEETCODE_BASE_URL']
LC_CSRF = LC_BASE + '/ensure_csrf/'
LC_LOGIN = LC_BASE + '/accounts/login/'
LC_GRAPHQL = LC_BASE + '/graphql'
LC_CATEGORY_PROBLEMS = LC_BASE + '/api/problems/{category}'
LC_PROBLEM = LC_BASE + '/problems/{slug}/description'
LC_TEST = LC_BASE + '/problems/{slug}/interpret_solution/'
LC_SUBMIT = LC_BASE + '/problems/{slug}/submit/'
LC_SUBMISSIONS = LC_BASE + '/api/submissions/{slug}'
LC_SUBMISSION = LC_BASE + '/submissions/detail/{submission}/'
LC_CHECK = LC_BASE + '/submissions/detail/{submission}/check/'
LC_PROBLEM_SET_ALL = LC_BASE + '/problemset/all/'
EMPTY_FREQUENCIES = [0, 0, 0, 0, 0, 0, 0, 0]
session = None
task_running = False
task_done = False
task_trigger = Semaphore(0)
task_name = ''
task_input = None
task_progress = ''
task_output = None
task_err = ''
log = logging.getLogger(__name__)
log.setLevel(logging.ERROR)
def enable_logging():
out_hdlr = logging.FileHandler('leetcode-vim.log')
out_hdlr.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
out_hdlr.setLevel(logging.INFO)
log.addHandler(out_hdlr)
log.setLevel(logging.INFO)
def _make_headers():
assert is_login()
headers = {'Origin': LC_BASE,
'Referer': LC_BASE,
'X-CSRFToken': session.cookies['csrftoken'],
'X-Requested-With': 'XMLHttpRequest'}
return headers
def _level_to_name(level):
if level == 1:
return 'Easy'
if level == 2:
return 'Medium'
if level == 3:
return 'Hard'
return ' '
def _state_to_flag(state):
if state == 'ac':
return 'X'
if state == 'notac':
return '?'
return ' '
def _status_to_name(status):
if status == 10:
return 'Accepted'
if status == 11:
return 'Wrong Answer'
if status == 12:
return 'Memory Limit Exceeded'
if status == 13:
return 'Output Limit Exceeded'
if status == 14:
return 'Time Limit Exceeded'
if status == 15:
return 'Runtime Error'
if status == 16:
return 'Internal Error'
if status == 20:
return 'Compile Error'
if status == 21:
return 'Unknown Error'
return 'Unknown State'
def _break_code_lines(s):
return s.replace('\r\n', '\n').replace('\xa0', ' ').split('\n')
def _break_paragraph_lines(s):
lines = _break_code_lines(s)
result = []
# reserve one and only one empty line between two non-empty lines
for line in lines:
        if line.strip() != '':  # a line containing only whitespace is also empty
result.append(line)
result.append('')
return result
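# Example (illustrative):
#   _break_paragraph_lines('a\r\n\r\n\r\nb') == ['a', '', 'b', '']
# i.e. runs of blank lines collapse to a single separator and the result
# always ends with one trailing empty line.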
def _remove_description(code):
eod = code.find('[End of Description]')
if eod == -1:
return code
eol = code.find('\n', eod)
if eol == -1:
return ''
return code[eol+1:]
def is_login():
return session and 'LEETCODE_SESSION' in session.cookies
def signin(username, password):
global session
session = requests.Session()
if 'cn' in LC_BASE:
res = session.get(LC_CSRF)
else:
res = session.get(LC_LOGIN)
if res.status_code != 200:
_echoerr('cannot open ' + LC_BASE)
return False
headers = {'Origin': LC_BASE,
'Referer': LC_LOGIN}
form = {'csrfmiddlewaretoken': session.cookies['csrftoken'],
'login': username,
'password': password}
log.info('signin request: headers="%s" login="%s"', headers, username)
# requests follows the redirect url by default
# disable redirection explicitly
res = session.post(LC_LOGIN, data=form, headers=headers, allow_redirects=False)
log.info('signin response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 302:
_echoerr('password incorrect')
return False
return True
def _get_category_problems(category):
headers = _make_headers()
url = LC_CATEGORY_PROBLEMS.format(category=category)
res = session.get(url, headers=headers)
if res.status_code != 200:
_echoerr('cannot get the category: {}'.format(category))
return []
problems = []
content = res.json()
for p in content['stat_status_pairs']:
# skip hidden questions
if p['stat']['question__hide']:
continue
problem = {'state': _state_to_flag(p['status']),
'id': p['stat']['question_id'],
'fid': p['stat']['frontend_question_id'],
'title': p['stat']['question__title'],
'slug': p['stat']['question__title_slug'],
'paid_only': p['paid_only'],
'ac_rate': p['stat']['total_acs'] / p['stat']['total_submitted'],
'level': _level_to_name(p['difficulty']['level']),
'favor': p['is_favor'],
'category': content['category_slug'],
'frequency': p['frequency']}
problems.append(problem)
return problems
def get_problems(categories):
assert is_login()
problems = []
for c in categories:
problems.extend(_get_category_problems(c))
return sorted(problems, key=lambda p: p['id'])
def get_problem(slug):
assert is_login()
headers = _make_headers()
headers['Referer'] = LC_PROBLEM.format(slug=slug)
body = {'query': '''query getQuestionDetail($titleSlug : String!) {
question(titleSlug: $titleSlug) {
questionId
title
content
stats
difficulty
codeDefinition
sampleTestCase
enableRunCode
translatedContent
}
}''',
'variables': {'titleSlug': slug},
'operationName': 'getQuestionDetail'}
log.info('get_problem request: url="%s" headers="%s" body="%s"', LC_GRAPHQL, headers, body)
res = session.post(LC_GRAPHQL, json=body, headers=headers)
log.info('get_problem response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot get the problem: {}'.format(slug))
return None
q = res.json()['data']['question']
content = q['translatedContent'] or q['content']
if content is None:
_echoerr('cannot get the problem: {}'.format(slug))
return None
soup = BeautifulSoup(content, features='html.parser')
problem = {}
problem['id'] = q['questionId']
problem['title'] = q['title']
problem['slug'] = slug
problem['level'] = q['difficulty']
problem['desc'] = _break_paragraph_lines(soup.get_text())
problem['templates'] = {}
for t in json.loads(q['codeDefinition']):
problem['templates'][t['value']] = _break_code_lines(t['defaultCode'])
problem['testable'] = q['enableRunCode']
problem['testcase'] = q['sampleTestCase']
stats = json.loads(q['stats'])
problem['total_accepted'] = stats['totalAccepted']
problem['total_submission'] = stats['totalSubmission']
problem['ac_rate'] = stats['acRate']
return problem
def _split(s):
    # str.split has a disadvantage: ''.split('\n') results in [''], but what we
    # want is []. This small function returns [] if `s` is a blank string, that
    # is, one containing no characters other than whitespace.
if s.strip() == '':
return []
return s.split('\n')
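# Example (illustrative): _split('') == [] and _split('a\nb') == ['a', 'b'],
# whereas ''.split('\n') would give [''].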
def _check_result(submission_id, expected_id=None, testinput=None):
global task_progress
if _in_task():
prog_stage = 'Uploading '
prog_bar = '.'
task_progress = prog_stage + prog_bar
r2 = dict()
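    # Two polling phases: first the expected-answer run (only present when
    # testing a solution), then the actual run; each polls once per second
    # until the judge reports SUCCESS.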
while expected_id:
headers = _make_headers()
url = LC_CHECK.format(submission=expected_id)
log.info('check result request: url="%s" headers="%s"', url, headers)
res = session.get(url, headers=headers)
log.info('check result response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot get the execution result')
return None
if _in_task():
prog_bar += '.'
r2 = res.json()
if r2['state'] == 'SUCCESS':
prog_stage = 'Done '
break
elif r2['state'] == 'PENDING':
prog_stage = 'Pending '
elif r2['state'] == 'STARTED':
prog_stage = 'Running '
if _in_task():
task_progress = prog_stage + prog_bar
time.sleep(1)
while True:
headers = _make_headers()
url = LC_CHECK.format(submission=submission_id)
log.info('check result request: url="%s" headers="%s"', url, headers)
res = session.get(url, headers=headers)
log.info('check result response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot get the execution result')
return None
if _in_task():
prog_bar += '.'
r = res.json()
if r['state'] == 'SUCCESS':
prog_stage = 'Done '
break
elif r['state'] == 'PENDING':
prog_stage = 'Pending '
elif r['state'] == 'STARTED':
prog_stage = 'Running '
if _in_task():
task_progress = prog_stage + prog_bar
time.sleep(1)
result = {
# 'expected_answer': r2.get('code_answer', []),
'runtime': r['status_runtime'],
'state': _status_to_name(r['status_code']),
'testcase': _split(r.get('input', r.get('last_testcase', ''))),
'passed': r.get('total_correct') or 0,
'total': r.get('total_testcases') or 0,
'error': [v for k, v in r.items() if 'error' in k and v]
}
    answer = r.get('code_answer', [])
    expect_answer = r2.get('code_answer', [])
    union_answer = []
    # testinput is only supplied when testing a solution; on the submission
    # path it is None, so guard before indexing (this also protects against
    # answer lists shorter than the input list).
    if testinput:
        for i in range(min(len(testinput), len(answer), len(expect_answer))):
            if answer[i] != expect_answer[i]:
                union_answer.append('[%d] Input: %s' % (i+1, testinput[i]))
                union_answer.append(' Output: %s <<<Wrong' % (answer[i]))
                union_answer.append(' Output: %s <<<Right' % (expect_answer[i]))
            else:
                union_answer.append('[%d] Input: %s Output:%s' % (i+1, testinput[i], answer[i]))
    result['answer'] = union_answer
    # The keys differ between the result of testing the code and submitting it:
    # for a submission judge_type is 'large'; for a test run judge_type is absent.
if r.get('judge_type') == 'large':
result['answer'] = _split(r.get('code_output', ''))
result['expected_answer'] = _split(r.get('expected_output', ''))
result['stdout'] = _split(r.get('std_output', ''))
result['runtime_percentile'] = r.get('runtime_percentile', '')
else:
# Test states cannot distinguish accepted answers from wrong answers.
if result['state'] == 'Accepted':
result['state'] = 'Finished'
result['stdout'] = r.get('code_output', [])
result['runtime_percentile'] = r.get('runtime_percentile', '')
# result['expected_answer'] = r.get('expected_code_answer', [])
result['expected_answer'] = r2.get('code_output', [])
return result
def test_solution(problem_id, title, slug, filetype, code, test_input):
assert is_login()
code = _remove_description(code)
headers = _make_headers()
headers['Referer'] = LC_PROBLEM.format(slug=slug)
body = {'data_input': test_input,
'lang': filetype,
'question_id': str(problem_id),
'test_mode': False,
'typed_code': code}
url = LC_TEST.format(slug=slug)
log.info('test solution request: url="%s" headers="%s" body="%s"', url, headers, body)
res = session.post(url, json=body, headers=headers)
log.info('test solution response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
if 'too fast' in res.text:
_echoerr('you are sending the request too fast')
else:
_echoerr('cannot test the solution for ' + slug)
return None
    data = res.json()
    result = _check_result(data['interpret_id'], data['interpret_expected_id'], test_input.split('\n'))
    if result is None:
        return None
    result['testcase'] = test_input.split('\n')
    result['title'] = title
return result
def test_solution_async(problem_id, title, slug, filetype, code, test_input):
assert is_login()
global task_input, task_name
if task_running:
_echoerr('there is other task running: ' + task_name)
return False
code = _remove_description(code)
task_name = 'test_solution'
task_input = [problem_id, title, slug, filetype, code, test_input]
task_trigger.release()
return True
def submit_solution(slug, filetype, code=None):
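    """Submit the solution (defaulting to the current vim buffer) and return the judge result."""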
assert is_login()
problem = get_problem(slug)
if not problem:
return None
if code is None:
code = '\n'.join(vim.current.buffer)
code = _remove_description(code)
headers = _make_headers()
headers['Referer'] = LC_PROBLEM.format(slug=slug)
body = {'data_input': problem['testcase'],
'lang': filetype,
'question_id': str(problem['id']),
'test_mode': False,
'typed_code': code,
'judge_type': 'large'}
url = LC_SUBMIT.format(slug=slug)
log.info('submit solution request: url="%s" headers="%s" body="%s"', url, headers, body)
res = session.post(url, json=body, headers=headers)
log.info('submit solution response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
if 'too fast' in res.text:
_echoerr('you are sending the request too fast')
else:
_echoerr('cannot submit the solution for ' + slug)
return None
    result = _check_result(res.json()['submission_id'])
    if result is None:
        return None
    result['title'] = problem['title']
return result
def submit_solution_async(slug, filetype, code=None):
assert is_login()
global task_input, task_name
if task_running:
_echoerr('there is other task running: ' + task_name)
return False
if code is None:
code = '\n'.join(vim.current.buffer)
task_name = 'submit_solution'
task_input = [slug, filetype, code]
task_trigger.release()
return True
def get_submissions(slug):
assert is_login()
headers = _make_headers()
headers['Referer'] = LC_PROBLEM.format(slug=slug)
url = LC_SUBMISSIONS.format(slug=slug)
log.info('get submissions request: url="%s" headers="%s"', url, headers)
res = session.get(url, headers=headers)
log.info('get submissions response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot find the submissions of problem: ' + slug)
return None
submissions = []
for r in res.json()['submissions_dump']:
s = {
'id': r['url'].split('/')[3],
'time': r['time'].replace('\xa0', ' '),
'status': r['status_display'],
'runtime': r['runtime'],
}
submissions.append(s)
return submissions
def _group1(match, default):
if match:
return match.group(1)
return default
def _unescape(s):
return s.encode().decode('unicode_escape')
def get_submission(sid):
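    """Fetch a past submission and parse its details out of the page's JavaScript snippet."""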
assert is_login()
headers = _make_headers()
url = LC_SUBMISSION.format(submission=sid)
log.info('get submission request: url="%s" headers="%s"', url, headers)
res = session.get(url, headers=headers)
log.info('get submission response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot find the submission: ' + sid)
return None
# we need to parse the data from the Javascript snippet
s = res.text
submission = {
'id': sid,
'state': _status_to_name(int(_group1(re.search(r"status_code: parseInt\('([^']*)'", s),
'not found'))),
'runtime': _group1(re.search("runtime: '([^']*)'", s), 'not found'),
'passed': _group1(re.search("total_correct : '([^']*)'", s), 'not found'),
'total': _group1(re.search("total_testcases : '([^']*)'", s), 'not found'),
'testcase': _split(_unescape(_group1(re.search("input : '([^']*)'", s), ''))),
'answer': _split(_unescape(_group1(re.search("code_output : '([^']*)'", s), ''))),
'expected_answer': _split(_unescape(_group1(re.search("expected_output : '([^']*)'", s),
''))),
'problem_id': _group1(re.search("questionId: '([^']*)'", s), 'not found'),
'slug': _group1(re.search("editCodeUrl: '([^']*)'", s), '///').split('/')[2],
'filetype': _group1(re.search("getLangDisplay: '([^']*)'", s), 'not found'),
'error': [],
'stdout': [],
}
problem = get_problem(submission['slug'])
submission['title'] = problem['title']
# the punctuations and newlines in the code are escaped like '\\u0010' ('\\' => real backslash)
# to unscape the string, we do the trick '\\u0010'.encode().decode('unicode_escape') ==> '\n'
submission['code'] = _break_code_lines(_unescape(_group1(
re.search("submissionCode: '([^']*)'", s), '')))
dist_str = _unescape(_group1(re.search("runtimeDistributionFormatted: '([^']*)'", s),
'{"distribution":[]}'))
dist = json.loads(dist_str)['distribution']
dist.reverse()
# the second key "runtime" is the runtime in milliseconds
# we need to search from the position after the first "runtime" key
prev_runtime = re.search("runtime: '([^']*)'", s)
if not prev_runtime:
my_runtime = 0
else:
my_runtime = int(_group1(re.search("runtime: '([^']*)'", s[prev_runtime.end():]), 0))
accum = 0
for runtime, frequency in dist:
accum += frequency
if my_runtime >= int(runtime):
break
submission['runtime_percentile'] = '{:.1f}%'.format(accum)
return submission
def _process_topic_element(topic):
return {'topic_name': topic.find(class_='text-gray').string.strip(),
'num_problems': topic.find(class_='badge').string,
'topic_slug': topic.get('href').split('/')[2]}
def _process_company_element(company):
return {'company_name': company.find(class_='text-gray').string.strip(),
'num_problems': company.find(class_='badge').string,
'company_slug': company.get('href').split('/')[2]}
def get_topics_and_companies():
headers = _make_headers()
    log.info('get_topics_and_companies request: url="%s"', LC_PROBLEM_SET_ALL)
res = session.get(LC_PROBLEM_SET_ALL, headers=headers)
log.info('get_topics_and_companies response: status="%s" body="%s"', res.status_code,
res.text)
if res.status_code != 200:
_echoerr('cannot get topics')
return []
soup = BeautifulSoup(res.text, features='html.parser')
topic_elements = soup.find_all(class_='sm-topic')
topics = [_process_topic_element(topic) for topic in topic_elements]
company_elements = soup.find_all(class_='sm-company')
companies = [_process_company_element(company) for company in company_elements]
return {
'topics': topics,
'companies': companies
}
def get_problems_of_topic(topic_slug):
request_body = {
'operationName':'getTopicTag',
'variables': {'slug': topic_slug},
'query': '''query getTopicTag($slug: String!) {
topicTag(slug: $slug) {
name
translatedName
questions {
status
questionId
questionFrontendId
title
titleSlug
translatedTitle
stats
difficulty
isPaidOnly
}
frequencies
}
}
'''}
headers = _make_headers()
log.info('get_problems_of_topic request: headers="%s" body="%s"', headers,
request_body)
res = session.post(LC_GRAPHQL, headers=headers, json=request_body)
log.info('get_problems_of_topic response: status="%s" body="%s"',
res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot get problems of the topic')
return {'topic_name': topic_slug, 'problems': []}
topic_tag = res.json()['data']['topicTag']
if not topic_tag:
return {'topic_name': topic_slug, 'problems': []}
if topic_tag['frequencies']:
id_to_frequency_map = json.loads(topic_tag['frequencies'])
else:
id_to_frequency_map = {}
def process_problem(p):
stats = json.loads(p['stats'])
return {
'state': _state_to_flag(p['status']),
'id': p['questionId'],
'fid': p['questionFrontendId'],
'title': p['title'],
'slug': p['titleSlug'],
'paid_only': p['isPaidOnly'],
            # max(..., 1) guards against problems with zero submissions
            'ac_rate': stats['totalAcceptedRaw'] / max(stats['totalSubmissionRaw'], 1),
'level': p['difficulty'],
'favor': False,
'frequency': id_to_frequency_map.get(p['questionId'], 0)}
return {
'topic_name': topic_tag['name'],
'problems': [process_problem(p) for p in topic_tag['questions']]}
def get_problems_of_company(company_slug):
request_body = {
'operationName':'getCompanyTag',
'variables': {'slug': company_slug},
'query': '''query getCompanyTag($slug: String!) {
companyTag(slug: $slug) {
name
translatedName
frequencies
questions {
...questionFields
}
}
}
fragment questionFields on QuestionNode {
status
questionId
questionFrontendId
title
titleSlug
translatedTitle
stats
difficulty
isPaidOnly
frequencyTimePeriod
}
'''}
headers = _make_headers()
headers['Referer'] = 'https://leetcode.com/company/{}/'.format(company_slug)
log.info('get_problems_of_company request: headers="%s" body="%s"', headers,
request_body)
res = session.post(LC_GRAPHQL, headers=headers, json=request_body)
log.info('get_problems_of_company response: status="%s" body="%s"',
res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot get problems of the company')
return {'company_name': company_slug, 'problems': []}
company_tag = res.json()['data']['companyTag']
if not company_tag:
_echoerr('cannot get problems of the company')
return {'company_name': company_slug, 'problems': []}
if company_tag['frequencies']:
id_to_frequency_map = json.loads(company_tag['frequencies'])
else:
id_to_frequency_map = {}
def process_problem(p):
stats = json.loads(p['stats'])
return {
'state': _state_to_flag(p['status']),
'id': p['questionId'],
'fid': p['questionFrontendId'],
'title': p['title'],
'slug': p['titleSlug'],
'paid_only': p['isPaidOnly'],
            # max(..., 1) guards against problems with zero submissions
            'ac_rate': stats['totalAcceptedRaw'] / max(stats['totalSubmissionRaw'], 1),
'level': p['difficulty'],
'favor': False,
'frequencies': id_to_frequency_map.get(p['questionId'],
EMPTY_FREQUENCIES)[4:]}
return {
'company_name': company_tag['name'],
'problems': [process_problem(p) for p in company_tag['questions']]}
def _thread_main():
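    """Background worker: wait on task_trigger, run the queued task, and publish its output."""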
global task_running, task_done, task_output, task_err
while True:
task_trigger.acquire()
task_running = True
task_done = False
task_output = None
task_err = ''
log.info('task thread input: name="%s" input="%s"', task_name, task_input)
try:
if task_name == 'test_solution':
task_output = test_solution(*task_input)
elif task_name == 'submit_solution':
task_output = submit_solution(*task_input)
except BaseException as e:
task_err = str(e)
log.info('task thread output: name="%s" output="%s" error="%s"', task_name, task_output,
task_err)
task_running = False
task_done = True
def _in_task():
return current_thread() == task_thread
def _echoerr(s):
global task_err
if _in_task():
task_err = s
else:
print(s)
task_thread = Thread(target=_thread_main, daemon=True)
task_thread.start()
|
[] |
[] |
[
"LEETCODE_BASE_URL"
] |
[]
|
["LEETCODE_BASE_URL"]
|
python
| 1 | 0 | |
docassemble_base/docassemble/base/read_config.py
|
import sys
import os
import re
from six import string_types, text_type, PY2
separator = re.compile(r' *[,;] *')
if __name__ == "__main__":
import docassemble.base.config
docassemble.base.config.load(arguments=sys.argv)
from docassemble.base.config import daconfig
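    # Each print below emits shell code ("export VAR=..." / "declare -a ...");
    # the caller is expected to eval this script's stdout to set its environment.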
if 'timezone' in daconfig and daconfig['timezone'] is not None:
print('export TIMEZONE="' + str(daconfig['timezone']) + '"')
if 'os locale' in daconfig and daconfig['os locale'] is not None:
print('export LOCALE="' + str(daconfig['os locale']) + '"')
if PY2:
print('export DAPYTHONVERSION="2"')
else:
print('export DAPYTHONVERSION="3"')
if 'other os locales' in daconfig and type(daconfig['other os locales']) is list:
print('declare -a OTHERLOCALES')
print('export OTHERLOCALES')
indexno = 0
for locale in daconfig['other os locales']:
print('OTHERLOCALES[' + str(indexno) + ']=' + repr(str(locale)))
indexno += 1
else:
other_locales_variable = os.getenv('OTHERLOCALES', None)
if other_locales_variable is not None and other_locales_variable != 'null':
print('declare -a OTHERLOCALES')
print('export OTHERLOCALES')
indexno = 0
for locale in map(lambda x: x.strip(), separator.split(other_locales_variable)):
print('OTHERLOCALES[' + str(indexno) + ']=' + repr(str(locale)))
indexno += 1
if 'debian packages' in daconfig and type(daconfig['debian packages']) is list:
print('declare -a PACKAGES')
print('export PACKAGES')
indexno = 0
for package in daconfig['debian packages']:
print('PACKAGES[' + str(indexno) + ']=' + repr(str(package)))
indexno += 1
else:
packages_variable = os.getenv('PACKAGES', None)
if packages_variable is not None and packages_variable != 'null':
print('declare -a PACKAGES')
print('export PACKAGES')
indexno = 0
for package in map(lambda x: x.strip(), separator.split(packages_variable)):
print('PACKAGES[' + str(indexno) + ']=' + repr(str(package)))
indexno += 1
if 'python packages' in daconfig and type(daconfig['python packages']) is list:
print('declare -a PYTHONPACKAGES')
print('export PYTHONPACKAGES')
indexno = 0
for package in daconfig['python packages']:
print('PYTHONPACKAGES[' + str(indexno) + ']=' + repr(str(package)))
indexno += 1
else:
packages_variable = os.getenv('PYTHONPACKAGES', None)
if packages_variable is not None and packages_variable != 'null':
print('declare -a PYTHONPACKAGES')
print('export PYTHONPACKAGES')
indexno = 0
for package in map(lambda x: x.strip(), separator.split(packages_variable)):
print('PYTHONPACKAGES[' + str(indexno) + ']=' + repr(str(package)))
indexno += 1
if 'db' in daconfig:
if 'prefix' in daconfig['db'] and daconfig['db']['prefix'] is not None:
if daconfig['db']['prefix'].startswith('postgresql'):
print('export DBTYPE="postgresql"')
elif daconfig['db']['prefix'].startswith('mysql'):
print('export DBTYPE="mysql"')
else:
print('export DBTYPE="other"')
print('export DBPREFIX="' + str(daconfig['db']['prefix']) + '"')
if 'name' in daconfig['db'] and daconfig['db']['name'] is not None:
print('export DBNAME="' + str(daconfig['db']['name']) + '"')
if 'user' in daconfig['db'] and daconfig['db']['user'] is not None:
print('export DBUSER="' + str(daconfig['db']['user']) + '"')
if 'password' in daconfig['db'] and daconfig['db']['password'] is not None:
print('export DBPASSWORD="' + str(daconfig['db']['password']) + '"')
if 'host' in daconfig['db'] and daconfig['db']['host'] is not None:
print('export DBHOST="' + str(daconfig['db']['host']) + '"')
if 'port' in daconfig['db'] and daconfig['db']['port'] is not None:
print('export DBPORT="' + str(daconfig['db']['port']) + '"')
if 'table prefix' in daconfig['db'] and daconfig['db']['table prefix'] is not None:
print('export DBTABLEPREFIX="' + str(daconfig['db']['table prefix']) + '"')
if 'update on start' in daconfig and daconfig['update on start'] is False:
print('export DAUPDATEONSTART=false')
if 'expose websockets' in daconfig and daconfig['expose websockets']:
print('export DAEXPOSEWEBSOCKETS=true')
if 'websockets ip' in daconfig and daconfig['websockets ip']:
print('export DAWEBSOCKETSIP="' + str(daconfig['websockets ip']) + '"')
else:
print('export DAWEBSOCKETSIP="127.0.0.1"')
if 'websockets port' in daconfig and daconfig['websockets port']:
print('export DAWEBSOCKETSPORT=' + str(daconfig['websockets port']))
else:
print('export DAWEBSOCKETSPORT=5000')
if 'redis' in daconfig and daconfig['redis'] is not None:
print('export REDIS="' + str(daconfig['redis']) + '"')
if 'rabbitmq' in daconfig and daconfig['rabbitmq'] is not None:
print('export RABBITMQ="' + str(daconfig['rabbitmq']) + '"')
if 'backup days' in daconfig:
        try:
            days = int(daconfig['backup days'])
            assert days >= 0
        except (TypeError, ValueError, AssertionError):
            days = 14
print('export DABACKUPDAYS="' + str(days) + '"')
else:
print('export DABACKUPDAYS="14"')
if 's3' in daconfig:
if 'enable' in daconfig['s3'] and daconfig['s3']['enable']:
print('export S3ENABLE=true')
else:
print('export S3ENABLE=false')
if 'access key id' in daconfig['s3'] and daconfig['s3']['access key id'] is not None:
print('export S3ACCESSKEY="' + str(daconfig['s3']['access key id']) + '"')
print('export AWS_ACCESS_KEY_ID="' + str(daconfig['s3']['access key id']) + '"')
if 'secret access key' in daconfig['s3'] and daconfig['s3']['secret access key'] is not None:
print('export S3SECRETACCESSKEY="' + str(daconfig['s3']['secret access key']) + '"')
print('export AWS_SECRET_ACCESS_KEY="' + str(daconfig['s3']['secret access key']) + '"')
if 'bucket' in daconfig['s3'] and daconfig['s3']['bucket'] is not None:
print('export S3BUCKET="' + str(daconfig['s3']['bucket']) + '"')
if 'region' in daconfig['s3'] and daconfig['s3']['region'] is not None:
print('export S3REGION="' + str(daconfig['s3']['region']) + '"')
if 'azure' in daconfig:
if 'enable' in daconfig['azure'] and daconfig['azure']['enable']:
print('export AZUREENABLE=true')
else:
print('export AZUREENABLE=false')
if 'account name' in daconfig['azure'] and daconfig['azure']['account name'] is not None:
print('export AZUREACCOUNTNAME="' + str(daconfig['azure']['account name']) + '"')
if 'account key' in daconfig['azure'] and daconfig['azure']['account key'] is not None:
print('export AZUREACCOUNTKEY="' + str(daconfig['azure']['account key']) + '"')
if 'container' in daconfig['azure'] and daconfig['azure']['container'] is not None:
print('export AZURECONTAINER="' + str(daconfig['azure']['container']) + '"')
if 'ec2' in daconfig and daconfig['ec2']:
print('export EC2=true')
if 'log server' in daconfig and daconfig['log server'] is not None:
print('export LOGSERVER="' + str(daconfig['log server']) + '"')
if 'log' in daconfig and daconfig['log'] is not None:
print('export LOGDIRECTORY="' + str(daconfig['log']) + '"')
if 'use https' in daconfig and daconfig['use https']:
print('export USEHTTPS=true')
if 'use lets encrypt' in daconfig and daconfig['use lets encrypt']:
print('export USELETSENCRYPT=true')
if 'behind https load balancer' in daconfig and daconfig['behind https load balancer']:
print('export BEHINDHTTPSLOADBALANCER=true')
if 'lets encrypt email' in daconfig and daconfig['lets encrypt email'] is not None:
print('export LETSENCRYPTEMAIL="' + str(daconfig['lets encrypt email']) + '"')
if 'external hostname' in daconfig and daconfig['external hostname'] is not None:
print('export DAHOSTNAME="' + str(daconfig['external hostname']) + '"')
if 'root' in daconfig and daconfig['root'] is not None:
print('export POSTURLROOT="' + str(daconfig['root']) + '"')
print('export WSGIROOT="' + str(re.sub(r'^(.+)/$', r'\1', daconfig['root'])) + '"')
else:
print('export POSTURLROOT="/"')
print('export WSGIROOT="/"')
if 'server administrator email' in daconfig and daconfig['server administrator email']:
print('export SERVERADMIN="' + str(daconfig['server administrator email']) + '"')
else:
print('export SERVERADMIN="webmaster@localhost"')
if 'web server timeout' in daconfig and daconfig['web server timeout'] is not None:
print('export DATIMEOUT="' + str(daconfig['web server timeout']) + '"')
sys.exit(0)
|
[] |
[] |
[
"PACKAGES",
"OTHERLOCALES",
"PYTHONPACKAGES"
] |
[]
|
["PACKAGES", "OTHERLOCALES", "PYTHONPACKAGES"]
|
python
| 3 | 0 | |
pkg/utils/common/common.go
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"cuelang.org/go/cue"
"cuelang.org/go/cue/ast"
"cuelang.org/go/cue/format"
"cuelang.org/go/encoding/openapi"
"github.com/AlecAivazis/survey/v2"
"github.com/hashicorp/hcl/v2/hclparse"
clustergatewayapi "github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1"
"github.com/oam-dev/terraform-config-inspect/tfconfig"
terraformv1beta1 "github.com/oam-dev/terraform-controller/api/v1beta1"
kruise "github.com/openkruise/kruise-api/apps/v1alpha1"
errors2 "github.com/pkg/errors"
certmanager "github.com/wonderflow/cert-manager-api/pkg/apis/certmanager/v1"
istioclientv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1"
v1 "k8s.io/api/core/v1"
crdv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
k8sruntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/util/flowcontrol"
apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
ocmclusterv1 "open-cluster-management.io/api/cluster/v1"
ocmclusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
ocmworkv1 "open-cluster-management.io/api/work/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/yaml"
oamcore "github.com/oam-dev/kubevela/apis/core.oam.dev"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
oamstandard "github.com/oam-dev/kubevela/apis/standard.oam.dev/v1alpha1"
"github.com/oam-dev/kubevela/apis/types"
velacue "github.com/oam-dev/kubevela/pkg/cue"
"github.com/oam-dev/kubevela/pkg/cue/model"
"github.com/oam-dev/kubevela/pkg/oam"
)
var (
// Scheme defines the default KubeVela schema
Scheme = k8sruntime.NewScheme()
)
const (
// AddonObservabilityApplication is the application name for Addon Observability
AddonObservabilityApplication = "addon-observability"
// AddonObservabilityGrafanaSvc is grafana service name for Addon Observability
AddonObservabilityGrafanaSvc = "grafana"
)
// CreateCustomNamespace is the menu option text shown for creating a new namespace
const CreateCustomNamespace = "create new namespace"
func init() {
_ = clientgoscheme.AddToScheme(Scheme)
_ = apiregistrationv1.AddToScheme(Scheme)
_ = crdv1.AddToScheme(Scheme)
_ = oamcore.AddToScheme(Scheme)
_ = oamstandard.AddToScheme(Scheme)
_ = istioclientv1beta1.AddToScheme(Scheme)
_ = certmanager.AddToScheme(Scheme)
_ = kruise.AddToScheme(Scheme)
_ = terraformv1beta1.AddToScheme(Scheme)
_ = ocmclusterv1alpha1.Install(Scheme)
_ = ocmclusterv1.Install(Scheme)
_ = ocmworkv1.Install(Scheme)
_ = clustergatewayapi.AddToScheme(Scheme)
// +kubebuilder:scaffold:scheme
}
// InitBaseRestConfig returns the rest config needed to create a controller-runtime client
func InitBaseRestConfig() (Args, error) {
	restConf, err := config.GetConfig()
	if err != nil {
		if os.Getenv("IGNORE_KUBE_CONFIG") != "true" {
			fmt.Println("get kubeConfig err", err)
			os.Exit(1)
		}
		return Args{}, err
	}
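	// client-side rate limit: 100 QPS sustained with bursts of up to 200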
restConf.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(100, 200)
return Args{
Config: restConf,
Schema: Scheme,
}, nil
}
// globalClient will be a client for the whole command lifecycle
var globalClient client.Client
// SetGlobalClient will set a client for one cli command
func SetGlobalClient(clt client.Client) error {
globalClient = clt
return nil
}
// GetClient returns the K8s client previously set by SetGlobalClient
func GetClient() (client.Client, error) {
if globalClient != nil {
return globalClient, nil
}
return nil, errors.New("client not set, call SetGlobalClient first")
}
// HTTPGet will send GET http request with context
func HTTPGet(ctx context.Context, url string) ([]byte, error) {
	// Use NewRequestWithContext so the request honors the caller's context
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
if err != nil {
return nil, err
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
//nolint:errcheck
defer resp.Body.Close()
return io.ReadAll(resp.Body)
}
// GetCUEParameterValue converts definitions to cue format
func GetCUEParameterValue(cueStr string) (cue.Value, error) {
r := cue.Runtime{}
template, err := r.Compile("", cueStr+velacue.BaseTemplate)
if err != nil {
return cue.Value{}, err
}
tempStruct, err := template.Value().Struct()
if err != nil {
return cue.Value{}, err
}
// find the parameter definition
var paraDef cue.FieldInfo
var found bool
for i := 0; i < tempStruct.Len(); i++ {
paraDef = tempStruct.Field(i)
if paraDef.Name == model.ParameterFieldName {
found = true
break
}
}
if !found {
return cue.Value{}, errors.New("parameter not exist")
}
arguments := paraDef.Value
return arguments, nil
}
// GenOpenAPI generates OpenAPI json schema from cue.Instance
func GenOpenAPI(inst *cue.Instance) ([]byte, error) {
if inst.Err != nil {
return nil, inst.Err
}
paramOnlyIns, err := RefineParameterInstance(inst)
if err != nil {
return nil, err
}
defaultConfig := &openapi.Config{}
b, err := openapi.Gen(paramOnlyIns, defaultConfig)
if err != nil {
return nil, err
}
var out = &bytes.Buffer{}
_ = json.Indent(out, b, "", " ")
return out.Bytes(), nil
}
// extractParameterDefinitionNodeFromInstance extracts the `#parameter` ast.Node from the root instance, falling back to `parameter` via LookupDef if that fails
func extractParameterDefinitionNodeFromInstance(inst *cue.Instance) ast.Node {
opts := []cue.Option{cue.All(), cue.DisallowCycles(true), cue.ResolveReferences(true), cue.Docs(true)}
node := inst.Value().Syntax(opts...)
if fileNode, ok := node.(*ast.File); ok {
for _, decl := range fileNode.Decls {
if field, ok := decl.(*ast.Field); ok {
if label, ok := field.Label.(*ast.Ident); ok && label.Name == "#"+model.ParameterFieldName {
return decl.(*ast.Field).Value
}
}
}
}
paramVal := inst.LookupDef(model.ParameterFieldName)
return paramVal.Syntax(opts...)
}
// RefineParameterInstance refines the cue instance so it includes only the `parameter` identifier
func RefineParameterInstance(inst *cue.Instance) (*cue.Instance, error) {
r := cue.Runtime{}
paramVal := inst.LookupDef(model.ParameterFieldName)
var paramOnlyStr string
switch k := paramVal.IncompleteKind(); k {
case cue.StructKind, cue.ListKind:
paramSyntax, _ := format.Node(extractParameterDefinitionNodeFromInstance(inst))
paramOnlyStr = fmt.Sprintf("#%s: %s\n", model.ParameterFieldName, string(paramSyntax))
case cue.IntKind, cue.StringKind, cue.FloatKind, cue.BoolKind:
paramOnlyStr = fmt.Sprintf("#%s: %v", model.ParameterFieldName, paramVal)
case cue.BottomKind:
paramOnlyStr = fmt.Sprintf("#%s: {}", model.ParameterFieldName)
default:
return nil, fmt.Errorf("unsupport parameter kind: %s", k.String())
}
paramOnlyIns, err := r.Compile("-", paramOnlyStr)
if err != nil {
return nil, err
}
return paramOnlyIns, nil
}
// RealtimePrintCommandOutput prints command output in real time
// If logFile is "", it prints to stdout; otherwise it writes to the given local file
func RealtimePrintCommandOutput(cmd *exec.Cmd, logFile string) error {
var writer io.Writer
if logFile == "" {
writer = io.MultiWriter(os.Stdout)
} else {
if _, err := os.Stat(filepath.Dir(logFile)); err != nil {
return err
}
f, err := os.Create(filepath.Clean(logFile))
if err != nil {
return err
}
writer = io.MultiWriter(f)
}
cmd.Stdout = writer
cmd.Stderr = writer
if err := cmd.Run(); err != nil {
return err
}
return nil
}
// ClusterObject2Map converts a list of ClusterObjectReferences to a readable map
func ClusterObject2Map(refs []common.ClusterObjectReference) map[string]string {
clusterResourceRefTmpl := "Cluster: %s | Namespace: %s | Component: %s | Kind: %s"
objs := make(map[string]string, len(refs))
for _, r := range refs {
if r.Cluster == "" {
r.Cluster = "local"
}
objs[r.Cluster+"/"+r.Namespace+"/"+r.Name+"/"+r.Kind] = fmt.Sprintf(clusterResourceRefTmpl, r.Cluster, r.Namespace, r.Name, r.Kind)
}
return objs
}
// ResourceLocation indicates the resource location
type ResourceLocation struct {
Cluster string
Namespace string
}
type clusterObjectReferenceFilter func(common.ClusterObjectReference) bool
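// clusterObjectReferenceTypeFilterGenerator builds a filter that keeps only
// references whose Kind is in the allowed set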
func clusterObjectReferenceTypeFilterGenerator(allowedKinds ...string) clusterObjectReferenceFilter {
allowedKindMap := map[string]bool{}
for _, allowedKind := range allowedKinds {
allowedKindMap[allowedKind] = true
}
return func(item common.ClusterObjectReference) bool {
_, exists := allowedKindMap[item.Kind]
return exists
}
}
var isWorkloadClusterObjectReferenceFilter = clusterObjectReferenceTypeFilterGenerator("Deployment", "StatefulSet", "CloneSet", "Job", "Configuration")
var isPortForwardEndpointClusterObjectReferenceFilter = clusterObjectReferenceTypeFilterGenerator("Deployment",
"StatefulSet", "CloneSet", "Job", "Service", "HelmRelease")
func filterResource(inputs []common.ClusterObjectReference, filters ...clusterObjectReferenceFilter) (outputs []common.ClusterObjectReference) {
for _, item := range inputs {
flag := true
for _, filter := range filters {
if !filter(item) {
flag = false
break
}
}
if flag {
outputs = append(outputs, item)
}
}
return
}
func askToChooseOneResource(app *v1beta1.Application, filters ...clusterObjectReferenceFilter) (*common.ClusterObjectReference, error) {
resources := app.Status.AppliedResources
if len(resources) == 0 {
return nil, fmt.Errorf("no resources in the application deployed yet")
}
resources = filterResource(resources, filters...)
if app.Name == AddonObservabilityApplication {
resources = filterClusterObjectRefFromAddonObservability(resources)
}
// filter locations
if len(resources) == 0 {
return nil, fmt.Errorf("no supported resources detected in deployed resources")
}
if len(resources) == 1 {
return &resources[0], nil
}
	// Build the option list in the same order as resources so that the selected
	// index maps back to the right ClusterObjectReference (ranging over the map
	// from ClusterObject2Map has no stable order).
	ops := make([]string, 0, len(resources))
	for _, r := range resources {
		cluster := r.Cluster
		if cluster == "" {
			cluster = "local"
		}
		ops = append(ops, fmt.Sprintf("Cluster: %s | Namespace: %s | Component: %s | Kind: %s", cluster, r.Namespace, r.Name, r.Kind))
	}
prompt := &survey.Select{
Message: fmt.Sprintf("You have %d deployed resources in your app. Please choose one:", len(ops)),
Options: ops,
}
var selectedRsc string
err := survey.AskOne(prompt, &selectedRsc)
if err != nil {
return nil, fmt.Errorf("choosing resource err %w", err)
}
for k, resource := range ops {
if selectedRsc == resource {
return &resources[k], nil
}
}
return nil, fmt.Errorf("choosing resource err %w", err)
}
// AskToChooseOneNamespace asks the user to choose one namespace as the env
func AskToChooseOneNamespace(c client.Client, envMeta *types.EnvMeta) error {
var nsList v1.NamespaceList
if err := c.List(context.TODO(), &nsList); err != nil {
return err
}
var ops = []string{CreateCustomNamespace}
for _, r := range nsList.Items {
ops = append(ops, r.Name)
}
prompt := &survey.Select{
Message: "Would you like to choose an existing namespaces as your env?",
Options: ops,
}
err := survey.AskOne(prompt, &envMeta.Namespace)
if err != nil {
return fmt.Errorf("choosing namespace err %w", err)
}
if envMeta.Namespace == CreateCustomNamespace {
err = survey.AskOne(&survey.Input{
Message: "Please name the new namespace:",
}, &envMeta.Namespace)
if err != nil {
return err
}
return nil
}
for _, ns := range nsList.Items {
if ns.Name == envMeta.Namespace && envMeta.Name == "" {
envMeta.Name = ns.Labels[oam.LabelNamespaceOfEnvName]
return nil
}
}
return nil
}
func filterClusterObjectRefFromAddonObservability(resources []common.ClusterObjectReference) []common.ClusterObjectReference {
var observabilityResources []common.ClusterObjectReference
for _, res := range resources {
if res.Namespace == types.DefaultKubeVelaNS && res.Name == AddonObservabilityGrafanaSvc {
res.Kind = "Service"
res.APIVersion = "v1"
observabilityResources = append(observabilityResources, res)
}
}
resources = observabilityResources
return resources
}
// AskToChooseOneEnvResource asks the user to select one applied workload resource of the
// application when more than one exists, and returns the selected ClusterObjectReference
func AskToChooseOneEnvResource(app *v1beta1.Application) (*common.ClusterObjectReference, error) {
return askToChooseOneResource(app, isWorkloadClusterObjectReferenceFilter)
}
// AskToChooseOnePortForwardEndpoint will ask user to select one applied resource as port forward endpoint
func AskToChooseOnePortForwardEndpoint(app *v1beta1.Application) (*common.ClusterObjectReference, error) {
return askToChooseOneResource(app, isPortForwardEndpointClusterObjectReferenceFilter)
}
func askToChooseOneInApplication(category string, options []string) (decision string, err error) {
if len(options) == 0 {
return "", fmt.Errorf("no %s exists in the application", category)
}
if len(options) == 1 {
return options[0], nil
}
prompt := &survey.Select{
Message: fmt.Sprintf("You have multiple %ss in your app. Please choose one %s: ", category, category),
Options: options,
}
if err = survey.AskOne(prompt, &decision); err != nil {
return "", errors2.Wrapf(err, "choosing %s failed", category)
}
return
}
// AskToChooseOneService will ask users to select one service of the application if there is more than one
func AskToChooseOneService(svcNames []string) (string, error) {
return askToChooseOneInApplication("service", svcNames)
}
// AskToChooseOnePods will ask users to select one pod of the resource if there is more than one
func AskToChooseOnePods(podNames []string) (string, error) {
return askToChooseOneInApplication("pod", podNames)
}
// ReadYamlToObject will read a yaml K8s object to runtime.Object
func ReadYamlToObject(path string, object k8sruntime.Object) error {
data, err := os.ReadFile(filepath.Clean(path))
if err != nil {
return err
}
return yaml.Unmarshal(data, object)
}
// ParseTerraformVariables get variables from Terraform Configuration
func ParseTerraformVariables(configuration string) (map[string]*tfconfig.Variable, map[string]*tfconfig.Output, error) {
p := hclparse.NewParser()
hclFile, diagnostic := p.ParseHCL([]byte(configuration), "")
if diagnostic != nil {
return nil, nil, errors.New(diagnostic.Error())
}
mod := tfconfig.Module{Variables: map[string]*tfconfig.Variable{}, Outputs: map[string]*tfconfig.Output{}}
diagnostic = tfconfig.LoadModuleFromFile(hclFile, &mod)
if diagnostic != nil {
return nil, nil, errors.New(diagnostic.Error())
}
return mod.Variables, mod.Outputs, nil
}
// GenerateUnstructuredObj generates an unstructured object with the given name, namespace and GVK
func GenerateUnstructuredObj(name, ns string, gvk schema.GroupVersionKind) *unstructured.Unstructured {
u := &unstructured.Unstructured{}
u.SetGroupVersionKind(gvk)
u.SetName(name)
u.SetNamespace(ns)
return u
}
// SetSpecObjIntoUnstructuredObj sets the given spec as the "spec" field of the unstructured object
func SetSpecObjIntoUnstructuredObj(spec interface{}, u *unstructured.Unstructured) error {
bts, err := json.Marshal(spec)
if err != nil {
return err
}
data := make(map[string]interface{})
if err := json.Unmarshal(bts, &data); err != nil {
return err
}
_ = unstructured.SetNestedMap(u.Object, data, "spec")
return nil
}
// NewK8sClient inits a local k8s client with the oamcore scheme added
func NewK8sClient() (client.Client, error) {
conf, err := config.GetConfig()
if err != nil {
return nil, err
}
scheme := k8sruntime.NewScheme()
if err := clientgoscheme.AddToScheme(scheme); err != nil {
return nil, err
}
if err := oamcore.AddToScheme(scheme); err != nil {
return nil, err
}
k8sClient, err := client.New(conf, client.Options{Scheme: scheme})
if err != nil {
return nil, err
}
return k8sClient, nil
}
|
[
"\"IGNORE_KUBE_CONFIG\""
] |
[] |
[
"IGNORE_KUBE_CONFIG"
] |
[]
|
["IGNORE_KUBE_CONFIG"]
|
go
| 1 | 0 | |
tests/base/test_context.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import absolute_import, division, print_function, unicode_literals
from itertools import chain
import os
from os.path import join, abspath
from tempfile import gettempdir
from unittest import TestCase, mock
import pytest
from conda.auxlib.collection import AttrDict
from conda.auxlib.ish import dals
from conda._vendor.toolz.itertoolz import concat
from conda.base.constants import PathConflict, ChannelPriority
from conda.base.context import context, reset_context, conda_tests_ctxt_mgmt_def_pol
from conda.common.compat import odict, iteritems
from conda.common.configuration import ValidationError, YamlRawParameter
from conda.common.io import env_var, env_vars
from conda.common.path import expand, win_path_backout
from conda.common.url import join_url, path_to_url
from conda.common.serialize import yaml_round_trip_load
from conda.core.package_cache_data import PackageCacheData
from conda.gateways.disk.create import mkdir_p, create_package_cache_directory
from conda.gateways.disk.delete import rm_rf
from conda.gateways.disk.permissions import make_read_only
from conda.gateways.disk.update import touch
from conda.models.channel import Channel
from conda.models.match_spec import MatchSpec
from conda.utils import on_win
from ..helpers import tempdir
class ContextCustomRcTests(TestCase):
def setUp(self):
string = dals("""
custom_channels:
darwin: https://some.url.somewhere/stuff
chuck: http://another.url:8080/with/path
custom_multichannels:
michele:
- https://do.it.with/passion
- learn_from_every_thing
steve:
- more-downloads
migrated_custom_channels:
darwin: s3://just/cant
chuck: file:///var/lib/repo/
migrated_channel_aliases:
- https://conda.anaconda.org
channel_alias: ftp://new.url:8082
conda-build:
root-dir: /some/test/path
proxy_servers:
http: http://user:[email protected]:8080
https: none
ftp:
sftp: ''
ftps: false
rsync: 'false'
aggressive_update_packages: []
channel_priority: false
""")
reset_context(())
rd = odict(testdata=YamlRawParameter.make_raw_parameters('testdata', yaml_round_trip_load(string)))
context._set_raw_data(rd)
def tearDown(self):
reset_context()
def test_migrated_custom_channels(self):
assert Channel('https://some.url.somewhere/stuff/darwin/noarch/a-mighty-fine.tar.bz2').canonical_name == 'darwin'
assert Channel('s3://just/cant/darwin/noarch/a-mighty-fine.tar.bz2').canonical_name == 'darwin'
assert Channel('s3://just/cant/darwin/noarch/a-mighty-fine.tar.bz2').urls() == [
'https://some.url.somewhere/stuff/darwin/noarch']
def test_old_channel_alias(self):
platform = context.subdir
cf_urls = ["ftp://new.url:8082/conda-forge/%s" % platform,
"ftp://new.url:8082/conda-forge/noarch"]
assert Channel('conda-forge').urls() == cf_urls
url = "https://conda.anaconda.org/conda-forge/osx-64/some-great-package.tar.bz2"
assert Channel(url).canonical_name == 'conda-forge'
assert Channel(url).base_url == 'ftp://new.url:8082/conda-forge'
assert Channel(url).urls() == [
'ftp://new.url:8082/conda-forge/osx-64',
'ftp://new.url:8082/conda-forge/noarch'
]
assert Channel("https://conda.anaconda.org/conda-forge/label/dev/linux-64/"
"some-great-package.tar.bz2").urls() == [
"ftp://new.url:8082/conda-forge/label/dev/linux-64",
"ftp://new.url:8082/conda-forge/label/dev/noarch",
]
def test_signing_metadata_url_base(self):
SIGNING_URL_BASE = "https://conda.example.com/pkgs"
string = f"signing_metadata_url_base: {SIGNING_URL_BASE}"
reset_context()
rd = odict(testdata=YamlRawParameter.make_raw_parameters('testdata', yaml_round_trip_load(string)))
context._set_raw_data(rd)
assert context.signing_metadata_url_base == SIGNING_URL_BASE
def test_signing_metadata_url_base_empty_default_channels(self):
string = dals("""
default_channels: []
""")
reset_context()
rd = odict(testdata=YamlRawParameter.make_raw_parameters('testdata', yaml_round_trip_load(string)))
context._set_raw_data(rd)
        assert len(context.default_channels) == 0
assert context.signing_metadata_url_base is None
def test_client_ssl_cert(self):
string = dals("""
client_ssl_cert_key: /some/key/path
""")
reset_context()
rd = odict(testdata=YamlRawParameter.make_raw_parameters('testdata', yaml_round_trip_load(string)))
context._set_raw_data(rd)
pytest.raises(ValidationError, context.validate_configuration)
def test_conda_envs_path(self):
saved_envs_path = os.environ.get('CONDA_ENVS_PATH')
beginning = "C:" + os.sep if on_win else os.sep
path1 = beginning + os.sep.join(['my', 'envs', 'dir', '1'])
path2 = beginning + os.sep.join(['my', 'envs', 'dir', '2'])
try:
os.environ['CONDA_ENVS_PATH'] = path1
reset_context()
assert context.envs_dirs[0] == path1
os.environ['CONDA_ENVS_PATH'] = os.pathsep.join([path1, path2])
reset_context()
assert context.envs_dirs[0] == path1
assert context.envs_dirs[1] == path2
finally:
if saved_envs_path:
os.environ['CONDA_ENVS_PATH'] = saved_envs_path
else:
del os.environ['CONDA_ENVS_PATH']
def test_conda_bld_path(self):
conda_bld_path = join(gettempdir(), 'conda-bld')
conda_bld_url = path_to_url(conda_bld_path)
try:
mkdir_p(conda_bld_path)
with env_var('CONDA_BLD_PATH', conda_bld_path, stack_callback=conda_tests_ctxt_mgmt_def_pol):
assert len(context.conda_build_local_paths) >= 1
assert context.conda_build_local_paths[0] == conda_bld_path
channel = Channel('local')
assert channel.channel_name == "local"
assert channel.channel_location is None
assert channel.platform is None
assert channel.package_filename is None
assert channel.auth is None
assert channel.token is None
assert channel.scheme is None
assert channel.canonical_name == "local"
assert channel.url() is None
urls = list(concat((
join_url(url, context.subdir),
join_url(url, 'noarch'),
) for url in context.conda_build_local_urls))
assert channel.urls() == urls
channel = Channel(conda_bld_url)
assert channel.canonical_name == "local"
assert channel.platform is None
assert channel.package_filename is None
assert channel.auth is None
assert channel.token is None
assert channel.scheme == "file"
assert channel.urls() == [
join_url(conda_bld_url, context.subdir),
join_url(conda_bld_url, 'noarch'),
]
assert channel.url() == join_url(conda_bld_url, context.subdir)
assert channel.channel_name.lower() == win_path_backout(conda_bld_path).lstrip('/').lower()
assert channel.channel_location == '' # location really is an empty string; all path information is in channel_name
assert channel.canonical_name == "local"
finally:
rm_rf(conda_bld_path)
def test_custom_multichannels(self):
assert context.custom_multichannels['michele'] == (
Channel('passion'),
Channel('learn_from_every_thing'),
)
def test_restore_free_channel(self):
assert 'https://repo.anaconda.com/pkgs/free' not in context.default_channels
with env_var("CONDA_RESTORE_FREE_CHANNEL", 'true', stack_callback=conda_tests_ctxt_mgmt_def_pol):
assert context.default_channels.index('https://repo.anaconda.com/pkgs/free') == 1
def test_proxy_servers(self):
assert context.proxy_servers['http'] == 'http://user:[email protected]:8080'
assert context.proxy_servers['https'] is None
assert context.proxy_servers['ftp'] is None
assert context.proxy_servers['sftp'] == ''
assert context.proxy_servers['ftps'] == 'False'
assert context.proxy_servers['rsync'] == 'false'
def test_conda_build_root_dir(self):
assert context.conda_build['root-dir'] == "/some/test/path"
def test_clobber_enum(self):
with env_var("CONDA_PATH_CONFLICT", 'prevent', stack_callback=conda_tests_ctxt_mgmt_def_pol):
assert context.path_conflict == PathConflict.prevent
def test_context_parameter_map(self):
all_parameter_names = context.list_parameters()
all_mapped_parameter_names = tuple(chain.from_iterable(context.category_map.values()))
unmapped_parameter_names = set(all_parameter_names) - set(all_mapped_parameter_names)
assert not unmapped_parameter_names, unmapped_parameter_names
assert len(all_parameter_names) == len(all_mapped_parameter_names)
def test_context_parameters_have_descriptions(self):
skip_categories = ('CLI-only', 'Hidden and Undocumented')
documented_parameter_names = chain.from_iterable((
parameter_names for category, parameter_names in iteritems(context.category_map)
if category not in skip_categories
))
from pprint import pprint
for name in documented_parameter_names:
description = context.get_descriptions()[name]
pprint(context.describe_parameter(name))
def test_local_build_root_custom_rc(self):
assert context.local_build_root == abspath("/some/test/path")
test_path_1 = join(os.getcwd(), 'test_path_1')
with env_var("CONDA_CROOT", test_path_1, stack_callback=conda_tests_ctxt_mgmt_def_pol):
assert context.local_build_root == test_path_1
test_path_2 = join(os.getcwd(), 'test_path_2')
with env_var("CONDA_BLD_PATH", test_path_2, stack_callback=conda_tests_ctxt_mgmt_def_pol):
assert context.local_build_root == test_path_2
def test_default_target_is_root_prefix(self):
assert context.target_prefix == context.root_prefix
def test_target_prefix(self):
with tempdir() as prefix:
mkdir_p(join(prefix, 'first', 'envs'))
mkdir_p(join(prefix, 'second', 'envs'))
create_package_cache_directory(join(prefix, 'first', 'pkgs'))
create_package_cache_directory(join(prefix, 'second', 'pkgs'))
envs_dirs = (join(prefix, 'first', 'envs'), join(prefix, 'second', 'envs'))
with env_var('CONDA_ENVS_DIRS', os.pathsep.join(envs_dirs), stack_callback=conda_tests_ctxt_mgmt_def_pol):
# with both dirs writable, choose first
reset_context((), argparse_args=AttrDict(name='blarg', func='create'))
assert context.target_prefix == join(envs_dirs[0], 'blarg')
# with first dir read-only, choose second
PackageCacheData._cache_.clear()
make_read_only(join(envs_dirs[0], '.conda_envs_dir_test'))
reset_context((), argparse_args=AttrDict(name='blarg', func='create'))
assert context.target_prefix == join(envs_dirs[1], 'blarg')
# if first dir is read-only but environment exists, choose first
PackageCacheData._cache_.clear()
mkdir_p(join(envs_dirs[0], 'blarg'))
touch(join(envs_dirs[0], 'blarg', 'history'))
reset_context((), argparse_args=AttrDict(name='blarg', func='create'))
assert context.target_prefix == join(envs_dirs[0], 'blarg')
def test_aggressive_update_packages(self):
assert context.aggressive_update_packages == tuple()
specs = ['certifi', 'openssl>=1.1']
with env_var('CONDA_AGGRESSIVE_UPDATE_PACKAGES', ','.join(specs), stack_callback=conda_tests_ctxt_mgmt_def_pol):
assert context.aggressive_update_packages == tuple(MatchSpec(s) for s in specs)
def test_channel_priority(self):
assert context.channel_priority == ChannelPriority.DISABLED
def test_cuda_detection(self):
        # confirm that CUDA detection doesn't raise an exception
version = context.cuda_version
assert version is None or isinstance(version, str)
def test_cuda_override(self):
with env_var('CONDA_OVERRIDE_CUDA', '4.5'):
version = context.cuda_version
assert version == '4.5'
def test_cuda_override_none(self):
with env_var('CONDA_OVERRIDE_CUDA', ''):
version = context.cuda_version
assert version is None
def test_threads(self):
default_value = None
assert context.default_threads == default_value
assert context.repodata_threads == default_value
assert context.verify_threads == 1
assert context.execute_threads == 1
with env_var('CONDA_DEFAULT_THREADS', '3',
stack_callback=conda_tests_ctxt_mgmt_def_pol):
assert context.default_threads == 3
assert context.verify_threads == 3
assert context.repodata_threads == 3
assert context.execute_threads == 3
with env_var('CONDA_VERIFY_THREADS', '3',
stack_callback=conda_tests_ctxt_mgmt_def_pol):
assert context.default_threads == default_value
assert context.verify_threads == 3
assert context.repodata_threads == default_value
assert context.execute_threads == 1
with env_var('CONDA_REPODATA_THREADS', '3',
stack_callback=conda_tests_ctxt_mgmt_def_pol):
assert context.default_threads == default_value
assert context.verify_threads == 1
assert context.repodata_threads == 3
assert context.execute_threads == 1
with env_var('CONDA_EXECUTE_THREADS', '3',
stack_callback=conda_tests_ctxt_mgmt_def_pol):
assert context.default_threads == default_value
assert context.verify_threads == 1
assert context.repodata_threads == default_value
assert context.execute_threads == 3
with env_vars({'CONDA_EXECUTE_THREADS': '3',
'CONDA_DEFAULT_THREADS': '1'},
stack_callback=conda_tests_ctxt_mgmt_def_pol):
assert context.default_threads == 1
assert context.verify_threads == 1
assert context.repodata_threads == 1
assert context.execute_threads == 3
def test_channels_defaults(self):
"""
        Test when no channels are provided on the CLI
"""
reset_context(())
assert context.channels == ('defaults',)
def test_channels_defaults_condarc(self):
"""
        Test when no channels are provided on the CLI, but some are in condarc
"""
reset_context(())
string = dals("""
channels: ['defaults', 'conda-forge']
""")
rd = odict(testdata=YamlRawParameter.make_raw_parameters('testdata', yaml_round_trip_load(string)))
context._set_raw_data(rd)
assert context.channels == ('defaults', 'conda-forge')
def test_specify_channels_cli_adding_defaults_no_condarc(self):
"""
        When no channels have been specified in condarc, 'defaults'
        should be present when specifying a channel on the CLI
"""
reset_context((), argparse_args=AttrDict(channel=['conda-forge']))
assert context.channels == ('conda-forge', 'defaults')
def test_specify_channels_cli_condarc(self):
"""
        When channels have been specified in condarc, those channels
        should be used along with the one specified on the CLI
"""
reset_context((), argparse_args=AttrDict(channel=['conda-forge']))
string = dals("""
channels: ['defaults', 'conda-forge']
""")
rd = odict(testdata=YamlRawParameter.make_raw_parameters('testdata', yaml_round_trip_load(string)))
context._set_raw_data(rd)
assert context.channels == ('defaults', 'conda-forge')
def test_specify_different_channels_cli_condarc(self):
"""
        When channels have been specified in condarc, those channels
        should be used along with the one specified on the CLI.
        In this test, the channel given on the CLI differs from condarc,
        so 'defaults' should not be added
"""
reset_context((), argparse_args=AttrDict(channel=['other']))
string = dals("""
channels: ['conda-forge']
""")
rd = odict(testdata=YamlRawParameter.make_raw_parameters('testdata', yaml_round_trip_load(string)))
context._set_raw_data(rd)
assert context.channels == ('conda-forge', 'other')
def test_specify_same_channels_cli_as_in_condarc(self):
"""
        When channels have been specified in condarc, those channels
        should be used along with the one specified on the CLI.
        In this test, the channel given on the CLI is the same as in condarc,
        so 'defaults' should not be added
See https://github.com/conda/conda/issues/10732
"""
reset_context((), argparse_args=AttrDict(channel=['conda-forge']))
string = dals("""
channels: ['conda-forge']
""")
rd = odict(testdata=YamlRawParameter.make_raw_parameters('testdata', yaml_round_trip_load(string)))
context._set_raw_data(rd)
assert context.channels == ('conda-forge',)
def test_expandvars(self):
"""
Environment variables should be expanded in settings that have expandvars=True.
"""
def _get_expandvars_context(attr, config_expr, env_value):
with mock.patch.dict(os.environ, {"TEST_VAR": env_value}):
reset_context(())
string = f"{attr}: {config_expr}"
rd = odict(testdata=YamlRawParameter.make_raw_parameters('testdata', yaml_round_trip_load(string)))
context._set_raw_data(rd)
return getattr(context, attr)
ssl_verify = _get_expandvars_context("ssl_verify", "${TEST_VAR}", "yes")
assert ssl_verify
for attr, env_value in [
("client_ssl_cert", "foo"),
("client_ssl_cert_key", "foo"),
("channel_alias", "http://foo"),
]:
value = _get_expandvars_context(attr, "${TEST_VAR}", env_value)
assert value == env_value
for attr in [
"migrated_custom_channels",
"proxy_servers",
]:
value = _get_expandvars_context("proxy_servers", "{'x': '${TEST_VAR}'}", "foo")
assert value == {"x": "foo"}
for attr in [
"channels",
"default_channels",
"whitelist_channels",
]:
value = _get_expandvars_context(attr, "['${TEST_VAR}']", "foo")
assert value == ("foo",)
custom_channels = _get_expandvars_context("custom_channels", "{'x': '${TEST_VAR}'}", "http://foo")
assert custom_channels["x"].location == "foo"
custom_multichannels = _get_expandvars_context("custom_multichannels", "{'x': ['${TEST_VAR}']}", "http://foo")
assert len(custom_multichannels["x"]) == 1
assert custom_multichannels["x"][0].location == "foo"
envs_dirs = _get_expandvars_context("envs_dirs", "['${TEST_VAR}']", "/foo")
assert any("foo" in d for d in envs_dirs)
pkgs_dirs = _get_expandvars_context("pkgs_dirs", "['${TEST_VAR}']", "/foo")
assert any("foo" in d for d in pkgs_dirs)
class ContextDefaultRcTests(TestCase):
def test_subdirs(self):
assert context.subdirs == (context.subdir, 'noarch')
subdirs = ('linux-highest', 'linux-64', 'noarch')
with env_var('CONDA_SUBDIRS', ','.join(subdirs), stack_callback=conda_tests_ctxt_mgmt_def_pol):
assert context.subdirs == subdirs
def test_local_build_root_default_rc(self):
if context.root_writable:
assert context.local_build_root == join(context.root_prefix, 'conda-bld')
else:
assert context.local_build_root == expand('~/conda-bld')
|
[] |
[] |
[
"CONDA_ENVS_PATH"
] |
[]
|
["CONDA_ENVS_PATH"]
|
python
| 1 | 0 | |
test/unit/agent/common/util/container.py
|
# -*- coding: utf-8 -*-
import os
from hamcrest import *
from test.base import BaseTestCase, container_test
from amplify.agent.common.util import container
from amplify.agent.common.context import context
__author__ = "Grant Hulegaard"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "Grant Hulegaard"
__email__ = "[email protected]"
@container_test
class ContainerTestCase(BaseTestCase):
def test_is_docker(self):
flag = container.is_docker()
assert_that(flag, equal_to(True))
def test_is_lxc(self):
flag = container.is_lxc()
assert_that(flag, equal_to(False))
os.environ.setdefault('container', 'lxc')
flag = container.is_lxc()
assert_that(flag, equal_to(True))
def test_container_environment(self):
container_type = container.container_environment()
assert_that(container_type, equal_to('docker'))
def test_context(self):
assert_that(context.container_type, equal_to('docker'))
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
etc/openshift_settings.py
|
# coding: utf-8
import os
# MONGO
MONGODB_DB = os.environ['OPENSHIFT_APP_NAME']
MONGODB_HOST = os.environ['OPENSHIFT_MONGODB_DB_HOST']
MONGODB_PORT = int(os.environ['OPENSHIFT_MONGODB_DB_PORT'])
MONGODB_USERNAME = os.environ['OPENSHIFT_MONGODB_DB_USERNAME']
MONGODB_PASSWORD = os.environ['OPENSHIFT_MONGODB_DB_PASSWORD']
SECURITY_REGISTERABLE = False
SECURITY_CHANGEABLE = False
SECURITY_RECOVERABLE = False
ADMIN_VIEW_EXCLUDE = [
'quokka.modules.accounts.models.User',
'quokka.modules.accounts.models.Role',
'quokka.modules.accounts.models.Connection'
]
# Logger
LOGGER_ENABLED = True
LOGGER_LEVEL = 'DEBUG'
LOGGER_FORMAT = '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'
LOGGER_DATE_FORMAT = '%d.%m %H:%M:%S'
if os.environ['OPENSHIFT_APP_NAME'] == 'quokkadevelopment':
DEBUG_TOOLBAR_ENABLED = True
DEBUG = True
SHORTENER_ENABLED = True
# SERVER_NAME = os.environ['OPENSHIFT_APP_DNS']
MAP_STATIC_ROOT = (
'/robots.txt',
'/sitemap.xml',
'/favicon.ico',
'/vaddy-c603c78bbeba8d9.html'
)
ADMIN_HEADER = (
"<ul>"
"<li class='alert'>User management is disabled in demo mode!</li>"
"<li class='alert'>Demo server is limited, so it can be slow :(</li>"
"<li class='alert'>"
"<a href='https://quokkaslack.herokuapp.com/'>"
"<img src='https://camo.githubusercontent.com/4a26f42037d8f75f8826561de4"
"c0ad2ae8ac2701/68747470733a2f2f696d672e736869656c64732e696f2f6261646765"
"2f4a4f494e5f534c41434b2d434841542d677265656e2e737667' "
"alt='Join Slack Chat'></a>"
"</li>"
"</ul>"
)
|
[] |
[] |
[
"OPENSHIFT_MONGODB_DB_HOST",
"OPENSHIFT_APP_NAME",
"OPENSHIFT_MONGODB_DB_PORT",
"OPENSHIFT_APP_DNS",
"OPENSHIFT_MONGODB_DB_USERNAME",
"OPENSHIFT_MONGODB_DB_PASSWORD"
] |
[]
|
["OPENSHIFT_MONGODB_DB_HOST", "OPENSHIFT_APP_NAME", "OPENSHIFT_MONGODB_DB_PORT", "OPENSHIFT_APP_DNS", "OPENSHIFT_MONGODB_DB_USERNAME", "OPENSHIFT_MONGODB_DB_PASSWORD"]
|
python
| 6 | 0 | |
pkg/defaults/defaults_linux.go
|
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package defaults
import (
"fmt"
"os"
"path/filepath"
"github.com/containerd/containerd/plugin"
gocni "github.com/containerd/go-cni"
"github.com/containerd/nerdctl/pkg/rootlessutil"
"github.com/sirupsen/logrus"
)
const AppArmorProfileName = "dacsctl-default"
const Runtime = plugin.RuntimeRuncV2
func DataRoot() string {
if !rootlessutil.IsRootless() {
return "/var/lib/dacsctl"
}
xdh, err := rootlessutil.XDGDataHome()
if err != nil {
panic(err)
}
return filepath.Join(xdh, "nerdctl")
}
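// CNIPath returns the first existing CNI plugin directory from a list of
// well-known locations, preferring per-user paths when running rootless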
func CNIPath() string {
candidates := []string{
"/usr/local/libexec/cni",
"/usr/local/lib/cni",
"/usr/libexec/cni", // Fedora
"/usr/lib/cni", // debian (containernetworking-plugins)
}
if rootlessutil.IsRootless() {
home := os.Getenv("HOME")
if home == "" {
panic("environment variable HOME is not set")
}
candidates = append([]string{
// NOTE: These user paths are not defined in XDG
filepath.Join(home, ".local/libexec/cni"),
filepath.Join(home, ".local/lib/cni"),
filepath.Join(home, "opt/cni/bin"),
}, candidates...)
}
for _, f := range candidates {
if _, err := os.Stat(f); err == nil {
return f
}
}
// default: /opt/cni/bin
return gocni.DefaultCNIDir
}
func CNINetConfPath() string {
if !rootlessutil.IsRootless() {
return gocni.DefaultNetDir
}
xch, err := rootlessutil.XDGConfigHome()
if err != nil {
panic(err)
}
return filepath.Join(xch, "cni/net.d")
}
func BuildKitHost() string {
if !rootlessutil.IsRootless() {
return "unix:///run/buildkit/buildkitd.sock"
}
xdr, err := rootlessutil.XDGRuntimeDir()
if err != nil {
logrus.Warn(err)
xdr = fmt.Sprintf("/run/user/%d", rootlessutil.ParentEUID())
}
return fmt.Sprintf("unix://%s/buildkit/buildkitd.sock", xdr)
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
users-api/routes.py
|
from flask import jsonify, request
from flask_restx import Resource, reqparse, fields, marshal_with
import requests
import redis
import os
import logging
import time
import datetime
import json
from app import api, db
from models import User
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
user_fields = {
"id": fields.Integer,
"uuid": fields.Integer,
"status": fields.String
}
@api.route("/users")
class Users(Resource):
users_post_reqparser = reqparse.RequestParser()
users_post_reqparser.add_argument(
"uuid",
type=int,
location="json",
required=True,
help="Please provide the UUID -",
)
@api.expect(users_post_reqparser)
@marshal_with(user_fields)
def post(self):
args = self.users_post_reqparser.parse_args()
new_user = User(uuid=args["uuid"])
db.session.add(new_user)
db.session.flush()
db.session.commit()
return new_user, 201
@marshal_with(user_fields)
def get(self):
# TODO: some authorization would be nice
return User.query.all(), 200
@api.route("/usersByUUID/<int:uuid>")
class UserByUUID(Resource):
@marshal_with(user_fields)
def get(self, uuid):
user = User.query.filter_by(uuid=uuid).first()
if user is None:
# we should really return 404 here and not do POST magic
# in a GET request, but this will make some things much easier...
user = User(uuid=uuid)
db.session.add(user)
db.session.flush()
db.session.commit()
return user, 200
@api.route("/users/<int:id>")
class SingleUser(Resource):
user_put_reqparser = reqparse.RequestParser()
user_put_reqparser.add_argument(
"status",
type=str,
location="json",
required=True,
help="Please provide the status value (healty, covid_positive, covid_negative) -",
)
@marshal_with(user_fields)
def get(self, id):
found_user = User.query.filter_by(uuid=id).first()
if found_user is None:
api.abort(404, "User does not exist.")
return found_user, 200
@marshal_with(user_fields)
def put(self, id):
user = User.query.filter_by(uuid=id).first()
if user is None:
api.abort(404, "User does not exist.")
args = self.user_put_reqparser.parse_args()
user.status = args["status"]
db.session.commit()
if args["status"] == "covid_positive":
self._submit_filtering_jobs(user.uuid)
return user, 200
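# Illustrative request (hypothetical host and id): PUT /users/42 with the JSON
# body {"status": "covid_positive"} updates the user and, per the branch above,
# enqueues the filtering jobs.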
def delete(self, id):
user = User.query.filter_by(uuid=id).first()
if user is None:
api.abort(404, "User does not exist.")
db.session.delete(user)
db.session.commit()
return {"msg": "ok"}, 200
@staticmethod
def _chunks(l, n):
n = max(1, n)
return (l[i : i + n] for i in range(0, len(l), n))
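# Sketch of the helper above (hypothetical values):
# _chunks([1, 2, 3, 4, 5], 2) yields the batches [1, 2], [3, 4], [5].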
def _submit_filtering_jobs(self, uuid):
"""
Here we create the task and put it on the job queue.
"""
# Some optimization: we make a request to the Location API
# to get all the geohash prefixes for all locations the diagnosed patient
# has visited in the last two weeks
two_weeks_ago = datetime.date.today() - datetime.timedelta(14)
params = {
"from": int(two_weeks_ago.strftime("%s")),
"to": int(time.time()),
"unit": "seconds",
}
# TODO: Do not hardcode URIs or ports, use env vars instead
# TODO: Do not assume that the period is always 2 weeks long, make it parametrized
location_api_resp = requests.get(
f"http://location-api:5000/geohashRegionsForUser/{uuid}", params=params
)
if location_api_resp.status_code != 200:
logger.warning(location_api_resp)
api.abort(
500, "There was a problem when requesting data from the Location API"
)
visited_regions_geohash_prefixes = location_api_resp.json()
logger.info(f"Visited Regions for diagonzed patient: {str(visited_regions_geohash_prefixes)}")
location_api_resp_users = requests.get("http://location-api:5000/users")
if location_api_resp_users.status_code != 200:
logger.warning(location_api_resp_users)
api.abort(
500, "There was a problem when requesting data from the Location API"
)
all_influx_users = list(set(location_api_resp_users.json()) - {str(uuid)})
logger.info(f"All Influx users without diagnozed patient: {str(all_influx_users)}")
# So, we should split the whole job into roughly N*k jobs, where N is the
# number of workers listening on the queue, so that each worker will get roughly
# k tasks to execute (so we can achieve nice load balancing).
# Let's assume for simplicity now that we have always 3 workers and k = 1.
n_workers = 3
task_size = len(all_influx_users) // n_workers
all_influx_users_partitioned = SingleUser._chunks(all_influx_users, task_size)
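# Hedged walk-through (hypothetical numbers): with 10 users and n_workers == 3,
# task_size == 3, so the partition yields batches of sizes 3, 3, 3 and 1.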
# Create the tasks and put them onto the Redis queue
redis_instance = redis.Redis(
host=os.getenv("REDIS_HOST", "queue"),
port=int(os.getenv("REDIS_PORT", 6379)),
db=int(os.getenv("REDIS_DB_ID", 0)),
)
redis_namespace = os.getenv("REDIS_NAMESPACE", "worker")
redis_collection = os.getenv("REDIS_COLLECTION", "jobs")
logger.info(f"Connected with Redis ({redis_namespace}:{redis_collection})")
for idx, users_batch in enumerate(all_influx_users_partitioned):
job = {
"type": "scan_users_locations",
"args": {
"user_id_range": users_batch,
"diagnozed_uuid": uuid,
"diagnozed_visited_regions": visited_regions_geohash_prefixes,
},
}
redis_instance.rpush(
f"{redis_namespace}:{redis_collection}", json.dumps(job)
)
logger.info(
f"Successfully pushed job #{idx} to the Job Queue:\n{json.dumps(job)}"
)
logger.info("Finished pushing jobs to the Queue.")
|
[] |
[] |
[
"REDIS_PORT",
"REDIS_HOST",
"REDIS_COLLECTION",
"REDIS_DB_ID",
"REDIS_NAMESPACE"
] |
[]
|
["REDIS_PORT", "REDIS_HOST", "REDIS_COLLECTION", "REDIS_DB_ID", "REDIS_NAMESPACE"]
|
python
| 5 | 0 | |
sdks/python/apache_beam/io/source_test_utils_test.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
import logging
import tempfile
import unittest
from apache_beam.io import source_test_utils
from apache_beam.io.filebasedsource_test import LineSource
class SourceTestUtilsTest(unittest.TestCase):
def _create_file_with_data(self, lines):
assert isinstance(lines, list)
with tempfile.NamedTemporaryFile(delete=False) as f:
for line in lines:
f.write(line + b'\n')
return f.name
def _create_data(self, num_lines):
return [b'line ' + str(i).encode('latin1') for i in range(num_lines)]
def _create_source(self, data):
source = LineSource(self._create_file_with_data(data))
# By performing initial splitting, we can get a source for a single file.
# This source, which uses OffsetRangeTracker, is better suited for testing
# than the original source for a file-pattern.
for bundle in source.split(float('inf')):
return bundle.source
def test_read_from_source(self):
data = self._create_data(100)
source = self._create_source(data)
self.assertCountEqual(
data, source_test_utils.read_from_source(source, None, None))
def test_source_equals_reference_source(self):
data = self._create_data(100)
reference_source = self._create_source(data)
sources_info = [(split.source, split.start_position, split.stop_position)
for split in reference_source.split(desired_bundle_size=50)]
if len(sources_info) < 2:
raise ValueError(
'Test is too trivial since splitting only generated %d '
'bundles. Please adjust the test so that at least '
'two splits get generated.' % len(sources_info))
source_test_utils.assert_sources_equal_reference_source(
(reference_source, None, None), sources_info)
def test_split_at_fraction_successful(self):
data = self._create_data(100)
source = self._create_source(data)
result1 = source_test_utils.assert_split_at_fraction_behavior(
source,
10,
0.5,
source_test_utils.ExpectedSplitOutcome.MUST_SUCCEED_AND_BE_CONSISTENT)
result2 = source_test_utils.assert_split_at_fraction_behavior(
source,
20,
0.5,
source_test_utils.ExpectedSplitOutcome.MUST_SUCCEED_AND_BE_CONSISTENT)
self.assertEqual(result1, result2)
self.assertEqual(100, result1[0] + result1[1])
result3 = source_test_utils.assert_split_at_fraction_behavior(
source,
30,
0.8,
source_test_utils.ExpectedSplitOutcome.MUST_SUCCEED_AND_BE_CONSISTENT)
result4 = source_test_utils.assert_split_at_fraction_behavior(
source,
50,
0.8,
source_test_utils.ExpectedSplitOutcome.MUST_SUCCEED_AND_BE_CONSISTENT)
self.assertEqual(result3, result4)
self.assertEqual(100, result3[0] + result3[1])
self.assertTrue(result1[0] < result3[0])
self.assertTrue(result1[1] > result3[1])
def test_split_at_fraction_fails(self):
data = self._create_data(100)
source = self._create_source(data)
result = source_test_utils.assert_split_at_fraction_behavior(
source, 90, 0.1, source_test_utils.ExpectedSplitOutcome.MUST_FAIL)
self.assertEqual(result[0], 100)
self.assertEqual(result[1], -1)
with self.assertRaises(ValueError):
source_test_utils.assert_split_at_fraction_behavior(
source, 10, 0.5, source_test_utils.ExpectedSplitOutcome.MUST_FAIL)
def test_split_at_fraction_binary(self):
data = self._create_data(100)
source = self._create_source(data)
stats = source_test_utils.SplitFractionStatistics([], [])
source_test_utils.assert_split_at_fraction_binary(
source, data, 10, 0.5, None, 0.8, None, stats)
# These lists should not be empty now.
self.assertTrue(stats.successful_fractions)
self.assertTrue(stats.non_trivial_fractions)
def test_split_at_fraction_exhaustive(self):
data = self._create_data(10)
source = self._create_source(data)
source_test_utils.assert_split_at_fraction_exhaustive(source)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
integration_test.go
|
package kioto_test
import (
"context"
"errors"
"net/http"
"net/http/httptest"
"os"
"strings"
"testing"
"time"
"github.com/mccutchen/go-httpbin/httpbin"
"github.com/stretchr/testify/suite"
"github.com/delicb/kioto"
"github.com/delicb/kioto/cliware"
"github.com/delicb/kioto/middlewares/auth"
"github.com/delicb/kioto/middlewares/responsebody"
"github.com/delicb/kioto/middlewares/retry"
)
type integrationSuite struct {
suite.Suite
server *httptest.Server
}
func (i *integrationSuite) SetupSuite() {
runIntegration := strings.ToLower(os.Getenv("KIOTO_INTEGRATION_TEST"))
var shouldRun bool
for _, val := range []string{"true", "on", "1"} {
if val == runIntegration {
shouldRun = true
}
}
if !shouldRun {
i.T().Skip()
}
i.server = httptest.NewServer(httpbin.NewHTTPBin().Handler())
}
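// To run this suite (sketch, based on the toggle above):
// KIOTO_INTEGRATION_TEST=1 go test ./...
// Accepted values are "true", "on" and "1" (matched case-insensitively).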
func (i *integrationSuite) TearDownSuite() {
i.server.Close()
}
func (i *integrationSuite) url(path string) string {
return i.server.URL + "/" + path
}
func (i *integrationSuite) TestGet() {
client := kioto.New()
resp, err := client.Request().Get().URL(i.server.URL).Send()
i.Require().NoError(err)
i.Equal(200, resp.StatusCode)
}
func (i *integrationSuite) TestPost() {
client := kioto.New()
resp, err := client.Request().Post().URL(i.url("post")).Send()
i.Require().NoError(err)
i.Equal(200, resp.StatusCode)
}
func (i *integrationSuite) TestTimeout() {
client := kioto.New(kioto.HTTPClient(&http.Client{
Timeout: 1 * time.Second,
}))
_, err := client.Request().Get().URL(i.url("delay/2")).Send()
i.Require().Error(err)
i.Contains(strings.ToLower(err.Error()), "timeout")
}
func (i *integrationSuite) TestContextTimeout() {
client := kioto.New()
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
_, err := client.Request().Get().WithContext(ctx).URL(i.url("delay/2")).Send()
i.Require().Error(err)
i.Contains(strings.ToLower(err.Error()), "context deadline exceeded")
}
func (i *integrationSuite) TestJSON() {
client := kioto.New()
var response = make(map[string]interface{})
resp, err := client.Request().
Get().
URL(i.url("user-agent")).
Use(responsebody.JSON(&response)).
Send()
i.Require().NoError(err)
i.Equal(200, resp.StatusCode)
i.Contains(response, "user-agent")
}
func (i *integrationSuite) TestRetry() {
client := kioto.New()
tryTimes := 3
tried := 0
resp, err := client.Request().
Get().
URL(i.url("status/500")).
Use(retry.SetClassifier(func(r *http.Response, err error) bool {
tried++
return r.StatusCode >= 500
})).
Use(retry.Times(tryTimes)).
Send()
i.True(tried >= tryTimes, "retry was not attempted the expected number of times")
i.Equal(500, resp.StatusCode)
i.NoError(err)
}
func (i *integrationSuite) TestDisableRetry() {
client := kioto.New(kioto.DisableRetry())
tried := 0
resp, err := client.Request().
Get().
URL(i.url("status/500")).
Use(retry.SetClassifier(func(r *http.Response, err error) bool {
tried++
// simulate retrying everything; it should not happen
// anyway because of the DisableRetry option
return true
})).
Send()
i.True(tried == 0, "Retry was attempted but was not expected to be")
i.Equal(500, resp.StatusCode)
i.NoError(err)
}
func (i *integrationSuite) TestAuth() {
client := kioto.New()
resp, err := client.Request().
Get().
URL(i.url("basic-auth/foo/bar")).
Use(auth.Basic("foo", "bar")).
Send()
i.NoError(err)
i.Equal(200, resp.StatusCode)
}
func (i *integrationSuite) TestErrors() {
client := kioto.New()
resp, err := client.Request().
Get().
URL(i.url("status/500")).
Use(cliware.ResponseProcessor(func(r *http.Response, e error) error {
if e != nil {
return e
}
if r.StatusCode >= 500 {
return errors.New("server error")
}
return nil
})).
Send()
i.Error(err)
i.Contains(err.Error(), "server error")
i.Equal(500, resp.StatusCode)
}
func (i *integrationSuite) TestClientLevelErrors() {
client := kioto.New()
client.UsePost(cliware.ResponseProcessor(func(resp *http.Response, err error) error {
if err != nil {
return err
}
if resp.StatusCode >= 500 {
return errors.New("server error")
}
return nil
}))
resp, err := client.Request().
Get().
URL(i.url("status/500")).
Send()
i.Require().Error(err)
i.Contains(err.Error(), "server error")
i.Equal(500, resp.StatusCode)
}
type errorRoundTripper struct {
errToReturn error
}
func (rt *errorRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
return nil, rt.errToReturn
}
func (i *integrationSuite) TestCustomHTTPClient() {
httpClient := &http.Client{
Transport: &errorRoundTripper{errors.New("round trip error")},
}
client := kioto.New(kioto.HTTPClient(httpClient))
_, err := client.Request().Get().URL("foobar").Send()
i.Error(err)
i.Regexp("round trip error", err.Error())
}
func TestIntegrationSuite(t *testing.T) {
suite.Run(t, new(integrationSuite))
}
|
[
"\"KIOTO_INTEGRATION_TEST\""
] |
[] |
[
"KIOTO_INTEGRATION_TEST"
] |
[]
|
["KIOTO_INTEGRATION_TEST"]
|
go
| 1 | 0 | |
archive/archive.py
|
import re
import os
"""Origin: main.py used to activate a script for conversation
Reason for archive: for future improvement of the bot's conversational script"""
@app.post("/activate-script/")
async def activate(script_id):
"""
Endpoint for the front end to utilize in the toggle function on the
Script Management modal see:
"""
BotScripts.activate_script(script_id)
@app.get("/select-all-from-bot-scripts/")
async def get_all_from_bot_scripts():
"""
Selects all from 'bot_scripts' table to populate Script Management modal.
"""
return DB.get_all_script_data()
"""Origin: tweep_dm.py
Reason for Archive: for future conversational state of the Twitter bot"""
list_of_A_B_txts = [
'Hi! I am a bot for Blue Witness, a project by @humanrights1st. We noticed your tweet may involve police misconduct, please confirm the date of this incident here: ',
'Hi! I am a bot for Blue Witness, a project by @humanrights1st. We noticed your tweet may involve police misconduct, please confirm the location of this incident here: ',
]
def get_tweet_id(tweet_url):
"""Get the tweet ID from the tweet URL"""
tweet_id = re.search(r'\d+$', tweet_url)
return tweet_id.group(0)
def form_tweet(tweet_source: str, information_requested: str):
tweet_id = get_tweet_id(tweet_source)
if information_requested == 'date':
tweet_txt = list_of_A_B_txts[0]
elif information_requested == 'location':
tweet_txt = list_of_A_B_txts[1]
else:
return {}
link = os.getenv("FORM_URL")
reply_message = f"{tweet_txt} \n {link}"
tweet = api.update_status(
reply_message,
in_reply_to_status_id=tweet_id,
auto_populate_reply_metadata=True,
)
return tweet
"""Origin: db.py
Reason for Archive: the class never made it to deployment"""
class BotScripts(Base):
__tablename__ = "bot_scripts"
script_id = Column(
Integer, primary_key=True, nullable=False, unique=True)
script = Column(String(255))
convo_node = Column(Integer)
use_count = Column(Integer)
positive_count = Column(Integer)
success_rate = Column(Float)
active = Column(Boolean)
def __repr__(self):
return (
"script_id:{}, script:{}, convo_node:{}, use_count:{}, positive_count:{}, success_rate:{}, active:{}"
).format(
self.script_id,
self.script,
self.convo_node,
self.use_count,
self.positive_count,
self.success_rate,
self.active
)
def activate_script(script_id):
script_id = int(script_id)
db = Database()
# Data is a BotScripts class obj
data = db.get_table(BotScripts, BotScripts.script_id, script_id)[-1][-1]
data.active = not data.active
with db.Sessionmaker() as session:
session.add(data)
session.commit()
def add_to_use_count(script_id):
"""
Uses functions from db.py as helper to increment the use_count
"""
old_count = Database.get_table(BotScripts.use_count, BotScripts.script_id, script_id)
print(old_count)
new_count = old_count[0][0] + 1
Database.bump_use_count(script_id, new_count)
def add_to_positive_count(script_id):
"""
Uses functions from db.py as helper to increment the positive_count
"""
data = Database.get_counts(script_id)
use = data[0][0]
pos = data[0][1]
pos += 1
rate = pos / use
Database.update_pos_and_success(script_id, pos, rate)
# Functions for selection of scripts
""" FUTURE update: add randomized functionality to choose between path-based
script selection based on traning from the 'script_training' and path
-generating options (the latter exist below). Possibly set this up to occur
automatically whence results from traing sessions of path-based data are
available.
Also consider setting up testing to occur automatically whence
sufficient training data becomes available. Also consider scheduling
automatic training per a given number of data points received thereafter.
Reccomend having said training take place on another optional instance
(with the bot sentiment analysis) as memory on current instance is running
low.
"""
def choose_script(self, status):
"""
Used to select a script for use by the Twitter bot given a
conversation node.
Returns a tuple containing the script and its id, both used by the
Twitter bot.
The script drives the conversation; the script_id is used in two
further function calls within the bot to update the use_count in
'bot_scripts' when the bot sends the message, as well as to update
the path in 'script_testing' after the bot pairs this script_id
with an incident_id.
-----
In a future implementation try switching between
choosing a random script and
choosing the better of two as originally coded.
-----
"""
# Pull the list of scripts for the given convo_node
script_data = Database.get_scripts_per_node(
self.convo_node_dict[status])
# Randomly select two script objects
l = len(script_data)
x = int(str(rand())[-6:])
y = int(str(rand())[-6:])
a = x % l
b = y % l
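# Hypothetical illustration of the modulo selection above: with l == 4,
# x == 123456 and y == 654321, a == 0 and b == 1, so the scripts at
# indexes 0 and 1 compete below.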
# conditional for selecting the best of two when count is achieved
if script_data[a][2] > 100 and script_data[b][2] > 100:
if script_data[a][3] >= script_data[b][3]:
use = a
else:
use = b
else:
if x >= y:
use = a
else:
use = b
return (script_data[use][0], script_data[use][1])
"""Origin: db.py
Reason for Archive: The bot never reached conversational stage"""
class Sources(Base):
__tablename__ = "sources"
source_id = Column(Integer, primary_key=True, nullable=False, unique=True)
incident_id = Column(Integer, ForeignKey("force_ranks.incident_id"))
source = Column(String(255))
def __repr__(self):
return (
"source_id:{}, incident_id:{}, sources:{}"
).format(
self.source_id,
self.incident_id,
self.source
)
class Tags(Base):
__tablename__ = "tags"
tags_id = Column(Integer, primary_key=True, nullable=False, unique=True)
incident_id = Column(Integer, ForeignKey("force_ranks.incident_id"))
tag = Column(String(40))
def __repr__(self):
return (
"tags_id:{}, incident_id:{}, sources:{}"
).format(
self.tags_id,
self.incident_id,
self.tag
)
"""Origin: db.py
Reason for Archive: methods of the Database class were never used"""
def get_counts(self, script_id):
"""
Gets use_count and positive_count from 'bot_scripts' given script_id
"""
with self.Sessionmaker() as session:
query = select(
BotScripts.use_count,
BotScripts.positive_count,
).where(BotScripts.script_id == script_id)
counts = session.execute(query).fetchall()
return counts
def bump_use_count(self, script_id, new_count):
""" Updates the use_count for a script as identified by script_id """
with self.Sessionmaker() as session:
count_dict = {"use_count": new_count}
query = (
update(BotScripts).where(
BotScripts.script_id == script_id).values(**count_dict)
)
session.execute(query)
session.commit()
def update_pos_and_success(self, script_id, positive_count, success_rate):
""" Updates the positive_count and success_rate for a given script_id """
with self.Sessionmaker() as session:
data = {"positive_count": positive_count,
"success_rate": success_rate
}
query = update(BotScripts).where(
BotScripts.script_id == script_id
).values(**data)
session.execute(query)
session.commit()
|
[] |
[] |
[
"FORM_URL"
] |
[]
|
["FORM_URL"]
|
python
| 1 | 0 | |
cmd/restic/progress.go
|
package main
import (
"fmt"
"os"
"strconv"
"time"
"github.com/restic/restic/internal/ui/progress"
)
// calculateProgressInterval returns the interval configured via RESTIC_PROGRESS_FPS
// or if unset returns an interval for 60fps on interactive terminals and 0 (=disabled)
// for non-interactive terminals or when run using the --quiet flag
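// For example (derived from the clamping below): RESTIC_PROGRESS_FPS=4 gives
// one update every 250ms, and any value above 60 is clamped to 60fps.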
func calculateProgressInterval(show bool) time.Duration {
interval := time.Second / 60
fps, err := strconv.ParseFloat(os.Getenv("RESTIC_PROGRESS_FPS"), 64)
if err == nil && fps > 0 {
if fps > 60 {
fps = 60
}
interval = time.Duration(float64(time.Second) / fps)
} else if !stdoutIsTerminal() || !show {
interval = 0
}
return interval
}
// newProgressMax returns a progress.Counter that prints to stdout.
func newProgressMax(show bool, max uint64, description string) *progress.Counter {
if !show {
return nil
}
interval := calculateProgressInterval(show)
return progress.New(interval, max, func(v uint64, max uint64, d time.Duration, final bool) {
var status string
if max == 0 {
status = fmt.Sprintf("[%s] %d %s", formatDuration(d), v, description)
} else {
status = fmt.Sprintf("[%s] %s %d / %d %s",
formatDuration(d), formatPercent(v, max), v, max, description)
}
if w := stdoutTerminalWidth(); w > 0 {
status = shortenStatus(w, status)
}
PrintProgress("%s", status)
if final {
fmt.Print("\n")
}
})
}
|
[
"\"RESTIC_PROGRESS_FPS\""
] |
[] |
[
"RESTIC_PROGRESS_FPS"
] |
[]
|
["RESTIC_PROGRESS_FPS"]
|
go
| 1 | 0 | |
pkg/cmd/create/create_micro.go
|
package create
import (
"fmt"
"github.com/jenkins-x/jx/pkg/cmd/importcmd"
"os"
"path/filepath"
"runtime"
"github.com/jenkins-x/jx/pkg/cmd/helper"
"github.com/spf13/cobra"
"github.com/jenkins-x/jx/pkg/cmd/opts"
"github.com/jenkins-x/jx/pkg/cmd/templates"
"github.com/jenkins-x/jx/pkg/log"
"github.com/jenkins-x/jx/pkg/util"
)
var (
createMicroLong = templates.LongDesc(`
Creates a new micro application and then optionally sets up CI/CD pipelines and GitOps promotion.
Micro is an application generator for gRPC services in Go with a set of tools/libraries.
This command is expected to be run within your '$GOPATH' directory. e.g. at '$GOPATH/src/github.com/myOrgOrUser/'
For more documentation about micro see: [https://github.com/microio/micro](https://github.com/microio/micro)
`)
createMicroExample = templates.Examples(`
# Create a micro application and be prompted for the folder name
jx create micro
# Create a micro application under test1
jx create micro -o test1
`)
)
// CreateMicroOptions the options for the create spring command
type CreateMicroOptions struct {
CreateProjectOptions
}
// NewCmdCreateMicro creates a command object for the "create" command
func NewCmdCreateMicro(commonOpts *opts.CommonOptions) *cobra.Command {
options := &CreateMicroOptions{
CreateProjectOptions: CreateProjectOptions{
ImportOptions: importcmd.ImportOptions{
CommonOptions: commonOpts,
},
},
}
cmd := &cobra.Command{
Use: "micro [github.com/myuser/myapp]",
Short: "Create a new micro-based application and import the generated code into Git and Jenkins for CI/CD",
Long: createMicroLong,
Example: createMicroExample,
Run: func(cmd *cobra.Command, args []string) {
options.Cmd = cmd
options.Args = args
err := options.Run()
helper.CheckErr(err)
},
}
cmd.Flags().BoolVar(&options.CommonOptions.NoBrew, opts.OptionNoBrew, false, "Disables brew package manager on MacOS when installing binary dependencies")
return cmd
}
// checkMicroInstalled lazily installs micro if it is not installed already
func (o CreateMicroOptions) checkMicroInstalled() error {
_, err := o.GetCommandOutput("", "micro", "help")
if err != nil {
log.Logger().Info("Installing micro's dependencies...")
// let's install micro
err = o.InstallBrewIfRequired()
if err != nil {
return err
}
if runtime.GOOS == "darwin" && !o.NoBrew {
err = o.RunCommand("brew", "install", "protobuf")
if err != nil {
return err
}
}
log.Logger().Info("Downloading and building micro dependencies...")
packages := []string{"github.com/golang/protobuf/proto", "github.com/golang/protobuf/protoc-gen-go", "github.com/micro/protoc-gen-micro"}
for _, p := range packages {
log.Logger().Infof("Installing %s", p)
err = o.RunCommand("go", "get", "-u", p)
if err != nil {
return fmt.Errorf("Failed to install %s: %s", p, err)
}
}
log.Logger().Info("Installed micro dependencies")
log.Logger().Info("Downloading and building micro - this can take a minute or so...")
err = o.RunCommand("go", "get", "-u", "github.com/micro/micro")
if err == nil {
log.Logger().Info("Installed micro and its dependencies!")
}
}
return err
}
// GenerateMicro creates a fresh micro project by running micro in a local shell
func (o CreateMicroOptions) GenerateMicro(dir string) error {
return o.RunCommand("micro", "new", dir)
}
// Run implements the command
func (o *CreateMicroOptions) Run() error {
gopath := os.Getenv("GOPATH")
if gopath == "" {
log.Logger().Warnf(`No $GOPATH found.
You need to have Go installed on your machine to be able to create micro services.
For instructions please see: %s
`, util.ColorInfo("https://golang.org/doc/install#install"))
return nil
}
err := o.checkMicroInstalled()
if err != nil {
return err
}
dir := ""
args := o.Args
if len(args) > 0 {
dir = args[0]
}
if dir == "" {
if o.BatchMode {
return util.MissingOption(opts.OptionOutputDir)
}
dir, err = util.PickValue("Pick a fully qualified name for the new project:", "github.com/myuser/myapp", true, "", o.In, o.Out, o.Err)
if err != nil {
return err
}
if dir == "" || dir == "." {
return fmt.Errorf("Invalid project name: %s", dir)
}
}
log.Blank()
// generate micro project
err = o.GenerateMicro(dir)
if err != nil {
return err
}
path := filepath.Join(gopath, "src", dir)
log.Logger().Infof("Created micro project at %s\n", util.ColorInfo(path))
return o.ImportCreatedProject(path)
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
src/org/nutz/lang/Lang.java
|
package org.nutz.lang;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.PrintStream;
import java.io.Reader;
import java.io.StringReader;
import java.io.Writer;
import java.lang.management.ManagementFactory;
import java.lang.reflect.Array;
import java.lang.reflect.Field;
import java.lang.reflect.GenericArrayType;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.lang.reflect.TypeVariable;
import java.lang.reflect.WildcardType;
import java.nio.charset.Charset;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Queue;
import java.util.Set;
import java.util.regex.Pattern;
import javax.crypto.Mac;
import javax.crypto.SecretKey;
import javax.crypto.spec.SecretKeySpec;
import javax.servlet.http.HttpServletRequest;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.ParserConfigurationException;
import org.nutz.castor.Castors;
import org.nutz.castor.FailToCastObjectException;
import org.nutz.dao.entity.annotation.Column;
import org.nutz.json.Json;
import org.nutz.lang.Encoding;
import org.nutz.lang.reflect.ReflectTool;
import org.nutz.lang.stream.StringInputStream;
import org.nutz.lang.stream.StringOutputStream;
import org.nutz.lang.stream.StringWriter;
import org.nutz.lang.util.Context;
import org.nutz.lang.util.NutMap;
import org.nutz.lang.util.NutType;
import org.nutz.lang.util.Regex;
import org.nutz.lang.util.SimpleContext;
/**
* These helper functions make some common Java tasks simpler
*
* @author zozoh([email protected])
* @author wendal([email protected])
* @author bonyfish([email protected])
* @author wizzer([email protected])
*/
public abstract class Lang {
public static int HASH_BUFF_SIZE = 16 * 1024;
private static final Pattern IPV4_PATTERN = Pattern.compile("^(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)(\\.(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)){3}$");
private static final Pattern IPV6_STD_PATTERN = Pattern.compile("^(?:[0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}$");
private static final Pattern IPV6_HEX_COMPRESSED_PATTERN = Pattern.compile("^((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)::((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)$");
public static boolean isIPv4Address(final String input) {
return IPV4_PATTERN.matcher(input).matches();
}
public static boolean isIPv6StdAddress(final String input) {
return IPV6_STD_PATTERN.matcher(input).matches();
}
public static boolean isIPv6HexCompressedAddress(final String input) {
return IPV6_HEX_COMPRESSED_PATTERN.matcher(input).matches();
}
public static boolean isIPv6Address(final String input) {
return isIPv6StdAddress(input) || isIPv6HexCompressedAddress(input);
}
public static ComboException comboThrow(Throwable... es) {
ComboException ce = new ComboException();
for (Throwable e : es)
ce.add(e);
return ce;
}
/**
* Creates a "not implemented yet" runtime exception
*
* @return a runtime exception saying the feature is not implemented
*/
public static RuntimeException noImplement() {
return new RuntimeException("Not implement yet!");
}
/**
* Creates an "impossible" runtime exception
*
* @return a runtime exception for a state that should never happen
*/
public static RuntimeException impossible() {
return new RuntimeException("r u kidding me?! It is impossible!");
}
/**
* Creates a runtime exception from a format string
*
* @param format
* the format string
* @param args
* the format arguments
* @return the runtime exception
*/
public static RuntimeException makeThrow(String format, Object... args) {
return new RuntimeException(String.format(format, args));
}
/**
* Creates an exception of the given type from a format string.
*
* @param classOfT
* the exception type; it needs a constructor taking a single String
* @param format
* the format string
* @param args
* the format arguments
* @return the exception object
*/
@SuppressWarnings("unchecked")
public static <T extends Throwable> T makeThrow(Class<T> classOfT,
String format,
Object... args) {
if (classOfT == RuntimeException.class)
return (T) new RuntimeException(String.format(format, args));
return Mirror.me(classOfT).born(String.format(format, args));
}
/**
* Wraps a throwable in a runtime exception, adding a description of your own
*
* @param e
* the throwable
* @param fmt
* the format string
* @param args
* the format arguments
* @return the runtime exception
*/
public static RuntimeException wrapThrow(Throwable e, String fmt, Object... args) {
return new RuntimeException(String.format(fmt, args), e);
}
/**
* Wraps a throwable in a runtime exception; if it is already a runtime exception, it is returned directly.
* <p>
* If it is an InvocationTargetException, it is peeled off and only its TargetException is wrapped
*
* @param e
* the throwable
* @return the runtime exception
*/
public static RuntimeException wrapThrow(Throwable e) {
if (e instanceof RuntimeException)
return (RuntimeException) e;
if (e instanceof InvocationTargetException)
return wrapThrow(((InvocationTargetException) e).getTargetException());
return new RuntimeException(e);
}
/**
* Wraps a throwable in the given throwable type, which must have a constructor accepting a Throwable
*
* @param e
* the throwable
* @param wrapper
* the wrapper type
* @return the wrapped object
*/
@SuppressWarnings("unchecked")
public static <T extends Throwable> T wrapThrow(Throwable e, Class<T> wrapper) {
if (wrapper.isAssignableFrom(e.getClass()))
return (T) e;
return Mirror.me(wrapper).born(e);
}
public static Throwable unwrapThrow(Throwable e) {
if (e == null)
return null;
if (e instanceof InvocationTargetException) {
InvocationTargetException itE = (InvocationTargetException) e;
if (itE.getTargetException() != null)
return unwrapThrow(itE.getTargetException());
}
if (e instanceof RuntimeException && e.getCause() != null)
return unwrapThrow(e.getCause());
return e;
}
public static boolean isCauseBy(Throwable e, Class<? extends Throwable> causeType) {
if (e.getClass() == causeType)
return true;
Throwable cause = e.getCause();
if (null == cause)
return false;
return isCauseBy(cause, causeType);
}
/**
* Checks whether two objects are equal. This function:
* <ul>
* <li>tolerates null
* <li>tolerates Numbers of different types
* <li>compares arrays, collections and Maps deeply
* </ul>
* Of course, an overridden equals method takes precedence
*
* @param a0
* the first object
* @param a1
* the second object
* @return whether the two are equal
*/
public static boolean equals(Object a0, Object a1) {
if (a0 == a1)
return true;
if (a0 == null && a1 == null)
return true;
if (a0 == null || a1 == null)
return false;
// quick equals check first
if (a0.equals(a1))
return true;
Mirror<?> mi = Mirror.me(a0);
// simple types (and regex patterns): compare their string forms
if (mi.isSimple() || mi.is(Pattern.class)) {
return a0.toString().equals(a1.toString());
}
// if the types cannot be assigned to each other, the objects cannot be equal
if (!a0.getClass().isAssignableFrom(a1.getClass())
&& !a1.getClass().isAssignableFrom(a0.getClass()))
return false;
// Map
if (a0 instanceof Map && a1 instanceof Map) {
Map<?, ?> m1 = (Map<?, ?>) a0;
Map<?, ?> m2 = (Map<?, ?>) a1;
if (m1.size() != m2.size())
return false;
for (Entry<?, ?> e : m1.entrySet()) {
Object key = e.getKey();
if (!m2.containsKey(key) || !equals(m1.get(key), m2.get(key)))
return false;
}
return true;
}
// arrays
else if (a0.getClass().isArray() && a1.getClass().isArray()) {
int len = Array.getLength(a0);
if (len != Array.getLength(a1))
return false;
for (int i = 0; i < len; i++) {
if (!equals(Array.get(a0, i), Array.get(a1, i)))
return false;
}
return true;
}
// collections
else if (a0 instanceof Collection && a1 instanceof Collection) {
Collection<?> c0 = (Collection<?>) a0;
Collection<?> c1 = (Collection<?>) a1;
if (c0.size() != c1.size())
return false;
Iterator<?> it0 = c0.iterator();
Iterator<?> it1 = c1.iterator();
while (it0.hasNext()) {
Object o0 = it0.next();
Object o1 = it1.next();
if (!equals(o0, o1))
return false;
}
return true;
}
// definitely not equal
return false;
}
/**
* Checks whether an array contains a given object, comparing via the equals(Object, Object) method
*
* @param array
* the array
* @param ele
* the object
* @return true if contained, false otherwise
*/
public static <T> boolean contains(T[] array, T ele) {
if (null == array)
return false;
for (T e : array) {
if (equals(e, ele))
return true;
}
return false;
}
/**
* Reads all content from a text input stream, then closes the stream
*
* @param reader
* the text input stream
* @return all content of the input stream
*/
public static String readAll(Reader reader) {
if (!(reader instanceof BufferedReader))
reader = new BufferedReader(reader);
try {
StringBuilder sb = new StringBuilder();
char[] data = new char[64];
int len;
while (true) {
if ((len = reader.read(data)) == -1)
break;
sb.append(data, 0, len);
}
return sb.toString();
}
catch (IOException e) {
throw Lang.wrapThrow(e);
}
finally {
Streams.safeClose(reader);
}
}
/**
* Writes a string to a text output stream, then closes the stream
*
* @param writer
* the text output stream
* @param str
* the string
*/
public static void writeAll(Writer writer, String str) {
try {
writer.write(str);
writer.flush();
}
catch (IOException e) {
throw Lang.wrapThrow(e);
}
finally {
Streams.safeClose(writer);
}
}
/**
* Creates an input stream backed by a piece of text
*
* @param cs
* the text
* @return the input stream object
*/
public static InputStream ins(CharSequence cs) {
return new StringInputStream(cs);
}
/**
* Creates a text reader backed by a piece of text
*
* @param cs
* the text
* @return the reader object
*/
public static Reader inr(CharSequence cs) {
return new StringReader(cs.toString());
}
/**
* Creates a text writer backed by a StringBuilder
*
* @param sb
* the StringBuilder object
* @return the text writer object
*/
public static Writer opw(StringBuilder sb) {
return new StringWriter(sb);
}
/**
* Creates an output stream backed by a StringBuilder
*
* @param sb
* the StringBuilder object
* @return the output stream object
*/
public static StringOutputStream ops(StringBuilder sb) {
return new StringOutputStream(sb);
}
/**
* A convenient way to create an array, e.g.:
*
* <pre>
* String[] strs = Lang.array("A", "B", "A"); => ["A","B","A"]
* </pre>
*
* @param eles
* varargs
* @return the array object
*/
public static <T> T[] array(T... eles) {
return eles;
}
/**
* A convenient way to create an array without duplicates, e.g.:
*
* <pre>
* String[] strs = Lang.arrayUniq("A","B","A"); => ["A","B"]
* String[] strs = Lang.arrayUniq(); => null
* </pre>
*
* The returned order follows the input order
*
* @param eles
* varargs
* @return the array object
*/
@SuppressWarnings("unchecked")
public static <T> T[] arrayUniq(T... eles) {
if (null == eles || eles.length == 0)
return null;
// track duplicates
HashSet<T> set = new HashSet<T>(eles.length);
for (T ele : eles) {
set.add(ele);
}
// loop
T[] arr = (T[]) Array.newInstance(eles[0].getClass(), set.size());
int index = 0;
for (T ele : eles) {
if (set.remove(ele))
Array.set(arr, index++, ele);
}
return arr;
}
/**
* Checks whether an object is empty. Supported types:
* <ul>
* <li>null : always empty
* <li>arrays
* <li>collections
* <li>Map
* <li>other objects : never empty
* </ul>
*
* @param obj
* any object
* @return whether it is empty
*/
public static boolean isEmpty(Object obj) {
if (obj == null)
return true;
if (obj.getClass().isArray())
return Array.getLength(obj) == 0;
if (obj instanceof Collection<?>)
return ((Collection<?>) obj).isEmpty();
if (obj instanceof Map<?, ?>)
return ((Map<?, ?>) obj).isEmpty();
return false;
}
/**
* Checks whether an array is empty
*
* @param ary
* the array
* @return true for null or an empty array, false otherwise
*/
public static <T> boolean isEmptyArray(T[] ary) {
return null == ary || ary.length == 0;
}
/**
* A convenient way to create a list, e.g.:
*
* <pre>
* List<Pet> pets = Lang.list(pet1, pet2, pet3);
* </pre>
*
* Note: the List returned here is an ArrayList instance
*
* @param eles
* varargs
* @return the list object
*/
public static <T> ArrayList<T> list(T... eles) {
ArrayList<T> list = new ArrayList<T>(eles.length);
for (T ele : eles)
list.add(ele);
return list;
}
/**
* Creates a hash set
*
* @param eles
* varargs
* @return the set object
*/
public static <T> Set<T> set(T... eles) {
Set<T> set = new HashSet<T>();
for (T ele : eles)
set.add(ele);
return set;
}
/**
* Merges several arrays into one. If the arrays are all empty, returns null
*
* @param arys
* the arrays
* @return the merged array
*/
@SuppressWarnings("unchecked")
public static <T> T[] merge(T[]... arys) {
Queue<T> list = new LinkedList<T>();
for (T[] ary : arys)
if (null != ary)
for (T e : ary)
if (null != e)
list.add(e);
if (list.isEmpty())
return null;
Class<T> type = (Class<T>) list.peek().getClass();
return list.toArray((T[]) Array.newInstance(type, list.size()));
}
/**
* Prepends an object as the first element of an array, producing a new array
*
* @param e
* the object
* @param eles
* the array
* @return the new array
*/
@SuppressWarnings("unchecked")
public static <T> T[] arrayFirst(T e, T[] eles) {
try {
if (null == eles || eles.length == 0) {
T[] arr = (T[]) Array.newInstance(e.getClass(), 1);
arr[0] = e;
return arr;
}
T[] arr = (T[]) Array.newInstance(eles.getClass().getComponentType(), eles.length + 1);
arr[0] = e;
for (int i = 0; i < eles.length; i++) {
arr[i + 1] = eles[i];
}
return arr;
}
catch (NegativeArraySizeException e1) {
throw Lang.wrapThrow(e1);
}
}
/**
* Appends an object as the last element of an array, producing a new array
*
* @param e
* the object
* @param eles
* the array
* @return the new array
*/
@SuppressWarnings("unchecked")
public static <T> T[] arrayLast(T[] eles, T e) {
try {
if (null == eles || eles.length == 0) {
T[] arr = (T[]) Array.newInstance(e.getClass(), 1);
arr[0] = e;
return arr;
}
T[] arr = (T[]) Array.newInstance(eles.getClass().getComponentType(), eles.length + 1);
for (int i = 0; i < eles.length; i++) {
arr[i] = eles[i];
}
arr[eles.length] = e;
return arr;
}
catch (NegativeArraySizeException e1) {
throw Lang.wrapThrow(e1);
}
}
/**
* Converts an array into a string
* <p>
* Every element is rendered through the format string, which must contain exactly one placeholder (%s, %d, etc., depending on your array's content)
*
* @param fmt
* the format string
* @param objs
* the array
* @return the concatenated string
*/
public static <T> StringBuilder concatBy(String fmt, T[] objs) {
StringBuilder sb = new StringBuilder();
for (T obj : objs)
sb.append(String.format(fmt, obj));
return sb;
}
/**
* Converts an array into a string
* <p>
* Every element is rendered through the format string, which must contain exactly one placeholder (%s, %d, etc., depending on your array's content)
* <p>
* Adjacent elements are separated by the given separator
*
* @param ptn
* the format string
* @param c
* the separator
* @param objs
* the array
* @return the concatenated string
*/
public static <T> StringBuilder concatBy(String ptn, Object c, T[] objs) {
StringBuilder sb = new StringBuilder();
for (T obj : objs)
sb.append(String.format(ptn, obj)).append(c);
if (sb.length() > 0)
sb.deleteCharAt(sb.length() - 1);
return sb;
}
/**
* Converts an array into a string
* <p>
* Adjacent elements are separated by the given separator
*
* @param c
* the separator
* @param objs
* the array
* @return the concatenated string
*/
public static <T> StringBuilder concat(Object c, T[] objs) {
StringBuilder sb = new StringBuilder();
if (null == objs || 0 == objs.length)
return sb;
sb.append(objs[0]);
for (int i = 1; i < objs.length; i++)
sb.append(c).append(objs[i]);
return sb;
}
/**
* Removes a specific value from an array
*
* @param objs
* the array
* @param val
* the value; may be null, and objects are compared with equals
* @return a new array instance
*/
@SuppressWarnings("unchecked")
public static <T> T[] without(T[] objs, T val) {
if (null == objs || objs.length == 0) {
return objs;
}
List<T> list = new ArrayList<T>(objs.length);
Class<?> eleType = null;
for (T obj : objs) {
if (obj == val || (null != obj && null != val && obj.equals(val)))
continue;
if (null == eleType && obj != null)
eleType = obj.getClass();
list.add(obj);
}
if (list.isEmpty()) {
return (T[]) new Object[0];
}
return list.toArray((T[]) Array.newInstance(eleType, list.size()));
}
/**
* Converts a long array into a string
* <p>
* Adjacent elements are separated by the given separator
*
* @param c
* the separator
* @param vals
* the array
* @return the concatenated string
*/
public static StringBuilder concat(Object c, long[] vals) {
StringBuilder sb = new StringBuilder();
if (null == vals || 0 == vals.length)
return sb;
sb.append(vals[0]);
for (int i = 1; i < vals.length; i++)
sb.append(c).append(vals[i]);
return sb;
}
/**
* Converts an int array into a string
* <p>
* Adjacent elements are separated by the given separator
*
* @param c
* the separator
* @param vals
* the array
* @return the concatenated string
*/
public static StringBuilder concat(Object c, int[] vals) {
StringBuilder sb = new StringBuilder();
if (null == vals || 0 == vals.length)
return sb;
sb.append(vals[0]);
for (int i = 1; i < vals.length; i++)
sb.append(c).append(vals[i]);
return sb;
}
/**
* Converts part of an array into a string
* <p>
* Adjacent elements are separated by the given separator
*
* @param offset
* the index of the first element
* @param len
* the number of elements
* @param c
* the separator
* @param objs
* the array
* @return the concatenated string
*/
public static <T> StringBuilder concat(int offset, int len, Object c, T[] objs) {
StringBuilder sb = new StringBuilder();
if (null == objs || len < 0 || 0 == objs.length)
return sb;
if (offset < objs.length) {
sb.append(objs[offset]);
for (int i = 1; i < len && i + offset < objs.length; i++) {
sb.append(c).append(objs[i + offset]);
}
}
return sb;
}
/**
* Concatenates all elements of an array into one string
*
* @param objs
* the array
* @return the concatenated string
*/
public static <T> StringBuilder concat(T[] objs) {
StringBuilder sb = new StringBuilder();
for (T e : objs)
sb.append(e.toString());
return sb;
}
/**
* Concatenates part of an array into one string
*
* @param offset
* the index of the first element
* @param len
* the number of elements
* @param array
* the array
* @return the concatenated string
*/
public static <T> StringBuilder concat(int offset, int len, T[] array) {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < len; i++) {
sb.append(array[i + offset].toString());
}
return sb;
}
/**
* Converts a collection into a string
* <p>
* Adjacent elements are separated by the given separator
*
* @param c
* the separator
* @param coll
* the collection
* @return the concatenated string
*/
public static <T> StringBuilder concat(Object c, Collection<T> coll) {
StringBuilder sb = new StringBuilder();
if (null == coll || coll.isEmpty())
return sb;
return concat(c, coll.iterator());
}
/**
* Converts an iterator's elements into a string
* <p>
* Adjacent elements are separated by the given separator
*
* @param c
* the separator
* @param it
* the iterator
* @return the concatenated string
*/
public static <T> StringBuilder concat(Object c, Iterator<T> it) {
StringBuilder sb = new StringBuilder();
if (it == null || !it.hasNext())
return sb;
sb.append(it.next());
while (it.hasNext())
sb.append(c).append(it.next());
return sb;
}
/**
* Fills one or more arrays into a collection.
*
* @param <C>
* the collection type
* @param <T>
* the array element type
* @param coll
* the collection
* @param objss
* the arrays (varargs)
* @return the collection object
*/
public static <C extends Collection<T>, T> C fill(C coll, T[]... objss) {
for (T[] objs : objss)
for (T obj : objs)
coll.add(obj);
return coll;
}
/**
* Converts a collection into a Map.
*
* @param mapClass
* the Map type
* @param coll
* the collection
* @param keyFieldName
* which field of the collection's elements to use as the key.
* @return the Map object
*/
public static <T extends Map<Object, Object>> T collection2map(Class<T> mapClass,
Collection<?> coll,
String keyFieldName) {
if (null == coll)
return null;
T map = createMap(mapClass);
if (coll.size() > 0) {
Iterator<?> it = coll.iterator();
Object obj = it.next();
Mirror<?> mirror = Mirror.me(obj.getClass());
Object key = mirror.getValue(obj, keyFieldName);
map.put(key, obj);
for (; it.hasNext();) {
obj = it.next();
key = mirror.getValue(obj, keyFieldName);
map.put(key, obj);
}
}
return (T) map;
}
/**
* Converts a collection into an ArrayList
*
* @param col
* the collection
* @return the list object
*/
@SuppressWarnings("unchecked")
public static <E> List<E> collection2list(Collection<E> col) {
if (null == col)
return null;
if (col.size() == 0)
return new ArrayList<E>(0);
Class<E> eleType = (Class<E>) col.iterator().next().getClass();
return collection2list(col, eleType);
}
/**
* Converts a collection into a list of the given element type
*
* @param col
* the collection
* @param eleType
* the list's element type
* @return the list object
*/
public static <E> List<E> collection2list(Collection<?> col, Class<E> eleType) {
if (null == col)
return null;
List<E> list = new ArrayList<E>(col.size());
for (Object obj : col)
list.add(Castors.me().castTo(obj, eleType));
return list;
}
/**
* Converts a collection into an array whose type is that of the collection's first element. If the collection is null, null is returned
*
* @param coll
* the collection
* @return the array
*/
@SuppressWarnings("unchecked")
public static <E> E[] collection2array(Collection<E> coll) {
if (null == coll)
return null;
if (coll.size() == 0)
return (E[]) new Object[0];
Class<E> eleType = (Class<E>) Lang.first(coll).getClass();
return collection2array(coll, eleType);
}
/**
* Converts a collection into an array of the given element type
*
* @param col
* the collection
* @param eleType
* the array's element type
* @return the array
*/
@SuppressWarnings("unchecked")
public static <E> E[] collection2array(Collection<?> col, Class<E> eleType) {
if (null == col)
return null;
Object re = Array.newInstance(eleType, col.size());
int i = 0;
for (Iterator<?> it = col.iterator(); it.hasNext();) {
Object obj = it.next();
if (null == obj)
Array.set(re, i++, null);
else
Array.set(re, i++, Castors.me().castTo(obj, eleType));
}
return (E[]) re;
}
/**
* Converts an array into a Map
*
* @param mapClass
* the Map type
* @param array
* the array
* @param keyFieldName
* which field of the array's elements to use as the key.
* @return the Map object
*/
public static <T extends Map<Object, Object>> T array2map(Class<T> mapClass,
Object array,
String keyFieldName) {
if (null == array)
return null;
T map = createMap(mapClass);
int len = Array.getLength(array);
if (len > 0) {
Object obj = Array.get(array, 0);
Mirror<?> mirror = Mirror.me(obj.getClass());
for (int i = 0; i < len; i++) {
obj = Array.get(array, i);
Object key = mirror.getValue(obj, keyFieldName);
map.put(key, obj);
}
}
return map;
}
@SuppressWarnings("unchecked")
private static <T extends Map<Object, Object>> T createMap(Class<T> mapClass) {
T map;
try {
map = mapClass.newInstance();
}
catch (Exception e) {
map = (T) new HashMap<Object, Object>();
}
if (!mapClass.isAssignableFrom(map.getClass())) {
throw Lang.makeThrow("Fail to create map [%s]", mapClass.getName());
}
return map;
}
/**
* Converts an array into a list.
*
* @param array
* the source array
* @return the new list
*
* @see org.nutz.castor.Castors
*/
public static <T> List<T> array2list(T[] array) {
if (null == array)
return null;
List<T> re = new ArrayList<T>(array.length);
for (T obj : array)
re.add(obj);
return re;
}
/**
* Converts an array into a list. Castor is used to deeply convert each element
*
* @param array
* the source array
* @param eleType
* the new list's element type
* @return the new list
*
* @see org.nutz.castor.Castors
*/
public static <T, E> List<E> array2list(Object array, Class<E> eleType) {
if (null == array)
return null;
int len = Array.getLength(array);
List<E> re = new ArrayList<E>(len);
for (int i = 0; i < len; i++) {
Object obj = Array.get(array, i);
re.add(Castors.me().castTo(obj, eleType));
}
return re;
}
/**
* Converts an array into an array of a different type. Castor is used to deeply convert each element
*
* @param array
* the source array
* @param eleType
* the new array's element type
* @return the new array
* @throws FailToCastObjectException
*
* @see org.nutz.castor.Castors
*/
public static Object array2array(Object array, Class<?> eleType)
throws FailToCastObjectException {
if (null == array)
return null;
int len = Array.getLength(array);
Object re = Array.newInstance(eleType, len);
for (int i = 0; i < len; i++) {
Array.set(re, i, Castors.me().castTo(Array.get(array, i), eleType));
}
return re;
}
/**
* Converts an array into an Object[] array. Castor is used to deeply convert each element
*
* @param args
* the source array
* @param pts
* the new array's element types
* @return the new array
* @throws FailToCastObjectException
*
* @see org.nutz.castor.Castors
*/
public static <T> Object[] array2ObjectArray(T[] args, Class<?>[] pts)
throws FailToCastObjectException {
if (null == args)
return null;
Object[] newArgs = new Object[args.length];
for (int i = 0; i < args.length; i++) {
newArgs[i] = Castors.me().castTo(args[i], pts[i]);
}
return newArgs;
}
/**
* Creates a new Java object of the given type from a Map
*
* @param src
* the source Map
* @param toType
* the Java object type
* @return the Java object
* @throws FailToCastObjectException
*/
@SuppressWarnings({"unchecked", "rawtypes"})
public static <T> T map2Object(Map<?, ?> src, Class<T> toType)
throws FailToCastObjectException {
if (null == toType)
throw new FailToCastObjectException("target type is Null");
// same type
if (toType == Map.class)
return (T) src;
// also a kind of Map
if (Map.class.isAssignableFrom(toType)) {
Map map;
try {
map = (Map) toType.newInstance();
map.putAll(src);
return (T) map;
}
catch (Exception e) {
throw new FailToCastObjectException("target type fail to born!", unwrapThrow(e));
}
}
// array
if (toType.isArray())
return (T) Lang.collection2array(src.values(), toType.getComponentType());
// List
if (List.class == toType) {
return (T) Lang.collection2list(src.values());
}
// POJO
Mirror<T> mirror = Mirror.me(toType);
T obj = mirror.born();
for (Field field : mirror.getFields()) {
Object v = null;
if (!Lang.isAndroid && field.isAnnotationPresent(Column.class)) {
String cv = field.getAnnotation(Column.class).value();
v = src.get(cv);
}
if (null == v && src.containsKey(field.getName())) {
v = src.get(field.getName());
}
if (null != v) {
//Class<?> ft = field.getType();
// resolve the field's real type from the generic base class, see https://github.com/nutzam/nutz/issues/1288
Class<?> ft = ReflectTool.getGenericFieldType(toType, field);
Object vv = null;
// collection
if (v instanceof Collection) {
Collection c = (Collection) v;
// collection to array
if (ft.isArray()) {
vv = Lang.collection2array(c, ft.getComponentType());
}
// collection to collection
else {
// create the target collection
Collection newCol;
//Class eleType = Mirror.getGenericTypes(field, 0);
Class<?> eleType = ReflectTool.getParameterRealGenericClass(toType,
field.getGenericType(),0);
if (ft == List.class) {
newCol = new ArrayList(c.size());
} else if (ft == Set.class) {
newCol = new LinkedHashSet();
} else {
try {
newCol = (Collection) ft.newInstance();
}
catch (Exception e) {
throw Lang.wrapThrow(e);
}
}
// populate it
for (Object ele : c) {
newCol.add(Castors.me().castTo(ele, eleType));
}
vv = newCol;
}
}
// Map
else if (v instanceof Map && Map.class.isAssignableFrom(ft)) {
// create the target map
final Map map;
// the plain Map interface
if (ft == Map.class) {
map = new HashMap();
}
// a custom Map implementation
else {
try {
map = (Map) ft.newInstance();
}
catch (Exception e) {
throw new FailToCastObjectException("target type fail to born!", e);
}
}
// populate it
//final Class<?> valType = Mirror.getGenericTypes(field, 1);
// real types of the map's keys and values
final Class<?> keyType = ReflectTool.getParameterRealGenericClass(toType,
field.getGenericType(),0);
final Class<?> valType =ReflectTool.getParameterRealGenericClass(toType,
field.getGenericType(),1);
each(v, new Each<Entry>() {
public void invoke(int i, Entry en, int length) {
map.put(Castors.me().castTo(en.getKey(), keyType),
Castors.me().castTo(en.getValue(), valType));
}
});
vv = map;
}
// otherwise, plain cast
else {
vv = Castors.me().castTo(v, ft);
}
mirror.setValue(obj, field, vv);
}
}
return obj;
}
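// Usage sketch for map2Object above (Pet is a hypothetical POJO with a
// matching "name" field; the input Map comes from Lang.map below):
// Pet pet = Lang.map2Object(Lang.map("name:'xiaobai'"), Pet.class);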
/**
* Creates a Map object from a piece of text.
*
* @param str
* a JSON-style string; the surrounding braces may be omitted
* @return the Map object
*/
public static NutMap map(String str) {
if (null == str)
return null;
str = Strings.trim(str);
if (!Strings.isEmpty(str)
&& (Strings.isQuoteBy(str, '{', '}') || Strings.isQuoteBy(str, '(', ')'))) {
return Json.fromJson(NutMap.class, str);
}
return Json.fromJson(NutMap.class, "{" + str + "}");
}
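// Usage sketch (illustrative, based on the brace-wrapping above):
// NutMap m = Lang.map("x:1, y:2"); // parsed as {"x":1, "y":2}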
/**
* Rewrites all keys of a Map through a callback
*
* When this function encounters an array or a collection, each element is processed automatically
*
* @param obj
* the Map, collection or array to convert
*
* @param mkc
* the key-conversion callback
* @param recur
* whether to recurse when a Map is encountered
*
* @see MapKeyConvertor
*/
@SuppressWarnings("unchecked")
public static void convertMapKey(Object obj, MapKeyConvertor mkc, boolean recur) {
// Map
if (obj instanceof Map<?, ?>) {
Map<String, Object> map = (Map<String, Object>) obj;
NutMap map2 = new NutMap();
for (Map.Entry<String, Object> en : map.entrySet()) {
String key = en.getKey();
Object val = en.getValue();
if (recur)
convertMapKey(val, mkc, recur);
String newKey = mkc.convertKey(key);
map2.put(newKey, val);
}
map.clear();
map.putAll(map2);
}
// Collection
else if (obj instanceof Collection<?>) {
for (Object ele : (Collection<?>) obj) {
convertMapKey(ele, mkc, recur);
}
}
// Array
else if (obj.getClass().isArray()) {
for (Object ele : (Object[]) obj) {
convertMapKey(ele, mkc, recur);
}
}
}
/**
* Creates a Map object with a single key
*
* @param key
* the key
* @param v
* the value
* @return the Map object
*/
public static NutMap map(String key, Object v) {
return new NutMap().addv(key, v);
}
/**
* Creates a Map object from a format string
*
* @param fmt
* the format string
* @param args
* the string arguments
* @return the Map object
*/
public static NutMap mapf(String fmt, Object... args) {
return map(String.format(fmt, args));
}
/**
* Creates a new context object
*
* @return a newly created context object
*/
public static Context context() {
return new SimpleContext();
}
/**
* Creates a new context object from a key and a value
* @param key
* @param val
* @return
*/
public static Context context(String key, Object val) {
return context().set(key, val);
}
/**
* Wraps a Map into a context object
*
* @param map
* the Map object
*
* @return a newly created context object
*/
public static Context context(Map<String, Object> map) {
return new SimpleContext(map);
}
/**
* Creates a new context object from a JSON string template
*
* @param fmt
* the JSON string template
* @param args
* the template arguments
*
* @return a newly created context object
*/
public static Context contextf(String fmt, Object... args) {
return context(Lang.mapf(fmt, args));
}
/**
* Creates a new context object from a JSON string
*
* @return a newly created context object
*/
public static Context context(String str) {
return context(map(str));
}
/**
* Creates a List object from a piece of text.
*
* @param str
* a JSON-style string; the surrounding brackets may be omitted
* @return the List object
*/
@SuppressWarnings("unchecked")
public static List<Object> list4(String str) {
if (null == str)
return null;
if ((str.length() > 0 && str.charAt(0) == '[') && str.endsWith("]"))
return (List<Object>) Json.fromJson(str);
return (List<Object>) Json.fromJson("[" + str + "]");
}
/**
* Gets the length of an object. It accepts:
* <ul>
* <li>null : 0
* <li>arrays
* <li>collections
* <li>Map
* <li>ordinary Java objects : returns 1
* </ul>
* If you want your Java object to return something other than 1, declare a length() method on it
*
* @param obj
* @return the object's length
* @deprecated this is badly designed: the final dynamic call to "length" makes string handling awkward; use Lang.eleSize instead
*/
@Deprecated
public static int length(Object obj) {
if (null == obj)
return 0;
if (obj.getClass().isArray()) {
return Array.getLength(obj);
} else if (obj instanceof Collection<?>) {
return ((Collection<?>) obj).size();
} else if (obj instanceof Map<?, ?>) {
return ((Map<?, ?>) obj).size();
}
try {
return (Integer) Mirror.me(obj.getClass()).invoke(obj, "length");
}
catch (Exception e) {}
return 1;
}
/**
* Gets the number of elements contained in a container (Map/collection/array)
* <ul>
* <li>null : 0
* <li>arrays
* <li>collections
* <li>Map
* <li>ordinary Java objects : returns 1
* </ul>
*
* @param obj
* @return the element count
* @since Nutz 1.r.62
*/
public static int eleSize(Object obj) {
// null counts as 0
if (null == obj)
return 0;
// arrays
if (obj.getClass().isArray()) {
return Array.getLength(obj);
}
// containers
if (obj instanceof Collection<?>) {
return ((Collection<?>) obj).size();
}
// Map
if (obj instanceof Map<?, ?>) {
return ((Map<?, ?>) obj).size();
}
        // everything else counts as 1
return 1;
}
    /**
     * If the object is an array or collection, returns its first element; otherwise returns the object itself.
     *
     * @param obj
     *            any object
     * @return the first representative element
     */
public static Object first(Object obj) {
if (null == obj)
return obj;
if (obj instanceof Collection<?>) {
Iterator<?> it = ((Collection<?>) obj).iterator();
return it.hasNext() ? it.next() : null;
}
if (obj.getClass().isArray())
return Array.getLength(obj) > 0 ? Array.get(obj, 0) : null;
return obj;
}
    /**
     * Returns the first element of a collection, or null if the collection is empty.
     *
     * @param coll
     *            the collection
     * @return the first element
     */
public static <T> T first(Collection<T> coll) {
if (null == coll || coll.isEmpty())
return null;
return coll.iterator().next();
}
    /**
     * Returns the first name/value pair of a map.
     *
     * @param map
     *            the map
     * @return the first entry
     */
public static <K, V> Entry<K, V> first(Map<K, V> map) {
if (null == map || map.isEmpty())
return null;
return map.entrySet().iterator().next();
}
    /**
     * Breaks out of an each loop
     */
public static void Break() throws ExitLoop {
throw new ExitLoop();
}
    /**
     * Continues an each loop; when inside a recursion, stops recursing
     */
public static void Continue() throws ContinueLoop {
throw new ContinueLoop();
}
    /**
     * Iterates over an object via callback. Supports iterating over:
     * <ul>
     * <li>arrays
     * <li>collections
     * <li>Maps
     * <li>single elements
     * </ul>
     *
     * @param obj
     *            the object
     * @param callback
     *            the callback
     */
public static <T> void each(Object obj, Each<T> callback) {
each(obj, true, callback);
}
    /**
     * Iterates over an object via callback. Supports iterating over:
     * <ul>
     * <li>arrays
     * <li>collections
     * <li>Maps
     * <li>single elements
     * </ul>
     *
     * @param obj
     *            the object
     * @param loopMap
     *            whether to loop over Maps. When looping, the callback's T decides what is
     *            iterated: Map.Entry loops over entries, anything else loops over values.
     *            When false, the Map is treated as one single object
     * @param callback
     *            the callback
     */
@SuppressWarnings({"rawtypes", "unchecked"})
public static <T> void each(Object obj, boolean loopMap, Each<T> callback) {
if (null == obj || null == callback)
return;
try {
            // loop begins
if (callback instanceof Loop)
if (!((Loop) callback).begin())
return;
            // do the loop
if (obj.getClass().isArray()) {
int len = Array.getLength(obj);
for (int i = 0; i < len; i++)
try {
callback.invoke(i, (T) Array.get(obj, i), len);
}
catch (ContinueLoop e) {}
catch (ExitLoop e) {
break;
}
} else if (obj instanceof Collection) {
int len = ((Collection) obj).size();
int i = 0;
for (Iterator<T> it = ((Collection) obj).iterator(); it.hasNext();)
try {
callback.invoke(i++, it.next(), len);
}
catch (ContinueLoop e) {}
catch (ExitLoop e) {
break;
}
} else if (loopMap && obj instanceof Map) {
Map map = (Map) obj;
int len = map.size();
int i = 0;
Class<T> eType = Mirror.getTypeParam(callback.getClass(), 0);
if (null != eType && eType != Object.class && eType.isAssignableFrom(Entry.class)) {
for (Object v : map.entrySet())
try {
callback.invoke(i++, (T) v, len);
}
catch (ContinueLoop e) {}
catch (ExitLoop e) {
break;
}
} else {
for (Object v : map.entrySet())
try {
callback.invoke(i++, (T) ((Entry) v).getValue(), len);
}
catch (ContinueLoop e) {}
catch (ExitLoop e) {
break;
}
}
} else if (obj instanceof Iterator<?>) {
Iterator<?> it = (Iterator<?>) obj;
int i = 0;
while (it.hasNext()) {
try {
callback.invoke(i++, (T) it.next(), -1);
}
catch (ContinueLoop e) {}
catch (ExitLoop e) {
break;
}
}
} else
try {
callback.invoke(0, (T) obj, 1);
}
catch (ContinueLoop e) {}
catch (ExitLoop e) {}
            // loop ends
if (callback instanceof Loop)
((Loop) callback).end();
}
catch (LoopException e) {
throw Lang.wrapThrow(e.getCause());
}
}
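    // --- Illustrative usage sketch (not part of the original Nutz API); the
    // method name and numbers below are assumptions for demonstration, and it
    // assumes ExitLoop is unchecked, which the catch-based loop above implies ---
    // Sums the first three elements of a list, stopping early via Break().
    private static int demoEach() {
        final int[] sum = {0};
        each(list4("1,2,3,4,5"), new Each<Object>() {
            public void invoke(int index, Object ele, int length) {
                sum[0] += ((Number) ele).intValue();
                if (index == 2)
                    Break(); // stop after the third element
            }
        });
        return sum[0]; // => 6
    }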
    /**
     * Safely fetches an element from an array, tolerating a null array and supporting negative indexes.
     * <p>
     * Returns null if the index is out of bounds.
     *
     * @param <T>
     * @param array
     *            the array; if null, null is returned directly
     * @param index
     *            the index; -1 means the last element, -2 the second to last, and so on
     * @return the array element
     */
public static <T> T get(T[] array, int index) {
if (null == array)
return null;
int i = index < 0 ? array.length + index : index;
if (i < 0 || i >= array.length)
return null;
return array[i];
}
    /**
     * Renders a throwable's stack trace as a string.
     *
     * @param e
     *            the throwable
     * @return the stack trace text
     */
public static String getStackTrace(Throwable e) {
StringBuilder sb = new StringBuilder();
StringOutputStream sbo = new StringOutputStream(sb);
PrintStream ps = new PrintStream(sbo);
e.printStackTrace(ps);
ps.flush();
return sbo.getStringBuilder().toString();
}
    /**
     * Parses a string into a boolean, accepting more spellings than Boolean.parseBoolean:
     * <ul>
     * <li>1 | 0
     * <li>yes | no
     * <li>on | off
     * <li>true | false
     * </ul>
     *
     * @param s
     *            the string
     * @return the boolean value
     */
public static boolean parseBoolean(String s) {
if (null == s || s.length() == 0)
return false;
if (s.length() > 5)
return true;
if ("0".equals(s))
return false;
s = s.toLowerCase();
return !"false".equals(s) && !"off".equals(s) && !"no".equals(s);
}
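    // Illustrative expectations for parseBoolean (an assumption derived from
    // the rules above, not from any original test suite):
    //   parseBoolean("1")   == true     parseBoolean("0")   == false
    //   parseBoolean("yes") == true     parseBoolean("no")  == false
    //   parseBoolean("on")  == true     parseBoolean("off") == false
    //   parseBoolean(null)  == false    parseBoolean("anything-long") == true (length > 5)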
    /**
     * A shortcut for obtaining a DocumentBuilder, handy for XML parsing.
     *
     * @return a DocumentBuilder object
     * @throws ParserConfigurationException
     */
public static DocumentBuilder xmls() throws ParserConfigurationException {
return Xmls.xmls();
}
    /**
     * A thin wrapper around Thread.sleep(long) that never throws.
     *
     * @param millisecond
     *            the time to sleep
     */
public static void quiteSleep(long millisecond) {
try {
if (millisecond > 0)
Thread.sleep(millisecond);
}
catch (Throwable e) {}
}
    /**
     * Turns a string into a Number. Supported formats:
     * <ul>
     * <li>null - integer 0</li>
     * <li>23.78 - floating point, Float/Double</li>
     * <li>0x45 - hexadecimal integer, Integer</li>
     * <li>78L - long integer, Long</li>
     * <li>69 - plain integer, Integer</li>
     * </ul>
     *
     * @param s
     *            the input
     * @return a Number object
     */
public static Number str2number(String s) {
        // null value
if (null == s) {
return 0;
}
s = s.toUpperCase();
        // floating point
if (s.indexOf('.') != -1) {
char c = s.charAt(s.length() - 1);
if (c == 'F' || c == 'f') {
return Float.valueOf(s);
}
return Double.valueOf(s);
}
        // hexadecimal integer
if (s.startsWith("0X")) {
return Integer.valueOf(s.substring(2), 16);
}
        // long integer
if (s.charAt(s.length() - 1) == 'L' || s.charAt(s.length() - 1) == 'l') {
return Long.valueOf(s.substring(0, s.length() - 1));
}
        // plain integer
Long re = Long.parseLong(s);
if (Integer.MAX_VALUE >= re && re >= Integer.MIN_VALUE)
return re.intValue();
return re;
}
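    // Illustrative expectations for str2number (an assumption derived from
    // the supported formats above):
    //   str2number(null)    -> 0 (Integer)      str2number("69")   -> 69 (Integer)
    //   str2number("23.78") -> 23.78 (Double)   str2number("2.5F") -> 2.5 (Float)
    //   str2number("0x45")  -> 69 (Integer)     str2number("78L")  -> 78 (Long)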
@SuppressWarnings("unchecked")
private static <T extends Map<String, Object>> void obj2map(Object obj,
T map,
final Map<Object, Object> memo) {
        // already converted; do not convert it again recursively
if (null == obj || memo.containsKey(obj))
return;
memo.put(obj, "");
// Fix issue #497
        // if it is a Map, just putAll it over
if (obj instanceof Map<?, ?>) {
map.putAll(__change_map_to_nutmap((Map<String, Object>) obj, memo));
return;
}
        // what follows is a plain POJO
Mirror<?> mirror = Mirror.me(obj.getClass());
Field[] flds = mirror.getFields();
for (Field fld : flds) {
Object v = mirror.getValue(obj, fld);
if (null == v) {
continue;
}
Mirror<?> mr = Mirror.me(v);
            // simple value
if (mr.isSimple()) {
map.put(fld.getName(), v);
}
            // already emitted
else if (memo.containsKey(v)) {
map.put(fld.getName(), null);
}
            // array or collection
else if (mr.isColl()) {
final List<Object> list = new ArrayList<Object>(Lang.length(v));
Lang.each(v, new Each<Object>() {
public void invoke(int index, Object ele, int length) {
__join_ele_to_list_as_map(list, ele, memo);
}
});
map.put(fld.getName(), list);
}
// Map
else if (mr.isMap()) {
NutMap map2 = __change_map_to_nutmap((Map<String, Object>) v, memo);
map.put(fld.getName(), map2);
}
            // looks like recursion is needed
else {
T sub;
try {
sub = (T) map.getClass().newInstance();
}
catch (Exception e) {
throw Lang.wrapThrow(e);
}
obj2map(v, sub, memo);
map.put(fld.getName(), sub);
}
}
}
@SuppressWarnings("unchecked")
private static NutMap __change_map_to_nutmap(Map<String, Object> map,
final Map<Object, Object> memo) {
NutMap re = new NutMap();
for (Map.Entry<String, Object> en : map.entrySet()) {
Object v = en.getValue();
if (null == v)
continue;
Mirror<?> mr = Mirror.me(v);
            // simple value
if (mr.isSimple()) {
re.put(en.getKey(), v);
}
            // already emitted
else if (memo.containsKey(v)) {
continue;
}
            // array or collection
else if (mr.isColl()) {
final List<Object> list2 = new ArrayList<Object>(Lang.length(v));
Lang.each(v, new Each<Object>() {
public void invoke(int index, Object ele, int length) {
__join_ele_to_list_as_map(list2, ele, memo);
}
});
re.put(en.getKey(), list2);
}
// Map
else if (mr.isMap()) {
NutMap map2 = __change_map_to_nutmap((Map<String, Object>) v, memo);
re.put(en.getKey(), map2);
}
            // looks like recursion is needed
else {
NutMap map2 = obj2nutmap(v);
re.put(en.getKey(), map2);
}
}
return re;
}
@SuppressWarnings("unchecked")
private static void __join_ele_to_list_as_map(List<Object> list,
Object o,
final Map<Object, Object> memo) {
if (null == o) {
return;
}
        // if it is a Map, just putAll it over
if (o instanceof Map<?, ?>) {
NutMap map2 = __change_map_to_nutmap((Map<String, Object>) o, memo);
list.add(map2);
return;
}
Mirror<?> mr = Mirror.me(o);
        // simple value
if (mr.isSimple()) {
list.add(o);
}
        // already emitted
else if (memo.containsKey(o)) {
list.add(null);
}
        // array or collection
else if (mr.isColl()) {
final List<Object> list2 = new ArrayList<Object>(Lang.length(o));
Lang.each(o, new Each<Object>() {
public void invoke(int index, Object ele, int length) {
__join_ele_to_list_as_map(list2, ele, memo);
}
});
list.add(list2);
}
// Map
else if (mr.isMap()) {
NutMap map2 = __change_map_to_nutmap((Map<String, Object>) o, memo);
list.add(map2);
}
        // looks like recursion is needed
else {
NutMap map = obj2nutmap(o);
list.add(map);
}
}
    /**
     * Converts an object into a Map.
     *
     * @param obj
     *            the POJO
     * @return the Map object
     */
@SuppressWarnings("unchecked")
public static Map<String, Object> obj2map(Object obj) {
return obj2map(obj, HashMap.class);
}
    /**
     * Converts an object into Nutz's standard Map wrapper.
     *
     * @param obj
     *            the POJO
     * @return the NutMap object
     */
public static NutMap obj2nutmap(Object obj) {
return obj2map(obj, NutMap.class);
}
    /**
     * Converts an object into a Map of the given type.
     *
     * @param <T>
     * @param obj
     *            the POJO
     * @param mapType
     *            the Map type
     * @return the Map object
     */
public static <T extends Map<String, Object>> T obj2map(Object obj, Class<T> mapType) {
try {
T map = mapType.newInstance();
Lang.obj2map(obj, map, new HashMap<Object, Object>());
return map;
}
catch (Exception e) {
throw Lang.wrapThrow(e);
}
}
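    // Illustrative usage sketch (assumption: Pet is a hypothetical POJO with
    // 'name' and 'age' fields, not a class defined anywhere in this file):
    //   Pet pet = new Pet("wendal", 3);
    //   NutMap nm = Lang.obj2nutmap(pet);          // => {name:'wendal', age:3}
    //   Map<String, Object> m = Lang.obj2map(pet); // plain HashMap variant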
    /**
     * Returns an Enumeration over a collection. Effectively a wrapper around the Iterator interface.
     *
     * @param col
     *            the collection
     * @return the enumeration
     */
public static <T> Enumeration<T> enumeration(Collection<T> col) {
final Iterator<T> it = col.iterator();
return new Enumeration<T>() {
public boolean hasMoreElements() {
return it.hasNext();
}
public T nextElement() {
return it.next();
}
};
}
    /**
     * Drains an Enumeration into a collection.
     *
     * @param enums
     *            the enumeration
     * @param cols
     *            the target collection
     * @return the collection
     */
public static <T extends Collection<E>, E> T enum2collection(Enumeration<E> enums, T cols) {
while (enums.hasMoreElements())
cols.add(enums.nextElement());
return cols;
}
    /**
     * Force-casts a char array into a byte array. Double-byte characters lose information.
     *
     * @param cs
     *            the char array
     * @return the byte array
     */
public static byte[] toBytes(char[] cs) {
byte[] bs = new byte[cs.length];
for (int i = 0; i < cs.length; i++)
bs[i] = (byte) cs[i];
return bs;
}
    /**
     * Force-casts an int array into a byte array. The high-order bits of each int are dropped.
     *
     * @param is
     *            the int array
     * @return the byte array
     */
public static byte[] toBytes(int[] is) {
byte[] bs = new byte[is.length];
for (int i = 0; i < is.length; i++)
bs[i] = (byte) is[i];
return bs;
}
    /**
     * Checks whether the current system is Windows.
     *
     * @return true if the current system is Windows
     */
public static boolean isWin() {
try {
String os = System.getenv("OS");
return os != null && os.indexOf("Windows") > -1;
}
catch (Throwable e) {
return false;
}
}
    /**
     * An earlier version used the thread context ClassLoader, which caused all sorts of
     * problems, so this reverts to the original behaviour with Class.forName as the fallback.
     */
public static Class<?> loadClass(String className) throws ClassNotFoundException {
try {
return Thread.currentThread().getContextClassLoader().loadClass(className);
}
catch (Throwable e) {
return Class.forName(className);
}
}
    /**
     * Returns true if the running Java virtual machine is JDK 6 or newer.
     *
     * @return true if the current JVM is at least JDK 6
     */
public static boolean isJDK6() {
return JdkTool.getMajorVersion() >= 6;
}
    /**
     * Returns the default value of a primitive type.
     *
     * @param pClass
     *            the primitive class
     * @return 0/false; returns null if pClass is not a primitive class
     */
public static Object getPrimitiveDefaultValue(Class<?> pClass) {
if (int.class.equals(pClass))
return Integer.valueOf(0);
if (long.class.equals(pClass))
return Long.valueOf(0);
if (short.class.equals(pClass))
return Short.valueOf((short) 0);
if (float.class.equals(pClass))
return Float.valueOf(0f);
if (double.class.equals(pClass))
return Double.valueOf(0);
if (byte.class.equals(pClass))
return Byte.valueOf((byte) 0);
if (char.class.equals(pClass))
return Character.valueOf((char) 0);
if (boolean.class.equals(pClass))
return Boolean.FALSE;
return null;
}
    /**
     * When a class declares generics such as &lt;T, K&gt;, returns the concrete type of one of its fields.
     *
     * @param me
     * @param field
     */
public static Type getFieldType(Mirror<?> me, String field) throws NoSuchFieldException {
return getFieldType(me, me.getField(field));
}
    /**
     * When a class declares generics such as &lt;T, K&gt;, returns the concrete types of all parameters of one of its methods.
     *
     * @param me
     * @param method
     */
public static Type[] getMethodParamTypes(Mirror<?> me, Method method) {
Type[] types = method.getGenericParameterTypes();
List<Type> ts = new ArrayList<Type>();
for (Type type : types) {
ts.add(getGenericsType(me, type));
}
return ts.toArray(new Type[ts.size()]);
}
    /**
     * When a class declares generics such as &lt;T, K&gt;, returns the concrete type of one of its fields.
     *
     * @param me
     * @param field
     */
public static Type getFieldType(Mirror<?> me, Field field) {
Type type = field.getGenericType();
return getGenericsType(me, type);
}
    /**
     * When a class declares generics such as &lt;T, K&gt;, resolves a declared type against the class's actual type arguments.
     *
     * @param me
     * @param type
     */
public static Type getGenericsType(Mirror<?> me, Type type) {
Type[] types = me.getGenericsTypes();
Type t = type;
if (type instanceof TypeVariable && types != null && types.length > 0) {
Type[] tvs = me.getType().getTypeParameters();
for (int i = 0; i < tvs.length; i++) {
if (type.equals(tvs[i])) {
type = me.getGenericsType(i);
break;
}
}
}
if (!type.equals(t)) {
return type;
}
if (types != null && types.length > 0 && type instanceof ParameterizedType) {
ParameterizedType pt = (ParameterizedType) type;
if (pt.getActualTypeArguments().length >= 0) {
NutType nt = new NutType();
nt.setOwnerType(pt.getOwnerType());
nt.setRawType(pt.getRawType());
Type[] tt = new Type[pt.getActualTypeArguments().length];
for (int i = 0; i < tt.length; i++) {
tt[i] = types[i];
}
nt.setActualTypeArguments(tt);
return nt;
}
}
return type;
}
    /**
     * Resolves the Class that a Type actually corresponds to.
     *
     * @param type
     *            the type
     * @return the Class corresponding to the Type
     */
@SuppressWarnings("rawtypes")
public static Class<?> getTypeClass(Type type) {
Class<?> clazz = null;
if (type instanceof Class<?>) {
clazz = (Class<?>) type;
} else if (type instanceof ParameterizedType) {
ParameterizedType pt = (ParameterizedType) type;
clazz = (Class<?>) pt.getRawType();
} else if (type instanceof GenericArrayType) {
GenericArrayType gat = (GenericArrayType) type;
Class<?> typeClass = getTypeClass(gat.getGenericComponentType());
return Array.newInstance(typeClass, 0).getClass();
} else if (type instanceof TypeVariable) {
TypeVariable tv = (TypeVariable) type;
Type[] ts = tv.getBounds();
if (ts != null && ts.length > 0)
return getTypeClass(ts[0]);
} else if (type instanceof WildcardType) {
WildcardType wt = (WildcardType) type;
            Type[] t_low = wt.getLowerBounds(); // take its lower bounds
if (t_low.length > 0)
return getTypeClass(t_low[0]);
            Type[] t_up = wt.getUpperBounds(); // no lower bound? take the upper bounds
            return getTypeClass(t_up[0]); // at minimum, Object is an upper bound
}
return clazz;
}
    /**
     * Returns a Type's generic type arguments, or null if it has none.
     *
     * @param type
     *            the type
     * @return the generic type arguments, or null if there are none
     */
public static Type[] getGenericsTypes(Type type) {
if (type instanceof ParameterizedType) {
ParameterizedType pt = (ParameterizedType) type;
return pt.getActualTypeArguments();
}
return null;
}
    /**
     * Force-loads a Class from a string, wrapping ClassNotFoundException in a RuntimeException.
     *
     * @param <T>
     * @param name
     *            the class name
     * @param type
     *            the upper bound of the type
     * @return the class object
     */
@SuppressWarnings("unchecked")
public static <T> Class<T> forName(String name, Class<T> type) {
Class<?> re;
try {
re = Lang.loadClass(name);
return (Class<T>) re;
}
catch (ClassNotFoundException e) {
throw Lang.wrapThrow(e);
}
}
    /**
     * Computes the MD5 of a file.
     *
     * @param f
     *            the file
     * @return the file's MD5
     * @see #digest(String, File)
     */
public static String md5(File f) {
return digest("MD5", f);
}
    /**
     * Computes the MD5 of an input stream.
     *
     * @param ins
     *            the input stream
     * @return the stream's MD5
     * @see #digest(String, InputStream)
     */
public static String md5(InputStream ins) {
return digest("MD5", ins);
}
    /**
     * Computes the MD5 of a string.
     *
     * @param cs
     *            the string
     * @return the string's MD5
     * @see #digest(String, CharSequence)
     */
public static String md5(CharSequence cs) {
return digest("MD5", cs);
}
    /**
     * Computes the SHA1 of a file.
     *
     * @param f
     *            the file
     * @return the file's SHA1
     * @see #digest(String, File)
     */
public static String sha1(File f) {
return digest("SHA1", f);
}
    /**
     * Computes the SHA1 of an input stream.
     *
     * @param ins
     *            the input stream
     * @return the stream's SHA1
     * @see #digest(String, InputStream)
     */
public static String sha1(InputStream ins) {
return digest("SHA1", ins);
}
    /**
     * Computes the SHA1 of a string.
     *
     * @param cs
     *            the string
     * @return the string's SHA1
     * @see #digest(String, CharSequence)
     */
public static String sha1(CharSequence cs) {
return digest("SHA1", cs);
}
    /**
     * Computes the SHA256 of a file.
     *
     * @param f
     *            the file
     * @return the file's SHA256
     * @see #digest(String, File)
     */
public static String sha256(File f) {
return digest("SHA-256", f);
}
    /**
     * Computes the SHA256 of an input stream.
     *
     * @param ins
     *            the input stream
     * @return the stream's SHA256
     * @see #digest(String, InputStream)
     */
public static String sha256(InputStream ins) {
return digest("SHA-256", ins);
}
    /**
     * Computes the SHA256 of a string.
     *
     * @param cs
     *            the string
     * @return the string's SHA256
     * @see #digest(String, CharSequence)
     */
public static String sha256(CharSequence cs) {
return digest("SHA-256", cs);
}
    /**
     * Computes a digest from a data file.
     *
     * @param algorithm
     *            the algorithm, e.g. "SHA1", "SHA-256" or "MD5"
     * @param f
     *            the file
     * @return the digest
     */
public static String digest(String algorithm, File f) {
return digest(algorithm, Streams.fileIn(f));
}
    /**
     * Computes a digest from a stream; the stream is closed when done.
     *
     * @param algorithm
     *            the algorithm, e.g. "SHA1" or "MD5"
     * @param ins
     *            the input stream
     * @return the digest
     */
public static String digest(String algorithm, InputStream ins) {
try {
MessageDigest md = MessageDigest.getInstance(algorithm);
byte[] bs = new byte[HASH_BUFF_SIZE];
int len = 0;
while ((len = ins.read(bs)) != -1) {
md.update(bs, 0, len);
}
byte[] hashBytes = md.digest();
return fixedHexString(hashBytes);
}
catch (NoSuchAlgorithmException e) {
throw Lang.wrapThrow(e);
}
catch (FileNotFoundException e) {
throw Lang.wrapThrow(e);
}
catch (IOException e) {
throw Lang.wrapThrow(e);
}
finally {
Streams.safeClose(ins);
}
}
    /**
     * Computes a digest from a string.
     *
     * @param algorithm
     *            the algorithm, e.g. "SHA1" or "MD5"
     * @param cs
     *            the string
     * @return the digest
     */
public static String digest(String algorithm, CharSequence cs) {
return digest(algorithm, Strings.getBytesUTF8(null == cs ? "" : cs), null, 1);
}
    /**
     * Computes a digest from a byte array.
     *
     * @param algorithm
     *            the algorithm, e.g. "SHA1" or "MD5"
     * @param bytes
     *            the byte array
     * @param salt
     *            the random salt bytes
     * @param iterations
     *            the iteration count
     * @return the digest
     */
public static String digest(String algorithm, byte[] bytes, byte[] salt, int iterations) {
try {
MessageDigest md = MessageDigest.getInstance(algorithm);
if (salt != null) {
md.update(salt);
}
byte[] hashBytes = md.digest(bytes);
for (int i = 1; i < iterations; i++) {
md.reset();
hashBytes = md.digest(hashBytes);
}
return fixedHexString(hashBytes);
}
catch (NoSuchAlgorithmException e) {
throw Lang.wrapThrow(e);
}
}
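    // --- Illustrative usage sketch (not part of the original Nutz API); the
    // method name, salt and iteration count are assumptions for demonstration ---
    // A salted, iterated SHA-256 of a password, as the overload above allows.
    private static String demoSaltedDigest(String password) {
        byte[] salt = {0x01, 0x02, 0x03, 0x04};
        return digest("SHA-256", Strings.getBytesUTF8(password), salt, 1024);
    }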
    /** Whether the currently running Java virtual machine is an Android environment */
public static final boolean isAndroid;
static {
boolean flag = false;
try {
Class.forName("android.Manifest");
flag = true;
}
catch (Throwable e) {}
isAndroid = flag;
}
    /**
     * Reverses the given array in place. Note this destroys the original order.
     *
     * @param arrays
     *            the array
     */
public static <T> void reverse(T[] arrays) {
int size = arrays.length;
for (int i = 0; i < size; i++) {
int ih = i;
int it = size - 1 - i;
            if (ih >= it) {
break;
}
T ah = arrays[ih];
T swap = arrays[it];
arrays[ih] = swap;
arrays[it] = ah;
}
}
    /** @deprecated misspelled; use {@link #simpleMethodDesc(Method)} instead */
    @Deprecated
    public static String simpleMetodDesc(Method method) {
return simpleMethodDesc(method);
}
public static String simpleMethodDesc(Method method) {
return String.format("%s.%s(...)",
method.getDeclaringClass().getSimpleName(),
method.getName());
}
public static String fixedHexString(byte[] hashBytes) {
StringBuffer sb = new StringBuffer();
for (int i = 0; i < hashBytes.length; i++) {
sb.append(Integer.toString((hashBytes[i] & 0xff) + 0x100, 16).substring(1));
}
return sb.toString();
}
    /**
     * A convenience method that puts the current thread to sleep.
     *
     * @param ms
     *            the time to sleep, in ms
     */
public static void sleep(long ms) {
try {
Thread.sleep(ms);
}
catch (InterruptedException e) {
throw Lang.wrapThrow(e);
}
}
    /**
     * A convenience method that waits on an object's monitor.
     *
     * @param lock
     *            the lock object
     * @param ms
     *            the time to wait, in ms
     */
public static void wait(Object lock, long ms) {
if (null != lock)
synchronized (lock) {
try {
lock.wait(ms);
}
catch (InterruptedException e) {
throw Lang.wrapThrow(e);
}
}
}
    /**
     * Notifies all threads waiting on an object's monitor.
     *
     * @param lock
     *            the lock object
     */
public static void notifyAll(Object lock) {
if (null != lock)
synchronized (lock) {
lock.notifyAll();
}
}
public static void runInAnThread(Runnable runnable) {
new Thread(runnable).start();
}
    /**
     * Shallow-filters a map, returning a new map.
     *
     * @param source
     *            the original map
     * @param prefix
     *            the prefix keys must carry; the prefix is stripped in the result
     * @param include
     *            a regex of keys to keep (matched after the prefix is stripped)
     * @param exclude
     *            a regex of keys to drop (matched after the prefix is stripped)
     * @param keyMap
     *            a rename map of original key to target key (applied after the prefix is stripped)
     * @return the filtered map, a different object from the original
     */
public static Map<String, Object> filter(Map<String, Object> source,
String prefix,
String include,
String exclude,
Map<String, String> keyMap) {
LinkedHashMap<String, Object> dst = new LinkedHashMap<String, Object>();
if (source == null || source.isEmpty())
return dst;
Pattern includePattern = include == null ? null : Regex.getPattern(include);
Pattern excludePattern = exclude == null ? null : Regex.getPattern(exclude);
for (Entry<String, Object> en : source.entrySet()) {
String key = en.getKey();
if (prefix != null) {
if (key.startsWith(prefix))
key = key.substring(prefix.length());
else
continue;
}
if (includePattern != null && !includePattern.matcher(key).find())
continue;
if (excludePattern != null && excludePattern.matcher(key).find())
continue;
if (keyMap != null && keyMap.containsKey(key))
dst.put(keyMap.get(key), en.getValue());
else
dst.put(key, en.getValue());
}
return dst;
}
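    // --- Illustrative usage sketch (not part of the original Nutz API); the
    // method name and sample keys are assumptions for demonstration ---
    // Keeps only keys starting with "db.", strips the prefix, renames "url".
    private static Map<String, Object> demoFilter() {
        Map<String, Object> src = map("{'db.url':'jdbc:h2:mem', 'db.user':'sa', 'app.name':'demo'}");
        Map<String, String> rename = new HashMap<String, String>();
        rename.put("url", "jdbcUrl");
        return filter(src, "db.", null, null, rename);
        // => {jdbcUrl:'jdbc:h2:mem', user:'sa'}
    }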
    /**
     * Returns the visitor's IP address, including requests that arrived through a reverse proxy.
     *
     * @param request
     *            the request object
     * @return the source IP
     */
public static String getIP(HttpServletRequest request) {
if (request == null)
return "";
String ip = request.getHeader("X-Forwarded-For");
if (ip == null || ip.length() == 0 || "unknown".equalsIgnoreCase(ip)) {
            // the outer check already holds here, so query the next header directly
            ip = request.getHeader("Proxy-Client-IP");
if (ip == null || ip.length() == 0 || "unknown".equalsIgnoreCase(ip)) {
ip = request.getHeader("WL-Proxy-Client-IP");
}
if (ip == null || ip.length() == 0 || "unknown".equalsIgnoreCase(ip)) {
ip = request.getHeader("HTTP_CLIENT_IP");
}
if (ip == null || ip.length() == 0 || "unknown".equalsIgnoreCase(ip)) {
ip = request.getHeader("HTTP_X_FORWARDED_FOR");
}
if (ip == null || ip.length() == 0 || "unknown".equalsIgnoreCase(ip)) {
ip = request.getRemoteAddr();
}
} else if (ip.length() > 15) {
String[] ips = ip.split(",");
for (int index = 0; index < ips.length; index++) {
String strIp = ips[index];
if (!("unknown".equalsIgnoreCase(strIp))) {
ip = strIp;
break;
}
}
}
if (Strings.isBlank(ip))
return "";
if (isIPv4Address(ip) || isIPv6Address(ip)) {
return ip;
}
return "";
}
    /**
     * @return the root directory the current program runs from
     */
public static String runRootPath() {
String cp = Lang.class.getClassLoader().getResource("").toExternalForm();
if (cp.startsWith("file:")) {
cp = cp.substring("file:".length());
}
return cp;
}
public static <T> T copyProperties(Object origin, T target) {
return copyProperties(origin, target, null, null, false, true);
}
public static <T> T copyProperties(Object origin,
T target,
String active,
String lock,
boolean ignoreNull,
boolean ignoreStatic) {
if (origin == null)
throw new IllegalArgumentException("origin is null");
if (target == null)
throw new IllegalArgumentException("target is null");
Pattern at = active == null ? null : Regex.getPattern(active);
Pattern lo = lock == null ? null : Regex.getPattern(lock);
Mirror<Object> originMirror = Mirror.me(origin);
Mirror<T> targetMirror = Mirror.me(target);
Field[] fields = targetMirror.getFields();
for (Field field : originMirror.getFields()) {
String name = field.getName();
if (at != null && !at.matcher(name).find())
continue;
if (lo != null && lo.matcher(name).find())
continue;
if (ignoreStatic && Modifier.isStatic(field.getModifiers()))
continue;
Object val = originMirror.getValue(origin, field);
if (ignoreNull && val == null)
continue;
for (Field _field : fields) {
if (_field.getName().equals(field.getName())) {
targetMirror.setValue(target, _field, val);
}
}
            // TODO support getter/setter comparison
}
return target;
}
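    // Illustrative usage sketch (assumption: Pet and PetDTO are hypothetical
    // beans with overlapping field names, not defined in this file):
    //   Pet src = ...;
    //   PetDTO dst = new PetDTO();
    //   // copy non-null, non-static fields whose names match ^(name|age)$
    //   Lang.copyProperties(src, dst, "^(name|age)$", null, true, true);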
public static StringBuilder execOutput(String cmd) throws IOException {
return execOutput(Strings.splitIgnoreBlank(cmd, " "), Encoding.CHARSET_UTF8);
}
public static StringBuilder execOutput(String cmd, Charset charset) throws IOException {
return execOutput(Strings.splitIgnoreBlank(cmd, " "), charset);
}
public static StringBuilder execOutput(String cmd[]) throws IOException {
return execOutput(cmd, Encoding.CHARSET_UTF8);
}
public static StringBuilder execOutput(String[] cmd, Charset charset) throws IOException {
Process p = Runtime.getRuntime().exec(cmd);
p.getOutputStream().close();
InputStreamReader r = new InputStreamReader(p.getInputStream(), charset);
StringBuilder sb = new StringBuilder();
Streams.readAndClose(r, sb);
return sb;
}
public static void exec(String cmd, StringBuilder out, StringBuilder err) throws IOException {
exec(Strings.splitIgnoreBlank(cmd, " "), Encoding.CHARSET_UTF8, out, err);
}
public static void exec(String[] cmd, StringBuilder out, StringBuilder err) throws IOException {
exec(cmd, Encoding.CHARSET_UTF8, out, err);
}
public static void exec(String[] cmd, Charset charset, StringBuilder out, StringBuilder err)
throws IOException {
Process p = Runtime.getRuntime().exec(cmd);
p.getOutputStream().close();
InputStreamReader sOut = new InputStreamReader(p.getInputStream(), charset);
Streams.readAndClose(sOut, out);
InputStreamReader sErr = new InputStreamReader(p.getErrorStream(), charset);
Streams.readAndClose(sErr, err);
}
public static Class<?> loadClassQuite(String className) {
try {
return loadClass(className);
}
catch (ClassNotFoundException e) {
return null;
}
}
public static byte[] toBytes(Object obj) {
try {
ByteArrayOutputStream bao = new ByteArrayOutputStream();
ObjectOutputStream oos = new ObjectOutputStream(bao);
oos.writeObject(obj);
return bao.toByteArray();
}
catch (IOException e) {
return null;
}
}
@SuppressWarnings("unchecked")
public static <T> T fromBytes(byte[] buf, Class<T> klass) {
try {
return (T) new ObjectInputStream(new ByteArrayInputStream(buf)).readObject();
}
catch (ClassNotFoundException e) {
return null;
}
catch (IOException e) {
return null;
}
}
public static class JdkTool {
public static String getVersionLong() {
Properties sys = System.getProperties();
return sys.getProperty("java.version");
}
public static int getMajorVersion() {
String ver = getVersionLong();
if (Strings.isBlank(ver))
return 6;
String[] tmp = ver.split("\\.");
if (tmp.length < 2)
return 6;
int t = Integer.parseInt(tmp[0]);
if (t > 1)
return t;
return Integer.parseInt(tmp[1]);
}
public static boolean isEarlyAccess() {
String ver = getVersionLong();
if (Strings.isBlank(ver))
return false;
return ver.contains("-ea");
}
        /**
         * Returns the process id.
         * @param fallback the value to return if the pid cannot be determined
         * @return the process id
         */
public static String getProcessId(final String fallback) {
final String jvmName = ManagementFactory.getRuntimeMXBean().getName();
final int index = jvmName.indexOf('@');
if (index < 1) {
return fallback;
}
try {
return Long.toString(Long.parseLong(jvmName.substring(0, index)));
}
catch (NumberFormatException e) {
}
return fallback;
}
}
    /**
     * Checks whether an object is non-empty. Supported object types:
     * <ul>
     * <li>null : always empty
     * <li>arrays
     * <li>collections
     * <li>Maps
     * <li>other objects : never empty
     * </ul>
     *
     * @param obj
     *            any object
     * @return whether the object is non-empty
     */
public static boolean isNotEmpty(Object obj) {
return !isEmpty(obj);
}
    /**
     * Computes the HmacMD5 of a string.
     *
     * @param data the string
     * @param secret the secret key
     * @return the string's HmacMD5
     */
public static String hmacmd5(String data, String secret) {
if (isEmpty(data))
throw new NullPointerException("data is null");
if (isEmpty(secret))
throw new NullPointerException("secret is null");
byte[] bytes = null;
try {
SecretKey secretKey = new SecretKeySpec(secret.getBytes(Encoding.UTF8), "HmacMD5");
Mac mac = Mac.getInstance(secretKey.getAlgorithm());
mac.init(secretKey);
bytes = mac.doFinal(data.getBytes(Encoding.UTF8));
} catch (Exception e) {
e.printStackTrace();
throw Lang.wrapThrow(e);
}
return fixedHexString(bytes);
}
    /**
     * Computes the HmacSHA256 of a string.
     *
     * @param data the string
     * @param secret the secret key
     * @return the string's HmacSHA256
     */
public static String hmacSHA256(String data, String secret) {
if (isEmpty(data))
throw new NullPointerException("data is null");
if (isEmpty(secret))
throw new NullPointerException("secret is null");
byte[] bytes = null;
try {
SecretKey secretKey = new SecretKeySpec(secret.getBytes(Encoding.UTF8), "HmacSHA256");
Mac mac = Mac.getInstance(secretKey.getAlgorithm());
mac.init(secretKey);
bytes = mac.doFinal(data.getBytes(Encoding.UTF8));
} catch (Exception e) {
e.printStackTrace();
throw Lang.wrapThrow(e);
}
return fixedHexString(bytes);
}
}
|
[
"\"OS\""
] |
[] |
[
"OS"
] |
[]
|
["OS"]
|
java
| 1 | 0 | |
polygerrit-ui/server.go
|
// Copyright (C) 2015 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"archive/zip"
"bufio"
"bytes"
"compress/gzip"
"encoding/json"
"errors"
"flag"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"regexp"
"strings"
"golang.org/x/tools/godoc/vfs/httpfs"
"golang.org/x/tools/godoc/vfs/zipfs"
)
var (
	plugins               = flag.String("plugins", "", "comma separated plugin paths to serve")
port = flag.String("port", ":8081", "Port to serve HTTP requests on")
host = flag.String("host", "gerrit-review.googlesource.com", "Host to proxy requests to")
scheme = flag.String("scheme", "https", "URL scheme")
cdnPattern = regexp.MustCompile("https://cdn.googlesource.com/polygerrit_ui/[0-9.]*")
bundledPluginsPattern = regexp.MustCompile("https://cdn.googlesource.com/polygerrit_assets/[0-9.]*")
)
func main() {
flag.Parse()
fontsArchive, err := openDataArchive("fonts.zip")
if err != nil {
log.Fatal(err)
}
componentsArchive, err := openDataArchive("app/test_components.zip")
if err != nil {
log.Fatal(err)
}
workspace := os.Getenv("BUILD_WORKSPACE_DIRECTORY")
if err := os.Chdir(filepath.Join(workspace, "polygerrit-ui")); err != nil {
log.Fatal(err)
}
http.Handle("/", http.FileServer(http.Dir("app")))
http.Handle("/bower_components/",
http.FileServer(httpfs.New(zipfs.New(componentsArchive, "bower_components"))))
http.Handle("/fonts/",
http.FileServer(httpfs.New(zipfs.New(fontsArchive, "fonts"))))
http.HandleFunc("/index.html", handleIndex)
http.HandleFunc("/changes/", handleProxy)
http.HandleFunc("/accounts/", handleProxy)
http.HandleFunc("/config/", handleProxy)
http.HandleFunc("/projects/", handleProxy)
http.HandleFunc("/static/", handleProxy)
http.HandleFunc("/accounts/self/detail", handleAccountDetail)
if len(*plugins) > 0 {
http.Handle("/plugins/", http.StripPrefix("/plugins/",
http.FileServer(http.Dir("../plugins"))))
log.Println("Local plugins from", "../plugins")
} else {
http.HandleFunc("/plugins/", handleProxy)
}
log.Println("Serving on port", *port)
log.Fatal(http.ListenAndServe(*port, &server{}))
}
func openDataArchive(path string) (*zip.ReadCloser, error) {
absBinPath, err := resourceBasePath()
if err != nil {
return nil, err
}
return zip.OpenReader(absBinPath + ".runfiles/gerrit/polygerrit-ui/" + path)
}
func resourceBasePath() (string, error) {
return filepath.Abs(os.Args[0])
}
func handleIndex(writer http.ResponseWriter, originalRequest *http.Request) {
fakeRequest := &http.Request{
URL: &url.URL{
Path: "/",
RawQuery: originalRequest.URL.RawQuery,
},
}
handleProxy(writer, fakeRequest)
}
func handleProxy(writer http.ResponseWriter, originalRequest *http.Request) {
patchedRequest := &http.Request{
Method: "GET",
URL: &url.URL{
Scheme: *scheme,
Host: *host,
Opaque: originalRequest.URL.EscapedPath(),
RawQuery: originalRequest.URL.RawQuery,
},
}
response, err := http.DefaultClient.Do(patchedRequest)
if err != nil {
http.Error(writer, err.Error(), http.StatusInternalServerError)
return
}
defer response.Body.Close()
for name, values := range response.Header {
for _, value := range values {
if name != "Content-Length" {
writer.Header().Add(name, value)
}
}
}
writer.WriteHeader(response.StatusCode)
if _, err := io.Copy(writer, patchResponse(originalRequest, response)); err != nil {
log.Println("Error copying response to ResponseWriter:", err)
return
}
}
func getJsonPropByPath(json map[string]interface{}, path []string) interface{} {
prop, path := path[0], path[1:]
if json[prop] == nil {
return nil
}
switch json[prop].(type) {
case map[string]interface{}: // map
return getJsonPropByPath(json[prop].(map[string]interface{}), path)
case []interface{}: // array
return json[prop].([]interface{})
default:
return json[prop].(interface{})
}
}
func setJsonPropByPath(json map[string]interface{}, path []string, value interface{}) {
prop, path := path[0], path[1:]
if json[prop] == nil {
return // path not found
}
if len(path) > 0 {
setJsonPropByPath(json[prop].(map[string]interface{}), path, value)
} else {
json[prop] = value
}
}
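// Illustrative sketch, not part of the original server: appending a local
// plugin path with the two JSON path helpers above (sample data is made up).
func exampleJSONPathHelpers() {
	info := map[string]interface{}{
		"plugin": map[string]interface{}{
			"js_resource_paths": []interface{}{"a.js"},
		},
	}
	path := []string{"plugin", "js_resource_paths"}
	paths := getJsonPropByPath(info, path).([]interface{})
	setJsonPropByPath(info, path, append(paths, "b.js"))
}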
func patchResponse(req *http.Request, res *http.Response) io.Reader {
switch req.URL.EscapedPath() {
case "/":
return rewriteHostPage(res.Body)
case "/config/server/info":
return injectLocalPlugins(res.Body)
default:
return res.Body
}
}
func rewriteHostPage(reader io.Reader) io.Reader {
buf := new(bytes.Buffer)
buf.ReadFrom(reader)
original := buf.String()
// Simply remove all CDN references, so files are loaded from the local file system or the proxy
// server instead.
replaced := cdnPattern.ReplaceAllString(original, "")
// Modify window.INITIAL_DATA so that it has the same effect as injectLocalPlugins. To achieve
// this let's add JavaScript lines at the end of the <script>...</script> snippet that also
// contains window.INITIAL_DATA=...
// Here we rely on the fact that the <script> snippet that we want to append to is the first one.
if len(*plugins) > 0 {
// If the host page contains a reference to a plugin bundle that would be preloaded, then remove it.
replaced = bundledPluginsPattern.ReplaceAllString(replaced, "")
insertionPoint := strings.Index(replaced, "</script>")
builder := new(strings.Builder)
builder.WriteString(
"window.INITIAL_DATA['/config/server/info'].plugin.html_resource_paths = []; ")
builder.WriteString(
"window.INITIAL_DATA['/config/server/info'].plugin.js_resource_paths = []; ")
for _, p := range strings.Split(*plugins, ",") {
if filepath.Ext(p) == ".html" {
builder.WriteString(
"window.INITIAL_DATA['/config/server/info'].plugin.html_resource_paths.push('" + p + "'); ")
}
if filepath.Ext(p) == ".js" {
builder.WriteString(
"window.INITIAL_DATA['/config/server/info'].plugin.js_resource_paths.push('" + p + "'); ")
}
}
replaced = replaced[:insertionPoint] + builder.String() + replaced[insertionPoint:]
}
return strings.NewReader(replaced)
}
func injectLocalPlugins(reader io.Reader) io.Reader {
if len(*plugins) == 0 {
return reader
}
// Skip escape prefix
io.CopyN(ioutil.Discard, reader, 5)
dec := json.NewDecoder(reader)
var response map[string]interface{}
err := dec.Decode(&response)
if err != nil {
log.Fatal(err)
}
// Configuration path in the JSON server response
jsPluginsPath := []string{"plugin", "js_resource_paths"}
htmlPluginsPath := []string{"plugin", "html_resource_paths"}
htmlResources := getJsonPropByPath(response, htmlPluginsPath).([]interface{})
jsResources := getJsonPropByPath(response, jsPluginsPath).([]interface{})
for _, p := range strings.Split(*plugins, ",") {
if filepath.Ext(p) == ".html" {
htmlResources = append(htmlResources, p)
}
if filepath.Ext(p) == ".js" {
jsResources = append(jsResources, p)
}
}
setJsonPropByPath(response, jsPluginsPath, jsResources)
setJsonPropByPath(response, htmlPluginsPath, htmlResources)
reader, writer := io.Pipe()
go func() {
defer writer.Close()
io.WriteString(writer, ")]}'") // Write escape prefix
err := json.NewEncoder(writer).Encode(&response)
if err != nil {
log.Fatal(err)
}
}()
return reader
}
func handleAccountDetail(w http.ResponseWriter, r *http.Request) {
http.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)
}
type gzipResponseWriter struct {
io.WriteCloser
http.ResponseWriter
}
func newGzipResponseWriter(w http.ResponseWriter) *gzipResponseWriter {
gz := gzip.NewWriter(w)
return &gzipResponseWriter{WriteCloser: gz, ResponseWriter: w}
}
func (w gzipResponseWriter) Write(b []byte) (int, error) {
return w.WriteCloser.Write(b)
}
func (w gzipResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
h, ok := w.ResponseWriter.(http.Hijacker)
if !ok {
return nil, nil, errors.New("gzipResponseWriter: ResponseWriter does not satisfy http.Hijacker interface")
}
return h.Hijack()
}
type server struct{}
// Any path prefixes that should resolve to index.html.
var (
fePaths = []string{"/q/", "/c/", "/p/", "/x/", "/dashboard/", "/admin/", "/settings/"}
issueNumRE = regexp.MustCompile(`^\/\d+\/?$`)
)
func (_ *server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
log.Printf("%s %s %s %s\n", r.Proto, r.Method, r.RemoteAddr, r.URL)
for _, prefix := range fePaths {
if strings.HasPrefix(r.URL.Path, prefix) || r.URL.Path == "/" {
r.URL.Path = "/index.html"
log.Println("Redirecting to /index.html")
break
} else if match := issueNumRE.Find([]byte(r.URL.Path)); match != nil {
r.URL.Path = "/index.html"
log.Println("Redirecting to /index.html")
break
}
}
if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
http.DefaultServeMux.ServeHTTP(w, r)
return
}
w.Header().Set("Content-Encoding", "gzip")
gzw := newGzipResponseWriter(w)
defer gzw.Close()
http.DefaultServeMux.ServeHTTP(gzw, r)
}
|
[
"\"BUILD_WORKSPACE_DIRECTORY\""
] |
[] |
[
"BUILD_WORKSPACE_DIRECTORY"
] |
[]
|
["BUILD_WORKSPACE_DIRECTORY"]
|
go
| 1 | 0 | |
.history/src/pygame_functions_20190422173332.py
|
import pygame, math, sys, os
pygame.mixer.pre_init(44100, -16, 2, 512)
pygame.init()
pygame.mixer.init()
spriteGroup = pygame.sprite.OrderedUpdates()
textboxGroup = pygame.sprite.OrderedUpdates()
gameClock = pygame.time.Clock()
musicPaused = False
hiddenSprites = pygame.sprite.OrderedUpdates()
screenRefresh = True
background = None
keydict = {"space": pygame.K_SPACE, "esc": pygame.K_ESCAPE, "up": pygame.K_UP, "down": pygame.K_DOWN,
"left": pygame.K_LEFT, "right": pygame.K_RIGHT,
"a": pygame.K_a,
"b": pygame.K_b,
"c": pygame.K_c,
"d": pygame.K_d,
"e": pygame.K_e,
"f": pygame.K_f,
"g": pygame.K_g,
"h": pygame.K_h,
"i": pygame.K_i,
"j": pygame.K_j,
"k": pygame.K_k,
"l": pygame.K_l,
"m": pygame.K_m,
"n": pygame.K_n,
"o": pygame.K_o,
"p": pygame.K_p,
"q": pygame.K_q,
"r": pygame.K_r,
"s": pygame.K_s,
"t": pygame.K_t,
"u": pygame.K_u,
"v": pygame.K_v,
"w": pygame.K_w,
"x": pygame.K_x,
"y": pygame.K_y,
"z": pygame.K_z,
"1": pygame.K_KP1,
"2": pygame.K_KP2,
"3": pygame.K_KP3,
"4": pygame.K_KP4,
"5": pygame.K_KP5,
"6": pygame.K_KP6,
"7": pygame.K_KP7,
"8": pygame.K_KP8,
"9": pygame.K_KP9,
"0": pygame.K_KP0,
"backspace": pygame.K_BACKSPACE}
screen = ""
class Background():
def __init__(self):
self.colour = pygame.Color("black")
def setTiles(self,tiles):
if type(tiles) is str:
self.tiles = [[loadImage(tiles)]]
elif type(tiles[0]) is str:
self.tiles = [[loadImage(i) for i in tiles]]
else:
self.tiles = [ [loadImage(i) for i in row] for row in tiles]
self.stagePosX = 0
self.stagePosY = 0
self.tileWidth = self.tiles[0][0].get_width()
self.tileHeight = self.tiles[0][0].get_height()
screen.blit(self.tiles[0][0],[0,0])
self.surface = screen.copy()
def scroll(self,x,y):
self.stagePosX -= x
self.stagePosY -= y
col = (self.stagePosX % (self.tileWidth * len(self.tiles[0]))) // self.tileWidth
xOff = (0 - self.stagePosX%self.tileWidth)
row = (self.stagePosY % (self.tileHeight * len(self.tiles))) // self.tileHeight
yOff = (0 - self.stagePosY % self.tileHeight)
col2 = ((self.stagePosX + self.tileWidth) % (self.tileWidth * len(self.tiles[0]))) // self.tileWidth
row2 = ((self.stagePosY + self.tileHeight) % (self.tileHeight * len(self.tiles))) // self.tileHeight
screen.blit(self.tiles[row][col], [xOff, yOff])
screen.blit(self.tiles[row][col2], [xOff + self.tileWidth, yOff])
screen.blit(self.tiles[row2][col], [xOff, yOff+self.tileHeight])
screen.blit(self.tiles[row2][col2], [xOff + self.tileWidth, yOff + self.tileHeight])
self.surface = screen.copy()
def setColour(self,colour):
self.colour = parseColour(colour)
screen.fill(self.colour)
pygame.display.update()
self.surface = screen.copy()
class newSprite(pygame.sprite.Sprite):
def __init__(self, filename, frames = 1):
pygame.sprite.Sprite.__init__(self)
self.images = []
img = loadImage(filename)
self.originalWidth = img.get_width() // frames
self.originalHeight = img.get_height()
frameSurf = pygame.Surface((self.originalWidth, self.originalHeight), pygame.SRCALPHA, 32)
x = 0
for frameNo in range(frames):
frameSurf = pygame.Surface((self.originalWidth, self.originalHeight), pygame.SRCALPHA, 32)
frameSurf.blit(img, (x, 0))
self.images.append(frameSurf.copy())
x -= self.originalWidth
self.image = pygame.Surface.copy(self.images[0])
self.currentImage = 0
self.rect = self.image.get_rect()
self.rect.topleft = (0, 0)
self.mask = pygame.mask.from_surface(self.image)
self.angle = 0
self.scale = 1
def addImage(self, filename):
self.images.append(loadImage(filename))
def move(self, xpos, ypos, centre=False):
if centre:
self.rect.center = [xpos, ypos]
else:
self.rect.topleft = [xpos, ypos]
def changeImage(self, index):
self.currentImage = index
if self.angle == 0 and self.scale == 1:
self.image = self.images[index]
else:
self.image = pygame.transform.rotozoom(self.images[self.currentImage], -self.angle, self.scale)
oldcenter = self.rect.center
self.rect = self.image.get_rect()
originalRect = self.images[self.currentImage].get_rect()
self.originalWidth = originalRect.width
self.originalHeight = originalRect.height
self.rect.center = oldcenter
self.mask = pygame.mask.from_surface(self.image)
if screenRefresh:
updateDisplay()
class newTextBox(pygame.sprite.Sprite):
def __init__(self, text, xpos, ypos, width, case, maxLength, fontSize):
pygame.sprite.Sprite.__init__(self)
self.text = ""
self.width = width
self.initialText = text
self.case = case
self.maxLength = maxLength
self.boxSize = int(fontSize * 1.7)
self.image = pygame.Surface((width, self.boxSize))
self.image.fill((255, 255, 255))
pygame.draw.rect(self.image, (0, 0, 0), [0, 0, width - 1, self.boxSize - 1], 2)
self.rect = self.image.get_rect()
self.fontFace = pygame.font.match_font("Arial")
self.fontColour = pygame.Color("black")
self.initialColour = (180, 180, 180)
self.font = pygame.font.Font(self.fontFace, fontSize)
self.rect.topleft = [xpos, ypos]
newSurface = self.font.render(self.initialText, True, self.initialColour)
self.image.blit(newSurface, [10, 5])
def update(self, keyevent):
key = keyevent.key
unicode = keyevent.unicode
if key > 31 and key < 127 and (
self.maxLength == 0 or len(self.text) < self.maxLength): # only printable characters
if keyevent.mod in (1,2) and self.case == 1 and key >= 97 and key <= 122:
# force lowercase letters
self.text += chr(key)
elif keyevent.mod == 0 and self.case == 2 and key >= 97 and key <= 122:
self.text += chr(key-32)
else:
# use the unicode char
self.text += unicode
elif key == 8:
# backspace. repeat until clear
keys = pygame.key.get_pressed()
nexttime = pygame.time.get_ticks() + 200
deleting = True
while deleting:
keys = pygame.key.get_pressed()
if keys[pygame.K_BACKSPACE]:
thistime = pygame.time.get_ticks()
if thistime > nexttime:
self.text = self.text[0:len(self.text) - 1]
self.image.fill((255, 255, 255))
pygame.draw.rect(self.image, (0, 0, 0), [0, 0, self.width - 1, self.boxSize - 1], 2)
newSurface = self.font.render(self.text, True, self.fontColour)
self.image.blit(newSurface, [10, 5])
updateDisplay()
nexttime = thistime + 50
pygame.event.clear()
else:
deleting = False
self.image.fill((255, 255, 255))
pygame.draw.rect(self.image, (0, 0, 0), [0, 0, self.width - 1, self.boxSize - 1], 2)
newSurface = self.font.render(self.text, True, self.fontColour)
self.image.blit(newSurface, [10, 5])
if screenRefresh:
updateDisplay()
    def move(self, xpos, ypos, centre=False):
        # fixed: centre=True now moves by the rect centre, matching newSprite.move
        if centre:
            self.rect.center = [xpos, ypos]
        else:
            self.rect.topleft = [xpos, ypos]
def clear(self):
self.image.fill((255, 255, 255))
pygame.draw.rect(self.image, (0, 0, 0), [0, 0, self.width - 1, self.boxSize - 1], 2)
newSurface = self.font.render(self.initialText, True, self.initialColour)
self.image.blit(newSurface, [10, 5])
if screenRefresh:
updateDisplay()
class newLabel(pygame.sprite.Sprite):
def __init__(self, text, fontSize, font, fontColour, xpos, ypos, background):
pygame.sprite.Sprite.__init__(self)
self.text = text
self.fontColour = parseColour(fontColour)
self.fontFace = pygame.font.match_font(font)
self.fontSize = fontSize
self.background = background
self.font = pygame.font.Font(self.fontFace, self.fontSize)
self.renderText()
self.rect.topleft = [xpos, ypos]
def update(self, newText, fontColour, background):
self.text = newText
if fontColour:
self.fontColour = parseColour(fontColour)
if background:
self.background = parseColour(background)
oldTopLeft = self.rect.topleft
self.renderText()
self.rect.topleft = oldTopLeft
if screenRefresh:
updateDisplay()
def renderText(self):
lineSurfaces = []
textLines = self.text.split("<br>")
maxWidth = 0
maxHeight = 0
for line in textLines:
lineSurfaces.append(self.font.render(line, True, self.fontColour))
thisRect = lineSurfaces[-1].get_rect()
if thisRect.width > maxWidth:
maxWidth = thisRect.width
if thisRect.height > maxHeight:
maxHeight = thisRect.height
self.image = pygame.Surface((maxWidth, (self.fontSize+1)*len(textLines)+5), pygame.SRCALPHA, 32)
self.image.convert_alpha()
if self.background != "clear":
self.image.fill(parseColour(self.background))
linePos = 0
for lineSurface in lineSurfaces:
self.image.blit(lineSurface,[0,linePos])
linePos+=self.fontSize+1
self.rect = self.image.get_rect()
def loadImage(fileName, useColorKey=False):
if os.path.isfile(fileName):
image = pygame.image.load(fileName)
image = image.convert_alpha()
# Return the image
return image
else:
raise Exception("Error loading image: " + fileName + " - Check filename and path?")
def screenSize(sizex, sizey, label="Graphics Window", xpos=None, ypos=None, fullscreen=False):
global screen
global background
    if xpos is not None and ypos is not None:
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d, %d" % (xpos, ypos + 50)
else:
windowInfo = pygame.display.Info()
monitorWidth = windowInfo.current_w
monitorHeight = windowInfo.current_h
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d, %d" % ((monitorWidth - sizex) / 2, (monitorHeight - sizey) / 2)
if fullscreen:
screen = pygame.display.set_mode([sizex, sizey], pygame.FULLSCREEN)
else:
screen = pygame.display.set_mode([sizex, sizey])
background = Background()
screen.fill(background.colour)
pygame.display.set_caption(label)
background.surface = screen.copy()
pygame.display.update()
return screen
def moveSprite(sprite, x, y, centre=False):
sprite.move(x, y, centre)
if screenRefresh:
updateDisplay()
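# Illustrative sketch (not part of the library): typical sprite movement,
# assuming a window already exists and 'player.png' is a real file.
#   screenSize(640, 480)
#   player = makeSprite("player.png")
#   showSprite(player)
#   moveSprite(player, 320, 240, centre=True)  # centre of sprite at (320, 240)
#   moveSprite(player, 0, 0)                   # top-left corner at (0, 0)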
def rotateSprite(sprite, angle):
print("rotateSprite has been deprecated. Please use transformSprite")
transformSprite(sprite, angle, 1)
def transformSprite(sprite, angle, scale, hflip=False, vflip=False):
oldmiddle = sprite.rect.center
if hflip or vflip:
tempImage = pygame.transform.flip(sprite.images[sprite.currentImage],hflip,vflip)
else:
tempImage = sprite.images[sprite.currentImage]
if angle != 0 or scale != 1:
sprite.angle = angle
sprite.scale = scale
tempImage = pygame.transform.rotozoom(tempImage, -angle, scale)
sprite.image = tempImage
sprite.rect = sprite.image.get_rect()
sprite.rect.center = oldmiddle
sprite.mask = pygame.mask.from_surface(sprite.image)
if screenRefresh:
updateDisplay()
def killSprite(sprite):
sprite.kill()
if screenRefresh:
updateDisplay()
def setBackgroundColour(colour):
background.setColour(colour)
if screenRefresh:
updateDisplay()
def setBackgroundImage(img):
global background
background.setTiles(img)
if screenRefresh:
updateDisplay()
def hideSprite(sprite):
hiddenSprites.add(sprite)
spriteGroup.remove(sprite)
if screenRefresh:
updateDisplay()
def hideAll():
hiddenSprites.add(spriteGroup.sprites())
spriteGroup.empty()
if screenRefresh:
updateDisplay()
def hideAllExcpt(sprite):
for s in spriteGroup.sprites():
if not s == sprite:
hideSprite(s)
def unhideAll():
spriteGroup.add(hiddenSprites.sprites())
hiddenSprites.empty()
if screenRefresh:
updateDisplay()
def showSprite(sprite):
spriteGroup.add(sprite)
if screenRefresh:
updateDisplay()
def makeSprite(filename, frames=1):
thisSprite = newSprite(filename,frames)
return thisSprite
def addSpriteImage(sprite, image):
sprite.addImage(image)
def changeSpriteImage(sprite, index):
sprite.changeImage(index)
def nextSpriteImage(sprite):
sprite.currentImage += 1
if sprite.currentImage > len(sprite.images) - 1:
sprite.currentImage = 0
sprite.changeImage(sprite.currentImage)
def prevSpriteImage(sprite):
sprite.currentImage -= 1
if sprite.currentImage < 0:
sprite.currentImage = len(sprite.images) - 1
sprite.changeImage(sprite.currentImage)
def makeImage(filename):
return loadImage(filename)
def touching(sprite1, sprite2):
collided = pygame.sprite.collide_mask(sprite1, sprite2)
return collided
def allTouching(spritename):
if spriteGroup.has(spritename):
collisions = pygame.sprite.spritecollide(spritename, spriteGroup, False, collided=pygame.sprite.collide_mask)
collisions.remove(spritename)
return collisions
else:
return []
def pause(milliseconds, allowEsc=True):
keys = pygame.key.get_pressed()
current_time = pygame.time.get_ticks()
waittime = current_time + milliseconds
updateDisplay()
while not (current_time > waittime or (keys[pygame.K_ESCAPE] and allowEsc)):
pygame.event.clear()
keys = pygame.key.get_pressed()
if (keys[pygame.K_ESCAPE] and allowEsc):
pygame.quit()
sys.exit()
current_time = pygame.time.get_ticks()
def drawRect(xpos, ypos, width, height, colour, linewidth=0):
global bgSurface
colour = parseColour(colour)
thisrect = pygame.draw.rect(screen, colour, [xpos, ypos, width, height], linewidth)
if screenRefresh:
pygame.display.update(thisrect)
def drawLine(x1, y1, x2, y2, colour, linewidth=1):
global bgSurface
colour = parseColour(colour)
thisrect = pygame.draw.line(screen, colour, (x1, y1), (x2, y2), linewidth)
if screenRefresh:
pygame.display.update(thisrect)
def drawPolygon(pointlist, colour, linewidth=0):
global bgSurface
colour = parseColour(colour)
thisrect = pygame.draw.polygon(screen, colour, pointlist, linewidth)
if screenRefresh:
pygame.display.update(thisrect)
def drawEllipse(centreX, centreY, width, height, colour, linewidth=0):
global bgSurface
colour = parseColour(colour)
thisrect = pygame.Rect(centreX - width / 2, centreY - height / 2, width, height)
pygame.draw.ellipse(screen, colour, thisrect, linewidth)
if screenRefresh:
pygame.display.update(thisrect)
def drawTriangle(x1, y1, x2, y2, x3, y3, colour, linewidth=0):
global bgSurface
colour = parseColour(colour)
thisrect = pygame.draw.polygon(screen, colour, [(x1, y1), (x2, y2), (x3, y3)], linewidth)
if screenRefresh:
pygame.display.update(thisrect)
def clearShapes():
global background
screen.blit(background.surface, [0, 0])
if screenRefresh:
updateDisplay()
def updateShapes():
pygame.display.update()
def end():
pygame.quit()
def makeSound(filename):
pygame.mixer.init()
thissound = pygame.mixer.Sound(filename)
return thissound
def playSound(sound, loops=0):
sound.play(loops)
def stopSound(sound):
sound.stop()
def playSoundAndWait(sound):
sound.play()
while pygame.mixer.get_busy():
# pause
pause(10)
def makeMusic(filename):
pygame.mixer.music.load(filename)
def playMusic(loops=0):
global musicPaused
if musicPaused:
pygame.mixer.music.unpause()
else:
pygame.mixer.music.play(loops)
musicPaused = False
def stopMusic():
pygame.mixer.music.stop()
def pauseMusic():
global musicPaused
pygame.mixer.music.pause()
musicPaused = True
def rewindMusic():
pygame.mixer.music.rewind()
def endWait():
updateDisplay()
print("Press ESC to quit")
keys = pygame.key.get_pressed()
current_time = pygame.time.get_ticks()
waittime = 0
while not keys[pygame.K_ESCAPE]:
current_time = pygame.time.get_ticks()
if current_time > waittime:
pygame.event.clear()
keys = pygame.key.get_pressed()
waittime += 20
pygame.quit()
def keyPressed(keyCheck=""):
global keydict
pygame.event.clear()
keys = pygame.key.get_pressed()
if sum(keys)>0:
if keyCheck=="" or keys[keydict[keyCheck.lower()]]:
return True
return False
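# Illustrative sketch (not part of the library): polling keys inside a game
# loop; 'player' and its coordinates are assumed to exist already.
#   while not keyPressed("esc"):
#       if keyPressed("left"):
#           moveSprite(player, player.rect.x - 5, player.rect.y)
#       tick(60)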
def keyPress(keyCheck=""):
global keydict
pygame.event.clear()
keys = pygame.key.get_pressed()
if sum(keys) == 1:
if keyCheck=="" or keys[keydict[keyCheck.lower()]]:
return True
return False
def makeLabel(text, fontSize, xpos, ypos, fontColour='black', font='Arial', background="clear"):
# make a text sprite
thisText = newLabel(text, fontSize, font, fontColour, xpos, ypos, background)
return thisText
def moveLabel(sprite, x, y):
sprite.rect.topleft = [x, y]
if screenRefresh:
updateDisplay()
def changeLabel(textObject, newText, fontColour=None, background=None):
textObject.update(newText, fontColour, background)
# updateDisplay()
def waitPress():
pygame.event.clear()
keypressed = False
thisevent = pygame.event.wait()
while thisevent.type != pygame.KEYDOWN:
thisevent = pygame.event.wait()
return thisevent.key
def makeTextBox(xpos, ypos, width, case=0, startingText="Please type here", maxLength=0, fontSize=22):
thisTextBox = newTextBox(startingText, xpos, ypos, width, case, maxLength, fontSize)
textboxGroup.add(thisTextBox)
return thisTextBox
def textBoxInput(textbox, functionToCall=None, args=[]):
# starts grabbing key inputs, putting into textbox until enter pressed
global keydict
textbox.text = ""
returnVal=None
while True:
updateDisplay()
if functionToCall:
returnVal = functionToCall(*args)
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_RETURN:
textbox.clear()
if returnVal:
return textbox.text, returnVal
else:
return textbox.text
elif event.key == pygame.K_ESCAPE:
pygame.quit()
sys.exit()
else:
textbox.update(event)
elif event.type == pygame.QUIT:
pygame.quit()
sys.exit()
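# Illustrative sketch (not part of the library): collecting a line of text,
# assuming a window already exists.
#   box = makeTextBox(50, 50, 300, startingText="Your name?")
#   showTextBox(box)
#   name = textBoxInput(box)  # blocks until Enter is pressed
#   showLabel(makeLabel("Hello " + name, 30, 50, 120))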
def clock():
current_time = pygame.time.get_ticks()
return current_time
def tick(fps):
pygame.event.clear()
keys = pygame.key.get_pressed()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
#if (keys[pygame.K_ESCAPE]):
# pygame.quit()
# sys.exit()
gameClock.tick(fps)
return gameClock.get_fps()
def showLabel(labelName):
textboxGroup.add(labelName)
if screenRefresh:
updateDisplay()
def hideLabel(labelName):
textboxGroup.remove(labelName)
if screenRefresh:
updateDisplay()
def showTextBox(textBoxName):
textboxGroup.add(textBoxName)
if screenRefresh:
updateDisplay()
def hideTextBox(textBoxName):
textboxGroup.remove(textBoxName)
if screenRefresh:
updateDisplay()
def updateDisplay():
global background
spriteRects = spriteGroup.draw(screen)
textboxRects = textboxGroup.draw(screen)
pygame.display.update()
keys = pygame.key.get_pressed()
if (keys[pygame.K_BACKSPACE]):
pygame.quit()
sys.exit()
spriteGroup.clear(screen, background.surface)
textboxGroup.clear(screen, background.surface)
def mousePressed():
pygame.event.clear()
mouseState = pygame.mouse.get_pressed()
if mouseState[0]:
return True
else:
return False
def spriteClicked(sprite):
mouseState = pygame.mouse.get_pressed()
if not mouseState[0]:
return False # not pressed
pos = pygame.mouse.get_pos()
if sprite.rect.collidepoint(pos):
return True
else:
return False
def parseColour(colour):
if type(colour) == str:
# check to see if valid colour
return pygame.Color(colour)
else:
colourRGB = pygame.Color("white")
colourRGB.r = colour[0]
colourRGB.g = colour[1]
colourRGB.b = colour[2]
return colourRGB
def mouseX():
x = pygame.mouse.get_pos()
return x[0]
def mouseY():
y = pygame.mouse.get_pos()
return y[1]
def scrollBackground(x,y):
global background
background.scroll(x,y)
def setAutoUpdate(val):
global screenRefresh
screenRefresh = val
if __name__ == "__main__":
print(""""pygame_functions is not designed to be run directly.
See the wiki at https://github.com/StevePaget/Pygame_Functions/wiki/Getting-Started for more information""")
|
[] |
[] |
[
"SDL_VIDEO_WINDOW_POS"
] |
[]
|
["SDL_VIDEO_WINDOW_POS"]
|
python
| 1 | 0 | |
tests/support/unit.py
|
# -*- coding: utf-8 -*-
"""
:codeauthor: Pedro Algarvio ([email protected])
============================
Unittest Compatibility Layer
============================
Compatibility layer to use :mod:`unittest <python2:unittest>` under Python
2.7 or `unittest2`_ under Python 2.6 without having to worry about which is
in use.
.. attention::
Please refer to Python's :mod:`unittest <python2:unittest>`
documentation as the ultimate source of information, this is just a
compatibility layer.
.. _`unittest2`: https://pypi.python.org/pypi/unittest2
"""
# pylint: disable=unused-import,blacklisted-module,deprecated-method
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import sys
from unittest import TestCase as _TestCase
from unittest import TestLoader as _TestLoader
from unittest import TestResult
from unittest import TestSuite as _TestSuite
from unittest import TextTestResult as _TextTestResult
from unittest import TextTestRunner as _TextTestRunner
from unittest import expectedFailure, skip, skipIf
from unittest.case import SkipTest, _id
from salt.ext import six
try:
import psutil
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
log = logging.getLogger(__name__)
# SHOW_PROC is True unless NO_SHOW_PROC is set in the environment; when True,
# verbose runs include process details,
# i.e. [CPU:15.1%|MEM:48.3%|Z:0]
SHOW_PROC = "NO_SHOW_PROC" not in os.environ
LOREM_IPSUM = """\
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque eget urna a arcu lacinia sagittis.
Sed scelerisque, lacus eget malesuada vestibulum, justo diam facilisis tortor, in sodales dolor
nibh eu urna. Aliquam iaculis massa risus, sed elementum risus accumsan id. Suspendisse mattis,
metus sed lacinia dictum, leo orci dapibus sapien, at porttitor sapien nulla ac velit.
Duis ac cursus leo, non varius metus. Sed laoreet felis magna, vel tempor diam malesuada nec.
Quisque cursus odio tortor. In consequat augue nisl, eget lacinia odio vestibulum eget.
Donec venenatis elementum arcu at rhoncus. Nunc pharetra erat in lacinia convallis. Ut condimentum
eu mauris sit amet convallis. Morbi vulputate vel odio non laoreet. Nullam in suscipit tellus.
Sed quis posuere urna."""
class TestSuite(_TestSuite):
def _handleClassSetUp(self, test, result):
previousClass = getattr(result, "_previousTestClass", None)
currentClass = test.__class__
if (
currentClass == previousClass
or getattr(currentClass, "setUpClass", None) is None
):
return super(TestSuite, self)._handleClassSetUp(test, result)
# Store a reference to all class attributes before running the setUpClass method
initial_class_attributes = dir(test.__class__)
super(TestSuite, self)._handleClassSetUp(test, result)
        # Store the difference in a variable in order to check later if they were deleted
test.__class__._prerun_class_attributes = [
attr for attr in dir(test.__class__) if attr not in initial_class_attributes
]
def _tearDownPreviousClass(self, test, result):
# Run any tearDownClass code defined
super(TestSuite, self)._tearDownPreviousClass(test, result)
previousClass = getattr(result, "_previousTestClass", None)
currentClass = test.__class__
if currentClass == previousClass:
return
# See if the previous class attributes have been cleaned
if previousClass and getattr(previousClass, "tearDownClass", None):
prerun_class_attributes = getattr(
previousClass, "_prerun_class_attributes", None
)
if prerun_class_attributes is not None:
previousClass._prerun_class_attributes = None
del previousClass._prerun_class_attributes
for attr in prerun_class_attributes:
if hasattr(previousClass, attr):
attr_value = getattr(previousClass, attr, None)
if attr_value is None:
continue
if isinstance(
attr_value, (bool,) + six.string_types + six.integer_types
):
setattr(previousClass, attr, None)
continue
log.warning(
"Deleting extra class attribute after test run: %s.%s(%s). "
"Please consider using 'del self.%s' on the test class "
"'tearDownClass()' method",
previousClass.__name__,
attr,
str(getattr(previousClass, attr))[:100],
attr,
)
delattr(previousClass, attr)
class TestLoader(_TestLoader):
    # We're just subclassing to make sure that our TestSuite class is the one used
suiteClass = TestSuite
class TestCase(_TestCase):
# pylint: disable=expected-an-indented-block-comment,too-many-leading-hastag-for-block-comment
## Commented out because it may be causing tests to hang
## at the end of the run
#
# _cwd = os.getcwd()
# _chdir_counter = 0
# @classmethod
# def tearDownClass(cls):
# '''
    #     Overridden method for tearing down all classes in salttesting
#
# This hard-resets the environment between test classes
# '''
# # Compare where we are now compared to where we were when we began this family of tests
# if not cls._cwd == os.getcwd() and cls._chdir_counter > 0:
# os.chdir(cls._cwd)
# print('\nWARNING: A misbehaving test has modified the working directory!\nThe test suite has reset the working directory '
# 'on tearDown() to {0}\n'.format(cls._cwd))
# cls._chdir_counter += 1
# pylint: enable=expected-an-indented-block-comment,too-many-leading-hastag-for-block-comment
def run(self, result=None):
self._prerun_instance_attributes = dir(self)
self.maxDiff = None
outcome = super(TestCase, self).run(result=result)
for attr in dir(self):
if attr == "_prerun_instance_attributes":
continue
if attr in getattr(self.__class__, "_prerun_class_attributes", ()):
continue
if attr not in self._prerun_instance_attributes:
attr_value = getattr(self, attr, None)
if attr_value is None:
continue
if isinstance(
attr_value, (bool,) + six.string_types + six.integer_types
):
setattr(self, attr, None)
continue
log.warning(
"Deleting extra class attribute after test run: %s.%s(%s). "
"Please consider using 'del self.%s' on the test case "
"'tearDown()' method",
self.__class__.__name__,
attr,
getattr(self, attr),
attr,
)
delattr(self, attr)
self._prerun_instance_attributes = None
del self._prerun_instance_attributes
return outcome
def shortDescription(self):
desc = _TestCase.shortDescription(self)
if HAS_PSUTIL and SHOW_PROC:
show_zombie_processes = "SHOW_PROC_ZOMBIES" in os.environ
proc_info = "[CPU:{0}%|MEM:{1}%".format(
psutil.cpu_percent(), psutil.virtual_memory().percent
)
if show_zombie_processes:
found_zombies = 0
try:
for proc in psutil.process_iter():
if proc.status == psutil.STATUS_ZOMBIE:
found_zombies += 1
except Exception: # pylint: disable=broad-except
pass
proc_info += "|Z:{0}".format(found_zombies)
proc_info += "] {short_desc}".format(short_desc=desc if desc else "")
return proc_info
else:
return _TestCase.shortDescription(self)
def assertEquals(self, *args, **kwargs):
raise DeprecationWarning(
"The {0}() function is deprecated. Please start using {1}() "
"instead.".format("assertEquals", "assertEqual")
)
# return _TestCase.assertEquals(self, *args, **kwargs)
def assertNotEquals(self, *args, **kwargs):
raise DeprecationWarning(
"The {0}() function is deprecated. Please start using {1}() "
"instead.".format("assertNotEquals", "assertNotEqual")
)
# return _TestCase.assertNotEquals(self, *args, **kwargs)
def assert_(self, *args, **kwargs):
# The unittest2 library uses this deprecated method, we can't raise
# the exception.
raise DeprecationWarning(
"The {0}() function is deprecated. Please start using {1}() "
"instead.".format("assert_", "assertTrue")
)
# return _TestCase.assert_(self, *args, **kwargs)
def assertAlmostEquals(self, *args, **kwargs):
raise DeprecationWarning(
"The {0}() function is deprecated. Please start using {1}() "
"instead.".format("assertAlmostEquals", "assertAlmostEqual")
)
# return _TestCase.assertAlmostEquals(self, *args, **kwargs)
def assertNotAlmostEquals(self, *args, **kwargs):
raise DeprecationWarning(
"The {0}() function is deprecated. Please start using {1}() "
"instead.".format("assertNotAlmostEquals", "assertNotAlmostEqual")
)
# return _TestCase.assertNotAlmostEquals(self, *args, **kwargs)
def repack_state_returns(self, state_ret):
"""
Accepts a state return dict and returns it back with the top level key
names rewritten such that the ID declaration is the key instead of the
State's unique tag. For example: 'foo' instead of
        'file_|-foo_|-/etc/foo.conf_|-managed'
This makes it easier to work with state returns when crafting asserts
after running states.
"""
assert isinstance(state_ret, dict), state_ret
return {x.split("_|-")[1]: y for x, y in six.iteritems(state_ret)}
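    # repack_state_returns sketch: {"file_|-foo_|-/etc/foo.conf_|-managed": {...}}
    # becomes {"foo": {...}} after repacking.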
def failUnlessEqual(self, *args, **kwargs):
raise DeprecationWarning(
"The {0}() function is deprecated. Please start using {1}() "
"instead.".format("failUnlessEqual", "assertEqual")
)
# return _TestCase.failUnlessEqual(self, *args, **kwargs)
def failIfEqual(self, *args, **kwargs):
raise DeprecationWarning(
"The {0}() function is deprecated. Please start using {1}() "
"instead.".format("failIfEqual", "assertNotEqual")
)
# return _TestCase.failIfEqual(self, *args, **kwargs)
def failUnless(self, *args, **kwargs):
raise DeprecationWarning(
"The {0}() function is deprecated. Please start using {1}() "
"instead.".format("failUnless", "assertTrue")
)
# return _TestCase.failUnless(self, *args, **kwargs)
def failIf(self, *args, **kwargs):
raise DeprecationWarning(
"The {0}() function is deprecated. Please start using {1}() "
"instead.".format("failIf", "assertFalse")
)
# return _TestCase.failIf(self, *args, **kwargs)
def failUnlessRaises(self, *args, **kwargs):
raise DeprecationWarning(
"The {0}() function is deprecated. Please start using {1}() "
"instead.".format("failUnlessRaises", "assertRaises")
)
# return _TestCase.failUnlessRaises(self, *args, **kwargs)
def failUnlessAlmostEqual(self, *args, **kwargs):
raise DeprecationWarning(
"The {0}() function is deprecated. Please start using {1}() "
"instead.".format("failUnlessAlmostEqual", "assertAlmostEqual")
)
# return _TestCase.failUnlessAlmostEqual(self, *args, **kwargs)
def failIfAlmostEqual(self, *args, **kwargs):
raise DeprecationWarning(
"The {0}() function is deprecated. Please start using {1}() "
"instead.".format("failIfAlmostEqual", "assertNotAlmostEqual")
)
# return _TestCase.failIfAlmostEqual(self, *args, **kwargs)
@staticmethod
def assert_called_once(mock):
"""
        mock.assert_called_once only exists on Python 3.6 and newer
"""
try:
mock.assert_called_once()
except AttributeError:
log.warning("assert_called_once invoked, but not available")
if six.PY2:
def assertRegexpMatches(self, *args, **kwds):
raise DeprecationWarning(
"The {0}() function will be deprecated in python 3. Please start "
"using {1}() instead.".format("assertRegexpMatches", "assertRegex")
)
def assertRegex(
self, text, regex, msg=None
): # pylint: disable=arguments-differ
# In python 2, alias to the future python 3 function
return _TestCase.assertRegexpMatches(self, text, regex, msg=msg)
def assertNotRegexpMatches(self, *args, **kwds):
raise DeprecationWarning(
"The {0}() function will be deprecated in python 3. Please start "
"using {1}() instead.".format(
"assertNotRegexpMatches", "assertNotRegex"
)
)
def assertNotRegex(
self, text, regex, msg=None
): # pylint: disable=arguments-differ
# In python 2, alias to the future python 3 function
return _TestCase.assertNotRegexpMatches(self, text, regex, msg=msg)
def assertRaisesRegexp(self, *args, **kwds):
raise DeprecationWarning(
"The {0}() function will be deprecated in python 3. Please start "
"using {1}() instead.".format("assertRaisesRegexp", "assertRaisesRegex")
)
def assertRaisesRegex(
self, exception, regexp, *args, **kwds
): # pylint: disable=arguments-differ
# In python 2, alias to the future python 3 function
return _TestCase.assertRaisesRegexp(self, exception, regexp, *args, **kwds)
else:
def assertRegexpMatches(self, *args, **kwds):
raise DeprecationWarning(
"The {0}() function is deprecated. Please start using {1}() "
"instead.".format("assertRegexpMatches", "assertRegex")
)
def assertNotRegexpMatches(self, *args, **kwds):
raise DeprecationWarning(
"The {0}() function is deprecated. Please start using {1}() "
"instead.".format("assertNotRegexpMatches", "assertNotRegex")
)
def assertRaisesRegexp(self, *args, **kwds):
raise DeprecationWarning(
"The {0}() function is deprecated. Please start using {1}() "
"instead.".format("assertRaisesRegexp", "assertRaisesRegex")
)
class TextTestResult(_TextTestResult):
"""
    Custom TestResult class which logs the start and the end of a test
"""
def startTest(self, test):
log.debug(">>>>> START >>>>> {0}".format(test.id()))
return super(TextTestResult, self).startTest(test)
def stopTest(self, test):
log.debug("<<<<< END <<<<<<< {0}".format(test.id()))
return super(TextTestResult, self).stopTest(test)
class TextTestRunner(_TextTestRunner):
"""
Custom Text tests runner to log the start and the end of a test case
"""
resultclass = TextTestResult
__all__ = [
"TestLoader",
"TextTestRunner",
"TestCase",
"expectedFailure",
"TestSuite",
"skipIf",
"TestResult",
]
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
examples/upload-photo/main.go
|
package main
import (
"log"
"net/http"
"os"
"github.com/brunocruzsilva/goinsta"
)
func main() {
insta := goinsta.New(
os.Getenv("INSTAGRAM_USERNAME"),
os.Getenv("INSTAGRAM_PASSWORD"),
)
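	// Credentials are read from the environment; an illustrative run:
	//   INSTAGRAM_USERNAME=alice INSTAGRAM_PASSWORD=s3cret go run main.go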
if err := insta.Login(); err != nil {
log.Fatal(err)
}
defer insta.Logout()
log.Println("Download random photo")
var client http.Client
request, err := http.NewRequest("GET", "https://picsum.photos/800/800", nil)
if err != nil {
log.Fatal(err)
}
resp, err := client.Do(request)
if err != nil {
log.Fatal(err)
}
defer resp.Body.Close()
postedPhoto, err := insta.UploadPhoto(resp.Body, "awesome! :)", 1, 1)
if err != nil {
log.Fatal(err)
}
log.Printf("Success upload photo %s", postedPhoto.ID)
}
|
[
"\"INSTAGRAM_USERNAME\"",
"\"INSTAGRAM_PASSWORD\""
] |
[] |
[
"INSTAGRAM_PASSWORD",
"INSTAGRAM_USERNAME"
] |
[]
|
["INSTAGRAM_PASSWORD", "INSTAGRAM_USERNAME"]
|
go
| 2 | 0 | |
py/untitled1.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 23 17:28:19 2020
@author: jrminter
"""
from mpl_toolkits.basemap import Basemap
from netCDF4 import Dataset, date2index
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
import os
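# PROJ_LIB must point at the proj data directory of the active environment;
# the path below is specific to this machine's miniconda3 install.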
os.environ['PROJ_LIB'] = '/Users/jrminter/miniconda3/share/proj'
date = datetime(2007,12,15,0) # date to plot.
# open dataset.
dataset = \
Dataset('https://www.ncdc.noaa.gov/thredds/dodsC/OISST-V2-AVHRR_agg')
timevar = dataset.variables['time']
timeindex = date2index(date,timevar) # find time index for desired date.
# read sst. Will automatically create a masked array using
# missing_value variable attribute. 'squeeze out' singleton dimensions.
"""
sst = dataset.variables['sst'][timeindex,:].squeeze()
# read ice.
ice = dataset.variables['ice'][timeindex,:].squeeze()
# read lats and lons (representing centers of grid boxes).
lats = dataset.variables['lat'][:]
lons = dataset.variables['lon'][:]
lons, lats = np.meshgrid(lons,lats)
# create figure, axes instances.
fig = plt.figure()
ax = fig.add_axes([0.05,0.05,0.9,0.9])
# create Basemap instance.
# coastlines not used, so resolution set to None to skip
# continent processing (this speeds things up a bit)
m = Basemap(projection='kav7',lon_0=0,resolution=None)
# draw line around map projection limb.
# color background of map projection region.
# missing values over land will show up this color.
m.drawmapboundary(fill_color='0.3')
# plot sst, then ice with pcolor
im1 = m.pcolormesh(lons,lats,sst,shading='flat',cmap=plt.cm.jet,latlon=True)
im2 = m.pcolormesh(lons,lats,ice,shading='flat',cmap=plt.cm.gist_gray,latlon=True)
# draw parallels and meridians, but don't bother labelling them.
m.drawparallels(np.arange(-90.,99.,30.))
m.drawmeridians(np.arange(-180.,180.,60.))
# add colorbar
cb = m.colorbar(im1,"bottom", size="5%", pad="2%")
# add a title.
ax.set_title('SST and ICE analysis for %s'%date)
plt.show()
"""
|
[] |
[] |
[
"PROJ_LIB"
] |
[]
|
["PROJ_LIB"]
|
python
| 1 | 0 | |
vendor/github.com/ulikunitz/xz/cmd/xb/cat.go
|
// Copyright 2014-2016 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"text/template"
)
const catUsageString = `xb cat [options] <id>:<path>...
This xb command puts the contents of the files given as relative paths to
the GOPATH variable as string constants into a go file.
-h prints this message and exits
-p package name (default main)
-o file name of output
`
func catUsage(w io.Writer) {
fmt.Fprint(w, catUsageString)
}
type gopath struct {
p []string
i int
}
func newGopath() *gopath {
p := strings.Split(os.Getenv("GOPATH"), ":")
return &gopath{p: p}
}
type cpair struct {
id string
path string
}
func (p cpair) Read() (s string, err error) {
var r io.ReadCloser
if p.path == "-" {
r = os.Stdin
} else {
if r, err = os.Open(p.path); err != nil {
return
}
}
defer func() {
err = r.Close()
}()
b, err := ioutil.ReadAll(r)
if err != nil {
return
}
s = string(b)
return
}
func verifyPath(path string) error {
fi, err := os.Stat(path)
if err != nil {
return err
}
if !fi.Mode().IsRegular() {
return fmt.Errorf("%s is not a regular file", path)
}
return nil
}
func (gp *gopath) find(arg string) (p cpair, err error) {
t := strings.SplitN(arg, ":", 2)
switch len(t) {
case 0:
err = fmt.Errorf("empty argument not supported")
return
case 1:
gp.i++
p = cpair{fmt.Sprintf("gocat%d", gp.i), t[0]}
case 2:
p = cpair{t[0], t[1]}
}
if p.path == "-" {
return
}
// substitute first ~ by $HOME
p.path = strings.Replace(p.path, "~", os.Getenv("HOME"), 1)
paths := make([]string, 0, len(gp.p)+1)
if filepath.IsAbs(p.path) {
paths = append(paths, filepath.Clean(p.path))
} else {
for _, q := range gp.p {
u := filepath.Join(q, "src", p.path)
paths = append(paths, filepath.Clean(u))
}
u := filepath.Join(".", p.path)
paths = append(paths, filepath.Clean(u))
}
for _, u := range paths {
if err = verifyPath(u); err != nil {
if os.IsNotExist(err) {
continue
}
return
}
p.path = u
return
}
err = fmt.Errorf("file %s not found", p.path)
return
}
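// Illustrative resolution (assumed values): with GOPATH=/home/u/go, the
// argument "readme:github.com/ulikunitz/xz/README.md" yields the pair
// {id: "readme", path: "/home/u/go/src/github.com/ulikunitz/xz/README.md"},
// provided that file exists.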
// Gofile is used with the template gofileTmpl.
type Gofile struct {
Pkg string
Cmap map[string]string
}
var gofileTmpl = `package {{.Pkg}}
{{range $k, $v := .Cmap}}const {{$k}} = ` + "`{{$v}}`\n{{end}}"
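// With Pkg set to "main" and Cmap {"gocat1": "hello"}, the template above
// renders (sketch):
//
//	package main
//	const gocat1 = `hello`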
func cat() {
var err error
cmdName := filepath.Base(os.Args[0])
log.SetPrefix(fmt.Sprintf("%s: ", cmdName))
log.SetFlags(0)
flag.CommandLine = flag.NewFlagSet(cmdName, flag.ExitOnError)
flag.Usage = func() { catUsage(os.Stderr); os.Exit(1) }
help := flag.Bool("h", false, "")
pkg := flag.String("p", "main", "")
out := flag.String("o", "", "")
flag.Parse()
if *help {
catUsage(os.Stdout)
os.Exit(0)
}
if *pkg == "" {
log.Fatal("option -p must not be empty")
}
w := os.Stdout
if *out != "" {
if w, err = os.Create(*out); err != nil {
log.Fatal(err)
}
}
gp := newGopath()
gofile := Gofile{
Pkg: *pkg,
Cmap: make(map[string]string, len(flag.Args())),
}
for _, arg := range flag.Args() {
p, err := gp.find(arg)
if err != nil {
log.Print(err)
continue
}
s, err := p.Read()
if err != nil {
log.Print(err)
continue
}
gofile.Cmap[p.id] = s
}
tmpl, err := template.New("gofile").Parse(gofileTmpl)
if err != nil {
log.Panicf("goFileTmpl error %s", err)
}
if err = tmpl.Execute(w, gofile); err != nil {
log.Fatal(err)
}
os.Exit(0)
}
|
[
"\"GOPATH\"",
"\"HOME\""
] |
[] |
[
"GOPATH",
"HOME"
] |
[]
|
["GOPATH", "HOME"]
|
go
| 2 | 0 | |
awstesting/performance/logging.go
|
// +build testing
// Package performance contains shared step definitions that are used for performance testing
package performance
import (
"errors"
"fmt"
"os"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
)
// benchmarkLogger handles all benchmark logging
type benchmarkLogger struct {
outputer
}
// logger interface that handles any logging to an output
type logger interface {
log(key string, data map[string]interface{}) error
}
// newBenchmarkLogger initializes the logger and uses dependency injection
// for the outputer
func newBenchmarkLogger(output string) (*benchmarkLogger, error) {
b := &benchmarkLogger{}
switch output {
case "dynamodb":
region := os.Getenv("AWS_TESTING_REGION")
if region == "" {
return b, errors.New("No region specified. Please export AWS_TESTING_REGION")
}
table := os.Getenv("AWS_TESTING_DB_TABLE")
if table == "" {
return b, errors.New("No table specified. Please export AWS_TESTING_DB_TABLE")
}
b.outputer = newDynamodbOut(table, region)
case "stdout":
b.outputer = stdout{}
default:
return b, errors.New("Unsupported outputer")
}
return b, nil
}
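// Minimal usage sketch (names from this file; values illustrative):
//
//	bl, err := newBenchmarkLogger("stdout")
//	if err == nil {
//		_ = bl.log("run", map[string]interface{}{"ops": 100})
//	}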
type record struct {
Key string
Data interface{}
}
// log builds a data structure from the key and data and passes it
// into the configured output formatter
func (b benchmarkLogger) log(key, data interface{}) error {
formatData := record{
Key: fmt.Sprintf("%d-%v", time.Now().Unix(), key.(string)),
Data: data,
}
return b.output(formatData)
}
// outputer is a simple interface that'll handle output
// to whatever system like dynamodb or stdout
type outputer interface {
output(record) error
}
// dynamodbOut handles simple writes to dynamodb
type dynamodbOut struct {
table string // table to write to in dynamodb
region string
db *dynamodb.DynamoDB // the dynamodb session
}
// newDynamodbOut initializes a dynamodbOut with a new session
func newDynamodbOut(table, region string) *dynamodbOut {
out := dynamodbOut{
table: table,
region: region,
}
out.db = dynamodb.New(
session.New(),
&aws.Config{Region: &out.region},
)
return &out
}
// output just writes to dynamodb
func (out dynamodbOut) output(data record) error {
input := &dynamodb.PutItemInput{
TableName: aws.String(out.table),
}
item, err := dynamodbattribute.ConvertToMap(data)
if err != nil {
return err
}
input.Item = item
_, err = out.db.PutItem(input)
return err
}
// stdout handles writes to stdout
type stdout struct{}
// output expects key value data to print to stdout
func (out stdout) output(data record) error {
item, err := dynamodbattribute.ConvertToMap(data.Data)
if err != nil {
return err
}
fmt.Println(item)
return nil
}
|
[
"\"AWS_TESTING_REGION\"",
"\"AWS_TESTING_DB_TABLE\""
] |
[] |
[
"AWS_TESTING_REGION",
"AWS_TESTING_DB_TABLE"
] |
[]
|
["AWS_TESTING_REGION", "AWS_TESTING_DB_TABLE"]
|
go
| 2 | 0 | |
helpers/database/conf.go
|
package database
import (
"context"
"database/sql"
"fmt"
"net/http"
"os"
"github.com/GoogleCloudPlatform/cloudsql-proxy/proxy/dialers/mysql"
"github.com/GoogleCloudPlatform/cloudsql-proxy/proxy/proxy"
goauth "golang.org/x/oauth2/google"
)
var (
DB *sql.DB
Driver = "mysql"
)
func Init() error {
var err error
if DB == nil {
if os.Getenv("DATABASE_INSTANCE") == "" {
DB, err = sql.Open(Driver, connectionString())
} else {
			// Assign with '=' so the outer err is used; ':=' would shadow it
			// and silently drop the mysql.DialCfg error checked below.
			var client *http.Client
			client, err = clientFromCredentials()
if err != nil {
return err
}
proxy.Init(client, nil, nil)
cfg := mysql.Cfg(os.Getenv("DATABASE_INSTANCE"), os.Getenv("DATABASE_USERNAME"), os.Getenv("DATABASE_PASSWORD"))
cfg.DBName = os.Getenv("CURT_DEV_NAME")
cfg.ParseTime = true
DB, err = mysql.DialCfg(cfg)
}
if err != nil {
return err
}
}
return nil
}
func connectionString() string {
if addr := os.Getenv("DATABASE_HOST"); addr != "" {
proto := os.Getenv("DATABASE_PROTOCOL")
user := os.Getenv("DATABASE_USERNAME")
pass := os.Getenv("DATABASE_PASSWORD")
databaseName := os.Getenv("CURT_DEV_NAME")
if databaseName == "" {
databaseName = "CurtData"
}
return fmt.Sprintf("%s:%s@%s(%s)/%s?parseTime=true&loc=%s", user, pass, proto, addr, databaseName, "America%2FChicago")
}
return "root:@tcp(127.0.0.1:3306)/CurtData?parseTime=true&loc=America%2FChicago"
}
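// connectionString produces a DSN of the form (illustrative values):
//   user:pass@tcp(db.example.com:3306)/CurtData?parseTime=true&loc=America%2FChicago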
func clientFromCredentials() (*http.Client, error) {
const SQLScope = "https://www.googleapis.com/auth/sqlservice.admin"
ctx := context.Background()
var client *http.Client
cfg, err := goauth.JWTConfigFromJSON([]byte(os.Getenv("DATABASE_TOKEN")), SQLScope)
if err != nil {
return nil, fmt.Errorf("invalid json file: %v", err)
}
client = cfg.Client(ctx)
return client, nil
}
|
[
"\"DATABASE_INSTANCE\"",
"\"DATABASE_INSTANCE\"",
"\"DATABASE_USERNAME\"",
"\"DATABASE_PASSWORD\"",
"\"CURT_DEV_NAME\"",
"\"DATABASE_HOST\"",
"\"DATABASE_PROTOCOL\"",
"\"DATABASE_USERNAME\"",
"\"DATABASE_PASSWORD\"",
"\"CURT_DEV_NAME\"",
"\"DATABASE_TOKEN\""
] |
[] |
[
"DATABASE_PROTOCOL",
"DATABASE_TOKEN",
"DATABASE_PASSWORD",
"DATABASE_HOST",
"CURT_DEV_NAME",
"DATABASE_USERNAME",
"DATABASE_INSTANCE"
] |
[]
|
["DATABASE_PROTOCOL", "DATABASE_TOKEN", "DATABASE_PASSWORD", "DATABASE_HOST", "CURT_DEV_NAME", "DATABASE_USERNAME", "DATABASE_INSTANCE"]
|
go
| 7 | 0 | |
lib/grimoirelab/github.go
|
package grimoirelab
import (
"encoding/json"
"fmt"
"io"
"net/url"
"os"
"regexp"
"github.com/philips-labs/tabia/lib/github"
)
// GithubMetadataFactory allows to provide a custom generated metadata
type GithubMetadataFactory func(repo github.Repository) Metadata
// GithubProjectMatcher matches a repository with a project
type GithubProjectMatcher struct {
Rules map[string]GithubProjectMatcherRule `json:"rules,omitempty"`
}
// GithubProjectMatcherRule rule that matches a repository to a project
type GithubProjectMatcherRule struct {
URL *Regexp `json:"url,omitempty"`
}
// Regexp embeds a regexp.Regexp, and adds Text/JSON
// (un)marshaling.
type Regexp struct {
regexp.Regexp
}
// Compile wraps the result of the standard library's
// regexp.Compile, for easy (un)marshaling.
func Compile(expr string) (*Regexp, error) {
r, err := regexp.Compile(expr)
if err != nil {
return nil, err
}
return &Regexp{*r}, nil
}
// MustCompile wraps the result of the standard library's
// regexp.MustCompile, for easy (un)marshaling.
func MustCompile(expr string) *Regexp {
r := regexp.MustCompile(expr)
return &Regexp{*r}
}
// UnmarshalText satisfies the encoding.TextUnmarshaler interface,
// also used by json.Unmarshal.
func (r *Regexp) UnmarshalText(b []byte) error {
rr, err := Compile(string(b))
if err != nil {
return err
}
*r = *rr
return nil
}
// MarshalText satisfies the encoding.TextMarshaler interface,
// also used by json.Marshal.
func (r *Regexp) MarshalText() ([]byte, error) {
return []byte(r.String()), nil
}
// NewGithubProjectMatcherFromJSON initializes GithubProjectMatcher from json
func NewGithubProjectMatcherFromJSON(data io.Reader) (*GithubProjectMatcher, error) {
var m GithubProjectMatcher
err := json.NewDecoder(data).Decode(&m)
if err != nil {
return nil, err
}
return &m, err
}
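// A rules document (shape follows the JSON tags above; the project name and
// pattern are illustrative) looks like:
//
//	{"rules": {"tabia": {"url": "philips-labs/tabia"}}}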
// ConvertGithubToProjectsJSON converts the repositories into grimoirelab projects.json
func ConvertGithubToProjectsJSON(repos []github.Repository, metadataFactory GithubMetadataFactory, projectMatcher *GithubProjectMatcher) Projects {
results := make(Projects)
	ghUser := os.Getenv("TABIA_GITHUB_USER")
	ghToken := os.Getenv("TABIA_GITHUB_TOKEN")
	basicAuth := fmt.Sprintf("%s:%s", ghUser, ghToken)
for _, repo := range repos {
projectName := getProjectName(repo, projectMatcher)
project, found := results[projectName]
if !found {
results[projectName] = &Project{}
project = results[projectName]
project.Git = make([]string, 0)
}
updateFromGithubProject(project, repo, basicAuth, metadataFactory)
}
return results
}
func getProjectName(repo github.Repository, projectMatcher *GithubProjectMatcher) string {
if projectMatcher != nil {
for k, v := range projectMatcher.Rules {
if v.URL != nil && v.URL.MatchString(repo.URL) {
return k
}
}
}
// fallback to github organization name
return repo.Owner
}
func updateFromGithubProject(project *Project, repo github.Repository, basicAuth string, metadataFactory GithubMetadataFactory) {
project.Metadata = metadataFactory(repo)
link := repo.URL
if link != "" {
if repo.Visibility != github.Public {
u, _ := url.Parse(link)
link = fmt.Sprintf("%s://%s@%s%s", u.Scheme, basicAuth, u.Hostname(), u.EscapedPath())
}
project.Git = append(project.Git, link+".git")
project.Github = append(project.Github, link)
project.GithubRepo = append(project.GithubRepo, link)
}
}
|
[
"\"TABIA_GITHUB_USER\"",
"\"TABIA_GITHUB_TOKEN\""
] |
[] |
[
"TABIA_GITHUB_TOKEN",
"TABIA_GITHUB_USER"
] |
[]
|
["TABIA_GITHUB_TOKEN", "TABIA_GITHUB_USER"]
|
go
| 2 | 0 | |
cmd/analyseError_test.go
|
package cmd
import (
"bytes"
"errors"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"testing"
"github.com/damianoneill/nc-hammer/result"
"github.com/spf13/cobra"
"github.com/stretchr/testify/assert"
)
var myCmd = &cobra.Command{}
type fn func(*cobra.Command, []string)
// helper function to redirect to stdout for xxCmdRun
func CaptureStdout(runFunction fn, command *cobra.Command, args []string) (string, string) {
var buff bytes.Buffer
log.SetFlags(log.Flags() &^ (log.Ldate | log.Ltime))
log.SetOutput(&buff)
//reading from stdout
rescueStdout := os.Stdout
r, w, _ := os.Pipe()
os.Stdout = w
runFunction(command, args)
w.Close()
out, _ := ioutil.ReadAll(r)
os.Stdout = rescueStdout
st := strings.Join(strings.Fields(string(out)), " ") // stdout captured, spaces trimmed
buf := strings.TrimSpace(buff.String()) // logs captured
return st, buf
}
func Test_AnalyseErrorCmdArgs(t *testing.T) {
t.Run("test that a directory is not passed to the command", func(t *testing.T) {
args := []string{}
analyseErrorCmd.Args(myCmd, args)
assert.Equal(t, analyseErrorCmd.Args(myCmd, args), errors.New("error command requires a test results directory as an argument"), "failed")
})
t.Run("test that a directory is passed to the command", func(t *testing.T) {
args := []string{"../results/2018-07-18-19-56-01/"}
analyseErrorCmd.Args(myCmd, args)
assert.Equal(t, analyseErrorCmd.Args(myCmd, args), nil, "failed")
})
}
func Test_AnalyseErrorCmdRun(t *testing.T) {
t.Run("test that a wrong path is passed as arg", func(t *testing.T) {
// pathArgs used to give an error
if os.Getenv("BE_CRASHER") == "1" {
log.SetFlags(log.Flags() &^ (log.Ldate | log.Ltime))
pathArgs := []string{"error/2018-07-18-19-56-01/"}
analyseErrorCmd.Run(myCmd, pathArgs)
return
}
// Start the actual test in a different subprocess
cmd := exec.Command(os.Args[0], "-test.run=Test_AnalyseErrorCmdRun")
cmd.Env = append(os.Environ(), "BE_CRASHER=1")
		stderr, _ := cmd.StderrPipe()
		if err := cmd.Start(); err != nil {
			t.Fatal(err)
		}
		// Check that the log fatal message is what we expected
		gotBytes, _ := ioutil.ReadAll(stderr)
		got := string(gotBytes)
_, _, errReturned := result.UnarchiveResults("error/2018-07-18-19-56-01/") // get err that triggered fatalf
expected := "Problem with loading result information: " + errReturned.Error() + " "
if !strings.HasSuffix(got[:len(got)-1], expected) {
t.Fatalf("Unexpected log message. Got '%s' but should contain '%s'", got[:len(got)-1], expected)
}
// Check that the program exited
err := cmd.Wait()
if e, ok := err.(*exec.ExitError); !ok || e.Success() {
t.Fatalf("Process ran with err %v, want exit status 1", err)
}
})
t.Run("test that a correct path is passed as arg", func(t *testing.T) {
pathArgs := []string{"../suite/testdata/results_test/2018-07-18-19-56-01/"}
_, _, err := result.UnarchiveResults(pathArgs[0])
CaptureStdout(analyseErrorCmd.Run, myCmd, pathArgs)
assert.Nil(t, err)
})
}
func Test_analyseErrors(t *testing.T) {
expectedResults := [][]string{
{"172.26.138.91", "kill-session", "kill-session is not a supported operation"},
{"172.26.138.92", "delete-config", "delete-config is not a supported operation"},
{"172.26.138.93", "kill-session", "kill-session is not a supported operation"},
{"172.26.138.94", "delete-config", "delete-config is not a supported operation"},
}
results, ts, err := result.UnarchiveResults("../suite/testdata/results_test/2018-07-18-19-56-01/")
if err != nil {
t.Error(err)
}
var errors [][]string
for i := range results {
if results[i].Err != "" {
errors = append(errors, []string{results[i].Hostname, results[i].Operation, results[i].Err})
}
}
var buff bytes.Buffer
log.SetFlags(log.Flags() &^ (log.Ldate | log.Ltime))
log.SetOutput(&buff)
//reading from stdout
rescueStdout := os.Stdout
r, w, _ := os.Pipe()
os.Stdout = w
analyseErrors(myCmd, ts, results)
w.Close()
out, _ := ioutil.ReadAll(r)
os.Stdout = rescueStdout
have := strings.Join(strings.Fields(string(out)), " ") // stdout captured and trim spaces
assert.Contains(t, have, "HOSTNAME OPERATION MESSAGE ID ERROR")
for _, expectedError := range expectedResults {
errorsTostring := strings.Join(expectedError, " ")
assert.Contains(t, have, errorsTostring) // check errors are printed to stdout
}
got := strings.TrimSpace(buff.String())
errLen := strconv.Itoa(len(errors))
want := strings.TrimSpace("Testsuite executed at " + strings.Split(ts.File, string(filepath.Separator))[1] +
"\n" + "Total Number of Errors for suite: " + errLen)
if got != want {
t.Errorf("wanted, '%s', but got '%s'", want, got)
}
}
|
[
"\"BE_CRASHER\""
] |
[] |
[
"BE_CRASHER"
] |
[]
|
["BE_CRASHER"]
|
go
| 1 | 0 | |
libbeat/tests/system/beat/beat.py
|
import subprocess
import jinja2
import unittest
import os
import shutil
import json
import signal
import sys
import time
import yaml
from datetime import datetime, timedelta
from .compose import ComposeMixin
BEAT_REQUIRED_FIELDS = ["@timestamp",
"beat.name", "beat.hostname", "beat.version"]
INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False)
class TimeoutError(Exception):
pass
class Proc(object):
"""
Slim wrapper on subprocess.Popen that redirects
both stdout and stderr to a file on disk and makes
sure to stop the process and close the output file when
the object gets collected.
"""
def __init__(self, args, outputfile):
self.args = args
self.output = open(outputfile, "ab")
self.stdin_read, self.stdin_write = os.pipe()
def start(self):
if sys.platform.startswith("win"):
self.proc = subprocess.Popen(
self.args,
stdin=self.stdin_read,
stdout=self.output,
stderr=subprocess.STDOUT,
bufsize=0,
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
else:
self.proc = subprocess.Popen(
self.args,
stdin=self.stdin_read,
stdout=self.output,
stderr=subprocess.STDOUT,
bufsize=0,
)
return self.proc
def kill(self):
if sys.platform.startswith("win"):
            # proc.terminate on Windows does not initiate a graceful shutdown
            # through the process's signal handlers, it just kills it hard. So
            # this sends a SIGBREAK instead. You cannot send a SIGINT
            # (CTRL_C_EVENT) to a process group on Windows without Ctrl+C
            # also being sent to every process in it.
self.proc.send_signal(signal.CTRL_BREAK_EVENT)
else:
self.proc.terminate()
def wait(self):
try:
return self.proc.wait()
finally:
self.output.close()
def check_wait(self, exit_code=0):
actual_exit_code = self.wait()
assert actual_exit_code == exit_code, "Expected exit code to be %d, but it was %d" % (
exit_code, actual_exit_code)
return actual_exit_code
def kill_and_wait(self):
self.kill()
os.close(self.stdin_write)
return self.wait()
def check_kill_and_wait(self, exit_code=0):
self.kill()
os.close(self.stdin_write)
return self.check_wait(exit_code=exit_code)
def __del__(self):
# Ensure the process is stopped.
try:
self.proc.terminate()
self.proc.kill()
except:
pass
# Ensure the output is closed.
try:
self.output.close()
except:
pass
class TestCase(unittest.TestCase, ComposeMixin):
@classmethod
def setUpClass(self):
# Path to test binary
if not hasattr(self, 'beat_name'):
self.beat_name = "beat"
if not hasattr(self, 'beat_path'):
self.beat_path = "."
# Path to test binary
if not hasattr(self, 'test_binary'):
self.test_binary = os.path.abspath(self.beat_path + "/" + self.beat_name + ".test")
# Create build path
build_dir = self.beat_path + "/build"
self.build_path = build_dir + "/system-tests/"
# Start the containers needed to run these tests
self.compose_up()
@classmethod
def tearDownClass(self):
self.compose_down()
def run_beat(self,
cmd=None,
config=None,
output=None,
logging_args=["-e", "-v", "-d", "*"],
extra_args=[],
exit_code=None):
"""
Executes beat.
Waits for the process to finish before returning to
the caller.
"""
proc = self.start_beat(cmd=cmd, config=config, output=output,
logging_args=logging_args,
extra_args=extra_args)
if exit_code != None:
return proc.check_wait(exit_code)
return proc.wait()
def start_beat(self,
cmd=None,
config=None,
output=None,
logging_args=["-e", "-v", "-d", "*"],
extra_args=[]):
"""
Starts beat and returns the process handle. The
caller is responsible for stopping / waiting for the
Proc instance.
"""
# Init defaults
if cmd is None:
cmd = self.test_binary
if config is None:
config = self.beat_name + ".yml"
if output is None:
output = self.beat_name + ".log"
args = [cmd,
"-systemTest",
"-test.coverprofile",
os.path.join(self.working_dir, "coverage.cov"),
"-path.home", os.path.normpath(self.working_dir),
"-c", os.path.join(self.working_dir, config),
]
if logging_args:
args.extend(logging_args)
if extra_args:
args.extend(extra_args)
proc = Proc(args, os.path.join(self.working_dir, output))
proc.start()
return proc
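    # A typical test flow (sketch, using methods defined in this class):
    #   self.render_config_template()
    #   proc = self.start_beat()
    #   self.wait_until(lambda: self.output_lines() > 0)
    #   proc.check_kill_and_wait()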
def render_config_template(self, template_name=None,
output=None, **kargs):
# Init defaults
if template_name is None:
template_name = self.beat_name
template_path = os.path.join("./tests/system/config", template_name + ".yml.j2")
if output is None:
output = self.beat_name + ".yml"
template = self.template_env.get_template(template_path)
kargs["beat"] = self
output_str = template.render(**kargs)
output_path = os.path.join(self.working_dir, output)
with open(output_path, "wb") as f:
            os.chmod(output_path, 0o600)
f.write(output_str.encode('utf8'))
# Returns output as JSON object with flattened fields (. notation)
def read_output(self,
output_file=None,
required_fields=None):
# Init defaults
if output_file is None:
output_file = "output/" + self.beat_name
jsons = []
with open(os.path.join(self.working_dir, output_file), "r") as f:
for line in f:
if len(line) == 0 or line[len(line) - 1] != "\n":
# hit EOF
break
try:
jsons.append(self.flatten_object(json.loads(
line, object_pairs_hook=self.json_raise_on_duplicates), []))
except:
print("Fail to load the json {}".format(line))
raise
self.all_have_fields(jsons, required_fields or BEAT_REQUIRED_FIELDS)
return jsons
# Returns output as JSON object
def read_output_json(self, output_file=None):
# Init defaults
if output_file is None:
output_file = "output/" + self.beat_name
jsons = []
with open(os.path.join(self.working_dir, output_file), "r") as f:
for line in f:
if len(line) == 0 or line[len(line) - 1] != "\n":
# hit EOF
break
event = json.loads(line, object_pairs_hook=self.json_raise_on_duplicates)
del event['@metadata']
jsons.append(event)
return jsons
def json_raise_on_duplicates(self, ordered_pairs):
"""Reject duplicate keys. To be used as a custom hook in JSON unmarshaling
to error out in case of any duplicates in the keys."""
d = {}
for k, v in ordered_pairs:
if k in d:
raise ValueError("duplicate key: %r" % (k,))
else:
d[k] = v
return d
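    # json_raise_on_duplicates example: json.loads('{"a": 1, "a": 2}',
    # object_pairs_hook=self.json_raise_on_duplicates) raises ValueError.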
def setUp(self):
self.template_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(self.beat_path)
)
# create working dir
self.working_dir = os.path.abspath(os.path.join(
self.build_path + "run", self.id()))
if os.path.exists(self.working_dir):
shutil.rmtree(self.working_dir)
os.makedirs(self.working_dir)
fields_yml = os.path.join(self.beat_path, "fields.yml")
# Only add it if it exists
if os.path.isfile(fields_yml):
shutil.copyfile(fields_yml, os.path.join(self.working_dir, "fields.yml"))
try:
# update the last_run link
if os.path.islink(self.build_path + "last_run"):
os.unlink(self.build_path + "last_run")
os.symlink(self.build_path + "run/{}".format(self.id()),
self.build_path + "last_run")
except:
# symlink is best effort and can fail when
# running tests in parallel
pass
def wait_until(self, cond, max_timeout=10, poll_interval=0.1, name="cond"):
"""
Waits until the cond function returns true,
or until the max_timeout is reached. Calls the cond
function every poll_interval seconds.
If the max_timeout is reached before cond() returns
true, an exception is raised.
"""
start = datetime.now()
while not cond():
if datetime.now() - start > timedelta(seconds=max_timeout):
raise TimeoutError("Timeout waiting for '{}' to be true. ".format(name) +
"Waited {} seconds.".format(max_timeout))
time.sleep(poll_interval)
def get_log(self, logfile=None):
"""
Returns the log as a string.
"""
if logfile is None:
logfile = self.beat_name + ".log"
with open(os.path.join(self.working_dir, logfile), 'r') as f:
data = f.read()
return data
def wait_log_contains(self, msg, logfile=None,
max_timeout=10, poll_interval=0.1,
name="log_contains"):
self.wait_until(
cond=lambda: self.log_contains(msg, logfile),
max_timeout=max_timeout,
poll_interval=poll_interval,
name=name)
def log_contains(self, msg, logfile=None):
"""
Returns true if the give logfile contains the given message.
Note that the msg must be present in a single line.
"""
return self.log_contains_count(msg, logfile) > 0
def log_contains_count(self, msg, logfile=None):
"""
Returns the number of appearances of the given string in the log file
"""
counter = 0
# Init defaults
if logfile is None:
logfile = self.beat_name + ".log"
try:
with open(os.path.join(self.working_dir, logfile), "r") as f:
for line in f:
if line.find(msg) >= 0:
counter = counter + 1
except IOError:
counter = -1
return counter
def output_lines(self, output_file=None):
""" Count number of lines in a file."""
if output_file is None:
output_file = "output/" + self.beat_name
try:
with open(os.path.join(self.working_dir, output_file), "r") as f:
return sum([1 for line in f])
except IOError:
return 0
def output_has(self, lines, output_file=None):
"""
Returns true if the output has a given number of lines.
"""
# Init defaults
if output_file is None:
output_file = "output/" + self.beat_name
try:
with open(os.path.join(self.working_dir, output_file), "r") as f:
return len([1 for line in f]) == lines
except IOError:
return False
def output_has_message(self, message, output_file=None):
"""
Returns true if the output has the given message field.
"""
try:
return any(line for line in self.read_output(output_file=output_file, required_fields=["message"])
if line.get("message") == message)
except (IOError, TypeError):
return False
def all_have_fields(self, objs, fields):
"""
Checks that the given list of output objects have
all the given fields.
Raises Exception if not true.
"""
for field in fields:
if not all([field in o for o in objs]):
raise Exception("Not all objects have a '{}' field"
.format(field))
def all_have_only_fields(self, objs, fields):
"""
Checks if the given list of output objects have all
and only the given fields.
Raises Exception if not true.
"""
self.all_have_fields(objs, fields)
self.all_fields_are_expected(objs, fields)
def all_fields_are_expected(self, objs, expected_fields,
dict_fields=[]):
"""
Checks that all fields in the objects are from the
given list of expected fields.
"""
for o in objs:
for key in o.keys():
known = key in dict_fields or key in expected_fields
ismeta = key.startswith('@metadata.')
if not(known or ismeta):
raise Exception("Unexpected key '{}' found"
.format(key))
def load_fields(self, fields_doc=None):
"""
Returns a list of fields to expect in the output dictionaries
and a second list that contains the fields that have a
dictionary type.
Reads these lists from the fields documentation.
"""
if fields_doc is None:
fields_doc = self.beat_path + "/_meta/fields.generated.yml"
def extract_fields(doc_list, name):
fields = []
dictfields = []
if doc_list is None:
return fields, dictfields
for field in doc_list:
# Skip fields without name entry
if "name" not in field:
continue
# Chain together names
if name != "":
newName = name + "." + field["name"]
else:
newName = field["name"]
if field.get("type") == "group":
subfields, subdictfields = extract_fields(field["fields"], newName)
fields.extend(subfields)
dictfields.extend(subdictfields)
else:
fields.append(newName)
if field.get("type") in ["object", "geo_point"]:
dictfields.append(newName)
return fields, dictfields
# Not all beats have a fields.generated.yml. Fall back to fields.yml
if not os.path.isfile(fields_doc):
fields_doc = self.beat_path + "/_meta/fields.yml"
# TODO: Make fields_doc path more generic to work with beat-generator
with open(fields_doc, "r") as f:
path = os.path.abspath(os.path.dirname(__file__) + "../../../../_meta/fields.generated.yml")
if not os.path.isfile(path):
path = os.path.abspath(os.path.dirname(__file__) + "../../../../_meta/fields.common.yml")
with open(path) as f2:
content = f2.read()
#content = "fields:\n"
content += f.read()
doc = yaml.load(content)
fields = []
dictfields = []
for item in doc:
subfields, subdictfields = extract_fields(item["fields"], "")
fields.extend(subfields)
dictfields.extend(subdictfields)
return fields, dictfields
def flatten_object(self, obj, dict_fields, prefix=""):
result = {}
for key, value in obj.items():
if isinstance(value, dict) and prefix + key not in dict_fields:
new_prefix = prefix + key + "."
result.update(self.flatten_object(value, dict_fields,
new_prefix))
else:
result[prefix + key] = value
return result
def copy_files(self, files, source_dir="files/", target_dir=""):
if target_dir:
target_dir = os.path.join(self.working_dir, target_dir)
else:
target_dir = self.working_dir
for file_ in files:
shutil.copy(os.path.join(source_dir, file_),
target_dir)
def output_count(self, pred, output_file=None):
"""
Returns true if the output line count predicate returns true
"""
# Init defaults
if output_file is None:
output_file = "output/" + self.beat_name
try:
with open(os.path.join(self.working_dir, output_file), "r") as f:
return pred(len([1 for line in f]))
except IOError:
return False
def get_elasticsearch_url(self):
"""
Returns an elasticsearch.Elasticsearch instance built from the
env variables like the integration tests.
"""
return "http://{host}:{port}".format(
host=os.getenv("ES_HOST", "localhost"),
port=os.getenv("ES_PORT", "9200"),
)
def get_kibana_url(self):
"""
Returns kibana host URL
"""
return "http://{host}:{port}".format(
host=os.getenv("KIBANA_HOST", "localhost"),
port=os.getenv("KIBANA_PORT", "5601"),
)
|
[] |
[] |
[
"ES_HOST",
"ES_PORT",
"INTEGRATION_TESTS",
"KIBANA_HOST",
"KIBANA_PORT"
] |
[]
|
["ES_HOST", "ES_PORT", "INTEGRATION_TESTS", "KIBANA_HOST", "KIBANA_PORT"]
|
python
| 5 | 0 | |
WebODM-master/worker/celery.py
|
from celery import Celery
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'webodm.settings')
app = Celery('tasks')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.conf.beat_schedule = {
'update-nodes-info': {
'task': 'worker.tasks.update_nodes_info',
'schedule': 30,
'options': {
'expires': 14,
'retry': False
}
},
'cleanup-projects': {
'task': 'worker.tasks.cleanup_projects',
'schedule': 60,
'options': {
'expires': 29,
'retry': False
}
},
'cleanup-tmp-directory': {
'task': 'worker.tasks.cleanup_tmp_directory',
'schedule': 3600,
'options': {
'expires': 1799,
'retry': False
}
},
'process-pending-tasks': {
'task': 'worker.tasks.process_pending_tasks',
'schedule': 5,
'options': {
'expires': 2,
'retry': False
}
},
}
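# Schedule values are in seconds; each task's 'expires' is kept below its
# interval so a missed run is discarded rather than queued up late.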
if __name__ == '__main__':
app.start()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
cli/helpers.go
|
/*
* Copyright (c) 2021 Michael Morris. All Rights Reserved.
*
* Licensed under the MIT license (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* https://github.com/mmmorris1975/aws-runas/blob/master/LICENSE
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
* for the specific language governing permissions and limitations under the License.
*/
package cli
import (
"context"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws/arn"
"github.com/aws/aws-sdk-go-v2/service/sts"
"github.com/aws/smithy-go/logging"
"github.com/dustin/go-humanize"
"github.com/mmmorris1975/aws-runas/client"
"github.com/mmmorris1975/aws-runas/client/external"
"github.com/mmmorris1975/aws-runas/config"
"github.com/mmmorris1975/aws-runas/credentials"
"github.com/mmmorris1975/aws-runas/identity"
"github.com/urfave/cli/v2"
"os"
"os/signal"
"sort"
"syscall"
"time"
)
// if the 1st command line arg doesn't exist, check env vars for profile name
// if a profile env var is found, unset them to avoid messing with the AWS Session setup
// and expose the profile name as a new env var (AWSRUNAS_PROFILE)
//
// returns the name of the profile discovered via the command line or env vars, and the
// resolved AwsConfig object for the discovered profile (or source profile, if requested).
// Error will be returned for a failure of configuration resolution.
func resolveConfig(ctx *cli.Context, expectedArgs int) (string, *config.AwsConfig, error) {
profile := checkProfileArgs(ctx, expectedArgs)
// profile might possibly be omitted from the command line as well, in which case, we'll check the
// environment for the standard AWS env vars for profile values
if len(profile) < 1 {
profile = checkProfileEnv()
}
cfg, err := configResolver.Config(profile)
if err != nil {
return profile, nil, err
}
if len(cfg.MfaType) < 1 {
cfg.MfaType = external.MfaTypeAuto
}
// return config for source profile, if any
if ctx.Bool(sessionFlag.Name) && cfg.SourceProfile() != nil {
jr := cfg.JumpRoleArn
cfg = cfg.SourceProfile()
// use SAML/OIDC jump role setting as the session credentials, if found
if len(jr) > 0 {
cfg.RoleArn = jr
cfg.JumpRoleArn = "" // unset jump role so we get the unwrapped SAML/OIDC client
}
}
if len(profile) > 0 && !arn.IsARN(profile) {
_ = os.Setenv("AWSRUNAS_PROFILE", profile)
}
cfg.MergeIn(cmdlineCfg) // I think this is a good idea??
return profile, cfg, nil
}
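// For example (illustrative): `aws-runas my-profile` takes the profile name
// from the command line, while `AWS_PROFILE=my-profile aws-runas` falls back
// to the environment and re-exports the name as AWSRUNAS_PROFILE.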
func checkProfileArgs(ctx *cli.Context, expectedArgs int) string {
// if we got here via a top-level flag, ctx.Args() could be empty, must check 1 level up via
// ctx.Lineage() for the value
var profile string
if ctx.NArg() >= expectedArgs {
profile = ctx.Args().First()
} else if ctx.NArg() == 0 && len(ctx.Lineage()) > 2 {
next := ctx.Lineage()[1]
if next.NArg() >= expectedArgs {
profile = next.Args().First()
// the 1st arg of the parent context matches our current command name. It's entirely
// possible that someone names a profile the same as the subcommand name, but we'll go
// on the assumption that what really happened is that the profile is coming in via an
// environment variable, and we should return and allow the env var to be used
if profile == ctx.Command.Name {
return ""
}
}
}
return profile
}
// Check for AWS profile env vars if nothing was found on the command line. This must be done because we
// need to know the source profile setting if any of these env vars specify a profile which uses a role.
func checkProfileEnv() string {
profile := os.Getenv("AWS_PROFILE")
if len(profile) < 1 {
profile = os.Getenv("AWS_DEFAULT_PROFILE")
}
// explicitly unset AWS profile env vars so they don't get in the way of AWS Session setup
_ = os.Unsetenv("AWS_PROFILE")
_ = os.Unsetenv("AWS_DEFAULT_PROFILE")
return profile
}
// configure signal handler to make runas ignore (pass through) the below signals.
// used by SSM shell, and 'wrapped' commands to pass signals to the called commands.
// code calling this function should configure a defer function to reset the signal handling, if desired.
func installSignalHandler() chan os.Signal {
sigCh := make(chan os.Signal, 3)
signal.Notify(sigCh, os.Interrupt, syscall.SIGQUIT)
go func() {
for {
sig := <-sigCh
log.Debugf("Got signal: %s", sig.String())
}
}()
return sigCh
}
// we're only clearing the cached AWS STS credentials and any Web Identity Token cache for the profile. Things like
// external IdP session state will not be cleaned up via this process. (Nor do I believe they should be). There is
// a non-zero chance when dealing with external IdP clients that you may need to re-authenticate if your IdP session
// expired. I believe it's more likely to happen with Web Identity clients as fetching identity information is part
// of the client setup.
//
// for things where we don't deal with sts credentials (-l, -r, -u, -D, password sub command), or could possibly
// deal with a lot of them (ec2 and ecs metadata services), this wouldn't make sense to use.
func refreshCreds(c client.AwsClient) {
if err := c.ClearCache(); err != nil {
log.Warningf("failed to clear cache: %v", err)
}
}
func printCredExpiration(creds *credentials.Credentials) {
var msg string
exp := creds.Expiration
if exp.IsZero() {
// honestly, this should _never_ happen, since it goes against the entire reason for this program
msg = "credentials will not expire"
} else {
format := exp.Format("2006-01-02 15:04:05")
hmn := humanize.Time(exp)
tense := "will expire"
if exp.Before(time.Now()) {
// will probably never see this either, since expired creds would likely be refreshed before we get here
tense = "expired"
}
msg = fmt.Sprintf("Credentials %s on %s (%s)", tense, format, hmn)
}
_, _ = fmt.Fprintln(os.Stderr, msg)
}
func printCredIdentity(api identity.StsApi) error {
id, err := api.GetCallerIdentity(context.Background(), new(sts.GetCallerIdentityInput))
if err != nil {
return err
}
idMap := struct {
UserId string
Arn string
Account string
}{
UserId: *id.UserId,
Arn: *id.Arn,
Account: *id.Account,
}
log.Infof("%+v", idMap)
return nil
}
func bashCompleteProfile(ctx *cli.Context) {
if ctx.NArg() > 0 {
return
}
p, err := config.DefaultIniLoader.Profiles()
if err != nil {
log.Debugf("completion error: %v", err)
return
}
var i int
vals := make([]string, len(p))
for k := range p {
vals[i] = k
i++
}
sort.Strings(vals)
for _, v := range vals {
fmt.Println(v)
}
}
var logFunc logging.LoggerFunc = func(c logging.Classification, fmt string, v ...interface{}) {
if log != nil {
switch c {
case logging.Warn:
log.Warningf(fmt, v...)
case logging.Debug:
log.Debugf(fmt, v...)
default:
log.Infof(fmt, v...)
}
}
}
|
[
"\"AWS_PROFILE\"",
"\"AWS_DEFAULT_PROFILE\""
] |
[] |
[
"AWS_DEFAULT_PROFILE",
"AWS_PROFILE"
] |
[]
|
["AWS_DEFAULT_PROFILE", "AWS_PROFILE"]
|
go
| 2 | 0 | |
build/tools/roomservice.py
|
#!/usr/bin/env python
# Copyright (C) 2012-2013, The CyanogenMod Project
# Copyright (C) 2012-2015, SlimRoms Project
# Copyright (C) 2016-2017, AOSiP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import base64
import json
import netrc
import os
import sys
from xml.etree import ElementTree
try:
# For python3
import urllib.error
import urllib.parse
import urllib.request
except ImportError:
# For python2
import imp
import urllib2
import urlparse
urllib = imp.new_module('urllib')
urllib.error = urllib2
urllib.parse = urlparse
urllib.request = urllib2
DEBUG = False
default_manifest = ".repo/manifest.xml"
custom_local_manifest = ".repo/local_manifests/dot_manifest.xml"
custom_default_revision = "dot11"
custom_dependencies = "dot.dependencies"
org_manifest = "dotOS-Devices" # leave empty if org is provided in manifest
org_display = "DotOS-Devices" # needed for displaying
github_auth = None
local_manifests = '.repo/local_manifests'
if not os.path.exists(local_manifests):
os.makedirs(local_manifests)
def debug(*args, **kwargs):
if DEBUG:
print(*args, **kwargs)
def add_auth(g_req):
global github_auth
if github_auth is None:
try:
auth = netrc.netrc().authenticators("api.github.com")
except (netrc.NetrcParseError, IOError):
auth = None
if auth:
github_auth = base64.b64encode(
('%s:%s' % (auth[0], auth[2])).encode()
)
else:
github_auth = ""
if github_auth:
g_req.add_header("Authorization", "Basic %s" % github_auth)
def indent(elem, level=0):
# in-place prettyprint formatter
i = "\n" + " " * level
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def load_manifest(manifest):
try:
man = ElementTree.parse(manifest).getroot()
except (IOError, ElementTree.ParseError):
man = ElementTree.Element("manifest")
return man
def get_default(manifest=None):
m = manifest or load_manifest(default_manifest)
d = m.findall('default')[0]
return d
def get_remote(manifest=None, remote_name=None):
m = manifest or load_manifest(default_manifest)
if not remote_name:
remote_name = get_default(manifest=m).get('remote')
remotes = m.findall('remote')
for remote in remotes:
if remote_name == remote.get('name'):
return remote
def get_from_manifest(device_name):
if os.path.exists(custom_local_manifest):
man = load_manifest(custom_local_manifest)
for local_path in man.findall("project"):
lp = local_path.get("path").strip('/')
if lp.startswith("device/") and lp.endswith("/" + device_name):
return lp
return None
def is_in_manifest(project_path):
for man in (custom_local_manifest, default_manifest):
man = load_manifest(man)
for local_path in man.findall("project"):
if local_path.get("path") == project_path:
return True
return False
def add_to_manifest(repos, fallback_branch=None):
lm = load_manifest(custom_local_manifest)
for repo in repos:
repo_name = repo['repository']
repo_path = repo['target_path']
        if 'branch' in repo:
            repo_branch = repo['branch']
        else:
            repo_branch = custom_default_revision
        if 'remote' in repo:
            repo_remote = repo['remote']
        elif "/" not in repo_name:
            repo_remote = org_manifest
        else:
            repo_remote = "github"
if is_in_manifest(repo_path):
print('already exists: %s' % repo_path)
continue
print('Adding dependency:\nRepository: %s\nBranch: %s\nRemote: %s\nPath: %s\n' % (repo_name, repo_branch,repo_remote, repo_path))
project = ElementTree.Element(
"project",
attrib={"path": repo_path,
"remote": repo_remote,
"name": "%s" % repo_name}
)
if repo_branch is not None:
project.set('revision', repo_branch)
elif fallback_branch:
print("Using branch %s for %s" %
(fallback_branch, repo_name))
project.set('revision', fallback_branch)
else:
print("Using default branch for %s" % repo_name)
if 'clone-depth' in repo:
print("Setting clone-depth to %s for %s" % (repo['clone-depth'], repo_name))
project.set('clone-depth', repo['clone-depth'])
lm.append(project)
indent(lm)
raw_xml = "\n".join(('<?xml version="1.0" encoding="UTF-8"?>',
ElementTree.tostring(lm).decode()))
    with open(custom_local_manifest, 'w') as f:
        f.write(raw_xml)
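# Illustrative sketch (hypothetical repo names): for a repo dict like
#   {'repository': 'android_device_foo_bar', 'target_path': 'device/foo/bar'}
# add_to_manifest() appends a <project> entry to
# .repo/local_manifests/dot_manifest.xml roughly like:
#   <project path="device/foo/bar" remote="dotOS-Devices"
#            name="android_device_foo_bar" revision="dot11" />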
_fetch_dep_cache = []
def fetch_dependencies(repo_path, fallback_branch=None):
global _fetch_dep_cache
if repo_path in _fetch_dep_cache:
return
_fetch_dep_cache.append(repo_path)
print('Looking for dependencies')
dep_p = '/'.join((repo_path, custom_dependencies))
if os.path.exists(dep_p):
with open(dep_p) as dep_f:
dependencies = json.load(dep_f)
else:
dependencies = {}
print('%s has no additional dependencies.' % repo_path)
fetch_list = []
syncable_repos = []
for dependency in dependencies:
if not is_in_manifest(dependency['target_path']):
if not dependency.get('branch'):
dependency['branch'] = custom_default_revision
fetch_list.append(dependency)
syncable_repos.append(dependency['target_path'])
else:
print("Dependency already present in manifest: %s => %s" % (dependency['repository'], dependency['target_path']))
if fetch_list:
print('Adding dependencies to manifest\n')
add_to_manifest(fetch_list, fallback_branch)
if syncable_repos:
print('Syncing dependencies')
os.system('repo sync --force-sync --no-tags --current-branch --no-clone-bundle %s' % ' '.join(syncable_repos))
for deprepo in syncable_repos:
fetch_dependencies(deprepo)
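# Illustrative sketch (hypothetical values): the dot.dependencies file read by
# fetch_dependencies() is a JSON list of repo dicts, e.g.
#   [{"repository": "android_kernel_foo_bar",
#     "target_path": "kernel/foo/bar",
#     "branch": "dot11"}]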
def has_branch(branches, revision):
return revision in (branch['name'] for branch in branches)
def detect_revision(repo):
"""
returns None if using the default revision, else return
the branch name if using a different revision
"""
print("Checking branch info")
githubreq = urllib.request.Request(
repo['branches_url'].replace('{/branch}', ''))
add_auth(githubreq)
result = json.loads(urllib.request.urlopen(githubreq).read().decode())
print("Calculated revision: %s" % custom_default_revision)
if has_branch(result, custom_default_revision):
return custom_default_revision
print("Branch %s not found" % custom_default_revision)
sys.exit()
def main():
global DEBUG
try:
        depsonly = sys.argv[2] in ['true', '1']
except IndexError:
depsonly = False
if os.getenv('ROOMSERVICE_DEBUG'):
DEBUG = True
product = sys.argv[1]
device = product[product.find("_") + 1:] or product
if depsonly:
repo_path = get_from_manifest(device)
if repo_path:
fetch_dependencies(repo_path)
else:
print("Trying dependencies-only mode on a"
"non-existing device tree?")
sys.exit()
print("Device {0} not found. Attempting to retrieve device repository from "
"{1} Github (http://github.com/{1}).".format(device, org_display))
githubreq = urllib.request.Request(
"https://api.github.com/search/repositories?"
"q={0}+user:{1}+in:name+fork:true".format(device, org_display))
add_auth(githubreq)
repositories = []
try:
result = json.loads(urllib.request.urlopen(githubreq).read().decode())
except urllib.error.URLError:
print("Failed to search GitHub")
sys.exit()
except ValueError:
print("Failed to parse return data from GitHub")
sys.exit()
for res in result.get('items', []):
repositories.append(res)
for repository in repositories:
repo_name = repository['name']
if not (repo_name.startswith("android_device_") and
repo_name.endswith("_" + device)):
continue
print("Found repository: %s" % repository['name'])
fallback_branch = detect_revision(repository)
manufacturer = repo_name[7:-(len(device)+1)]
repo_path = "device/%s/%s" % (manufacturer, device)
adding = [{'repository': repo_name, 'target_path': repo_path}]
add_to_manifest(adding, fallback_branch)
print("Syncing repository to retrieve project.")
os.system('repo sync --force-sync --no-tags --current-branch --no-clone-bundle %s' % repo_path)
print("Repository synced!")
fetch_dependencies(repo_path, fallback_branch)
print("Done")
sys.exit()
print("Repository for %s not found in the %s Github repository list."
% (device, org_display))
print("If this is in error, you may need to manually add it to your "
"%s" % custom_local_manifest)
if __name__ == "__main__":
main()
|
[] |
[] |
[
"ROOMSERVICE_DEBUG"
] |
[]
|
["ROOMSERVICE_DEBUG"]
|
python
| 1 | 0 | |
experiments/exp_basic.py
|
import os
import torch
import numpy as np
class Exp_Basic(object):
def __init__(self, args):
self.args = args
self.device = self._acquire_device()
        # move the model to the device selected in _acquire_device (CPU or GPU)
        self.model = self._build_model().to(self.device)
def _build_model(self):
raise NotImplementedError
def _acquire_device(self):
if self.args.use_gpu:
os.environ["CUDA_VISIBLE_DEVICES"] = str(self.args.gpu) if not self.args.use_multi_gpu else self.args.devices
device = torch.device('cuda:{}'.format(self.args.gpu))
print('Use GPU: cuda:{}'.format(self.args.gpu))
else:
device = torch.device('cpu')
print('Use CPU')
return device
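    # Minimal usage sketch (hypothetical subclass and args, for illustration):
    #
    #   class Exp_Linear(Exp_Basic):
    #       def _build_model(self):
    #           return torch.nn.Linear(8, 1)
    #
    #   args = types.SimpleNamespace(use_gpu=False, use_multi_gpu=False,
    #                                gpu=0, devices='0')
    #   exp = Exp_Linear(args)  # model is built on the selected device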
def _get_data(self):
pass
def valid(self):
pass
def train(self):
pass
def test(self):
pass
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
vendor/github.com/hashicorp/consul/command/agent/check.go
|
package agent
import (
"crypto/tls"
"fmt"
"io"
"log"
"net"
"net/http"
"os"
"os/exec"
"sync"
"syscall"
"time"
"github.com/armon/circbuf"
docker "github.com/fsouza/go-dockerclient"
"github.com/hashicorp/consul/consul/structs"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/consul/types"
"github.com/hashicorp/go-cleanhttp"
)
const (
	// Do not allow an interval below this value.
// Otherwise we risk fork bombing a system.
MinInterval = time.Second
// Limit the size of a check's output to the
// last CheckBufSize. Prevents an enormous buffer
// from being captured
CheckBufSize = 4 * 1024 // 4KB
// Use this user agent when doing requests for
// HTTP health checks.
HttpUserAgent = "Consul Health Check"
)
// CheckType is used to create either the CheckMonitor or the CheckTTL.
// Five types are supported: Script, HTTP, TCP, Docker and TTL. Script, HTTP,
// Docker and TCP all require Interval. Only one of the types may be
// provided: TTL or Script/Interval or HTTP/Interval or TCP/Interval or
// Docker/Interval.
type CheckType struct {
Script string
HTTP string
TCP string
Interval time.Duration
DockerContainerID string
Shell string
TLSSkipVerify bool
Timeout time.Duration
TTL time.Duration
// DeregisterCriticalServiceAfter, if >0, will cause the associated
// service, if any, to be deregistered if this check is critical for
// longer than this duration.
DeregisterCriticalServiceAfter time.Duration
Status string
Notes string
}
type CheckTypes []*CheckType
// Valid checks if the CheckType is valid
func (c *CheckType) Valid() bool {
return c.IsTTL() || c.IsMonitor() || c.IsHTTP() || c.IsTCP() || c.IsDocker()
}
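// Illustrative sketch (hypothetical values, not part of the original file):
// an HTTP check definition that Valid() above accepts via IsHTTP().
//
//	httpCheck := &CheckType{
//		HTTP:     "http://127.0.0.1:8500/health",
//		Interval: 30 * time.Second,
//		Timeout:  5 * time.Second,
//	}
//	_ = httpCheck.Valid() // true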
// IsTTL checks if this is a TTL type
func (c *CheckType) IsTTL() bool {
return c.TTL != 0
}
// IsMonitor checks if this is a Monitor type
func (c *CheckType) IsMonitor() bool {
return c.Script != "" && c.DockerContainerID == "" && c.Interval != 0
}
// IsHTTP checks if this is a HTTP type
func (c *CheckType) IsHTTP() bool {
return c.HTTP != "" && c.Interval != 0
}
// IsTCP checks if this is a TCP type
func (c *CheckType) IsTCP() bool {
return c.TCP != "" && c.Interval != 0
}
func (c *CheckType) IsDocker() bool {
return c.DockerContainerID != "" && c.Script != "" && c.Interval != 0
}
// CheckNotifier interface is used by the CheckMonitor
// to notify when a check has a status update. The update
// should take care to be idempotent.
type CheckNotifier interface {
UpdateCheck(checkID types.CheckID, status, output string)
}
// CheckMonitor is used to periodically invoke a script to
// determine the health of a given check. It is compatible with
// nagios plugins and expects the output in the same format.
type CheckMonitor struct {
Notify CheckNotifier
CheckID types.CheckID
Script string
Interval time.Duration
Timeout time.Duration
Logger *log.Logger
stop bool
stopCh chan struct{}
stopLock sync.Mutex
}
// Start is used to start a check monitor.
// Monitor runs until stop is called
func (c *CheckMonitor) Start() {
c.stopLock.Lock()
defer c.stopLock.Unlock()
c.stop = false
c.stopCh = make(chan struct{})
go c.run()
}
// Stop is used to stop a check monitor.
func (c *CheckMonitor) Stop() {
c.stopLock.Lock()
defer c.stopLock.Unlock()
if !c.stop {
c.stop = true
close(c.stopCh)
}
}
// run is invoked by a goroutine to run until Stop() is called
func (c *CheckMonitor) run() {
// Get the randomized initial pause time
initialPauseTime := lib.RandomStagger(c.Interval)
c.Logger.Printf("[DEBUG] agent: pausing %v before first invocation of %s", initialPauseTime, c.Script)
next := time.After(initialPauseTime)
for {
select {
case <-next:
c.check()
next = time.After(c.Interval)
case <-c.stopCh:
return
}
}
}
// check is invoked periodically to perform the script check
func (c *CheckMonitor) check() {
// Create the command
cmd, err := ExecScript(c.Script)
if err != nil {
c.Logger.Printf("[ERR] agent: failed to setup invoke '%s': %s", c.Script, err)
c.Notify.UpdateCheck(c.CheckID, structs.HealthCritical, err.Error())
return
}
// Collect the output
output, _ := circbuf.NewBuffer(CheckBufSize)
cmd.Stdout = output
cmd.Stderr = output
// Start the check
if err := cmd.Start(); err != nil {
c.Logger.Printf("[ERR] agent: failed to invoke '%s': %s", c.Script, err)
c.Notify.UpdateCheck(c.CheckID, structs.HealthCritical, err.Error())
return
}
// Wait for the check to complete
errCh := make(chan error, 2)
go func() {
errCh <- cmd.Wait()
}()
go func() {
if c.Timeout > 0 {
time.Sleep(c.Timeout)
} else {
time.Sleep(30 * time.Second)
}
errCh <- fmt.Errorf("Timed out running check '%s'", c.Script)
}()
err = <-errCh
// Get the output, add a message about truncation
outputStr := string(output.Bytes())
if output.TotalWritten() > output.Size() {
outputStr = fmt.Sprintf("Captured %d of %d bytes\n...\n%s",
output.Size(), output.TotalWritten(), outputStr)
}
c.Logger.Printf("[DEBUG] agent: Check '%s' script '%s' output: %s",
c.CheckID, c.Script, outputStr)
// Check if the check passed
if err == nil {
c.Logger.Printf("[DEBUG] agent: Check '%v' is passing", c.CheckID)
c.Notify.UpdateCheck(c.CheckID, structs.HealthPassing, outputStr)
return
}
// If the exit code is 1, set check as warning
exitErr, ok := err.(*exec.ExitError)
if ok {
if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
code := status.ExitStatus()
if code == 1 {
c.Logger.Printf("[WARN] agent: Check '%v' is now warning", c.CheckID)
c.Notify.UpdateCheck(c.CheckID, structs.HealthWarning, outputStr)
return
}
}
}
// Set the health as critical
c.Logger.Printf("[WARN] agent: Check '%v' is now critical", c.CheckID)
c.Notify.UpdateCheck(c.CheckID, structs.HealthCritical, outputStr)
}
// CheckTTL is used to apply a TTL to check status,
// and enables clients to set the status of a check
// but upon the TTL expiring, the check status is
// automatically set to critical.
type CheckTTL struct {
Notify CheckNotifier
CheckID types.CheckID
TTL time.Duration
Logger *log.Logger
timer *time.Timer
lastOutput string
lastOutputLock sync.RWMutex
stop bool
stopCh chan struct{}
stopLock sync.Mutex
}
// Start is used to start a check ttl, runs until Stop()
func (c *CheckTTL) Start() {
c.stopLock.Lock()
defer c.stopLock.Unlock()
c.stop = false
c.stopCh = make(chan struct{})
c.timer = time.NewTimer(c.TTL)
go c.run()
}
// Stop is used to stop a check ttl.
func (c *CheckTTL) Stop() {
c.stopLock.Lock()
defer c.stopLock.Unlock()
if !c.stop {
c.timer.Stop()
c.stop = true
close(c.stopCh)
}
}
// run is used to handle TTL expiration and to update the check status
func (c *CheckTTL) run() {
for {
select {
case <-c.timer.C:
c.Logger.Printf("[WARN] agent: Check '%v' missed TTL, is now critical",
c.CheckID)
c.Notify.UpdateCheck(c.CheckID, structs.HealthCritical, c.getExpiredOutput())
case <-c.stopCh:
return
}
}
}
// getExpiredOutput formats the output for the case when the TTL is expired.
func (c *CheckTTL) getExpiredOutput() string {
c.lastOutputLock.RLock()
defer c.lastOutputLock.RUnlock()
const prefix = "TTL expired"
if c.lastOutput == "" {
return prefix
}
return fmt.Sprintf("%s (last output before timeout follows): %s", prefix, c.lastOutput)
}
// SetStatus is used to update the status of the check,
// and to renew the TTL. If expired, TTL is restarted.
func (c *CheckTTL) SetStatus(status, output string) {
c.Logger.Printf("[DEBUG] agent: Check '%v' status is now %v",
c.CheckID, status)
c.Notify.UpdateCheck(c.CheckID, status, output)
// Store the last output so we can retain it if the TTL expires.
c.lastOutputLock.Lock()
c.lastOutput = output
c.lastOutputLock.Unlock()
c.timer.Reset(c.TTL)
}
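// Illustrative sketch (hypothetical notifier/logger): a client-driven TTL
// check. SetStatus renews the TTL; if no update arrives within TTL, run()
// above marks the check critical.
//
//	ttl := &CheckTTL{Notify: notifier, CheckID: types.CheckID("svc:web"),
//		TTL: 30 * time.Second, Logger: logger}
//	ttl.Start()
//	ttl.SetStatus(structs.HealthPassing, "heartbeat ok") // renew the TTL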
// persistedCheck is used to serialize a check and write it to disk
// so that it may be restored later on.
type persistedCheck struct {
Check *structs.HealthCheck
ChkType *CheckType
Token string
}
// persistedCheckState is used to persist the current state of a given
// check. This is different from the check definition, and includes an
// expiration timestamp which is used to determine staleness on later
// agent restarts.
type persistedCheckState struct {
CheckID types.CheckID
Output string
Status string
Expires int64
}
// CheckHTTP is used to periodically make an HTTP request to
// determine the health of a given check.
// The check is passing if the response code is 2XX.
// The check is warning if the response code is 429.
// The check is critical if the response code is anything else
// or if the request returns an error
type CheckHTTP struct {
Notify CheckNotifier
CheckID types.CheckID
HTTP string
Interval time.Duration
Timeout time.Duration
Logger *log.Logger
TLSSkipVerify bool
httpClient *http.Client
stop bool
stopCh chan struct{}
stopLock sync.Mutex
}
// Start is used to start an HTTP check.
// The check runs until stop is called
func (c *CheckHTTP) Start() {
c.stopLock.Lock()
defer c.stopLock.Unlock()
if c.httpClient == nil {
// Create the transport. We disable HTTP Keep-Alive's to prevent
// failing checks due to the keepalive interval.
trans := cleanhttp.DefaultTransport()
trans.DisableKeepAlives = true
// Skip SSL certificate verification if TLSSkipVerify is true
if trans.TLSClientConfig == nil {
trans.TLSClientConfig = &tls.Config{
InsecureSkipVerify: c.TLSSkipVerify,
}
} else {
trans.TLSClientConfig.InsecureSkipVerify = c.TLSSkipVerify
}
// Create the HTTP client.
c.httpClient = &http.Client{
Timeout: 10 * time.Second,
Transport: trans,
}
// For long (>10s) interval checks the http timeout is 10s, otherwise the
// timeout is the interval. This means that a check *should* return
// before the next check begins.
if c.Timeout > 0 && c.Timeout < c.Interval {
c.httpClient.Timeout = c.Timeout
} else if c.Interval < 10*time.Second {
c.httpClient.Timeout = c.Interval
}
}
c.stop = false
c.stopCh = make(chan struct{})
go c.run()
}
// Stop is used to stop an HTTP check.
func (c *CheckHTTP) Stop() {
c.stopLock.Lock()
defer c.stopLock.Unlock()
if !c.stop {
c.stop = true
close(c.stopCh)
}
}
// run is invoked by a goroutine to run until Stop() is called
func (c *CheckHTTP) run() {
// Get the randomized initial pause time
initialPauseTime := lib.RandomStagger(c.Interval)
c.Logger.Printf("[DEBUG] agent: pausing %v before first HTTP request of %s", initialPauseTime, c.HTTP)
next := time.After(initialPauseTime)
for {
select {
case <-next:
c.check()
next = time.After(c.Interval)
case <-c.stopCh:
return
}
}
}
// check is invoked periodically to perform the HTTP check
func (c *CheckHTTP) check() {
req, err := http.NewRequest("GET", c.HTTP, nil)
if err != nil {
c.Logger.Printf("[WARN] agent: http request failed '%s': %s", c.HTTP, err)
c.Notify.UpdateCheck(c.CheckID, structs.HealthCritical, err.Error())
return
}
req.Header.Set("User-Agent", HttpUserAgent)
req.Header.Set("Accept", "text/plain, text/*, */*")
resp, err := c.httpClient.Do(req)
if err != nil {
c.Logger.Printf("[WARN] agent: http request failed '%s': %s", c.HTTP, err)
c.Notify.UpdateCheck(c.CheckID, structs.HealthCritical, err.Error())
return
}
defer resp.Body.Close()
// Read the response into a circular buffer to limit the size
output, _ := circbuf.NewBuffer(CheckBufSize)
if _, err := io.Copy(output, resp.Body); err != nil {
c.Logger.Printf("[WARN] agent: Check '%v': Get error while reading body: %s", c.CheckID, err)
}
// Format the response body
result := fmt.Sprintf("HTTP GET %s: %s Output: %s", c.HTTP, resp.Status, output.String())
if resp.StatusCode >= 200 && resp.StatusCode <= 299 {
// PASSING (2xx)
c.Logger.Printf("[DEBUG] agent: Check '%v' is passing", c.CheckID)
c.Notify.UpdateCheck(c.CheckID, structs.HealthPassing, result)
} else if resp.StatusCode == 429 {
// WARNING
// 429 Too Many Requests (RFC 6585)
// The user has sent too many requests in a given amount of time.
c.Logger.Printf("[WARN] agent: Check '%v' is now warning", c.CheckID)
c.Notify.UpdateCheck(c.CheckID, structs.HealthWarning, result)
} else {
// CRITICAL
c.Logger.Printf("[WARN] agent: Check '%v' is now critical", c.CheckID)
c.Notify.UpdateCheck(c.CheckID, structs.HealthCritical, result)
}
}
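// Illustrative sketch (hypothetical wiring): constructing and running an
// HTTP check against a local endpoint; the 2xx/429/other mapping above
// decides the resulting status.
//
//	check := &CheckHTTP{
//		Notify:   notifier, // any CheckNotifier implementation
//		CheckID:  types.CheckID("web"),
//		HTTP:     "http://127.0.0.1:8080/health",
//		Interval: 10 * time.Second,
//		Logger:   logger,
//	}
//	check.Start()
//	defer check.Stop()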
// CheckTCP is used to periodically make a TCP connection to
// determine the health of a given check.
// The check is passing if the connection succeeds
// The check is critical if the connection returns an error
type CheckTCP struct {
Notify CheckNotifier
CheckID types.CheckID
TCP string
Interval time.Duration
Timeout time.Duration
Logger *log.Logger
dialer *net.Dialer
stop bool
stopCh chan struct{}
stopLock sync.Mutex
}
// Start is used to start a TCP check.
// The check runs until stop is called
func (c *CheckTCP) Start() {
c.stopLock.Lock()
defer c.stopLock.Unlock()
if c.dialer == nil {
// Create the socket dialer
c.dialer = &net.Dialer{DualStack: true}
// For long (>10s) interval checks the socket timeout is 10s, otherwise
// the timeout is the interval. This means that a check *should* return
// before the next check begins.
if c.Timeout > 0 && c.Timeout < c.Interval {
c.dialer.Timeout = c.Timeout
} else if c.Interval < 10*time.Second {
c.dialer.Timeout = c.Interval
}
}
c.stop = false
c.stopCh = make(chan struct{})
go c.run()
}
// Stop is used to stop a TCP check.
func (c *CheckTCP) Stop() {
c.stopLock.Lock()
defer c.stopLock.Unlock()
if !c.stop {
c.stop = true
close(c.stopCh)
}
}
// run is invoked by a goroutine to run until Stop() is called
func (c *CheckTCP) run() {
// Get the randomized initial pause time
initialPauseTime := lib.RandomStagger(c.Interval)
c.Logger.Printf("[DEBUG] agent: pausing %v before first socket connection of %s", initialPauseTime, c.TCP)
next := time.After(initialPauseTime)
for {
select {
case <-next:
c.check()
next = time.After(c.Interval)
case <-c.stopCh:
return
}
}
}
// check is invoked periodically to perform the TCP check
func (c *CheckTCP) check() {
conn, err := c.dialer.Dial(`tcp`, c.TCP)
if err != nil {
c.Logger.Printf("[WARN] agent: socket connection failed '%s': %s", c.TCP, err)
c.Notify.UpdateCheck(c.CheckID, structs.HealthCritical, err.Error())
return
}
conn.Close()
c.Logger.Printf("[DEBUG] agent: Check '%v' is passing", c.CheckID)
c.Notify.UpdateCheck(c.CheckID, structs.HealthPassing, fmt.Sprintf("TCP connect %s: Success", c.TCP))
}
// A custom interface since go-dockerclient doesn't have one
// We will use this interface in our test to inject a fake client
type DockerClient interface {
CreateExec(docker.CreateExecOptions) (*docker.Exec, error)
StartExec(string, docker.StartExecOptions) error
InspectExec(string) (*docker.ExecInspect, error)
}
// CheckDocker is used to periodically invoke a script to
// determine the health of an application running inside a
// Docker Container. We assume that the script is compatible
// with nagios plugins and expects the output in the same format.
type CheckDocker struct {
Notify CheckNotifier
CheckID types.CheckID
Script string
DockerContainerID string
Shell string
Interval time.Duration
Logger *log.Logger
dockerClient DockerClient
cmd []string
stop bool
stopCh chan struct{}
stopLock sync.Mutex
}
// Init initializes the Docker client.
func (c *CheckDocker) Init() error {
	// create the docker client
var err error
c.dockerClient, err = docker.NewClientFromEnv()
if err != nil {
c.Logger.Printf("[DEBUG] Error creating the Docker client: %s", err.Error())
return err
}
return nil
}
// Start is used to start checks.
// Docker Checks runs until stop is called
func (c *CheckDocker) Start() {
c.stopLock.Lock()
defer c.stopLock.Unlock()
	// figure out the shell
if c.Shell == "" {
c.Shell = shell()
}
c.cmd = []string{c.Shell, "-c", c.Script}
c.stop = false
c.stopCh = make(chan struct{})
go c.run()
}
// Stop is used to stop a docker check.
func (c *CheckDocker) Stop() {
c.stopLock.Lock()
defer c.stopLock.Unlock()
if !c.stop {
c.stop = true
close(c.stopCh)
}
}
// run is invoked by a goroutine to run until Stop() is called
func (c *CheckDocker) run() {
// Get the randomized initial pause time
initialPauseTime := lib.RandomStagger(c.Interval)
c.Logger.Printf("[DEBUG] agent: pausing %v before first invocation of %s -c %s in container %s", initialPauseTime, c.Shell, c.Script, c.DockerContainerID)
next := time.After(initialPauseTime)
for {
select {
case <-next:
c.check()
next = time.After(c.Interval)
case <-c.stopCh:
return
}
}
}
func (c *CheckDocker) check() {
	// Set up the Exec request that runs the check script inside the container
execOpts := docker.CreateExecOptions{
AttachStdin: false,
AttachStdout: true,
AttachStderr: true,
Tty: false,
Cmd: c.cmd,
Container: c.DockerContainerID,
}
var (
exec *docker.Exec
err error
)
if exec, err = c.dockerClient.CreateExec(execOpts); err != nil {
c.Logger.Printf("[DEBUG] agent: Error while creating Exec: %s", err.Error())
c.Notify.UpdateCheck(c.CheckID, structs.HealthCritical, fmt.Sprintf("Unable to create Exec, error: %s", err.Error()))
return
}
// Collect the output
output, _ := circbuf.NewBuffer(CheckBufSize)
err = c.dockerClient.StartExec(exec.ID, docker.StartExecOptions{Detach: false, Tty: false, OutputStream: output, ErrorStream: output})
if err != nil {
c.Logger.Printf("[DEBUG] Error in executing health checks: %s", err.Error())
c.Notify.UpdateCheck(c.CheckID, structs.HealthCritical, fmt.Sprintf("Unable to start Exec: %s", err.Error()))
return
}
// Get the output, add a message about truncation
outputStr := string(output.Bytes())
if output.TotalWritten() > output.Size() {
outputStr = fmt.Sprintf("Captured %d of %d bytes\n...\n%s",
output.Size(), output.TotalWritten(), outputStr)
}
c.Logger.Printf("[DEBUG] agent: Check '%s' script '%s' output: %s",
c.CheckID, c.Script, outputStr)
execInfo, err := c.dockerClient.InspectExec(exec.ID)
if err != nil {
c.Logger.Printf("[DEBUG] Error in inspecting check result : %s", err.Error())
c.Notify.UpdateCheck(c.CheckID, structs.HealthCritical, fmt.Sprintf("Unable to inspect Exec: %s", err.Error()))
return
}
// Sets the status of the check to healthy if exit code is 0
if execInfo.ExitCode == 0 {
c.Notify.UpdateCheck(c.CheckID, structs.HealthPassing, outputStr)
return
}
// Set the status of the check to Warning if exit code is 1
if execInfo.ExitCode == 1 {
c.Logger.Printf("[DEBUG] Check failed with exit code: %d", execInfo.ExitCode)
c.Notify.UpdateCheck(c.CheckID, structs.HealthWarning, outputStr)
return
}
// Set the health as critical
c.Logger.Printf("[WARN] agent: Check '%v' is now critical", c.CheckID)
c.Notify.UpdateCheck(c.CheckID, structs.HealthCritical, outputStr)
}
func shell() string {
if otherShell := os.Getenv("SHELL"); otherShell != "" {
return otherShell
} else {
return "/bin/sh"
}
}
|
[
"\"SHELL\""
] |
[] |
[
"SHELL"
] |
[]
|
["SHELL"]
|
go
| 1 | 0 | |
mev/api/runners/remote_cromwell.py
|
import os
import glob
import json
import datetime
import zipfile
import logging
import io
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from api.runners.base import OperationRunner
from api.utilities.operations import get_operation_instance_data
from api.utilities.basic_utils import make_local_directory, \
copy_local_resource, \
alert_admins
from api.utilities.basic_utils import get_with_retry, post_with_retry
from api.utilities.wdl_utils import WDL_SUFFIX, \
get_docker_images_in_repo, \
edit_runtime_containers
from api.utilities.docker import build_docker_image, \
login_to_dockerhub, \
push_image_to_dockerhub
from api.storage_backends import get_storage_backend
from api.cloud_backends import get_instance_zone, get_instance_region
from api.converters.output_converters import RemoteCromwellOutputConverter
from api.models.executed_operation import ExecutedOperation
logger = logging.getLogger(__name__)
class RemoteCromwellRunner(OperationRunner):
'''
Class that handles execution of `Operation`s using the WDL/Cromwell
framework
'''
MODE = 'cromwell'
NAME = settings.CROMWELL
DOCKERFILE = 'Dockerfile'
MAIN_WDL = 'main.wdl'
DEPENDENCIES_ZIPNAME = 'dependencies.zip'
WDL_INPUTS = 'inputs.json'
# Constants that are part of the payload submitted to Cromwell
WORKFLOW_TYPE = 'WDL'
WORKFLOW_TYPE_VERSION = 'draft-2'
# API paths for the Cromwell server
SUBMIT_ENDPOINT = '/api/workflows/v1'
STATUS_ENDPOINT = '/api/workflows/v1/{cromwell_job_id}/status'
OUTPUTS_ENDPOINT = '/api/workflows/v1/{cromwell_job_id}/outputs'
METADATA_ENDPOINT = '/api/workflows/v1/{cromwell_job_id}/metadata'
ABORT_ENDPOINT = '/api/workflows/v1/{cromwell_job_id}/abort'
VERSION_ENDPOINT = '/engine/v1/version'
# Some other constants (often defined on the Cromwell side)
CROMWELL_DATETIME_STR_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
SUBMITTED_STATUS = 'Submitted'
SUCCEEDED_STATUS = 'Succeeded'
FAILED_STATUS = 'Failed'
OTHER_STATUS = 'unknown/other' # marker for other response strings.
# A list of files that are required to be part of the repository
REQUIRED_FILES = OperationRunner.REQUIRED_FILES + [
# the main "entrypoint" WDL
MAIN_WDL,
# the input json file, as a template
WDL_INPUTS
]
def __init__(self):
self.read_cromwell_url()
self.read_cromwell_bucket_name()
def read_cromwell_url(self):
try:
self.CROMWELL_URL = os.environ['CROMWELL_SERVER_URL']
except KeyError as ex:
raise ImproperlyConfigured('To use the Cromwell runner, you must'
' set the "{k}" environment variable.'.format(
k = ex
)
)
def read_cromwell_bucket_name(self):
# check that the storage bucket exists-- since remote jobs require
# the use of bucket storage, we can simply use the storage backend hook
# to verify that the storage bucket exists in the same region as our
# application
try:
self.CROMWELL_BUCKET = os.environ['CROMWELL_BUCKET']
except KeyError as ex:
raise ImproperlyConfigured('To use the Cromwell runner, you must'
' set the "CROMWELL_BUCKET" environment variable. Set it to the'
' name of the bucket, excluding any filesystem prefix like "gs://" or "s3://"'
)
def prepare_operation(self, operation_dir, repo_name, git_hash):
# get a list of the docker images in all the WDL files
docker_image_names = get_docker_images_in_repo(operation_dir)
logger.info('Found the following image names among the'
' WDL files: {imgs}'.format(
imgs = ', '.join(docker_image_names)
)
)
# iterate through those, building the images
name_mapping = {}
for full_image_name in docker_image_names:
# image name is something like
# <docker repo, e.g. docker.io>/<username>/<name>:<tag>
split_full_name = full_image_name.split(':')
if len(split_full_name) == 2: #if a tag is specified
image_prefix, tag = split_full_name
elif len(split_full_name) == 1: # if no tag
image_prefix = split_full_name[0]
else:
logger.error('Could not properly handle the following docker'
' image spec: {x}'.format(x = full_image_name)
)
raise Exception('Could not make sense of the docker'
' image handle: {x}'.format(x=full_image_name)
)
image_split = image_prefix.split('/')
if len(image_split) == 3:
docker_repo, username, image_name = image_split
elif len(image_split) == 2:
username, image_name = image_split
else:
logger.error('Could not properly handle the following docker'
' image spec: {x}'.format(x = full_image_name)
)
raise Exception('Could not make sense of the docker'
' image handle: {x}'.format(x=full_image_name)
)
dockerfile_name = '{df}.{name}'.format(
df = self.DOCKERFILE,
name = image_name
)
dockerfile_path = os.path.join(
operation_dir,
self.DOCKER_DIR,
dockerfile_name
)
if not os.path.exists(dockerfile_path):
raise Exception('To create the Docker image for {img}, expected'
' a Dockerfile at: {p}'.format(
p = dockerfile_path,
img = image_prefix
)
)
# to create unambiguous images, we take the "base" image name
# (e.g. docker.io/myuser/foo) and append a tag which is the
# github commit hash
            # Note that `image_prefix` does NOT include the repo (e.g. docker.io)
# or the username. This allows us to keep our own images in case a
# developer submitted a workflow that used images associated with their
# personal dockerhub account
build_docker_image(image_name,
git_hash,
dockerfile_path,
os.path.join(operation_dir, self.DOCKER_DIR)
)
login_to_dockerhub()
pushed_image_str = push_image_to_dockerhub(image_name, git_hash)
name_mapping[full_image_name] = pushed_image_str
# change the name of the image in the WDL file(s), saving them in-place:
edit_runtime_containers(operation_dir, name_mapping)
def check_if_ready(self):
'''
Makes sure all the proper environment variables, etc.
are present to use this job runner. Should be invoked
at startup of django app.
'''
# check that we can reach the Cromwell server
url = self.CROMWELL_URL + self.VERSION_ENDPOINT
try:
response = get_with_retry(url)
except Exception as ex:
logger.info('An exception was raised when checking if the remote Cromwell runner was ready.'
' The exception reads: {ex}'.format(ex=ex)
)
raise ImproperlyConfigured('Failed to check the remote Cromwell runner. See logs.')
if response.status_code != 200:
logger.info('The Cromwell server located at: {url}'
' was not ready.'.format(
url = url
)
)
raise ImproperlyConfigured('Failed to reach Cromwell server.')
bucket_region = get_storage_backend().get_bucket_region(self.CROMWELL_BUCKET)
instance_region = get_instance_region()
if bucket_region != instance_region:
raise ImproperlyConfigured('The application is running on a'
' machine in the following region: {instance_region}. The'
' Cromwell bucket was found in {bucket_region}. They should'
' be located in the same region.'.format(
bucket_region = bucket_region,
instance_region = instance_region
)
)
def _create_inputs_json(self, op_dir, validated_inputs, staging_dir):
'''
Takes the inputs (which are MEV-native data structures)
and make them into something that we can inject into Cromwell's
inputs.json format compatible with WDL
For instance, this takes a DataResource (which is a UUID identifying
the file), and turns it into a cloud-based path that Cromwell can access.
'''
# create/write the input JSON to a file in the staging location
arg_dict = self._map_inputs(op_dir, validated_inputs)
wdl_input_path = os.path.join(staging_dir, self.WDL_INPUTS)
with open(wdl_input_path, 'w') as fout:
json.dump(arg_dict, fout)
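        # Illustrative sketch (hypothetical keys/values): the resulting
        # inputs.json maps fully-qualified WDL inputs to primitives or
        # bucket-based paths, e.g.
        #   {"Main.input_file": "gs://<bucket>/path/to/file.tsv",
        #    "Main.num_threads": 4}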
def _copy_workflow_contents(self, op_dir, staging_dir):
'''
Copy over WDL files and other elements necessary to submit
the job to Cromwell. Does not mean that we copy EVERYTHING
in the op dir.
Also creates zip archive of the "non main" WDL files, as required
by Cromwell
'''
# copy WDL files over to staging:
wdl_files = glob.glob(
os.path.join(op_dir, '*' + WDL_SUFFIX)
)
for w in wdl_files:
dest = os.path.join(staging_dir, os.path.basename(w))
copy_local_resource(w, dest)
# if there are WDL files in addition to the main one, they need to be zipped
# and submitted as 'dependencies'
additional_wdl_files = [
x for x in glob.glob(os.path.join(staging_dir, '*' + WDL_SUFFIX))
if os.path.basename(x) != self.MAIN_WDL
]
zip_archive = None
if len(additional_wdl_files) > 0:
zip_archive = os.path.join(staging_dir, self.DEPENDENCIES_ZIPNAME)
with zipfile.ZipFile(zip_archive, 'w') as zipout:
for f in additional_wdl_files:
zipout.write(f, os.path.basename(f))
def send_job(self, staging_dir, executed_op):
# the path of the input json file:
wdl_input_path = os.path.join(staging_dir, self.WDL_INPUTS)
# pull together the components of the POST request to the Cromwell server
submission_url = self.CROMWELL_URL + self.SUBMIT_ENDPOINT
        payload = {
            'workflowType': self.WORKFLOW_TYPE,
            'workflowTypeVersion': self.WORKFLOW_TYPE_VERSION
        }
# load the options file so we can fill-in the zones:
options_json = {}
current_zone = get_instance_zone()
options_json['default_runtime_attributes'] = {'zones': current_zone}
options_json_str = json.dumps(options_json)
options_io = io.BytesIO(options_json_str.encode('utf-8'))
files = {
'workflowOptions': options_io,
'workflowInputs': open(wdl_input_path,'rb'),
'workflowSource': open(os.path.join(staging_dir, self.MAIN_WDL), 'rb')
}
zip_archive = os.path.join(staging_dir, self.DEPENDENCIES_ZIPNAME)
if os.path.exists(zip_archive):
files['workflowDependencies'] = open(zip_archive, 'rb')
# start the job:
try:
response = post_with_retry(submission_url, data=payload, files=files)
        except Exception as ex:
            logger.info('Submitting job ({id}) to Cromwell failed.'
                ' Exception was: {ex}'.format(
                    ex = ex,
                    id = executed_op.id
                )
            )
            # without a response there is nothing to parse, so bail out here
            return
        self.handle_submission_response(response, executed_op)
def handle_submission_response(self, response, executed_op):
response_json = json.loads(response.text)
if response.status_code == 201:
try:
status = response_json['status']
except KeyError as ex:
status = 'Unknown'
if status == self.SUBMITTED_STATUS:
logger.info('Job was successfully submitted'
' to Cromwell.'
)
# Cromwell assigns its own UUID to the job
cromwell_job_id = response_json['id']
executed_op.job_id = cromwell_job_id
executed_op.execution_start_datetime = datetime.datetime.now()
else:
logger.info('Received an unexpected status'
' from Cromwell following a 201'
' response code: {status}'.format(
status = response_json['status']
)
)
executed_op.status = status
else:
error_msg = ('Received a response code of {rc} when submitting job'
' to the remote Cromwell runner.'.format(
rc = response.status_code
)
)
logger.info(error_msg)
alert_admins(error_msg)
executed_op.status = 'Not submitted. Try again later. Admins have been notified.'
executed_op.save()
def query_for_metadata(self, job_uuid):
'''
Calls out to the Cromwell server to get metadata about
a job. See
https://cromwell.readthedocs.io/en/stable/api/RESTAPI/#get-workflow-and-call-level-metadata-for-a-specified-workflow
'''
endpoint = self.METADATA_ENDPOINT.format(cromwell_job_id=job_uuid)
metadata_url = self.CROMWELL_URL + endpoint
response = get_with_retry(metadata_url)
bad_codes = [404, 400, 500]
if response.status_code in bad_codes:
logger.info('Request for Cromwell job metadata returned'
' a {code} status.'.format(code=response.status_code)
)
elif response.status_code == 200:
response_json = json.loads(response.text)
return response_json
else:
            logger.info('Received an unexpected status code when querying'
' the metadata of a Cromwell job.'
)
def query_for_status(self, job_uuid):
'''
Performs the work of querying the Cromwell server.
Returns either a dict (i.e. the response) or None, if
the response did not have the expected 200 status code.
'''
endpoint = self.STATUS_ENDPOINT.format(cromwell_job_id=job_uuid)
status_url = self.CROMWELL_URL + endpoint
response = get_with_retry(status_url)
bad_codes = [404, 400, 500]
if response.status_code in bad_codes:
logger.info('Request for Cromwell job status returned'
' a {code} status.'.format(code=response.status_code)
)
elif response.status_code == 200:
response_json = json.loads(response.text)
return response_json
else:
            logger.info('Received an unexpected status code when querying'
' the status of a Cromwell job.'
)
def _parse_status_response(self, response_json):
status = response_json['status']
if status == self.SUCCEEDED_STATUS:
return self.SUCCEEDED_STATUS
elif status == self.FAILED_STATUS:
return self.FAILED_STATUS
return self.OTHER_STATUS
def check_status(self, job_uuid):
'''
Returns a bool indicating whether we know if the job is finished.
Unexpected responses return False, which will essentially block
other actions until admins can investigate.
'''
response_json = self.query_for_status(job_uuid)
if response_json:
status = self._parse_status_response(response_json)
            # the job is complete if it's marked as success or failure
if (status == self.SUCCEEDED_STATUS) or (status == self.FAILED_STATUS):
return True
return False
def handle_job_success(self, executed_op):
job_id = executed_op.job_id
job_metadata = self.query_for_metadata(job_id)
try:
end_time_str = job_metadata['end']
except KeyError as ex:
end_time = datetime.datetime.now()
else:
end_time = datetime.datetime.strptime(
end_time_str,
self.CROMWELL_DATETIME_STR_FORMAT
)
# get the job outputs
# This is a mapping of the Cromwell output ID (e.g. Workflow.Variable)
# to either a primitive (String, Number) or a filepath (in a bucket)
try:
outputs_dict = job_metadata['outputs']
except KeyError as ex:
outputs_dict = {}
error_msg = ('The job metadata payload received from executed op ({op_id})'
' with Cromwell ID {cromwell_id} did not contain the "outputs"'
' key in the payload'.format(
cromwell_id = job_id,
op_id = executed_op.id
)
)
logger.info(error_msg)
alert_admins(error_msg)
# instantiate the output converter class which will take the job outputs
# and create MEV-compatible data structures or resources:
converter = RemoteCromwellOutputConverter()
converted_outputs = self.convert_outputs(executed_op, converter, outputs_dict)
# set fields on the executed op:
executed_op.outputs = converted_outputs
executed_op.execution_stop_datetime = end_time
executed_op.job_failed = False
executed_op.status = ExecutedOperation.COMPLETION_SUCCESS
def handle_job_failure(self, executed_op):
job_id = executed_op.job_id
job_metadata = self.query_for_metadata(job_id)
try:
end_time_str = job_metadata['end']
except KeyError as ex:
end_time = datetime.datetime.now()
else:
end_time = datetime.datetime.strptime(
end_time_str,
self.CROMWELL_DATETIME_STR_FORMAT
)
failure_list = job_metadata['failures']
failure_messages = set()
for f in failure_list:
failure_messages.add(f['message'])
# set fields on the executed op:
executed_op.error_messages = list(failure_messages)
executed_op.execution_stop_datetime = end_time
executed_op.job_failed = True
executed_op.status = ExecutedOperation.COMPLETION_ERROR
def handle_other_job_outcome(self, executed_op):
executed_op.status = ('Experienced an unexpected response'
' when querying for the job status. Admins have been notified.'
)
alert_admins(
'Experienced an unexpected response when querying for '
'the job status of op: {op_id}.'.format(op_id=executed_op.job_id)
)
def finalize(self, executed_op):
'''
Finishes up an ExecutedOperation. Does things like registering files
with a user, cleanup, etc.
'''
job_id = str(executed_op.job_id)
status_json = self.query_for_status(job_id)
if status_json:
status = self._parse_status_response(status_json)
else:
status = None
if status == self.SUCCEEDED_STATUS:
self.handle_job_success(executed_op)
elif status == self.FAILED_STATUS:
self.handle_job_failure(executed_op)
else:
self.handle_other_job_outcome(executed_op)
executed_op.is_finalizing = False
executed_op.save()
def run(self, executed_op, op_data, validated_inputs):
logger.info('Running in remote Cromwell mode.')
logger.info('Executed op type: %s' % type(executed_op))
logger.info('Executed op ID: %s' % str(executed_op.id))
logger.info('Op data: %s' % op_data)
logger.info(validated_inputs)
# the UUID identifying the execution of this operation:
execution_uuid = str(executed_op.id)
# get the operation dir so we can look at which converters to use:
op_dir = os.path.join(
settings.OPERATION_LIBRARY_DIR,
str(op_data['id'])
)
# create a sandbox directory where we will store the files:
staging_dir = os.path.join(settings.OPERATION_EXECUTION_DIR, execution_uuid)
make_local_directory(staging_dir)
# create the Cromwell-compatible inputs.json from the user inputs
self._create_inputs_json(op_dir, validated_inputs, staging_dir)
# copy over the workflow contents:
self._copy_workflow_contents(op_dir, staging_dir)
# construct the request to the Cromwell server:
self.send_job(staging_dir, executed_op)
|
[] |
[] |
[
"CROMWELL_BUCKET",
"CROMWELL_SERVER_URL"
] |
[]
|
["CROMWELL_BUCKET", "CROMWELL_SERVER_URL"]
|
python
| 2 | 0 | |
clients/google-api-services-pubsub/v1beta2/1.31.0/com/google/api/services/pubsub/Pubsub.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.pubsub;
/**
* Service definition for Pubsub (v1beta2).
*
* <p>
* Provides reliable, many-to-many, asynchronous messaging between applications.
* </p>
*
* <p>
* For more information about this service, see the
* <a href="https://cloud.google.com/pubsub/docs" target="_blank">API Documentation</a>
* </p>
*
* <p>
* This service uses {@link PubsubRequestInitializer} to initialize global parameters via its
* {@link Builder}.
* </p>
*
* @since 1.3
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public class Pubsub extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient {
// Note: Leave this static initializer at the top of the file.
static {
com.google.api.client.util.Preconditions.checkState(
com.google.api.client.googleapis.GoogleUtils.MAJOR_VERSION == 1 &&
(com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION >= 32 ||
(com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION == 31 &&
com.google.api.client.googleapis.GoogleUtils.BUGFIX_VERSION >= 1)),
"You are currently running with version %s of google-api-client. " +
"You need at least version 1.31.1 of google-api-client to run version " +
"1.31.0 of the Cloud Pub/Sub API library.", com.google.api.client.googleapis.GoogleUtils.VERSION);
}
/**
* The default encoded root URL of the service. This is determined when the library is generated
* and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_ROOT_URL = "https://pubsub.googleapis.com/";
/**
* The default encoded mTLS root URL of the service. This is determined when the library is generated
* and normally should not be changed.
*
* @since 1.31
*/
public static final String DEFAULT_MTLS_ROOT_URL = "https://pubsub.mtls.googleapis.com/";
/**
* The default encoded service path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_SERVICE_PATH = "";
/**
* The default encoded batch path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.23
*/
public static final String DEFAULT_BATCH_PATH = "batch";
/**
* The default encoded base URL of the service. This is determined when the library is generated
* and normally should not be changed.
*/
public static final String DEFAULT_BASE_URL = DEFAULT_ROOT_URL + DEFAULT_SERVICE_PATH;
/**
* Constructor.
*
* <p>
* Use {@link Builder} if you need to specify any of the optional parameters.
* </p>
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public Pubsub(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
this(new Builder(transport, jsonFactory, httpRequestInitializer));
}
/**
* @param builder builder
*/
Pubsub(Builder builder) {
super(builder);
}
@Override
protected void initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest<?> httpClientRequest) throws java.io.IOException {
super.initialize(httpClientRequest);
}
/**
* An accessor for creating requests from the Projects collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code Pubsub pubsub = new Pubsub(...);}
* {@code Pubsub.Projects.List request = pubsub.projects().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Projects projects() {
return new Projects();
}
/**
* The "projects" collection of methods.
*/
public class Projects {
/**
* An accessor for creating requests from the Subscriptions collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code Pubsub pubsub = new Pubsub(...);}
* {@code Pubsub.Subscriptions.List request = pubsub.subscriptions().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Subscriptions subscriptions() {
return new Subscriptions();
}
/**
* The "subscriptions" collection of methods.
*/
public class Subscriptions {
/**
* Acknowledges the messages associated with the `ack_ids` in the `AcknowledgeRequest`. The Pub/Sub
* system can remove the relevant messages from the subscription. Acknowledging a message whose ack
* deadline has expired may succeed, but such a message may be redelivered later. Acknowledging a
* message more than once will not result in an error.
*
* Create a request for the method "subscriptions.acknowledge".
*
* This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link Acknowledge#execute()} method to invoke the remote operation.
*
* @param subscription The subscription whose message is being acknowledged.
* @param content the {@link com.google.api.services.pubsub.model.AcknowledgeRequest}
* @return the request
*/
public Acknowledge acknowledge(java.lang.String subscription, com.google.api.services.pubsub.model.AcknowledgeRequest content) throws java.io.IOException {
Acknowledge result = new Acknowledge(subscription, content);
initialize(result);
return result;
}
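      // Illustrative usage sketch (hypothetical project/subscription names,
      // not part of the generated file):
      //
      //   Pubsub pubsub = new Pubsub(transport, jsonFactory, initializer);
      //   AcknowledgeRequest body = new AcknowledgeRequest().setAckIds(ackIds);
      //   pubsub.projects().subscriptions()
      //       .acknowledge("projects/my-project/subscriptions/my-sub", body)
      //       .execute();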
public class Acknowledge extends PubsubRequest<com.google.api.services.pubsub.model.Empty> {
private static final String REST_PATH = "v1beta2/{+subscription}:acknowledge";
private final java.util.regex.Pattern SUBSCRIPTION_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/subscriptions/[^/]+$");
/**
* Acknowledges the messages associated with the `ack_ids` in the `AcknowledgeRequest`. The
* Pub/Sub system can remove the relevant messages from the subscription. Acknowledging a message
* whose ack deadline has expired may succeed, but such a message may be redelivered later.
* Acknowledging a message more than once will not result in an error.
*
* Create a request for the method "subscriptions.acknowledge".
*
         * This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link Acknowledge#execute()} method to invoke the remote operation. <p>
* {@link
* Acknowledge#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param subscription The subscription whose message is being acknowledged.
* @param content the {@link com.google.api.services.pubsub.model.AcknowledgeRequest}
* @since 1.13
*/
protected Acknowledge(java.lang.String subscription, com.google.api.services.pubsub.model.AcknowledgeRequest content) {
super(Pubsub.this, "POST", REST_PATH, content, com.google.api.services.pubsub.model.Empty.class);
this.subscription = com.google.api.client.util.Preconditions.checkNotNull(subscription, "Required parameter subscription must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(SUBSCRIPTION_PATTERN.matcher(subscription).matches(),
"Parameter subscription must conform to the pattern " +
"^projects/[^/]+/subscriptions/[^/]+$");
}
}
@Override
public Acknowledge set$Xgafv(java.lang.String $Xgafv) {
return (Acknowledge) super.set$Xgafv($Xgafv);
}
@Override
public Acknowledge setAccessToken(java.lang.String accessToken) {
return (Acknowledge) super.setAccessToken(accessToken);
}
@Override
public Acknowledge setAlt(java.lang.String alt) {
return (Acknowledge) super.setAlt(alt);
}
@Override
public Acknowledge setCallback(java.lang.String callback) {
return (Acknowledge) super.setCallback(callback);
}
@Override
public Acknowledge setFields(java.lang.String fields) {
return (Acknowledge) super.setFields(fields);
}
@Override
public Acknowledge setKey(java.lang.String key) {
return (Acknowledge) super.setKey(key);
}
@Override
public Acknowledge setOauthToken(java.lang.String oauthToken) {
return (Acknowledge) super.setOauthToken(oauthToken);
}
@Override
public Acknowledge setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Acknowledge) super.setPrettyPrint(prettyPrint);
}
@Override
public Acknowledge setQuotaUser(java.lang.String quotaUser) {
return (Acknowledge) super.setQuotaUser(quotaUser);
}
@Override
public Acknowledge setUploadType(java.lang.String uploadType) {
return (Acknowledge) super.setUploadType(uploadType);
}
@Override
public Acknowledge setUploadProtocol(java.lang.String uploadProtocol) {
return (Acknowledge) super.setUploadProtocol(uploadProtocol);
}
/** The subscription whose message is being acknowledged. */
@com.google.api.client.util.Key
private java.lang.String subscription;
/** The subscription whose message is being acknowledged.
*/
public java.lang.String getSubscription() {
return subscription;
}
/** The subscription whose message is being acknowledged. */
public Acknowledge setSubscription(java.lang.String subscription) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(SUBSCRIPTION_PATTERN.matcher(subscription).matches(),
"Parameter subscription must conform to the pattern " +
"^projects/[^/]+/subscriptions/[^/]+$");
}
this.subscription = subscription;
return this;
}
@Override
public Acknowledge set(String parameterName, Object value) {
return (Acknowledge) super.set(parameterName, value);
}
}
/**
* Creates a subscription to a given topic. If the subscription already exists, returns
* `ALREADY_EXISTS`. If the corresponding topic doesn't exist, returns `NOT_FOUND`. If the name is
* not provided in the request, the server will assign a random name for this subscription on the
* same project as the topic. Note that for REST API requests, you must specify a name.
*
* Create a request for the method "subscriptions.create".
*
* This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation.
*
* @param name The name of the subscription. It must have the format
* `"projects/{project}/subscriptions/{subscription}"`. `{subscription}` must start with a
* letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),
* underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). It must
* be between 3 and 255 characters in length, and it must not start with `"goog"`.
* @param content the {@link com.google.api.services.pubsub.model.Subscription}
* @return the request
*/
public Create create(java.lang.String name, com.google.api.services.pubsub.model.Subscription content) throws java.io.IOException {
Create result = new Create(name, content);
initialize(result);
return result;
}
public class Create extends PubsubRequest<com.google.api.services.pubsub.model.Subscription> {
private static final String REST_PATH = "v1beta2/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/subscriptions/[^/]+$");
/**
* Creates a subscription to a given topic. If the subscription already exists, returns
* `ALREADY_EXISTS`. If the corresponding topic doesn't exist, returns `NOT_FOUND`. If the name is
* not provided in the request, the server will assign a random name for this subscription on the
* same project as the topic. Note that for REST API requests, you must specify a name.
*
* Create a request for the method "subscriptions.create".
*
         * This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation. <p> {@link
* Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The name of the subscription. It must have the format
* `"projects/{project}/subscriptions/{subscription}"`. `{subscription}` must start with a
* letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),
* underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). It must
* be between 3 and 255 characters in length, and it must not start with `"goog"`.
* @param content the {@link com.google.api.services.pubsub.model.Subscription}
* @since 1.13
*/
protected Create(java.lang.String name, com.google.api.services.pubsub.model.Subscription content) {
super(Pubsub.this, "PUT", REST_PATH, content, com.google.api.services.pubsub.model.Subscription.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/subscriptions/[^/]+$");
}
}
@Override
public Create set$Xgafv(java.lang.String $Xgafv) {
return (Create) super.set$Xgafv($Xgafv);
}
@Override
public Create setAccessToken(java.lang.String accessToken) {
return (Create) super.setAccessToken(accessToken);
}
@Override
public Create setAlt(java.lang.String alt) {
return (Create) super.setAlt(alt);
}
@Override
public Create setCallback(java.lang.String callback) {
return (Create) super.setCallback(callback);
}
@Override
public Create setFields(java.lang.String fields) {
return (Create) super.setFields(fields);
}
@Override
public Create setKey(java.lang.String key) {
return (Create) super.setKey(key);
}
@Override
public Create setOauthToken(java.lang.String oauthToken) {
return (Create) super.setOauthToken(oauthToken);
}
@Override
public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Create) super.setPrettyPrint(prettyPrint);
}
@Override
public Create setQuotaUser(java.lang.String quotaUser) {
return (Create) super.setQuotaUser(quotaUser);
}
@Override
public Create setUploadType(java.lang.String uploadType) {
return (Create) super.setUploadType(uploadType);
}
@Override
public Create setUploadProtocol(java.lang.String uploadProtocol) {
return (Create) super.setUploadProtocol(uploadProtocol);
}
/**
* The name of the subscription. It must have the format
* `"projects/{project}/subscriptions/{subscription}"`. `{subscription}` must start with a
* letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),
* underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). It
* must be between 3 and 255 characters in length, and it must not start with `"goog"`.
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** The name of the subscription. It must have the format
`"projects/{project}/subscriptions/{subscription}"`. `{subscription}` must start with a letter, and
contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), underscores (`_`), periods
(`.`), tildes (`~`), plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters in
length, and it must not start with `"goog"`.
*/
public java.lang.String getName() {
return name;
}
/**
* The name of the subscription. It must have the format
* `"projects/{project}/subscriptions/{subscription}"`. `{subscription}` must start with a
* letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),
* underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). It
* must be between 3 and 255 characters in length, and it must not start with `"goog"`.
*/
public Create setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/subscriptions/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Create set(String parameterName, Object value) {
return (Create) super.set(parameterName, value);
}
}
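      /*
       * A minimal usage sketch for Create (illustration only, not generated
       * code). "subscriptions" stands for this collection obtained from an
       * initialized Pubsub client; the resource names and the Subscription
       * setters are assumptions drawn from the v1beta2 model classes:
       *
       *   com.google.api.services.pubsub.model.Subscription body =
       *       new com.google.api.services.pubsub.model.Subscription()
       *           .setTopic("projects/my-project/topics/my-topic")
       *           .setAckDeadlineSeconds(30);
       *   com.google.api.services.pubsub.model.Subscription created =
       *       subscriptions.create("projects/my-project/subscriptions/my-sub", body)
       *           .execute();
       */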
/**
* Deletes an existing subscription. All pending messages in the subscription are immediately
* dropped. Calls to `Pull` after deletion will return `NOT_FOUND`. After a subscription is deleted,
* a new one may be created with the same name, but the new one has no association with the old
 * subscription or its topic, unless the same topic is specified.
*
* Create a request for the method "subscriptions.delete".
*
* This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param subscription The subscription to delete.
* @return the request
*/
public Delete delete(java.lang.String subscription) throws java.io.IOException {
Delete result = new Delete(subscription);
initialize(result);
return result;
}
public class Delete extends PubsubRequest<com.google.api.services.pubsub.model.Empty> {
private static final String REST_PATH = "v1beta2/{+subscription}";
private final java.util.regex.Pattern SUBSCRIPTION_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/subscriptions/[^/]+$");
/**
* Deletes an existing subscription. All pending messages in the subscription are immediately
* dropped. Calls to `Pull` after deletion will return `NOT_FOUND`. After a subscription is
* deleted, a new one may be created with the same name, but the new one has no association with
 * the old subscription or its topic, unless the same topic is specified.
*
* Create a request for the method "subscriptions.delete".
*
 * This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation. <p> {@link
* Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param subscription The subscription to delete.
* @since 1.13
*/
protected Delete(java.lang.String subscription) {
super(Pubsub.this, "DELETE", REST_PATH, null, com.google.api.services.pubsub.model.Empty.class);
this.subscription = com.google.api.client.util.Preconditions.checkNotNull(subscription, "Required parameter subscription must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(SUBSCRIPTION_PATTERN.matcher(subscription).matches(),
"Parameter subscription must conform to the pattern " +
"^projects/[^/]+/subscriptions/[^/]+$");
}
}
@Override
public Delete set$Xgafv(java.lang.String $Xgafv) {
return (Delete) super.set$Xgafv($Xgafv);
}
@Override
public Delete setAccessToken(java.lang.String accessToken) {
return (Delete) super.setAccessToken(accessToken);
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setCallback(java.lang.String callback) {
return (Delete) super.setCallback(callback);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUploadType(java.lang.String uploadType) {
return (Delete) super.setUploadType(uploadType);
}
@Override
public Delete setUploadProtocol(java.lang.String uploadProtocol) {
return (Delete) super.setUploadProtocol(uploadProtocol);
}
/** The subscription to delete. */
@com.google.api.client.util.Key
private java.lang.String subscription;
/** The subscription to delete.
*/
public java.lang.String getSubscription() {
return subscription;
}
/** The subscription to delete. */
public Delete setSubscription(java.lang.String subscription) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(SUBSCRIPTION_PATTERN.matcher(subscription).matches(),
"Parameter subscription must conform to the pattern " +
"^projects/[^/]+/subscriptions/[^/]+$");
}
this.subscription = subscription;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
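      /*
       * A minimal usage sketch for Delete (illustration only). Recreating the
       * same name afterwards yields a brand-new subscription with no link to
       * the old one; "subscriptions" is this collection from an initialized
       * client, and the resource name is hypothetical:
       *
       *   subscriptions.delete("projects/my-project/subscriptions/my-sub")
       *       .execute();
       */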
/**
* Gets the configuration details of a subscription.
*
* Create a request for the method "subscriptions.get".
*
* This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param subscription The name of the subscription to get.
* @return the request
*/
public Get get(java.lang.String subscription) throws java.io.IOException {
Get result = new Get(subscription);
initialize(result);
return result;
}
public class Get extends PubsubRequest<com.google.api.services.pubsub.model.Subscription> {
private static final String REST_PATH = "v1beta2/{+subscription}";
private final java.util.regex.Pattern SUBSCRIPTION_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/subscriptions/[^/]+$");
/**
* Gets the configuration details of a subscription.
*
* Create a request for the method "subscriptions.get".
*
 * This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation. <p> {@link
* Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param subscription The name of the subscription to get.
* @since 1.13
*/
protected Get(java.lang.String subscription) {
super(Pubsub.this, "GET", REST_PATH, null, com.google.api.services.pubsub.model.Subscription.class);
this.subscription = com.google.api.client.util.Preconditions.checkNotNull(subscription, "Required parameter subscription must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(SUBSCRIPTION_PATTERN.matcher(subscription).matches(),
"Parameter subscription must conform to the pattern " +
"^projects/[^/]+/subscriptions/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** The name of the subscription to get. */
@com.google.api.client.util.Key
private java.lang.String subscription;
/** The name of the subscription to get.
*/
public java.lang.String getSubscription() {
return subscription;
}
/** The name of the subscription to get. */
public Get setSubscription(java.lang.String subscription) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(SUBSCRIPTION_PATTERN.matcher(subscription).matches(),
"Parameter subscription must conform to the pattern " +
"^projects/[^/]+/subscriptions/[^/]+$");
}
this.subscription = subscription;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Gets the access control policy for a resource. Returns an empty policy if the resource exists and
* does not have a policy set.
*
* Create a request for the method "subscriptions.getIamPolicy".
*
* This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link GetIamPolicy#execute()} method to invoke the remote operation.
*
* @param resource REQUIRED: The resource for which the policy is being requested. See the operation documentation for
* the appropriate value for this field.
* @return the request
*/
public GetIamPolicy getIamPolicy(java.lang.String resource) throws java.io.IOException {
GetIamPolicy result = new GetIamPolicy(resource);
initialize(result);
return result;
}
public class GetIamPolicy extends PubsubRequest<com.google.api.services.pubsub.model.Policy> {
private static final String REST_PATH = "v1beta2/{+resource}:getIamPolicy";
private final java.util.regex.Pattern RESOURCE_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/subscriptions/[^/]+$");
/**
* Gets the access control policy for a resource. Returns an empty policy if the resource exists
* and does not have a policy set.
*
* Create a request for the method "subscriptions.getIamPolicy".
*
 * This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link GetIamPolicy#execute()} method to invoke the remote operation. <p>
* {@link
* GetIamPolicy#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param resource REQUIRED: The resource for which the policy is being requested. See the operation documentation for
* the appropriate value for this field.
* @since 1.13
*/
protected GetIamPolicy(java.lang.String resource) {
super(Pubsub.this, "GET", REST_PATH, null, com.google.api.services.pubsub.model.Policy.class);
this.resource = com.google.api.client.util.Preconditions.checkNotNull(resource, "Required parameter resource must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/subscriptions/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public GetIamPolicy set$Xgafv(java.lang.String $Xgafv) {
return (GetIamPolicy) super.set$Xgafv($Xgafv);
}
@Override
public GetIamPolicy setAccessToken(java.lang.String accessToken) {
return (GetIamPolicy) super.setAccessToken(accessToken);
}
@Override
public GetIamPolicy setAlt(java.lang.String alt) {
return (GetIamPolicy) super.setAlt(alt);
}
@Override
public GetIamPolicy setCallback(java.lang.String callback) {
return (GetIamPolicy) super.setCallback(callback);
}
@Override
public GetIamPolicy setFields(java.lang.String fields) {
return (GetIamPolicy) super.setFields(fields);
}
@Override
public GetIamPolicy setKey(java.lang.String key) {
return (GetIamPolicy) super.setKey(key);
}
@Override
public GetIamPolicy setOauthToken(java.lang.String oauthToken) {
return (GetIamPolicy) super.setOauthToken(oauthToken);
}
@Override
public GetIamPolicy setPrettyPrint(java.lang.Boolean prettyPrint) {
return (GetIamPolicy) super.setPrettyPrint(prettyPrint);
}
@Override
public GetIamPolicy setQuotaUser(java.lang.String quotaUser) {
return (GetIamPolicy) super.setQuotaUser(quotaUser);
}
@Override
public GetIamPolicy setUploadType(java.lang.String uploadType) {
return (GetIamPolicy) super.setUploadType(uploadType);
}
@Override
public GetIamPolicy setUploadProtocol(java.lang.String uploadProtocol) {
return (GetIamPolicy) super.setUploadProtocol(uploadProtocol);
}
/**
* REQUIRED: The resource for which the policy is being requested. See the operation
* documentation for the appropriate value for this field.
*/
@com.google.api.client.util.Key
private java.lang.String resource;
/** REQUIRED: The resource for which the policy is being requested. See the operation documentation for
the appropriate value for this field.
*/
public java.lang.String getResource() {
return resource;
}
/**
* REQUIRED: The resource for which the policy is being requested. See the operation
* documentation for the appropriate value for this field.
*/
public GetIamPolicy setResource(java.lang.String resource) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/subscriptions/[^/]+$");
}
this.resource = resource;
return this;
}
/**
* Optional. The policy format version to be returned. Valid values are 0, 1, and 3.
* Requests specifying an invalid value will be rejected. Requests for policies with any
* conditional bindings must specify version 3. Policies without any conditional bindings
* may specify any valid value or leave the field unset. To learn which resources support
* conditions in their IAM policies, see the [IAM
* documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
*/
@com.google.api.client.util.Key("options.requestedPolicyVersion")
private java.lang.Integer optionsRequestedPolicyVersion;
/** Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests
specifying an invalid value will be rejected. Requests for policies with any conditional bindings
must specify version 3. Policies without any conditional bindings may specify any valid value or
leave the field unset. To learn which resources support conditions in their IAM policies, see the
[IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
*/
public java.lang.Integer getOptionsRequestedPolicyVersion() {
return optionsRequestedPolicyVersion;
}
/**
* Optional. The policy format version to be returned. Valid values are 0, 1, and 3.
* Requests specifying an invalid value will be rejected. Requests for policies with any
* conditional bindings must specify version 3. Policies without any conditional bindings
* may specify any valid value or leave the field unset. To learn which resources support
* conditions in their IAM policies, see the [IAM
* documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
*/
public GetIamPolicy setOptionsRequestedPolicyVersion(java.lang.Integer optionsRequestedPolicyVersion) {
this.optionsRequestedPolicyVersion = optionsRequestedPolicyVersion;
return this;
}
@Override
public GetIamPolicy set(String parameterName, Object value) {
return (GetIamPolicy) super.set(parameterName, value);
}
}
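      /*
       * A minimal usage sketch for GetIamPolicy (illustration only). Version 3
       * must be requested when the policy may contain conditional bindings, as
       * the field documentation above notes; the resource name is hypothetical:
       *
       *   com.google.api.services.pubsub.model.Policy policy =
       *       subscriptions.getIamPolicy("projects/my-project/subscriptions/my-sub")
       *           .setOptionsRequestedPolicyVersion(3)
       *           .execute();
       */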
/**
* Lists matching subscriptions.
*
* Create a request for the method "subscriptions.list".
*
* This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param project The name of the cloud project that subscriptions belong to.
* @return the request
*/
public List list(java.lang.String project) throws java.io.IOException {
List result = new List(project);
initialize(result);
return result;
}
public class List extends PubsubRequest<com.google.api.services.pubsub.model.ListSubscriptionsResponse> {
private static final String REST_PATH = "v1beta2/{+project}/subscriptions";
private final java.util.regex.Pattern PROJECT_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+$");
/**
* Lists matching subscriptions.
*
* Create a request for the method "subscriptions.list".
*
 * This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation. <p> {@link
* List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project The name of the cloud project that subscriptions belong to.
* @since 1.13
*/
protected List(java.lang.String project) {
super(Pubsub.this, "GET", REST_PATH, null, com.google.api.services.pubsub.model.ListSubscriptionsResponse.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_PATTERN.matcher(project).matches(),
"Parameter project must conform to the pattern " +
"^projects/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** The name of the cloud project that subscriptions belong to. */
@com.google.api.client.util.Key
private java.lang.String project;
/** The name of the cloud project that subscriptions belong to.
*/
public java.lang.String getProject() {
return project;
}
/** The name of the cloud project that subscriptions belong to. */
public List setProject(java.lang.String project) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_PATTERN.matcher(project).matches(),
"Parameter project must conform to the pattern " +
"^projects/[^/]+$");
}
this.project = project;
return this;
}
/** Maximum number of subscriptions to return. */
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** Maximum number of subscriptions to return.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/** Maximum number of subscriptions to return. */
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/**
* The value returned by the last `ListSubscriptionsResponse`; indicates that this is a
* continuation of a prior `ListSubscriptions` call, and that the system should return the
* next page of data.
*/
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** The value returned by the last `ListSubscriptionsResponse`; indicates that this is a continuation
of a prior `ListSubscriptions` call, and that the system should return the next page of data.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/**
* The value returned by the last `ListSubscriptionsResponse`; indicates that this is a
* continuation of a prior `ListSubscriptions` call, and that the system should return the
* next page of data.
*/
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
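      /*
       * A minimal pagination sketch for List (illustration only). The page
       * token from each response feeds the next call until it comes back
       * empty; getNextPageToken()/getSubscriptions() are assumed from the
       * v1beta2 ListSubscriptionsResponse model:
       *
       *   String pageToken = null;
       *   do {
       *     com.google.api.services.pubsub.model.ListSubscriptionsResponse resp =
       *         subscriptions.list("projects/my-project")
       *             .setPageSize(100)
       *             .setPageToken(pageToken)
       *             .execute();
       *     // handle resp.getSubscriptions() here
       *     pageToken = resp.getNextPageToken();
       *   } while (pageToken != null);
       */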
/**
* Modifies the ack deadline for a specific message. This method is useful to indicate that more
* time is needed to process a message by the subscriber, or to make the message available for
 * redelivery if the processing was interrupted. Note that this does not modify the
 * subscription-level `ackDeadlineSeconds` used for subsequent messages.
*
* Create a request for the method "subscriptions.modifyAckDeadline".
*
* This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link ModifyAckDeadline#execute()} method to invoke the remote operation.
*
* @param subscription The name of the subscription.
* @param content the {@link com.google.api.services.pubsub.model.ModifyAckDeadlineRequest}
* @return the request
*/
public ModifyAckDeadline modifyAckDeadline(java.lang.String subscription, com.google.api.services.pubsub.model.ModifyAckDeadlineRequest content) throws java.io.IOException {
ModifyAckDeadline result = new ModifyAckDeadline(subscription, content);
initialize(result);
return result;
}
public class ModifyAckDeadline extends PubsubRequest<com.google.api.services.pubsub.model.Empty> {
private static final String REST_PATH = "v1beta2/{+subscription}:modifyAckDeadline";
private final java.util.regex.Pattern SUBSCRIPTION_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/subscriptions/[^/]+$");
/**
* Modifies the ack deadline for a specific message. This method is useful to indicate that more
* time is needed to process a message by the subscriber, or to make the message available for
 * redelivery if the processing was interrupted. Note that this does not modify the
 * subscription-level `ackDeadlineSeconds` used for subsequent messages.
*
* Create a request for the method "subscriptions.modifyAckDeadline".
*
 * This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link ModifyAckDeadline#execute()} method to invoke the remote operation.
 * <p> {@link ModifyAckDeadline#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
 * must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param subscription The name of the subscription.
* @param content the {@link com.google.api.services.pubsub.model.ModifyAckDeadlineRequest}
* @since 1.13
*/
protected ModifyAckDeadline(java.lang.String subscription, com.google.api.services.pubsub.model.ModifyAckDeadlineRequest content) {
super(Pubsub.this, "POST", REST_PATH, content, com.google.api.services.pubsub.model.Empty.class);
this.subscription = com.google.api.client.util.Preconditions.checkNotNull(subscription, "Required parameter subscription must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(SUBSCRIPTION_PATTERN.matcher(subscription).matches(),
"Parameter subscription must conform to the pattern " +
"^projects/[^/]+/subscriptions/[^/]+$");
}
}
@Override
public ModifyAckDeadline set$Xgafv(java.lang.String $Xgafv) {
return (ModifyAckDeadline) super.set$Xgafv($Xgafv);
}
@Override
public ModifyAckDeadline setAccessToken(java.lang.String accessToken) {
return (ModifyAckDeadline) super.setAccessToken(accessToken);
}
@Override
public ModifyAckDeadline setAlt(java.lang.String alt) {
return (ModifyAckDeadline) super.setAlt(alt);
}
@Override
public ModifyAckDeadline setCallback(java.lang.String callback) {
return (ModifyAckDeadline) super.setCallback(callback);
}
@Override
public ModifyAckDeadline setFields(java.lang.String fields) {
return (ModifyAckDeadline) super.setFields(fields);
}
@Override
public ModifyAckDeadline setKey(java.lang.String key) {
return (ModifyAckDeadline) super.setKey(key);
}
@Override
public ModifyAckDeadline setOauthToken(java.lang.String oauthToken) {
return (ModifyAckDeadline) super.setOauthToken(oauthToken);
}
@Override
public ModifyAckDeadline setPrettyPrint(java.lang.Boolean prettyPrint) {
return (ModifyAckDeadline) super.setPrettyPrint(prettyPrint);
}
@Override
public ModifyAckDeadline setQuotaUser(java.lang.String quotaUser) {
return (ModifyAckDeadline) super.setQuotaUser(quotaUser);
}
@Override
public ModifyAckDeadline setUploadType(java.lang.String uploadType) {
return (ModifyAckDeadline) super.setUploadType(uploadType);
}
@Override
public ModifyAckDeadline setUploadProtocol(java.lang.String uploadProtocol) {
return (ModifyAckDeadline) super.setUploadProtocol(uploadProtocol);
}
/** The name of the subscription. */
@com.google.api.client.util.Key
private java.lang.String subscription;
/** The name of the subscription.
*/
public java.lang.String getSubscription() {
return subscription;
}
/** The name of the subscription. */
public ModifyAckDeadline setSubscription(java.lang.String subscription) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(SUBSCRIPTION_PATTERN.matcher(subscription).matches(),
"Parameter subscription must conform to the pattern " +
"^projects/[^/]+/subscriptions/[^/]+$");
}
this.subscription = subscription;
return this;
}
@Override
public ModifyAckDeadline set(String parameterName, Object value) {
return (ModifyAckDeadline) super.set(parameterName, value);
}
}
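      /*
       * A minimal usage sketch for ModifyAckDeadline (illustration only),
       * buying more processing time for one pulled message. The ack ID comes
       * from a prior Pull response; setAckIds/setAckDeadlineSeconds are
       * assumed from the v1beta2 ModifyAckDeadlineRequest model:
       *
       *   com.google.api.services.pubsub.model.ModifyAckDeadlineRequest body =
       *       new com.google.api.services.pubsub.model.ModifyAckDeadlineRequest()
       *           .setAckIds(java.util.Collections.singletonList("ACK_ID_FROM_PULL"))
       *           .setAckDeadlineSeconds(60);
       *   subscriptions.modifyAckDeadline(
       *       "projects/my-project/subscriptions/my-sub", body).execute();
       */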
/**
* Modifies the `PushConfig` for a specified subscription. This may be used to change a push
* subscription to a pull one (signified by an empty `PushConfig`) or vice versa, or change the
* endpoint URL and other attributes of a push subscription. Messages will accumulate for delivery
* continuously through the call regardless of changes to the `PushConfig`.
*
* Create a request for the method "subscriptions.modifyPushConfig".
*
* This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link ModifyPushConfig#execute()} method to invoke the remote operation.
*
* @param subscription The name of the subscription.
* @param content the {@link com.google.api.services.pubsub.model.ModifyPushConfigRequest}
* @return the request
*/
public ModifyPushConfig modifyPushConfig(java.lang.String subscription, com.google.api.services.pubsub.model.ModifyPushConfigRequest content) throws java.io.IOException {
ModifyPushConfig result = new ModifyPushConfig(subscription, content);
initialize(result);
return result;
}
public class ModifyPushConfig extends PubsubRequest<com.google.api.services.pubsub.model.Empty> {
private static final String REST_PATH = "v1beta2/{+subscription}:modifyPushConfig";
private final java.util.regex.Pattern SUBSCRIPTION_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/subscriptions/[^/]+$");
/**
* Modifies the `PushConfig` for a specified subscription. This may be used to change a push
* subscription to a pull one (signified by an empty `PushConfig`) or vice versa, or change the
* endpoint URL and other attributes of a push subscription. Messages will accumulate for delivery
* continuously through the call regardless of changes to the `PushConfig`.
*
* Create a request for the method "subscriptions.modifyPushConfig".
*
 * This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link ModifyPushConfig#execute()} method to invoke the remote operation.
 * <p> {@link ModifyPushConfig#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
 * must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param subscription The name of the subscription.
* @param content the {@link com.google.api.services.pubsub.model.ModifyPushConfigRequest}
* @since 1.13
*/
protected ModifyPushConfig(java.lang.String subscription, com.google.api.services.pubsub.model.ModifyPushConfigRequest content) {
super(Pubsub.this, "POST", REST_PATH, content, com.google.api.services.pubsub.model.Empty.class);
this.subscription = com.google.api.client.util.Preconditions.checkNotNull(subscription, "Required parameter subscription must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(SUBSCRIPTION_PATTERN.matcher(subscription).matches(),
"Parameter subscription must conform to the pattern " +
"^projects/[^/]+/subscriptions/[^/]+$");
}
}
@Override
public ModifyPushConfig set$Xgafv(java.lang.String $Xgafv) {
return (ModifyPushConfig) super.set$Xgafv($Xgafv);
}
@Override
public ModifyPushConfig setAccessToken(java.lang.String accessToken) {
return (ModifyPushConfig) super.setAccessToken(accessToken);
}
@Override
public ModifyPushConfig setAlt(java.lang.String alt) {
return (ModifyPushConfig) super.setAlt(alt);
}
@Override
public ModifyPushConfig setCallback(java.lang.String callback) {
return (ModifyPushConfig) super.setCallback(callback);
}
@Override
public ModifyPushConfig setFields(java.lang.String fields) {
return (ModifyPushConfig) super.setFields(fields);
}
@Override
public ModifyPushConfig setKey(java.lang.String key) {
return (ModifyPushConfig) super.setKey(key);
}
@Override
public ModifyPushConfig setOauthToken(java.lang.String oauthToken) {
return (ModifyPushConfig) super.setOauthToken(oauthToken);
}
@Override
public ModifyPushConfig setPrettyPrint(java.lang.Boolean prettyPrint) {
return (ModifyPushConfig) super.setPrettyPrint(prettyPrint);
}
@Override
public ModifyPushConfig setQuotaUser(java.lang.String quotaUser) {
return (ModifyPushConfig) super.setQuotaUser(quotaUser);
}
@Override
public ModifyPushConfig setUploadType(java.lang.String uploadType) {
return (ModifyPushConfig) super.setUploadType(uploadType);
}
@Override
public ModifyPushConfig setUploadProtocol(java.lang.String uploadProtocol) {
return (ModifyPushConfig) super.setUploadProtocol(uploadProtocol);
}
/** The name of the subscription. */
@com.google.api.client.util.Key
private java.lang.String subscription;
/** The name of the subscription.
*/
public java.lang.String getSubscription() {
return subscription;
}
/** The name of the subscription. */
public ModifyPushConfig setSubscription(java.lang.String subscription) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(SUBSCRIPTION_PATTERN.matcher(subscription).matches(),
"Parameter subscription must conform to the pattern " +
"^projects/[^/]+/subscriptions/[^/]+$");
}
this.subscription = subscription;
return this;
}
@Override
public ModifyPushConfig set(String parameterName, Object value) {
return (ModifyPushConfig) super.set(parameterName, value);
}
}
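      /*
       * A minimal usage sketch for ModifyPushConfig (illustration only),
       * turning a push subscription into a pull one by sending an empty
       * PushConfig, as described above; the PushConfig model class is assumed
       * from the v1beta2 surface:
       *
       *   com.google.api.services.pubsub.model.ModifyPushConfigRequest body =
       *       new com.google.api.services.pubsub.model.ModifyPushConfigRequest()
       *           .setPushConfig(new com.google.api.services.pubsub.model.PushConfig());
       *   subscriptions.modifyPushConfig(
       *       "projects/my-project/subscriptions/my-sub", body).execute();
       */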
/**
* Pulls messages from the server. Returns an empty list if there are no messages available in the
* backlog. The server may return `UNAVAILABLE` if there are too many concurrent pull requests
* pending for the given subscription.
*
* Create a request for the method "subscriptions.pull".
*
* This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link Pull#execute()} method to invoke the remote operation.
*
* @param subscription The subscription from which messages should be pulled.
* @param content the {@link com.google.api.services.pubsub.model.PullRequest}
* @return the request
*/
public Pull pull(java.lang.String subscription, com.google.api.services.pubsub.model.PullRequest content) throws java.io.IOException {
Pull result = new Pull(subscription, content);
initialize(result);
return result;
}
public class Pull extends PubsubRequest<com.google.api.services.pubsub.model.PullResponse> {
private static final String REST_PATH = "v1beta2/{+subscription}:pull";
private final java.util.regex.Pattern SUBSCRIPTION_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/subscriptions/[^/]+$");
/**
* Pulls messages from the server. Returns an empty list if there are no messages available in the
* backlog. The server may return `UNAVAILABLE` if there are too many concurrent pull requests
* pending for the given subscription.
*
* Create a request for the method "subscriptions.pull".
*
 * This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link Pull#execute()} method to invoke the remote operation. <p> {@link
* Pull#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param subscription The subscription from which messages should be pulled.
* @param content the {@link com.google.api.services.pubsub.model.PullRequest}
* @since 1.13
*/
protected Pull(java.lang.String subscription, com.google.api.services.pubsub.model.PullRequest content) {
super(Pubsub.this, "POST", REST_PATH, content, com.google.api.services.pubsub.model.PullResponse.class);
this.subscription = com.google.api.client.util.Preconditions.checkNotNull(subscription, "Required parameter subscription must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(SUBSCRIPTION_PATTERN.matcher(subscription).matches(),
"Parameter subscription must conform to the pattern " +
"^projects/[^/]+/subscriptions/[^/]+$");
}
}
@Override
public Pull set$Xgafv(java.lang.String $Xgafv) {
return (Pull) super.set$Xgafv($Xgafv);
}
@Override
public Pull setAccessToken(java.lang.String accessToken) {
return (Pull) super.setAccessToken(accessToken);
}
@Override
public Pull setAlt(java.lang.String alt) {
return (Pull) super.setAlt(alt);
}
@Override
public Pull setCallback(java.lang.String callback) {
return (Pull) super.setCallback(callback);
}
@Override
public Pull setFields(java.lang.String fields) {
return (Pull) super.setFields(fields);
}
@Override
public Pull setKey(java.lang.String key) {
return (Pull) super.setKey(key);
}
@Override
public Pull setOauthToken(java.lang.String oauthToken) {
return (Pull) super.setOauthToken(oauthToken);
}
@Override
public Pull setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Pull) super.setPrettyPrint(prettyPrint);
}
@Override
public Pull setQuotaUser(java.lang.String quotaUser) {
return (Pull) super.setQuotaUser(quotaUser);
}
@Override
public Pull setUploadType(java.lang.String uploadType) {
return (Pull) super.setUploadType(uploadType);
}
@Override
public Pull setUploadProtocol(java.lang.String uploadProtocol) {
return (Pull) super.setUploadProtocol(uploadProtocol);
}
/** The subscription from which messages should be pulled. */
@com.google.api.client.util.Key
private java.lang.String subscription;
/** The subscription from which messages should be pulled.
*/
public java.lang.String getSubscription() {
return subscription;
}
/** The subscription from which messages should be pulled. */
public Pull setSubscription(java.lang.String subscription) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(SUBSCRIPTION_PATTERN.matcher(subscription).matches(),
"Parameter subscription must conform to the pattern " +
"^projects/[^/]+/subscriptions/[^/]+$");
}
this.subscription = subscription;
return this;
}
@Override
public Pull set(String parameterName, Object value) {
return (Pull) super.set(parameterName, value);
}
}
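      /*
       * A minimal usage sketch for Pull (illustration only). Returning
       * immediately avoids blocking on an empty backlog, and the batch size is
       * capped; setReturnImmediately/setMaxMessages are assumed from the
       * v1beta2 PullRequest model:
       *
       *   com.google.api.services.pubsub.model.PullRequest body =
       *       new com.google.api.services.pubsub.model.PullRequest()
       *           .setReturnImmediately(true)
       *           .setMaxMessages(10);
       *   com.google.api.services.pubsub.model.PullResponse resp =
       *       subscriptions.pull("projects/my-project/subscriptions/my-sub", body)
       *           .execute();
       *   // an empty backlog yields an empty (or null) received-message list
       */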
/**
* Sets the access control policy on the specified resource. Replaces any existing policy. Can
* return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
*
* Create a request for the method "subscriptions.setIamPolicy".
*
* This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link SetIamPolicy#execute()} method to invoke the remote operation.
*
* @param resource REQUIRED: The resource for which the policy is being specified. See the operation documentation for
* the appropriate value for this field.
* @param content the {@link com.google.api.services.pubsub.model.SetIamPolicyRequest}
* @return the request
*/
public SetIamPolicy setIamPolicy(java.lang.String resource, com.google.api.services.pubsub.model.SetIamPolicyRequest content) throws java.io.IOException {
SetIamPolicy result = new SetIamPolicy(resource, content);
initialize(result);
return result;
}
public class SetIamPolicy extends PubsubRequest<com.google.api.services.pubsub.model.Policy> {
private static final String REST_PATH = "v1beta2/{+resource}:setIamPolicy";
private final java.util.regex.Pattern RESOURCE_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/subscriptions/[^/]+$");
/**
* Sets the access control policy on the specified resource. Replaces any existing policy. Can
* return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
*
* Create a request for the method "subscriptions.setIamPolicy".
*
 * This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link SetIamPolicy#execute()} method to invoke the remote operation. <p>
* {@link
* SetIamPolicy#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param resource REQUIRED: The resource for which the policy is being specified. See the operation documentation for
* the appropriate value for this field.
* @param content the {@link com.google.api.services.pubsub.model.SetIamPolicyRequest}
* @since 1.13
*/
protected SetIamPolicy(java.lang.String resource, com.google.api.services.pubsub.model.SetIamPolicyRequest content) {
super(Pubsub.this, "POST", REST_PATH, content, com.google.api.services.pubsub.model.Policy.class);
this.resource = com.google.api.client.util.Preconditions.checkNotNull(resource, "Required parameter resource must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/subscriptions/[^/]+$");
}
}
@Override
public SetIamPolicy set$Xgafv(java.lang.String $Xgafv) {
return (SetIamPolicy) super.set$Xgafv($Xgafv);
}
@Override
public SetIamPolicy setAccessToken(java.lang.String accessToken) {
return (SetIamPolicy) super.setAccessToken(accessToken);
}
@Override
public SetIamPolicy setAlt(java.lang.String alt) {
return (SetIamPolicy) super.setAlt(alt);
}
@Override
public SetIamPolicy setCallback(java.lang.String callback) {
return (SetIamPolicy) super.setCallback(callback);
}
@Override
public SetIamPolicy setFields(java.lang.String fields) {
return (SetIamPolicy) super.setFields(fields);
}
@Override
public SetIamPolicy setKey(java.lang.String key) {
return (SetIamPolicy) super.setKey(key);
}
@Override
public SetIamPolicy setOauthToken(java.lang.String oauthToken) {
return (SetIamPolicy) super.setOauthToken(oauthToken);
}
@Override
public SetIamPolicy setPrettyPrint(java.lang.Boolean prettyPrint) {
return (SetIamPolicy) super.setPrettyPrint(prettyPrint);
}
@Override
public SetIamPolicy setQuotaUser(java.lang.String quotaUser) {
return (SetIamPolicy) super.setQuotaUser(quotaUser);
}
@Override
public SetIamPolicy setUploadType(java.lang.String uploadType) {
return (SetIamPolicy) super.setUploadType(uploadType);
}
@Override
public SetIamPolicy setUploadProtocol(java.lang.String uploadProtocol) {
return (SetIamPolicy) super.setUploadProtocol(uploadProtocol);
}
/**
* REQUIRED: The resource for which the policy is being specified. See the operation
* documentation for the appropriate value for this field.
*/
@com.google.api.client.util.Key
private java.lang.String resource;
/** REQUIRED: The resource for which the policy is being specified. See the operation documentation for
the appropriate value for this field.
*/
public java.lang.String getResource() {
return resource;
}
/**
* REQUIRED: The resource for which the policy is being specified. See the operation
* documentation for the appropriate value for this field.
*/
public SetIamPolicy setResource(java.lang.String resource) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/subscriptions/[^/]+$");
}
this.resource = resource;
return this;
}
@Override
public SetIamPolicy set(String parameterName, Object value) {
return (SetIamPolicy) super.set(parameterName, value);
}
}
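      /*
       * A minimal usage sketch for SetIamPolicy (illustration only), using the
       * usual read-modify-write pattern so the replacement policy is based on
       * the current one; setPolicy is assumed from the v1beta2
       * SetIamPolicyRequest model:
       *
       *   com.google.api.services.pubsub.model.Policy current =
       *       subscriptions.getIamPolicy("projects/my-project/subscriptions/my-sub")
       *           .execute();
       *   // ... adjust the bindings on "current" here ...
       *   subscriptions.setIamPolicy("projects/my-project/subscriptions/my-sub",
       *       new com.google.api.services.pubsub.model.SetIamPolicyRequest()
       *           .setPolicy(current))
       *       .execute();
       */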
/**
* Returns permissions that a caller has on the specified resource. If the resource does not exist,
* this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is
* designed to be used for building permission-aware UIs and command-line tools, not for
* authorization checking. This operation may "fail open" without warning.
*
* Create a request for the method "subscriptions.testIamPermissions".
*
* This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link TestIamPermissions#execute()} method to invoke the remote operation.
*
* @param resource REQUIRED: The resource for which the policy detail is being requested. See the operation
* documentation for the appropriate value for this field.
* @param content the {@link com.google.api.services.pubsub.model.TestIamPermissionsRequest}
* @return the request
*/
public TestIamPermissions testIamPermissions(java.lang.String resource, com.google.api.services.pubsub.model.TestIamPermissionsRequest content) throws java.io.IOException {
TestIamPermissions result = new TestIamPermissions(resource, content);
initialize(result);
return result;
}
public class TestIamPermissions extends PubsubRequest<com.google.api.services.pubsub.model.TestIamPermissionsResponse> {
private static final String REST_PATH = "v1beta2/{+resource}:testIamPermissions";
private final java.util.regex.Pattern RESOURCE_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/subscriptions/[^/]+$");
/**
* Returns permissions that a caller has on the specified resource. If the resource does not
* exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This
* operation is designed to be used for building permission-aware UIs and command-line tools, not
* for authorization checking. This operation may "fail open" without warning.
*
* Create a request for the method "subscriptions.testIamPermissions".
*
 * This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link TestIamPermissions#execute()} method to invoke the remote
 * operation. <p> {@link TestIamPermissions#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
 * must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param resource REQUIRED: The resource for which the policy detail is being requested. See the operation
* documentation for the appropriate value for this field.
* @param content the {@link com.google.api.services.pubsub.model.TestIamPermissionsRequest}
* @since 1.13
*/
protected TestIamPermissions(java.lang.String resource, com.google.api.services.pubsub.model.TestIamPermissionsRequest content) {
super(Pubsub.this, "POST", REST_PATH, content, com.google.api.services.pubsub.model.TestIamPermissionsResponse.class);
this.resource = com.google.api.client.util.Preconditions.checkNotNull(resource, "Required parameter resource must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/subscriptions/[^/]+$");
}
}
@Override
public TestIamPermissions set$Xgafv(java.lang.String $Xgafv) {
return (TestIamPermissions) super.set$Xgafv($Xgafv);
}
@Override
public TestIamPermissions setAccessToken(java.lang.String accessToken) {
return (TestIamPermissions) super.setAccessToken(accessToken);
}
@Override
public TestIamPermissions setAlt(java.lang.String alt) {
return (TestIamPermissions) super.setAlt(alt);
}
@Override
public TestIamPermissions setCallback(java.lang.String callback) {
return (TestIamPermissions) super.setCallback(callback);
}
@Override
public TestIamPermissions setFields(java.lang.String fields) {
return (TestIamPermissions) super.setFields(fields);
}
@Override
public TestIamPermissions setKey(java.lang.String key) {
return (TestIamPermissions) super.setKey(key);
}
@Override
public TestIamPermissions setOauthToken(java.lang.String oauthToken) {
return (TestIamPermissions) super.setOauthToken(oauthToken);
}
@Override
public TestIamPermissions setPrettyPrint(java.lang.Boolean prettyPrint) {
return (TestIamPermissions) super.setPrettyPrint(prettyPrint);
}
@Override
public TestIamPermissions setQuotaUser(java.lang.String quotaUser) {
return (TestIamPermissions) super.setQuotaUser(quotaUser);
}
@Override
public TestIamPermissions setUploadType(java.lang.String uploadType) {
return (TestIamPermissions) super.setUploadType(uploadType);
}
@Override
public TestIamPermissions setUploadProtocol(java.lang.String uploadProtocol) {
return (TestIamPermissions) super.setUploadProtocol(uploadProtocol);
}
/**
* REQUIRED: The resource for which the policy detail is being requested. See the operation
* documentation for the appropriate value for this field.
*/
@com.google.api.client.util.Key
private java.lang.String resource;
/** REQUIRED: The resource for which the policy detail is being requested. See the operation
documentation for the appropriate value for this field.
*/
public java.lang.String getResource() {
return resource;
}
/**
* REQUIRED: The resource for which the policy detail is being requested. See the operation
* documentation for the appropriate value for this field.
*/
public TestIamPermissions setResource(java.lang.String resource) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/subscriptions/[^/]+$");
}
this.resource = resource;
return this;
}
@Override
public TestIamPermissions set(String parameterName, Object value) {
return (TestIamPermissions) super.set(parameterName, value);
}
}
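      /*
       * A minimal usage sketch for TestIamPermissions (illustration only). As
       * noted above it suits permission-aware UIs rather than authorization
       * checks; the permission string and the setPermissions setter are
       * assumptions:
       *
       *   com.google.api.services.pubsub.model.TestIamPermissionsResponse resp =
       *       subscriptions.testIamPermissions(
       *           "projects/my-project/subscriptions/my-sub",
       *           new com.google.api.services.pubsub.model.TestIamPermissionsRequest()
       *               .setPermissions(java.util.Arrays.asList(
       *                   "pubsub.subscriptions.consume")))
       *           .execute();
       *   // resp.getPermissions() echoes the subset the caller actually holds
       */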
}
/**
* An accessor for creating requests from the Topics collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code Pubsub pubsub = new Pubsub(...);}
* {@code Pubsub.Topics.List request = pubsub.topics().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Topics topics() {
return new Topics();
}
/**
* The "topics" collection of methods.
*/
public class Topics {
/**
* Creates the given topic with the given name.
*
* Create a request for the method "topics.create".
*
* This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation.
*
* @param name The name of the topic. It must have the format `"projects/{project}/topics/{topic}"`. `{topic}` must
* start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes
* (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`).
* It must be between 3 and 255 characters in length, and it must not start with `"goog"`.
* @param content the {@link com.google.api.services.pubsub.model.Topic}
* @return the request
*/
public Create create(java.lang.String name, com.google.api.services.pubsub.model.Topic content) throws java.io.IOException {
Create result = new Create(name, content);
initialize(result);
return result;
}
public class Create extends PubsubRequest<com.google.api.services.pubsub.model.Topic> {
private static final String REST_PATH = "v1beta2/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/topics/[^/]+$");
/**
* Creates the given topic with the given name.
*
* Create a request for the method "topics.create".
*
 * This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation. <p> {@link
* Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The name of the topic. It must have the format `"projects/{project}/topics/{topic}"`. `{topic}` must
* start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes
* (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`).
* It must be between 3 and 255 characters in length, and it must not start with `"goog"`.
* @param content the {@link com.google.api.services.pubsub.model.Topic}
* @since 1.13
*/
protected Create(java.lang.String name, com.google.api.services.pubsub.model.Topic content) {
super(Pubsub.this, "PUT", REST_PATH, content, com.google.api.services.pubsub.model.Topic.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/topics/[^/]+$");
}
}
@Override
public Create set$Xgafv(java.lang.String $Xgafv) {
return (Create) super.set$Xgafv($Xgafv);
}
@Override
public Create setAccessToken(java.lang.String accessToken) {
return (Create) super.setAccessToken(accessToken);
}
@Override
public Create setAlt(java.lang.String alt) {
return (Create) super.setAlt(alt);
}
@Override
public Create setCallback(java.lang.String callback) {
return (Create) super.setCallback(callback);
}
@Override
public Create setFields(java.lang.String fields) {
return (Create) super.setFields(fields);
}
@Override
public Create setKey(java.lang.String key) {
return (Create) super.setKey(key);
}
@Override
public Create setOauthToken(java.lang.String oauthToken) {
return (Create) super.setOauthToken(oauthToken);
}
@Override
public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Create) super.setPrettyPrint(prettyPrint);
}
@Override
public Create setQuotaUser(java.lang.String quotaUser) {
return (Create) super.setQuotaUser(quotaUser);
}
@Override
public Create setUploadType(java.lang.String uploadType) {
return (Create) super.setUploadType(uploadType);
}
@Override
public Create setUploadProtocol(java.lang.String uploadProtocol) {
return (Create) super.setUploadProtocol(uploadProtocol);
}
/**
* The name of the topic. It must have the format `"projects/{project}/topics/{topic}"`.
* `{topic}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers
* (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or
* percent signs (`%`). It must be between 3 and 255 characters in length, and it must not
* start with `"goog"`.
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** The name of the topic. It must have the format `"projects/{project}/topics/{topic}"`. `{topic}`
must start with a letter, and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),
underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent signs (`%`). It must be
between 3 and 255 characters in length, and it must not start with `"goog"`.
*/
public java.lang.String getName() {
return name;
}
/**
* The name of the topic. It must have the format `"projects/{project}/topics/{topic}"`.
* `{topic}` must start with a letter, and contain only letters (`[A-Za-z]`), numbers
* (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or
* percent signs (`%`). It must be between 3 and 255 characters in length, and it must not
* start with `"goog"`.
*/
public Create setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/topics/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Create set(String parameterName, Object value) {
return (Create) super.set(parameterName, value);
}
}
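      /*
       * A minimal usage sketch for topics Create (illustration only). The
       * request body can be an empty Topic, since the topic name travels in
       * the URL path; "topics" is this collection from an initialized client:
       *
       *   com.google.api.services.pubsub.model.Topic created =
       *       topics.create("projects/my-project/topics/my-topic",
       *           new com.google.api.services.pubsub.model.Topic())
       *           .execute();
       */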
/**
* Deletes the topic with the given name. Returns `NOT_FOUND` if the topic does not exist. After a
* topic is deleted, a new topic may be created with the same name; this is an entirely new topic
* with none of the old configuration or subscriptions. Existing subscriptions to this topic are not
* deleted, but their `topic` field is set to `_deleted-topic_`.
*
* Create a request for the method "topics.delete".
*
* This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param topic Name of the topic to delete.
* @return the request
*/
public Delete delete(java.lang.String topic) throws java.io.IOException {
Delete result = new Delete(topic);
initialize(result);
return result;
}
public class Delete extends PubsubRequest<com.google.api.services.pubsub.model.Empty> {
private static final String REST_PATH = "v1beta2/{+topic}";
private final java.util.regex.Pattern TOPIC_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/topics/[^/]+$");
/**
* Deletes the topic with the given name. Returns `NOT_FOUND` if the topic does not exist. After a
* topic is deleted, a new topic may be created with the same name; this is an entirely new topic
* with none of the old configuration or subscriptions. Existing subscriptions to this topic are
* not deleted, but their `topic` field is set to `_deleted-topic_`.
*
* Create a request for the method "topics.delete".
*
 * This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation. <p> {@link
* Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param topic Name of the topic to delete.
* @since 1.13
*/
protected Delete(java.lang.String topic) {
super(Pubsub.this, "DELETE", REST_PATH, null, com.google.api.services.pubsub.model.Empty.class);
this.topic = com.google.api.client.util.Preconditions.checkNotNull(topic, "Required parameter topic must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(TOPIC_PATTERN.matcher(topic).matches(),
"Parameter topic must conform to the pattern " +
"^projects/[^/]+/topics/[^/]+$");
}
}
@Override
public Delete set$Xgafv(java.lang.String $Xgafv) {
return (Delete) super.set$Xgafv($Xgafv);
}
@Override
public Delete setAccessToken(java.lang.String accessToken) {
return (Delete) super.setAccessToken(accessToken);
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setCallback(java.lang.String callback) {
return (Delete) super.setCallback(callback);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUploadType(java.lang.String uploadType) {
return (Delete) super.setUploadType(uploadType);
}
@Override
public Delete setUploadProtocol(java.lang.String uploadProtocol) {
return (Delete) super.setUploadProtocol(uploadProtocol);
}
/** Name of the topic to delete. */
@com.google.api.client.util.Key
private java.lang.String topic;
/** Name of the topic to delete.
*/
public java.lang.String getTopic() {
return topic;
}
/** Name of the topic to delete. */
public Delete setTopic(java.lang.String topic) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(TOPIC_PATTERN.matcher(topic).matches(),
"Parameter topic must conform to the pattern " +
"^projects/[^/]+/topics/[^/]+$");
}
this.topic = topic;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
/**
* Gets the configuration of a topic.
*
* Create a request for the method "topics.get".
*
* This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param topic The name of the topic to get.
* @return the request
*/
public Get get(java.lang.String topic) throws java.io.IOException {
Get result = new Get(topic);
initialize(result);
return result;
}
public class Get extends PubsubRequest<com.google.api.services.pubsub.model.Topic> {
private static final String REST_PATH = "v1beta2/{+topic}";
private final java.util.regex.Pattern TOPIC_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/topics/[^/]+$");
/**
* Gets the configuration of a topic.
*
* Create a request for the method "topics.get".
*
       * This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation. <p> {@link
* Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param topic The name of the topic to get.
* @since 1.13
*/
protected Get(java.lang.String topic) {
super(Pubsub.this, "GET", REST_PATH, null, com.google.api.services.pubsub.model.Topic.class);
this.topic = com.google.api.client.util.Preconditions.checkNotNull(topic, "Required parameter topic must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(TOPIC_PATTERN.matcher(topic).matches(),
"Parameter topic must conform to the pattern " +
"^projects/[^/]+/topics/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** The name of the topic to get. */
@com.google.api.client.util.Key
private java.lang.String topic;
/** The name of the topic to get.
*/
public java.lang.String getTopic() {
return topic;
}
/** The name of the topic to get. */
public Get setTopic(java.lang.String topic) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(TOPIC_PATTERN.matcher(topic).matches(),
"Parameter topic must conform to the pattern " +
"^projects/[^/]+/topics/[^/]+$");
}
this.topic = topic;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Gets the access control policy for a resource. Returns an empty policy if the resource exists and
* does not have a policy set.
*
* Create a request for the method "topics.getIamPolicy".
*
* This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link GetIamPolicy#execute()} method to invoke the remote operation.
*
* @param resource REQUIRED: The resource for which the policy is being requested. See the operation documentation for
* the appropriate value for this field.
* @return the request
*/
public GetIamPolicy getIamPolicy(java.lang.String resource) throws java.io.IOException {
GetIamPolicy result = new GetIamPolicy(resource);
initialize(result);
return result;
}
public class GetIamPolicy extends PubsubRequest<com.google.api.services.pubsub.model.Policy> {
private static final String REST_PATH = "v1beta2/{+resource}:getIamPolicy";
private final java.util.regex.Pattern RESOURCE_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/topics/[^/]+$");
/**
* Gets the access control policy for a resource. Returns an empty policy if the resource exists
* and does not have a policy set.
*
* Create a request for the method "topics.getIamPolicy".
*
       * This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link GetIamPolicy#execute()} method to invoke the remote operation. <p>
* {@link
* GetIamPolicy#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param resource REQUIRED: The resource for which the policy is being requested. See the operation documentation for
* the appropriate value for this field.
* @since 1.13
*/
protected GetIamPolicy(java.lang.String resource) {
super(Pubsub.this, "GET", REST_PATH, null, com.google.api.services.pubsub.model.Policy.class);
this.resource = com.google.api.client.util.Preconditions.checkNotNull(resource, "Required parameter resource must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/topics/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public GetIamPolicy set$Xgafv(java.lang.String $Xgafv) {
return (GetIamPolicy) super.set$Xgafv($Xgafv);
}
@Override
public GetIamPolicy setAccessToken(java.lang.String accessToken) {
return (GetIamPolicy) super.setAccessToken(accessToken);
}
@Override
public GetIamPolicy setAlt(java.lang.String alt) {
return (GetIamPolicy) super.setAlt(alt);
}
@Override
public GetIamPolicy setCallback(java.lang.String callback) {
return (GetIamPolicy) super.setCallback(callback);
}
@Override
public GetIamPolicy setFields(java.lang.String fields) {
return (GetIamPolicy) super.setFields(fields);
}
@Override
public GetIamPolicy setKey(java.lang.String key) {
return (GetIamPolicy) super.setKey(key);
}
@Override
public GetIamPolicy setOauthToken(java.lang.String oauthToken) {
return (GetIamPolicy) super.setOauthToken(oauthToken);
}
@Override
public GetIamPolicy setPrettyPrint(java.lang.Boolean prettyPrint) {
return (GetIamPolicy) super.setPrettyPrint(prettyPrint);
}
@Override
public GetIamPolicy setQuotaUser(java.lang.String quotaUser) {
return (GetIamPolicy) super.setQuotaUser(quotaUser);
}
@Override
public GetIamPolicy setUploadType(java.lang.String uploadType) {
return (GetIamPolicy) super.setUploadType(uploadType);
}
@Override
public GetIamPolicy setUploadProtocol(java.lang.String uploadProtocol) {
return (GetIamPolicy) super.setUploadProtocol(uploadProtocol);
}
/**
* REQUIRED: The resource for which the policy is being requested. See the operation
* documentation for the appropriate value for this field.
*/
@com.google.api.client.util.Key
private java.lang.String resource;
/** REQUIRED: The resource for which the policy is being requested. See the operation documentation for
the appropriate value for this field.
*/
public java.lang.String getResource() {
return resource;
}
/**
* REQUIRED: The resource for which the policy is being requested. See the operation
* documentation for the appropriate value for this field.
*/
public GetIamPolicy setResource(java.lang.String resource) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/topics/[^/]+$");
}
this.resource = resource;
return this;
}
/**
* Optional. The policy format version to be returned. Valid values are 0, 1, and 3.
* Requests specifying an invalid value will be rejected. Requests for policies with any
* conditional bindings must specify version 3. Policies without any conditional bindings
* may specify any valid value or leave the field unset. To learn which resources support
* conditions in their IAM policies, see the [IAM
* documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
*/
@com.google.api.client.util.Key("options.requestedPolicyVersion")
private java.lang.Integer optionsRequestedPolicyVersion;
/** Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests
specifying an invalid value will be rejected. Requests for policies with any conditional bindings
must specify version 3. Policies without any conditional bindings may specify any valid value or
leave the field unset. To learn which resources support conditions in their IAM policies, see the
[IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
*/
public java.lang.Integer getOptionsRequestedPolicyVersion() {
return optionsRequestedPolicyVersion;
}
/**
* Optional. The policy format version to be returned. Valid values are 0, 1, and 3.
* Requests specifying an invalid value will be rejected. Requests for policies with any
* conditional bindings must specify version 3. Policies without any conditional bindings
* may specify any valid value or leave the field unset. To learn which resources support
* conditions in their IAM policies, see the [IAM
* documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
*/
public GetIamPolicy setOptionsRequestedPolicyVersion(java.lang.Integer optionsRequestedPolicyVersion) {
this.optionsRequestedPolicyVersion = optionsRequestedPolicyVersion;
return this;
}
@Override
public GetIamPolicy set(String parameterName, Object value) {
return (GetIamPolicy) super.set(parameterName, value);
}
}
/**
* Lists matching topics.
*
* Create a request for the method "topics.list".
*
* This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param project The name of the cloud project that topics belong to.
* @return the request
*/
public List list(java.lang.String project) throws java.io.IOException {
List result = new List(project);
initialize(result);
return result;
}
public class List extends PubsubRequest<com.google.api.services.pubsub.model.ListTopicsResponse> {
private static final String REST_PATH = "v1beta2/{+project}/topics";
private final java.util.regex.Pattern PROJECT_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+$");
/**
* Lists matching topics.
*
* Create a request for the method "topics.list".
*
       * This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation. <p> {@link
* List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param project The name of the cloud project that topics belong to.
* @since 1.13
*/
protected List(java.lang.String project) {
super(Pubsub.this, "GET", REST_PATH, null, com.google.api.services.pubsub.model.ListTopicsResponse.class);
this.project = com.google.api.client.util.Preconditions.checkNotNull(project, "Required parameter project must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_PATTERN.matcher(project).matches(),
"Parameter project must conform to the pattern " +
"^projects/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** The name of the cloud project that topics belong to. */
@com.google.api.client.util.Key
private java.lang.String project;
/** The name of the cloud project that topics belong to.
*/
public java.lang.String getProject() {
return project;
}
/** The name of the cloud project that topics belong to. */
public List setProject(java.lang.String project) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PROJECT_PATTERN.matcher(project).matches(),
"Parameter project must conform to the pattern " +
"^projects/[^/]+$");
}
this.project = project;
return this;
}
/** Maximum number of topics to return. */
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** Maximum number of topics to return.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/** Maximum number of topics to return. */
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/**
* The value returned by the last `ListTopicsResponse`; indicates that this is a
* continuation of a prior `ListTopics` call, and that the system should return the next
* page of data.
*/
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** The value returned by the last `ListTopicsResponse`; indicates that this is a continuation of a
prior `ListTopics` call, and that the system should return the next page of data.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/**
* The value returned by the last `ListTopicsResponse`; indicates that this is a
* continuation of a prior `ListTopics` call, and that the system should return the next
* page of data.
*/
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
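      /*
       * Illustrative sketch, not generated code: a minimal pagination loop over topics.list.
       * It assumes a configured client named `pubsub`, the standard `projects().topics()`
       * accessor chain generated for this service, and the usual `getTopics()` /
       * `getNextPageToken()` accessors on ListTopicsResponse; "projects/my-project" is a
       * placeholder. The `setPageSize`/`setPageToken` setters are the ones defined above.
       *
       *   String pageToken = null;
       *   do {
       *     com.google.api.services.pubsub.model.ListTopicsResponse resp =
       *         pubsub.projects().topics().list("projects/my-project")
       *             .setPageSize(100)
       *             .setPageToken(pageToken)
       *             .execute();
       *     if (resp.getTopics() != null) {
       *       for (com.google.api.services.pubsub.model.Topic t : resp.getTopics()) {
       *         System.out.println(t.getName());
       *       }
       *     }
       *     pageToken = resp.getNextPageToken();
       *   } while (pageToken != null);
       */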
/**
* Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic does not exist. The
* message payload must not be empty; it must contain either a non-empty data field, or at least one
* attribute.
*
* Create a request for the method "topics.publish".
*
* This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link Publish#execute()} method to invoke the remote operation.
*
* @param topic The messages in the request will be published on this topic.
* @param content the {@link com.google.api.services.pubsub.model.PublishRequest}
* @return the request
*/
public Publish publish(java.lang.String topic, com.google.api.services.pubsub.model.PublishRequest content) throws java.io.IOException {
Publish result = new Publish(topic, content);
initialize(result);
return result;
}
public class Publish extends PubsubRequest<com.google.api.services.pubsub.model.PublishResponse> {
private static final String REST_PATH = "v1beta2/{+topic}:publish";
private final java.util.regex.Pattern TOPIC_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/topics/[^/]+$");
/**
* Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic does not exist. The
* message payload must not be empty; it must contain either a non-empty data field, or at least
* one attribute.
*
* Create a request for the method "topics.publish".
*
       * This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link Publish#execute()} method to invoke the remote operation. <p>
* {@link
* Publish#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param topic The messages in the request will be published on this topic.
* @param content the {@link com.google.api.services.pubsub.model.PublishRequest}
* @since 1.13
*/
protected Publish(java.lang.String topic, com.google.api.services.pubsub.model.PublishRequest content) {
super(Pubsub.this, "POST", REST_PATH, content, com.google.api.services.pubsub.model.PublishResponse.class);
this.topic = com.google.api.client.util.Preconditions.checkNotNull(topic, "Required parameter topic must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(TOPIC_PATTERN.matcher(topic).matches(),
"Parameter topic must conform to the pattern " +
"^projects/[^/]+/topics/[^/]+$");
}
}
@Override
public Publish set$Xgafv(java.lang.String $Xgafv) {
return (Publish) super.set$Xgafv($Xgafv);
}
@Override
public Publish setAccessToken(java.lang.String accessToken) {
return (Publish) super.setAccessToken(accessToken);
}
@Override
public Publish setAlt(java.lang.String alt) {
return (Publish) super.setAlt(alt);
}
@Override
public Publish setCallback(java.lang.String callback) {
return (Publish) super.setCallback(callback);
}
@Override
public Publish setFields(java.lang.String fields) {
return (Publish) super.setFields(fields);
}
@Override
public Publish setKey(java.lang.String key) {
return (Publish) super.setKey(key);
}
@Override
public Publish setOauthToken(java.lang.String oauthToken) {
return (Publish) super.setOauthToken(oauthToken);
}
@Override
public Publish setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Publish) super.setPrettyPrint(prettyPrint);
}
@Override
public Publish setQuotaUser(java.lang.String quotaUser) {
return (Publish) super.setQuotaUser(quotaUser);
}
@Override
public Publish setUploadType(java.lang.String uploadType) {
return (Publish) super.setUploadType(uploadType);
}
@Override
public Publish setUploadProtocol(java.lang.String uploadProtocol) {
return (Publish) super.setUploadProtocol(uploadProtocol);
}
/** The messages in the request will be published on this topic. */
@com.google.api.client.util.Key
private java.lang.String topic;
/** The messages in the request will be published on this topic.
*/
public java.lang.String getTopic() {
return topic;
}
/** The messages in the request will be published on this topic. */
public Publish setTopic(java.lang.String topic) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(TOPIC_PATTERN.matcher(topic).matches(),
"Parameter topic must conform to the pattern " +
"^projects/[^/]+/topics/[^/]+$");
}
this.topic = topic;
return this;
}
@Override
public Publish set(String parameterName, Object value) {
return (Publish) super.set(parameterName, value);
}
}
/**
* Sets the access control policy on the specified resource. Replaces any existing policy. Can
* return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
*
* Create a request for the method "topics.setIamPolicy".
*
* This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link SetIamPolicy#execute()} method to invoke the remote operation.
*
* @param resource REQUIRED: The resource for which the policy is being specified. See the operation documentation for
* the appropriate value for this field.
* @param content the {@link com.google.api.services.pubsub.model.SetIamPolicyRequest}
* @return the request
*/
public SetIamPolicy setIamPolicy(java.lang.String resource, com.google.api.services.pubsub.model.SetIamPolicyRequest content) throws java.io.IOException {
SetIamPolicy result = new SetIamPolicy(resource, content);
initialize(result);
return result;
}
public class SetIamPolicy extends PubsubRequest<com.google.api.services.pubsub.model.Policy> {
private static final String REST_PATH = "v1beta2/{+resource}:setIamPolicy";
private final java.util.regex.Pattern RESOURCE_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/topics/[^/]+$");
/**
* Sets the access control policy on the specified resource. Replaces any existing policy. Can
* return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
*
* Create a request for the method "topics.setIamPolicy".
*
       * This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link SetIamPolicy#execute()} method to invoke the remote operation. <p>
* {@link
* SetIamPolicy#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param resource REQUIRED: The resource for which the policy is being specified. See the operation documentation for
* the appropriate value for this field.
* @param content the {@link com.google.api.services.pubsub.model.SetIamPolicyRequest}
* @since 1.13
*/
protected SetIamPolicy(java.lang.String resource, com.google.api.services.pubsub.model.SetIamPolicyRequest content) {
super(Pubsub.this, "POST", REST_PATH, content, com.google.api.services.pubsub.model.Policy.class);
this.resource = com.google.api.client.util.Preconditions.checkNotNull(resource, "Required parameter resource must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/topics/[^/]+$");
}
}
@Override
public SetIamPolicy set$Xgafv(java.lang.String $Xgafv) {
return (SetIamPolicy) super.set$Xgafv($Xgafv);
}
@Override
public SetIamPolicy setAccessToken(java.lang.String accessToken) {
return (SetIamPolicy) super.setAccessToken(accessToken);
}
@Override
public SetIamPolicy setAlt(java.lang.String alt) {
return (SetIamPolicy) super.setAlt(alt);
}
@Override
public SetIamPolicy setCallback(java.lang.String callback) {
return (SetIamPolicy) super.setCallback(callback);
}
@Override
public SetIamPolicy setFields(java.lang.String fields) {
return (SetIamPolicy) super.setFields(fields);
}
@Override
public SetIamPolicy setKey(java.lang.String key) {
return (SetIamPolicy) super.setKey(key);
}
@Override
public SetIamPolicy setOauthToken(java.lang.String oauthToken) {
return (SetIamPolicy) super.setOauthToken(oauthToken);
}
@Override
public SetIamPolicy setPrettyPrint(java.lang.Boolean prettyPrint) {
return (SetIamPolicy) super.setPrettyPrint(prettyPrint);
}
@Override
public SetIamPolicy setQuotaUser(java.lang.String quotaUser) {
return (SetIamPolicy) super.setQuotaUser(quotaUser);
}
@Override
public SetIamPolicy setUploadType(java.lang.String uploadType) {
return (SetIamPolicy) super.setUploadType(uploadType);
}
@Override
public SetIamPolicy setUploadProtocol(java.lang.String uploadProtocol) {
return (SetIamPolicy) super.setUploadProtocol(uploadProtocol);
}
/**
* REQUIRED: The resource for which the policy is being specified. See the operation
* documentation for the appropriate value for this field.
*/
@com.google.api.client.util.Key
private java.lang.String resource;
/** REQUIRED: The resource for which the policy is being specified. See the operation documentation for
the appropriate value for this field.
*/
public java.lang.String getResource() {
return resource;
}
/**
* REQUIRED: The resource for which the policy is being specified. See the operation
* documentation for the appropriate value for this field.
*/
public SetIamPolicy setResource(java.lang.String resource) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/topics/[^/]+$");
}
this.resource = resource;
return this;
}
@Override
public SetIamPolicy set(String parameterName, Object value) {
return (SetIamPolicy) super.set(parameterName, value);
}
}
/**
* Returns permissions that a caller has on the specified resource. If the resource does not exist,
* this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is
* designed to be used for building permission-aware UIs and command-line tools, not for
* authorization checking. This operation may "fail open" without warning.
*
* Create a request for the method "topics.testIamPermissions".
*
* This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link TestIamPermissions#execute()} method to invoke the remote operation.
*
* @param resource REQUIRED: The resource for which the policy detail is being requested. See the operation
* documentation for the appropriate value for this field.
* @param content the {@link com.google.api.services.pubsub.model.TestIamPermissionsRequest}
* @return the request
*/
public TestIamPermissions testIamPermissions(java.lang.String resource, com.google.api.services.pubsub.model.TestIamPermissionsRequest content) throws java.io.IOException {
TestIamPermissions result = new TestIamPermissions(resource, content);
initialize(result);
return result;
}
public class TestIamPermissions extends PubsubRequest<com.google.api.services.pubsub.model.TestIamPermissionsResponse> {
private static final String REST_PATH = "v1beta2/{+resource}:testIamPermissions";
private final java.util.regex.Pattern RESOURCE_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/topics/[^/]+$");
/**
* Returns permissions that a caller has on the specified resource. If the resource does not
* exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This
* operation is designed to be used for building permission-aware UIs and command-line tools, not
* for authorization checking. This operation may "fail open" without warning.
*
* Create a request for the method "topics.testIamPermissions".
*
       * This request holds the parameters needed by the pubsub server. After setting any optional
       * parameters, call the {@link TestIamPermissions#execute()} method to invoke the remote
       * operation. <p> {@link
       * TestIamPermissions#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
       * must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param resource REQUIRED: The resource for which the policy detail is being requested. See the operation
* documentation for the appropriate value for this field.
* @param content the {@link com.google.api.services.pubsub.model.TestIamPermissionsRequest}
* @since 1.13
*/
protected TestIamPermissions(java.lang.String resource, com.google.api.services.pubsub.model.TestIamPermissionsRequest content) {
super(Pubsub.this, "POST", REST_PATH, content, com.google.api.services.pubsub.model.TestIamPermissionsResponse.class);
this.resource = com.google.api.client.util.Preconditions.checkNotNull(resource, "Required parameter resource must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/topics/[^/]+$");
}
}
@Override
public TestIamPermissions set$Xgafv(java.lang.String $Xgafv) {
return (TestIamPermissions) super.set$Xgafv($Xgafv);
}
@Override
public TestIamPermissions setAccessToken(java.lang.String accessToken) {
return (TestIamPermissions) super.setAccessToken(accessToken);
}
@Override
public TestIamPermissions setAlt(java.lang.String alt) {
return (TestIamPermissions) super.setAlt(alt);
}
@Override
public TestIamPermissions setCallback(java.lang.String callback) {
return (TestIamPermissions) super.setCallback(callback);
}
@Override
public TestIamPermissions setFields(java.lang.String fields) {
return (TestIamPermissions) super.setFields(fields);
}
@Override
public TestIamPermissions setKey(java.lang.String key) {
return (TestIamPermissions) super.setKey(key);
}
@Override
public TestIamPermissions setOauthToken(java.lang.String oauthToken) {
return (TestIamPermissions) super.setOauthToken(oauthToken);
}
@Override
public TestIamPermissions setPrettyPrint(java.lang.Boolean prettyPrint) {
return (TestIamPermissions) super.setPrettyPrint(prettyPrint);
}
@Override
public TestIamPermissions setQuotaUser(java.lang.String quotaUser) {
return (TestIamPermissions) super.setQuotaUser(quotaUser);
}
@Override
public TestIamPermissions setUploadType(java.lang.String uploadType) {
return (TestIamPermissions) super.setUploadType(uploadType);
}
@Override
public TestIamPermissions setUploadProtocol(java.lang.String uploadProtocol) {
return (TestIamPermissions) super.setUploadProtocol(uploadProtocol);
}
/**
* REQUIRED: The resource for which the policy detail is being requested. See the operation
* documentation for the appropriate value for this field.
*/
@com.google.api.client.util.Key
private java.lang.String resource;
/** REQUIRED: The resource for which the policy detail is being requested. See the operation
documentation for the appropriate value for this field.
*/
public java.lang.String getResource() {
return resource;
}
/**
* REQUIRED: The resource for which the policy detail is being requested. See the operation
* documentation for the appropriate value for this field.
*/
public TestIamPermissions setResource(java.lang.String resource) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^projects/[^/]+/topics/[^/]+$");
}
this.resource = resource;
return this;
}
@Override
public TestIamPermissions set(String parameterName, Object value) {
return (TestIamPermissions) super.set(parameterName, value);
}
}
/**
* An accessor for creating requests from the Subscriptions collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code Pubsub pubsub = new Pubsub(...);}
* {@code Pubsub.Subscriptions.List request = pubsub.subscriptions().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Subscriptions subscriptions() {
return new Subscriptions();
}
/**
* The "subscriptions" collection of methods.
*/
public class Subscriptions {
/**
     * Lists the names of the subscriptions for this topic.
*
* Create a request for the method "subscriptions.list".
*
* This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param topic The name of the topic that subscriptions are attached to.
* @return the request
*/
public List list(java.lang.String topic) throws java.io.IOException {
List result = new List(topic);
initialize(result);
return result;
}
public class List extends PubsubRequest<com.google.api.services.pubsub.model.ListTopicSubscriptionsResponse> {
private static final String REST_PATH = "v1beta2/{+topic}/subscriptions";
private final java.util.regex.Pattern TOPIC_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/topics/[^/]+$");
/**
       * Lists the names of the subscriptions for this topic.
*
* Create a request for the method "subscriptions.list".
*
       * This request holds the parameters needed by the pubsub server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation. <p> {@link
* List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param topic The name of the topic that subscriptions are attached to.
* @since 1.13
*/
protected List(java.lang.String topic) {
super(Pubsub.this, "GET", REST_PATH, null, com.google.api.services.pubsub.model.ListTopicSubscriptionsResponse.class);
this.topic = com.google.api.client.util.Preconditions.checkNotNull(topic, "Required parameter topic must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(TOPIC_PATTERN.matcher(topic).matches(),
"Parameter topic must conform to the pattern " +
"^projects/[^/]+/topics/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** The name of the topic that subscriptions are attached to. */
@com.google.api.client.util.Key
private java.lang.String topic;
/** The name of the topic that subscriptions are attached to.
*/
public java.lang.String getTopic() {
return topic;
}
/** The name of the topic that subscriptions are attached to. */
public List setTopic(java.lang.String topic) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(TOPIC_PATTERN.matcher(topic).matches(),
"Parameter topic must conform to the pattern " +
"^projects/[^/]+/topics/[^/]+$");
}
this.topic = topic;
return this;
}
/** Maximum number of subscription names to return. */
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** Maximum number of subscription names to return.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/** Maximum number of subscription names to return. */
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/**
* The value returned by the last `ListTopicSubscriptionsResponse`; indicates that this is
* a continuation of a prior `ListTopicSubscriptions` call, and that the system should
* return the next page of data.
*/
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** The value returned by the last `ListTopicSubscriptionsResponse`; indicates that this is a
continuation of a prior `ListTopicSubscriptions` call, and that the system should return the next
page of data.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/**
* The value returned by the last `ListTopicSubscriptionsResponse`; indicates that this is
* a continuation of a prior `ListTopicSubscriptions` call, and that the system should
* return the next page of data.
*/
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
}
}
}
/**
* Builder for {@link Pubsub}.
*
* <p>
* Implementation is not thread-safe.
* </p>
*
* @since 1.3.0
*/
public static final class Builder extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient.Builder {
private static String chooseEndpoint(com.google.api.client.http.HttpTransport transport) {
// If the GOOGLE_API_USE_MTLS_ENDPOINT environment variable value is "always", use mTLS endpoint.
// If the env variable is "auto", use mTLS endpoint if and only if the transport is mTLS.
// Use the regular endpoint for all other cases.
String useMtlsEndpoint = System.getenv("GOOGLE_API_USE_MTLS_ENDPOINT");
useMtlsEndpoint = useMtlsEndpoint == null ? "auto" : useMtlsEndpoint;
if ("always".equals(useMtlsEndpoint) || ("auto".equals(useMtlsEndpoint) && transport != null && transport.isMtls())) {
return DEFAULT_MTLS_ROOT_URL;
}
return DEFAULT_ROOT_URL;
}
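    // Illustrative usage, not generated code: given the logic above, a deployment can opt a
    // client into the mTLS endpoint unconditionally by exporting
    // GOOGLE_API_USE_MTLS_ENDPOINT=always in the process environment before the Builder runs.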
/**
* Returns an instance of a new builder.
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public Builder(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
super(
transport,
jsonFactory,
Builder.chooseEndpoint(transport),
DEFAULT_SERVICE_PATH,
httpRequestInitializer,
false);
setBatchPath(DEFAULT_BATCH_PATH);
}
/** Builds a new instance of {@link Pubsub}. */
@Override
public Pubsub build() {
return new Pubsub(this);
}
@Override
public Builder setRootUrl(String rootUrl) {
return (Builder) super.setRootUrl(rootUrl);
}
@Override
public Builder setServicePath(String servicePath) {
return (Builder) super.setServicePath(servicePath);
}
@Override
public Builder setBatchPath(String batchPath) {
return (Builder) super.setBatchPath(batchPath);
}
@Override
public Builder setHttpRequestInitializer(com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
return (Builder) super.setHttpRequestInitializer(httpRequestInitializer);
}
@Override
public Builder setApplicationName(String applicationName) {
return (Builder) super.setApplicationName(applicationName);
}
@Override
public Builder setSuppressPatternChecks(boolean suppressPatternChecks) {
return (Builder) super.setSuppressPatternChecks(suppressPatternChecks);
}
@Override
public Builder setSuppressRequiredParameterChecks(boolean suppressRequiredParameterChecks) {
return (Builder) super.setSuppressRequiredParameterChecks(suppressRequiredParameterChecks);
}
@Override
public Builder setSuppressAllChecks(boolean suppressAllChecks) {
return (Builder) super.setSuppressAllChecks(suppressAllChecks);
}
/**
* Set the {@link PubsubRequestInitializer}.
*
* @since 1.12
*/
public Builder setPubsubRequestInitializer(
PubsubRequestInitializer pubsubRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(pubsubRequestInitializer);
}
@Override
public Builder setGoogleClientRequestInitializer(
com.google.api.client.googleapis.services.GoogleClientRequestInitializer googleClientRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(googleClientRequestInitializer);
}
}
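  /*
   * Illustrative sketch, not generated code: minimal client construction through the Builder
   * above. The transport and JSON factory classes are the ones suggested in the Builder
   * javadoc and are assumptions, not definitions from this file; null request initializer
   * means "none", per the constructor javadoc.
   *
   *   Pubsub pubsub = new Pubsub.Builder(
   *           com.google.api.client.googleapis.javanet.GoogleNetHttpTransport.newTrustedTransport(),
   *           new com.google.api.client.json.gson.GsonFactory(),
   *           null)
   *       .setApplicationName("my-app")
   *       .build();
   */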
}
|
[
"\"GOOGLE_API_USE_MTLS_ENDPOINT\""
] |
[] |
[
"GOOGLE_API_USE_MTLS_ENDPOINT"
] |
[]
|
["GOOGLE_API_USE_MTLS_ENDPOINT"]
|
java
| 1 | 0 | |
Data/Juliet-Java/Juliet-Java-v103/000/134/046/CWE23_Relative_Path_Traversal__Environment_21.java
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE23_Relative_Path_Traversal__Environment_21.java
Label Definition File: CWE23_Relative_Path_Traversal.label.xml
Template File: sources-sink-21.tmpl.java
*/
/*
* @description
* CWE: 23 Relative Path Traversal
* BadSource: Environment Read data from an environment variable
* GoodSource: A hardcoded string
* Sinks: readFile
* BadSink : no validation
* Flow Variant: 21 Control flow: Flow controlled by value of a private variable. All functions contained in one file.
*
* */
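/* Illustrative sketch, not part of the Juliet template: the kind of canonical-path check
 * whose absence is the flaw exercised below. All names are placeholders:
 *
 *   File file = new File(root, data);
 *   if (!file.getCanonicalPath().startsWith(new File(root).getCanonicalPath())) {
 *       throw new IllegalArgumentException("path escapes the upload root");
 *   }
 */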
import java.io.*;
import java.util.logging.Level;
public class CWE23_Relative_Path_Traversal__Environment_21 extends AbstractTestCase
{
/* The variable below is used to drive control flow in the source function */
private boolean badPrivate = false;
public void bad() throws Throwable
{
String data;
badPrivate = true;
data = bad_source();
String root;
if(System.getProperty("os.name").toLowerCase().indexOf("win") >= 0)
{
/* running on Windows */
root = "C:\\uploads\\";
}
else
{
/* running on non-Windows */
root = "/home/user/uploads/";
}
if (data != null)
{
/* POTENTIAL FLAW: no validation of concatenated value */
File file = new File(root + data);
FileInputStream streamFileInputSink = null;
InputStreamReader readerInputStreamSink = null;
BufferedReader readerBufferdSink = null;
if (file.exists() && file.isFile())
{
try
{
streamFileInputSink = new FileInputStream(file);
readerInputStreamSink = new InputStreamReader(streamFileInputSink, "UTF-8");
readerBufferdSink = new BufferedReader(readerInputStreamSink);
IO.writeLine(readerBufferdSink.readLine());
}
catch (IOException exceptIO)
{
IO.logger.log(Level.WARNING, "Error with stream reading", exceptIO);
}
finally
{
/* Close stream reading objects */
try
{
if (readerBufferdSink != null)
{
readerBufferdSink.close();
}
}
catch (IOException exceptIO)
{
IO.logger.log(Level.WARNING, "Error closing BufferedReader", exceptIO);
}
try
{
if (readerInputStreamSink != null)
{
readerInputStreamSink.close();
}
}
catch (IOException exceptIO)
{
IO.logger.log(Level.WARNING, "Error closing InputStreamReader", exceptIO);
}
try
{
if (streamFileInputSink != null)
{
streamFileInputSink.close();
}
}
catch (IOException exceptIO)
{
IO.logger.log(Level.WARNING, "Error closing FileInputStream", exceptIO);
}
}
}
}
}
private String bad_source() throws Throwable
{
String data;
if (badPrivate)
{
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
data = System.getenv("ADD");
}
else
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
* but ensure data is initialized before the Sink to avoid compiler errors */
data = null;
}
return data;
}
/* The variables below are used to drive control flow in the source functions. */
private boolean goodG2B1_private = false;
private boolean goodG2B2_private = false;
public void good() throws Throwable
{
goodG2B1();
goodG2B2();
}
/* goodG2B1() - use goodsource and badsink by setting the variable to false instead of true */
private void goodG2B1() throws Throwable
{
String data;
goodG2B1_private = false;
data = goodG2B1_source();
String root;
if(System.getProperty("os.name").toLowerCase().indexOf("win") >= 0)
{
/* running on Windows */
root = "C:\\uploads\\";
}
else
{
/* running on non-Windows */
root = "/home/user/uploads/";
}
if (data != null)
{
/* POTENTIAL FLAW: no validation of concatenated value */
File file = new File(root + data);
FileInputStream streamFileInputSink = null;
InputStreamReader readerInputStreamSink = null;
BufferedReader readerBufferdSink = null;
if (file.exists() && file.isFile())
{
try
{
streamFileInputSink = new FileInputStream(file);
readerInputStreamSink = new InputStreamReader(streamFileInputSink, "UTF-8");
readerBufferdSink = new BufferedReader(readerInputStreamSink);
IO.writeLine(readerBufferdSink.readLine());
}
catch (IOException exceptIO)
{
IO.logger.log(Level.WARNING, "Error with stream reading", exceptIO);
}
finally
{
/* Close stream reading objects */
try
{
if (readerBufferdSink != null)
{
readerBufferdSink.close();
}
}
catch (IOException exceptIO)
{
IO.logger.log(Level.WARNING, "Error closing BufferedReader", exceptIO);
}
try
{
if (readerInputStreamSink != null)
{
readerInputStreamSink.close();
}
}
catch (IOException exceptIO)
{
IO.logger.log(Level.WARNING, "Error closing InputStreamReader", exceptIO);
}
try
{
if (streamFileInputSink != null)
{
streamFileInputSink.close();
}
}
catch (IOException exceptIO)
{
IO.logger.log(Level.WARNING, "Error closing FileInputStream", exceptIO);
}
}
}
}
}
private String goodG2B1_source() throws Throwable
{
String data = null;
if (goodG2B1_private)
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
* but ensure data is initialized before the Sink to avoid compiler errors */
data = null;
}
else
{
/* FIX: Use a hardcoded string */
data = "foo";
}
return data;
}
/* goodG2B2() - use goodsource and badsink by reversing the blocks in the if in the sink function */
private void goodG2B2() throws Throwable
{
String data;
goodG2B2_private = true;
data = goodG2B2_source();
String root;
if(System.getProperty("os.name").toLowerCase().indexOf("win") >= 0)
{
/* running on Windows */
root = "C:\\uploads\\";
}
else
{
/* running on non-Windows */
root = "/home/user/uploads/";
}
if (data != null)
{
/* POTENTIAL FLAW: no validation of concatenated value */
File file = new File(root + data);
FileInputStream streamFileInputSink = null;
InputStreamReader readerInputStreamSink = null;
BufferedReader readerBufferdSink = null;
if (file.exists() && file.isFile())
{
try
{
streamFileInputSink = new FileInputStream(file);
readerInputStreamSink = new InputStreamReader(streamFileInputSink, "UTF-8");
readerBufferdSink = new BufferedReader(readerInputStreamSink);
IO.writeLine(readerBufferdSink.readLine());
}
catch (IOException exceptIO)
{
IO.logger.log(Level.WARNING, "Error with stream reading", exceptIO);
}
finally
{
/* Close stream reading objects */
try
{
if (readerBufferdSink != null)
{
readerBufferdSink.close();
}
}
catch (IOException exceptIO)
{
IO.logger.log(Level.WARNING, "Error closing BufferedReader", exceptIO);
}
try
{
if (readerInputStreamSink != null)
{
readerInputStreamSink.close();
}
}
catch (IOException exceptIO)
{
IO.logger.log(Level.WARNING, "Error closing InputStreamReader", exceptIO);
}
try
{
if (streamFileInputSink != null)
{
streamFileInputSink.close();
}
}
catch (IOException exceptIO)
{
IO.logger.log(Level.WARNING, "Error closing FileInputStream", exceptIO);
}
}
}
}
}
private String goodG2B2_source() throws Throwable
{
String data = null;
if (goodG2B2_private)
{
/* FIX: Use a hardcoded string */
data = "foo";
}
else
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
* but ensure data is initialized before the Sink to avoid compiler errors */
data = null;
}
return data;
}
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
public static void main(String[] args) throws ClassNotFoundException,
InstantiationException, IllegalAccessException
{
mainFromParent(args);
}
}
|
[
"\"ADD\""
] |
[] |
[
"ADD"
] |
[]
|
["ADD"]
|
java
| 1 | 0 | |
dowwner/app/management/commands/create_admin_user.py
|
# TODO: Move to dowwner.__main__
from argparse import ArgumentParser
import os
from typing import Dict, List
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = "Create admin user"
def add_arguments(self, parser: ArgumentParser) -> None:
return
    def handle(self, *args: List[str], **kwargs: Dict[str, str]) -> None:
username = "10sr"
password = os.environ.get("ADMIN_PASSWORD", "")
        if not password:
            raise CommandError("Aborting: ADMIN_PASSWORD is empty")
try:
user = User.objects.get(username=username)
        except User.DoesNotExist:
user = User.objects.create_user(username)
user.set_password(password)
user.is_superuser = True
user.is_staff = True
user.save()
self.stdout.write(f"User {user} updated.")
return
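# Assumed invocation (illustrative; the command name comes from this module's filename):
#   ADMIN_PASSWORD=s3cret python manage.py create_admin_user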
|
[] |
[] |
[
"ADMIN_PASSWORD"
] |
[]
|
["ADMIN_PASSWORD"]
|
python
| 1 | 0 | |
pkg/dashboards/example_dashboard_test.go
|
package dashboards
import (
"fmt"
"log"
"os"
"github.com/harrykimpel/newrelic-client-go/pkg/config"
)
func Example_dashboard() {
// Initialize the client configuration. A Personal API key is required to
// communicate with the backend API.
cfg := config.New()
cfg.PersonalAPIKey = os.Getenv("NEW_RELIC_API_KEY")
// Initialize the client.
client := New(cfg)
// Search the dashboards for the current account by title.
listParams := &ListDashboardsParams{
Title: "Example dashboard",
}
dashboards, err := client.ListDashboards(listParams)
if err != nil {
log.Fatal("error listing dashboards:", err)
}
// Get dashboard by ID. This example assumes that at least one dashboard
// has been returned by the list endpoint, but in practice it is possible
// that an empty slice is returned.
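	// Minimal guard, an illustrative addition rather than part of the original example:
	// fail fast on an empty result so the index below cannot panic.
	if len(dashboards) == 0 {
		log.Fatal("no dashboards found matching the title")
	}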
dashboard, err := client.GetDashboard(dashboards[0].ID)
if err != nil {
log.Fatal("error getting dashboard:", err)
}
// Create a new dashboard.
applicationName := "Example application"
dashboard = &Dashboard{
Title: "Example dashboard",
Icon: DashboardIconTypes.BarChart,
}
requestsPerMinute := DashboardWidget{
Visualization: VisualizationTypes.Billboard,
Data: []DashboardWidgetData{
{
NRQL: fmt.Sprintf("FROM Transaction SELECT rate(count(*), 1 minute) WHERE appName = '%s'", applicationName),
},
},
Presentation: DashboardWidgetPresentation{
Title: "Requests per minute",
},
Layout: DashboardWidgetLayout{
Row: 1,
Column: 1,
},
}
errorRate := DashboardWidget{
Visualization: VisualizationTypes.Gauge,
Data: []DashboardWidgetData{
{
NRQL: fmt.Sprintf("FROM Transaction SELECT percentage(count(*), WHERE error IS true) WHERE appName = '%s'", applicationName),
},
},
Presentation: DashboardWidgetPresentation{
Title: "Error rate",
Threshold: &DashboardWidgetThreshold{
Red: 2.5,
},
},
Layout: DashboardWidgetLayout{
Row: 1,
Column: 2,
},
}
notes := DashboardWidget{
Visualization: VisualizationTypes.Markdown,
Data: []DashboardWidgetData{
{
Source: "### Helpful Links\n\n* [New Relic One](https://one.newrelic.com)\n* [Developer Portal](https://developer.newrelic.com)",
},
},
Presentation: DashboardWidgetPresentation{
Title: "Dashboard note",
},
Layout: DashboardWidgetLayout{
Row: 1,
Column: 3,
},
}
dashboard.Widgets = []DashboardWidget{
requestsPerMinute,
errorRate,
notes,
}
created, err := client.CreateDashboard(*dashboard)
if err != nil {
log.Fatal("error creating dashboard:", err)
}
// Add a widget to an existing dashboard.
topApdex := DashboardWidget{
Visualization: VisualizationTypes.FacetTable,
Data: []DashboardWidgetData{
{
NRQL: fmt.Sprintf("FROM Transaction SELECT rate(count(*), 1 minute) FACET name WHERE appName = '%s'", applicationName),
},
},
Presentation: DashboardWidgetPresentation{
Title: "Requests per minute, by transaction",
},
Layout: DashboardWidgetLayout{
Row: 1,
Column: 2,
Width: 3,
},
}
created.Widgets = append(created.Widgets, topApdex)
updated, err := client.UpdateDashboard(*created)
if err != nil {
log.Fatal("error updating dashboard:", err)
}
	// Delete a dashboard.
_, err = client.DeleteDashboard(updated.ID)
if err != nil {
log.Fatal("error deleting dashboard:", err)
}
}
|
[
"\"NEW_RELIC_API_KEY\""
] |
[] |
[
"NEW_RELIC_API_KEY"
] |
[]
|
["NEW_RELIC_API_KEY"]
|
go
| 1 | 0 | |
daq_mainAux.py
|
#!/opt/conda_envs/lsdc-server-2021-1.3/bin/ipython -i
"""
The server run when lsdcRemote is used
"""
import os
from daq_macros import *
import daq_lib
from daq_lib import *
from robot_lib import *
from beamline_lib import *
import atexit
from daq_main_common import pybass_init
import logging
from logging import handlers
logger = logging.getLogger()
logging.getLogger().setLevel(logging.INFO)
logging.getLogger('ophyd').setLevel(logging.WARN)
logging.getLogger('caproto').setLevel(logging.WARN)
handler1 = handlers.RotatingFileHandler('lsdcServerLog.txt', maxBytes=5000000, backupCount=100)
handler2 = handlers.RotatingFileHandler('/var/log/dama/%slsdcServerLog.txt' % os.environ['BEAMLINE_ID'], maxBytes=5000000, backupCount=100)
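# NOTE: handler2 reads os.environ['BEAMLINE_ID'] directly, so importing this module
# with BEAMLINE_ID unset raises KeyError before the server starts.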
myformat = logging.Formatter('%(asctime)s %(name)-8s %(levelname)-8s %(message)s')
handler1.setFormatter(myformat)
handler2.setFormatter(myformat)
logger.addHandler(handler1)
logger.addHandler(handler2)
sitefilename = ""
global command_list,immediate_command_list,z
command_list = []
immediate_command_list = []
z = 25
pybass_init()
|
[] |
[] |
[
"BEAMLINE_ID"
] |
[]
|
["BEAMLINE_ID"]
|
python
| 1 | 0 | |
venv/lib/python3.9/site-packages/py2app/bootstrap/boot_aliasplugin.py
|
import re
import sys
cookie_re = re.compile(br"coding[:=]\s*([-\w.]+)")
if sys.version_info[0] == 2:
default_encoding = "ascii"
else:
default_encoding = "utf-8"
def guess_encoding(fp):
for _i in range(2):
ln = fp.readline()
m = cookie_re.search(ln)
if m is not None:
return m.group(1).decode("ascii")
return default_encoding
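# NOTE: guess_encoding follows the PEP 263 rule: a coding cookie such as
# "# -*- coding: utf-8 -*-" may only appear on the first or second line of the file,
# hence the two-line scan above.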
def _run():
global __file__
import os
import site # noqa: F401
sys.frozen = "macosx_plugin"
base = os.environ["RESOURCEPATH"]
if "ARGVZERO" in os.environ:
argv0 = os.path.basename(os.environ["ARGVZERO"])
else:
argv0 = None
script = SCRIPT_MAP.get(argv0, DEFAULT_SCRIPT) # noqa: F821
sys.argv[0] = __file__ = path = os.path.join(base, script)
if sys.version_info[0] == 2:
with open(path, "rU") as fp:
source = fp.read() + "\n"
else:
with open(path, "rb") as fp:
encoding = guess_encoding(fp)
with open(path, "r", encoding=encoding) as fp:
source = fp.read() + "\n"
BOM = b"\xef\xbb\xbf".decode("utf-8")
if source.startswith(BOM):
source = source[1:]
exec(compile(source, script, "exec"), globals(), globals())
|
[] |
[] |
[
"RESOURCEPATH",
"ARGVZERO"
] |
[]
|
["RESOURCEPATH", "ARGVZERO"]
|
python
| 2 | 0 | |
python/pyqtn/__init__.py
|
# coding: utf-8
# Author: Xin Luo
import os
Qtn_ROOT = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
"qtnProperty"
)
os.environ['PATH'] += os.pathsep + Qtn_ROOT
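# NOTE: on Windows with Python 3.8+, PATH no longer affects dependent-DLL resolution;
# os.add_dll_directory(Qtn_ROOT) is the documented mechanism there (platform assumption,
# left as a note).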
from PyQt5 import QtCore, QtWidgets, QtGui  # ensure all Qt-related DLLs are loaded
from pyqtn import core
from pyqtn import widget
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
integration/v6/experimental/v3_zdt_push_command_test.go
|
package experimental
import (
"fmt"
"io/ioutil"
"net/http"
"os"
"regexp"
"code.cloudfoundry.org/cli/integration/helpers"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gbytes"
. "github.com/onsi/gomega/gexec"
. "github.com/onsi/gomega/ghttp"
)
var _ = Describe("v3-zdt-push command", func() {
var (
orgName string
spaceName string
appName string
userName string
PublicDockerImage = "cloudfoundry/diego-docker-app-custom"
)
BeforeEach(func() {
orgName = helpers.NewOrgName()
spaceName = helpers.NewSpaceName()
appName = helpers.PrefixedRandomName("app")
userName, _ = helpers.GetCredentials()
helpers.TurnOffExperimental()
})
AfterEach(func() {
helpers.TurnOnExperimental()
})
Describe("help", func() {
Context("when --help flag is set", func() {
It("Displays command usage to output", func() {
session := helpers.CF("v3-zdt-push", "--help")
Eventually(session).Should(Say("NAME:"))
Eventually(session).Should(Say("v3-zdt-push - Update an app with zero down time"))
Eventually(session).Should(Say("USAGE:"))
Eventually(session).Should(Say(`cf v3-zdt-push APP_NAME \[-b BUILDPACK\]\.\.\. \[-p APP_PATH\] \[--no-route\]`))
Eventually(session).Should(Say(`cf v3-zdt-push APP_NAME --docker-image \[REGISTRY_HOST:PORT/\]IMAGE\[:TAG\] \[--docker-username USERNAME\] \[--no-route\]`))
Eventually(session).Should(Say("OPTIONS:"))
Eventually(session).Should(Say(`-b\s+Custom buildpack by name \(e\.g\. my-buildpack\) or Git URL \(e\.g\. 'https://github.com/cloudfoundry/java-buildpack.git'\) or Git URL with a branch or tag \(e\.g\. 'https://github.com/cloudfoundry/java-buildpack\.git#v3.3.0' for 'v3.3.0' tag\)\. To use built-in buildpacks only, specify 'default' or 'null'`))
Eventually(session).Should(Say(`-s\s+Stack to use \(a stack is a pre-built file system, including an operating system, that can run apps\)`))
Eventually(session).Should(Say(`--docker-image, -o\s+Docker image to use \(e\.g\. user/docker-image-name\)`))
Eventually(session).Should(Say(`--docker-username\s+Repository username; used with password from environment variable CF_DOCKER_PASSWORD`))
Eventually(session).Should(Say(`--no-route\s+Do not map a route to this app`))
Eventually(session).Should(Say(`-p\s+Path to app directory or to a zip file of the contents of the app directory`))
Eventually(session).Should(Say("ENVIRONMENT:"))
Eventually(session).Should(Say(`CF_DOCKER_PASSWORD=\s+Password used for private docker repository`))
Eventually(session).Should(Say(`CF_STAGING_TIMEOUT=15\s+Max wait time for buildpack staging, in minutes`))
Eventually(session).Should(Say(`CF_STARTUP_TIMEOUT=5\s+Max wait time for app instance startup, in minutes`))
Eventually(session).Should(Exit(0))
})
})
})
Context("when the app name is not provided", func() {
It("tells the user that the app name is required, prints help text, and exits 1", func() {
session := helpers.CF("v3-zdt-push")
Eventually(session.Err).Should(Say("Incorrect Usage: the required argument `APP_NAME` was not provided"))
Eventually(session).Should(Say("NAME:"))
Eventually(session).Should(Exit(1))
})
})
It("displays the experimental warning", func() {
session := helpers.CF("v3-zdt-push", appName)
Eventually(session.Err).Should(Say("This command is in EXPERIMENTAL stage and may change without notice"))
Eventually(session).Should(Exit())
})
Context("when the -b flag is not given an arg", func() {
It("tells the user that the flag requires an arg, prints help text, and exits 1", func() {
session := helpers.CF("v3-zdt-push", appName, "-b")
Eventually(session.Err).Should(Say("Incorrect Usage: expected argument for flag `-b'"))
Eventually(session).Should(Say("NAME:"))
Eventually(session).Should(Exit(1))
})
})
Context("when the -p flag is not given an arg", func() {
It("tells the user that the flag requires an arg, prints help text, and exits 1", func() {
session := helpers.CF("v3-zdt-push", appName, "-p")
Eventually(session.Err).Should(Say("Incorrect Usage: expected argument for flag `-p'"))
Eventually(session).Should(Say("NAME:"))
Eventually(session).Should(Exit(1))
})
})
Context("when the -p flag path does not exist", func() {
It("tells the user that the flag requires an arg, prints help text, and exits 1", func() {
session := helpers.CF("v3-zdt-push", appName, "-p", "path/that/does/not/exist")
Eventually(session.Err).Should(Say("Incorrect Usage: The specified path 'path/that/does/not/exist' does not exist."))
Eventually(session).Should(Say("NAME:"))
Eventually(session).Should(Exit(1))
})
})
Context("when the environment is not setup correctly", func() {
Context("when the v3 api version is lower than the minimum version", func() {
var server *Server
BeforeEach(func() {
server = helpers.StartAndTargetServerWithAPIVersions(helpers.DefaultV2Version, "3.0.0")
})
AfterEach(func() {
server.Close()
})
It("fails with error message that the minimum version is not met", func() {
session := helpers.CF("v3-zdt-push", appName)
Eventually(session).Should(Say("FAILED"))
Eventually(session.Err).Should(Say(`This command requires CF API version 3\.57\.0 or higher\.`))
Eventually(session).Should(Exit(1))
})
})
It("fails with the appropriate errors", func() {
helpers.CheckEnvironmentTargetedCorrectly(true, true, ReadOnlyOrg, "v3-zdt-push", appName)
})
})
Context("when the environment is set up correctly", func() {
var domainName string
BeforeEach(func() {
helpers.SetupCF(orgName, spaceName)
domainName = helpers.DefaultSharedDomain()
})
AfterEach(func() {
helpers.QuickDeleteOrg(orgName)
})
Context("when the app exists", func() {
var session *Session
BeforeEach(func() {
helpers.WithHelloWorldApp(func(appDir string) {
Eventually(helpers.CustomCF(helpers.CFEnv{WorkingDirectory: appDir}, "v3-zdt-push", appName)).Should(Exit(0))
})
helpers.WithHelloWorldApp(func(appDir string) {
session = helpers.CustomCF(helpers.CFEnv{WorkingDirectory: appDir}, "v3-zdt-push", appName, "-b", "https://github.com/cloudfoundry/staticfile-buildpack")
Eventually(session).Should(Exit(0))
})
})
It("pushes the app", func() {
Eventually(session).Should(Say(`Updating app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Say("OK"))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`Uploading and creating bits package for app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Say("OK"))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`Staging package for app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Say("OK"))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`Mapping routes\.\.\.`))
Eventually(session).Should(Say("OK"))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`Starting deployment for app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Say("OK"))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`Waiting for app to start\.\.\.`))
Eventually(session).Should(Say(`Showing health and status for app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`name:\s+%s`, appName))
Eventually(session).Should(Say(`requested state:\s+started`))
Eventually(session).Should(Say(`routes:\s+%s\.%s`, appName, domainName))
Eventually(session).Should(Say(`stack:\s+cflinuxfs`))
// TODO: Uncomment when capi sorts out droplet buildpack name/detectoutput
// Eventually(session).Should(Say(`buildpacks:\s+https://github.com/cloudfoundry/staticfile-buildpack`))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`type:\s+web`))
Eventually(session).Should(Say(`instances:\s+1/1`))
Eventually(session).Should(Say(`memory usage:\s+\d+(M|G)`))
Eventually(session).Should(Say(`state\s+since\s+cpu\s+memory\s+disk`))
Eventually(session).Should(Say(`#0\s+running\s+\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} [AP]M`))
})
})
Context("when the app does not already exist", func() {
var session *Session
BeforeEach(func() {
helpers.WithHelloWorldApp(func(appDir string) {
session = helpers.CustomCF(helpers.CFEnv{WorkingDirectory: appDir}, "v3-zdt-push", appName)
Eventually(session).Should(Exit(0))
})
})
It("pushes the app", func() {
Eventually(session).Should(Say(`Creating app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Say("OK"))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`Uploading and creating bits package for app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Say("OK"))
Eventually(session).Should(Say(""))
Consistently(session).ShouldNot(Say("Stopping app %s", appName))
Eventually(session).Should(Say(`Staging package for app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Say("OK"))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`Mapping routes\.\.\.`))
Eventually(session).Should(Say("OK"))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`Setting app %s to droplet .+ in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Say("OK"))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`Starting app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Say("OK"))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`Waiting for app to start\.\.\.`))
Eventually(session).Should(Say(`Showing health and status for app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`name:\s+%s`, appName))
Eventually(session).Should(Say(`requested state:\s+started`))
Eventually(session).Should(Say(`routes:\s+%s\.%s`, appName, domainName))
Eventually(session).Should(Say(`stack:\s+cflinuxfs`))
Eventually(session).Should(Say(`buildpacks:\s+staticfile`))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`type:\s+web`))
Eventually(session).Should(Say(`instances:\s+1/1`))
Eventually(session).Should(Say(`memory usage:\s+\d+(M|G)`))
Eventually(session).Should(Say(`state\s+since\s+cpu\s+memory\s+disk`))
Eventually(session).Should(Say(`#0\s+running\s+\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} [AP]M`))
})
})
Context("when the app crashes", func() {
var session *Session
BeforeEach(func() {
helpers.WithCrashingApp(func(appDir string) {
session = helpers.CustomCF(helpers.CFEnv{WorkingDirectory: appDir}, "v3-zdt-push", appName)
Eventually(session).Should(Exit(0))
})
})
It("pushes the app", func() {
Eventually(session).Should(Say(`Creating app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Say("OK"))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`Uploading and creating bits package for app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Say("OK"))
Eventually(session).Should(Say(""))
Consistently(session).ShouldNot(Say("Stopping app %s", appName))
Eventually(session).Should(Say(`Staging package for app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Say("OK"))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`Mapping routes\.\.\.`))
Eventually(session).Should(Say("OK"))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`Setting app %s to droplet .+ in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Say("OK"))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`Starting app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Say("OK"))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`Waiting for app to start\.\.\.`))
Eventually(session).Should(Say(`Showing health and status for app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`name:\s+%s`, appName))
Eventually(session).Should(Say(`requested state:\s+started`))
Eventually(session).Should(Say(`routes:\s+%s\.%s`, appName, domainName))
Eventually(session).Should(Say(`stack:\s+cflinuxfs`))
Eventually(session).Should(Say(`buildpacks:\s+ruby`))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`type:\s+web`))
Eventually(session).Should(Say(`instances:\s+0/1`))
Eventually(session).Should(Say(`memory usage:\s+\d+(M|G)`))
Eventually(session).Should(Say(`state\s+since\s+cpu\s+memory\s+disk`))
Eventually(session).Should(Say(`#0\s+crashed\s+\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} [AP]M`))
})
})
Context("when the -p flag is provided", func() {
Context("when the path is a directory", func() {
Context("when the directory contains files", func() {
It("pushes the app from the directory", func() {
helpers.WithHelloWorldApp(func(appDir string) {
session := helpers.CF("v3-zdt-push", appName, "-p", appDir)
Eventually(session).Should(Say(`Starting app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Say(`Waiting for app to start\.\.\.`))
Eventually(session).Should(Say(`Showing health and status for app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`name:\s+%s`, appName))
Eventually(session).Should(Say(`requested state:\s+started`))
Eventually(session).Should(Say(`routes:\s+%s\.%s`, appName, domainName))
Eventually(session).Should(Say(`stack:\s+cflinuxfs`))
Eventually(session).Should(Say(`buildpacks:\s+staticfile`))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`type:\s+web`))
Eventually(session).Should(Say(`instances:\s+1/1`))
Eventually(session).Should(Say(`memory usage:\s+\d+(M|G)`))
Eventually(session).Should(Say(`state\s+since\s+cpu\s+memory\s+disk`))
Eventually(session).Should(Say(`#0\s+running\s+\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} [AP]M`))
Eventually(session).Should(Exit(0))
})
})
})
Context("when the directory is empty", func() {
var emptyDir string
BeforeEach(func() {
var err error
emptyDir, err = ioutil.TempDir("", "integration-push-path-empty")
Expect(err).ToNot(HaveOccurred())
})
AfterEach(func() {
Expect(os.RemoveAll(emptyDir)).ToNot(HaveOccurred())
})
It("returns an error", func() {
session := helpers.CF("v3-zdt-push", appName, "-p", emptyDir)
Eventually(session.Err).Should(Say("No app files found in '%s'", regexp.QuoteMeta(emptyDir)))
Eventually(session).Should(Exit(1))
})
})
})
Context("when the path is a zip file", func() {
Context("pushing a zip file", func() {
var archive string
BeforeEach(func() {
helpers.WithHelloWorldApp(func(appDir string) {
tmpfile, err := ioutil.TempFile("", "push-archive-integration")
Expect(err).ToNot(HaveOccurred())
archive = tmpfile.Name()
Expect(tmpfile.Close())
err = helpers.Zipit(appDir, archive, "")
Expect(err).ToNot(HaveOccurred())
})
})
AfterEach(func() {
Expect(os.RemoveAll(archive)).ToNot(HaveOccurred())
})
It("pushes the app from the zip file", func() {
session := helpers.CF("v3-zdt-push", appName, "-p", archive)
Eventually(session).Should(Say(`Starting app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Say(`Waiting for app to start\.\.\.`))
Eventually(session).Should(Say(`Showing health and status for app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`name:\s+%s`, appName))
Eventually(session).Should(Say(`requested state:\s+started`))
Eventually(session).Should(Say(`routes:\s+%s\.%s`, appName, domainName))
Eventually(session).Should(Say(`stack:\s+cflinuxfs`))
Eventually(session).Should(Say(`buildpacks:\s+staticfile`))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`type:\s+web`))
Eventually(session).Should(Say(`instances:\s+1/1`))
Eventually(session).Should(Say(`memory usage:\s+\d+(M|G)`))
Eventually(session).Should(Say(`state\s+since\s+cpu\s+memory\s+disk`))
Eventually(session).Should(Say(`#0\s+running\s+\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} [AP]M`))
Eventually(session).Should(Exit(0))
})
})
})
Context("when the path is a symlink to a directory", func() {
var symlinkPath string
BeforeEach(func() {
tempFile, err := ioutil.TempFile("", "symlink-")
Expect(err).ToNot(HaveOccurred())
Expect(tempFile.Close()).To(Succeed())
symlinkPath = tempFile.Name()
Expect(os.Remove(symlinkPath)).To(Succeed())
})
AfterEach(func() {
Expect(os.Remove(symlinkPath)).To(Succeed())
})
It("creates and uploads the package from the directory", func() {
helpers.WithHelloWorldApp(func(appDir string) {
Expect(os.Symlink(appDir, symlinkPath)).To(Succeed())
session := helpers.CF("v3-zdt-push", appName, "-p", symlinkPath)
Eventually(session).Should(Say(`Starting app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Say(`Waiting for app to start\.\.\.`))
Eventually(session).Should(Say(`Showing health and status for app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`name:\s+%s`, appName))
Eventually(session).Should(Say(`requested state:\s+started`))
Eventually(session).Should(Say(`routes:\s+%s\.%s`, appName, domainName))
Eventually(session).Should(Say(`stack:\s+cflinuxfs`))
Eventually(session).Should(Say(`buildpacks:\s+staticfile`))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`type:\s+web`))
Eventually(session).Should(Say(`instances:\s+1/1`))
Eventually(session).Should(Say(`memory usage:\s+\d+(M|G)`))
Eventually(session).Should(Say(`state\s+since\s+cpu\s+memory\s+disk`))
Eventually(session).Should(Say(`#0\s+running\s+\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} [AP]M`))
Eventually(session).Should(Exit(0))
})
})
})
Context("when the path is a symlink to a zip file", func() {
var (
archive string
symlinkPath string
)
BeforeEach(func() {
helpers.WithHelloWorldApp(func(appDir string) {
tmpfile, err := ioutil.TempFile("", "package-archive-integration")
Expect(err).ToNot(HaveOccurred())
archive = tmpfile.Name()
Expect(tmpfile.Close())
err = helpers.Zipit(appDir, archive, "")
Expect(err).ToNot(HaveOccurred())
})
tempFile, err := ioutil.TempFile("", "symlink-to-archive-")
Expect(err).ToNot(HaveOccurred())
Expect(tempFile.Close()).To(Succeed())
symlinkPath = tempFile.Name()
Expect(os.Remove(symlinkPath)).To(Succeed())
Expect(os.Symlink(archive, symlinkPath)).To(Succeed())
})
AfterEach(func() {
Expect(os.Remove(archive)).To(Succeed())
Expect(os.Remove(symlinkPath)).To(Succeed())
})
It("creates and uploads the package from the zip file", func() {
session := helpers.CF("v3-zdt-push", appName, "-p", symlinkPath)
Eventually(session).Should(Say(`Starting app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Say(`Waiting for app to start\.\.\.`))
Eventually(session).Should(Say(`Showing health and status for app %s in org %s / space %s as %s\.\.\.`, appName, orgName, spaceName, userName))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`name:\s+%s`, appName))
Eventually(session).Should(Say(`requested state:\s+started`))
Eventually(session).Should(Say(`routes:\s+%s\.%s`, appName, domainName))
Eventually(session).Should(Say(`stack:\s+cflinuxfs`))
Eventually(session).Should(Say(`buildpacks:\s+staticfile`))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`type:\s+web`))
Eventually(session).Should(Say(`instances:\s+1/1`))
Eventually(session).Should(Say(`memory usage:\s+\d+(M|G)`))
Eventually(session).Should(Say(`state\s+since\s+cpu\s+memory\s+disk`))
Eventually(session).Should(Say(`#0\s+running\s+\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} [AP]M`))
Eventually(session).Should(Exit(0))
})
})
})
Context("when the --no-route flag is set", func() {
var session *Session
BeforeEach(func() {
helpers.WithHelloWorldApp(func(appDir string) {
session = helpers.CustomCF(helpers.CFEnv{WorkingDirectory: appDir}, "v3-zdt-push", appName, "--no-route")
Eventually(session).Should(Exit(0))
})
})
It("does not map any routes to the app", func() {
Consistently(session).ShouldNot(Say(`Mapping routes\.\.\.`))
Eventually(session).Should(Say(`name:\s+%s`, appName))
Eventually(session).Should(Say(`requested state:\s+started`))
Eventually(session).Should(Say(`routes:\s+\n`))
Eventually(session).Should(Say(`stack:\s+cflinuxfs`))
Eventually(session).Should(Say(`buildpacks:\s+staticfile`))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`type:\s+web`))
Eventually(session).Should(Say(`instances:\s+1/1`))
Eventually(session).Should(Say(`memory usage:\s+\d+(M|G)`))
Eventually(session).Should(Say(`state\s+since\s+cpu\s+memory\s+disk`))
Eventually(session).Should(Say(`#0\s+running\s+\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} [AP]M`))
})
})
Context("when the -s flag is set", func() {
When("the default stack is specified", func() {
It("uses the specified stack", func() {
var session *Session
helpers.WithHelloWorldApp(func(appDir string) {
session = helpers.CustomCF(helpers.CFEnv{WorkingDirectory: appDir}, "v3-zdt-push", appName, "-s", "cflinuxfs2")
Eventually(session).Should(Exit(0))
})
Eventually(session).Should(Say(`name:\s+%s`, appName))
Eventually(session).Should(Say(`requested state:\s+started`))
Eventually(session).Should(Say(`routes:\s+%s\.%s`, appName, domainName))
Eventually(session).Should(Say(`stack:\s+cflinuxfs2`))
})
})
When("a non-default stack is specified", func() {
It("uses the specified stack", func() {
var session *Session
helpers.WithHelloWorldApp(func(appDir string) {
session = helpers.CustomCF(helpers.CFEnv{WorkingDirectory: appDir}, "v3-zdt-push", appName, "-s", "cflinuxfs2")
Eventually(session).Should(Exit(0))
})
Eventually(session).Should(Say(`name:\s+%s`, appName))
Eventually(session).Should(Say(`requested state:\s+started`))
Eventually(session).Should(Say(`routes:\s+%s\.%s`, appName, domainName))
Eventually(session).Should(Say(`stack:\s+cflinuxfs2`))
})
})
When("the both -s and -b are specified", func() {
When("the buildpack and stack both exist", func() {
When("the buildpack specified exists for the stack specified", func() {
It("creates the app with the specified stack and buildpack", func() {
var session *Session
helpers.WithHelloWorldApp(func(appDir string) {
session = helpers.CustomCF(helpers.CFEnv{WorkingDirectory: appDir}, "v3-zdt-push", appName, "-b", "https://github.com/cloudfoundry/staticfile-buildpack", "-s", "cflinuxfs2")
Eventually(session).Should(Exit(0))
})
// TODO: assert specific error text when it is written
})
})
When("the buildpack specified does not exist for the stack specified", func() {
It("prints the appropriate error", func() {
var session *Session
helpers.WithHelloWorldApp(func(appDir string) {
session = helpers.CustomCF(helpers.CFEnv{WorkingDirectory: appDir}, "v3-zdt-push", appName, "-b", "ruby_buildpack", "-s", "windows2012R2")
Eventually(session).Should(Exit(1))
})
// TODO: assert specific error text when it is written
})
})
})
})
When("the specified stack does not exist", func() {
It("errors", func() {
var session *Session
helpers.WithHelloWorldApp(func(appDir string) {
session = helpers.CustomCF(helpers.CFEnv{WorkingDirectory: appDir}, "v3-zdt-push", appName, "-s", "invalid_stack")
Eventually(session).Should(Exit(1))
})
Eventually(session).Should(Say("FAILED"))
// TODO: confirm the text of the error message
Eventually(session.Err).Should(Say(`Stack must be an existing stack`))
})
})
})
Context("when the -b flag is set", func() {
var session *Session
Context("when pushing a multi-buildpack app", func() {
BeforeEach(func() {
helpers.WithMultiBuildpackApp(func(appDir string) {
session = helpers.CustomCF(helpers.CFEnv{WorkingDirectory: appDir}, "v3-zdt-push", appName, "-b", "ruby_buildpack", "-b", "go_buildpack")
// TODO: uncomment this expectation once capi-release displays all buildpacks on droplet
// Story: https://www.pivotaltracker.com/story/show/150425459
// Eventually(session).Should(Say("buildpacks:.*ruby_buildpack, go_buildpack"))
Eventually(session).Should(Exit(0))
})
})
It("successfully compiles and runs the app", func() {
resp, err := http.Get(fmt.Sprintf("http://%s.%s", appName, helpers.DefaultSharedDomain()))
Expect(err).ToNot(HaveOccurred())
Expect(resp.StatusCode).To(Equal(http.StatusOK))
})
})
Context("when resetting the buildpack to default", func() {
BeforeEach(func() {
helpers.WithHelloWorldApp(func(appDir string) {
Eventually(helpers.CustomCF(helpers.CFEnv{WorkingDirectory: appDir}, "v3-zdt-push", appName, "-b", "java_buildpack")).Should(Exit(1))
session = helpers.CustomCF(helpers.CFEnv{WorkingDirectory: appDir}, "v3-zdt-push", appName, "-b", "default")
Eventually(session).Should(Exit(0))
})
})
It("successfully pushes the app", func() {
Eventually(session).Should(Say(`name:\s+%s`, appName))
Eventually(session).Should(Say(`state\s+since\s+cpu\s+memory\s+disk`))
Eventually(session).Should(Say(`#0\s+running\s+\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} [AP]M`))
})
})
Context("when omitting the buildpack", func() {
BeforeEach(func() {
helpers.WithHelloWorldApp(func(appDir string) {
Eventually(helpers.CustomCF(helpers.CFEnv{WorkingDirectory: appDir}, "v3-zdt-push", appName, "-b", "java_buildpack")).Should(Exit(1))
session = helpers.CustomCF(helpers.CFEnv{WorkingDirectory: appDir}, "v3-zdt-push", appName)
Eventually(session).Should(Exit(1))
})
})
It("continues using previously set buildpack", func() {
Eventually(session).Should(Say("FAILED"))
})
})
Context("when the buildpack is invalid", func() {
BeforeEach(func() {
helpers.WithHelloWorldApp(func(appDir string) {
session = helpers.CustomCF(helpers.CFEnv{WorkingDirectory: appDir}, "v3-zdt-push", appName, "-b", "wut")
Eventually(session).Should(Exit(1))
})
})
It("errors and does not push the app", func() {
Consistently(session).ShouldNot(Say("Creating app"))
Eventually(session).Should(Say("FAILED"))
Eventually(session.Err).Should(Say(`Buildpack "wut" must be an existing admin buildpack or a valid git URI`))
})
})
Context("when the buildpack is valid", func() {
BeforeEach(func() {
helpers.WithHelloWorldApp(func(appDir string) {
session = helpers.CustomCF(helpers.CFEnv{WorkingDirectory: appDir}, "v3-zdt-push", appName, "-b", "https://github.com/cloudfoundry/staticfile-buildpack")
Eventually(session).Should(Exit(0))
})
})
It("uses the specified buildpack", func() {
Eventually(session).Should(Say(`name:\s+%s`, appName))
Eventually(session).Should(Say(`requested state:\s+started`))
Eventually(session).Should(Say(`routes:\s+%s\.%s`, appName, domainName))
Eventually(session).Should(Say(`stack:\s+cflinuxfs`))
// TODO: Uncomment when capi sorts out droplet buildpack name/detectoutput
// Eventually(session).Should(Say(`buildpacks:\s+https://github.com/cloudfoundry/staticfile-buildpack`))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`type:\s+web`))
Eventually(session).Should(Say(`instances:\s+1/1`))
Eventually(session).Should(Say(`memory usage:\s+\d+(M|G)`))
Eventually(session).Should(Say(`state\s+since\s+cpu\s+memory\s+disk`))
Eventually(session).Should(Say(`#0\s+running\s+\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} [AP]M`))
})
})
})
Context("when the -o flag is set", func() {
Context("when the docker image is valid", func() {
It("uses the specified docker image", func() {
session := helpers.CF("v3-zdt-push", appName, "-o", PublicDockerImage)
Eventually(session).Should(Say(`name:\s+%s`, appName))
Eventually(session).Should(Say(`requested state:\s+started`))
Eventually(session).Should(Say(`routes:\s+%s\.%s`, appName, domainName))
Eventually(session).Should(Say("stack:"))
Eventually(session).ShouldNot(Say("buildpacks:"))
Eventually(session).Should(Say(`docker image:\s+%s`, PublicDockerImage))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`type:\s+web`))
Eventually(session).Should(Say(`instances:\s+1/1`))
Eventually(session).Should(Say(`memory usage:\s+\d+(M|G)`))
Eventually(session).Should(Say(`state\s+since\s+cpu\s+memory\s+disk`))
Eventually(session).Should(Say(`#0\s+running\s+\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} [AP]M`))
Eventually(session).Should(Exit(0))
})
})
Context("when the docker image is invalid", func() {
It("displays an error and exits 1", func() {
session := helpers.CF("v3-zdt-push", appName, "-o", "some-invalid-docker-image")
Eventually(session).Should(Say("FAILED"))
Eventually(session.Err).Should(Say("StagingError - Staging error: staging failed"))
Eventually(session).Should(Exit(1))
})
})
Context("when a docker username and password are provided with a private image", func() {
var (
privateDockerImage string
privateDockerUsername string
privateDockerPassword string
)
BeforeEach(func() {
privateDockerImage = os.Getenv("CF_INT_DOCKER_IMAGE")
privateDockerUsername = os.Getenv("CF_INT_DOCKER_USERNAME")
privateDockerPassword = os.Getenv("CF_INT_DOCKER_PASSWORD")
if privateDockerImage == "" || privateDockerUsername == "" || privateDockerPassword == "" {
Skip("CF_INT_DOCKER_IMAGE, CF_INT_DOCKER_USERNAME, or CF_INT_DOCKER_PASSWORD is not set")
}
})
It("uses the specified private docker image", func() {
session := helpers.CustomCF(
helpers.CFEnv{
EnvVars: map[string]string{"CF_DOCKER_PASSWORD": privateDockerPassword},
},
"v3-zdt-push", "--docker-username", privateDockerUsername, "--docker-image", privateDockerImage, appName,
)
Eventually(session).Should(Say(`name:\s+%s`, appName))
Eventually(session).Should(Say(`requested state:\s+started`))
Eventually(session).Should(Say(`routes:\s+%s\.%s`, appName, domainName))
Eventually(session).Should(Say("stack:"))
Eventually(session).ShouldNot(Say("buildpacks:"))
Eventually(session).Should(Say(`docker image:\s+%s`, privateDockerImage))
Eventually(session).Should(Say(""))
Eventually(session).Should(Say(`type:\s+web`))
Eventually(session).Should(Say(`instances:\s+1/1`))
Eventually(session).Should(Say(`memory usage:\s+\d+(M|G)`))
Eventually(session).Should(Say(`state\s+since\s+cpu\s+memory\s+disk`))
Eventually(session).Should(Say(`#0\s+running\s+\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} [AP]M`))
Eventually(session).Should(Exit(0))
})
})
})
Describe("argument combination errors", func() {
Context("when the --docker-username is provided without the -o flag", func() {
It("displays an error and exits 1", func() {
helpers.WithHelloWorldApp(func(appDir string) {
session := helpers.CF("v3-zdt-push", appName, "--docker-username", "some-username")
Eventually(session).Should(Say("FAILED"))
Eventually(session.Err).Should(Say("Incorrect Usage: '--docker-image, -o' and '--docker-username' must be used together."))
Eventually(session).Should(Say("NAME:"))
Eventually(session).Should(Exit(1))
})
})
})
Context("when the --docker-username and -p flags are provided together", func() {
It("displays an error and exits 1", func() {
helpers.WithHelloWorldApp(func(appDir string) {
session := helpers.CF("v3-zdt-push", appName, "--docker-username", "some-username", "-p", appDir)
Eventually(session).Should(Say("FAILED"))
Eventually(session.Err).Should(Say("Incorrect Usage: '--docker-image, -o' and '--docker-username' must be used together."))
Eventually(session).Should(Say("NAME:"))
Eventually(session).Should(Exit(1))
})
})
})
Context("when the --docker-username is provided without a password", func() {
var oldPassword string
BeforeEach(func() {
oldPassword = os.Getenv("CF_DOCKER_PASSWORD")
err := os.Unsetenv("CF_DOCKER_PASSWORD")
Expect(err).ToNot(HaveOccurred())
})
AfterEach(func() {
err := os.Setenv("CF_DOCKER_PASSWORD", oldPassword)
Expect(err).ToNot(HaveOccurred())
})
It("displays an error and exits 1", func() {
helpers.WithHelloWorldApp(func(appDir string) {
session := helpers.CF("v3-zdt-push", appName, "--docker-username", "some-username", "--docker-image", "some-image")
Eventually(session).Should(Say("FAILED"))
Eventually(session.Err).Should(Say(`Environment variable CF_DOCKER_PASSWORD not set\.`))
Eventually(session).Should(Exit(1))
})
})
})
Context("when the -o and -p flags are provided together", func() {
It("displays an error and exits 1", func() {
helpers.WithHelloWorldApp(func(appDir string) {
session := helpers.CF("v3-zdt-push", appName, "-o", PublicDockerImage, "-p", appDir)
Eventually(session).Should(Say("FAILED"))
Eventually(session.Err).Should(Say("Incorrect Usage: The following arguments cannot be used together: --docker-image, -o, -p"))
Eventually(session).Should(Say("NAME:"))
Eventually(session).Should(Exit(1))
})
})
})
Context("when the -o and -b flags are provided together", func() {
It("displays an error and exits 1", func() {
helpers.WithHelloWorldApp(func(appDir string) {
session := helpers.CF("v3-zdt-push", appName, "-o", PublicDockerImage, "-b", "some-buildpack")
Eventually(session).Should(Say("FAILED"))
Eventually(session.Err).Should(Say("Incorrect Usage: The following arguments cannot be used together: -b, --docker-image, -o"))
Eventually(session).Should(Say("NAME:"))
Eventually(session).Should(Exit(1))
})
})
})
})
})
})
|
[
"\"CF_INT_DOCKER_IMAGE\"",
"\"CF_INT_DOCKER_USERNAME\"",
"\"CF_INT_DOCKER_PASSWORD\"",
"\"CF_DOCKER_PASSWORD\""
] |
[] |
[
"CF_INT_DOCKER_USERNAME",
"CF_INT_DOCKER_PASSWORD",
"CF_INT_DOCKER_IMAGE",
"CF_DOCKER_PASSWORD"
] |
[]
|
["CF_INT_DOCKER_USERNAME", "CF_INT_DOCKER_PASSWORD", "CF_INT_DOCKER_IMAGE", "CF_DOCKER_PASSWORD"]
|
go
| 4 | 0 | |
cmd/ore/do/create-image.go
|
// Copyright 2017 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package do
import (
"context"
"fmt"
"os"
"time"
"github.com/pborman/uuid"
"github.com/spf13/cobra"
ctplatform "github.com/coreos/container-linux-config-transpiler/config/platform"
"github.com/coreos/mantle/platform"
"github.com/coreos/mantle/platform/conf"
"github.com/coreos/mantle/util"
)
var (
cmdCreateImage = &cobra.Command{
Use: "create-image [options]",
Short: "Create image",
Long: `Create an image.`,
RunE: runCreateImage,
}
)
func init() {
DO.AddCommand(cmdCreateImage)
cmdCreateImage.Flags().StringVar(&options.Region, "region", "sfo2", "region slug")
cmdCreateImage.Flags().StringVarP(&imageName, "name", "n", "", "image name")
cmdCreateImage.Flags().StringVarP(&imageURL, "url", "u", "", "image source URL (e.g. \"https://stable.release.flatcar-linux.net/amd64-usr/current/flatcar_production_digitalocean_image.bin.bz2\"")
}
func runCreateImage(cmd *cobra.Command, args []string) error {
if len(args) != 0 {
fmt.Fprintf(os.Stderr, "Unrecognized args in do create-image cmd: %v\n", args)
os.Exit(2)
}
if err := createImage(); err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
return nil
}
func createImage() error {
if imageName == "" {
return fmt.Errorf("Image name must be specified")
}
if imageURL == "" {
return fmt.Errorf("Image URL must be specified")
}
// set smallest available size, so the image will run on any size droplet
options.Size = "512mb"
userdata, err := makeUserData()
if err != nil {
return err
}
ctx := context.Background()
key, err := platform.GenerateFakeKey()
if err != nil {
return err
}
keyID, err := API.AddKey(ctx, "ore-"+uuid.New(), key)
if err != nil {
return err
}
defer API.DeleteKey(ctx, keyID)
droplet, err := API.CreateDroplet(ctx, imageName+"-install", keyID, userdata)
if err != nil {
return fmt.Errorf("couldn't create droplet: %v", err)
}
dropletID := droplet.ID
defer API.DeleteDroplet(ctx, dropletID)
// the droplet will power itself off when install completes
err = util.WaitUntilReady(10*time.Minute, 15*time.Second, func() (bool, error) {
droplet, err := API.GetDroplet(ctx, dropletID)
if err != nil {
return false, err
}
return droplet.Status == "off", nil
})
if err != nil {
return fmt.Errorf("Failed waiting for droplet to power off (%v). Did install fail?", err)
}
if err := API.SnapshotDroplet(ctx, dropletID, imageName); err != nil {
return fmt.Errorf("couldn't snapshot droplet: %v", err)
}
return nil
}
func makeUserData() (string, error) {
clc := fmt.Sprintf(`storage:
files:
- filesystem: root
path: /root/initramfs/etc/resolv.conf
mode: 0644
contents:
inline: nameserver 8.8.8.8
- filesystem: root
path: /root/initramfs/shutdown
mode: 0755
contents:
inline: |
#!/busybox sh
set -e -o pipefail
echo "Starting install..."
disk=$(/busybox mountpoint -n /oldroot | /busybox sed -e 's/p*[0-9]* .*//')
echo "Unmounting filesystems..."
/busybox find /oldroot -depth -type d -exec /busybox mountpoint -q {} \; -exec /busybox umount {} \;
# Verify success
/busybox mountpoint -q /oldroot && /busybox false
echo "Zeroing ${disk}..."
/busybox dd if=/dev/zero of="${disk}" bs=1M ||:
echo "Installing to ${disk}..."
/busybox wget -O - "%s" | \
/busybox bunzip2 -c | \
/busybox dd of="${disk}" bs=1M
echo "Shutting down..."
/busybox poweroff -f
systemd:
units:
- name: install-prep.service
enabled: true
contents: |
[Unit]
Description=Launch Install
After=multi-user.target
[Service]
Type=oneshot
# https://github.com/coreos/bugs/issues/2205
ExecStart=/usr/bin/wget -O /root/initramfs/busybox https://busybox.net/downloads/binaries/1.27.1-i686/busybox
ExecStart=/bin/sh -c 'echo "b51b9328eb4e60748912e1c1867954a5cf7e9d5294781cae59ce225ed110523c /root/initramfs/busybox" | sha256sum -c -'
ExecStart=/usr/bin/chmod +x /root/initramfs/busybox
ExecStart=/usr/bin/rsync -a /root/initramfs/ /run/initramfs
ExecStart=/usr/bin/systemctl --no-block poweroff
[Install]
WantedBy=multi-user.target
`, imageURL)
conf, err := conf.ContainerLinuxConfig(clc).Render(ctplatform.DO)
if err != nil {
return "", fmt.Errorf("Couldn't render userdata: %v", err)
}
return conf.String(), nil
}
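// Illustrative usage (editor's addition; the image name is hypothetical and
// the URL is the example from the flag help above):
//
//   ore do create-image --name my-flatcar-image \
//     --url https://stable.release.flatcar-linux.net/amd64-usr/current/flatcar_production_digitalocean_image.bin.bz2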
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
dallinger/recruiters.py
|
"""Recruiters manage the flow of participants to the experiment."""
import flask
import json
import logging
import os
import re
import requests
from rq import Queue
from sqlalchemy import func
from dallinger.config import get_config
from dallinger.db import redis_conn
from dallinger.db import session
from dallinger.experiment_server.utils import success_response
from dallinger.experiment_server.utils import crossdomain
from dallinger.experiment_server.worker_events import worker_function
from dallinger.heroku import tools as heroku_tools
from dallinger.notifications import get_messenger
from dallinger.notifications import MessengerError
from dallinger.models import Participant
from dallinger.models import Recruitment
from dallinger.mturk import MTurkService
from dallinger.mturk import DuplicateQualificationNameError
from dallinger.mturk import MTurkServiceException
from dallinger.mturk import QualificationNotFoundException
from dallinger.utils import get_base_url
from dallinger.utils import generate_random_id
from dallinger.utils import ParticipationTime
logger = logging.getLogger(__file__)
def _get_queue(name="default"):
# Connect to Redis Queue
return Queue(name, connection=redis_conn)
# These are constants because other components may listen for these
# messages in logs:
NEW_RECRUIT_LOG_PREFIX = "New participant requested:"
CLOSE_RECRUITMENT_LOG_PREFIX = "Close recruitment."
class Recruiter(object):
"""The base recruiter."""
nickname = None
external_submission_url = None  # MTurkRecruiter, for one, overrides this
def __init__(self):
"""For now, the contract of a Recruiter is that it takes no
arguments.
"""
logger.info("Initializing {}...".format(self.__class__.__name__))
def __call__(self):
"""For backward compatibility with experiments invoking recruiter()
as a method rather than a property.
"""
return self
def open_recruitment(self, n=1):
"""Return a list of one or more initial recruitment URLs and an initial
recruitment message:
{
items: [
'https://experiment-url-1',
'https://experiment-url-2'
],
message: "More info about this particular recruiter's process"
}
"""
raise NotImplementedError
def recruit(self, n=1):
raise NotImplementedError
def close_recruitment(self):
"""Throw an error."""
raise NotImplementedError
def reward_bonus(self, assignment_id, amount, reason):
"""Throw an error."""
raise NotImplementedError
def notify_completed(self, participant):
"""Allow the Recruiter to be notified when a recruited Participant
has completed an experiment they joined.
"""
pass
def notify_duration_exceeded(self, participants, reference_time):
"""Some participants have been working beyond the defined duration of
the experiment.
"""
logger.warning(
"Received notification that some participants "
"have been active for too long. No action taken."
)
def rejects_questionnaire_from(self, participant):
"""Recruiters have different circumstances under which experiment
questionnaires should be accepted or rejected.
To reject a questionnaire, this method returns an error string.
By default, they are accepted, so we return None.
"""
return None
def submitted_event(self):
"""Return the appropriate event type to trigger when
an assignment is submitted. If no event should be processed,
return None.
"""
return "AssignmentSubmitted"
class CLIRecruiter(Recruiter):
"""A recruiter which prints out /ad URLs to the console for direct
assignment.
"""
nickname = "cli"
def __init__(self):
super(CLIRecruiter, self).__init__()
self.config = get_config()
def open_recruitment(self, n=1):
"""Return initial experiment URL list, plus instructions
for finding subsequent recruitment events in experiment logs.
"""
logger.info("Opening CLI recruitment for {} participants".format(n))
recruitments = self.recruit(n)
message = (
'Search for "{}" in the logs for subsequent recruitment URLs.\n'
"Open the logs for this experiment with "
'"dallinger logs --app {}"'.format(
NEW_RECRUIT_LOG_PREFIX, self.config.get("id")
)
)
return {"items": recruitments, "message": message}
def recruit(self, n=1):
"""Generate experiemnt URLs and print them to the console."""
logger.info("Recruiting {} CLI participants".format(n))
urls = []
template = "{}/ad?recruiter={}&assignmentId={}&hitId={}&workerId={}&mode={}"
for i in range(n):
ad_url = template.format(
get_base_url(),
self.nickname,
generate_random_id(),
generate_random_id(),
generate_random_id(),
self._get_mode(),
)
logger.info("{} {}".format(NEW_RECRUIT_LOG_PREFIX, ad_url))
urls.append(ad_url)
return urls
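# Illustrative output (editor's addition): assuming get_base_url() returns
# "http://localhost:5000" and the configured mode is "debug", each logged
# URL looks like:
#   http://localhost:5000/ad?recruiter=cli&assignmentId=<random>&hitId=<random>&workerId=<random>&mode=debug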
def close_recruitment(self):
"""Talk about closing recruitment."""
logger.info(CLOSE_RECRUITMENT_LOG_PREFIX + " cli")
def approve_hit(self, assignment_id):
"""Approve the HIT."""
logger.info("Assignment {} has been marked for approval".format(assignment_id))
return True
def reward_bonus(self, assignment_id, amount, reason):
"""Print out bonus info for the assignment"""
logger.info(
'Award ${} for assignment {}, with reason "{}"'.format(
amount, assignment_id, reason
)
)
def _get_mode(self):
return self.config.get("mode")
class HotAirRecruiter(CLIRecruiter):
"""A dummy recruiter: talks the talk, but does not walk the walk.
- Always invokes templates in debug mode
- Prints experiment /ad URLs to the console
"""
nickname = "hotair"
def open_recruitment(self, n=1):
"""Return initial experiment URL list, plus instructions
for finding subsequent recruitment events in experiment logs.
"""
logger.info("Opening HotAir recruitment for {} participants".format(n))
recruitments = self.recruit(n)
message = "Recruitment requests will open browser windows automatically."
return {"items": recruitments, "message": message}
def reward_bonus(self, assignment_id, amount, reason):
"""Logging-only, Hot Air implementation"""
logger.info(
"Were this a real Recruiter, we'd be awarding ${} for assignment {}, "
'with reason "{}"'.format(amount, assignment_id, reason)
)
def _get_mode(self):
# Ignore config settings and always use debug mode
return "debug"
class SimulatedRecruiter(Recruiter):
"""A recruiter that recruits simulated participants."""
nickname = "sim"
def open_recruitment(self, n=1):
"""Open recruitment."""
logger.info("Opening Sim recruitment for {} participants".format(n))
return {"items": self.recruit(n), "message": "Simulated recruitment only"}
def recruit(self, n=1):
"""Recruit n participants."""
logger.info("Recruiting {} Sim participants".format(n))
return []
def close_recruitment(self):
"""Do nothing."""
pass
mturk_resubmit_whimsical = """Dearest Friend,
I am writing to let you know that at {s.when}, during my regular (and thoroughly
enjoyable) perusal of the most charming participant data table, I happened to
notice that assignment {s.assignment_id} has been taking longer than we were
expecting. I recall you had suggested {s.allowed_minutes:.0f} minutes as an upper
limit for what was an acceptable length of time for each assignment, however
this assignment had been underway for a shocking {s.active_minutes:.0f} minutes, a
full {s.excess_minutes:.0f} minutes over your allowance. I immediately dispatched a
telegram to our mutual friends at AWS and they were able to assure me that
although the notification had failed to be correctly processed, the assignment
had in fact been completed. Rather than trouble you, I dealt with this myself
and I can assure you there is no immediate cause for concern. Nonetheless, for
my own peace of mind, I would appreciate you taking the time to look into this
matter at your earliest convenience.
I remain your faithful and obedient servant,
William H. Dallinger
P.S. Please do not respond to this message, I am busy with other matters.
"""
mturk_resubmit = """Dear experimenter,
This is an automated email from Dallinger. You are receiving this email because
the Dallinger platform has discovered evidence that a notification from Amazon
Web Services failed to arrive at the server. Dallinger has automatically
contacted AWS and has determined the dropped notification was a submitted
notification (i.e. the participant has finished the experiment). This is a non-
fatal error and so Dallinger has auto-corrected the problem. Nonetheless you may
wish to check the database.
Best,
The Dallinger dev. team.
Error details:
Assignment: {s.assignment_id}
Allowed time: {s.allowed_minutes:.0f} minute(s)
Time since participant started: {s.active_minutes:.0f}
"""
mturk_cancelled_hit_whimsical = """Dearest Friend,
I am afraid I write to you with most grave tidings. At {s.when}, during a
routine check of the usually most delightful participant data table, I happened
to notice that assignment {s.assignment_id} has been taking longer than we were
expecting. I recall you had suggested {s.allowed_minutes:.0f} minutes as an upper
limit for what was an acceptable length of time for each assignment, however
this assignment had been underway for a shocking {s.active_minutes:.0f} minutes, a
full {s.excess_minutes:.0f} minutes over your allowance. I immediately dispatched a
telegram to our mutual friends at AWS and they in fact informed me that they had
already sent us a notification which we must have failed to process, implying
that the assignment had not been successfully completed. Of course when the
seriousness of this scenario dawned on me I had to depend on my trusting walking
stick for support: without the notification I didn't know to remove the old
assignment's data from the tables and AWS will have already sent their
replacement, meaning that the tables may already be in a most unsound state!
I am sorry to trouble you with this, however, I do not know how to proceed so
rather than trying to remedy the scenario myself, I have instead temporarily
ceased operations by expiring the HIT with the fellows at AWS and have
refrained from posting any further invitations myself. Once you see fit I
would be most appreciative if you could attend to this issue with the caution,
sensitivity and intelligence for which I know you so well.
I remain your faithful and obedient servant,
William H. Dallinger
P.S. Please do not respond to this message, I am busy with other matters.
"""
cancelled_hit = """Dear experimenter,
This is an automated email from Dallinger. You are receiving this email because
the Dallinger platform has discovered evidence that a notification from Amazon
Web Services failed to arrive at the server. Dallinger has automatically
contacted AWS and has determined the dropped notification was an
abandoned/returned notification (i.e. the participant had returned the
experiment or had run out of time). This is a serious error and so Dallinger has
paused the experiment - expiring the HIT on MTurk and setting auto_recruit to
false. Participants currently playing will be able to finish, however no further
participants will be recruited until you do so manually. We strongly suggest you
use the details below to check the database to make sure the missing
notification has not caused additional problems before resuming. If you are
receiving a lot of these emails this suggests something is wrong with your
experiment code.
Best,
The Dallinger dev. team.
Error details:
Assignment: {s.assignment_id}
Allowed time (minutes): {s.allowed_minutes:.0f}
Time since participant started: {s.active_minutes:.0f}
"""
class MTurkHITMessages(object):
@staticmethod
def by_flavor(summary, whimsical):
if whimsical:
return WhimsicalMTurkHITMessages(summary)
return MTurkHITMessages(summary)
_templates = {
"resubmitted": {
"subject": "Dallinger automated email - minor error.",
"template": mturk_resubmit,
},
"cancelled": {
"subject": "Dallinger automated email - major error.",
"template": cancelled_hit,
},
}
def __init__(self, summary):
self.summary = summary
def resubmitted_msg(self):
return self._build("resubmitted")
def hit_cancelled_msg(self):
return self._build("cancelled")
def _build(self, category):
data = self._templates[category]
return {
"body": data["template"].format(s=self.summary),
"subject": data["subject"],
}
class WhimsicalMTurkHITMessages(MTurkHITMessages):
_templates = {
"resubmitted": {
"subject": "A matter of minor concern.",
"template": mturk_resubmit_whimsical,
},
"cancelled": {
"subject": "Most troubling news.",
"template": mturk_cancelled_hit_whimsical,
},
}
class MTurkRecruiterException(Exception):
"""Custom exception for MTurkRecruiter"""
mturk_routes = flask.Blueprint("mturk_recruiter", __name__)
@mturk_routes.route("/mturk-sns-listener", methods=["POST", "GET"])
@crossdomain(origin="*")
def mturk_recruiter_notify():
"""Listens for:
1. AWS SNS subscription confirmation request
2. SNS subscription messages, which forward MTurk notifications
"""
recruiter = MTurkRecruiter()
logger.warning("Raw notification body: {}".format(flask.request.get_data()))
content = json.loads(flask.request.get_data())
message_type = content.get("Type")
# 1. SNS subscription confirmation request
if message_type == "SubscriptionConfirmation":
logger.warning("Received a SubscriptionConfirmation message from AWS.")
token = content.get("Token")
topic = content.get("TopicArn")
recruiter._confirm_sns_subscription(token=token, topic=topic)
# 2. MTurk Worker event
elif message_type == "Notification":
logger.warning("Received an Event Notification from AWS.")
message = json.loads(content.get("Message"))
events = message["Events"]
recruiter._report_event_notification(events)
else:
logger.warning("Unknown SNS notification type: {}".format(message_type))
return success_response()
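# Illustrative sketch (editor's addition, not Dallinger code): the
# "Notification" branch above expects an SNS wrapper whose "Message" field is
# itself a JSON string, e.g.:
#
#   {
#     "Type": "Notification",
#     "Message": "{\"Events\": [{\"EventType\": \"AssignmentSubmitted\", \"AssignmentId\": \"ABC123\"}]}"
#   }
#
# Each event is then enqueued for worker_function with its EventType and
# AssignmentId (participant_id is passed as None here).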
class MTurkRecruiter(Recruiter):
"""Recruit participants from Amazon Mechanical Turk"""
nickname = "mturk"
extra_routes = mturk_routes
experiment_qualification_desc = "Experiment-specific qualification"
group_qualification_desc = "Experiment group qualification"
def __init__(self):
super(MTurkRecruiter, self).__init__()
self.config = get_config()
base_url = get_base_url()
self.ad_url = "{}/ad?recruiter={}".format(base_url, self.nickname)
self.notification_url = "{}/mturk-sns-listener".format(base_url)
self.hit_domain = os.getenv("HOST")
self.mturkservice = MTurkService(
aws_access_key_id=self.config.get("aws_access_key_id"),
aws_secret_access_key=self.config.get("aws_secret_access_key"),
region_name=self.config.get("aws_region"),
sandbox=self.config.get("mode") != "live",
)
self.messenger = get_messenger(self.config)
self._validate_config()
def _validate_config(self):
mode = self.config.get("mode")
if mode not in ("sandbox", "live"):
raise MTurkRecruiterException(
'"{}" is not a valid mode for MTurk recruitment. '
'The value of "mode" must be either "sandbox" or "live"'.format(mode)
)
@property
def external_submission_url(self):
"""On experiment completion, participants are returned to
the Mechanical Turk site to submit their HIT, which in turn triggers
notifications to the /notifications route.
"""
if self.config.get("mode") == "sandbox":
return "https://workersandbox.mturk.com/mturk/externalSubmit"
return "https://www.mturk.com/mturk/externalSubmit"
@property
def qualifications(self):
quals = {self.config.get("id"): self.experiment_qualification_desc}
group_name = self.config.get("group_name", None)
if group_name:
quals[group_name] = self.group_qualification_desc
return quals
def open_recruitment(self, n=1):
"""Open a connection to AWS MTurk and create a HIT."""
logger.info("Opening MTurk recruitment for {} participants".format(n))
if self.is_in_progress:
raise MTurkRecruiterException(
"Tried to open_recruitment on already open recruiter."
)
if self.hit_domain is None:
raise MTurkRecruiterException("Can't run a HIT from localhost")
self.mturkservice.check_credentials()
if self.config.get("assign_qualifications"):
self._create_mturk_qualifications()
hit_request = {
"experiment_id": self.config.get("id"),
"max_assignments": n,
"title": self.config.get("title"),
"description": self.config.get("description"),
"keywords": self._config_to_list("keywords"),
"reward": self.config.get("base_payment"),
"duration_hours": self.config.get("duration"),
"lifetime_days": self.config.get("lifetime"),
"ad_url": self.ad_url,
"notification_url": self.notification_url,
"approve_requirement": self.config.get("approve_requirement"),
"us_only": self.config.get("us_only"),
"blacklist": self._config_to_list("qualification_blacklist"),
"annotation": self.config.get("id"),
}
hit_info = self.mturkservice.create_hit(**hit_request)
if self.config.get("mode") == "sandbox":
lookup_url = (
"https://workersandbox.mturk.com/mturk/preview?groupId={type_id}"
)
else:
lookup_url = "https://worker.mturk.com/mturk/preview?groupId={type_id}"
return {
"items": [lookup_url.format(**hit_info)],
"message": "HIT now published to Amazon Mechanical Turk",
}
def recruit(self, n=1):
"""Recruit n new participants to an existing HIT"""
logger.info("Recruiting {} MTurk participants".format(n))
if not self.config.get("auto_recruit"):
logger.info("auto_recruit is False: recruitment suppressed")
return
hit_id = self.current_hit_id()
if hit_id is None:
logger.info("no HIT in progress: recruitment aborted")
return
try:
return self.mturkservice.extend_hit(
hit_id, number=n, duration_hours=self.config.get("duration")
)
except MTurkServiceException as ex:
logger.exception(str(ex))
def notify_completed(self, participant):
"""Assign a Qualification to the Participant for the experiment ID,
and for the configured group_name, if it's been set.
Overrecruited participants don't receive qualifications, since they
haven't actually completed the experiment. This allows them to remain
eligible for future runs.
"""
if participant.status == "overrecruited" or not self.qualification_active:
return
worker_id = participant.worker_id
for name in self.qualifications:
try:
self.mturkservice.increment_qualification_score(name, worker_id)
except QualificationNotFoundException as ex:
logger.exception(ex)
def notify_duration_exceeded(self, participants, reference_time):
"""The participant has exceed the maximum time for the activity,
defined in the "duration" config value. We need find out the assignment
status on MTurk and act based on this.
"""
unsubmitted = []
for participant in participants:
summary = ParticipationTime(participant, reference_time, self.config)
status = self._mturk_status_for(participant)
if status == "Approved":
participant.status = "approved"
session.commit()
elif status == "Rejected":
participant.status = "rejected"
session.commit()
elif status == "Submitted":
self._resend_submitted_rest_notification_for(participant)
self._message_researcher(self._resubmitted_msg(summary))
logger.warning(
"Error - submitted notification for participant {} missed. "
"A replacement notification was created and sent, "
"but proceed with caution.".format(participant.id)
)
else:
self._send_notification_missing_rest_notification_for(participant)
unsubmitted.append(summary)
if unsubmitted:
self._disable_autorecruit()
self.close_recruitment()
pick_one = unsubmitted[0]
# message the researcher about one of the participants:
self._message_researcher(self._cancelled_msg(pick_one))
# Attempt to force-expire the hit via boto. It's possible
# that the HIT won't exist if the HIT has been deleted manually.
try:
self.mturkservice.expire_hit(pick_one.participant.hit_id)
except MTurkServiceException as ex:
logger.exception(ex)
def rejects_questionnaire_from(self, participant):
"""Mechanical Turk participants submit their HITs on the MTurk site
(see external_submission_url), and MTurk then sends a notification
to Dallinger which is used to mark the assignment completed.
If a HIT has already been submitted, it's too late to submit the
questionnaire.
"""
if participant.status != "working":
return (
"This participant has already sumbitted their HIT "
"on MTurk and can no longer submit the questionnaire"
)
def submitted_event(self):
"""MTurk will send its own notification when the worker
completes the HIT on that service.
"""
return None
def reward_bonus(self, assignment_id, amount, reason):
"""Reward the Turker for a specified assignment with a bonus."""
try:
return self.mturkservice.grant_bonus(assignment_id, amount, reason)
except MTurkServiceException as ex:
logger.exception(str(ex))
@property
def is_in_progress(self):
# Has this recruiter resulted in any participants?
return bool(Participant.query.filter_by(recruiter_id=self.nickname).first())
@property
def qualification_active(self):
return bool(self.config.get("assign_qualifications"))
def current_hit_id(self):
any_participant_record = (
Participant.query.with_entities(Participant.hit_id)
.filter_by(recruiter_id=self.nickname)
.first()
)
if any_participant_record is not None:
return str(any_participant_record.hit_id)
def approve_hit(self, assignment_id):
try:
return self.mturkservice.approve_assignment(assignment_id)
except MTurkServiceException as ex:
logger.exception(str(ex))
def close_recruitment(self):
"""Clean up once the experiment is complete.
        This may be called before all users have finished, so it uses the
expire_hit rather than the disable_hit API call. This allows people
who have already picked up the hit to complete it as normal.
"""
logger.info(CLOSE_RECRUITMENT_LOG_PREFIX + " mturk")
# We are not expiring the hit currently as notifications are failing
# TODO: Reinstate this
# try:
# return self.mturkservice.expire_hit(
# self.current_hit_id(),
# )
# except MTurkServiceException as ex:
# logger.exception(str(ex))
def _confirm_sns_subscription(self, token, topic):
self.mturkservice.confirm_subscription(token=token, topic=topic)
def _report_event_notification(self, events):
q = _get_queue()
for event in events:
event_type = event.get("EventType")
assignment_id = event.get("AssignmentId")
participant_id = None
q.enqueue(worker_function, event_type, assignment_id, participant_id)
def _mturk_status_for(self, participant):
try:
assignment = self.mturkservice.get_assignment(participant.assignment_id)
status = assignment["status"]
except Exception:
status = None
return status
def _disable_autorecruit(self):
heroku_app = heroku_tools.HerokuApp(self.config.get("id"))
args = json.dumps({"auto_recruit": "false"})
headers = heroku_tools.request_headers(self.config.get("heroku_auth_token"))
requests.patch(heroku_app.config_url, data=args, headers=headers)
def _resend_submitted_rest_notification_for(self, participant):
q = _get_queue()
q.enqueue(
worker_function, "AssignmentSubmitted", participant.assignment_id, None
)
def _send_notification_missing_rest_notification_for(self, participant):
q = _get_queue()
q.enqueue(
worker_function, "NotificationMissing", participant.assignment_id, None
)
def _config_to_list(self, key):
# At some point we'll support lists, so all service code supports them,
# but the config system only supports strings for now, so we convert:
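        # Illustrative conversion (the config value below is an assumption):
        #   "memory, attention , "  ->  ["memory", "attention"]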
as_string = self.config.get(key, None)
if as_string is None:
return []
return [item.strip() for item in as_string.split(",") if item.strip()]
def _create_mturk_qualifications(self):
"""Create MTurk Qualification for experiment ID, and for group_name
        if it's been set. Qualifications with these names may already exist, but
it's faster to try and fail than to check, then try.
"""
for name, desc in self.qualifications.items():
try:
self.mturkservice.create_qualification_type(name, desc)
except DuplicateQualificationNameError:
pass
def _resubmitted_msg(self, summary):
templates = MTurkHITMessages.by_flavor(summary, self.config.get("whimsical"))
return templates.resubmitted_msg()
def _cancelled_msg(self, summary):
templates = MTurkHITMessages.by_flavor(summary, self.config.get("whimsical"))
return templates.hit_cancelled_msg()
def _message_researcher(self, message):
try:
self.messenger.send(message)
except MessengerError as ex:
logger.exception(ex)
class RedisTally(object):
_key = "num_recruited"
def __init__(self):
redis_conn.set(self._key, 0)
def increment(self, count):
redis_conn.incr(self._key, count)
@property
def current(self):
return int(redis_conn.get(self._key))
class MTurkLargeRecruiter(MTurkRecruiter):
nickname = "mturklarge"
pool_size = 10
def __init__(self, *args, **kwargs):
self.counter = kwargs.get("counter", RedisTally())
super(MTurkLargeRecruiter, self).__init__()
def open_recruitment(self, n=1):
logger.info("Opening MTurkLarge recruitment for {} participants".format(n))
if self.is_in_progress:
raise MTurkRecruiterException(
"Tried to open_recruitment on already open recruiter."
)
self.counter.increment(n)
to_recruit = max(n, self.pool_size)
return super(MTurkLargeRecruiter, self).open_recruitment(to_recruit)
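    # Illustrative behaviour (the request sizes are assumptions): with the
    # default pool_size of 10, open_recruitment(n=2) publishes 10 assignments
    # up front, and later recruit() calls are absorbed by the 8-participant
    # surplus (see remaining_pool below) before the HIT is actually extended.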
def recruit(self, n=1):
logger.info("Recruiting {} MTurkLarge participants".format(n))
if not self.config.get("auto_recruit"):
logger.info("auto_recruit is False: recruitment suppressed")
return
needed = max(0, n - self.remaining_pool)
self.counter.increment(n)
if needed:
return super(MTurkLargeRecruiter, self).recruit(needed)
@property
def remaining_pool(self):
return max(0, self.pool_size - self.counter.current)
class BotRecruiter(Recruiter):
"""Recruit bot participants using a queue"""
nickname = "bots"
_timeout = "1h"
def __init__(self):
super(BotRecruiter, self).__init__()
self.config = get_config()
def open_recruitment(self, n=1):
"""Start recruiting right away."""
logger.info("Opening Bot recruitment for {} participants".format(n))
factory = self._get_bot_factory()
bot_class_name = factory("", "", "").__class__.__name__
return {
"items": self.recruit(n),
"message": "Bot recruitment started using {}".format(bot_class_name),
}
def recruit(self, n=1):
"""Recruit n new participant bots to the queue"""
logger.info("Recruiting {} Bot participants".format(n))
factory = self._get_bot_factory()
urls = []
q = _get_queue(name="low")
for _ in range(n):
base_url = get_base_url()
worker = generate_random_id()
hit = generate_random_id()
assignment = generate_random_id()
ad_parameters = (
"recruiter={}&assignmentId={}&hitId={}&workerId={}&mode=sandbox"
)
ad_parameters = ad_parameters.format(self.nickname, assignment, hit, worker)
url = "{}/ad?{}".format(base_url, ad_parameters)
urls.append(url)
bot = factory(url, assignment_id=assignment, worker_id=worker, hit_id=hit)
job = q.enqueue(bot.run_experiment, timeout=self._timeout)
logger.warning("Created job {} for url {}.".format(job.id, url))
return urls
def approve_hit(self, assignment_id):
return True
def close_recruitment(self):
"""Clean up once the experiment is complete.
This does nothing at this time.
"""
logger.info(CLOSE_RECRUITMENT_LOG_PREFIX + " bot")
def notify_duration_exceeded(self, participants, reference_time):
"""The bot participant has been working longer than the time defined in
the "duration" config value.
"""
for participant in participants:
participant.status = "rejected"
session.commit()
def reward_bonus(self, assignment_id, amount, reason):
"""Logging only. These are bots."""
logger.info("Bots don't get bonuses. Sorry, bots.")
def submitted_event(self):
return "BotAssignmentSubmitted"
def _get_bot_factory(self):
# Must be imported at run-time
from dallinger_experiment.experiment import Bot
return Bot
class MultiRecruiter(Recruiter):
nickname = "multi"
# recruiter spec e.g. recruiters = bots: 5, mturk: 1
SPEC_RE = re.compile(r"(\w+):\s*(\d+)")
def __init__(self):
super(MultiRecruiter, self).__init__()
self.spec = self.parse_spec()
def parse_spec(self):
"""Parse the specification of how to recruit participants.
Example: recruiters = bots: 5, mturk: 1
"""
recruiters = []
spec = get_config().get("recruiters")
for match in self.SPEC_RE.finditer(spec):
name = match.group(1)
count = int(match.group(2))
recruiters.append((name, count))
return recruiters
def recruiters(self, n=1):
"""Iterator that provides recruiters along with the participant
count to be recruited for up to `n` participants.
We use the `Recruitment` table in the db to keep track of
how many recruitments have been requested using each recruiter.
We'll use the first one from the specification that
hasn't already reached its quota.
"""
recruit_count = 0
while recruit_count <= n:
counts = dict(
session.query(Recruitment.recruiter_id, func.count(Recruitment.id))
.group_by(Recruitment.recruiter_id)
.all()
)
for recruiter_id, target_count in self.spec:
remaining = 0
count = counts.get(recruiter_id, 0)
if count >= target_count:
# This recruiter quota was reached;
# move on to the next one.
counts[recruiter_id] = count - target_count
continue
else:
# Quota is still available; let's use it.
remaining = target_count - count
break
else:
return
num_recruits = min(n - recruit_count, remaining)
# record the recruitments and commit
for i in range(num_recruits):
session.add(Recruitment(recruiter_id=recruiter_id))
session.commit()
recruit_count += num_recruits
yield by_name(recruiter_id), num_recruits
def open_recruitment(self, n=1):
"""Return initial experiment URL list.
"""
logger.info("Multi recruitment running for {} participants".format(n))
recruitments = []
messages = {}
remaining = n
for recruiter, count in self.recruiters(n):
if not count:
break
if recruiter.nickname in messages:
result = recruiter.recruit(count)
recruitments.extend(result)
else:
result = recruiter.open_recruitment(count)
recruitments.extend(result["items"])
messages[recruiter.nickname] = result["message"]
remaining -= count
if remaining <= 0:
break
logger.info(
(
"Multi-recruited {} out of {} participants, " "using {} recruiters."
).format(n - remaining, n, len(messages))
)
return {"items": recruitments, "message": "\n".join(messages.values())}
def recruit(self, n=1):
"""For multi recruitment recruit and open_recruitment
have the same logic. We may need to open recruitment on any of our
sub-recruiters at any point in recruitment.
"""
return self.open_recruitment(n)["items"]
def close_recruitment(self):
for name in set(name for name, count in self.spec):
recruiter = by_name(name)
recruiter.close_recruitment()
def for_experiment(experiment):
"""Return the Recruiter instance for the specified Experiment.
This provides a seam for testing.
"""
return experiment.recruiter
def from_config(config):
"""Return a Recruiter instance based on the configuration.
Default is HotAirRecruiter in debug mode (unless we're using
the bot recruiter, which can be used in debug mode)
and the MTurkRecruiter in other modes.
"""
debug_mode = config.get("mode") == "debug"
name = config.get("recruiter", None)
recruiter = None
# Special case 1: Don't use a configured recruiter in replay mode
if config.get("replay"):
return HotAirRecruiter()
if name is not None:
recruiter = by_name(name)
# Special case 2: may run BotRecruiter or MultiRecruiter in any mode
# (debug or not), so it trumps everything else:
if isinstance(recruiter, (BotRecruiter, MultiRecruiter)):
return recruiter
# Special case 3: if we're not using bots and we're in debug mode,
# ignore any configured recruiter:
if debug_mode:
return HotAirRecruiter()
# Configured recruiter:
if recruiter is not None:
return recruiter
if name and recruiter is None:
raise NotImplementedError("No such recruiter {}".format(name))
# Default if we're not in debug mode:
return MTurkRecruiter()
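# Illustrative outcomes of from_config (the config values are assumptions):
#   mode=debug, recruiter unset   -> HotAirRecruiter
#   mode=debug, recruiter="bots"  -> BotRecruiter (special case 2)
#   mode=live,  recruiter unset   -> MTurkRecruiter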
def _descendent_classes(cls):
for cls in cls.__subclasses__():
yield cls
for cls in _descendent_classes(cls):
yield cls
BY_NAME = {}
for cls in _descendent_classes(Recruiter):
BY_NAME[cls.__name__] = BY_NAME[cls.nickname] = cls
def by_name(name):
"""Attempt to return a recruiter class by name.
Actual class names and known nicknames are both supported.
"""
klass = BY_NAME.get(name)
if klass is not None:
return klass()
|
[] |
[] |
[
"HOST"
] |
[]
|
["HOST"]
|
python
| 1 | 0 | |
sahara/openstack/common/db/sqlalchemy/provision.py
|
# Copyright 2013 Mirantis.inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provision test environment for specific DB backends"""
import argparse
import logging
import os
import random
import string
from six import moves
import sqlalchemy
from sahara.openstack.common.db import exception as exc
LOG = logging.getLogger(__name__)
def get_engine(uri):
"""Engine creation
    Call the function with the admin connection URI to get an admin engine.
    An admin connection is required to create a temporary user and database
    for each particular test. Pass the URI of a previously created temporary
    database to reconnect to it.
"""
return sqlalchemy.create_engine(uri, poolclass=sqlalchemy.pool.NullPool)
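# Illustrative usage (the admin URI below is an assumption):
#   admin = get_engine('postgresql://admin:secret@localhost/postgres')
#   test_uri = create_database(admin)
#   test_engine = get_engine(test_uri)
#   ...run tests against test_engine...
#   drop_database(admin, test_uri)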
def _execute_sql(engine, sql, driver):
"""Initialize connection, execute sql query and close it."""
try:
with engine.connect() as conn:
if driver == 'postgresql':
conn.connection.set_isolation_level(0)
for s in sql:
conn.execute(s)
except sqlalchemy.exc.OperationalError:
msg = ('%s does not match database admin '
'credentials or database does not exist.')
LOG.exception(msg % engine.url)
raise exc.DBConnectionError(msg % engine.url)
def create_database(engine):
"""Provide temporary user and database for each particular test."""
driver = engine.name
auth = {
'database': ''.join(random.choice(string.ascii_lowercase)
for i in moves.range(10)),
'user': engine.url.username,
'passwd': engine.url.password,
}
sqls = [
"drop database if exists %(database)s;",
"create database %(database)s;"
]
if driver == 'sqlite':
return 'sqlite:////tmp/%s' % auth['database']
elif driver in ['mysql', 'postgresql']:
sql_query = map(lambda x: x % auth, sqls)
_execute_sql(engine, sql_query, driver)
else:
raise ValueError('Unsupported RDBMS %s' % driver)
params = auth.copy()
params['backend'] = driver
return "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" % params
def drop_database(admin_engine, current_uri):
"""Drop temporary database and user after each particular test."""
engine = get_engine(current_uri)
driver = engine.name
auth = {'database': engine.url.database, 'user': engine.url.username}
if driver == 'sqlite':
try:
os.remove(auth['database'])
except OSError:
pass
elif driver in ['mysql', 'postgresql']:
sql = "drop database if exists %(database)s;"
_execute_sql(admin_engine, [sql % auth], driver)
else:
raise ValueError('Unsupported RDBMS %s' % driver)
def main():
"""Controller to handle commands
::create: Create test user and database with random names.
::drop: Drop user and database created by previous command.
"""
parser = argparse.ArgumentParser(
description='Controller to handle database creation and dropping'
' commands.',
        epilog='Under normal circumstances this is not used directly.'
' Used in .testr.conf to automate test database creation'
' and dropping processes.')
subparsers = parser.add_subparsers(
help='Subcommands to manipulate temporary test databases.')
create = subparsers.add_parser(
'create',
help='Create temporary test '
'databases and users.')
create.set_defaults(which='create')
create.add_argument(
'instances_count',
type=int,
help='Number of databases to create.')
drop = subparsers.add_parser(
'drop',
help='Drop temporary test databases and users.')
drop.set_defaults(which='drop')
drop.add_argument(
'instances',
nargs='+',
help='List of databases uri to be dropped.')
args = parser.parse_args()
connection_string = os.getenv('OS_TEST_DBAPI_ADMIN_CONNECTION',
'sqlite://')
engine = get_engine(connection_string)
which = args.which
if which == "create":
for i in range(int(args.instances_count)):
print(create_database(engine))
elif which == "drop":
for db in args.instances:
drop_database(engine, db)
if __name__ == "__main__":
main()
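# Illustrative shell usage (the admin connection string is an assumption):
#   OS_TEST_DBAPI_ADMIN_CONNECTION=mysql://root:secret@localhost \
#       python provision.py create 2
#   python provision.py drop mysql://user:pw@localhost/tmpdbname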
|
[] |
[] |
[
"OS_TEST_DBAPI_ADMIN_CONNECTION"
] |
[]
|
["OS_TEST_DBAPI_ADMIN_CONNECTION"]
|
python
| 1 | 0 | |
src/crypto/x509/root_darwin.go
|
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run root_darwin_arm_gen.go -output root_darwin_armx.go
package x509
import (
"bufio"
"bytes"
"crypto/sha1"
"encoding/pem"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
)
var debugExecDarwinRoots = strings.Contains(os.Getenv("GODEBUG"), "x509roots=1")
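// For example, running `GODEBUG=x509roots=1 go test crypto/x509` (an
// illustrative invocation) enables the debug output guarded by
// debugExecDarwinRoots below.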
func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) {
return nil, nil
}
// This code is only used when compiling without cgo.
// It is here, instead of root_nocgo_darwin.go, so that tests can check it
// even if the tests are run with cgo enabled.
// The linker will not include these unused functions in binaries built with cgo enabled.
// execSecurityRoots finds the macOS list of trusted root certificates
// using only command-line tools. This is our fallback path when cgo isn't available.
//
// The strategy is as follows:
//
// 1. Run "security trust-settings-export" and "security
// trust-settings-export -d" to discover the set of certs with some
// user-tweaked trust policy. We're too lazy to parse the XML (at
// least at this stage of Go 1.8) to understand what the trust
// policy actually is. We just learn that there is _some_ policy.
//
// 2. Run "security find-certificate" to dump the list of system root
// CAs in PEM format.
//
// 3. For each dumped cert, conditionally verify it with "security
// verify-cert" if that cert was in the set discovered in Step 1.
// Without the Step 1 optimization, running "security verify-cert"
// 150-200 times takes 3.5 seconds. With the optimization, the
// whole process takes about 180 milliseconds with 1 untrusted root
// CA. (Compared to 110ms in the cgo path)
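//
// Roughly, the underlying invocations look like (illustrative; paths and
// flags are the ones used in the code below):
//
//	security trust-settings-export -d <tmpdir>/admin
//	security find-certificate -a -p /System/Library/Keychains/SystemRootCertificates.keychain
//	security verify-cert -c <tmpfile> -l -L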
func execSecurityRoots() (*CertPool, error) {
hasPolicy, err := getCertsWithTrustPolicy()
if err != nil {
return nil, err
}
if debugExecDarwinRoots {
println(fmt.Sprintf("crypto/x509: %d certs have a trust policy", len(hasPolicy)))
}
args := []string{"find-certificate", "-a", "-p",
"/System/Library/Keychains/SystemRootCertificates.keychain",
"/Library/Keychains/System.keychain",
}
home, err := os.UserHomeDir()
if err != nil {
if debugExecDarwinRoots {
println("crypto/x509: can't get user home directory: %v", err)
}
} else {
args = append(args,
filepath.Join(home, "/Library/Keychains/login.keychain"),
// Fresh installs of Sierra use a slightly different path for the login keychain
filepath.Join(home, "/Library/Keychains/login.keychain-db"),
)
}
cmd := exec.Command("/usr/bin/security", args...)
data, err := cmd.Output()
if err != nil {
return nil, err
}
var (
mu sync.Mutex
roots = NewCertPool()
numVerified int // number of execs of 'security verify-cert', for debug stats
)
blockCh := make(chan *pem.Block)
var wg sync.WaitGroup
// Using 4 goroutines to pipe into verify-cert seems to be
// about the best we can do. The verify-cert binary seems to
// just RPC to another server with coarse locking anyway, so
// running 16 at a time for instance doesn't help at all. Due
// to the "if hasPolicy" check below, though, we will rarely
// (or never) call verify-cert on stock macOS systems, though.
// The hope is that we only call verify-cert when the user has
// tweaked their trust policy. These 4 goroutines are only
// defensive in the pathological case of many trust edits.
for i := 0; i < 4; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for block := range blockCh {
cert, err := ParseCertificate(block.Bytes)
if err != nil {
continue
}
sha1CapHex := fmt.Sprintf("%X", sha1.Sum(block.Bytes))
valid := true
verifyChecks := 0
if hasPolicy[sha1CapHex] {
verifyChecks++
if !verifyCertWithSystem(block, cert) {
valid = false
}
}
mu.Lock()
numVerified += verifyChecks
if valid {
roots.AddCert(cert)
}
mu.Unlock()
}
}()
}
for len(data) > 0 {
var block *pem.Block
block, data = pem.Decode(data)
if block == nil {
break
}
if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
continue
}
blockCh <- block
}
close(blockCh)
wg.Wait()
if debugExecDarwinRoots {
mu.Lock()
defer mu.Unlock()
println(fmt.Sprintf("crypto/x509: ran security verify-cert %d times", numVerified))
}
return roots, nil
}
func verifyCertWithSystem(block *pem.Block, cert *Certificate) bool {
data := pem.EncodeToMemory(block)
f, err := ioutil.TempFile("", "cert")
if err != nil {
fmt.Fprintf(os.Stderr, "can't create temporary file for cert: %v", err)
return false
}
defer os.Remove(f.Name())
if _, err := f.Write(data); err != nil {
fmt.Fprintf(os.Stderr, "can't write temporary file for cert: %v", err)
return false
}
if err := f.Close(); err != nil {
fmt.Fprintf(os.Stderr, "can't write temporary file for cert: %v", err)
return false
}
cmd := exec.Command("/usr/bin/security", "verify-cert", "-c", f.Name(), "-l", "-L")
var stderr bytes.Buffer
if debugExecDarwinRoots {
cmd.Stderr = &stderr
}
if err := cmd.Run(); err != nil {
if debugExecDarwinRoots {
println(fmt.Sprintf("crypto/x509: verify-cert rejected %s: %q", cert.Subject, bytes.TrimSpace(stderr.Bytes())))
}
return false
}
if debugExecDarwinRoots {
println(fmt.Sprintf("crypto/x509: verify-cert approved %s", cert.Subject))
}
return true
}
// getCertsWithTrustPolicy returns the set of certs that have a
// possibly-altered trust policy. The keys of the map are capitalized
// sha1 hex of the raw cert.
// They are the certs that should be checked against `security
// verify-cert` to see whether the user altered the default trust
// settings. This code is only used for cgo-disabled builds.
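//
// For example, an entry might look like (illustrative digest only):
//
//	set["02FAF3E291435468607857694DF5E45B68851868"] = true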
func getCertsWithTrustPolicy() (map[string]bool, error) {
set := map[string]bool{}
td, err := ioutil.TempDir("", "x509trustpolicy")
if err != nil {
return nil, err
}
defer os.RemoveAll(td)
run := func(file string, args ...string) error {
file = filepath.Join(td, file)
args = append(args, file)
cmd := exec.Command("/usr/bin/security", args...)
var stderr bytes.Buffer
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
// If there are no trust settings, the
// `security trust-settings-export` command
// fails with:
// exit status 1, SecTrustSettingsCreateExternalRepresentation: No Trust Settings were found.
// Rather than match on English substrings that are probably
// localized on macOS, just interpret any failure to mean that
// there are no trust settings.
if debugExecDarwinRoots {
println(fmt.Sprintf("crypto/x509: exec %q: %v, %s", cmd.Args, err, stderr.Bytes()))
}
return nil
}
f, err := os.Open(file)
if err != nil {
return err
}
defer f.Close()
// Gather all the runs of 40 capitalized hex characters.
br := bufio.NewReader(f)
var hexBuf bytes.Buffer
for {
b, err := br.ReadByte()
isHex := ('A' <= b && b <= 'F') || ('0' <= b && b <= '9')
if isHex {
hexBuf.WriteByte(b)
} else {
if hexBuf.Len() == 40 {
set[hexBuf.String()] = true
}
hexBuf.Reset()
}
if err == io.EOF {
break
}
if err != nil {
return err
}
}
return nil
}
if err := run("user", "trust-settings-export"); err != nil {
return nil, fmt.Errorf("dump-trust-settings (user): %v", err)
}
if err := run("admin", "trust-settings-export", "-d"); err != nil {
return nil, fmt.Errorf("dump-trust-settings (admin): %v", err)
}
return set, nil
}
|
[
"\"GODEBUG\""
] |
[] |
[
"GODEBUG"
] |
[]
|
["GODEBUG"]
|
go
| 1 | 0 | |
pkg/cli/display/parse_err.go
|
package display
import (
"strings"
"github.com/pingcap/ticat/pkg/cli/core"
)
func HandleParseResult(
cc *core.Cli,
flow *core.ParsedCmds,
env *core.Env,
isSearch bool,
isLess bool,
isMore bool) bool {
if isMore || isLess {
return true
}
for _, cmd := range flow.Cmds {
if cmd.ParseResult.Error == nil {
continue
}
// TODO: better handling: sub flow parse failed
/*
stackDepth := env.GetInt("sys.stack-depth")
if stackDepth > 0 {
panic(cmd.ParseResult.Error)
}
*/
input := cmd.ParseResult.Input
inputStr := strings.Join(input, " ")
switch cmd.ParseResult.Error.(type) {
case core.ParseErrExpectNoArg:
title := "[" + cmd.DisplayPath(cc.Cmds.Strs.PathSep, true) + "] doesn't have args."
return PrintFindResultByParseError(cc, cmd, env, title)
case core.ParseErrEnv:
PrintTipTitle(cc.Screen, env,
"["+cmd.DisplayPath(cc.Cmds.Strs.PathSep, true)+"] parse env failed, "+
"'"+inputStr+"' is not valid input.",
"",
"env setting examples:",
"",
SuggestEnvSetting(env),
"")
case core.ParseErrExpectArgs:
return PrintCmdByParseError(cc, cmd, env)
case core.ParseErrExpectCmd:
return PrintSubCmdByParseError(cc, flow, cmd, env, isSearch, isMore)
default:
return PrintFindResultByParseError(cc, cmd, env, "")
}
}
return true
}
func PrintCmdByParseError(
cc *core.Cli,
cmd core.ParsedCmd,
env *core.Env) bool {
sep := cc.Cmds.Strs.PathSep
cmdName := cmd.DisplayPath(sep, true)
printer := NewTipBoxPrinter(cc.Screen, env, true)
input := cmd.ParseResult.Input
printer.PrintWrap("[" + cmdName + "] parse args failed, '" +
strings.Join(input, " ") + "' is not valid input.")
printer.Prints("", "command detail:", "")
dumpArgs := NewDumpCmdArgs().NoFlatten().NoRecursive()
DumpCmds(cmd.Last().Matched.Cmd, printer, env, dumpArgs)
printer.Finish()
return false
}
func PrintSubCmdByParseError(
cc *core.Cli,
flow *core.ParsedCmds,
cmd core.ParsedCmd,
env *core.Env,
isSearch bool,
isMore bool) bool {
sep := cc.Cmds.Strs.PathSep
cmdName := cmd.DisplayPath(sep, true)
printer := NewTipBoxPrinter(cc.Screen, env, true)
input := cmd.ParseResult.Input
last := cmd.LastCmdNode()
if last == nil {
return PrintFreeSearchResultByParseError(cc, flow, env, isSearch, isMore, input...)
}
printer.PrintWrap("[" + cmdName + "] parse sub command failed, '" +
strings.Join(input, " ") + "' is not valid input.")
if last.HasSub() {
printer.Prints("", "commands on branch '"+last.DisplayPath()+"':", "")
dumpArgs := NewDumpCmdArgs().SetSkeleton()
DumpCmds(last, printer, env, dumpArgs)
} else {
printer.Prints("", "command branch '"+last.DisplayPath()+"' doesn't have any sub commands.")
// TODO: search hint
}
printer.Finish()
return false
}
func PrintFreeSearchResultByParseError(
cc *core.Cli,
flow *core.ParsedCmds,
env *core.Env,
isSearch bool,
isMore bool,
findStr ...string) bool {
selfName := env.GetRaw("strs.self-name")
input := findStr
inputStr := strings.Join(input, " ")
notValidStr := "'" + inputStr + "' is not valid input."
var lines int
for len(input) > 0 {
screen := NewCacheScreen()
dumpArgs := NewDumpCmdArgs().AddFindStrs(input...)
dumpArgs.Skeleton = !isMore
DumpCmds(cc.Cmds, screen, env, dumpArgs)
lines = screen.OutputNum()
if lines <= 0 {
input = input[:len(input)-1]
continue
}
helpStr := []string{
"search and found commands matched '" + strings.Join(input, " ") + "':",
}
if !isSearch {
helpStr = append([]string{notValidStr, ""}, helpStr...)
}
PrintTipTitle(cc.Screen, env, helpStr)
screen.WriteTo(cc.Screen)
return false
}
helpStr := []string{
"search but no commands matched '" + inputStr + "'.",
"",
"try to change keywords on the leftside, ",
selfName + " will filter results by kewords from left to right.",
}
if !isSearch {
helpStr = append([]string{notValidStr, ""}, helpStr...)
}
PrintTipTitle(cc.Screen, env, helpStr)
return false
}
func PrintFindResultByParseError(
cc *core.Cli,
cmd core.ParsedCmd,
env *core.Env,
title string) bool {
input := cmd.ParseResult.Input
inputStr := strings.Join(input, " ")
screen := NewCacheScreen()
dumpArgs := NewDumpCmdArgs().SetSkeleton().AddFindStrs(input...)
DumpCmds(cc.Cmds, screen, env, dumpArgs)
if len(title) == 0 {
title = cmd.ParseResult.Error.Error()
}
if screen.OutputNum() > 0 {
PrintTipTitle(cc.Screen, env,
title,
"",
"'"+inputStr+"' is not valid input, found related commands by search:")
screen.WriteTo(cc.Screen)
} else {
PrintTipTitle(cc.Screen, env,
title,
"",
"'"+inputStr+"' is not valid input and no related commands found.",
"",
"try to change input,", "or search commands by:", "",
SuggestFindCmds(env),
"")
}
return false
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
inttests/storagepool_test.go
|
/*
*
* Copyright © 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package inttests
import (
"fmt"
"os"
"testing"
"github.com/dell/goscaleio"
"github.com/stretchr/testify/assert"
)
// getStoragePoolName returns GOSCALEIO_STORAGEPOOL, if set
// if not set, returns the first storage pool found
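// Illustrative invocation (the pool name is an assumption):
//   GOSCALEIO_STORAGEPOOL=pool01 go test ./inttests -run TestGetStoragePools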
func getStoragePoolName(t *testing.T) string {
if os.Getenv("GOSCALEIO_STORAGEPOOL") != "" {
return os.Getenv("GOSCALEIO_STORAGEPOOL")
}
system := getSystem()
assert.NotNil(t, system)
pd := getProtectionDomain(t)
assert.NotNil(t, pd)
if pd == nil {
return ""
}
pools, err := pd.GetStoragePool("")
assert.Nil(t, err)
assert.NotZero(t, len(pools))
if pools == nil {
return ""
}
return pools[0].Name
}
// getStoragePool returns the StoragePool with the name returned by getStoragePoolName
func getStoragePool(t *testing.T) *goscaleio.StoragePool {
pd := getProtectionDomain(t)
assert.NotNil(t, pd)
if pd == nil {
return nil
}
name := getStoragePoolName(t)
assert.NotEqual(t, name, "")
pool, err := pd.FindStoragePool("", name, "")
assert.Nil(t, err)
assert.NotNil(t, pool)
if pool == nil {
return nil
}
// create a StoragePool instance to return
outPool := goscaleio.NewStoragePoolEx(C, pool)
	// create a StoragePool via NewStoragePool to test
tempPool := goscaleio.NewStoragePool(C)
tempPool.StoragePool = pool
assert.Equal(t, outPool.StoragePool.ID, tempPool.StoragePool.ID)
return outPool
}
// TestGetStoragePools will return all storage pools
func TestGetStoragePools(t *testing.T) {
system := getSystem()
assert.NotNil(t, system)
pd := getProtectionDomain(t)
assert.NotNil(t, pd)
if pd != nil {
pools, err := pd.GetStoragePool("")
assert.Nil(t, err)
assert.NotZero(t, len(pools))
}
}
// TestGetStoragePoolByName gets a single specific StoragePool by Name
func TestGetStoragePoolByName(t *testing.T) {
pd := getProtectionDomain(t)
assert.NotNil(t, pd)
pool := getStoragePool(t)
assert.NotNil(t, pool)
if pd != nil && pool != nil {
foundPool, err := pd.FindStoragePool("", pool.StoragePool.Name, "")
assert.Nil(t, err)
assert.Equal(t, foundPool.Name, pool.StoragePool.Name)
}
}
// TestGetStoragePoolByID gets a single specific StoragePool by ID
func TestGetStoragePoolByID(t *testing.T) {
pd := getProtectionDomain(t)
assert.NotNil(t, pd)
pool := getStoragePool(t)
assert.NotNil(t, pool)
if pd != nil && pool != nil {
foundPool, err := pd.FindStoragePool(pool.StoragePool.ID, "", "")
assert.Nil(t, err)
assert.Equal(t, foundPool.ID, pool.StoragePool.ID)
}
}
// TestGetStoragePoolByNameInvalid attempts to get a StoragePool that does not exist
func TestGetStoragePoolByNameInvalid(t *testing.T) {
pd := getProtectionDomain(t)
assert.NotNil(t, pd)
pool, err := pd.FindStoragePool("", invalidIdentifier, "")
assert.NotNil(t, err)
assert.Nil(t, pool)
}
// TestGetStoragePoolByIDInvalid attempts to get a StoragePool that does not exist
func TestGetStoragePoolByIDInvalid(t *testing.T) {
pd := getProtectionDomain(t)
assert.NotNil(t, pd)
pool, err := pd.FindStoragePool(invalidIdentifier, "", "")
assert.NotNil(t, err)
assert.Nil(t, pool)
}
// TestGetStoragePoolStatistics
func TestGetStoragePoolStatistics(t *testing.T) {
pool := getStoragePool(t)
assert.NotNil(t, pool)
stats, err := pool.GetStatistics()
assert.Nil(t, err)
assert.NotNil(t, stats)
}
func TestGetInstanceStoragePool(t *testing.T) {
name := getStoragePoolName(t)
	assert.NotEqual(t, "", name)
// Find by name
pool, err := C.FindStoragePool("", name, "")
assert.Nil(t, err)
assert.NotNil(t, pool)
// Find by ID
pool, err = C.FindStoragePool(pool.ID, "", "")
assert.Nil(t, err)
assert.NotNil(t, pool)
// Find by href
href := fmt.Sprintf("/api/instances/StoragePool::%s", pool.ID)
pool, err = C.FindStoragePool("", "", href)
assert.Nil(t, err)
assert.NotNil(t, pool)
// Find with invalid name
pool, err = C.FindStoragePool("", invalidIdentifier, "")
assert.NotNil(t, err)
assert.Nil(t, pool)
// Find with invalid ID
pool, err = C.FindStoragePool(invalidIdentifier, "", "")
assert.NotNil(t, err)
assert.Nil(t, pool)
// Find with invalid href
	href = "/api/badurl/willnotwork"
pool, err = C.FindStoragePool("", "", href)
assert.NotNil(t, err)
assert.Nil(t, pool)
}
func TestCreateDeleteStoragePool(t *testing.T) {
domain := getProtectionDomain(t)
assert.NotNil(t, domain)
poolName := fmt.Sprintf("%s-%s", testPrefix, "StoragePool")
// create the pool
poolID, err := domain.CreateStoragePool(poolName, "")
assert.Nil(t, err)
assert.NotNil(t, poolID)
// try to create a pool that exists
poolID, err = domain.CreateStoragePool(poolName, "")
assert.NotNil(t, err)
assert.Equal(t, "", poolID)
// delete the pool
err = domain.DeleteStoragePool(poolName)
assert.Nil(t, err)
	// try to delete a non-existent storage pool
err = domain.DeleteStoragePool(invalidIdentifier)
assert.NotNil(t, err)
}
|
[
"\"GOSCALEIO_STORAGEPOOL\"",
"\"GOSCALEIO_STORAGEPOOL\""
] |
[] |
[
"GOSCALEIO_STORAGEPOOL"
] |
[]
|
["GOSCALEIO_STORAGEPOOL"]
|
go
| 1 | 0 | |